1 /* Subroutines used for MIPS code generation.
2 Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
5 Contributed by A. Lichnewsky, lich@inria.inria.fr.
6 Changes by Michael Meissner, meissner@osf.org.
7 64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
8 Brendan Eich, brendan@microunity.com.
10 This file is part of GCC.
12 GCC is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 3, or (at your option)
17 GCC is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with GCC; see the file COPYING3. If not see
24 <http://www.gnu.org/licenses/>. */
28 #include "coretypes.h"
33 #include "hard-reg-set.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "insn-attr.h"
53 #include "target-def.h"
54 #include "integrate.h"
55 #include "langhooks.h"
56 #include "cfglayout.h"
57 #include "sched-int.h"
58 #include "tree-gimple.h"
60 #include "diagnostic.h"
62 /* True if X is an unspec wrapper around a SYMBOL_REF or LABEL_REF. */
63 #define UNSPEC_ADDRESS_P(X) \
64 (GET_CODE (X) == UNSPEC \
65 && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
66 && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
68 /* Extract the symbol or label from UNSPEC wrapper X. */
69 #define UNSPEC_ADDRESS(X) \
72 /* Extract the symbol type from UNSPEC wrapper X. */
73 #define UNSPEC_ADDRESS_TYPE(X) \
74 ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
76 /* The maximum distance between the top of the stack frame and the
77 value $sp has when we save and restore registers.
79 The value for normal-mode code must be a SMALL_OPERAND and must
80 preserve the maximum stack alignment. We therefore use a value
81 of 0x7ff0 in this case.
83 MIPS16e SAVE and RESTORE instructions can adjust the stack pointer by
84 up to 0x7f8 bytes and can usually save or restore all the registers
85 that we need to save or restore. (Note that we can only use these
86 instructions for o32, for which the stack alignment is 8 bytes.)
88 We use a maximum gap of 0x100 or 0x400 for MIPS16 code when SAVE and
89 RESTORE are not available. We can then use unextended instructions
90 to save and restore registers, and to allocate and deallocate the top
92 #define MIPS_MAX_FIRST_STACK_STEP \
93 (!TARGET_MIPS16 ? 0x7ff0 \
94 : GENERATE_MIPS16E_SAVE_RESTORE ? 0x7f8 \
95 : TARGET_64BIT ? 0x100 : 0x400)
97 /* True if INSN is a mips.md pattern or asm statement. */
98 #define USEFUL_INSN_P(INSN) \
100 && GET_CODE (PATTERN (INSN)) != USE \
101 && GET_CODE (PATTERN (INSN)) != CLOBBER \
102 && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
103 && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
105 /* If INSN is a delayed branch sequence, return the first instruction
106 in the sequence, otherwise return INSN itself. */
107 #define SEQ_BEGIN(INSN) \
108 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
109 ? XVECEXP (PATTERN (INSN), 0, 0) \
112 /* Likewise for the last instruction in a delayed branch sequence. */
113 #define SEQ_END(INSN) \
114 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
115 ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1) \
118 /* Execute the following loop body with SUBINSN set to each instruction
119 between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive. */
120 #define FOR_EACH_SUBINSN(SUBINSN, INSN) \
121 for ((SUBINSN) = SEQ_BEGIN (INSN); \
122 (SUBINSN) != NEXT_INSN (SEQ_END (INSN)); \
123 (SUBINSN) = NEXT_INSN (SUBINSN))
125 /* True if bit BIT is set in VALUE. */
126 #define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0)
128 /* Classifies an address.
131 A natural register + offset address. The register satisfies
132 mips_valid_base_register_p and the offset is a const_arith_operand.
135 A LO_SUM rtx. The first operand is a valid base register and
136 the second operand is a symbolic address.
139 A signed 16-bit constant address.
142 A constant symbolic address (equivalent to CONSTANT_SYMBOLIC). */
143 enum mips_address_type {
150 /* Classifies the prototype of a builtin function. */
151 enum mips_function_type
153 MIPS_V2SF_FTYPE_V2SF,
154 MIPS_V2SF_FTYPE_V2SF_V2SF,
155 MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
156 MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,
157 MIPS_V2SF_FTYPE_SF_SF,
158 MIPS_INT_FTYPE_V2SF_V2SF,
159 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF,
160 MIPS_INT_FTYPE_SF_SF,
161 MIPS_INT_FTYPE_DF_DF,
168 /* For MIPS DSP ASE */
170 MIPS_DI_FTYPE_DI_SI_SI,
171 MIPS_DI_FTYPE_DI_V2HI_V2HI,
172 MIPS_DI_FTYPE_DI_V4QI_V4QI,
174 MIPS_SI_FTYPE_PTR_SI,
178 MIPS_SI_FTYPE_V2HI_V2HI,
180 MIPS_SI_FTYPE_V4QI_V4QI,
183 MIPS_V2HI_FTYPE_SI_SI,
184 MIPS_V2HI_FTYPE_V2HI,
185 MIPS_V2HI_FTYPE_V2HI_SI,
186 MIPS_V2HI_FTYPE_V2HI_V2HI,
187 MIPS_V2HI_FTYPE_V4QI,
188 MIPS_V2HI_FTYPE_V4QI_V2HI,
190 MIPS_V4QI_FTYPE_V2HI_V2HI,
191 MIPS_V4QI_FTYPE_V4QI_SI,
192 MIPS_V4QI_FTYPE_V4QI_V4QI,
193 MIPS_VOID_FTYPE_SI_SI,
194 MIPS_VOID_FTYPE_V2HI_V2HI,
195 MIPS_VOID_FTYPE_V4QI_V4QI,
197 /* For MIPS DSP REV 2 ASE. */
198 MIPS_V4QI_FTYPE_V4QI,
199 MIPS_SI_FTYPE_SI_SI_SI,
200 MIPS_DI_FTYPE_DI_USI_USI,
202 MIPS_DI_FTYPE_USI_USI,
203 MIPS_V2HI_FTYPE_SI_SI_SI,
209 /* Specifies how a builtin function should be converted into rtl. */
210 enum mips_builtin_type
212 /* The builtin corresponds directly to an .md pattern. The return
213 value is mapped to operand 0 and the arguments are mapped to
214 operands 1 and above. */
217 /* The builtin corresponds directly to an .md pattern. There is no return
218 value and the arguments are mapped to operands 0 and above. */
219 MIPS_BUILTIN_DIRECT_NO_TARGET,
221 /* The builtin corresponds to a comparison instruction followed by
222 a mips_cond_move_tf_ps pattern. The first two arguments are the
223 values to compare and the second two arguments are the vector
224 operands for the movt.ps or movf.ps instruction (in assembly order). */
228 /* The builtin corresponds to a V2SF comparison instruction. Operand 0
229 of this instruction is the result of the comparison, which has mode
230 CCV2 or CCV4. The function arguments are mapped to operands 1 and
231 above. The function's return value is an SImode boolean that is
232 true under the following conditions:
234 MIPS_BUILTIN_CMP_ANY: one of the registers is true
235 MIPS_BUILTIN_CMP_ALL: all of the registers are true
236 MIPS_BUILTIN_CMP_LOWER: the first register is true
237 MIPS_BUILTIN_CMP_UPPER: the second register is true. */
238 MIPS_BUILTIN_CMP_ANY,
239 MIPS_BUILTIN_CMP_ALL,
240 MIPS_BUILTIN_CMP_UPPER,
241 MIPS_BUILTIN_CMP_LOWER,
243 /* As above, but the instruction only sets a single $fcc register. */
244 MIPS_BUILTIN_CMP_SINGLE,
246 /* For generating bposge32 branch instructions in MIPS32 DSP ASE. */
247 MIPS_BUILTIN_BPOSGE32
250 /* Invokes MACRO (COND) for each c.cond.fmt condition. */
251 #define MIPS_FP_CONDITIONS(MACRO) \
269 /* Enumerates the codes above as MIPS_FP_COND_<X>. */
270 #define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
271 enum mips_fp_condition {
272 MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
275 /* Index X provides the string representation of MIPS_FP_COND_<X>. */
276 #define STRINGIFY(X) #X
277 static const char *const mips_fp_conditions[] = {
278 MIPS_FP_CONDITIONS (STRINGIFY)
281 /* A function to save or store a register. The first argument is the
282 register and the second is the stack slot. */
283 typedef void (*mips_save_restore_fn) (rtx, rtx);
285 struct mips16_constant;
286 struct mips_arg_info;
287 struct mips_address_info;
288 struct mips_integer_op;
291 static bool mips_valid_base_register_p (rtx, enum machine_mode, int);
292 static bool mips_classify_address (struct mips_address_info *, rtx,
293 enum machine_mode, int);
294 static bool mips_cannot_force_const_mem (rtx);
295 static bool mips_use_blocks_for_constant_p (enum machine_mode, const_rtx);
296 static int mips_symbol_insns (enum mips_symbol_type, enum machine_mode);
297 static bool mips16_unextended_reference_p (enum machine_mode mode, rtx, rtx);
298 static rtx mips_force_temporary (rtx, rtx);
299 static rtx mips_unspec_offset_high (rtx, rtx, rtx, enum mips_symbol_type);
300 static rtx mips_add_offset (rtx, rtx, HOST_WIDE_INT);
301 static unsigned int mips_build_shift (struct mips_integer_op *, HOST_WIDE_INT);
302 static unsigned int mips_build_lower (struct mips_integer_op *,
303 unsigned HOST_WIDE_INT);
304 static unsigned int mips_build_integer (struct mips_integer_op *,
305 unsigned HOST_WIDE_INT);
306 static void mips_legitimize_const_move (enum machine_mode, rtx, rtx);
307 static int m16_check_op (rtx, int, int, int);
308 static bool mips_rtx_costs (rtx, int, int, int *);
309 static int mips_address_cost (rtx);
310 static void mips_emit_compare (enum rtx_code *, rtx *, rtx *, bool);
311 static void mips_load_call_address (rtx, rtx, int);
312 static bool mips_function_ok_for_sibcall (tree, tree);
313 static void mips_block_move_straight (rtx, rtx, HOST_WIDE_INT);
314 static void mips_adjust_block_mem (rtx, HOST_WIDE_INT, rtx *, rtx *);
315 static void mips_block_move_loop (rtx, rtx, HOST_WIDE_INT);
316 static void mips_arg_info (const CUMULATIVE_ARGS *, enum machine_mode,
317 tree, int, struct mips_arg_info *);
318 static bool mips_get_unaligned_mem (rtx *, unsigned int, int, rtx *, rtx *);
319 static void mips_set_architecture (const struct mips_cpu_info *);
320 static void mips_set_tune (const struct mips_cpu_info *);
321 static bool mips_handle_option (size_t, const char *, int);
322 static struct machine_function *mips_init_machine_status (void);
323 static void print_operand_reloc (FILE *, rtx, enum mips_symbol_context,
325 static void mips_file_start (void);
326 static int mips_small_data_pattern_1 (rtx *, void *);
327 static int mips_rewrite_small_data_1 (rtx *, void *);
328 static bool mips_function_has_gp_insn (void);
329 static unsigned int mips_global_pointer (void);
330 static bool mips_save_reg_p (unsigned int);
331 static void mips_save_restore_reg (enum machine_mode, int, HOST_WIDE_INT,
332 mips_save_restore_fn);
333 static void mips_for_each_saved_reg (HOST_WIDE_INT, mips_save_restore_fn);
334 static void mips_output_cplocal (void);
335 static void mips_emit_loadgp (void);
336 static void mips_output_function_prologue (FILE *, HOST_WIDE_INT);
337 static void mips_set_frame_expr (rtx);
338 static rtx mips_frame_set (rtx, rtx);
339 static void mips_save_reg (rtx, rtx);
340 static void mips_output_function_epilogue (FILE *, HOST_WIDE_INT);
341 static void mips_restore_reg (rtx, rtx);
342 static void mips_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
343 HOST_WIDE_INT, tree);
344 static section *mips_select_rtx_section (enum machine_mode, rtx,
345 unsigned HOST_WIDE_INT);
346 static section *mips_function_rodata_section (tree);
347 static bool mips_in_small_data_p (const_tree);
348 static bool mips_use_anchors_for_symbol_p (const_rtx);
349 static int mips_fpr_return_fields (const_tree, tree *);
350 static bool mips_return_in_msb (const_tree);
351 static rtx mips_return_fpr_pair (enum machine_mode mode,
352 enum machine_mode mode1, HOST_WIDE_INT,
353 enum machine_mode mode2, HOST_WIDE_INT);
354 static rtx mips16_gp_pseudo_reg (void);
355 static void mips16_fp_args (FILE *, int, int);
356 static void build_mips16_function_stub (FILE *);
357 static rtx dump_constants_1 (enum machine_mode, rtx, rtx);
358 static void dump_constants (struct mips16_constant *, rtx);
359 static int mips16_insn_length (rtx);
360 static int mips16_rewrite_pool_refs (rtx *, void *);
361 static void mips16_lay_out_constants (void);
362 static void mips_sim_reset (struct mips_sim *);
363 static void mips_sim_init (struct mips_sim *, state_t);
364 static void mips_sim_next_cycle (struct mips_sim *);
365 static void mips_sim_wait_reg (struct mips_sim *, rtx, rtx);
366 static int mips_sim_wait_regs_2 (rtx *, void *);
367 static void mips_sim_wait_regs_1 (rtx *, void *);
368 static void mips_sim_wait_regs (struct mips_sim *, rtx);
369 static void mips_sim_wait_units (struct mips_sim *, rtx);
370 static void mips_sim_wait_insn (struct mips_sim *, rtx);
371 static void mips_sim_record_set (rtx, const_rtx, void *);
372 static void mips_sim_issue_insn (struct mips_sim *, rtx);
373 static void mips_sim_issue_nop (struct mips_sim *);
374 static void mips_sim_finish_insn (struct mips_sim *, rtx);
375 static void vr4130_avoid_branch_rt_conflict (rtx);
376 static void vr4130_align_insns (void);
377 static void mips_avoid_hazard (rtx, rtx, int *, rtx *, rtx);
378 static void mips_avoid_hazards (void);
379 static void mips_reorg (void);
380 static bool mips_strict_matching_cpu_name_p (const char *, const char *);
381 static bool mips_matching_cpu_name_p (const char *, const char *);
382 static const struct mips_cpu_info *mips_parse_cpu (const char *);
383 static const struct mips_cpu_info *mips_cpu_info_from_isa (int);
384 static bool mips_return_in_memory (const_tree, const_tree);
385 static bool mips_strict_argument_naming (CUMULATIVE_ARGS *);
386 static void mips_macc_chains_record (rtx);
387 static void mips_macc_chains_reorder (rtx *, int);
388 static void vr4130_true_reg_dependence_p_1 (rtx, const_rtx, void *);
389 static bool vr4130_true_reg_dependence_p (rtx);
390 static bool vr4130_swap_insns_p (rtx, rtx);
391 static void vr4130_reorder (rtx *, int);
392 static void mips_promote_ready (rtx *, int, int);
393 static void mips_sched_init (FILE *, int, int);
394 static int mips_sched_reorder (FILE *, int, rtx *, int *, int);
395 static int mips_variable_issue (FILE *, int, rtx, int);
396 static int mips_adjust_cost (rtx, rtx, rtx, int);
397 static int mips_issue_rate (void);
398 static int mips_multipass_dfa_lookahead (void);
399 static void mips_init_libfuncs (void);
400 static void mips_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
402 static tree mips_build_builtin_va_list (void);
403 static tree mips_gimplify_va_arg_expr (tree, tree, tree *, tree *);
404 static bool mips_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode mode,
406 static bool mips_callee_copies (CUMULATIVE_ARGS *, enum machine_mode mode,
408 static int mips_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode mode,
410 static bool mips_valid_pointer_mode (enum machine_mode);
411 static bool mips_scalar_mode_supported_p (enum machine_mode);
412 static bool mips_vector_mode_supported_p (enum machine_mode);
413 static rtx mips_prepare_builtin_arg (enum insn_code, unsigned int, tree, unsigned int);
414 static rtx mips_prepare_builtin_target (enum insn_code, unsigned int, rtx);
415 static rtx mips_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
416 static void mips_init_builtins (void);
417 static rtx mips_expand_builtin_direct (enum insn_code, rtx, tree, bool);
418 static rtx mips_expand_builtin_movtf (enum mips_builtin_type,
419 enum insn_code, enum mips_fp_condition,
421 static rtx mips_expand_builtin_compare (enum mips_builtin_type,
422 enum insn_code, enum mips_fp_condition,
424 static rtx mips_expand_builtin_bposge (enum mips_builtin_type, rtx);
425 static void mips_encode_section_info (tree, rtx, int);
426 static void mips_extra_live_on_entry (bitmap);
427 static int mips_comp_type_attributes (const_tree, const_tree);
428 static void mips_set_mips16_mode (int);
429 static void mips_set_current_function (tree);
430 static int mips_mode_rep_extended (enum machine_mode, enum machine_mode);
431 static bool mips_offset_within_alignment_p (rtx, HOST_WIDE_INT);
432 static void mips_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
434 /* Structure to be filled in by compute_frame_size with register
435 save masks, and offsets for the current function. */
437 struct mips_frame_info GTY(())
439 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
440 HOST_WIDE_INT var_size; /* # bytes that variables take up */
441 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
442 HOST_WIDE_INT cprestore_size; /* # bytes that the .cprestore slot takes up */
443 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
444 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
445 unsigned int mask; /* mask of saved gp registers */
446 unsigned int fmask; /* mask of saved fp registers */
447 HOST_WIDE_INT gp_save_offset; /* offset from vfp to store gp registers */
448 HOST_WIDE_INT fp_save_offset; /* offset from vfp to store fp registers */
449 HOST_WIDE_INT gp_sp_offset; /* offset from new sp to store gp registers */
450 HOST_WIDE_INT fp_sp_offset; /* offset from new sp to store fp registers */
451 bool initialized; /* true if frame size already calculated */
452 int num_gp; /* number of gp registers saved */
453 int num_fp; /* number of fp registers saved */
456 struct machine_function GTY(()) {
457 /* Pseudo-reg holding the value of $28 in a mips16 function which
458 refers to GP relative global variables. */
459 rtx mips16_gp_pseudo_rtx;
461 /* The number of extra stack bytes taken up by register varargs.
462 This area is allocated by the callee at the very top of the frame. */
465 /* Current frame information, calculated by compute_frame_size. */
466 struct mips_frame_info frame;
468 /* The register to use as the global pointer within this function. */
469 unsigned int global_pointer;
471 /* True if mips_adjust_insn_length should ignore an instruction's
473 bool ignore_hazard_length_p;
475 /* True if the whole function is suitable for .set noreorder and
477 bool all_noreorder_p;
479 /* True if the function is known to have an instruction that needs $gp. */
482 /* True if we have emitted an instruction to initialize
483 mips16_gp_pseudo_rtx. */
484 bool initialized_mips16_gp_pseudo_p;
487 /* Information about a single argument. */
490 /* True if the argument is passed in a floating-point register, or
491 would have been if we hadn't run out of registers. */
494 /* The number of words passed in registers, rounded up. */
495 unsigned int reg_words;
497 /* For EABI, the offset of the first register from GP_ARG_FIRST or
498 FP_ARG_FIRST. For other ABIs, the offset of the first register from
499 the start of the ABI's argument structure (see the CUMULATIVE_ARGS
500 comment for details).
502 The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
504 unsigned int reg_offset;
506 /* The number of words that must be passed on the stack, rounded up. */
507 unsigned int stack_words;
509 /* The offset from the start of the stack overflow area of the argument's
510 first stack word. Only meaningful when STACK_WORDS is nonzero. */
511 unsigned int stack_offset;
515 /* Information about an address described by mips_address_type.
521 REG is the base register and OFFSET is the constant offset.
524 REG is the register that contains the high part of the address,
525 OFFSET is the symbolic address being referenced and SYMBOL_TYPE
526 is the type of OFFSET's symbol.
529 SYMBOL_TYPE is the type of symbol being referenced. */
531 struct mips_address_info
533 enum mips_address_type type;
536 enum mips_symbol_type symbol_type;
540 /* One stage in a constant building sequence. These sequences have
544 A = A CODE[1] VALUE[1]
545 A = A CODE[2] VALUE[2]
548 where A is an accumulator, each CODE[i] is a binary rtl operation
549 and each VALUE[i] is a constant integer. */
550 struct mips_integer_op {
552 unsigned HOST_WIDE_INT value;
556 /* The largest number of operations needed to load an integer constant.
557 The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
558 When the lowest bit is clear, we can try, but reject a sequence with
559 an extra SLL at the end. */
560 #define MIPS_MAX_INTEGER_OPS 7
562 /* Information about a MIPS16e SAVE or RESTORE instruction. */
563 struct mips16e_save_restore_info {
564 /* The number of argument registers saved by a SAVE instruction.
565 0 for RESTORE instructions. */
568 /* Bit X is set if the instruction saves or restores GPR X. */
571 /* The total number of bytes to allocate. */
575 /* Global variables for machine-dependent things. */
577 /* Threshold for data being put into the small data/bss area, instead
578 of the normal data area. */
579 int mips_section_threshold = -1;
581 /* Count the number of .file directives, so that .loc is up to date. */
582 int num_source_filenames = 0;
584 /* Count the number of sdb related labels are generated (to find block
585 start and end boundaries). */
586 int sdb_label_count = 0;
588 /* Next label # for each statement for Silicon Graphics IRIS systems. */
591 /* Name of the file containing the current function. */
592 const char *current_function_file = "";
594 /* Number of nested .set noreorder, noat, nomacro, and volatile requests. */
600 /* The next branch instruction is a branch likely, not branch normal. */
601 int mips_branch_likely;
603 /* The operands passed to the last cmpMM expander. */
606 /* The target cpu for code generation. */
607 enum processor_type mips_arch;
608 const struct mips_cpu_info *mips_arch_info;
610 /* The target cpu for optimization and scheduling. */
611 enum processor_type mips_tune;
612 const struct mips_cpu_info *mips_tune_info;
614 /* Which instruction set architecture to use. */
617 /* Which ABI to use. */
618 int mips_abi = MIPS_ABI_DEFAULT;
620 /* Cost information to use. */
621 const struct mips_rtx_cost_data *mips_cost;
623 /* Remember the ambient target flags, excluding mips16. */
624 static int mips_base_target_flags;
625 /* The mips16 command-line target flags only. */
626 static bool mips_base_mips16;
627 /* Similar copies of option settings. */
628 static int mips_base_schedule_insns; /* flag_schedule_insns */
629 static int mips_base_reorder_blocks_and_partition; /* flag_reorder... */
630 static int mips_base_move_loop_invariants; /* flag_move_loop_invariants */
631 static int mips_base_align_loops; /* align_loops */
632 static int mips_base_align_jumps; /* align_jumps */
633 static int mips_base_align_functions; /* align_functions */
634 static GTY(()) int mips16_flipper;
636 /* The -mtext-loads setting. */
637 enum mips_code_readable_setting mips_code_readable = CODE_READABLE_YES;
639 /* The architecture selected by -mipsN. */
640 static const struct mips_cpu_info *mips_isa_info;
642 /* If TRUE, we split addresses into their high and low parts in the RTL. */
643 int mips_split_addresses;
645 /* Mode used for saving/restoring general purpose registers. */
646 static enum machine_mode gpr_mode;
648 /* Array giving truth value on whether or not a given hard register
649 can support a given mode. */
650 char mips_hard_regno_mode_ok[(int)MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
652 /* List of all MIPS punctuation characters used by print_operand. */
653 char mips_print_operand_punct[256];
655 /* Map GCC register number to debugger register number. */
656 int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
657 int mips_dwarf_regno[FIRST_PSEUDO_REGISTER];
659 /* A copy of the original flag_delayed_branch: see override_options. */
660 static int mips_flag_delayed_branch;
662 static GTY (()) int mips_output_filename_first_time = 1;
664 /* mips_split_p[X] is true if symbols of type X can be split by
665 mips_split_symbol(). */
666 bool mips_split_p[NUM_SYMBOL_TYPES];
668 /* mips_lo_relocs[X] is the relocation to use when a symbol of type X
669 appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
670 if they are matched by a special .md file pattern. */
671 static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];
673 /* Likewise for HIGHs. */
674 static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
676 /* Map hard register number to register class */
677 const enum reg_class mips_regno_to_class[] =
679 LEA_REGS, LEA_REGS, M16_NA_REGS, V1_REG,
680 M16_REGS, M16_REGS, M16_REGS, M16_REGS,
681 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
682 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
683 M16_NA_REGS, M16_NA_REGS, LEA_REGS, LEA_REGS,
684 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
685 T_REG, PIC_FN_ADDR_REG, LEA_REGS, LEA_REGS,
686 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
687 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
688 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
689 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
690 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
691 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
692 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
693 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
694 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
695 MD0_REG, MD1_REG, NO_REGS, ST_REGS,
696 ST_REGS, ST_REGS, ST_REGS, ST_REGS,
697 ST_REGS, ST_REGS, ST_REGS, NO_REGS,
698 NO_REGS, ALL_REGS, ALL_REGS, NO_REGS,
699 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
700 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
701 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
702 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
703 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
704 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
705 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
706 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
707 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
708 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
709 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
710 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
711 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
712 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
713 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
714 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
715 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
716 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
717 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
718 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
719 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
720 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
721 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
722 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
723 DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS,
724 DSP_ACC_REGS, DSP_ACC_REGS, ALL_REGS, ALL_REGS,
725 ALL_REGS, ALL_REGS, ALL_REGS, ALL_REGS
728 /* Table of machine dependent attributes. */
729 const struct attribute_spec mips_attribute_table[] =
731 { "long_call", 0, 0, false, true, true, NULL },
732 { "far", 0, 0, false, true, true, NULL },
733 { "near", 0, 0, false, true, true, NULL },
734 /* Switch MIPS16 ASE on and off per-function. */
735 { "mips16", 0, 0, false, true, true, NULL },
736 { "nomips16", 0, 0, false, true, true, NULL },
737 { NULL, 0, 0, false, false, false, NULL }
740 /* A table describing all the processors gcc knows about. Names are
741 matched in the order listed. The first mention of an ISA level is
742 taken as the canonical name for that ISA.
744 To ease comparison, please keep this table in the same order
745 as gas's mips_cpu_info_table[]. Please also make sure that
746 MIPS_ISA_LEVEL_SPEC and MIPS_ARCH_FLOAT_SPEC handle all -march
747 options correctly. */
748 const struct mips_cpu_info mips_cpu_info_table[] = {
749 /* Entries for generic ISAs */
750 { "mips1", PROCESSOR_R3000, 1 },
751 { "mips2", PROCESSOR_R6000, 2 },
752 { "mips3", PROCESSOR_R4000, 3 },
753 { "mips4", PROCESSOR_R8000, 4 },
754 { "mips32", PROCESSOR_4KC, 32 },
755 { "mips32r2", PROCESSOR_M4K, 33 },
756 { "mips64", PROCESSOR_5KC, 64 },
759 { "r3000", PROCESSOR_R3000, 1 },
760 { "r2000", PROCESSOR_R3000, 1 }, /* = r3000 */
761 { "r3900", PROCESSOR_R3900, 1 },
764 { "r6000", PROCESSOR_R6000, 2 },
767 { "r4000", PROCESSOR_R4000, 3 },
768 { "vr4100", PROCESSOR_R4100, 3 },
769 { "vr4111", PROCESSOR_R4111, 3 },
770 { "vr4120", PROCESSOR_R4120, 3 },
771 { "vr4130", PROCESSOR_R4130, 3 },
772 { "vr4300", PROCESSOR_R4300, 3 },
773 { "r4400", PROCESSOR_R4000, 3 }, /* = r4000 */
774 { "r4600", PROCESSOR_R4600, 3 },
775 { "orion", PROCESSOR_R4600, 3 }, /* = r4600 */
776 { "r4650", PROCESSOR_R4650, 3 },
779 { "r8000", PROCESSOR_R8000, 4 },
780 { "vr5000", PROCESSOR_R5000, 4 },
781 { "vr5400", PROCESSOR_R5400, 4 },
782 { "vr5500", PROCESSOR_R5500, 4 },
783 { "rm7000", PROCESSOR_R7000, 4 },
784 { "rm9000", PROCESSOR_R9000, 4 },
787 { "4kc", PROCESSOR_4KC, 32 },
788 { "4km", PROCESSOR_4KC, 32 }, /* = 4kc */
789 { "4kp", PROCESSOR_4KP, 32 },
790 { "4ksc", PROCESSOR_4KC, 32 },
792 /* MIPS32 Release 2 */
793 { "m4k", PROCESSOR_M4K, 33 },
794 { "4kec", PROCESSOR_4KC, 33 },
795 { "4kem", PROCESSOR_4KC, 33 },
796 { "4kep", PROCESSOR_4KP, 33 },
797 { "4ksd", PROCESSOR_4KC, 33 },
799 { "24kc", PROCESSOR_24KC, 33 },
800 { "24kf2_1", PROCESSOR_24KF2_1, 33 },
801 { "24kf", PROCESSOR_24KF2_1, 33 },
802 { "24kf1_1", PROCESSOR_24KF1_1, 33 },
803 { "24kfx", PROCESSOR_24KF1_1, 33 },
804 { "24kx", PROCESSOR_24KF1_1, 33 },
806 { "24kec", PROCESSOR_24KC, 33 }, /* 24K with DSP */
807 { "24kef2_1", PROCESSOR_24KF2_1, 33 },
808 { "24kef", PROCESSOR_24KF2_1, 33 },
809 { "24kef1_1", PROCESSOR_24KF1_1, 33 },
810 { "24kefx", PROCESSOR_24KF1_1, 33 },
811 { "24kex", PROCESSOR_24KF1_1, 33 },
813 { "34kc", PROCESSOR_24KC, 33 }, /* 34K with MT/DSP */
814 { "34kf2_1", PROCESSOR_24KF2_1, 33 },
815 { "34kf", PROCESSOR_24KF2_1, 33 },
816 { "34kf1_1", PROCESSOR_24KF1_1, 33 },
817 { "34kfx", PROCESSOR_24KF1_1, 33 },
818 { "34kx", PROCESSOR_24KF1_1, 33 },
820 { "74kc", PROCESSOR_74KC, 33 }, /* 74K with DSPr2 */
821 { "74kf2_1", PROCESSOR_74KF2_1, 33 },
822 { "74kf", PROCESSOR_74KF2_1, 33 },
823 { "74kf1_1", PROCESSOR_74KF1_1, 33 },
824 { "74kfx", PROCESSOR_74KF1_1, 33 },
825 { "74kx", PROCESSOR_74KF1_1, 33 },
826 { "74kf3_2", PROCESSOR_74KF3_2, 33 },
829 { "5kc", PROCESSOR_5KC, 64 },
830 { "5kf", PROCESSOR_5KF, 64 },
831 { "20kc", PROCESSOR_20KC, 64 },
832 { "sb1", PROCESSOR_SB1, 64 },
833 { "sb1a", PROCESSOR_SB1A, 64 },
834 { "sr71000", PROCESSOR_SR71000, 64 },
840 /* Default costs. If these are used for a processor we should look
841 up the actual costs. */
842 #define DEFAULT_COSTS COSTS_N_INSNS (6), /* fp_add */ \
843 COSTS_N_INSNS (7), /* fp_mult_sf */ \
844 COSTS_N_INSNS (8), /* fp_mult_df */ \
845 COSTS_N_INSNS (23), /* fp_div_sf */ \
846 COSTS_N_INSNS (36), /* fp_div_df */ \
847 COSTS_N_INSNS (10), /* int_mult_si */ \
848 COSTS_N_INSNS (10), /* int_mult_di */ \
849 COSTS_N_INSNS (69), /* int_div_si */ \
850 COSTS_N_INSNS (69), /* int_div_di */ \
851 2, /* branch_cost */ \
852 4 /* memory_latency */
854 /* Need to replace these with the costs of calling the appropriate
856 #define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */ \
857 COSTS_N_INSNS (256), /* fp_mult_sf */ \
858 COSTS_N_INSNS (256), /* fp_mult_df */ \
859 COSTS_N_INSNS (256), /* fp_div_sf */ \
860 COSTS_N_INSNS (256) /* fp_div_df */
862 static struct mips_rtx_cost_data const mips_rtx_cost_optimize_size =
864 COSTS_N_INSNS (1), /* fp_add */
865 COSTS_N_INSNS (1), /* fp_mult_sf */
866 COSTS_N_INSNS (1), /* fp_mult_df */
867 COSTS_N_INSNS (1), /* fp_div_sf */
868 COSTS_N_INSNS (1), /* fp_div_df */
869 COSTS_N_INSNS (1), /* int_mult_si */
870 COSTS_N_INSNS (1), /* int_mult_di */
871 COSTS_N_INSNS (1), /* int_div_si */
872 COSTS_N_INSNS (1), /* int_div_di */
874 4 /* memory_latency */
877 static struct mips_rtx_cost_data const mips_rtx_cost_data[PROCESSOR_MAX] =
880 COSTS_N_INSNS (2), /* fp_add */
881 COSTS_N_INSNS (4), /* fp_mult_sf */
882 COSTS_N_INSNS (5), /* fp_mult_df */
883 COSTS_N_INSNS (12), /* fp_div_sf */
884 COSTS_N_INSNS (19), /* fp_div_df */
885 COSTS_N_INSNS (12), /* int_mult_si */
886 COSTS_N_INSNS (12), /* int_mult_di */
887 COSTS_N_INSNS (35), /* int_div_si */
888 COSTS_N_INSNS (35), /* int_div_di */
890 4 /* memory_latency */
895 COSTS_N_INSNS (6), /* int_mult_si */
896 COSTS_N_INSNS (6), /* int_mult_di */
897 COSTS_N_INSNS (36), /* int_div_si */
898 COSTS_N_INSNS (36), /* int_div_di */
900 4 /* memory_latency */
904 COSTS_N_INSNS (36), /* int_mult_si */
905 COSTS_N_INSNS (36), /* int_mult_di */
906 COSTS_N_INSNS (37), /* int_div_si */
907 COSTS_N_INSNS (37), /* int_div_di */
909 4 /* memory_latency */
913 COSTS_N_INSNS (4), /* int_mult_si */
914 COSTS_N_INSNS (11), /* int_mult_di */
915 COSTS_N_INSNS (36), /* int_div_si */
916 COSTS_N_INSNS (68), /* int_div_di */
918 4 /* memory_latency */
921 COSTS_N_INSNS (4), /* fp_add */
922 COSTS_N_INSNS (4), /* fp_mult_sf */
923 COSTS_N_INSNS (5), /* fp_mult_df */
924 COSTS_N_INSNS (17), /* fp_div_sf */
925 COSTS_N_INSNS (32), /* fp_div_df */
926 COSTS_N_INSNS (4), /* int_mult_si */
927 COSTS_N_INSNS (11), /* int_mult_di */
928 COSTS_N_INSNS (36), /* int_div_si */
929 COSTS_N_INSNS (68), /* int_div_di */
931 4 /* memory_latency */
934 COSTS_N_INSNS (4), /* fp_add */
935 COSTS_N_INSNS (4), /* fp_mult_sf */
936 COSTS_N_INSNS (5), /* fp_mult_df */
937 COSTS_N_INSNS (17), /* fp_div_sf */
938 COSTS_N_INSNS (32), /* fp_div_df */
939 COSTS_N_INSNS (4), /* int_mult_si */
940 COSTS_N_INSNS (7), /* int_mult_di */
941 COSTS_N_INSNS (42), /* int_div_si */
942 COSTS_N_INSNS (72), /* int_div_di */
944 4 /* memory_latency */
948 COSTS_N_INSNS (5), /* int_mult_si */
949 COSTS_N_INSNS (5), /* int_mult_di */
950 COSTS_N_INSNS (41), /* int_div_si */
951 COSTS_N_INSNS (41), /* int_div_di */
953 4 /* memory_latency */
956 COSTS_N_INSNS (8), /* fp_add */
957 COSTS_N_INSNS (8), /* fp_mult_sf */
958 COSTS_N_INSNS (10), /* fp_mult_df */
959 COSTS_N_INSNS (34), /* fp_div_sf */
960 COSTS_N_INSNS (64), /* fp_div_df */
961 COSTS_N_INSNS (5), /* int_mult_si */
962 COSTS_N_INSNS (5), /* int_mult_di */
963 COSTS_N_INSNS (41), /* int_div_si */
964 COSTS_N_INSNS (41), /* int_div_di */
966 4 /* memory_latency */
969 COSTS_N_INSNS (4), /* fp_add */
970 COSTS_N_INSNS (4), /* fp_mult_sf */
971 COSTS_N_INSNS (5), /* fp_mult_df */
972 COSTS_N_INSNS (17), /* fp_div_sf */
973 COSTS_N_INSNS (32), /* fp_div_df */
974 COSTS_N_INSNS (5), /* int_mult_si */
975 COSTS_N_INSNS (5), /* int_mult_di */
976 COSTS_N_INSNS (41), /* int_div_si */
977 COSTS_N_INSNS (41), /* int_div_di */
979 4 /* memory_latency */
983 COSTS_N_INSNS (5), /* int_mult_si */
984 COSTS_N_INSNS (5), /* int_mult_di */
985 COSTS_N_INSNS (41), /* int_div_si */
986 COSTS_N_INSNS (41), /* int_div_di */
988 4 /* memory_latency */
991 COSTS_N_INSNS (8), /* fp_add */
992 COSTS_N_INSNS (8), /* fp_mult_sf */
993 COSTS_N_INSNS (10), /* fp_mult_df */
994 COSTS_N_INSNS (34), /* fp_div_sf */
995 COSTS_N_INSNS (64), /* fp_div_df */
996 COSTS_N_INSNS (5), /* int_mult_si */
997 COSTS_N_INSNS (5), /* int_mult_di */
998 COSTS_N_INSNS (41), /* int_div_si */
999 COSTS_N_INSNS (41), /* int_div_di */
1000 1, /* branch_cost */
1001 4 /* memory_latency */
1004 COSTS_N_INSNS (4), /* fp_add */
1005 COSTS_N_INSNS (4), /* fp_mult_sf */
1006 COSTS_N_INSNS (5), /* fp_mult_df */
1007 COSTS_N_INSNS (17), /* fp_div_sf */
1008 COSTS_N_INSNS (32), /* fp_div_df */
1009 COSTS_N_INSNS (5), /* int_mult_si */
1010 COSTS_N_INSNS (5), /* int_mult_di */
1011 COSTS_N_INSNS (41), /* int_div_si */
1012 COSTS_N_INSNS (41), /* int_div_di */
1013 1, /* branch_cost */
1014 4 /* memory_latency */
1017 COSTS_N_INSNS (6), /* fp_add */
1018 COSTS_N_INSNS (6), /* fp_mult_sf */
1019 COSTS_N_INSNS (7), /* fp_mult_df */
1020 COSTS_N_INSNS (25), /* fp_div_sf */
1021 COSTS_N_INSNS (48), /* fp_div_df */
1022 COSTS_N_INSNS (5), /* int_mult_si */
1023 COSTS_N_INSNS (5), /* int_mult_di */
1024 COSTS_N_INSNS (41), /* int_div_si */
1025 COSTS_N_INSNS (41), /* int_div_di */
1026 1, /* branch_cost */
1027 4 /* memory_latency */
1033 COSTS_N_INSNS (2), /* fp_add */
1034 COSTS_N_INSNS (4), /* fp_mult_sf */
1035 COSTS_N_INSNS (5), /* fp_mult_df */
1036 COSTS_N_INSNS (12), /* fp_div_sf */
1037 COSTS_N_INSNS (19), /* fp_div_df */
1038 COSTS_N_INSNS (2), /* int_mult_si */
1039 COSTS_N_INSNS (2), /* int_mult_di */
1040 COSTS_N_INSNS (35), /* int_div_si */
1041 COSTS_N_INSNS (35), /* int_div_di */
1042 1, /* branch_cost */
1043 4 /* memory_latency */
1046 COSTS_N_INSNS (3), /* fp_add */
1047 COSTS_N_INSNS (5), /* fp_mult_sf */
1048 COSTS_N_INSNS (6), /* fp_mult_df */
1049 COSTS_N_INSNS (15), /* fp_div_sf */
1050 COSTS_N_INSNS (16), /* fp_div_df */
1051 COSTS_N_INSNS (17), /* int_mult_si */
1052 COSTS_N_INSNS (17), /* int_mult_di */
1053 COSTS_N_INSNS (38), /* int_div_si */
1054 COSTS_N_INSNS (38), /* int_div_di */
1055 2, /* branch_cost */
1056 6 /* memory_latency */
1059 COSTS_N_INSNS (6), /* fp_add */
1060 COSTS_N_INSNS (7), /* fp_mult_sf */
1061 COSTS_N_INSNS (8), /* fp_mult_df */
1062 COSTS_N_INSNS (23), /* fp_div_sf */
1063 COSTS_N_INSNS (36), /* fp_div_df */
1064 COSTS_N_INSNS (10), /* int_mult_si */
1065 COSTS_N_INSNS (10), /* int_mult_di */
1066 COSTS_N_INSNS (69), /* int_div_si */
1067 COSTS_N_INSNS (69), /* int_div_di */
1068 2, /* branch_cost */
1069 6 /* memory_latency */
1081 /* The only costs that appear to be updated here are
1082 integer multiplication. */
1084 COSTS_N_INSNS (4), /* int_mult_si */
1085 COSTS_N_INSNS (6), /* int_mult_di */
1086 COSTS_N_INSNS (69), /* int_div_si */
1087 COSTS_N_INSNS (69), /* int_div_di */
1088 1, /* branch_cost */
1089 4 /* memory_latency */
1101 COSTS_N_INSNS (6), /* fp_add */
1102 COSTS_N_INSNS (4), /* fp_mult_sf */
1103 COSTS_N_INSNS (5), /* fp_mult_df */
1104 COSTS_N_INSNS (23), /* fp_div_sf */
1105 COSTS_N_INSNS (36), /* fp_div_df */
1106 COSTS_N_INSNS (5), /* int_mult_si */
1107 COSTS_N_INSNS (5), /* int_mult_di */
1108 COSTS_N_INSNS (36), /* int_div_si */
1109 COSTS_N_INSNS (36), /* int_div_di */
1110 1, /* branch_cost */
1111 4 /* memory_latency */
1114 COSTS_N_INSNS (6), /* fp_add */
1115 COSTS_N_INSNS (5), /* fp_mult_sf */
1116 COSTS_N_INSNS (6), /* fp_mult_df */
1117 COSTS_N_INSNS (30), /* fp_div_sf */
1118 COSTS_N_INSNS (59), /* fp_div_df */
1119 COSTS_N_INSNS (3), /* int_mult_si */
1120 COSTS_N_INSNS (4), /* int_mult_di */
1121 COSTS_N_INSNS (42), /* int_div_si */
1122 COSTS_N_INSNS (74), /* int_div_di */
1123 1, /* branch_cost */
1124 4 /* memory_latency */
1127 COSTS_N_INSNS (6), /* fp_add */
1128 COSTS_N_INSNS (5), /* fp_mult_sf */
1129 COSTS_N_INSNS (6), /* fp_mult_df */
1130 COSTS_N_INSNS (30), /* fp_div_sf */
1131 COSTS_N_INSNS (59), /* fp_div_df */
1132 COSTS_N_INSNS (5), /* int_mult_si */
1133 COSTS_N_INSNS (9), /* int_mult_di */
1134 COSTS_N_INSNS (42), /* int_div_si */
1135 COSTS_N_INSNS (74), /* int_div_di */
1136 1, /* branch_cost */
1137 4 /* memory_latency */
1140 /* The only costs that are changed here are
1141 integer multiplication. */
1142 COSTS_N_INSNS (6), /* fp_add */
1143 COSTS_N_INSNS (7), /* fp_mult_sf */
1144 COSTS_N_INSNS (8), /* fp_mult_df */
1145 COSTS_N_INSNS (23), /* fp_div_sf */
1146 COSTS_N_INSNS (36), /* fp_div_df */
1147 COSTS_N_INSNS (5), /* int_mult_si */
1148 COSTS_N_INSNS (9), /* int_mult_di */
1149 COSTS_N_INSNS (69), /* int_div_si */
1150 COSTS_N_INSNS (69), /* int_div_di */
1151 1, /* branch_cost */
1152 4 /* memory_latency */
1158 /* The only costs that are changed here are
1159 integer multiplication. */
1160 COSTS_N_INSNS (6), /* fp_add */
1161 COSTS_N_INSNS (7), /* fp_mult_sf */
1162 COSTS_N_INSNS (8), /* fp_mult_df */
1163 COSTS_N_INSNS (23), /* fp_div_sf */
1164 COSTS_N_INSNS (36), /* fp_div_df */
1165 COSTS_N_INSNS (3), /* int_mult_si */
1166 COSTS_N_INSNS (8), /* int_mult_di */
1167 COSTS_N_INSNS (69), /* int_div_si */
1168 COSTS_N_INSNS (69), /* int_div_di */
1169 1, /* branch_cost */
1170 4 /* memory_latency */
1173 /* These costs are the same as the SB-1A below. */
1174 COSTS_N_INSNS (4), /* fp_add */
1175 COSTS_N_INSNS (4), /* fp_mult_sf */
1176 COSTS_N_INSNS (4), /* fp_mult_df */
1177 COSTS_N_INSNS (24), /* fp_div_sf */
1178 COSTS_N_INSNS (32), /* fp_div_df */
1179 COSTS_N_INSNS (3), /* int_mult_si */
1180 COSTS_N_INSNS (4), /* int_mult_di */
1181 COSTS_N_INSNS (36), /* int_div_si */
1182 COSTS_N_INSNS (68), /* int_div_di */
1183 1, /* branch_cost */
1184 4 /* memory_latency */
1187 /* These costs are the same as the SB-1 above. */
1188 COSTS_N_INSNS (4), /* fp_add */
1189 COSTS_N_INSNS (4), /* fp_mult_sf */
1190 COSTS_N_INSNS (4), /* fp_mult_df */
1191 COSTS_N_INSNS (24), /* fp_div_sf */
1192 COSTS_N_INSNS (32), /* fp_div_df */
1193 COSTS_N_INSNS (3), /* int_mult_si */
1194 COSTS_N_INSNS (4), /* int_mult_di */
1195 COSTS_N_INSNS (36), /* int_div_si */
1196 COSTS_N_INSNS (68), /* int_div_di */
1197 1, /* branch_cost */
1198 4 /* memory_latency */
1205 /* If a MIPS16e SAVE or RESTORE instruction saves or restores register
1206 mips16e_s2_s8_regs[X], it must also save the registers in indexes
1207 X + 1 onwards. Likewise mips16e_a0_a3_regs. */
1208 static const unsigned char mips16e_s2_s8_regs[] = {
1209 30, 23, 22, 21, 20, 19, 18
1211 static const unsigned char mips16e_a0_a3_regs[] = {
1215 /* A list of the registers that can be saved by the MIPS16e SAVE instruction,
1216 ordered from the uppermost in memory to the lowest in memory. */
1217 static const unsigned char mips16e_save_restore_regs[] = {
1218 31, 30, 23, 22, 21, 20, 19, 18, 17, 16, 7, 6, 5, 4
1221 /* Initialize the GCC target structure. */
1222 #undef TARGET_ASM_ALIGNED_HI_OP
1223 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
1224 #undef TARGET_ASM_ALIGNED_SI_OP
1225 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
1226 #undef TARGET_ASM_ALIGNED_DI_OP
1227 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
1229 #undef TARGET_ASM_FUNCTION_PROLOGUE
1230 #define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
1231 #undef TARGET_ASM_FUNCTION_EPILOGUE
1232 #define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
1233 #undef TARGET_ASM_SELECT_RTX_SECTION
1234 #define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
1235 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
1236 #define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
1238 #undef TARGET_SCHED_INIT
1239 #define TARGET_SCHED_INIT mips_sched_init
1240 #undef TARGET_SCHED_REORDER
1241 #define TARGET_SCHED_REORDER mips_sched_reorder
1242 #undef TARGET_SCHED_REORDER2
1243 #define TARGET_SCHED_REORDER2 mips_sched_reorder
1244 #undef TARGET_SCHED_VARIABLE_ISSUE
1245 #define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
1246 #undef TARGET_SCHED_ADJUST_COST
1247 #define TARGET_SCHED_ADJUST_COST mips_adjust_cost
1248 #undef TARGET_SCHED_ISSUE_RATE
1249 #define TARGET_SCHED_ISSUE_RATE mips_issue_rate
1250 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1251 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
1252 mips_multipass_dfa_lookahead
1254 #undef TARGET_DEFAULT_TARGET_FLAGS
1255 #define TARGET_DEFAULT_TARGET_FLAGS \
1257 | TARGET_CPU_DEFAULT \
1258 | TARGET_ENDIAN_DEFAULT \
1259 | TARGET_FP_EXCEPTIONS_DEFAULT \
1260 | MASK_CHECK_ZERO_DIV \
1262 #undef TARGET_HANDLE_OPTION
1263 #define TARGET_HANDLE_OPTION mips_handle_option
1265 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1266 #define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
1268 #undef TARGET_SET_CURRENT_FUNCTION
1269 #define TARGET_SET_CURRENT_FUNCTION mips_set_current_function
1271 #undef TARGET_VALID_POINTER_MODE
1272 #define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
1273 #undef TARGET_RTX_COSTS
1274 #define TARGET_RTX_COSTS mips_rtx_costs
1275 #undef TARGET_ADDRESS_COST
1276 #define TARGET_ADDRESS_COST mips_address_cost
1278 #undef TARGET_IN_SMALL_DATA_P
1279 #define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
1281 #undef TARGET_MACHINE_DEPENDENT_REORG
1282 #define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
1284 #undef TARGET_ASM_FILE_START
1285 #define TARGET_ASM_FILE_START mips_file_start
1286 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
1287 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
1289 #undef TARGET_INIT_LIBFUNCS
1290 #define TARGET_INIT_LIBFUNCS mips_init_libfuncs
1292 #undef TARGET_BUILD_BUILTIN_VA_LIST
1293 #define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
1294 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1295 #define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
1297 #undef TARGET_PROMOTE_FUNCTION_ARGS
1298 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
1299 #undef TARGET_PROMOTE_FUNCTION_RETURN
1300 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
1301 #undef TARGET_PROMOTE_PROTOTYPES
1302 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
1304 #undef TARGET_RETURN_IN_MEMORY
1305 #define TARGET_RETURN_IN_MEMORY mips_return_in_memory
1306 #undef TARGET_RETURN_IN_MSB
1307 #define TARGET_RETURN_IN_MSB mips_return_in_msb
1309 #undef TARGET_ASM_OUTPUT_MI_THUNK
1310 #define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
1311 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1312 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1314 #undef TARGET_SETUP_INCOMING_VARARGS
1315 #define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
1316 #undef TARGET_STRICT_ARGUMENT_NAMING
1317 #define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
1318 #undef TARGET_MUST_PASS_IN_STACK
1319 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
1320 #undef TARGET_PASS_BY_REFERENCE
1321 #define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
1322 #undef TARGET_CALLEE_COPIES
1323 #define TARGET_CALLEE_COPIES mips_callee_copies
1324 #undef TARGET_ARG_PARTIAL_BYTES
1325 #define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
1327 #undef TARGET_MODE_REP_EXTENDED
1328 #define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended
1330 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1331 #define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
1333 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1334 #define TARGET_SCALAR_MODE_SUPPORTED_P mips_scalar_mode_supported_p
1336 #undef TARGET_INIT_BUILTINS
1337 #define TARGET_INIT_BUILTINS mips_init_builtins
1338 #undef TARGET_EXPAND_BUILTIN
1339 #define TARGET_EXPAND_BUILTIN mips_expand_builtin
1341 #undef TARGET_HAVE_TLS
1342 #define TARGET_HAVE_TLS HAVE_AS_TLS
1344 #undef TARGET_CANNOT_FORCE_CONST_MEM
1345 #define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem
1347 #undef TARGET_ENCODE_SECTION_INFO
1348 #define TARGET_ENCODE_SECTION_INFO mips_encode_section_info
1350 #undef TARGET_ATTRIBUTE_TABLE
1351 #define TARGET_ATTRIBUTE_TABLE mips_attribute_table
1353 #undef TARGET_EXTRA_LIVE_ON_ENTRY
1354 #define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry
1356 #undef TARGET_MIN_ANCHOR_OFFSET
1357 #define TARGET_MIN_ANCHOR_OFFSET -32768
1358 #undef TARGET_MAX_ANCHOR_OFFSET
1359 #define TARGET_MAX_ANCHOR_OFFSET 32767
1360 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1361 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
1362 #undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
1363 #define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p
1365 #undef TARGET_COMP_TYPE_ATTRIBUTES
1366 #define TARGET_COMP_TYPE_ATTRIBUTES mips_comp_type_attributes
1368 #ifdef HAVE_AS_DTPRELWORD
1369 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1370 #define TARGET_ASM_OUTPUT_DWARF_DTPREL mips_output_dwarf_dtprel
1373 struct gcc_target targetm = TARGET_INITIALIZER;
1376 /* Predicates to test for presence of "near" and "far"/"long_call"
1377 attributes on the given TYPE. */
1380 mips_near_type_p (const_tree type)
1382 return lookup_attribute ("near", TYPE_ATTRIBUTES (type)) != NULL;
1386 mips_far_type_p (const_tree type)
1388 return (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type)) != NULL
1389 || lookup_attribute ("far", TYPE_ATTRIBUTES (type)) != NULL);
1392 /* Similar predicates for "mips16"/"nomips16" attributes. */
1395 mips_mips16_type_p (const_tree type)
1397 return lookup_attribute ("mips16", TYPE_ATTRIBUTES (type)) != NULL;
1401 mips_nomips16_type_p (const_tree type)
1403 return lookup_attribute ("nomips16", TYPE_ATTRIBUTES (type)) != NULL;
1406 /* Return 0 if the attributes for two types are incompatible, 1 if they
1407 are compatible, and 2 if they are nearly compatible (which causes a
1408 warning to be generated). */
1411 mips_comp_type_attributes (const_tree type1, const_tree type2)
1413 /* Check for mismatch of non-default calling convention. */
1414 if (TREE_CODE (type1) != FUNCTION_TYPE)
1417 /* Disallow mixed near/far attributes. */
1418 if (mips_far_type_p (type1) && mips_near_type_p (type2))
1420 if (mips_near_type_p (type1) && mips_far_type_p (type2))
1423 /* Mips16/nomips16 attributes must match exactly. */
1424 if (mips_nomips16_type_p (type1) != mips_nomips16_type_p (type2)
1425 || mips_mips16_type_p (type1) != mips_mips16_type_p (type2))
1431 /* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
1432 and *OFFSET_PTR. Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise. */
1435 mips_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr)
1437 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
1439 *base_ptr = XEXP (x, 0);
1440 *offset_ptr = INTVAL (XEXP (x, 1));
1449 /* Return true if SYMBOL_REF X is associated with a global symbol
1450 (in the STB_GLOBAL sense). */
1453 mips_global_symbol_p (const_rtx x)
1455 const_tree const decl = SYMBOL_REF_DECL (x);
1458 return !SYMBOL_REF_LOCAL_P (x);
1460 /* Weakref symbols are not TREE_PUBLIC, but their targets are global
1461 or weak symbols. Relocations in the object file will be against
1462 the target symbol, so it's that symbol's binding that matters here. */
1463 return DECL_P (decl) && (TREE_PUBLIC (decl) || DECL_WEAK (decl));
1466 /* Return true if SYMBOL_REF X binds locally. */
1469 mips_symbol_binds_local_p (const_rtx x)
1471 return (SYMBOL_REF_DECL (x)
1472 ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
1473 : SYMBOL_REF_LOCAL_P (x));
1476 /* Return true if rtx constants of mode MODE should be put into a small
1480 mips_rtx_constant_in_small_data_p (enum machine_mode mode)
1482 return (!TARGET_EMBEDDED_DATA
1483 && TARGET_LOCAL_SDATA
1484 && GET_MODE_SIZE (mode) <= mips_section_threshold);
1487 /* Return the method that should be used to access SYMBOL_REF or
1488 LABEL_REF X in context CONTEXT. */
1490 static enum mips_symbol_type
1491 mips_classify_symbol (const_rtx x, enum mips_symbol_context context)
1494 return SYMBOL_GOT_DISP;
1496 if (GET_CODE (x) == LABEL_REF)
1498 /* LABEL_REFs are used for jump tables as well as text labels.
1499 Only return SYMBOL_PC_RELATIVE if we know the label is in
1500 the text section. */
1501 if (TARGET_MIPS16_SHORT_JUMP_TABLES)
1502 return SYMBOL_PC_RELATIVE;
1503 if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
1504 return SYMBOL_GOT_PAGE_OFST;
1505 return SYMBOL_ABSOLUTE;
1508 gcc_assert (GET_CODE (x) == SYMBOL_REF);
1510 if (SYMBOL_REF_TLS_MODEL (x))
1513 if (CONSTANT_POOL_ADDRESS_P (x))
1515 if (TARGET_MIPS16_TEXT_LOADS)
1516 return SYMBOL_PC_RELATIVE;
1518 if (TARGET_MIPS16_PCREL_LOADS && context == SYMBOL_CONTEXT_MEM)
1519 return SYMBOL_PC_RELATIVE;
1521 if (mips_rtx_constant_in_small_data_p (get_pool_mode (x)))
1522 return SYMBOL_GP_RELATIVE;
1525 /* Do not use small-data accesses for weak symbols; they may end up
1528 && SYMBOL_REF_SMALL_P (x)
1529 && !SYMBOL_REF_WEAK (x))
1530 return SYMBOL_GP_RELATIVE;
1532 /* Don't use GOT accesses for locally-binding symbols when -mno-shared
1535 && !(TARGET_ABSOLUTE_ABICALLS && mips_symbol_binds_local_p (x)))
1537 /* There are three cases to consider:
1539 - o32 PIC (either with or without explicit relocs)
1540 - n32/n64 PIC without explicit relocs
1541 - n32/n64 PIC with explicit relocs
1543 In the first case, both local and global accesses will use an
1544 R_MIPS_GOT16 relocation. We must correctly predict which of
1545 the two semantics (local or global) the assembler and linker
1546 will apply. The choice depends on the symbol's binding rather
1547 than its visibility.
1549 In the second case, the assembler will not use R_MIPS_GOT16
1550 relocations, but it chooses between local and global accesses
1551 in the same way as for o32 PIC.
1553 In the third case we have more freedom since both forms of
1554 access will work for any kind of symbol. However, there seems
1555 little point in doing things differently. */
1556 if (mips_global_symbol_p (x))
1557 return SYMBOL_GOT_DISP;
1559 return SYMBOL_GOT_PAGE_OFST;
1562 if (TARGET_MIPS16_PCREL_LOADS && context != SYMBOL_CONTEXT_CALL)
1563 return SYMBOL_FORCE_TO_MEM;
1564 return SYMBOL_ABSOLUTE;
1567 /* Classify symbolic expression X, given that it appears in context
1570 static enum mips_symbol_type
1571 mips_classify_symbolic_expression (rtx x, enum mips_symbol_context context)
1575 split_const (x, &x, &offset);
1576 if (UNSPEC_ADDRESS_P (x))
1577 return UNSPEC_ADDRESS_TYPE (x);
1579 return mips_classify_symbol (x, context);
1582 /* Return true if OFFSET is within the range [0, ALIGN), where ALIGN
1583 is the alignment (in bytes) of SYMBOL_REF X. */
1586 mips_offset_within_alignment_p (rtx x, HOST_WIDE_INT offset)
1588 /* If for some reason we can't get the alignment for the
1589 symbol, initializing this to one means we will only accept
1591 HOST_WIDE_INT align = 1;
1594 /* Get the alignment of the symbol we're referring to. */
1595 t = SYMBOL_REF_DECL (x);
1597 align = DECL_ALIGN_UNIT (t);
1599 return offset >= 0 && offset < align;
1602 /* Return true if X is a symbolic constant that can be used in context
1603 CONTEXT. If it is, store the type of the symbol in *SYMBOL_TYPE. */
1606 mips_symbolic_constant_p (rtx x, enum mips_symbol_context context,
1607 enum mips_symbol_type *symbol_type)
1611 split_const (x, &x, &offset);
1612 if (UNSPEC_ADDRESS_P (x))
1614 *symbol_type = UNSPEC_ADDRESS_TYPE (x);
1615 x = UNSPEC_ADDRESS (x);
1617 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1619 *symbol_type = mips_classify_symbol (x, context);
1620 if (*symbol_type == SYMBOL_TLS)
1626 if (offset == const0_rtx)
1629 /* Check whether a nonzero offset is valid for the underlying
1631 switch (*symbol_type)
1633 case SYMBOL_ABSOLUTE:
1634 case SYMBOL_FORCE_TO_MEM:
1635 case SYMBOL_32_HIGH:
1636 case SYMBOL_64_HIGH:
1639 /* If the target has 64-bit pointers and the object file only
1640 supports 32-bit symbols, the values of those symbols will be
1641 sign-extended. In this case we can't allow an arbitrary offset
1642 in case the 32-bit value X + OFFSET has a different sign from X. */
1643 if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
1644 return offset_within_block_p (x, INTVAL (offset));
1646 /* In other cases the relocations can handle any offset. */
1649 case SYMBOL_PC_RELATIVE:
1650 /* Allow constant pool references to be converted to LABEL+CONSTANT.
1651 In this case, we no longer have access to the underlying constant,
1652 but the original symbol-based access was known to be valid. */
1653 if (GET_CODE (x) == LABEL_REF)
1658 case SYMBOL_GP_RELATIVE:
1659 /* Make sure that the offset refers to something within the
1660 same object block. This should guarantee that the final
1661 PC- or GP-relative offset is within the 16-bit limit. */
1662 return offset_within_block_p (x, INTVAL (offset));
1664 case SYMBOL_GOT_PAGE_OFST:
1665 case SYMBOL_GOTOFF_PAGE:
1666 /* If the symbol is global, the GOT entry will contain the symbol's
1667 address, and we will apply a 16-bit offset after loading it.
1668 If the symbol is local, the linker should provide enough local
1669 GOT entries for a 16-bit offset, but larger offsets may lead
1671 return SMALL_INT (offset);
1675 /* There is no carry between the HI and LO REL relocations, so the
1676 offset is only valid if we know it won't lead to such a carry. */
1677 return mips_offset_within_alignment_p (x, INTVAL (offset));
1679 case SYMBOL_GOT_DISP:
1680 case SYMBOL_GOTOFF_DISP:
1681 case SYMBOL_GOTOFF_CALL:
1682 case SYMBOL_GOTOFF_LOADGP:
1685 case SYMBOL_GOTTPREL:
1694 /* This function is used to implement REG_MODE_OK_FOR_BASE_P. */
1697 mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode, int strict)
1699 if (!HARD_REGISTER_NUM_P (regno))
1703 regno = reg_renumber[regno];
1706 /* These fake registers will be eliminated to either the stack or
1707 hard frame pointer, both of which are usually valid base registers.
1708 Reload deals with the cases where the eliminated form isn't valid. */
1709 if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
1712 /* In mips16 mode, the stack pointer can only address word and doubleword
1713 values, nothing smaller. There are two problems here:
1715 (a) Instantiating virtual registers can introduce new uses of the
1716 stack pointer. If these virtual registers are valid addresses,
1717 the stack pointer should be too.
1719 (b) Most uses of the stack pointer are not made explicit until
1720 FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
1721 We don't know until that stage whether we'll be eliminating to the
1722 stack pointer (which needs the restriction) or the hard frame
1723 pointer (which doesn't).
1725 All in all, it seems more consistent to only enforce this restriction
1726 during and after reload. */
1727 if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
1728 return !strict || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1730 return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
1734 /* Return true if X is a valid base register for the given mode.
1735 Allow only hard registers if STRICT. */
1738 mips_valid_base_register_p (rtx x, enum machine_mode mode, int strict)
1740 if (!strict && GET_CODE (x) == SUBREG)
1744 && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict));
1748 /* Return true if X is a valid address for machine mode MODE. If it is,
1749 fill in INFO appropriately. STRICT is true if we should only accept
1750 hard base registers. */
1753 mips_classify_address (struct mips_address_info *info, rtx x,
1754 enum machine_mode mode, int strict)
1756 switch (GET_CODE (x))
1760 info->type = ADDRESS_REG;
1762 info->offset = const0_rtx;
1763 return mips_valid_base_register_p (info->reg, mode, strict);
1766 info->type = ADDRESS_REG;
1767 info->reg = XEXP (x, 0);
1768 info->offset = XEXP (x, 1);
1769 return (mips_valid_base_register_p (info->reg, mode, strict)
1770 && const_arith_operand (info->offset, VOIDmode));
1773 info->type = ADDRESS_LO_SUM;
1774 info->reg = XEXP (x, 0);
1775 info->offset = XEXP (x, 1);
1776 /* We have to trust the creator of the LO_SUM to do something vaguely
1777 sane. Target-independent code that creates a LO_SUM should also
1778 create and verify the matching HIGH. Target-independent code that
1779 adds an offset to a LO_SUM must prove that the offset will not
1780 induce a carry. Failure to do either of these things would be
1781 a bug, and we are not required to check for it here. The MIPS
1782 backend itself should only create LO_SUMs for valid symbolic
1783 constants, with the high part being either a HIGH or a copy
1786 = mips_classify_symbolic_expression (info->offset, SYMBOL_CONTEXT_MEM);
1787 return (mips_valid_base_register_p (info->reg, mode, strict)
1788 && mips_symbol_insns (info->symbol_type, mode) > 0
1789 && mips_lo_relocs[info->symbol_type] != 0);
1792 /* Small-integer addresses don't occur very often, but they
1793 are legitimate if $0 is a valid base register. */
1794 info->type = ADDRESS_CONST_INT;
1795 return !TARGET_MIPS16 && SMALL_INT (x);
1800 info->type = ADDRESS_SYMBOLIC;
1801 return (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_MEM,
1803 && mips_symbol_insns (info->symbol_type, mode) > 0
1804 && !mips_split_p[info->symbol_type]);
1811 /* Return true if X is a thread-local symbol. */
1814 mips_tls_operand_p (rtx x)
1816 return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
1819 /* Return true if X can not be forced into a constant pool. */
1822 mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
1824 return mips_tls_operand_p (*x);
1827 /* Return true if X can not be forced into a constant pool. */
1830 mips_cannot_force_const_mem (rtx x)
1836 /* As an optimization, reject constants that mips_legitimize_move
1839 Suppose we have a multi-instruction sequence that loads constant C
1840 into register R. If R does not get allocated a hard register, and
1841 R is used in an operand that allows both registers and memory
1842 references, reload will consider forcing C into memory and using
1843 one of the instruction's memory alternatives. Returning false
1844 here will force it to use an input reload instead. */
1845 if (GET_CODE (x) == CONST_INT)
1848 split_const (x, &base, &offset);
1849 if (symbolic_operand (base, VOIDmode) && SMALL_INT (offset))
1853 if (TARGET_HAVE_TLS && for_each_rtx (&x, &mips_tls_symbol_ref_1, 0))
1859 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. We can't use blocks for
1860 constants when we're using a per-function constant pool. */
1863 mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
1864 const_rtx x ATTRIBUTE_UNUSED)
1866 return !TARGET_MIPS16_PCREL_LOADS;
1869 /* Like mips_symbol_insns, but treat extended MIPS16 instructions as a
1870 single instruction. We rely on the fact that, in the worst case,
1871 all instructions involved in a MIPS16 address calculation are usually
1875 mips_symbol_insns_1 (enum mips_symbol_type type, enum machine_mode mode)
1879 case SYMBOL_ABSOLUTE:
1880 /* When using 64-bit symbols, we need 5 preparatory instructions,
1883 lui $at,%highest(symbol)
1884 daddiu $at,$at,%higher(symbol)
1886 daddiu $at,$at,%hi(symbol)
1889 The final address is then $at + %lo(symbol). With 32-bit
1890 symbols we just need a preparatory lui for normal mode and
1891 a preparatory "li; sll" for MIPS16. */
1892 return ABI_HAS_64BIT_SYMBOLS ? 6 : TARGET_MIPS16 ? 3 : 2;
1894 case SYMBOL_GP_RELATIVE:
1895 /* Treat GP-relative accesses as taking a single instruction on
1896 MIPS16 too; the copy of $gp can often be shared. */
1899 case SYMBOL_PC_RELATIVE:
1900 /* PC-relative constants can be only be used with addiupc,
1902 if (mode == MAX_MACHINE_MODE
1903 || GET_MODE_SIZE (mode) == 4
1904 || GET_MODE_SIZE (mode) == 8)
1907 /* The constant must be loaded using addiupc first. */
1910 case SYMBOL_FORCE_TO_MEM:
1911 /* LEAs will be converted into constant-pool references by
1913 if (mode == MAX_MACHINE_MODE)
1916 /* The constant must be loaded from the constant pool. */
1919 case SYMBOL_GOT_DISP:
1920 /* The constant will have to be loaded from the GOT before it
1921 is used in an address. */
1922 if (mode != MAX_MACHINE_MODE)
1927 case SYMBOL_GOT_PAGE_OFST:
1928 /* Unless -funit-at-a-time is in effect, we can't be sure whether
1929 the local/global classification is accurate. See override_options
1932 The worst cases are:
1934 (1) For local symbols when generating o32 or o64 code. The assembler
1940 ...and the final address will be $at + %lo(symbol).
1942 (2) For global symbols when -mxgot. The assembler will use:
1944 lui $at,%got_hi(symbol)
1947 ...and the final address will be $at + %got_lo(symbol). */
1950 case SYMBOL_GOTOFF_PAGE:
1951 case SYMBOL_GOTOFF_DISP:
1952 case SYMBOL_GOTOFF_CALL:
1953 case SYMBOL_GOTOFF_LOADGP:
1954 case SYMBOL_32_HIGH:
1955 case SYMBOL_64_HIGH:
1961 case SYMBOL_GOTTPREL:
1964 /* A 16-bit constant formed by a single relocation, or a 32-bit
1965 constant formed from a high 16-bit relocation and a low 16-bit
1966 relocation. Use mips_split_p to determine which. */
1967 return !mips_split_p[type] ? 1 : TARGET_MIPS16 ? 3 : 2;
1970 /* We don't treat a bare TLS symbol as a constant. */
1976 /* If MODE is MAX_MACHINE_MODE, return the number of instructions needed
1977 to load symbols of type TYPE into a register. Return 0 if the given
1978 type of symbol cannot be used as an immediate operand.
1980 Otherwise, return the number of instructions needed to load or store
1981 values of mode MODE to or from addresses of type TYPE. Return 0 if
1982 the given type of symbol is not valid in addresses.
1984 In both cases, treat extended MIPS16 instructions as two instructions. */
1987 mips_symbol_insns (enum mips_symbol_type type, enum machine_mode mode)
1989 return mips_symbol_insns_1 (type, mode) * (TARGET_MIPS16 ? 2 : 1);
1992 /* Return true if X is a legitimate $sp-based address for mode MDOE. */
1995 mips_stack_address_p (rtx x, enum machine_mode mode)
1997 struct mips_address_info addr;
1999 return (mips_classify_address (&addr, x, mode, false)
2000 && addr.type == ADDRESS_REG
2001 && addr.reg == stack_pointer_rtx);
2004 /* Return true if a value at OFFSET bytes from BASE can be accessed
2005 using an unextended mips16 instruction. MODE is the mode of the
2008 Usually the offset in an unextended instruction is a 5-bit field.
2009 The offset is unsigned and shifted left once for HIs, twice
2010 for SIs, and so on. An exception is SImode accesses off the
2011 stack pointer, which have an 8-bit immediate field. */
2014 mips16_unextended_reference_p (enum machine_mode mode, rtx base, rtx offset)
2017 && GET_CODE (offset) == CONST_INT
2018 && INTVAL (offset) >= 0
2019 && (INTVAL (offset) & (GET_MODE_SIZE (mode) - 1)) == 0)
2021 if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
2022 return INTVAL (offset) < 256 * GET_MODE_SIZE (mode);
2023 return INTVAL (offset) < 32 * GET_MODE_SIZE (mode);
2029 /* Return the number of instructions needed to load or store a value
2030 of mode MODE at X. Return 0 if X isn't valid for MODE. Assume that
2031 multiword moves may need to be split into word moves if MIGHT_SPLIT_P,
2032 otherwise assume that a single load or store is enough.
2034 For mips16 code, count extended instructions as two instructions. */
2037 mips_address_insns (rtx x, enum machine_mode mode, bool might_split_p)
2039 struct mips_address_info addr;
2042 /* BLKmode is used for single unaligned loads and stores and should
2043 not count as a multiword mode. (GET_MODE_SIZE (BLKmode) is pretty
2044 meaningless, so we have to single it out as a special case one way
2046 if (mode != BLKmode && might_split_p)
2047 factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2051 if (mips_classify_address (&addr, x, mode, false))
2056 && !mips16_unextended_reference_p (mode, addr.reg, addr.offset))
2060 case ADDRESS_LO_SUM:
2061 return (TARGET_MIPS16 ? factor * 2 : factor);
2063 case ADDRESS_CONST_INT:
2066 case ADDRESS_SYMBOLIC:
2067 return factor * mips_symbol_insns (addr.symbol_type, mode);
2073 /* Likewise for constant X. */
2076 mips_const_insns (rtx x)
2078 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2079 enum mips_symbol_type symbol_type;
2082 switch (GET_CODE (x))
2085 if (!mips_symbolic_constant_p (XEXP (x, 0), SYMBOL_CONTEXT_LEA,
2087 || !mips_split_p[symbol_type])
2090 /* This is simply an lui for normal mode. It is an extended
2091 "li" followed by an extended "sll" for MIPS16. */
2092 return TARGET_MIPS16 ? 4 : 1;
2096 /* Unsigned 8-bit constants can be loaded using an unextended
2097 LI instruction. Unsigned 16-bit constants can be loaded
2098 using an extended LI. Negative constants must be loaded
2099 using LI and then negated. */
2100 return (INTVAL (x) >= 0 && INTVAL (x) < 256 ? 1
2101 : SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
2102 : INTVAL (x) > -256 && INTVAL (x) < 0 ? 2
2103 : SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
2106 return mips_build_integer (codes, INTVAL (x));
2110 return (!TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0);
2116 /* See if we can refer to X directly. */
2117 if (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_LEA, &symbol_type))
2118 return mips_symbol_insns (symbol_type, MAX_MACHINE_MODE);
2120 /* Otherwise try splitting the constant into a base and offset.
2121 16-bit offsets can be added using an extra addiu. Larger offsets
2122 must be calculated separately and then added to the base. */
2123 split_const (x, &x, &offset);
2126 int n = mips_const_insns (x);
2129 if (SMALL_INT (offset))
2132 return n + 1 + mips_build_integer (codes, INTVAL (offset));
2139 return mips_symbol_insns (mips_classify_symbol (x, SYMBOL_CONTEXT_LEA),
2148 /* Return the number of instructions needed to implement INSN,
2149 given that it loads from or stores to MEM. Count extended
2150 mips16 instructions as two instructions. */
2153 mips_load_store_insns (rtx mem, rtx insn)
2155 enum machine_mode mode;
2159 gcc_assert (MEM_P (mem));
2160 mode = GET_MODE (mem);
2162 /* Try to prove that INSN does not need to be split. */
2163 might_split_p = true;
2164 if (GET_MODE_BITSIZE (mode) == 64)
2166 set = single_set (insn);
2167 if (set && !mips_split_64bit_move_p (SET_DEST (set), SET_SRC (set)))
2168 might_split_p = false;
2171 return mips_address_insns (XEXP (mem, 0), mode, might_split_p);
2175 /* Return the number of instructions needed for an integer division. */
2178 mips_idiv_insns (void)
2183 if (TARGET_CHECK_ZERO_DIV)
2185 if (GENERATE_DIVIDE_TRAPS)
2191 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
2196 /* This function is used to implement GO_IF_LEGITIMATE_ADDRESS. It
2197 returns a nonzero value if X is a legitimate address for a memory
2198 operand of the indicated MODE. STRICT is nonzero if this function
2199 is called during reload. */
2202 mips_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
2204 struct mips_address_info addr;
2206 return mips_classify_address (&addr, x, mode, strict);
2209 /* Emit a move from SRC to DEST. Assume that the move expanders can
2210 handle all moves if !can_create_pseudo_p (). The distinction is
2211 important because, unlike emit_move_insn, the move expanders know
2212 how to force Pmode objects into the constant pool even when the
2213 constant pool address is not itself legitimate. */
2216 mips_emit_move (rtx dest, rtx src)
2218 return (can_create_pseudo_p ()
2219 ? emit_move_insn (dest, src)
2220 : emit_move_insn_1 (dest, src));
2223 /* Copy VALUE to a register and return that register. If new psuedos
2224 are allowed, copy it into a new register, otherwise use DEST. */
2227 mips_force_temporary (rtx dest, rtx value)
2229 if (can_create_pseudo_p ())
2230 return force_reg (Pmode, value);
2233 mips_emit_move (copy_rtx (dest), value);
2239 /* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
2240 it appears in a MEM of that mode. Return true if ADDR is a legitimate
2241 constant in that context and can be split into a high part and a LO_SUM.
2242 If so, and if LO_SUM_OUT is nonnull, emit the high part and return
2243 the LO_SUM in *LO_SUM_OUT. Leave *LO_SUM_OUT unchanged otherwise.
2245 TEMP is as for mips_force_temporary and is used to load the high
2246 part into a register. */
2249 mips_split_symbol (rtx temp, rtx addr, enum machine_mode mode, rtx *lo_sum_out)
2251 enum mips_symbol_context context;
2252 enum mips_symbol_type symbol_type;
2255 context = (mode == MAX_MACHINE_MODE
2256 ? SYMBOL_CONTEXT_LEA
2257 : SYMBOL_CONTEXT_MEM);
2258 if (!mips_symbolic_constant_p (addr, context, &symbol_type)
2259 || mips_symbol_insns (symbol_type, mode) == 0
2260 || !mips_split_p[symbol_type])
2265 if (symbol_type == SYMBOL_GP_RELATIVE)
2267 if (!can_create_pseudo_p ())
2269 emit_insn (gen_load_const_gp (copy_rtx (temp)));
2273 high = mips16_gp_pseudo_reg ();
2277 high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
2278 high = mips_force_temporary (temp, high);
2280 *lo_sum_out = gen_rtx_LO_SUM (Pmode, high, addr);
2286 /* Wrap symbol or label BASE in an unspec address of type SYMBOL_TYPE
2287 and add CONST_INT OFFSET to the result. */
2290 mips_unspec_address_offset (rtx base, rtx offset,
2291 enum mips_symbol_type symbol_type)
2293 base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
2294 UNSPEC_ADDRESS_FIRST + symbol_type);
2295 if (offset != const0_rtx)
2296 base = gen_rtx_PLUS (Pmode, base, offset);
2297 return gen_rtx_CONST (Pmode, base);
2300 /* Return an UNSPEC address with underlying address ADDRESS and symbol
2301 type SYMBOL_TYPE. */
2304 mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
2308 split_const (address, &base, &offset);
2309 return mips_unspec_address_offset (base, offset, symbol_type);
2313 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
2314 high part to BASE and return the result. Just return BASE otherwise.
2315 TEMP is available as a temporary register if needed.
2317 The returned expression can be used as the first operand to a LO_SUM. */
2320 mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
2321 enum mips_symbol_type symbol_type)
2323 if (mips_split_p[symbol_type])
2325 addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
2326 addr = mips_force_temporary (temp, addr);
2327 return mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
2333 /* Return a legitimate address for REG + OFFSET. TEMP is as for
2334 mips_force_temporary; it is only needed when OFFSET is not a
2338 mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
2340 if (!SMALL_OPERAND (offset))
2345 /* Load the full offset into a register so that we can use
2346 an unextended instruction for the address itself. */
2347 high = GEN_INT (offset);
2352 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH. */
2353 high = GEN_INT (CONST_HIGH_PART (offset));
2354 offset = CONST_LOW_PART (offset);
2356 high = mips_force_temporary (temp, high);
2357 reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
2359 return plus_constant (reg, offset);
2362 /* Emit a call to __tls_get_addr. SYM is the TLS symbol we are
2363 referencing, and TYPE is the symbol type to use (either global
2364 dynamic or local dynamic). V0 is an RTX for the return value
2365 location. The entire insn sequence is returned. */
2367 static GTY(()) rtx mips_tls_symbol;
2370 mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
2372 rtx insn, loc, tga, a0;
2374 a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
2376 if (!mips_tls_symbol)
2377 mips_tls_symbol = init_one_libfunc ("__tls_get_addr");
2379 loc = mips_unspec_address (sym, type);
2383 emit_insn (gen_rtx_SET (Pmode, a0,
2384 gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc)));
2385 tga = gen_rtx_MEM (Pmode, mips_tls_symbol);
2386 insn = emit_call_insn (gen_call_value (v0, tga, const0_rtx, const0_rtx));
2387 CONST_OR_PURE_CALL_P (insn) = 1;
2388 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), v0);
2389 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
2390 insn = get_insns ();
2397 /* Generate the code to access LOC, a thread local SYMBOL_REF. The
2398 return value will be a valid address and move_operand (either a REG
2402 mips_legitimize_tls_address (rtx loc)
2404 rtx dest, insn, v0, v1, tmp1, tmp2, eqv;
2405 enum tls_model model;
2409 sorry ("MIPS16 TLS");
2410 return gen_reg_rtx (Pmode);
2413 v0 = gen_rtx_REG (Pmode, GP_RETURN);
2414 v1 = gen_rtx_REG (Pmode, GP_RETURN + 1);
2416 model = SYMBOL_REF_TLS_MODEL (loc);
2417 /* Only TARGET_ABICALLS code can have more than one module; other
2418 code must be be static and should not use a GOT. All TLS models
2419 reduce to local exec in this situation. */
2420 if (!TARGET_ABICALLS)
2421 model = TLS_MODEL_LOCAL_EXEC;
2425 case TLS_MODEL_GLOBAL_DYNAMIC:
2426 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSGD, v0);
2427 dest = gen_reg_rtx (Pmode);
2428 emit_libcall_block (insn, dest, v0, loc);
2431 case TLS_MODEL_LOCAL_DYNAMIC:
2432 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSLDM, v0);
2433 tmp1 = gen_reg_rtx (Pmode);
2435 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2436 share the LDM result with other LD model accesses. */
2437 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2439 emit_libcall_block (insn, tmp1, v0, eqv);
2441 tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_DTPREL);
2442 dest = gen_rtx_LO_SUM (Pmode, tmp2,
2443 mips_unspec_address (loc, SYMBOL_DTPREL));
2446 case TLS_MODEL_INITIAL_EXEC:
2447 tmp1 = gen_reg_rtx (Pmode);
2448 tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL);
2449 if (Pmode == DImode)
2451 emit_insn (gen_tls_get_tp_di (v1));
2452 emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2));
2456 emit_insn (gen_tls_get_tp_si (v1));
2457 emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2));
2459 dest = gen_reg_rtx (Pmode);
2460 emit_insn (gen_add3_insn (dest, tmp1, v1));
2463 case TLS_MODEL_LOCAL_EXEC:
2464 if (Pmode == DImode)
2465 emit_insn (gen_tls_get_tp_di (v1));
2467 emit_insn (gen_tls_get_tp_si (v1));
2469 tmp1 = mips_unspec_offset_high (NULL, v1, loc, SYMBOL_TPREL);
2470 dest = gen_rtx_LO_SUM (Pmode, tmp1,
2471 mips_unspec_address (loc, SYMBOL_TPREL));
2481 /* This function is used to implement LEGITIMIZE_ADDRESS. If *XLOC can
2482 be legitimized in a way that the generic machinery might not expect,
2483 put the new address in *XLOC and return true. MODE is the mode of
2484 the memory being accessed. */
2487 mips_legitimize_address (rtx *xloc, enum machine_mode mode)
2489 if (mips_tls_operand_p (*xloc))
2491 *xloc = mips_legitimize_tls_address (*xloc);
2495 /* See if the address can split into a high part and a LO_SUM. */
2496 if (mips_split_symbol (NULL, *xloc, mode, xloc))
2499 if (GET_CODE (*xloc) == PLUS && GET_CODE (XEXP (*xloc, 1)) == CONST_INT)
2501 /* Handle REG + CONSTANT using mips_add_offset. */
2504 reg = XEXP (*xloc, 0);
2505 if (!mips_valid_base_register_p (reg, mode, 0))
2506 reg = copy_to_mode_reg (Pmode, reg);
2507 *xloc = mips_add_offset (0, reg, INTVAL (XEXP (*xloc, 1)));
2515 /* Subroutine of mips_build_integer (with the same interface).
2516 Assume that the final action in the sequence should be a left shift. */
2519 mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
2521 unsigned int i, shift;
2523 /* Shift VALUE right until its lowest bit is set. Shift arithmetically
2524 since signed numbers are easier to load than unsigned ones. */
2526 while ((value & 1) == 0)
2527 value /= 2, shift++;
2529 i = mips_build_integer (codes, value);
2530 codes[i].code = ASHIFT;
2531 codes[i].value = shift;
2536 /* As for mips_build_shift, but assume that the final action will be
2537 an IOR or PLUS operation. */
2540 mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
2542 unsigned HOST_WIDE_INT high;
2545 high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
2546 if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
2548 /* The constant is too complex to load with a simple lui/ori pair
2549 so our goal is to clear as many trailing zeros as possible.
2550 In this case, we know bit 16 is set and that the low 16 bits
2551 form a negative number. If we subtract that number from VALUE,
2552 we will clear at least the lowest 17 bits, maybe more. */
2553 i = mips_build_integer (codes, CONST_HIGH_PART (value));
2554 codes[i].code = PLUS;
2555 codes[i].value = CONST_LOW_PART (value);
2559 i = mips_build_integer (codes, high);
2560 codes[i].code = IOR;
2561 codes[i].value = value & 0xffff;
2567 /* Fill CODES with a sequence of rtl operations to load VALUE.
2568 Return the number of operations needed. */
2571 mips_build_integer (struct mips_integer_op *codes,
2572 unsigned HOST_WIDE_INT value)
2574 if (SMALL_OPERAND (value)
2575 || SMALL_OPERAND_UNSIGNED (value)
2576 || LUI_OPERAND (value))
2578 /* The value can be loaded with a single instruction. */
2579 codes[0].code = UNKNOWN;
2580 codes[0].value = value;
2583 else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
2585 /* Either the constant is a simple LUI/ORI combination or its
2586 lowest bit is set. We don't want to shift in this case. */
2587 return mips_build_lower (codes, value);
2589 else if ((value & 0xffff) == 0)
2591 /* The constant will need at least three actions. The lowest
2592 16 bits are clear, so the final action will be a shift. */
2593 return mips_build_shift (codes, value);
2597 /* The final action could be a shift, add or inclusive OR.
2598 Rather than use a complex condition to select the best
2599 approach, try both mips_build_shift and mips_build_lower
2600 and pick the one that gives the shortest sequence.
2601 Note that this case is only used once per constant. */
2602 struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
2603 unsigned int cost, alt_cost;
2605 cost = mips_build_shift (codes, value);
2606 alt_cost = mips_build_lower (alt_codes, value);
2607 if (alt_cost < cost)
2609 memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
2617 /* Load VALUE into DEST, using TEMP as a temporary register if need be. */
2620 mips_move_integer (rtx dest, rtx temp, unsigned HOST_WIDE_INT value)
2622 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2623 enum machine_mode mode;
2624 unsigned int i, cost;
2627 mode = GET_MODE (dest);
2628 cost = mips_build_integer (codes, value);
2630 /* Apply each binary operation to X. Invariant: X is a legitimate
2631 source operand for a SET pattern. */
2632 x = GEN_INT (codes[0].value);
2633 for (i = 1; i < cost; i++)
2635 if (!can_create_pseudo_p ())
2637 emit_insn (gen_rtx_SET (VOIDmode, temp, x));
2641 x = force_reg (mode, x);
2642 x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
2645 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
2649 /* Subroutine of mips_legitimize_move. Move constant SRC into register
2650 DEST given that SRC satisfies immediate_operand but doesn't satisfy
2654 mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
2658 /* Split moves of big integers into smaller pieces. */
2659 if (splittable_const_int_operand (src, mode))
2661 mips_move_integer (dest, dest, INTVAL (src));
2665 /* Split moves of symbolic constants into high/low pairs. */
2666 if (mips_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
2668 emit_insn (gen_rtx_SET (VOIDmode, dest, src));
2672 if (mips_tls_operand_p (src))
2674 mips_emit_move (dest, mips_legitimize_tls_address (src));
2678 /* If we have (const (plus symbol offset)), and that expression cannot
2679 be forced into memory, load the symbol first and add in the offset.
2680 In non-MIPS16 mode, prefer to do this even if the constant _can_ be
2681 forced into memory, as it usually produces better code. */
2682 split_const (src, &base, &offset);
2683 if (offset != const0_rtx
2684 && (targetm.cannot_force_const_mem (src)
2685 || (!TARGET_MIPS16 && can_create_pseudo_p ())))
2687 base = mips_force_temporary (dest, base);
2688 mips_emit_move (dest, mips_add_offset (0, base, INTVAL (offset)));
2692 src = force_const_mem (mode, src);
2694 /* When using explicit relocs, constant pool references are sometimes
2695 not legitimate addresses. */
2696 mips_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0));
2697 mips_emit_move (dest, src);
2701 /* If (set DEST SRC) is not a valid instruction, emit an equivalent
2702 sequence that is valid. */
2705 mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
2707 if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
2709 mips_emit_move (dest, force_reg (mode, src));
2713 /* Check for individual, fully-reloaded mflo and mfhi instructions. */
2714 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
2715 && REG_P (src) && MD_REG_P (REGNO (src))
2716 && REG_P (dest) && GP_REG_P (REGNO (dest)))
2718 int other_regno = REGNO (src) == HI_REGNUM ? LO_REGNUM : HI_REGNUM;
2719 if (GET_MODE_SIZE (mode) <= 4)
2720 emit_insn (gen_mfhilo_si (gen_rtx_REG (SImode, REGNO (dest)),
2721 gen_rtx_REG (SImode, REGNO (src)),
2722 gen_rtx_REG (SImode, other_regno)));
2724 emit_insn (gen_mfhilo_di (gen_rtx_REG (DImode, REGNO (dest)),
2725 gen_rtx_REG (DImode, REGNO (src)),
2726 gen_rtx_REG (DImode, other_regno)));
2730 /* We need to deal with constants that would be legitimate
2731 immediate_operands but not legitimate move_operands. */
2732 if (CONSTANT_P (src) && !move_operand (src, mode))
2734 mips_legitimize_const_move (mode, dest, src);
2735 set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
2741 /* We need a lot of little routines to check constant values on the
2742 mips16. These are used to figure out how long the instruction will
2743 be. It would be much better to do this using constraints, but
2744 there aren't nearly enough letters available. */
2747 m16_check_op (rtx op, int low, int high, int mask)
2749 return (GET_CODE (op) == CONST_INT
2750 && INTVAL (op) >= low
2751 && INTVAL (op) <= high
2752 && (INTVAL (op) & mask) == 0);
2756 m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2758 return m16_check_op (op, 0x1, 0x8, 0);
2762 m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2764 return m16_check_op (op, - 0x8, 0x7, 0);
2768 m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2770 return m16_check_op (op, - 0x7, 0x8, 0);
2774 m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2776 return m16_check_op (op, - 0x10, 0xf, 0);
2780 m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2782 return m16_check_op (op, - 0xf, 0x10, 0);
2786 m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2788 return m16_check_op (op, (- 0x10) << 2, 0xf << 2, 3);
2792 m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2794 return m16_check_op (op, (- 0xf) << 2, 0x10 << 2, 3);
2798 m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2800 return m16_check_op (op, - 0x80, 0x7f, 0);
2804 m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2806 return m16_check_op (op, - 0x7f, 0x80, 0);
2810 m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2812 return m16_check_op (op, 0x0, 0xff, 0);
2816 m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2818 return m16_check_op (op, - 0xff, 0x0, 0);
2822 m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2824 return m16_check_op (op, - 0x1, 0xfe, 0);
2828 m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2830 return m16_check_op (op, 0x0, 0xff << 2, 3);
2834 m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2836 return m16_check_op (op, (- 0xff) << 2, 0x0, 3);
2840 m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2842 return m16_check_op (op, (- 0x80) << 3, 0x7f << 3, 7);
2846 m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2848 return m16_check_op (op, (- 0x7f) << 3, 0x80 << 3, 7);
2851 /* Return true if ADDR matches the pattern for the lwxs load scaled indexed
2852 address instruction. */
2855 mips_lwxs_address_p (rtx addr)
2858 && GET_CODE (addr) == PLUS
2859 && REG_P (XEXP (addr, 1)))
2861 rtx offset = XEXP (addr, 0);
2862 if (GET_CODE (offset) == MULT
2863 && REG_P (XEXP (offset, 0))
2864 && GET_CODE (XEXP (offset, 1)) == CONST_INT
2865 && INTVAL (XEXP (offset, 1)) == 4)
2871 /* The cost of loading values from the constant pool. It should be
2872 larger than the cost of any constant we want to synthesize inline. */
2874 #define CONSTANT_POOL_COST COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 8)
2876 /* Return the cost of X when used as an operand to the MIPS16 instruction
2877 that implements CODE. Return -1 if there is no such instruction, or if
2878 X is not a valid immediate operand for it. */
2881 mips16_constant_cost (int code, HOST_WIDE_INT x)
2888 /* Shifts by between 1 and 8 bits (inclusive) are unextended,
2889 other shifts are extended. The shift patterns truncate the shift
2890 count to the right size, so there are no out-of-range values. */
2891 if (IN_RANGE (x, 1, 8))
2893 return COSTS_N_INSNS (1);
2896 if (IN_RANGE (x, -128, 127))
2898 if (SMALL_OPERAND (x))
2899 return COSTS_N_INSNS (1);
2903 /* Like LE, but reject the always-true case. */
2907 /* We add 1 to the immediate and use SLT. */
2910 /* We can use CMPI for an xor with an unsigned 16-bit X. */
2913 if (IN_RANGE (x, 0, 255))
2915 if (SMALL_OPERAND_UNSIGNED (x))
2916 return COSTS_N_INSNS (1);
2921 /* Equality comparisons with 0 are cheap. */
2931 /* Return true if there is a non-MIPS16 instruction that implements CODE
2932 and if that instruction accepts X as an immediate operand. */
2935 mips_immediate_operand_p (int code, HOST_WIDE_INT x)
2942 /* All shift counts are truncated to a valid constant. */
2947 /* Likewise rotates, if the target supports rotates at all. */
2953 /* These instructions take 16-bit unsigned immediates. */
2954 return SMALL_OPERAND_UNSIGNED (x);
2959 /* These instructions take 16-bit signed immediates. */
2960 return SMALL_OPERAND (x);
2966 /* The "immediate" forms of these instructions are really
2967 implemented as comparisons with register 0. */
2972 /* Likewise, meaning that the only valid immediate operand is 1. */
2976 /* We add 1 to the immediate and use SLT. */
2977 return SMALL_OPERAND (x + 1);
2980 /* Likewise SLTU, but reject the always-true case. */
2981 return SMALL_OPERAND (x + 1) && x + 1 != 0;
2985 /* The bit position and size are immediate operands. */
2986 return ISA_HAS_EXT_INS;
2989 /* By default assume that $0 can be used for 0. */
2994 /* Return the cost of binary operation X, given that the instruction
2995 sequence for a word-sized or smaller operation has cost SINGLE_COST
2996 and that the sequence of a double-word operation has cost DOUBLE_COST. */
2999 mips_binary_cost (rtx x, int single_cost, int double_cost)
3003 if (GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD * 2)
3008 + rtx_cost (XEXP (x, 0), 0)
3009 + rtx_cost (XEXP (x, 1), GET_CODE (x)));
3012 /* Return the cost of floating-point multiplications of mode MODE. */
3015 mips_fp_mult_cost (enum machine_mode mode)
3017 return mode == DFmode ? mips_cost->fp_mult_df : mips_cost->fp_mult_sf;
3020 /* Return the cost of floating-point divisions of mode MODE. */
3023 mips_fp_div_cost (enum machine_mode mode)
3025 return mode == DFmode ? mips_cost->fp_div_df : mips_cost->fp_div_sf;
3028 /* Return the cost of sign-extending OP to mode MODE, not including the
3029 cost of OP itself. */
3032 mips_sign_extend_cost (enum machine_mode mode, rtx op)
3035 /* Extended loads are as cheap as unextended ones. */
3038 if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
3039 /* A sign extension from SImode to DImode in 64-bit mode is free. */
3042 if (ISA_HAS_SEB_SEH || GENERATE_MIPS16E)
3043 /* We can use SEB or SEH. */
3044 return COSTS_N_INSNS (1);
3046 /* We need to use a shift left and a shift right. */
3047 return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
3050 /* Return the cost of zero-extending OP to mode MODE, not including the
3051 cost of OP itself. */
3054 mips_zero_extend_cost (enum machine_mode mode, rtx op)
3057 /* Extended loads are as cheap as unextended ones. */
3060 if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
3061 /* We need a shift left by 32 bits and a shift right by 32 bits. */
3062 return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
3064 if (GENERATE_MIPS16E)
3065 /* We can use ZEB or ZEH. */
3066 return COSTS_N_INSNS (1);
3069 /* We need to load 0xff or 0xffff into a register and use AND. */
3070 return COSTS_N_INSNS (GET_MODE (op) == QImode ? 2 : 3);
3072 /* We can use ANDI. */
3073 return COSTS_N_INSNS (1);
3076 /* Implement TARGET_RTX_COSTS. */
3079 mips_rtx_costs (rtx x, int code, int outer_code, int *total)
3081 enum machine_mode mode = GET_MODE (x);
3082 bool float_mode_p = FLOAT_MODE_P (mode);
3086 /* The cost of a COMPARE is hard to define for MIPS. COMPAREs don't
3087 appear in the instruction stream, and the cost of a comparison is
3088 really the cost of the branch or scc condition. At the time of
3089 writing, gcc only uses an explicit outer COMPARE code when optabs
3090 is testing whether a constant is expensive enough to force into a
3091 register. We want optabs to pass such constants through the MIPS
3092 expanders instead, so make all constants very cheap here. */
3093 if (outer_code == COMPARE)
3095 gcc_assert (CONSTANT_P (x));
3103 /* Treat *clear_upper32-style ANDs as having zero cost in the
3104 second operand. The cost is entirely in the first operand.
3106 ??? This is needed because we would otherwise try to CSE
3107 the constant operand. Although that's the right thing for
3108 instructions that continue to be a register operation throughout
3109 compilation, it is disastrous for instructions that could
3110 later be converted into a memory operation. */
3112 && outer_code == AND
3113 && UINTVAL (x) == 0xffffffff)
3121 cost = mips16_constant_cost (outer_code, INTVAL (x));
3130 /* When not optimizing for size, we care more about the cost
3131 of hot code, and hot code is often in a loop. If a constant
3132 operand needs to be forced into a register, we will often be
3133 able to hoist the constant load out of the loop, so the load
3134 should not contribute to the cost. */
3136 || mips_immediate_operand_p (outer_code, INTVAL (x)))
3148 if (force_to_mem_operand (x, VOIDmode))
3150 *total = COSTS_N_INSNS (1);
3153 cost = mips_const_insns (x);
3156 /* If the constant is likely to be stored in a GPR, SETs of
3157 single-insn constants are as cheap as register sets; we
3158 never want to CSE them.
3160 Don't reduce the cost of storing a floating-point zero in
3161 FPRs. If we have a zero in an FPR for other reasons, we
3162 can get better cfg-cleanup and delayed-branch results by
3163 using it consistently, rather than using $0 sometimes and
3164 an FPR at other times. Also, moves between floating-point
3165 registers are sometimes cheaper than (D)MTC1 $0. */
3167 && outer_code == SET
3168 && !(float_mode_p && TARGET_HARD_FLOAT))
3170 /* When non-MIPS16 code loads a constant N>1 times, we rarely
3171 want to CSE the constant itself. It is usually better to
3172 have N copies of the last operation in the sequence and one
3173 shared copy of the other operations. (Note that this is
3174 not true for MIPS16 code, where the final operation in the
3175 sequence is often an extended instruction.)
3177 Also, if we have a CONST_INT, we don't know whether it is
3178 for a word or doubleword operation, so we cannot rely on
3179 the result of mips_build_integer. */
3180 else if (!TARGET_MIPS16
3181 && (outer_code == SET || mode == VOIDmode))
3183 *total = COSTS_N_INSNS (cost);
3186 /* The value will need to be fetched from the constant pool. */
3187 *total = CONSTANT_POOL_COST;
3191 /* If the address is legitimate, return the number of
3192 instructions it needs. */
3194 cost = mips_address_insns (addr, mode, true);
3197 *total = COSTS_N_INSNS (cost + 1);
3200 /* Check for a scaled indexed address. */
3201 if (mips_lwxs_address_p (addr))
3203 *total = COSTS_N_INSNS (2);
3206 /* Otherwise use the default handling. */
3210 *total = COSTS_N_INSNS (6);
3214 *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1);
3218 /* Check for a *clear_upper32 pattern and treat it like a zero
3219 extension. See the pattern's comment for details. */
3222 && CONST_INT_P (XEXP (x, 1))
3223 && UINTVAL (XEXP (x, 1)) == 0xffffffff)
3225 *total = (mips_zero_extend_cost (mode, XEXP (x, 0))
3226 + rtx_cost (XEXP (x, 0), 0));
3233 /* Double-word operations use two single-word operations. */
3234 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (2));
3242 if (CONSTANT_P (XEXP (x, 1)))
3243 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4));
3245 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (12));
3250 *total = mips_cost->fp_add;
3252 *total = COSTS_N_INSNS (4);
3256 /* Low-part immediates need an extended MIPS16 instruction. */
3257 *total = (COSTS_N_INSNS (TARGET_MIPS16 ? 2 : 1)
3258 + rtx_cost (XEXP (x, 0), 0));
3273 /* Branch comparisons have VOIDmode, so use the first operand's
3275 mode = GET_MODE (XEXP (x, 0));
3276 if (FLOAT_MODE_P (mode))
3278 *total = mips_cost->fp_add;
3281 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4));
3286 && ISA_HAS_NMADD_NMSUB
3287 && TARGET_FUSED_MADD
3288 && !HONOR_NANS (mode)
3289 && !HONOR_SIGNED_ZEROS (mode))
3291 /* See if we can use NMADD or NMSUB. See mips.md for the
3292 associated patterns. */
3293 rtx op0 = XEXP (x, 0);
3294 rtx op1 = XEXP (x, 1);
3295 if (GET_CODE (op0) == MULT && GET_CODE (XEXP (op0, 0)) == NEG)
3297 *total = (mips_fp_mult_cost (mode)
3298 + rtx_cost (XEXP (XEXP (op0, 0), 0), 0)
3299 + rtx_cost (XEXP (op0, 1), 0)
3300 + rtx_cost (op1, 0));
3303 if (GET_CODE (op1) == MULT)
3305 *total = (mips_fp_mult_cost (mode)
3307 + rtx_cost (XEXP (op1, 0), 0)
3308 + rtx_cost (XEXP (op1, 1), 0));
3318 && TARGET_FUSED_MADD
3319 && GET_CODE (XEXP (x, 0)) == MULT)
3322 *total = mips_cost->fp_add;
3326 /* Double-word operations require three single-word operations and
3327 an SLTU. The MIPS16 version then needs to move the result of
3328 the SLTU from $24 to a MIPS16 register. */
3329 *total = mips_binary_cost (x, COSTS_N_INSNS (1),
3330 COSTS_N_INSNS (TARGET_MIPS16 ? 5 : 4));
3335 && ISA_HAS_NMADD_NMSUB
3336 && TARGET_FUSED_MADD
3337 && !HONOR_NANS (mode)
3338 && HONOR_SIGNED_ZEROS (mode))
3340 /* See if we can use NMADD or NMSUB. See mips.md for the
3341 associated patterns. */
3342 rtx op = XEXP (x, 0);
3343 if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
3344 && GET_CODE (XEXP (op, 0)) == MULT)
3346 *total = (mips_fp_mult_cost (mode)
3347 + rtx_cost (XEXP (XEXP (op, 0), 0), 0)
3348 + rtx_cost (XEXP (XEXP (op, 0), 1), 0)
3349 + rtx_cost (XEXP (op, 1), 0));
3355 *total = mips_cost->fp_add;
3357 *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 4 : 1);
3362 *total = mips_fp_mult_cost (mode);
3363 else if (mode == DImode && !TARGET_64BIT)
3364 /* Synthesized from 2 mulsi3s, 1 mulsidi3 and two additions,
3365 where the mulsidi3 always includes an MFHI and an MFLO. */
3366 *total = (optimize_size
3367 ? COSTS_N_INSNS (ISA_HAS_MUL3 ? 7 : 9)
3368 : mips_cost->int_mult_si * 3 + 6);
3369 else if (optimize_size)
3370 *total = (ISA_HAS_MUL3 ? 1 : 2);
3371 else if (mode == DImode)
3372 *total = mips_cost->int_mult_di;
3374 *total = mips_cost->int_mult_si;
3378 /* Check for a reciprocal. */
3379 if (float_mode_p && XEXP (x, 0) == CONST1_RTX (mode))
3382 && flag_unsafe_math_optimizations
3383 && (outer_code == SQRT || GET_CODE (XEXP (x, 1)) == SQRT))
3385 /* An rsqrt<mode>a or rsqrt<mode>b pattern. Count the
3386 division as being free. */
3387 *total = rtx_cost (XEXP (x, 1), 0);
3392 *total = mips_fp_div_cost (mode) + rtx_cost (XEXP (x, 1), 0);
3402 *total = mips_fp_div_cost (mode);
3411 /* It is our responsibility to make division by a power of 2
3412 as cheap as 2 register additions if we want the division
3413 expanders to be used for such operations; see the setting
3414 of sdiv_pow2_cheap in optabs.c. Using (D)DIV for MIPS16
3415 should always produce shorter code than using
3416 expand_sdiv2_pow2. */
3418 && CONST_INT_P (XEXP (x, 1))
3419 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
3421 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), 0);
3424 *total = COSTS_N_INSNS (mips_idiv_insns ());
3426 else if (mode == DImode)
3427 *total = mips_cost->int_div_di;
3429 *total = mips_cost->int_div_si;
3433 *total = mips_sign_extend_cost (mode, XEXP (x, 0));
3437 *total = mips_zero_extend_cost (mode, XEXP (x, 0));
3441 case UNSIGNED_FLOAT:
3444 case FLOAT_TRUNCATE:
3445 *total = mips_cost->fp_add;
3453 /* Provide the costs of an addressing mode that contains ADDR.
3454 If ADDR is not a valid address, its cost is irrelevant. */
3457 mips_address_cost (rtx addr)
3459 return mips_address_insns (addr, SImode, false);
3462 /* Return one word of double-word value OP, taking into account the fixed
3463 endianness of certain registers. HIGH_P is true to select the high part,
3464 false to select the low part. */
3467 mips_subword (rtx op, int high_p)
3470 enum machine_mode mode;
3472 mode = GET_MODE (op);
3473 if (mode == VOIDmode)
3476 if (TARGET_BIG_ENDIAN ? !high_p : high_p)
3477 byte = UNITS_PER_WORD;
3481 if (FP_REG_RTX_P (op))
3482 return gen_rtx_REG (word_mode, high_p ? REGNO (op) + 1 : REGNO (op));
3485 return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
3487 return simplify_gen_subreg (word_mode, op, mode, byte);
3491 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
3494 mips_split_64bit_move_p (rtx dest, rtx src)
3499 /* FP->FP moves can be done in a single instruction. */
3500 if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
3503 /* Check for floating-point loads and stores. They can be done using
3504 ldc1 and sdc1 on MIPS II and above. */
3507 if (FP_REG_RTX_P (dest) && MEM_P (src))
3509 if (FP_REG_RTX_P (src) && MEM_P (dest))
3516 /* Split a 64-bit move from SRC to DEST assuming that
3517 mips_split_64bit_move_p holds.
3519 Moves into and out of FPRs cause some difficulty here. Such moves
3520 will always be DFmode, since paired FPRs are not allowed to store
3521 DImode values. The most natural representation would be two separate
3522 32-bit moves, such as:
3524 (set (reg:SI $f0) (mem:SI ...))
3525 (set (reg:SI $f1) (mem:SI ...))
3527 However, the second insn is invalid because odd-numbered FPRs are
3528 not allowed to store independent values. Use the patterns load_df_low,
3529 load_df_high and store_df_high instead. */
3532 mips_split_64bit_move (rtx dest, rtx src)
3534 if (FP_REG_RTX_P (dest))
3536 /* Loading an FPR from memory or from GPRs. */
3539 dest = gen_lowpart (DFmode, dest);
3540 emit_insn (gen_load_df_low (dest, mips_subword (src, 0)));
3541 emit_insn (gen_mthc1 (dest, mips_subword (src, 1),
3546 emit_insn (gen_load_df_low (copy_rtx (dest),
3547 mips_subword (src, 0)));
3548 emit_insn (gen_load_df_high (dest, mips_subword (src, 1),
3552 else if (FP_REG_RTX_P (src))
3554 /* Storing an FPR into memory or GPRs. */
3557 src = gen_lowpart (DFmode, src);
3558 mips_emit_move (mips_subword (dest, 0), mips_subword (src, 0));
3559 emit_insn (gen_mfhc1 (mips_subword (dest, 1), src));
3563 mips_emit_move (mips_subword (dest, 0), mips_subword (src, 0));
3564 emit_insn (gen_store_df_high (mips_subword (dest, 1), src));
3569 /* The operation can be split into two normal moves. Decide in
3570 which order to do them. */
3573 low_dest = mips_subword (dest, 0);
3574 if (REG_P (low_dest)
3575 && reg_overlap_mentioned_p (low_dest, src))
3577 mips_emit_move (mips_subword (dest, 1), mips_subword (src, 1));
3578 mips_emit_move (low_dest, mips_subword (src, 0));
3582 mips_emit_move (low_dest, mips_subword (src, 0));
3583 mips_emit_move (mips_subword (dest, 1), mips_subword (src, 1));
3588 /* Return the appropriate instructions to move SRC into DEST. Assume
3589 that SRC is operand 1 and DEST is operand 0. */
3592 mips_output_move (rtx dest, rtx src)
3594 enum rtx_code dest_code, src_code;
3595 enum mips_symbol_type symbol_type;
3598 dest_code = GET_CODE (dest);
3599 src_code = GET_CODE (src);
3600 dbl_p = (GET_MODE_SIZE (GET_MODE (dest)) == 8);
3602 if (dbl_p && mips_split_64bit_move_p (dest, src))
3605 if ((src_code == REG && GP_REG_P (REGNO (src)))
3606 || (!TARGET_MIPS16 && src == CONST0_RTX (GET_MODE (dest))))
3608 if (dest_code == REG)
3610 if (GP_REG_P (REGNO (dest)))
3611 return "move\t%0,%z1";
3613 if (MD_REG_P (REGNO (dest)))
3616 if (DSP_ACC_REG_P (REGNO (dest)))
3618 static char retval[] = "mt__\t%z1,%q0";
3619 retval[2] = reg_names[REGNO (dest)][4];
3620 retval[3] = reg_names[REGNO (dest)][5];
3624 if (FP_REG_P (REGNO (dest)))
3625 return (dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0");
3627 if (ALL_COP_REG_P (REGNO (dest)))
3629 static char retval[] = "dmtc_\t%z1,%0";
3631 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3632 return (dbl_p ? retval : retval + 1);
3635 if (dest_code == MEM)
3636 return (dbl_p ? "sd\t%z1,%0" : "sw\t%z1,%0");
3638 if (dest_code == REG && GP_REG_P (REGNO (dest)))
3640 if (src_code == REG)
3642 if (DSP_ACC_REG_P (REGNO (src)))
3644 static char retval[] = "mf__\t%0,%q1";
3645 retval[2] = reg_names[REGNO (src)][4];
3646 retval[3] = reg_names[REGNO (src)][5];
3650 if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
3651 return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
3653 if (FP_REG_P (REGNO (src)))
3654 return (dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1");
3656 if (ALL_COP_REG_P (REGNO (src)))
3658 static char retval[] = "dmfc_\t%0,%1";
3660 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3661 return (dbl_p ? retval : retval + 1);
3665 if (src_code == MEM)
3666 return (dbl_p ? "ld\t%0,%1" : "lw\t%0,%1");
3668 if (src_code == CONST_INT)
3670 /* Don't use the X format, because that will give out of
3671 range numbers for 64-bit hosts and 32-bit targets. */
3673 return "li\t%0,%1\t\t\t# %X1";
3675 if (INTVAL (src) >= 0 && INTVAL (src) <= 0xffff)
3678 if (INTVAL (src) < 0 && INTVAL (src) >= -0xffff)
3682 if (src_code == HIGH)
3683 return TARGET_MIPS16 ? "#" : "lui\t%0,%h1";
3685 if (CONST_GP_P (src))
3686 return "move\t%0,%1";
3688 if (mips_symbolic_constant_p (src, SYMBOL_CONTEXT_LEA, &symbol_type)
3689 && mips_lo_relocs[symbol_type] != 0)
3691 /* A signed 16-bit constant formed by applying a relocation
3692 operator to a symbolic address. */
3693 gcc_assert (!mips_split_p[symbol_type]);
3694 return "li\t%0,%R1";
3697 if (symbolic_operand (src, VOIDmode))
3699 gcc_assert (TARGET_MIPS16
3700 ? TARGET_MIPS16_TEXT_LOADS
3701 : !TARGET_EXPLICIT_RELOCS);
3702 return (dbl_p ? "dla\t%0,%1" : "la\t%0,%1");
3705 if (src_code == REG && FP_REG_P (REGNO (src)))
3707 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3709 if (GET_MODE (dest) == V2SFmode)
3710 return "mov.ps\t%0,%1";
3712 return (dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1");
3715 if (dest_code == MEM)
3716 return (dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0");
3718 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3720 if (src_code == MEM)
3721 return (dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1");
3723 if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
3725 static char retval[] = "l_c_\t%0,%1";
3727 retval[1] = (dbl_p ? 'd' : 'w');
3728 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3731 if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
3733 static char retval[] = "s_c_\t%1,%0";
3735 retval[1] = (dbl_p ? 'd' : 'w');
3736 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3742 /* Restore $gp from its save slot. Valid only when using o32 or
3746 mips_restore_gp (void)
3750 gcc_assert (TARGET_ABICALLS && TARGET_OLDABI);
3752 address = mips_add_offset (pic_offset_table_rtx,
3753 frame_pointer_needed
3754 ? hard_frame_pointer_rtx
3755 : stack_pointer_rtx,
3756 current_function_outgoing_args_size);
3757 slot = gen_rtx_MEM (Pmode, address);
3759 mips_emit_move (pic_offset_table_rtx, slot);
3760 if (!TARGET_EXPLICIT_RELOCS)
3761 emit_insn (gen_blockage ());
3764 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
3767 mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
3769 emit_insn (gen_rtx_SET (VOIDmode, target,
3770 gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
3773 /* Return true if CMP1 is a suitable second operand for relational
3774 operator CODE. See also the *sCC patterns in mips.md. */
3777 mips_relational_operand_ok_p (enum rtx_code code, rtx cmp1)
3783 return reg_or_0_operand (cmp1, VOIDmode);
3787 return !TARGET_MIPS16 && cmp1 == const1_rtx;
3791 return arith_operand (cmp1, VOIDmode);
3794 return sle_operand (cmp1, VOIDmode);
3797 return sleu_operand (cmp1, VOIDmode);
3804 /* Canonicalize LE or LEU comparisons into LT comparisons when
3805 possible to avoid extra instructions or inverting the
3809 mips_canonicalize_comparison (enum rtx_code *code, rtx *cmp1,
3810 enum machine_mode mode)
3812 HOST_WIDE_INT original, plus_one;
3814 if (GET_CODE (*cmp1) != CONST_INT)
3817 original = INTVAL (*cmp1);
3818 plus_one = trunc_int_for_mode ((unsigned HOST_WIDE_INT) original + 1, mode);
3823 if (original < plus_one)
3826 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3835 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3848 /* Compare CMP0 and CMP1 using relational operator CODE and store the
3849 result in TARGET. CMP0 and TARGET are register_operands that have
3850 the same integer mode. If INVERT_PTR is nonnull, it's OK to set
3851 TARGET to the inverse of the result and flip *INVERT_PTR instead. */
3854 mips_emit_int_relational (enum rtx_code code, bool *invert_ptr,
3855 rtx target, rtx cmp0, rtx cmp1)
3857 /* First see if there is a MIPS instruction that can do this operation
3858 with CMP1 in its current form. If not, try to canonicalize the
3859 comparison to LT. If that fails, try doing the same for the
3860 inverse operation. If that also fails, force CMP1 into a register
3862 if (mips_relational_operand_ok_p (code, cmp1))
3863 mips_emit_binary (code, target, cmp0, cmp1);
3864 else if (mips_canonicalize_comparison (&code, &cmp1, GET_MODE (target)))
3865 mips_emit_binary (code, target, cmp0, cmp1);
3868 enum rtx_code inv_code = reverse_condition (code);
3869 if (!mips_relational_operand_ok_p (inv_code, cmp1))
3871 cmp1 = force_reg (GET_MODE (cmp0), cmp1);
3872 mips_emit_int_relational (code, invert_ptr, target, cmp0, cmp1);
3874 else if (invert_ptr == 0)
3876 rtx inv_target = gen_reg_rtx (GET_MODE (target));
3877 mips_emit_binary (inv_code, inv_target, cmp0, cmp1);
3878 mips_emit_binary (XOR, target, inv_target, const1_rtx);
3882 *invert_ptr = !*invert_ptr;
3883 mips_emit_binary (inv_code, target, cmp0, cmp1);
3888 /* Return a register that is zero iff CMP0 and CMP1 are equal.
3889 The register will have the same mode as CMP0. */
3892 mips_zero_if_equal (rtx cmp0, rtx cmp1)
3894 if (cmp1 == const0_rtx)
3897 if (uns_arith_operand (cmp1, VOIDmode))
3898 return expand_binop (GET_MODE (cmp0), xor_optab,
3899 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3901 return expand_binop (GET_MODE (cmp0), sub_optab,
3902 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3905 /* Convert *CODE into a code that can be used in a floating-point
3906 scc instruction (c.<cond>.<fmt>). Return true if the values of
3907 the condition code registers will be inverted, with 0 indicating
3908 that the condition holds. */
3911 mips_reverse_fp_cond_p (enum rtx_code *code)
3918 *code = reverse_condition_maybe_unordered (*code);
3926 /* Convert a comparison into something that can be used in a branch or
3927 conditional move. cmp_operands[0] and cmp_operands[1] are the values
3928 being compared and *CODE is the code used to compare them.
3930 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
3931 If NEED_EQ_NE_P, then only EQ/NE comparisons against zero are possible,
3932 otherwise any standard branch condition can be used. The standard branch
3935 - EQ/NE between two registers.
3936 - any comparison between a register and zero. */
3939 mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
3941 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) == MODE_INT)
3943 if (!need_eq_ne_p && cmp_operands[1] == const0_rtx)
3945 *op0 = cmp_operands[0];
3946 *op1 = cmp_operands[1];
3948 else if (*code == EQ || *code == NE)
3952 *op0 = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3957 *op0 = cmp_operands[0];
3958 *op1 = force_reg (GET_MODE (*op0), cmp_operands[1]);
3963 /* The comparison needs a separate scc instruction. Store the
3964 result of the scc in *OP0 and compare it against zero. */
3965 bool invert = false;
3966 *op0 = gen_reg_rtx (GET_MODE (cmp_operands[0]));
3968 mips_emit_int_relational (*code, &invert, *op0,
3969 cmp_operands[0], cmp_operands[1]);
3970 *code = (invert ? EQ : NE);
3973 else if (ALL_FIXED_POINT_MODE_P (GET_MODE (cmp_operands[0])))
3975 *op0 = gen_rtx_REG (CCDSPmode, CCDSP_CC_REGNUM);
3976 mips_emit_binary (*code, *op0, cmp_operands[0], cmp_operands[1]);
3982 enum rtx_code cmp_code;
3984 /* Floating-point tests use a separate c.cond.fmt comparison to
3985 set a condition code register. The branch or conditional move
3986 will then compare that register against zero.
3988 Set CMP_CODE to the code of the comparison instruction and
3989 *CODE to the code that the branch or move should use. */
3991 *code = mips_reverse_fp_cond_p (&cmp_code) ? EQ : NE;
3993 ? gen_reg_rtx (CCmode)
3994 : gen_rtx_REG (CCmode, FPSW_REGNUM));
3996 mips_emit_binary (cmp_code, *op0, cmp_operands[0], cmp_operands[1]);
4000 /* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE.
4001 Store the result in TARGET and return true if successful.
4003 On 64-bit targets, TARGET may be wider than cmp_operands[0]. */
4006 mips_emit_scc (enum rtx_code code, rtx target)
4008 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) != MODE_INT)
4011 target = gen_lowpart (GET_MODE (cmp_operands[0]), target);
4012 if (code == EQ || code == NE)
4014 rtx zie = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
4015 mips_emit_binary (code, target, zie, const0_rtx);
4018 mips_emit_int_relational (code, 0, target,
4019 cmp_operands[0], cmp_operands[1]);
4023 /* Emit the common code for doing conditional branches.
4024 operand[0] is the label to jump to.
4025 The comparison operands are saved away by cmp{si,di,sf,df}. */
4028 gen_conditional_branch (rtx *operands, enum rtx_code code)
4030 rtx op0, op1, condition;
4032 mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
4033 condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
4034 emit_jump_insn (gen_condjump (condition, operands[0]));
4039 (set temp (COND:CCV2 CMP_OP0 CMP_OP1))
4040 (set DEST (unspec [TRUE_SRC FALSE_SRC temp] UNSPEC_MOVE_TF_PS)) */
4043 mips_expand_vcondv2sf (rtx dest, rtx true_src, rtx false_src,
4044 enum rtx_code cond, rtx cmp_op0, rtx cmp_op1)
4049 reversed_p = mips_reverse_fp_cond_p (&cond);
4050 cmp_result = gen_reg_rtx (CCV2mode);
4051 emit_insn (gen_scc_ps (cmp_result,
4052 gen_rtx_fmt_ee (cond, VOIDmode, cmp_op0, cmp_op1)));
4054 emit_insn (gen_mips_cond_move_tf_ps (dest, false_src, true_src,
4057 emit_insn (gen_mips_cond_move_tf_ps (dest, true_src, false_src,
4061 /* Emit the common code for conditional moves. OPERANDS is the array
4062 of operands passed to the conditional move define_expand. */
4065 gen_conditional_move (rtx *operands)
4070 code = GET_CODE (operands[1]);
4071 mips_emit_compare (&code, &op0, &op1, true);
4072 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
4073 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
4074 gen_rtx_fmt_ee (code,
4077 operands[2], operands[3])));
4080 /* Emit a conditional trap. OPERANDS is the array of operands passed to
4081 the conditional_trap expander. */
4084 mips_gen_conditional_trap (rtx *operands)
4087 enum rtx_code cmp_code = GET_CODE (operands[0]);
4088 enum machine_mode mode = GET_MODE (cmp_operands[0]);
4090 /* MIPS conditional trap machine instructions don't have GT or LE
4091 flavors, so we must invert the comparison and convert to LT and
4092 GE, respectively. */
4095 case GT: cmp_code = LT; break;
4096 case LE: cmp_code = GE; break;
4097 case GTU: cmp_code = LTU; break;
4098 case LEU: cmp_code = GEU; break;
4101 if (cmp_code == GET_CODE (operands[0]))
4103 op0 = cmp_operands[0];
4104 op1 = cmp_operands[1];
4108 op0 = cmp_operands[1];
4109 op1 = cmp_operands[0];
4111 op0 = force_reg (mode, op0);
4112 if (!arith_operand (op1, mode))
4113 op1 = force_reg (mode, op1);
4115 emit_insn (gen_rtx_TRAP_IF (VOIDmode,
4116 gen_rtx_fmt_ee (cmp_code, mode, op0, op1),
4120 /* Return true if calls to X can use R_MIPS_CALL* relocations. */
4123 mips_ok_for_lazy_binding_p (rtx x)
4125 return (TARGET_USE_GOT
4126 && GET_CODE (x) == SYMBOL_REF
4127 && !mips_symbol_binds_local_p (x));
4130 /* Load function address ADDR into register DEST. SIBCALL_P is true
4131 if the address is needed for a sibling call. */
4134 mips_load_call_address (rtx dest, rtx addr, int sibcall_p)
4136 /* If we're generating PIC, and this call is to a global function,
4137 try to allow its address to be resolved lazily. This isn't
4138 possible if TARGET_CALL_SAVED_GP since the value of $gp on entry
4139 to the stub would be our caller's gp, not ours. */
4140 if (TARGET_EXPLICIT_RELOCS
4141 && !(sibcall_p && TARGET_CALL_SAVED_GP)
4142 && mips_ok_for_lazy_binding_p (addr))
4144 rtx high, lo_sum_symbol;
4146 high = mips_unspec_offset_high (dest, pic_offset_table_rtx,
4147 addr, SYMBOL_GOTOFF_CALL);
4148 lo_sum_symbol = mips_unspec_address (addr, SYMBOL_GOTOFF_CALL);
4149 if (Pmode == SImode)
4150 emit_insn (gen_load_callsi (dest, high, lo_sum_symbol));
4152 emit_insn (gen_load_calldi (dest, high, lo_sum_symbol));
4155 mips_emit_move (dest, addr);
4159 /* Expand a call or call_value instruction. RESULT is where the
4160 result will go (null for calls), ADDR is the address of the
4161 function, ARGS_SIZE is the size of the arguments and AUX is
4162 the value passed to us by mips_function_arg. SIBCALL_P is true
4163 if we are expanding a sibling call, false if we're expanding
4167 mips_expand_call (rtx result, rtx addr, rtx args_size, rtx aux, int sibcall_p)
4169 rtx orig_addr, pattern, insn;
4172 if (!call_insn_operand (addr, VOIDmode))
4174 addr = gen_reg_rtx (Pmode);
4175 mips_load_call_address (addr, orig_addr, sibcall_p);
4179 && TARGET_HARD_FLOAT_ABI
4180 && build_mips16_call_stub (result, addr, args_size,
4181 aux == 0 ? 0 : (int) GET_MODE (aux)))
4185 pattern = (sibcall_p
4186 ? gen_sibcall_internal (addr, args_size)
4187 : gen_call_internal (addr, args_size));
4188 else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
4192 reg1 = XEXP (XVECEXP (result, 0, 0), 0);
4193 reg2 = XEXP (XVECEXP (result, 0, 1), 0);
4196 ? gen_sibcall_value_multiple_internal (reg1, addr, args_size, reg2)
4197 : gen_call_value_multiple_internal (reg1, addr, args_size, reg2));
4200 pattern = (sibcall_p
4201 ? gen_sibcall_value_internal (result, addr, args_size)
4202 : gen_call_value_internal (result, addr, args_size));
4204 insn = emit_call_insn (pattern);
4206 /* Lazy-binding stubs require $gp to be valid on entry. */
4207 if (mips_ok_for_lazy_binding_p (orig_addr))
4208 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
4212 /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */
4215 mips_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
4217 if (!TARGET_SIBCALLS)
4220 /* We can't do a sibcall if the called function is a MIPS16 function
4221 because there is no direct "jx" instruction equivalent to "jalx" to
4222 switch the ISA mode. */
4223 if (decl && SYMBOL_REF_MIPS16_FUNC_P (XEXP (DECL_RTL (decl), 0)))
4230 /* Emit code to move general operand SRC into condition-code
4231 register DEST. SCRATCH is a scratch TFmode float register.
4238 where FP1 and FP2 are single-precision float registers
4239 taken from SCRATCH. */
4242 mips_emit_fcc_reload (rtx dest, rtx src, rtx scratch)
4246 /* Change the source to SFmode. */
4248 src = adjust_address (src, SFmode, 0);
4249 else if (REG_P (src) || GET_CODE (src) == SUBREG)
4250 src = gen_rtx_REG (SFmode, true_regnum (src));
4252 fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
4253 fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + MAX_FPRS_PER_FMT);
4255 mips_emit_move (copy_rtx (fp1), src);
4256 mips_emit_move (copy_rtx (fp2), CONST0_RTX (SFmode));
4257 emit_insn (gen_slt_sf (dest, fp2, fp1));
4260 /* Emit code to change the current function's return address to
4261 ADDRESS. SCRATCH is available as a scratch register, if needed.
4262 ADDRESS and SCRATCH are both word-mode GPRs. */
4265 mips_set_return_address (rtx address, rtx scratch)
4269 compute_frame_size (get_frame_size ());
4270 gcc_assert ((cfun->machine->frame.mask >> 31) & 1);
4271 slot_address = mips_add_offset (scratch, stack_pointer_rtx,
4272 cfun->machine->frame.gp_sp_offset);
4274 mips_emit_move (gen_rtx_MEM (GET_MODE (address), slot_address), address);
4277 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
4278 Assume that the areas do not overlap. */
4281 mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
4283 HOST_WIDE_INT offset, delta;
4284 unsigned HOST_WIDE_INT bits;
4286 enum machine_mode mode;
4289 /* Work out how many bits to move at a time. If both operands have
4290 half-word alignment, it is usually better to move in half words.
4291 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
4292 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
4293 Otherwise move word-sized chunks. */
4294 if (MEM_ALIGN (src) == BITS_PER_WORD / 2
4295 && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
4296 bits = BITS_PER_WORD / 2;
4298 bits = BITS_PER_WORD;
4300 mode = mode_for_size (bits, MODE_INT, 0);
4301 delta = bits / BITS_PER_UNIT;
4303 /* Allocate a buffer for the temporary registers. */
4304 regs = alloca (sizeof (rtx) * length / delta);
4306 /* Load as many BITS-sized chunks as possible. Use a normal load if
4307 the source has enough alignment, otherwise use left/right pairs. */
4308 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
4310 regs[i] = gen_reg_rtx (mode);
4311 if (MEM_ALIGN (src) >= bits)
4312 mips_emit_move (regs[i], adjust_address (src, mode, offset));
4315 rtx part = adjust_address (src, BLKmode, offset);
4316 if (!mips_expand_unaligned_load (regs[i], part, bits, 0))
4321 /* Copy the chunks to the destination. */
4322 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
4323 if (MEM_ALIGN (dest) >= bits)
4324 mips_emit_move (adjust_address (dest, mode, offset), regs[i]);
4327 rtx part = adjust_address (dest, BLKmode, offset);
4328 if (!mips_expand_unaligned_store (part, regs[i], bits, 0))
4332 /* Mop up any left-over bytes. */
4333 if (offset < length)
4335 src = adjust_address (src, BLKmode, offset);
4336 dest = adjust_address (dest, BLKmode, offset);
4337 move_by_pieces (dest, src, length - offset,
4338 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
4342 #define MAX_MOVE_REGS 4
4343 #define MAX_MOVE_BYTES (MAX_MOVE_REGS * UNITS_PER_WORD)
4346 /* Helper function for doing a loop-based block operation on memory
4347 reference MEM. Each iteration of the loop will operate on LENGTH
4350 Create a new base register for use within the loop and point it to
4351 the start of MEM. Create a new memory reference that uses this
4352 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
4355 mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
4356 rtx *loop_reg, rtx *loop_mem)
4358 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
4360 /* Although the new mem does not refer to a known location,
4361 it does keep up to LENGTH bytes of alignment. */
4362 *loop_mem = change_address (mem, BLKmode, *loop_reg);
4363 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
4367 /* Move LENGTH bytes from SRC to DEST using a loop that moves MAX_MOVE_BYTES
4368 per iteration. LENGTH must be at least MAX_MOVE_BYTES. Assume that the
4369 memory regions do not overlap. */
4372 mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length)
4374 rtx label, src_reg, dest_reg, final_src;
4375 HOST_WIDE_INT leftover;
4377 leftover = length % MAX_MOVE_BYTES;
4380 /* Create registers and memory references for use within the loop. */
4381 mips_adjust_block_mem (src, MAX_MOVE_BYTES, &src_reg, &src);
4382 mips_adjust_block_mem (dest, MAX_MOVE_BYTES, &dest_reg, &dest);
4384 /* Calculate the value that SRC_REG should have after the last iteration
4386 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
4389 /* Emit the start of the loop. */
4390 label = gen_label_rtx ();
4393 /* Emit the loop body. */
4394 mips_block_move_straight (dest, src, MAX_MOVE_BYTES);
4396 /* Move on to the next block. */
4397 mips_emit_move (src_reg, plus_constant (src_reg, MAX_MOVE_BYTES));
4398 mips_emit_move (dest_reg, plus_constant (dest_reg, MAX_MOVE_BYTES));
4400 /* Emit the loop condition. */
4401 if (Pmode == DImode)
4402 emit_insn (gen_cmpdi (src_reg, final_src));
4404 emit_insn (gen_cmpsi (src_reg, final_src));
4405 emit_jump_insn (gen_bne (label));
4407 /* Mop up any left-over bytes. */
4409 mips_block_move_straight (dest, src, leftover);
4413 /* Expand a loop of synci insns for the address range [BEGIN, END). */
4416 mips_expand_synci_loop (rtx begin, rtx end)
4418 rtx inc, label, cmp, cmp_result;
4420 /* Load INC with the cache line size (rdhwr INC,$1). */
4421 inc = gen_reg_rtx (SImode);
4422 emit_insn (gen_rdhwr (inc, const1_rtx));
4424 /* Loop back to here. */
4425 label = gen_label_rtx ();
4428 emit_insn (gen_synci (begin));
4430 cmp = gen_reg_rtx (Pmode);
4431 mips_emit_binary (GTU, cmp, begin, end);
4433 mips_emit_binary (PLUS, begin, begin, inc);
4435 cmp_result = gen_rtx_EQ (VOIDmode, cmp, const0_rtx);
4436 emit_jump_insn (gen_condjump (cmp_result, label));
4439 /* Expand a movmemsi instruction. */
4442 mips_expand_block_move (rtx dest, rtx src, rtx length)
4444 if (GET_CODE (length) == CONST_INT)
4446 if (INTVAL (length) <= 2 * MAX_MOVE_BYTES)
4448 mips_block_move_straight (dest, src, INTVAL (length));
4453 mips_block_move_loop (dest, src, INTVAL (length));
4460 /* Argument support functions. */
4462 /* Initialize CUMULATIVE_ARGS for a function. */
4465 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
4466 rtx libname ATTRIBUTE_UNUSED)
4468 static CUMULATIVE_ARGS zero_cum;
4469 tree param, next_param;
4472 cum->prototype = (fntype && TYPE_ARG_TYPES (fntype));
4474 /* Determine if this function has variable arguments. This is
4475 indicated by the last argument being 'void_type_mode' if there
4476 are no variable arguments. The standard MIPS calling sequence
4477 passes all arguments in the general purpose registers in this case. */
4479 for (param = fntype ? TYPE_ARG_TYPES (fntype) : 0;
4480 param != 0; param = next_param)
4482 next_param = TREE_CHAIN (param);
4483 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
4484 cum->gp_reg_found = 1;
4489 /* Fill INFO with information about a single argument. CUM is the
4490 cumulative state for earlier arguments. MODE is the mode of this
4491 argument and TYPE is its type (if known). NAMED is true if this
4492 is a named (fixed) argument rather than a variable one. */
4495 mips_arg_info (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4496 tree type, int named, struct mips_arg_info *info)
4498 bool doubleword_aligned_p;
4499 unsigned int num_bytes, num_words, max_regs;
4501 /* Work out the size of the argument. */
4502 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
4503 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4505 /* Decide whether it should go in a floating-point register, assuming
4506 one is free. Later code checks for availability.
4508 The checks against UNITS_PER_FPVALUE handle the soft-float and
4509 single-float cases. */
4513 /* The EABI conventions have traditionally been defined in terms
4514 of TYPE_MODE, regardless of the actual type. */
4515 info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
4516 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4517 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4522 /* Only leading floating-point scalars are passed in
4523 floating-point registers. We also handle vector floats the same
4524 say, which is OK because they are not covered by the standard ABI. */
4525 info->fpr_p = (!cum->gp_reg_found
4526 && cum->arg_number < 2
4527 && (type == 0 || SCALAR_FLOAT_TYPE_P (type)
4528 || VECTOR_FLOAT_TYPE_P (type))
4529 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4530 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4531 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4536 /* Scalar and complex floating-point types are passed in
4537 floating-point registers. */
4538 info->fpr_p = (named
4539 && (type == 0 || FLOAT_TYPE_P (type))
4540 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4541 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4542 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4543 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
4545 /* ??? According to the ABI documentation, the real and imaginary
4546 parts of complex floats should be passed in individual registers.
4547 The real and imaginary parts of stack arguments are supposed
4548 to be contiguous and there should be an extra word of padding
4551 This has two problems. First, it makes it impossible to use a
4552 single "void *" va_list type, since register and stack arguments
4553 are passed differently. (At the time of writing, MIPSpro cannot
4554 handle complex float varargs correctly.) Second, it's unclear
4555 what should happen when there is only one register free.
4557 For now, we assume that named complex floats should go into FPRs
4558 if there are two FPRs free, otherwise they should be passed in the
4559 same way as a struct containing two floats. */
4561 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4562 && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
4564 if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
4565 info->fpr_p = false;
4575 /* See whether the argument has doubleword alignment. */
4576 doubleword_aligned_p = FUNCTION_ARG_BOUNDARY (mode, type) > BITS_PER_WORD;
4578 /* Set REG_OFFSET to the register count we're interested in.
4579 The EABI allocates the floating-point registers separately,
4580 but the other ABIs allocate them like integer registers. */
4581 info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
4585 /* Advance to an even register if the argument is doubleword-aligned. */
4586 if (doubleword_aligned_p)
4587 info->reg_offset += info->reg_offset & 1;
4589 /* Work out the offset of a stack argument. */
4590 info->stack_offset = cum->stack_words;
4591 if (doubleword_aligned_p)
4592 info->stack_offset += info->stack_offset & 1;
4594 max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
4596 /* Partition the argument between registers and stack. */
4597 info->reg_words = MIN (num_words, max_regs);
4598 info->stack_words = num_words - info->reg_words;
4602 /* INFO describes an argument that is passed in a single-register value.
4603 Return the register it uses, assuming that FPRs are available if
4607 mips_arg_regno (const struct mips_arg_info *info, bool hard_float_p)
4609 if (!info->fpr_p || !hard_float_p)
4610 return GP_ARG_FIRST + info->reg_offset;
4611 else if (mips_abi == ABI_32 && TARGET_DOUBLE_FLOAT && info->reg_offset > 0)
4612 /* In o32, the second argument is always passed in $f14
4613 for TARGET_DOUBLE_FLOAT, regardless of whether the
4614 first argument was a word or doubleword. */
4615 return FP_ARG_FIRST + 2;
4617 return FP_ARG_FIRST + info->reg_offset;
4620 /* Implement FUNCTION_ARG_ADVANCE. */
4623 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4624 tree type, int named)
4626 struct mips_arg_info info;
4628 mips_arg_info (cum, mode, type, named, &info);
4631 cum->gp_reg_found = true;
4633 /* See the comment above the cumulative args structure in mips.h
4634 for an explanation of what this code does. It assumes the O32
4635 ABI, which passes at most 2 arguments in float registers. */
4636 if (cum->arg_number < 2 && info.fpr_p)
4637 cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);
4639 if (mips_abi != ABI_EABI || !info.fpr_p)
4640 cum->num_gprs = info.reg_offset + info.reg_words;
4641 else if (info.reg_words > 0)
4642 cum->num_fprs += MAX_FPRS_PER_FMT;
4644 if (info.stack_words > 0)
4645 cum->stack_words = info.stack_offset + info.stack_words;
4650 /* Implement FUNCTION_ARG. */
4653 function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4654 tree type, int named)
4656 struct mips_arg_info info;
4658 /* We will be called with a mode of VOIDmode after the last argument
4659 has been seen. Whatever we return will be passed to the call
4660 insn. If we need a mips16 fp_code, return a REG with the code
4661 stored as the mode. */
4662 if (mode == VOIDmode)
4664 if (TARGET_MIPS16 && cum->fp_code != 0)
4665 return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
4671 mips_arg_info (cum, mode, type, named, &info);
4673 /* Return straight away if the whole argument is passed on the stack. */
4674 if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
4678 && TREE_CODE (type) == RECORD_TYPE
4680 && TYPE_SIZE_UNIT (type)
4681 && host_integerp (TYPE_SIZE_UNIT (type), 1)
4684 /* The Irix 6 n32/n64 ABIs say that if any 64-bit chunk of the
4685 structure contains a double in its entirety, then that 64-bit
4686 chunk is passed in a floating point register. */
4689 /* First check to see if there is any such field. */
4690 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4691 if (TREE_CODE (field) == FIELD_DECL
4692 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
4693 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
4694 && host_integerp (bit_position (field), 0)
4695 && int_bit_position (field) % BITS_PER_WORD == 0)
4700 /* Now handle the special case by returning a PARALLEL
4701 indicating where each 64-bit chunk goes. INFO.REG_WORDS
4702 chunks are passed in registers. */
4704 HOST_WIDE_INT bitpos;
4707 /* assign_parms checks the mode of ENTRY_PARM, so we must
4708 use the actual mode here. */
4709 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
4712 field = TYPE_FIELDS (type);
4713 for (i = 0; i < info.reg_words; i++)
4717 for (; field; field = TREE_CHAIN (field))
4718 if (TREE_CODE (field) == FIELD_DECL
4719 && int_bit_position (field) >= bitpos)
4723 && int_bit_position (field) == bitpos
4724 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
4725 && !TARGET_SOFT_FLOAT
4726 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
4727 reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
4729 reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
4732 = gen_rtx_EXPR_LIST (VOIDmode, reg,
4733 GEN_INT (bitpos / BITS_PER_UNIT));
4735 bitpos += BITS_PER_WORD;
4741 /* Handle the n32/n64 conventions for passing complex floating-point
4742 arguments in FPR pairs. The real part goes in the lower register
4743 and the imaginary part goes in the upper register. */
4746 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4749 enum machine_mode inner;
4752 inner = GET_MODE_INNER (mode);
4753 reg = FP_ARG_FIRST + info.reg_offset;
4754 if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
4756 /* Real part in registers, imaginary part on stack. */
4757 gcc_assert (info.stack_words == info.reg_words);
4758 return gen_rtx_REG (inner, reg);
4762 gcc_assert (info.stack_words == 0);
4763 real = gen_rtx_EXPR_LIST (VOIDmode,
4764 gen_rtx_REG (inner, reg),
4766 imag = gen_rtx_EXPR_LIST (VOIDmode,
4768 reg + info.reg_words / 2),
4769 GEN_INT (GET_MODE_SIZE (inner)));
4770 return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
4774 return gen_rtx_REG (mode, mips_arg_regno (&info, TARGET_HARD_FLOAT));
4778 /* Implement TARGET_ARG_PARTIAL_BYTES. */
4781 mips_arg_partial_bytes (CUMULATIVE_ARGS *cum,
4782 enum machine_mode mode, tree type, bool named)
4784 struct mips_arg_info info;
4786 mips_arg_info (cum, mode, type, named, &info);
4787 return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
4791 /* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
4792 PARM_BOUNDARY bits of alignment, but will be given anything up
4793 to STACK_BOUNDARY bits if the type requires it. */
4796 function_arg_boundary (enum machine_mode mode, tree type)
4798 unsigned int alignment;
4800 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
4801 if (alignment < PARM_BOUNDARY)
4802 alignment = PARM_BOUNDARY;
4803 if (alignment > STACK_BOUNDARY)
4804 alignment = STACK_BOUNDARY;
4808 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
4809 upward rather than downward. In other words, return true if the
4810 first byte of the stack slot has useful data, false if the last
4814 mips_pad_arg_upward (enum machine_mode mode, const_tree type)
4816 /* On little-endian targets, the first byte of every stack argument
4817 is passed in the first byte of the stack slot. */
4818 if (!BYTES_BIG_ENDIAN)
4821 /* Otherwise, integral types are padded downward: the last byte of a
4822 stack argument is passed in the last byte of the stack slot. */
4824 ? (INTEGRAL_TYPE_P (type)
4825 || POINTER_TYPE_P (type)
4826 || FIXED_POINT_TYPE_P (type))
4827 : (GET_MODE_CLASS (mode) == MODE_INT
4828 || ALL_SCALAR_FIXED_POINT_MODE_P (mode)))
4831 /* Big-endian o64 pads floating-point arguments downward. */
4832 if (mips_abi == ABI_O64)
4833 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4836 /* Other types are padded upward for o32, o64, n32 and n64. */
4837 if (mips_abi != ABI_EABI)
4840 /* Arguments smaller than a stack slot are padded downward. */
4841 if (mode != BLKmode)
4842 return (GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY);
4844 return (int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT));
4848 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
4849 if the least significant byte of the register has useful data. Return
4850 the opposite if the most significant byte does. */
4853 mips_pad_reg_upward (enum machine_mode mode, tree type)
4855 /* No shifting is required for floating-point arguments. */
4856 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4857 return !BYTES_BIG_ENDIAN;
4859 /* Otherwise, apply the same padding to register arguments as we do
4860 to stack arguments. */
4861 return mips_pad_arg_upward (mode, type);
4865 mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4866 tree type, int *pretend_size ATTRIBUTE_UNUSED,
4869 CUMULATIVE_ARGS local_cum;
4870 int gp_saved, fp_saved;
4872 /* The caller has advanced CUM up to, but not beyond, the last named
4873 argument. Advance a local copy of CUM past the last "real" named
4874 argument, to find out how many registers are left over. */
4877 FUNCTION_ARG_ADVANCE (local_cum, mode, type, 1);
4879 /* Found out how many registers we need to save. */
4880 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
4881 fp_saved = (EABI_FLOAT_VARARGS_P
4882 ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
4891 ptr = plus_constant (virtual_incoming_args_rtx,
4892 REG_PARM_STACK_SPACE (cfun->decl)
4893 - gp_saved * UNITS_PER_WORD);
4894 mem = gen_rtx_MEM (BLKmode, ptr);
4895 set_mem_alias_set (mem, get_varargs_alias_set ());
4897 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
4902 /* We can't use move_block_from_reg, because it will use
4904 enum machine_mode mode;
4907 /* Set OFF to the offset from virtual_incoming_args_rtx of
4908 the first float register. The FP save area lies below
4909 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
4910 off = -gp_saved * UNITS_PER_WORD;
4911 off &= ~(UNITS_PER_FPVALUE - 1);
4912 off -= fp_saved * UNITS_PER_FPREG;
4914 mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
4916 for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS;
4917 i += MAX_FPRS_PER_FMT)
4921 ptr = plus_constant (virtual_incoming_args_rtx, off);
4922 mem = gen_rtx_MEM (mode, ptr);
4923 set_mem_alias_set (mem, get_varargs_alias_set ());
4924 mips_emit_move (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
4925 off += UNITS_PER_HWFPVALUE;
4929 if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
4930 cfun->machine->varargs_size = (gp_saved * UNITS_PER_WORD
4931 + fp_saved * UNITS_PER_FPREG);
4934 /* Create the va_list data type.
4935 We keep 3 pointers, and two offsets.
4936 Two pointers are to the overflow area, which starts at the CFA.
4937 One of these is constant, for addressing into the GPR save area below it.
4938 The other is advanced up the stack through the overflow region.
4939 The third pointer is to the GPR save area. Since the FPR save area
4940 is just below it, we can address FPR slots off this pointer.
4941 We also keep two one-byte offsets, which are to be subtracted from the
4942 constant pointers to yield addresses in the GPR and FPR save areas.
4943 These are downcounted as float or non-float arguments are used,
4944 and when they get to zero, the argument must be obtained from the
4946 If !EABI_FLOAT_VARARGS_P, then no FPR save area exists, and a single
4947 pointer is enough. It's started at the GPR save area, and is
4949 Note that the GPR save area is not constant size, due to optimization
4950 in the prologue. Hence, we can't use a design with two pointers
4951 and two offsets, although we could have designed this with two pointers
4952 and three offsets. */
4955 mips_build_builtin_va_list (void)
4957 if (EABI_FLOAT_VARARGS_P)
4959 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
4962 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
4964 f_ovfl = build_decl (FIELD_DECL, get_identifier ("__overflow_argptr"),
4966 f_gtop = build_decl (FIELD_DECL, get_identifier ("__gpr_top"),
4968 f_ftop = build_decl (FIELD_DECL, get_identifier ("__fpr_top"),
4970 f_goff = build_decl (FIELD_DECL, get_identifier ("__gpr_offset"),
4971 unsigned_char_type_node);
4972 f_foff = build_decl (FIELD_DECL, get_identifier ("__fpr_offset"),
4973 unsigned_char_type_node);
4974 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
4975 warn on every user file. */
4976 index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
4977 array = build_array_type (unsigned_char_type_node,
4978 build_index_type (index));
4979 f_res = build_decl (FIELD_DECL, get_identifier ("__reserved"), array);
4981 DECL_FIELD_CONTEXT (f_ovfl) = record;
4982 DECL_FIELD_CONTEXT (f_gtop) = record;
4983 DECL_FIELD_CONTEXT (f_ftop) = record;
4984 DECL_FIELD_CONTEXT (f_goff) = record;
4985 DECL_FIELD_CONTEXT (f_foff) = record;
4986 DECL_FIELD_CONTEXT (f_res) = record;
4988 TYPE_FIELDS (record) = f_ovfl;
4989 TREE_CHAIN (f_ovfl) = f_gtop;
4990 TREE_CHAIN (f_gtop) = f_ftop;
4991 TREE_CHAIN (f_ftop) = f_goff;
4992 TREE_CHAIN (f_goff) = f_foff;
4993 TREE_CHAIN (f_foff) = f_res;
4995 layout_type (record);
4998 else if (TARGET_IRIX && TARGET_IRIX6)
4999 /* On IRIX 6, this type is 'char *'. */
5000 return build_pointer_type (char_type_node);
5002 /* Otherwise, we use 'void *'. */
5003 return ptr_type_node;
5006 /* Implement va_start. */
5009 mips_va_start (tree valist, rtx nextarg)
5011 if (EABI_FLOAT_VARARGS_P)
5013 const CUMULATIVE_ARGS *cum;
5014 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
5015 tree ovfl, gtop, ftop, goff, foff;
5017 int gpr_save_area_size;
5018 int fpr_save_area_size;
5021 cum = ¤t_function_args_info;
5023 = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
5025 = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
5027 f_ovfl = TYPE_FIELDS (va_list_type_node);
5028 f_gtop = TREE_CHAIN (f_ovfl);
5029 f_ftop = TREE_CHAIN (f_gtop);
5030 f_goff = TREE_CHAIN (f_ftop);
5031 f_foff = TREE_CHAIN (f_goff);
5033 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
5035 gtop = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
5037 ftop = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
5039 goff = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
5041 foff = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
5044 /* Emit code to initialize OVFL, which points to the next varargs
5045 stack argument. CUM->STACK_WORDS gives the number of stack
5046 words used by named arguments. */
5047 t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
5048 if (cum->stack_words > 0)
5049 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), t,
5050 size_int (cum->stack_words * UNITS_PER_WORD));
5051 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
5052 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5054 /* Emit code to initialize GTOP, the top of the GPR save area. */
5055 t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
5056 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (gtop), gtop, t);
5057 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5059 /* Emit code to initialize FTOP, the top of the FPR save area.
5060 This address is gpr_save_area_bytes below GTOP, rounded
5061 down to the next fp-aligned boundary. */
5062 t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
5063 fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
5064 fpr_offset &= ~(UNITS_PER_FPVALUE - 1);
5066 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ftop), t,
5067 size_int (-fpr_offset));
5068 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ftop), ftop, t);
5069 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5071 /* Emit code to initialize GOFF, the offset from GTOP of the
5072 next GPR argument. */
5073 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (goff), goff,
5074 build_int_cst (NULL_TREE, gpr_save_area_size));
5075 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5077 /* Likewise emit code to initialize FOFF, the offset from FTOP
5078 of the next FPR argument. */
5079 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (foff), foff,
5080 build_int_cst (NULL_TREE, fpr_save_area_size));
5081 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5085 nextarg = plus_constant (nextarg, -cfun->machine->varargs_size);
5086 std_expand_builtin_va_start (valist, nextarg);
5090 /* Implement va_arg. */
5093 mips_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
5095 HOST_WIDE_INT size, rsize;
5099 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
5102 type = build_pointer_type (type);
5104 size = int_size_in_bytes (type);
5105 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
5107 if (mips_abi != ABI_EABI || !EABI_FLOAT_VARARGS_P)
5108 addr = std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
5111 /* Not a simple merged stack. */
5113 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
5114 tree ovfl, top, off, align;
5115 HOST_WIDE_INT osize;
5118 f_ovfl = TYPE_FIELDS (va_list_type_node);
5119 f_gtop = TREE_CHAIN (f_ovfl);
5120 f_ftop = TREE_CHAIN (f_gtop);
5121 f_goff = TREE_CHAIN (f_ftop);
5122 f_foff = TREE_CHAIN (f_goff);
5124 /* We maintain separate pointers and offsets for floating-point
5125 and integer arguments, but we need similar code in both cases.
5128 TOP be the top of the register save area;
5129 OFF be the offset from TOP of the next register;
5130 ADDR_RTX be the address of the argument;
5131 RSIZE be the number of bytes used to store the argument
5132 when it's in the register save area;
5133 OSIZE be the number of bytes used to store it when it's
5134 in the stack overflow area; and
5135 PADDING be (BYTES_BIG_ENDIAN ? OSIZE - RSIZE : 0)
5137 The code we want is:
5139 1: off &= -rsize; // round down
5142 4: addr_rtx = top - off;
5147 9: ovfl += ((intptr_t) ovfl + osize - 1) & -osize;
5148 10: addr_rtx = ovfl + PADDING;
5152 [1] and [9] can sometimes be optimized away. */
5154 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
5157 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
5158 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
5160 top = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
5162 off = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
5165 /* When floating-point registers are saved to the stack,
5166 each one will take up UNITS_PER_HWFPVALUE bytes, regardless
5167 of the float's precision. */
5168 rsize = UNITS_PER_HWFPVALUE;
5170 /* Overflow arguments are padded to UNITS_PER_WORD bytes
5171 (= PARM_BOUNDARY bits). This can be different from RSIZE
5174 (1) On 32-bit targets when TYPE is a structure such as:
5176 struct s { float f; };
5178 Such structures are passed in paired FPRs, so RSIZE
5179 will be 8 bytes. However, the structure only takes
5180 up 4 bytes of memory, so OSIZE will only be 4.
5182 (2) In combinations such as -mgp64 -msingle-float
5183 -fshort-double. Doubles passed in registers
5184 will then take up 4 (UNITS_PER_HWFPVALUE) bytes,
5185 but those passed on the stack take up
5186 UNITS_PER_WORD bytes. */
5187 osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
5191 top = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
5193 off = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
5195 if (rsize > UNITS_PER_WORD)
5197 /* [1] Emit code for: off &= -rsize. */
5198 t = build2 (BIT_AND_EXPR, TREE_TYPE (off), off,
5199 build_int_cst (NULL_TREE, -rsize));
5200 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (off), off, t);
5201 gimplify_and_add (t, pre_p);
5206 /* [2] Emit code to branch if off == 0. */
5207 t = build2 (NE_EXPR, boolean_type_node, off,
5208 build_int_cst (TREE_TYPE (off), 0));
5209 addr = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
5211 /* [5] Emit code for: off -= rsize. We do this as a form of
5212 post-increment not available to C. Also widen for the
5213 coming pointer arithmetic. */
5214 t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
5215 t = build2 (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
5216 t = fold_convert (sizetype, t);
5217 t = fold_build1 (NEGATE_EXPR, sizetype, t);
5219 /* [4] Emit code for: addr_rtx = top - off. On big endian machines,
5220 the argument has RSIZE - SIZE bytes of leading padding. */
5221 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (top), top, t);
5222 if (BYTES_BIG_ENDIAN && rsize > size)
5224 u = size_int (rsize - size);
5225 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
5227 COND_EXPR_THEN (addr) = t;
5229 if (osize > UNITS_PER_WORD)
5231 /* [9] Emit: ovfl += ((intptr_t) ovfl + osize - 1) & -osize. */
5232 u = size_int (osize - 1);
5233 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), ovfl, u);
5234 t = fold_convert (sizetype, t);
5235 u = size_int (-osize);
5236 t = build2 (BIT_AND_EXPR, sizetype, t, u);
5237 t = fold_convert (TREE_TYPE (ovfl), t);
5238 align = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
5243 /* [10, 11]. Emit code to store ovfl in addr_rtx, then
5244 post-increment ovfl by osize. On big-endian machines,
5245 the argument has OSIZE - SIZE bytes of leading padding. */
5246 u = fold_convert (TREE_TYPE (ovfl),
5247 build_int_cst (NULL_TREE, osize));
5248 t = build2 (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
5249 if (BYTES_BIG_ENDIAN && osize > size)
5251 u = size_int (osize - size);
5252 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
5255 /* String [9] and [10,11] together. */
5257 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), align, t);
5258 COND_EXPR_ELSE (addr) = t;
5260 addr = fold_convert (build_pointer_type (type), addr);
5261 addr = build_va_arg_indirect_ref (addr);
5265 addr = build_va_arg_indirect_ref (addr);
5270 /* Return true if it is possible to use left/right accesses for a
5271 bitfield of WIDTH bits starting BITPOS bits into *OP. When
5272 returning true, update *OP, *LEFT and *RIGHT as follows:
5274 *OP is a BLKmode reference to the whole field.
5276 *LEFT is a QImode reference to the first byte if big endian or
5277 the last byte if little endian. This address can be used in the
5278 left-side instructions (lwl, swl, ldl, sdl).
5280 *RIGHT is a QImode reference to the opposite end of the field and
5281 can be used in the patterning right-side instruction. */
5284 mips_get_unaligned_mem (rtx *op, unsigned int width, int bitpos,
5285 rtx *left, rtx *right)
5289 /* Check that the operand really is a MEM. Not all the extv and
5290 extzv predicates are checked. */
5294 /* Check that the size is valid. */
5295 if (width != 32 && (!TARGET_64BIT || width != 64))
5298 /* We can only access byte-aligned values. Since we are always passed
5299 a reference to the first byte of the field, it is not necessary to
5300 do anything with BITPOS after this check. */
5301 if (bitpos % BITS_PER_UNIT != 0)
5304 /* Reject aligned bitfields: we want to use a normal load or store
5305 instead of a left/right pair. */
5306 if (MEM_ALIGN (*op) >= width)
5309 /* Adjust *OP to refer to the whole field. This also has the effect
5310 of legitimizing *OP's address for BLKmode, possibly simplifying it. */
5311 *op = adjust_address (*op, BLKmode, 0);
5312 set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
5314 /* Get references to both ends of the field. We deliberately don't
5315 use the original QImode *OP for FIRST since the new BLKmode one
5316 might have a simpler address. */
5317 first = adjust_address (*op, QImode, 0);
5318 last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
5320 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
5321 be the upper word and RIGHT the lower word. */
5322 if (TARGET_BIG_ENDIAN)
5323 *left = first, *right = last;
5325 *left = last, *right = first;
5331 /* Try to emit the equivalent of (set DEST (zero_extract SRC WIDTH BITPOS)).
5332 Return true on success. We only handle cases where zero_extract is
5333 equivalent to sign_extract. */
5336 mips_expand_unaligned_load (rtx dest, rtx src, unsigned int width, int bitpos)
5338 rtx left, right, temp;
5340 /* If TARGET_64BIT, the destination of a 32-bit load will be a
5341 paradoxical word_mode subreg. This is the only case in which
5342 we allow the destination to be larger than the source. */
5343 if (GET_CODE (dest) == SUBREG
5344 && GET_MODE (dest) == DImode
5345 && SUBREG_BYTE (dest) == 0
5346 && GET_MODE (SUBREG_REG (dest)) == SImode)
5347 dest = SUBREG_REG (dest);
5349 /* After the above adjustment, the destination must be the same
5350 width as the source. */
5351 if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
5354 if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
5357 temp = gen_reg_rtx (GET_MODE (dest));
5358 if (GET_MODE (dest) == DImode)
5360 emit_insn (gen_mov_ldl (temp, src, left));
5361 emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
5365 emit_insn (gen_mov_lwl (temp, src, left));
5366 emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
5372 /* Try to expand (set (zero_extract DEST WIDTH BITPOS) SRC). Return
5376 mips_expand_unaligned_store (rtx dest, rtx src, unsigned int width, int bitpos)
5379 enum machine_mode mode;
5381 if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
5384 mode = mode_for_size (width, MODE_INT, 0);
5385 src = gen_lowpart (mode, src);
5389 emit_insn (gen_mov_sdl (dest, src, left));
5390 emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
5394 emit_insn (gen_mov_swl (dest, src, left));
5395 emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
5400 /* Return true if X is a MEM with the same size as MODE. */
5403 mips_mem_fits_mode_p (enum machine_mode mode, rtx x)
5410 size = MEM_SIZE (x);
5411 return size && INTVAL (size) == GET_MODE_SIZE (mode);
5414 /* Return true if (zero_extract OP SIZE POSITION) can be used as the
5415 source of an "ext" instruction or the destination of an "ins"
5416 instruction. OP must be a register operand and the following
5417 conditions must hold:
5419 0 <= POSITION < GET_MODE_BITSIZE (GET_MODE (op))
5420 0 < SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
5421 0 < POSITION + SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
5423 Also reject lengths equal to a word as they are better handled
5424 by the move patterns. */
5427 mips_use_ins_ext_p (rtx op, rtx size, rtx position)
5429 HOST_WIDE_INT len, pos;
5431 if (!ISA_HAS_EXT_INS
5432 || !register_operand (op, VOIDmode)
5433 || GET_MODE_BITSIZE (GET_MODE (op)) > BITS_PER_WORD)
5436 len = INTVAL (size);
5437 pos = INTVAL (position);
5439 if (len <= 0 || len >= GET_MODE_BITSIZE (GET_MODE (op))
5440 || pos < 0 || pos + len > GET_MODE_BITSIZE (GET_MODE (op)))
5446 /* Set up globals to generate code for the ISA or processor
5447 described by INFO. */
5450 mips_set_architecture (const struct mips_cpu_info *info)
5454 mips_arch_info = info;
5455 mips_arch = info->cpu;
5456 mips_isa = info->isa;
5461 /* Likewise for tuning. */
5464 mips_set_tune (const struct mips_cpu_info *info)
5468 mips_tune_info = info;
5469 mips_tune = info->cpu;
5473 /* Initialize mips_split_addresses from the associated command-line
5476 mips_split_addresses is a half-way house between explicit
5477 relocations and the traditional assembler macros. It can
5478 split absolute 32-bit symbolic constants into a high/lo_sum
5479 pair but uses macros for other sorts of access.
5481 Like explicit relocation support for REL targets, it relies
5482 on GNU extensions in the assembler and the linker.
5484 Although this code should work for -O0, it has traditionally
5485 been treated as an optimization. */
5488 mips_init_split_addresses (void)
5490 if (!TARGET_MIPS16 && TARGET_SPLIT_ADDRESSES
5491 && optimize && !flag_pic
5492 && !ABI_HAS_64BIT_SYMBOLS)
5493 mips_split_addresses = 1;
5495 mips_split_addresses = 0;
5498 /* (Re-)Initialize information about relocs. */
5501 mips_init_relocs (void)
5503 memset (mips_split_p, '\0', sizeof (mips_split_p));
5504 memset (mips_hi_relocs, '\0', sizeof (mips_hi_relocs));
5505 memset (mips_lo_relocs, '\0', sizeof (mips_lo_relocs));
5507 if (ABI_HAS_64BIT_SYMBOLS)
5509 if (TARGET_EXPLICIT_RELOCS)
5511 mips_split_p[SYMBOL_64_HIGH] = true;
5512 mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
5513 mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
5515 mips_split_p[SYMBOL_64_MID] = true;
5516 mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
5517 mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
5519 mips_split_p[SYMBOL_64_LOW] = true;
5520 mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
5521 mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
5523 mips_split_p[SYMBOL_ABSOLUTE] = true;
5524 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
5529 if (TARGET_EXPLICIT_RELOCS || mips_split_addresses || TARGET_MIPS16)
5531 mips_split_p[SYMBOL_ABSOLUTE] = true;
5532 mips_hi_relocs[SYMBOL_ABSOLUTE] = "%hi(";
5533 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
5535 mips_lo_relocs[SYMBOL_32_HIGH] = "%hi(";
5541 /* The high part is provided by a pseudo copy of $gp. */
5542 mips_split_p[SYMBOL_GP_RELATIVE] = true;
5543 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gprel(";
5546 if (TARGET_EXPLICIT_RELOCS)
5548 /* Small data constants are kept whole until after reload,
5549 then lowered by mips_rewrite_small_data. */
5550 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gp_rel(";
5552 mips_split_p[SYMBOL_GOT_PAGE_OFST] = true;
5555 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
5556 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%got_ofst(";
5560 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
5561 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%lo(";
5566 /* The HIGH and LO_SUM are matched by special .md patterns. */
5567 mips_split_p[SYMBOL_GOT_DISP] = true;
5569 mips_split_p[SYMBOL_GOTOFF_DISP] = true;
5570 mips_hi_relocs[SYMBOL_GOTOFF_DISP] = "%got_hi(";
5571 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_lo(";
5573 mips_split_p[SYMBOL_GOTOFF_CALL] = true;
5574 mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
5575 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
5580 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_disp(";
5582 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got(";
5583 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
5589 mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
5590 mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
5591 mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
5594 /* Thread-local relocation operators. */
5595 mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
5596 mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";
5597 mips_split_p[SYMBOL_DTPREL] = 1;
5598 mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
5599 mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";
5600 mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";
5601 mips_split_p[SYMBOL_TPREL] = 1;
5602 mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
5603 mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
5605 mips_lo_relocs[SYMBOL_HALF] = "%half(";
5608 static GTY(()) int was_mips16_p = -1;
5610 /* Set up the target-dependent global state so that it matches the
5611 current function's ISA mode. */
5614 mips_set_mips16_mode (int mips16_p)
5616 if (mips16_p == was_mips16_p)
5619 /* Restore base settings of various flags. */
5620 target_flags = mips_base_target_flags;
5621 align_loops = mips_base_align_loops;
5622 align_jumps = mips_base_align_jumps;
5623 align_functions = mips_base_align_functions;
5624 flag_schedule_insns = mips_base_schedule_insns;
5625 flag_reorder_blocks_and_partition = mips_base_reorder_blocks_and_partition;
5626 flag_move_loop_invariants = mips_base_move_loop_invariants;
5627 flag_delayed_branch = mips_flag_delayed_branch;
5631 /* Select mips16 instruction set. */
5632 target_flags |= MASK_MIPS16;
5634 /* Don't run the scheduler before reload, since it tends to
5635 increase register pressure. */
5636 flag_schedule_insns = 0;
5638 /* Don't do hot/cold partitioning. The constant layout code expects
5639 the whole function to be in a single section. */
5640 flag_reorder_blocks_and_partition = 0;
5642 /* Don't move loop invariants, because it tends to increase
5643 register pressure. It also introduces an extra move in cases
5644 where the constant is the first operand in a two-operand binary
5645 instruction, or when it forms a register argument to a functon
5647 flag_move_loop_invariants = 0;
5649 /* Silently disable -mexplicit-relocs since it doesn't apply
5650 to mips16 code. Even so, it would overly pedantic to warn
5651 about "-mips16 -mexplicit-relocs", especially given that
5652 we use a %gprel() operator. */
5653 target_flags &= ~MASK_EXPLICIT_RELOCS;
5655 /* Silently disable DSP extensions. */
5656 target_flags &= ~MASK_DSP;
5657 target_flags &= ~MASK_DSPR2;
5661 /* Reset to select base non-mips16 ISA. */
5662 target_flags &= ~MASK_MIPS16;
5664 /* When using explicit relocs, we call dbr_schedule from within
5666 if (TARGET_EXPLICIT_RELOCS)
5667 flag_delayed_branch = 0;
5669 /* Provide default values for align_* for 64-bit targets. */
5672 if (align_loops == 0)
5674 if (align_jumps == 0)
5676 if (align_functions == 0)
5677 align_functions = 8;
5681 /* (Re)initialize mips target internals for new ISA. */
5682 mips_init_split_addresses ();
5683 mips_init_relocs ();
5685 if (was_mips16_p >= 0)
5686 /* Reinitialize target-dependent state. */
5689 was_mips16_p = TARGET_MIPS16;
5692 /* Implement TARGET_SET_CURRENT_FUNCTION. Decide whether the current
5693 function should use the MIPS16 ISA and switch modes accordingly. */
5696 mips_set_current_function (tree fndecl ATTRIBUTE_UNUSED)
5700 /* Implement TARGET_HANDLE_OPTION. */
5703 mips_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
5708 if (strcmp (arg, "32") == 0)
5710 else if (strcmp (arg, "o64") == 0)
5712 else if (strcmp (arg, "n32") == 0)
5714 else if (strcmp (arg, "64") == 0)
5716 else if (strcmp (arg, "eabi") == 0)
5717 mips_abi = ABI_EABI;
5724 return mips_parse_cpu (arg) != 0;
5727 mips_isa_info = mips_parse_cpu (ACONCAT (("mips", arg, NULL)));
5728 return mips_isa_info != 0;
5730 case OPT_mno_flush_func:
5731 mips_cache_flush_func = NULL;
5734 case OPT_mcode_readable_:
5735 if (strcmp (arg, "yes") == 0)
5736 mips_code_readable = CODE_READABLE_YES;
5737 else if (strcmp (arg, "pcrel") == 0)
5738 mips_code_readable = CODE_READABLE_PCREL;
5739 else if (strcmp (arg, "no") == 0)
5740 mips_code_readable = CODE_READABLE_NO;
5750 /* Set up the threshold for data to go into the small data area, instead
5751 of the normal data area, and detect any conflicts in the switches. */
5754 override_options (void)
5756 int i, start, regno;
5757 enum machine_mode mode;
5759 #ifdef SUBTARGET_OVERRIDE_OPTIONS
5760 SUBTARGET_OVERRIDE_OPTIONS;
5763 mips_section_threshold = g_switch_set ? g_switch_value : MIPS_DEFAULT_GVALUE;
5765 /* The following code determines the architecture and register size.
5766 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
5767 The GAS and GCC code should be kept in sync as much as possible. */
5769 if (mips_arch_string != 0)
5770 mips_set_architecture (mips_parse_cpu (mips_arch_string));
5772 if (mips_isa_info != 0)
5774 if (mips_arch_info == 0)
5775 mips_set_architecture (mips_isa_info);
5776 else if (mips_arch_info->isa != mips_isa_info->isa)
5777 error ("-%s conflicts with the other architecture options, "
5778 "which specify a %s processor",
5779 mips_isa_info->name,
5780 mips_cpu_info_from_isa (mips_arch_info->isa)->name);
5783 if (mips_arch_info == 0)
5785 #ifdef MIPS_CPU_STRING_DEFAULT
5786 mips_set_architecture (mips_parse_cpu (MIPS_CPU_STRING_DEFAULT));
5788 mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
5792 if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
5793 error ("-march=%s is not compatible with the selected ABI",
5794 mips_arch_info->name);
5796 /* Optimize for mips_arch, unless -mtune selects a different processor. */
5797 if (mips_tune_string != 0)
5798 mips_set_tune (mips_parse_cpu (mips_tune_string));
5800 if (mips_tune_info == 0)
5801 mips_set_tune (mips_arch_info);
5803 /* Set cost structure for the processor. */
5805 mips_cost = &mips_rtx_cost_optimize_size;
5807 mips_cost = &mips_rtx_cost_data[mips_tune];
5809 /* If the user hasn't specified a branch cost, use the processor's
5811 if (mips_branch_cost == 0)
5812 mips_branch_cost = mips_cost->branch_cost;
5814 if ((target_flags_explicit & MASK_64BIT) != 0)
5816 /* The user specified the size of the integer registers. Make sure
5817 it agrees with the ABI and ISA. */
5818 if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
5819 error ("-mgp64 used with a 32-bit processor");
5820 else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
5821 error ("-mgp32 used with a 64-bit ABI");
5822 else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
5823 error ("-mgp64 used with a 32-bit ABI");
5827 /* Infer the integer register size from the ABI and processor.
5828 Restrict ourselves to 32-bit registers if that's all the
5829 processor has, or if the ABI cannot handle 64-bit registers. */
5830 if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
5831 target_flags &= ~MASK_64BIT;
5833 target_flags |= MASK_64BIT;
5836 if ((target_flags_explicit & MASK_FLOAT64) != 0)
5838 /* Really, -mfp32 and -mfp64 are ornamental options. There's
5839 only one right answer here. */
5840 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
5841 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
5842 else if (!TARGET_64BIT && TARGET_FLOAT64
5843 && !(ISA_HAS_MXHC1 && mips_abi == ABI_32))
5844 error ("-mgp32 and -mfp64 can only be combined if the target"
5845 " supports the mfhc1 and mthc1 instructions");
5846 else if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
5847 error ("unsupported combination: %s", "-mfp64 -msingle-float");
5851 /* -msingle-float selects 32-bit float registers. Otherwise the
5852 float registers should be the same size as the integer ones. */
5853 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
5854 target_flags |= MASK_FLOAT64;
5856 target_flags &= ~MASK_FLOAT64;
5859 /* End of code shared with GAS. */
5861 if ((target_flags_explicit & MASK_LONG64) == 0)
5863 if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
5864 target_flags |= MASK_LONG64;
5866 target_flags &= ~MASK_LONG64;
5870 flag_pcc_struct_return = 0;
5872 if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
5874 /* If neither -mbranch-likely nor -mno-branch-likely was given
5875 on the command line, set MASK_BRANCHLIKELY based on the target
5878 By default, we enable use of Branch Likely instructions on
5879 all architectures which support them with the following
5880 exceptions: when creating MIPS32 or MIPS64 code, and when
5881 tuning for architectures where their use tends to hurt
5884 The MIPS32 and MIPS64 architecture specifications say "Software
5885 is strongly encouraged to avoid use of Branch Likely
5886 instructions, as they will be removed from a future revision
5887 of the [MIPS32 and MIPS64] architecture." Therefore, we do not
5888 issue those instructions unless instructed to do so by
5890 if (ISA_HAS_BRANCHLIKELY
5891 && !(ISA_MIPS32 || ISA_MIPS32R2 || ISA_MIPS64)
5892 && !(TUNE_MIPS5500 || TUNE_SB1))
5893 target_flags |= MASK_BRANCHLIKELY;
5895 target_flags &= ~MASK_BRANCHLIKELY;
5897 if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
5898 warning (0, "generation of Branch Likely instructions enabled, but not supported by architecture");
5900 /* The effect of -mabicalls isn't defined for the EABI. */
5901 if (mips_abi == ABI_EABI && TARGET_ABICALLS)
5903 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
5904 target_flags &= ~MASK_ABICALLS;
5907 /* MIPS16 cannot generate PIC yet. */
5908 if (TARGET_MIPS16 && (flag_pic || TARGET_ABICALLS))
5910 sorry ("MIPS16 PIC");
5911 target_flags &= ~MASK_ABICALLS;
5912 flag_pic = flag_pie = flag_shlib = 0;
5915 if (TARGET_ABICALLS)
5916 /* We need to set flag_pic for executables as well as DSOs
5917 because we may reference symbols that are not defined in
5918 the final executable. (MIPS does not use things like
5919 copy relocs, for example.)
5921 Also, there is a body of code that uses __PIC__ to distinguish
5922 between -mabicalls and -mno-abicalls code. */
5925 /* -mvr4130-align is a "speed over size" optimization: it usually produces
5926 faster code, but at the expense of more nops. Enable it at -O3 and
5928 if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
5929 target_flags |= MASK_VR4130_ALIGN;
5931 /* Prefer a call to memcpy over inline code when optimizing for size,
5932 though see MOVE_RATIO in mips.h. */
5933 if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0)
5934 target_flags |= MASK_MEMCPY;
5936 /* If we have a nonzero small-data limit, check that the -mgpopt
5937 setting is consistent with the other target flags. */
5938 if (mips_section_threshold > 0)
5942 if (!TARGET_MIPS16 && !TARGET_EXPLICIT_RELOCS)
5943 error ("%<-mno-gpopt%> needs %<-mexplicit-relocs%>");
5945 TARGET_LOCAL_SDATA = false;
5946 TARGET_EXTERN_SDATA = false;
5950 if (TARGET_VXWORKS_RTP)
5951 warning (0, "cannot use small-data accesses for %qs", "-mrtp");
5953 if (TARGET_ABICALLS)
5954 warning (0, "cannot use small-data accesses for %qs",
5959 #ifdef MIPS_TFMODE_FORMAT
5960 REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
5963 /* Make sure that the user didn't turn off paired single support when
5964 MIPS-3D support is requested. */
5965 if (TARGET_MIPS3D && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
5966 && !TARGET_PAIRED_SINGLE_FLOAT)
5967 error ("-mips3d requires -mpaired-single");
5969 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
5971 target_flags |= MASK_PAIRED_SINGLE_FLOAT;
5973 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
5974 and TARGET_HARD_FLOAT are both true. */
5975 if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT))
5976 error ("-mips3d/-mpaired-single must be used with -mfp64 -mhard-float");
5978 /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
5980 if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_MIPS64)
5981 error ("-mips3d/-mpaired-single must be used with -mips64");
5983 /* If TARGET_DSPR2, enable MASK_DSP. */
5985 target_flags |= MASK_DSP;
5987 mips_print_operand_punct['?'] = 1;
5988 mips_print_operand_punct['#'] = 1;
5989 mips_print_operand_punct['/'] = 1;
5990 mips_print_operand_punct['&'] = 1;
5991 mips_print_operand_punct['!'] = 1;
5992 mips_print_operand_punct['*'] = 1;
5993 mips_print_operand_punct['@'] = 1;
5994 mips_print_operand_punct['.'] = 1;
5995 mips_print_operand_punct['('] = 1;
5996 mips_print_operand_punct[')'] = 1;
5997 mips_print_operand_punct['['] = 1;
5998 mips_print_operand_punct[']'] = 1;
5999 mips_print_operand_punct['<'] = 1;
6000 mips_print_operand_punct['>'] = 1;
6001 mips_print_operand_punct['{'] = 1;
6002 mips_print_operand_punct['}'] = 1;
6003 mips_print_operand_punct['^'] = 1;
6004 mips_print_operand_punct['$'] = 1;
6005 mips_print_operand_punct['+'] = 1;
6006 mips_print_operand_punct['~'] = 1;
6008 /* Set up array to map GCC register number to debug register number.
6009 Ignore the special purpose register numbers. */
6011 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
6013 mips_dbx_regno[i] = INVALID_REGNUM;
6014 if (GP_REG_P (i) || FP_REG_P (i) || ALL_COP_REG_P (i))
6015 mips_dwarf_regno[i] = i;
6017 mips_dwarf_regno[i] = INVALID_REGNUM;
6020 start = GP_DBX_FIRST - GP_REG_FIRST;
6021 for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
6022 mips_dbx_regno[i] = i + start;
6024 start = FP_DBX_FIRST - FP_REG_FIRST;
6025 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
6026 mips_dbx_regno[i] = i + start;
6028 /* HI and LO debug registers use big-endian ordering. */
6029 mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
6030 mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
6031 mips_dwarf_regno[HI_REGNUM] = MD_REG_FIRST + 0;
6032 mips_dwarf_regno[LO_REGNUM] = MD_REG_FIRST + 1;
6033 for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
6035 mips_dwarf_regno[i + TARGET_LITTLE_ENDIAN] = i;
6036 mips_dwarf_regno[i + TARGET_BIG_ENDIAN] = i + 1;
6039 /* Set up array giving whether a given register can hold a given mode. */
6041 for (mode = VOIDmode;
6042 mode != MAX_MACHINE_MODE;
6043 mode = (enum machine_mode) ((int)mode + 1))
6045 register int size = GET_MODE_SIZE (mode);
6046 register enum mode_class class = GET_MODE_CLASS (mode);
6048 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
6052 if (mode == CCV2mode)
6055 && (regno - ST_REG_FIRST) % 2 == 0);
6057 else if (mode == CCV4mode)
6060 && (regno - ST_REG_FIRST) % 4 == 0);
6062 else if (mode == CCmode)
6065 temp = (regno == FPSW_REGNUM);
6067 temp = (ST_REG_P (regno) || GP_REG_P (regno)
6068 || FP_REG_P (regno));
6071 else if (GP_REG_P (regno))
6072 temp = ((regno & 1) == 0 || size <= UNITS_PER_WORD);
6074 else if (FP_REG_P (regno))
6075 temp = ((((regno % MAX_FPRS_PER_FMT) == 0)
6076 || (MIN_FPRS_PER_FMT == 1
6077 && size <= UNITS_PER_FPREG))
6078 && (((class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT
6079 || class == MODE_VECTOR_FLOAT)
6080 && size <= UNITS_PER_FPVALUE)
6081 /* Allow integer modes that fit into a single
6082 register. We need to put integers into FPRs
6083 when using instructions like cvt and trunc.
6084 We can't allow sizes smaller than a word,
6085 the FPU has no appropriate load/store
6086 instructions for those. */
6087 || (class == MODE_INT
6088 && size >= MIN_UNITS_PER_WORD
6089 && size <= UNITS_PER_FPREG)
6090 /* Allow TFmode for CCmode reloads. */
6091 || (ISA_HAS_8CC && mode == TFmode)));
6093 else if (ACC_REG_P (regno))
6094 temp = ((INTEGRAL_MODE_P (mode) || ALL_FIXED_POINT_MODE_P (mode))
6095 && size <= UNITS_PER_WORD * 2
6096 && (size <= UNITS_PER_WORD
6097 || regno == MD_REG_FIRST
6098 || (DSP_ACC_REG_P (regno)
6099 && ((regno - DSP_ACC_REG_FIRST) & 1) == 0)));
6101 else if (ALL_COP_REG_P (regno))
6102 temp = (class == MODE_INT && size <= UNITS_PER_WORD);
6106 mips_hard_regno_mode_ok[(int)mode][regno] = temp;
6110 /* Save GPR registers in word_mode sized hunks. word_mode hasn't been
6111 initialized yet, so we can't use that here. */
6112 gpr_mode = TARGET_64BIT ? DImode : SImode;
6114 /* Function to allocate machine-dependent function status. */
6115 init_machine_status = &mips_init_machine_status;
6117 /* Default to working around R4000 errata only if the processor
6118 was selected explicitly. */
6119 if ((target_flags_explicit & MASK_FIX_R4000) == 0
6120 && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
6121 target_flags |= MASK_FIX_R4000;
6123 /* Default to working around R4400 errata only if the processor
6124 was selected explicitly. */
6125 if ((target_flags_explicit & MASK_FIX_R4400) == 0
6126 && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
6127 target_flags |= MASK_FIX_R4400;
6129 /* Save base state of options. */
6130 mips_base_mips16 = TARGET_MIPS16;
6131 mips_base_target_flags = target_flags;
6132 mips_base_schedule_insns = flag_schedule_insns;
6133 mips_base_reorder_blocks_and_partition = flag_reorder_blocks_and_partition;
6134 mips_base_move_loop_invariants = flag_move_loop_invariants;
6135 mips_base_align_loops = align_loops;
6136 mips_base_align_jumps = align_jumps;
6137 mips_base_align_functions = align_functions;
6138 mips_flag_delayed_branch = flag_delayed_branch;
6140 /* Now select the mips16 or 32-bit instruction set, as requested. */
6141 mips_set_mips16_mode (mips_base_mips16);
6144 /* Swap the register information for registers I and I + 1, which
6145 currently have the wrong endianness. Note that the registers'
6146 fixedness and call-clobberedness might have been set on the
6150 mips_swap_registers (unsigned int i)
6155 #define SWAP_INT(X, Y) (tmpi = (X), (X) = (Y), (Y) = tmpi)
6156 #define SWAP_STRING(X, Y) (tmps = (X), (X) = (Y), (Y) = tmps)
6158 SWAP_INT (fixed_regs[i], fixed_regs[i + 1]);
6159 SWAP_INT (call_used_regs[i], call_used_regs[i + 1]);
6160 SWAP_INT (call_really_used_regs[i], call_really_used_regs[i + 1]);
6161 SWAP_STRING (reg_names[i], reg_names[i + 1]);
6167 /* Implement CONDITIONAL_REGISTER_USAGE. */
6170 mips_conditional_register_usage (void)
6176 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
6177 fixed_regs[regno] = call_used_regs[regno] = 1;
6179 if (!TARGET_HARD_FLOAT)
6183 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
6184 fixed_regs[regno] = call_used_regs[regno] = 1;
6185 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
6186 fixed_regs[regno] = call_used_regs[regno] = 1;
6188 else if (! ISA_HAS_8CC)
6192 /* We only have a single condition code register. We
6193 implement this by hiding all the condition code registers,
6194 and generating RTL that refers directly to ST_REG_FIRST. */
6195 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
6196 fixed_regs[regno] = call_used_regs[regno] = 1;
6198 /* In mips16 mode, we permit the $t temporary registers to be used
6199 for reload. We prohibit the unused $s registers, since they
6200 are caller saved, and saving them via a mips16 register would
6201 probably waste more time than just reloading the value. */
6204 fixed_regs[18] = call_used_regs[18] = 1;
6205 fixed_regs[19] = call_used_regs[19] = 1;
6206 fixed_regs[20] = call_used_regs[20] = 1;
6207 fixed_regs[21] = call_used_regs[21] = 1;
6208 fixed_regs[22] = call_used_regs[22] = 1;
6209 fixed_regs[23] = call_used_regs[23] = 1;
6210 fixed_regs[26] = call_used_regs[26] = 1;
6211 fixed_regs[27] = call_used_regs[27] = 1;
6212 fixed_regs[30] = call_used_regs[30] = 1;
6214 /* fp20-23 are now caller saved. */
6215 if (mips_abi == ABI_64)
6218 for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
6219 call_really_used_regs[regno] = call_used_regs[regno] = 1;
6221 /* Odd registers from fp21 to fp31 are now caller saved. */
6222 if (mips_abi == ABI_N32)
6225 for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
6226 call_really_used_regs[regno] = call_used_regs[regno] = 1;
6228 /* Make sure that double-register accumulator values are correctly
6229 ordered for the current endianness. */
6230 if (TARGET_LITTLE_ENDIAN)
6233 mips_swap_registers (MD_REG_FIRST);
6234 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno += 2)
6235 mips_swap_registers (regno);
6239 /* Allocate a chunk of memory for per-function machine-dependent data. */
6240 static struct machine_function *
6241 mips_init_machine_status (void)
6243 return ((struct machine_function *)
6244 ggc_alloc_cleared (sizeof (struct machine_function)));
6247 /* On the mips16, we want to allocate $24 (T_REG) before other
6248 registers for instructions for which it is possible. This helps
6249 avoid shuffling registers around in order to set up for an xor,
6250 encouraging the compiler to use a cmp instead. */
6253 mips_order_regs_for_local_alloc (void)
6257 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
6258 reg_alloc_order[i] = i;
6262 /* It really doesn't matter where we put register 0, since it is
6263 a fixed register anyhow. */
6264 reg_alloc_order[0] = 24;
6265 reg_alloc_order[24] = 0;
6270 /* The MIPS debug format wants all automatic variables and arguments
6271 to be in terms of the virtual frame pointer (stack pointer before
6272 any adjustment in the function), while the MIPS 3.0 linker wants
6273 the frame pointer to be the stack pointer after the initial
6274 adjustment. So, we do the adjustment here. The arg pointer (which
6275 is eliminated) points to the virtual frame pointer, while the frame
6276 pointer (which may be eliminated) points to the stack pointer after
6277 the initial adjustments. */
6280 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
6282 rtx offset2 = const0_rtx;
6283 rtx reg = eliminate_constant_term (addr, &offset2);
6286 offset = INTVAL (offset2);
6288 if (reg == stack_pointer_rtx || reg == frame_pointer_rtx
6289 || reg == hard_frame_pointer_rtx)
6291 HOST_WIDE_INT frame_size = (!cfun->machine->frame.initialized)
6292 ? compute_frame_size (get_frame_size ())
6293 : cfun->machine->frame.total_size;
6295 /* MIPS16 frame is smaller */
6296 if (frame_pointer_needed && TARGET_MIPS16)
6297 frame_size -= cfun->machine->frame.args_size;
6299 offset = offset - frame_size;
6302 /* sdbout_parms does not want this to crash for unrecognized cases. */
6304 else if (reg != arg_pointer_rtx)
6305 fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
6312 /* If OP is an UNSPEC address, return the address to which it refers,
6313 otherwise return OP itself. */
6316 mips_strip_unspec_address (rtx op)
6320 split_const (op, &base, &offset);
6321 if (UNSPEC_ADDRESS_P (base))
6322 op = plus_constant (UNSPEC_ADDRESS (base), INTVAL (offset));
6326 /* Implement the PRINT_OPERAND macro. The MIPS-specific operand codes are:
6328 'X' OP is CONST_INT, prints 32 bits in hexadecimal format = "0x%08x",
6329 'x' OP is CONST_INT, prints 16 bits in hexadecimal format = "0x%04x",
6330 'h' OP is HIGH, prints %hi(X),
6331 'd' output integer constant in decimal,
6332 'z' if the operand is 0, use $0 instead of normal operand.
6333 'D' print second part of double-word register or memory operand.
6334 'L' print low-order register of double-word register operand.
6335 'M' print high-order register of double-word register operand.
6336 'C' print part of opcode for a branch condition.
6337 'F' print part of opcode for a floating-point branch condition.
6338 'N' print part of opcode for a branch condition, inverted.
6339 'W' print part of opcode for a floating-point branch condition, inverted.
6340 'T' print 'f' for (eq:CC ...), 't' for (ne:CC ...),
6341 'z' for (eq:?I ...), 'n' for (ne:?I ...).
6342 't' like 'T', but with the EQ/NE cases reversed
6343 'Y' for a CONST_INT X, print mips_fp_conditions[X]
6344 'Z' print the operand and a comma for ISA_HAS_8CC, otherwise print nothing
6345 'R' print the reloc associated with LO_SUM
6346 'q' print DSP accumulator registers
6348 The punctuation characters are:
6350 '(' Turn on .set noreorder
6351 ')' Turn on .set reorder
6352 '[' Turn on .set noat
6354 '<' Turn on .set nomacro
6355 '>' Turn on .set macro
6356 '{' Turn on .set volatile (not GAS)
6357 '}' Turn on .set novolatile (not GAS)
6358 '&' Turn on .set noreorder if filling delay slots
6359 '*' Turn on both .set noreorder and .set nomacro if filling delay slots
6360 '!' Turn on .set nomacro if filling delay slots
6361 '#' Print nop if in a .set noreorder section.
6362 '/' Like '#', but does nothing within a delayed branch sequence
6363 '?' Print 'l' if we are to use a branch likely instead of normal branch.
6364 '@' Print the name of the assembler temporary register (at or $1).
6365 '.' Print the name of the register with a hard-wired zero (zero or $0).
6366 '^' Print the name of the pic call-through register (t9 or $25).
6367 '$' Print the name of the stack pointer register (sp or $29).
6368 '+' Print the name of the gp register (usually gp or $28).
6369 '~' Output a branch alignment to LABEL_ALIGN(NULL). */
6372 print_operand (FILE *file, rtx op, int letter)
6374 register enum rtx_code code;
6376 if (PRINT_OPERAND_PUNCT_VALID_P (letter))
6381 if (mips_branch_likely)
6386 fputs (reg_names [GP_REG_FIRST + 1], file);
6390 fputs (reg_names [PIC_FUNCTION_ADDR_REGNUM], file);
6394 fputs (reg_names [GP_REG_FIRST + 0], file);
6398 fputs (reg_names[STACK_POINTER_REGNUM], file);
6402 fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
6406 if (final_sequence != 0 && set_noreorder++ == 0)
6407 fputs (".set\tnoreorder\n\t", file);
6411 if (final_sequence != 0)
6413 if (set_noreorder++ == 0)
6414 fputs (".set\tnoreorder\n\t", file);
6416 if (set_nomacro++ == 0)
6417 fputs (".set\tnomacro\n\t", file);
6422 if (final_sequence != 0 && set_nomacro++ == 0)
6423 fputs ("\n\t.set\tnomacro", file);
6427 if (set_noreorder != 0)
6428 fputs ("\n\tnop", file);
6432 /* Print an extra newline so that the delayed insn is separated
6433 from the following ones. This looks neater and is consistent
6434 with non-nop delayed sequences. */
6435 if (set_noreorder != 0 && final_sequence == 0)
6436 fputs ("\n\tnop\n", file);
6440 if (set_noreorder++ == 0)
6441 fputs (".set\tnoreorder\n\t", file);
6445 if (set_noreorder == 0)
6446 error ("internal error: %%) found without a %%( in assembler pattern");
6448 else if (--set_noreorder == 0)
6449 fputs ("\n\t.set\treorder", file);
6454 if (set_noat++ == 0)
6455 fputs (".set\tnoat\n\t", file);
6460 error ("internal error: %%] found without a %%[ in assembler pattern");
6461 else if (--set_noat == 0)
6462 fputs ("\n\t.set\tat", file);
6467 if (set_nomacro++ == 0)
6468 fputs (".set\tnomacro\n\t", file);
6472 if (set_nomacro == 0)
6473 error ("internal error: %%> found without a %%< in assembler pattern");
6474 else if (--set_nomacro == 0)
6475 fputs ("\n\t.set\tmacro", file);
6480 if (set_volatile++ == 0)
6481 fputs ("#.set\tvolatile\n\t", file);
6485 if (set_volatile == 0)
6486 error ("internal error: %%} found without a %%{ in assembler pattern");
6487 else if (--set_volatile == 0)
6488 fputs ("\n\t#.set\tnovolatile", file);
6494 if (align_labels_log > 0)
6495 ASM_OUTPUT_ALIGN (file, align_labels_log);
6500 error ("PRINT_OPERAND: unknown punctuation '%c'", letter);
6509 error ("PRINT_OPERAND null pointer");
6513 code = GET_CODE (op);
6518 case EQ: fputs ("eq", file); break;
6519 case NE: fputs ("ne", file); break;
6520 case GT: fputs ("gt", file); break;
6521 case GE: fputs ("ge", file); break;
6522 case LT: fputs ("lt", file); break;
6523 case LE: fputs ("le", file); break;
6524 case GTU: fputs ("gtu", file); break;
6525 case GEU: fputs ("geu", file); break;
6526 case LTU: fputs ("ltu", file); break;
6527 case LEU: fputs ("leu", file); break;
6529 fatal_insn ("PRINT_OPERAND, invalid insn for %%C", op);
6532 else if (letter == 'N')
6535 case EQ: fputs ("ne", file); break;
6536 case NE: fputs ("eq", file); break;
6537 case GT: fputs ("le", file); break;
6538 case GE: fputs ("lt", file); break;
6539 case LT: fputs ("ge", file); break;
6540 case LE: fputs ("gt", file); break;
6541 case GTU: fputs ("leu", file); break;
6542 case GEU: fputs ("ltu", file); break;
6543 case LTU: fputs ("geu", file); break;
6544 case LEU: fputs ("gtu", file); break;
6546 fatal_insn ("PRINT_OPERAND, invalid insn for %%N", op);
6549 else if (letter == 'F')
6552 case EQ: fputs ("c1f", file); break;
6553 case NE: fputs ("c1t", file); break;
6555 fatal_insn ("PRINT_OPERAND, invalid insn for %%F", op);
6558 else if (letter == 'W')
6561 case EQ: fputs ("c1t", file); break;
6562 case NE: fputs ("c1f", file); break;
6564 fatal_insn ("PRINT_OPERAND, invalid insn for %%W", op);
6567 else if (letter == 'h')
6569 if (GET_CODE (op) == HIGH)
6572 print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_hi_relocs);
6575 else if (letter == 'R')
6576 print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_lo_relocs);
6578 else if (letter == 'Y')
6580 if (GET_CODE (op) == CONST_INT
6581 && ((unsigned HOST_WIDE_INT) INTVAL (op)
6582 < ARRAY_SIZE (mips_fp_conditions)))
6583 fputs (mips_fp_conditions[INTVAL (op)], file);
6585 output_operand_lossage ("invalid %%Y value");
6588 else if (letter == 'Z')
6592 print_operand (file, op, 0);
6597 else if (letter == 'q')
6602 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
6604 regnum = REGNO (op);
6605 if (MD_REG_P (regnum))
6606 fprintf (file, "$ac0");
6607 else if (DSP_ACC_REG_P (regnum))
6608 fprintf (file, "$ac%c", reg_names[regnum][3]);
6610 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
6613 else if (code == REG || code == SUBREG)
6615 register int regnum;
6618 regnum = REGNO (op);
6620 regnum = true_regnum (op);
6622 if ((letter == 'M' && ! WORDS_BIG_ENDIAN)
6623 || (letter == 'L' && WORDS_BIG_ENDIAN)
6627 fprintf (file, "%s", reg_names[regnum]);
6630 else if (code == MEM)
6633 output_address (plus_constant (XEXP (op, 0), 4));
6635 output_address (XEXP (op, 0));
6638 else if (letter == 'x' && GET_CODE (op) == CONST_INT)
6639 fprintf (file, HOST_WIDE_INT_PRINT_HEX, 0xffff & INTVAL(op));
6641 else if (letter == 'X' && GET_CODE(op) == CONST_INT)
6642 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
6644 else if (letter == 'd' && GET_CODE(op) == CONST_INT)
6645 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (INTVAL(op)));
6647 else if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
6648 fputs (reg_names[GP_REG_FIRST], file);
6650 else if (letter == 'd' || letter == 'x' || letter == 'X')
6651 output_operand_lossage ("invalid use of %%d, %%x, or %%X");
6653 else if (letter == 'T' || letter == 't')
6655 int truth = (code == NE) == (letter == 'T');
6656 fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
6659 else if (CONST_GP_P (op))
6660 fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
6663 output_addr_const (file, mips_strip_unspec_address (op));
6667 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM
6668 in context CONTEXT. RELOCS is the array of relocations to use. */
6671 print_operand_reloc (FILE *file, rtx op, enum mips_symbol_context context,
6672 const char **relocs)
6674 enum mips_symbol_type symbol_type;
6677 symbol_type = mips_classify_symbolic_expression (op, context);
6678 if (relocs[symbol_type] == 0)
6679 fatal_insn ("PRINT_OPERAND, invalid operand for relocation", op);
6681 fputs (relocs[symbol_type], file);
6682 output_addr_const (file, mips_strip_unspec_address (op));
6683 for (p = relocs[symbol_type]; *p != 0; p++)
6688 /* Output address operand X to FILE. */
6691 print_operand_address (FILE *file, rtx x)
6693 struct mips_address_info addr;
6695 if (mips_classify_address (&addr, x, word_mode, true))
6699 print_operand (file, addr.offset, 0);
6700 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
6703 case ADDRESS_LO_SUM:
6704 print_operand_reloc (file, addr.offset, SYMBOL_CONTEXT_MEM,
6706 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
6709 case ADDRESS_CONST_INT:
6710 output_addr_const (file, x);
6711 fprintf (file, "(%s)", reg_names[0]);
6714 case ADDRESS_SYMBOLIC:
6715 output_addr_const (file, mips_strip_unspec_address (x));
6721 /* When using assembler macros, keep track of all of small-data externs
6722 so that mips_file_end can emit the appropriate declarations for them.
6724 In most cases it would be safe (though pointless) to emit .externs
6725 for other symbols too. One exception is when an object is within
6726 the -G limit but declared by the user to be in a section other
6727 than .sbss or .sdata. */
6730 mips_output_external (FILE *file, tree decl, const char *name)
6732 default_elf_asm_output_external (file, decl, name);
6734 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
6735 set in order to avoid putting out names that are never really
6737 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
6739 if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
6741 fputs ("\t.extern\t", file);
6742 assemble_name (file, name);
6743 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC "\n",
6744 int_size_in_bytes (TREE_TYPE (decl)));
6746 else if (TARGET_IRIX
6747 && mips_abi == ABI_32
6748 && TREE_CODE (decl) == FUNCTION_DECL)
6750 /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
6751 `.global name .text' directive for every used but
6752 undefined function. If we don't, the linker may perform
6753 an optimization (skipping over the insns that set $gp)
6754 when it is unsafe. */
6755 fputs ("\t.globl ", file);
6756 assemble_name (file, name);
6757 fputs (" .text\n", file);
6762 /* Emit a new filename to a stream. If we are smuggling stabs, try to
6763 put out a MIPS ECOFF file and a stab. */
6766 mips_output_filename (FILE *stream, const char *name)
6769 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
6771 if (write_symbols == DWARF2_DEBUG)
6773 else if (mips_output_filename_first_time)
6775 mips_output_filename_first_time = 0;
6776 num_source_filenames += 1;
6777 current_function_file = name;
6778 fprintf (stream, "\t.file\t%d ", num_source_filenames);
6779 output_quoted_string (stream, name);
6780 putc ('\n', stream);
6783 /* If we are emitting stabs, let dbxout.c handle this (except for
6784 the mips_output_filename_first_time case). */
6785 else if (write_symbols == DBX_DEBUG)
6788 else if (name != current_function_file
6789 && strcmp (name, current_function_file) != 0)
6791 num_source_filenames += 1;
6792 current_function_file = name;
6793 fprintf (stream, "\t.file\t%d ", num_source_filenames);
6794 output_quoted_string (stream, name);
6795 putc ('\n', stream);
6799 /* Output an ASCII string, in a space-saving way. PREFIX is the string
6800 that should be written before the opening quote, such as "\t.ascii\t"
6801 for real string data or "\t# " for a comment. */
6804 mips_output_ascii (FILE *stream, const char *string_param, size_t len,
6809 register const unsigned char *string =
6810 (const unsigned char *)string_param;
6812 fprintf (stream, "%s\"", prefix);
6813 for (i = 0; i < len; i++)
6815 register int c = string[i];
6819 if (c == '\\' || c == '\"')
6821 putc ('\\', stream);
6829 fprintf (stream, "\\%03o", c);
6833 if (cur_pos > 72 && i+1 < len)
6836 fprintf (stream, "\"\n%s\"", prefix);
6839 fprintf (stream, "\"\n");
6842 /* Implement TARGET_ASM_FILE_START. */
6845 mips_file_start (void)
6847 default_file_start ();
6851 /* Generate a special section to describe the ABI switches used to
6852 produce the resultant binary. This used to be done by the assembler
6853 setting bits in the ELF header's flags field, but we have run out of
6854 bits. GDB needs this information in order to be able to correctly
6855 debug these binaries. See the function mips_gdbarch_init() in
6856 gdb/mips-tdep.c. This is unnecessary for the IRIX 5/6 ABIs and
6857 causes unnecessary IRIX 6 ld warnings. */
6858 const char * abi_string = NULL;
6862 case ABI_32: abi_string = "abi32"; break;
6863 case ABI_N32: abi_string = "abiN32"; break;
6864 case ABI_64: abi_string = "abi64"; break;
6865 case ABI_O64: abi_string = "abiO64"; break;
6866 case ABI_EABI: abi_string = TARGET_64BIT ? "eabi64" : "eabi32"; break;
6870 /* Note - we use fprintf directly rather than calling switch_to_section
6871 because in this way we can avoid creating an allocated section. We
6872 do not want this section to take up any space in the running
6874 fprintf (asm_out_file, "\t.section .mdebug.%s\n", abi_string);
6876 /* There is no ELF header flag to distinguish long32 forms of the
6877 EABI from long64 forms. Emit a special section to help tools
6878 such as GDB. Do the same for o64, which is sometimes used with
6880 if (mips_abi == ABI_EABI || mips_abi == ABI_O64)
6881 fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n",
6882 TARGET_LONG64 ? 64 : 32);
6884 /* Restore the default section. */
6885 fprintf (asm_out_file, "\t.previous\n");
6887 #ifdef HAVE_AS_GNU_ATTRIBUTE
6888 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
6889 TARGET_HARD_FLOAT_ABI ? (TARGET_DOUBLE_FLOAT ? 1 : 2) : 3);
6893 /* Generate the pseudo ops that System V.4 wants. */
6894 if (TARGET_ABICALLS)
6895 fprintf (asm_out_file, "\t.abicalls\n");
6897 if (flag_verbose_asm)
6898 fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
6900 mips_section_threshold, mips_arch_info->name, mips_isa);
6903 #ifdef BSS_SECTION_ASM_OP
6904 /* Implement ASM_OUTPUT_ALIGNED_BSS. This differs from the default only
6905 in the use of sbss. */
6908 mips_output_aligned_bss (FILE *stream, tree decl, const char *name,
6909 unsigned HOST_WIDE_INT size, int align)
6911 extern tree last_assemble_variable_decl;
6913 if (mips_in_small_data_p (decl))
6914 switch_to_section (get_named_section (NULL, ".sbss", 0));
6916 switch_to_section (bss_section);
6917 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
6918 last_assemble_variable_decl = decl;
6919 ASM_DECLARE_OBJECT_NAME (stream, name, decl);
6920 ASM_OUTPUT_SKIP (stream, size != 0 ? size : 1);
6924 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
6925 elfos.h version, but we also need to handle -muninit-const-in-rodata. */
6928 mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
6929 unsigned HOST_WIDE_INT size,
6932 /* If the target wants uninitialized const declarations in
6933 .rdata then don't put them in .comm. */
6934 if (TARGET_EMBEDDED_DATA && TARGET_UNINIT_CONST_IN_RODATA
6935 && TREE_CODE (decl) == VAR_DECL && TREE_READONLY (decl)
6936 && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
6938 if (TREE_PUBLIC (decl) && DECL_NAME (decl))
6939 targetm.asm_out.globalize_label (stream, name);
6941 switch_to_section (readonly_data_section);
6942 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
6943 mips_declare_object (stream, name, "",
6944 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
6948 mips_declare_common_object (stream, name, "\n\t.comm\t",
6952 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
6953 NAME is the name of the object and ALIGN is the required alignment
6954 in bytes. TAKES_ALIGNMENT_P is true if the directive takes a third
6955 alignment argument. */
6958 mips_declare_common_object (FILE *stream, const char *name,
6959 const char *init_string,
6960 unsigned HOST_WIDE_INT size,
6961 unsigned int align, bool takes_alignment_p)
6963 if (!takes_alignment_p)
6965 size += (align / BITS_PER_UNIT) - 1;
6966 size -= size % (align / BITS_PER_UNIT);
6967 mips_declare_object (stream, name, init_string,
6968 "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
6971 mips_declare_object (stream, name, init_string,
6972 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
6973 size, align / BITS_PER_UNIT);
6976 /* Emit either a label, .comm, or .lcomm directive. When using assembler
6977 macros, mark the symbol as written so that mips_file_end won't emit an
6978 .extern for it. STREAM is the output file, NAME is the name of the
6979 symbol, INIT_STRING is the string that should be written before the
6980 symbol and FINAL_STRING is the string that should be written after it.
6981 FINAL_STRING is a printf() format that consumes the remaining arguments. */
6984 mips_declare_object (FILE *stream, const char *name, const char *init_string,
6985 const char *final_string, ...)
6989 fputs (init_string, stream);
6990 assemble_name (stream, name);
6991 va_start (ap, final_string);
6992 vfprintf (stream, final_string, ap);
6995 if (!TARGET_EXPLICIT_RELOCS)
6997 tree name_tree = get_identifier (name);
6998 TREE_ASM_WRITTEN (name_tree) = 1;
7002 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
7003 extern int size_directive_output;
7005 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
7006 definitions except that it uses mips_declare_object() to emit the label. */
7009 mips_declare_object_name (FILE *stream, const char *name,
7010 tree decl ATTRIBUTE_UNUSED)
7012 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
7013 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
7016 size_directive_output = 0;
7017 if (!flag_inhibit_size_directive && DECL_SIZE (decl))
7021 size_directive_output = 1;
7022 size = int_size_in_bytes (TREE_TYPE (decl));
7023 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
7026 mips_declare_object (stream, name, "", ":\n");
7029 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */
7032 mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
7036 name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
7037 if (!flag_inhibit_size_directive
7038 && DECL_SIZE (decl) != 0
7039 && !at_end && top_level
7040 && DECL_INITIAL (decl) == error_mark_node
7041 && !size_directive_output)
7045 size_directive_output = 1;
7046 size = int_size_in_bytes (TREE_TYPE (decl));
7047 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
7052 /* Return true if X in context CONTEXT is a small data address that can
7053 be rewritten as a LO_SUM. */
7056 mips_rewrite_small_data_p (rtx x, enum mips_symbol_context context)
7058 enum mips_symbol_type symbol_type;
7060 return (TARGET_EXPLICIT_RELOCS
7061 && mips_symbolic_constant_p (x, context, &symbol_type)
7062 && symbol_type == SYMBOL_GP_RELATIVE);
7066 /* A for_each_rtx callback for mips_small_data_pattern_p. DATA is the
7067 containing MEM, or null if none. */
7070 mips_small_data_pattern_1 (rtx *loc, void *data)
7072 enum mips_symbol_context context;
7074 if (GET_CODE (*loc) == LO_SUM)
7079 if (for_each_rtx (&XEXP (*loc, 0), mips_small_data_pattern_1, *loc))
7084 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
7085 return mips_rewrite_small_data_p (*loc, context);
7088 /* Return true if OP refers to small data symbols directly, not through
7092 mips_small_data_pattern_p (rtx op)
7094 return for_each_rtx (&op, mips_small_data_pattern_1, 0);
7097 /* A for_each_rtx callback, used by mips_rewrite_small_data.
7098 DATA is the containing MEM, or null if none. */
7101 mips_rewrite_small_data_1 (rtx *loc, void *data)
7103 enum mips_symbol_context context;
7107 for_each_rtx (&XEXP (*loc, 0), mips_rewrite_small_data_1, *loc);
7111 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
7112 if (mips_rewrite_small_data_p (*loc, context))
7113 *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
7115 if (GET_CODE (*loc) == LO_SUM)
7121 /* If possible, rewrite OP so that it refers to small data using
7122 explicit relocations. */
7125 mips_rewrite_small_data (rtx op)
7127 op = copy_insn (op);
7128 for_each_rtx (&op, mips_rewrite_small_data_1, 0);
7132 /* Return true if the current function has an insn that implicitly
7136 mips_function_has_gp_insn (void)
7138 /* Don't bother rechecking if we found one last time. */
7139 if (!cfun->machine->has_gp_insn_p)
7143 push_topmost_sequence ();
7144 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7146 && GET_CODE (PATTERN (insn)) != USE
7147 && GET_CODE (PATTERN (insn)) != CLOBBER
7148 && (get_attr_got (insn) != GOT_UNSET
7149 || small_data_pattern (PATTERN (insn), VOIDmode)))
7151 pop_topmost_sequence ();
7153 cfun->machine->has_gp_insn_p = (insn != 0);
7155 return cfun->machine->has_gp_insn_p;
7159 /* Return the register that should be used as the global pointer
7160 within this function. Return 0 if the function doesn't need
7161 a global pointer. */
7164 mips_global_pointer (void)
7168 /* $gp is always available unless we're using a GOT. */
7169 if (!TARGET_USE_GOT)
7170 return GLOBAL_POINTER_REGNUM;
7172 /* We must always provide $gp when it is used implicitly. */
7173 if (!TARGET_EXPLICIT_RELOCS)
7174 return GLOBAL_POINTER_REGNUM;
7176 /* FUNCTION_PROFILER includes a jal macro, so we need to give it
7178 if (current_function_profile)
7179 return GLOBAL_POINTER_REGNUM;
7181 /* If the function has a nonlocal goto, $gp must hold the correct
7182 global pointer for the target function. */
7183 if (current_function_has_nonlocal_goto)
7184 return GLOBAL_POINTER_REGNUM;
7186 /* If the gp is never referenced, there's no need to initialize it.
7187 Note that reload can sometimes introduce constant pool references
7188 into a function that otherwise didn't need them. For example,
7189 suppose we have an instruction like:
7191 (set (reg:DF R1) (float:DF (reg:SI R2)))
7193 If R2 turns out to be constant such as 1, the instruction may have a
7194 REG_EQUAL note saying that R1 == 1.0. Reload then has the option of
7195 using this constant if R2 doesn't get allocated to a register.
7197 In cases like these, reload will have added the constant to the pool
7198 but no instruction will yet refer to it. */
7199 if (!df_regs_ever_live_p (GLOBAL_POINTER_REGNUM)
7200 && !current_function_uses_const_pool
7201 && !mips_function_has_gp_insn ())
7204 /* We need a global pointer, but perhaps we can use a call-clobbered
7205 register instead of $gp. */
7206 if (TARGET_CALL_SAVED_GP && current_function_is_leaf)
7207 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
7208 if (!df_regs_ever_live_p (regno)
7209 && call_really_used_regs[regno]
7210 && !fixed_regs[regno]
7211 && regno != PIC_FUNCTION_ADDR_REGNUM)
7214 return GLOBAL_POINTER_REGNUM;
7218 /* Return true if the function return value MODE will get returned in a
7219 floating-point register. */
7222 mips_return_mode_in_fpr_p (enum machine_mode mode)
7224 return ((GET_MODE_CLASS (mode) == MODE_FLOAT
7225 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
7226 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
7227 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);
7230 /* Return a two-character string representing a function floating-point
7231 return mode, used to name MIPS16 function stubs. */
7234 mips16_call_stub_mode_suffix (enum machine_mode mode)
7238 else if (mode == DFmode)
7240 else if (mode == SCmode)
7242 else if (mode == DCmode)
7244 else if (mode == V2SFmode)
7250 /* Return true if the current function returns its value in a floating-point
7251 register in MIPS16 mode. */
7254 mips16_cfun_returns_in_fpr_p (void)
7256 tree return_type = DECL_RESULT (current_function_decl);
7257 return (TARGET_MIPS16
7258 && TARGET_HARD_FLOAT_ABI
7259 && !aggregate_value_p (return_type, current_function_decl)
7260 && mips_return_mode_in_fpr_p (DECL_MODE (return_type)));
7264 /* Return true if the current function must save REGNO. */
7267 mips_save_reg_p (unsigned int regno)
7269 /* We only need to save $gp if TARGET_CALL_SAVED_GP and only then
7270 if we have not chosen a call-clobbered substitute. */
7271 if (regno == GLOBAL_POINTER_REGNUM)
7272 return TARGET_CALL_SAVED_GP && cfun->machine->global_pointer == regno;
7274 /* Check call-saved registers. */
7275 if ((current_function_saves_all_registers || df_regs_ever_live_p (regno))
7276 && !call_really_used_regs[regno])
7279 /* Save both registers in an FPR pair if either one is used. This is
7280 needed for the case when MIN_FPRS_PER_FMT == 1, which allows the odd
7281 register to be used without the even register. */
7282 if (FP_REG_P (regno)
7283 && MAX_FPRS_PER_FMT == 2
7284 && df_regs_ever_live_p (regno + 1)
7285 && !call_really_used_regs[regno + 1])
7288 /* We need to save the old frame pointer before setting up a new one. */
7289 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
7292 /* We need to save the incoming return address if it is ever clobbered
7293 within the function, if __builtin_eh_return is being used to set a
7294 different return address, or if a stub is being used to return a
7296 if (regno == GP_REG_FIRST + 31
7297 && (df_regs_ever_live_p (regno)
7298 || current_function_calls_eh_return
7299 || mips16_cfun_returns_in_fpr_p ()))
7305 /* Return the index of the lowest X in the range [0, SIZE) for which
7306 bit REGS[X] is set in MASK. Return SIZE if there is no such X. */
7309 mips16e_find_first_register (unsigned int mask, const unsigned char *regs,
7314 for (i = 0; i < size; i++)
7315 if (BITSET_P (mask, regs[i]))
7321 /* *MASK_PTR is a mask of general purpose registers and *GP_REG_SIZE_PTR
7322 is the number of bytes that they occupy. If *MASK_PTR contains REGS[X]
7323 for some X in [0, SIZE), adjust *MASK_PTR and *GP_REG_SIZE_PTR so that
7324 the same is true for all indexes (X, SIZE). */
7327 mips16e_mask_registers (unsigned int *mask_ptr, const unsigned char *regs,
7328 unsigned int size, HOST_WIDE_INT *gp_reg_size_ptr)
7332 i = mips16e_find_first_register (*mask_ptr, regs, size);
7333 for (i++; i < size; i++)
7334 if (!BITSET_P (*mask_ptr, regs[i]))
7336 *gp_reg_size_ptr += GET_MODE_SIZE (gpr_mode);
7337 *mask_ptr |= 1 << regs[i];
7341 /* Return the bytes needed to compute the frame pointer from the current
7342 stack pointer. SIZE is the size (in bytes) of the local variables.
7344 MIPS stack frames look like:
7346 Before call After call
7347 high +-----------------------+ +-----------------------+
7349 | caller's temps. | | caller's temps. |
7351 +-----------------------+ +-----------------------+
7353 | arguments on stack. | | arguments on stack. |
7355 +-----------------------+ +-----------------------+
7356 | 4 words to save | | 4 words to save |
7357 | arguments passed | | arguments passed |
7358 | in registers, even | | in registers, even |
7359 | if not passed. | | if not passed. |
7360 SP->+-----------------------+ VFP->+-----------------------+
7361 (VFP = SP+fp_sp_offset) | |\
7362 | fp register save | | fp_reg_size
7364 SP+gp_sp_offset->+-----------------------+
7366 | | gp register save | | gp_reg_size
7367 gp_reg_rounded | | |/
7368 | +-----------------------+
7369 \| alignment padding |
7370 +-----------------------+
7372 | local variables | | var_size
7374 +-----------------------+
7376 | alloca allocations |
7378 +-----------------------+
7380 cprestore_size | | GP save for V.4 abi |
7382 +-----------------------+
7384 | arguments on stack | |
7386 +-----------------------+ |
7387 | 4 words to save | | args_size
7388 | arguments passed | |
7389 | in registers, even | |
7390 | if not passed. | |
7391 low | (TARGET_OLDABI only) |/
7392 memory SP->+-----------------------+
7397 compute_frame_size (HOST_WIDE_INT size)
7400 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
7401 HOST_WIDE_INT var_size; /* # bytes that variables take up */
7402 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
7403 HOST_WIDE_INT cprestore_size; /* # bytes that the cprestore slot takes up */
7404 HOST_WIDE_INT gp_reg_rounded; /* # bytes needed to store gp after rounding */
7405 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
7406 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
7407 unsigned int mask; /* mask of saved gp registers */
7408 unsigned int fmask; /* mask of saved fp registers */
7410 cfun->machine->global_pointer = mips_global_pointer ();
7416 var_size = MIPS_STACK_ALIGN (size);
7417 args_size = current_function_outgoing_args_size;
7418 cprestore_size = MIPS_STACK_ALIGN (STARTING_FRAME_OFFSET) - args_size;
7420 /* The space set aside by STARTING_FRAME_OFFSET isn't needed in leaf
7421 functions. If the function has local variables, we're committed
7422 to allocating it anyway. Otherwise reclaim it here. */
7423 if (var_size == 0 && current_function_is_leaf)
7424 cprestore_size = args_size = 0;
7426 /* The MIPS 3.0 linker does not like functions that dynamically
7427 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
7428 looks like we are trying to create a second frame pointer to the
7429 function, so allocate some stack space to make it happy. */
7431 if (args_size == 0 && current_function_calls_alloca)
7432 args_size = 4 * UNITS_PER_WORD;
7434 total_size = var_size + args_size + cprestore_size;
7436 /* Calculate space needed for gp registers. */
7437 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
7438 if (mips_save_reg_p (regno))
7440 gp_reg_size += GET_MODE_SIZE (gpr_mode);
7441 mask |= 1 << (regno - GP_REG_FIRST);
7444 /* We need to restore these for the handler. */
7445 if (current_function_calls_eh_return)
7450 regno = EH_RETURN_DATA_REGNO (i);
7451 if (regno == INVALID_REGNUM)
7453 gp_reg_size += GET_MODE_SIZE (gpr_mode);
7454 mask |= 1 << (regno - GP_REG_FIRST);
7458 /* The MIPS16e SAVE and RESTORE instructions have two ranges of registers:
7459 $a3-$a0 and $s2-$s8. If we save one register in the range, we must
7460 save all later registers too. */
7461 if (GENERATE_MIPS16E_SAVE_RESTORE)
7463 mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
7464 ARRAY_SIZE (mips16e_s2_s8_regs), &gp_reg_size);
7465 mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
7466 ARRAY_SIZE (mips16e_a0_a3_regs), &gp_reg_size);
7469 /* This loop must iterate over the same space as its companion in
7470 mips_for_each_saved_reg. */
7471 if (TARGET_HARD_FLOAT)
7472 for (regno = (FP_REG_LAST - MAX_FPRS_PER_FMT + 1);
7473 regno >= FP_REG_FIRST;
7474 regno -= MAX_FPRS_PER_FMT)
7475 if (mips_save_reg_p (regno))
7477 fp_reg_size += MAX_FPRS_PER_FMT * UNITS_PER_FPREG;
7478 fmask |= ((1 << MAX_FPRS_PER_FMT) - 1) << (regno - FP_REG_FIRST);
7481 gp_reg_rounded = MIPS_STACK_ALIGN (gp_reg_size);
7482 total_size += gp_reg_rounded + MIPS_STACK_ALIGN (fp_reg_size);
7484 /* Add in the space required for saving incoming register arguments. */
7485 total_size += current_function_pretend_args_size;
7486 total_size += MIPS_STACK_ALIGN (cfun->machine->varargs_size);
7488 /* Save other computed information. */
7489 cfun->machine->frame.total_size = total_size;
7490 cfun->machine->frame.var_size = var_size;
7491 cfun->machine->frame.args_size = args_size;
7492 cfun->machine->frame.cprestore_size = cprestore_size;
7493 cfun->machine->frame.gp_reg_size = gp_reg_size;
7494 cfun->machine->frame.fp_reg_size = fp_reg_size;
7495 cfun->machine->frame.mask = mask;
7496 cfun->machine->frame.fmask = fmask;
7497 cfun->machine->frame.initialized = reload_completed;
7498 cfun->machine->frame.num_gp = gp_reg_size / UNITS_PER_WORD;
7499 cfun->machine->frame.num_fp = (fp_reg_size
7500 / (MAX_FPRS_PER_FMT * UNITS_PER_FPREG));
7504 HOST_WIDE_INT offset;
7506 if (GENERATE_MIPS16E_SAVE_RESTORE)
7507 /* MIPS16e SAVE and RESTORE instructions require the GP save area
7508 to be aligned at the high end with any padding at the low end.
7509 It is only safe to use this calculation for o32, where we never
7510 have pretend arguments, and where any varargs will be saved in
7511 the caller-allocated area rather than at the top of the frame. */
7512 offset = (total_size - GET_MODE_SIZE (gpr_mode));
7514 offset = (args_size + cprestore_size + var_size
7515 + gp_reg_size - GET_MODE_SIZE (gpr_mode));
7516 cfun->machine->frame.gp_sp_offset = offset;
7517 cfun->machine->frame.gp_save_offset = offset - total_size;
7521 cfun->machine->frame.gp_sp_offset = 0;
7522 cfun->machine->frame.gp_save_offset = 0;
7527 HOST_WIDE_INT offset;
7529 offset = (args_size + cprestore_size + var_size
7530 + gp_reg_rounded + fp_reg_size
7531 - MAX_FPRS_PER_FMT * UNITS_PER_FPREG);
7532 cfun->machine->frame.fp_sp_offset = offset;
7533 cfun->machine->frame.fp_save_offset = offset - total_size;
7537 cfun->machine->frame.fp_sp_offset = 0;
7538 cfun->machine->frame.fp_save_offset = 0;
7541 /* Ok, we're done. */
7545 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame
7546 pointer or argument pointer. TO is either the stack pointer or
7547 hard frame pointer. */
7550 mips_initial_elimination_offset (int from, int to)
7552 HOST_WIDE_INT offset;
7554 compute_frame_size (get_frame_size ());
7556 /* Set OFFSET to the offset from the stack pointer. */
7559 case FRAME_POINTER_REGNUM:
7563 case ARG_POINTER_REGNUM:
7564 offset = (cfun->machine->frame.total_size
7565 - current_function_pretend_args_size);
7572 if (TARGET_MIPS16 && to == HARD_FRAME_POINTER_REGNUM)
7573 offset -= cfun->machine->frame.args_size;
7578 /* Implement RETURN_ADDR_RTX. Note, we do not support moving
7579 back to a previous frame. */
7581 mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
7586 return get_hard_reg_initial_val (Pmode, GP_REG_FIRST + 31);
7589 /* Use FN to save or restore register REGNO. MODE is the register's
7590 mode and OFFSET is the offset of its save slot from the current
7594 mips_save_restore_reg (enum machine_mode mode, int regno,
7595 HOST_WIDE_INT offset, mips_save_restore_fn fn)
7599 mem = gen_frame_mem (mode, plus_constant (stack_pointer_rtx, offset));
7601 fn (gen_rtx_REG (mode, regno), mem);
7605 /* Call FN for each register that is saved by the current function.
7606 SP_OFFSET is the offset of the current stack pointer from the start
7610 mips_for_each_saved_reg (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
7612 enum machine_mode fpr_mode;
7613 HOST_WIDE_INT offset;
7616 /* Save registers starting from high to low. The debuggers prefer at least
7617 the return register be stored at func+4, and also it allows us not to
7618 need a nop in the epilogue if at least one register is reloaded in
7619 addition to return address. */
7620 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
7621 for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
7622 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
7624 mips_save_restore_reg (gpr_mode, regno, offset, fn);
7625 offset -= GET_MODE_SIZE (gpr_mode);
7628 /* This loop must iterate over the same space as its companion in
7629 compute_frame_size. */
7630 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
7631 fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
7632 for (regno = (FP_REG_LAST - MAX_FPRS_PER_FMT + 1);
7633 regno >= FP_REG_FIRST;
7634 regno -= MAX_FPRS_PER_FMT)
7635 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
7637 mips_save_restore_reg (fpr_mode, regno, offset, fn);
7638 offset -= GET_MODE_SIZE (fpr_mode);
7642 /* If we're generating n32 or n64 abicalls, and the current function
7643 does not use $28 as its global pointer, emit a cplocal directive.
7644 Use pic_offset_table_rtx as the argument to the directive. */
7647 mips_output_cplocal (void)
7649 if (!TARGET_EXPLICIT_RELOCS
7650 && cfun->machine->global_pointer > 0
7651 && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
7652 output_asm_insn (".cplocal %+", 0);
7655 /* Return the style of GP load sequence that is being used for the
7656 current function. */
7658 enum mips_loadgp_style
7659 mips_current_loadgp_style (void)
7661 if (!TARGET_USE_GOT || cfun->machine->global_pointer == 0)
7667 if (TARGET_ABSOLUTE_ABICALLS)
7668 return LOADGP_ABSOLUTE;
7670 return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI;
7673 /* The __gnu_local_gp symbol. */
7675 static GTY(()) rtx mips_gnu_local_gp;
7677 /* If we're generating n32 or n64 abicalls, emit instructions
7678 to set up the global pointer. */
7681 mips_emit_loadgp (void)
7683 rtx addr, offset, incoming_address, base, index;
7685 switch (mips_current_loadgp_style ())
7687 case LOADGP_ABSOLUTE:
7688 if (mips_gnu_local_gp == NULL)
7690 mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
7691 SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
7693 emit_insn (gen_loadgp_absolute (mips_gnu_local_gp));
7697 addr = XEXP (DECL_RTL (current_function_decl), 0);
7698 offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
7699 incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
7700 emit_insn (gen_loadgp_newabi (offset, incoming_address));
7701 if (!TARGET_EXPLICIT_RELOCS)
7702 emit_insn (gen_loadgp_blockage ());
7706 base = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_BASE));
7707 index = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_INDEX));
7708 emit_insn (gen_loadgp_rtp (base, index));
7709 if (!TARGET_EXPLICIT_RELOCS)
7710 emit_insn (gen_loadgp_blockage ());
7718 /* Set up the stack and frame (if desired) for the function. */
7721 mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
7724 HOST_WIDE_INT tsize = cfun->machine->frame.total_size;
7726 #ifdef SDB_DEBUGGING_INFO
7727 if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
7728 SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));
7731 /* In mips16 mode, we may need to generate a 32 bit to handle
7732 floating point arguments. The linker will arrange for any 32-bit
7733 functions to call this stub, which will then jump to the 16-bit
7736 && TARGET_HARD_FLOAT_ABI
7737 && current_function_args_info.fp_code != 0)
7738 build_mips16_function_stub (file);
7740 /* Select the mips16 mode for this function. */
7742 fprintf (file, "\t.set\tmips16\n");
7744 fprintf (file, "\t.set\tnomips16\n");
7746 if (!FUNCTION_NAME_ALREADY_DECLARED)
7748 /* Get the function name the same way that toplev.c does before calling
7749 assemble_start_function. This is needed so that the name used here
7750 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
7751 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
7753 if (!flag_inhibit_size_directive)
7755 fputs ("\t.ent\t", file);
7756 assemble_name (file, fnname);
7760 assemble_name (file, fnname);
7761 fputs (":\n", file);
7764 /* Stop mips_file_end from treating this function as external. */
7765 if (TARGET_IRIX && mips_abi == ABI_32)
7766 TREE_ASM_WRITTEN (DECL_NAME (cfun->decl)) = 1;
7768 if (!flag_inhibit_size_directive)
7770 /* .frame FRAMEREG, FRAMESIZE, RETREG */
7772 "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
7773 "# vars= " HOST_WIDE_INT_PRINT_DEC ", regs= %d/%d"
7774 ", args= " HOST_WIDE_INT_PRINT_DEC
7775 ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
7776 (reg_names[(frame_pointer_needed)
7777 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM]),
7778 ((frame_pointer_needed && TARGET_MIPS16)
7779 ? tsize - cfun->machine->frame.args_size
7781 reg_names[GP_REG_FIRST + 31],
7782 cfun->machine->frame.var_size,
7783 cfun->machine->frame.num_gp,
7784 cfun->machine->frame.num_fp,
7785 cfun->machine->frame.args_size,
7786 cfun->machine->frame.cprestore_size);
7788 /* .mask MASK, GPOFFSET; .fmask FPOFFSET */
7789 fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
7790 cfun->machine->frame.mask,
7791 cfun->machine->frame.gp_save_offset);
7792 fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
7793 cfun->machine->frame.fmask,
7794 cfun->machine->frame.fp_save_offset);
7797 OLD_SP == *FRAMEREG + FRAMESIZE => can find old_sp from nominated FP reg.
7798 HIGHEST_GP_SAVED == *FRAMEREG + FRAMESIZE + GPOFFSET => can find saved regs. */
7801 if (mips_current_loadgp_style () == LOADGP_OLDABI)
7803 /* Handle the initialization of $gp for SVR4 PIC. */
7804 if (!cfun->machine->all_noreorder_p)
7805 output_asm_insn ("%(.cpload\t%^%)", 0);
7807 output_asm_insn ("%(.cpload\t%^\n\t%<", 0);
7809 else if (cfun->machine->all_noreorder_p)
7810 output_asm_insn ("%(%<", 0);
7812 /* Tell the assembler which register we're using as the global
7813 pointer. This is needed for thunks, since they can use either
7814 explicit relocs or assembler macros. */
7815 mips_output_cplocal ();
7818 /* Make the last instruction frame related and note that it performs
7819 the operation described by FRAME_PATTERN. */
7822 mips_set_frame_expr (rtx frame_pattern)
7826 insn = get_last_insn ();
7827 RTX_FRAME_RELATED_P (insn) = 1;
7828 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7834 /* Return a frame-related rtx that stores REG at MEM.
7835 REG must be a single register. */
7838 mips_frame_set (rtx mem, rtx reg)
7842 /* If we're saving the return address register and the dwarf return
7843 address column differs from the hard register number, adjust the
7844 note reg to refer to the former. */
7845 if (REGNO (reg) == GP_REG_FIRST + 31
7846 && DWARF_FRAME_RETURN_COLUMN != GP_REG_FIRST + 31)
7847 reg = gen_rtx_REG (GET_MODE (reg), DWARF_FRAME_RETURN_COLUMN);
7849 set = gen_rtx_SET (VOIDmode, mem, reg);
7850 RTX_FRAME_RELATED_P (set) = 1;
7856 /* Save register REG to MEM. Make the instruction frame-related. */
7859 mips_save_reg (rtx reg, rtx mem)
7861 if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
7865 if (mips_split_64bit_move_p (mem, reg))
7866 mips_split_64bit_move (mem, reg);
7868 mips_emit_move (mem, reg);
7870 x1 = mips_frame_set (mips_subword (mem, 0), mips_subword (reg, 0));
7871 x2 = mips_frame_set (mips_subword (mem, 1), mips_subword (reg, 1));
7872 mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
7877 && REGNO (reg) != GP_REG_FIRST + 31
7878 && !M16_REG_P (REGNO (reg)))
7880 /* Save a non-mips16 register by moving it through a temporary.
7881 We don't need to do this for $31 since there's a special
7882 instruction for it. */
7883 mips_emit_move (MIPS_PROLOGUE_TEMP (GET_MODE (reg)), reg);
7884 mips_emit_move (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
7887 mips_emit_move (mem, reg);
7889 mips_set_frame_expr (mips_frame_set (mem, reg));
7893 /* Return a move between register REGNO and memory location SP + OFFSET.
7894 Make the move a load if RESTORE_P, otherwise make it a frame-related
7898 mips16e_save_restore_reg (bool restore_p, HOST_WIDE_INT offset,
7903 mem = gen_frame_mem (SImode, plus_constant (stack_pointer_rtx, offset));
7904 reg = gen_rtx_REG (SImode, regno);
7906 ? gen_rtx_SET (VOIDmode, reg, mem)
7907 : mips_frame_set (mem, reg));
7910 /* Return RTL for a MIPS16e SAVE or RESTORE instruction; RESTORE_P says which.
7911 The instruction must:
7913 - Allocate or deallocate SIZE bytes in total; SIZE is known
7916 - Save or restore as many registers in *MASK_PTR as possible.
7917 The instruction saves the first registers at the top of the
7918 allocated area, with the other registers below it.
7920 - Save NARGS argument registers above the allocated area.
7922 (NARGS is always zero if RESTORE_P.)
7924 The SAVE and RESTORE instructions cannot save and restore all general
7925 registers, so there may be some registers left over for the caller to
7926 handle. Destructively modify *MASK_PTR so that it contains the registers
7927 that still need to be saved or restored. The caller can save these
7928 registers in the memory immediately below *OFFSET_PTR, which is a
7929 byte offset from the bottom of the allocated stack area. */
7932 mips16e_build_save_restore (bool restore_p, unsigned int *mask_ptr,
7933 HOST_WIDE_INT *offset_ptr, unsigned int nargs,
7937 HOST_WIDE_INT offset, top_offset;
7938 unsigned int i, regno;
7941 gcc_assert (cfun->machine->frame.fp_reg_size == 0);
7943 /* Calculate the number of elements in the PARALLEL. We need one element
7944 for the stack adjustment, one for each argument register save, and one
7945 for each additional register move. */
7947 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
7948 if (BITSET_P (*mask_ptr, mips16e_save_restore_regs[i]))
7951 /* Create the final PARALLEL. */
7952 pattern = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (n));
7955 /* Add the stack pointer adjustment. */
7956 set = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7957 plus_constant (stack_pointer_rtx,
7958 restore_p ? size : -size));
7959 RTX_FRAME_RELATED_P (set) = 1;
7960 XVECEXP (pattern, 0, n++) = set;
7962 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
7963 top_offset = restore_p ? size : 0;
7965 /* Save the arguments. */
7966 for (i = 0; i < nargs; i++)
7968 offset = top_offset + i * GET_MODE_SIZE (gpr_mode);
7969 set = mips16e_save_restore_reg (restore_p, offset, GP_ARG_FIRST + i);
7970 XVECEXP (pattern, 0, n++) = set;
7973 /* Then fill in the other register moves. */
7974 offset = top_offset;
7975 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
7977 regno = mips16e_save_restore_regs[i];
7978 if (BITSET_P (*mask_ptr, regno))
7980 offset -= UNITS_PER_WORD;
7981 set = mips16e_save_restore_reg (restore_p, offset, regno);
7982 XVECEXP (pattern, 0, n++) = set;
7983 *mask_ptr &= ~(1 << regno);
7987 /* Tell the caller what offset it should use for the remaining registers. */
7988 *offset_ptr = size + (offset - top_offset) + size;
7990 gcc_assert (n == XVECLEN (pattern, 0));
7995 /* PATTERN is a PARALLEL whose first element adds ADJUST to the stack
7996 pointer. Return true if PATTERN matches the kind of instruction
7997 generated by mips16e_build_save_restore. If INFO is nonnull,
7998 initialize it when returning true. */
8001 mips16e_save_restore_pattern_p (rtx pattern, HOST_WIDE_INT adjust,
8002 struct mips16e_save_restore_info *info)
8004 unsigned int i, nargs, mask;
8005 HOST_WIDE_INT top_offset, save_offset, offset, extra;
8006 rtx set, reg, mem, base;
8009 if (!GENERATE_MIPS16E_SAVE_RESTORE)
8012 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
8013 top_offset = adjust > 0 ? adjust : 0;
8015 /* Interpret all other members of the PARALLEL. */
8016 save_offset = top_offset - GET_MODE_SIZE (gpr_mode);
8020 for (n = 1; n < XVECLEN (pattern, 0); n++)
8022 /* Check that we have a SET. */
8023 set = XVECEXP (pattern, 0, n);
8024 if (GET_CODE (set) != SET)
8027 /* Check that the SET is a load (if restoring) or a store
8029 mem = adjust > 0 ? SET_SRC (set) : SET_DEST (set);
8033 /* Check that the address is the sum of the stack pointer and a
8034 possibly-zero constant offset. */
8035 mips_split_plus (XEXP (mem, 0), &base, &offset);
8036 if (base != stack_pointer_rtx)
8039 /* Check that SET's other operand is a register. */
8040 reg = adjust > 0 ? SET_DEST (set) : SET_SRC (set);
8044 /* Check for argument saves. */
8045 if (offset == top_offset + nargs * GET_MODE_SIZE (gpr_mode)
8046 && REGNO (reg) == GP_ARG_FIRST + nargs)
8048 else if (offset == save_offset)
8050 while (mips16e_save_restore_regs[i++] != REGNO (reg))
8051 if (i == ARRAY_SIZE (mips16e_save_restore_regs))
8054 mask |= 1 << REGNO (reg);
8055 save_offset -= GET_MODE_SIZE (gpr_mode);
8061 /* Check that the restrictions on register ranges are met. */
8063 mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
8064 ARRAY_SIZE (mips16e_s2_s8_regs), &extra);
8065 mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
8066 ARRAY_SIZE (mips16e_a0_a3_regs), &extra);
8070 /* Make sure that the topmost argument register is not saved twice.
8071 The checks above ensure that the same is then true for the other
8072 argument registers. */
8073 if (nargs > 0 && BITSET_P (mask, GP_ARG_FIRST + nargs - 1))
8076 /* Pass back information, if requested. */
8079 info->nargs = nargs;
8081 info->size = (adjust > 0 ? adjust : -adjust);
8087 /* Add a MIPS16e SAVE or RESTORE register-range argument to string S
8088 for the register range [MIN_REG, MAX_REG]. Return a pointer to
8089 the null terminator. */
8092 mips16e_add_register_range (char *s, unsigned int min_reg,
8093 unsigned int max_reg)
8095 if (min_reg != max_reg)
8096 s += sprintf (s, ",%s-%s", reg_names[min_reg], reg_names[max_reg]);
8098 s += sprintf (s, ",%s", reg_names[min_reg]);
8102 /* Return the assembly instruction for a MIPS16e SAVE or RESTORE instruction.
8103 PATTERN and ADJUST are as for mips16e_save_restore_pattern_p. */
8106 mips16e_output_save_restore (rtx pattern, HOST_WIDE_INT adjust)
8108 static char buffer[300];
8110 struct mips16e_save_restore_info info;
8111 unsigned int i, end;
8114 /* Parse the pattern. */
8115 if (!mips16e_save_restore_pattern_p (pattern, adjust, &info))
8118 /* Add the mnemonic. */
8119 s = strcpy (buffer, adjust > 0 ? "restore\t" : "save\t");
8122 /* Save the arguments. */
8124 s += sprintf (s, "%s-%s,", reg_names[GP_ARG_FIRST],
8125 reg_names[GP_ARG_FIRST + info.nargs - 1]);
8126 else if (info.nargs == 1)
8127 s += sprintf (s, "%s,", reg_names[GP_ARG_FIRST]);
8129 /* Emit the amount of stack space to allocate or deallocate. */
8130 s += sprintf (s, "%d", (int) info.size);
8132 /* Save or restore $16. */
8133 if (BITSET_P (info.mask, 16))
8134 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 16]);
8136 /* Save or restore $17. */
8137 if (BITSET_P (info.mask, 17))
8138 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 17]);
8140 /* Save or restore registers in the range $s2...$s8, which
8141 mips16e_s2_s8_regs lists in decreasing order. Note that this
8142 is a software register range; the hardware registers are not
8143 numbered consecutively. */
8144 end = ARRAY_SIZE (mips16e_s2_s8_regs);
8145 i = mips16e_find_first_register (info.mask, mips16e_s2_s8_regs, end);
8147 s = mips16e_add_register_range (s, mips16e_s2_s8_regs[end - 1],
8148 mips16e_s2_s8_regs[i]);
8150 /* Save or restore registers in the range $a0...$a3. */
8151 end = ARRAY_SIZE (mips16e_a0_a3_regs);
8152 i = mips16e_find_first_register (info.mask, mips16e_a0_a3_regs, end);
8154 s = mips16e_add_register_range (s, mips16e_a0_a3_regs[i],
8155 mips16e_a0_a3_regs[end - 1]);
8157 /* Save or restore $31. */
8158 if (BITSET_P (info.mask, 31))
8159 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 31]);
8164 /* Return a simplified form of X using the register values in REG_VALUES.
8165 REG_VALUES[R] is the last value assigned to hard register R, or null
8166 if R has not been modified.
8168 This function is rather limited, but is good enough for our purposes. */
8171 mips16e_collect_propagate_value (rtx x, rtx *reg_values)
8175 x = avoid_constant_pool_reference (x);
8179 x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
8180 return simplify_gen_unary (GET_CODE (x), GET_MODE (x),
8181 x0, GET_MODE (XEXP (x, 0)));
8184 if (ARITHMETIC_P (x))
8186 x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
8187 x1 = mips16e_collect_propagate_value (XEXP (x, 1), reg_values);
8188 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), x0, x1);
8192 && reg_values[REGNO (x)]
8193 && !rtx_unstable_p (reg_values[REGNO (x)]))
8194 return reg_values[REGNO (x)];
8199 /* Return true if (set DEST SRC) stores an argument register into its
8200 caller-allocated save slot, storing the number of that argument
8201 register in *REGNO_PTR if so. REG_VALUES is as for
8202 mips16e_collect_propagate_value. */
8205 mips16e_collect_argument_save_p (rtx dest, rtx src, rtx *reg_values,
8206 unsigned int *regno_ptr)
8208 unsigned int argno, regno;
8209 HOST_WIDE_INT offset, required_offset;
8212 /* Check that this is a word-mode store. */
8213 if (!MEM_P (dest) || !REG_P (src) || GET_MODE (dest) != word_mode)
8216 /* Check that the register being saved is an unmodified argument
8218 regno = REGNO (src);
8219 if (regno < GP_ARG_FIRST || regno > GP_ARG_LAST || reg_values[regno])
8221 argno = regno - GP_ARG_FIRST;
8223 /* Check whether the address is an appropriate stack pointer or
8224 frame pointer access. The frame pointer is offset from the
8225 stack pointer by the size of the outgoing arguments. */
8226 addr = mips16e_collect_propagate_value (XEXP (dest, 0), reg_values);
8227 mips_split_plus (addr, &base, &offset);
8228 required_offset = cfun->machine->frame.total_size + argno * UNITS_PER_WORD;
8229 if (base == hard_frame_pointer_rtx)
8230 required_offset -= cfun->machine->frame.args_size;
8231 else if (base != stack_pointer_rtx)
8233 if (offset != required_offset)
8240 /* A subroutine of mips_expand_prologue, called only when generating
8241 MIPS16e SAVE instructions. Search the start of the function for any
8242 instructions that save argument registers into their caller-allocated
8243 save slots. Delete such instructions and return a value N such that
8244 saving [GP_ARG_FIRST, GP_ARG_FIRST + N) would make all the deleted
8245 instructions redundant. */
8248 mips16e_collect_argument_saves (void)
8250 rtx reg_values[FIRST_PSEUDO_REGISTER];
8251 rtx insn, next, set, dest, src;
8252 unsigned int nargs, regno;
8254 push_topmost_sequence ();
8256 memset (reg_values, 0, sizeof (reg_values));
8257 for (insn = get_insns (); insn; insn = next)
8259 next = NEXT_INSN (insn);
8266 set = PATTERN (insn);
8267 if (GET_CODE (set) != SET)
8270 dest = SET_DEST (set);
8271 src = SET_SRC (set);
8272 if (mips16e_collect_argument_save_p (dest, src, reg_values, ®no))
8274 if (!BITSET_P (cfun->machine->frame.mask, regno))
8277 nargs = MAX (nargs, (regno - GP_ARG_FIRST) + 1);
8280 else if (REG_P (dest) && GET_MODE (dest) == word_mode)
8281 reg_values[REGNO (dest)]
8282 = mips16e_collect_propagate_value (src, reg_values);
8286 pop_topmost_sequence ();
8291 /* Expand the prologue into a bunch of separate insns. */
8294 mips_expand_prologue (void)
8300 if (cfun->machine->global_pointer > 0)
8301 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
8303 size = compute_frame_size (get_frame_size ());
8305 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
8306 bytes beforehand; this is enough to cover the register save area
8307 without going out of range. */
8308 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
8310 HOST_WIDE_INT step1;
8312 step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
8314 if (GENERATE_MIPS16E_SAVE_RESTORE)
8316 HOST_WIDE_INT offset;
8317 unsigned int mask, regno;
8319 /* Try to merge argument stores into the save instruction. */
8320 nargs = mips16e_collect_argument_saves ();
8322 /* Build the save instruction. */
8323 mask = cfun->machine->frame.mask;
8324 insn = mips16e_build_save_restore (false, &mask, &offset,
8326 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
8329 /* Check if we need to save other registers. */
8330 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
8331 if (BITSET_P (mask, regno - GP_REG_FIRST))
8333 offset -= GET_MODE_SIZE (gpr_mode);
8334 mips_save_restore_reg (gpr_mode, regno, offset, mips_save_reg);
8339 insn = gen_add3_insn (stack_pointer_rtx,
8342 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
8344 mips_for_each_saved_reg (size, mips_save_reg);
8348 /* Allocate the rest of the frame. */
8351 if (SMALL_OPERAND (-size))
8352 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
8354 GEN_INT (-size)))) = 1;
8357 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
8360 /* There are no instructions to add or subtract registers
8361 from the stack pointer, so use the frame pointer as a
8362 temporary. We should always be using a frame pointer
8363 in this case anyway. */
8364 gcc_assert (frame_pointer_needed);
8365 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
8366 emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
8367 hard_frame_pointer_rtx,
8368 MIPS_PROLOGUE_TEMP (Pmode)));
8369 mips_emit_move (stack_pointer_rtx, hard_frame_pointer_rtx);
8372 emit_insn (gen_sub3_insn (stack_pointer_rtx,
8374 MIPS_PROLOGUE_TEMP (Pmode)));
8376 /* Describe the combined effect of the previous instructions. */
8378 (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8379 plus_constant (stack_pointer_rtx, -size)));
8383 /* Set up the frame pointer, if we're using one. In mips16 code,
8384 we point the frame pointer ahead of the outgoing argument area.
8385 This should allow more variables & incoming arguments to be
8386 accessed with unextended instructions. */
8387 if (frame_pointer_needed)
8389 if (TARGET_MIPS16 && cfun->machine->frame.args_size != 0)
8391 rtx offset = GEN_INT (cfun->machine->frame.args_size);
8392 if (SMALL_OPERAND (cfun->machine->frame.args_size))
8394 (emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
8399 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), offset);
8400 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
8401 emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
8402 hard_frame_pointer_rtx,
8403 MIPS_PROLOGUE_TEMP (Pmode)));
8405 (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
8406 plus_constant (stack_pointer_rtx,
8407 cfun->machine->frame.args_size)));
8411 RTX_FRAME_RELATED_P (mips_emit_move (hard_frame_pointer_rtx,
8412 stack_pointer_rtx)) = 1;
8415 mips_emit_loadgp ();
8417 /* If generating o32/o64 abicalls, save $gp on the stack. */
8418 if (TARGET_ABICALLS && TARGET_OLDABI && !current_function_is_leaf)
8419 emit_insn (gen_cprestore (GEN_INT (current_function_outgoing_args_size)));
8421 /* If we are profiling, make sure no instructions are scheduled before
8422 the call to mcount. */
8424 if (current_function_profile)
8425 emit_insn (gen_blockage ());
8428 /* Do any necessary cleanup after a function to restore stack, frame,
8431 #define RA_MASK BITMASK_HIGH /* 1 << 31 */
8434 mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
8435 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
8437 /* Reinstate the normal $gp. */
8438 SET_REGNO (pic_offset_table_rtx, GLOBAL_POINTER_REGNUM);
8439 mips_output_cplocal ();
8441 if (cfun->machine->all_noreorder_p)
8443 /* Avoid using %>%) since it adds excess whitespace. */
8444 output_asm_insn (".set\tmacro", 0);
8445 output_asm_insn (".set\treorder", 0);
8446 set_noreorder = set_nomacro = 0;
8449 if (!FUNCTION_NAME_ALREADY_DECLARED && !flag_inhibit_size_directive)
8453 /* Get the function name the same way that toplev.c does before calling
8454 assemble_start_function. This is needed so that the name used here
8455 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
8456 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
8457 fputs ("\t.end\t", file);
8458 assemble_name (file, fnname);
8463 /* Emit instructions to restore register REG from slot MEM. */
8466 mips_restore_reg (rtx reg, rtx mem)
8468 /* There's no mips16 instruction to load $31 directly. Load into
8469 $7 instead and adjust the return insn appropriately. */
8470 if (TARGET_MIPS16 && REGNO (reg) == GP_REG_FIRST + 31)
8471 reg = gen_rtx_REG (GET_MODE (reg), 7);
8473 if (TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
8475 /* Can't restore directly; move through a temporary. */
8476 mips_emit_move (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
8477 mips_emit_move (reg, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
8480 mips_emit_move (reg, mem);
8484 /* Expand the epilogue into a bunch of separate insns. SIBCALL_P is true
8485 if this epilogue precedes a sibling call, false if it is for a normal
8486 "epilogue" pattern. */
8489 mips_expand_epilogue (int sibcall_p)
8491 HOST_WIDE_INT step1, step2;
8494 if (!sibcall_p && mips_can_use_return_insn ())
8496 emit_jump_insn (gen_return ());
8500 /* In mips16 mode, if the return value should go into a floating-point
8501 register, we need to call a helper routine to copy it over. */
8502 if (mips16_cfun_returns_in_fpr_p ())
8511 enum machine_mode return_mode;
8513 return_type = DECL_RESULT (current_function_decl);
8514 return_mode = DECL_MODE (return_type);
8516 name = ACONCAT (("__mips16_ret_",
8517 mips16_call_stub_mode_suffix (return_mode),
8519 id = get_identifier (name);
8520 func = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
8521 retval = gen_rtx_REG (return_mode, GP_RETURN);
8522 call = gen_call_value_internal (retval, func, const0_rtx);
8523 insn = emit_call_insn (call);
8524 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), retval);
8527 /* Split the frame into two. STEP1 is the amount of stack we should
8528 deallocate before restoring the registers. STEP2 is the amount we
8529 should deallocate afterwards.
8531 Start off by assuming that no registers need to be restored. */
8532 step1 = cfun->machine->frame.total_size;
8535 /* Work out which register holds the frame address. Account for the
8536 frame pointer offset used by mips16 code. */
8537 if (!frame_pointer_needed)
8538 base = stack_pointer_rtx;
8541 base = hard_frame_pointer_rtx;
8543 step1 -= cfun->machine->frame.args_size;
8546 /* If we need to restore registers, deallocate as much stack as
8547 possible in the second step without going out of range. */
8548 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
8550 step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
8554 /* Set TARGET to BASE + STEP1. */
8560 /* Get an rtx for STEP1 that we can add to BASE. */
8561 adjust = GEN_INT (step1);
8562 if (!SMALL_OPERAND (step1))
8564 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), adjust);
8565 adjust = MIPS_EPILOGUE_TEMP (Pmode);
8568 /* Normal mode code can copy the result straight into $sp. */
8570 target = stack_pointer_rtx;
8572 emit_insn (gen_add3_insn (target, base, adjust));
8575 /* Copy TARGET into the stack pointer. */
8576 if (target != stack_pointer_rtx)
8577 mips_emit_move (stack_pointer_rtx, target);
8579 /* If we're using addressing macros, $gp is implicitly used by all
8580 SYMBOL_REFs. We must emit a blockage insn before restoring $gp
8582 if (TARGET_CALL_SAVED_GP && !TARGET_EXPLICIT_RELOCS)
8583 emit_insn (gen_blockage ());
8585 if (GENERATE_MIPS16E_SAVE_RESTORE && cfun->machine->frame.mask != 0)
8587 unsigned int regno, mask;
8588 HOST_WIDE_INT offset;
8591 /* Generate the restore instruction. */
8592 mask = cfun->machine->frame.mask;
8593 restore = mips16e_build_save_restore (true, &mask, &offset, 0, step2);
8595 /* Restore any other registers manually. */
8596 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
8597 if (BITSET_P (mask, regno - GP_REG_FIRST))
8599 offset -= GET_MODE_SIZE (gpr_mode);
8600 mips_save_restore_reg (gpr_mode, regno, offset, mips_restore_reg);
8603 /* Restore the remaining registers and deallocate the final bit
8605 emit_insn (restore);
8609 /* Restore the registers. */
8610 mips_for_each_saved_reg (cfun->machine->frame.total_size - step2,
8613 /* Deallocate the final bit of the frame. */
8615 emit_insn (gen_add3_insn (stack_pointer_rtx,
8620 /* Add in the __builtin_eh_return stack adjustment. We need to
8621 use a temporary in mips16 code. */
8622 if (current_function_calls_eh_return)
8626 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
8627 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
8628 MIPS_EPILOGUE_TEMP (Pmode),
8629 EH_RETURN_STACKADJ_RTX));
8630 mips_emit_move (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
8633 emit_insn (gen_add3_insn (stack_pointer_rtx,
8635 EH_RETURN_STACKADJ_RTX));
8640 /* When generating MIPS16 code, the normal mips_for_each_saved_reg
8641 path will restore the return address into $7 rather than $31. */
8643 && !GENERATE_MIPS16E_SAVE_RESTORE
8644 && (cfun->machine->frame.mask & RA_MASK) != 0)
8645 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
8646 GP_REG_FIRST + 7)));
8648 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
8649 GP_REG_FIRST + 31)));
8653 /* Return nonzero if this function is known to have a null epilogue.
8654 This allows the optimizer to omit jumps to jumps if no stack
8658 mips_can_use_return_insn (void)
8660 if (! reload_completed)
8663 if (df_regs_ever_live_p (31) || current_function_profile)
8666 /* In mips16 mode, a function that returns a floating point value
8667 needs to arrange to copy the return value into the floating point
8669 if (mips16_cfun_returns_in_fpr_p ())
8672 if (cfun->machine->frame.initialized)
8673 return cfun->machine->frame.total_size == 0;
8675 return compute_frame_size (get_frame_size ()) == 0;
8678 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
8679 in order to avoid duplicating too much logic from elsewhere. */
8682 mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8683 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8686 rtx this, temp1, temp2, insn, fnaddr;
8688 /* Pretend to be a post-reload pass while generating rtl. */
8689 reload_completed = 1;
8691 /* Mark the end of the (empty) prologue. */
8692 emit_note (NOTE_INSN_PROLOGUE_END);
8694 /* Pick a global pointer. Use a call-clobbered register if
8695 TARGET_CALL_SAVED_GP, so that we can use a sibcall. */
8698 cfun->machine->global_pointer =
8699 TARGET_CALL_SAVED_GP ? 15 : GLOBAL_POINTER_REGNUM;
8701 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
8704 /* Set up the global pointer for n32 or n64 abicalls. If
8705 LOADGP_ABSOLUTE then the thunk does not use the gp and there is
8706 no need to load it.*/
8707 if (mips_current_loadgp_style () != LOADGP_ABSOLUTE
8708 || !targetm.binds_local_p (function))
8709 mips_emit_loadgp ();
8711 /* We need two temporary registers in some cases. */
8712 temp1 = gen_rtx_REG (Pmode, 2);
8713 temp2 = gen_rtx_REG (Pmode, 3);
8715 /* Find out which register contains the "this" pointer. */
8716 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8717 this = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
8719 this = gen_rtx_REG (Pmode, GP_ARG_FIRST);
8721 /* Add DELTA to THIS. */
8724 rtx offset = GEN_INT (delta);
8725 if (!SMALL_OPERAND (delta))
8727 mips_emit_move (temp1, offset);
8730 emit_insn (gen_add3_insn (this, this, offset));
8733 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
8734 if (vcall_offset != 0)
8738 /* Set TEMP1 to *THIS. */
8739 mips_emit_move (temp1, gen_rtx_MEM (Pmode, this));
8741 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
8742 addr = mips_add_offset (temp2, temp1, vcall_offset);
8744 /* Load the offset and add it to THIS. */
8745 mips_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
8746 emit_insn (gen_add3_insn (this, this, temp1));
8749 /* Jump to the target function. Use a sibcall if direct jumps are
8750 allowed, otherwise load the address into a register first. */
8751 fnaddr = XEXP (DECL_RTL (function), 0);
8752 if (TARGET_MIPS16 || TARGET_USE_GOT || SYMBOL_REF_LONG_CALL_P (fnaddr)
8753 || SYMBOL_REF_MIPS16_FUNC_P (fnaddr))
8755 /* This is messy. gas treats "la $25,foo" as part of a call
8756 sequence and may allow a global "foo" to be lazily bound.
8757 The general move patterns therefore reject this combination.
8759 In this context, lazy binding would actually be OK
8760 for TARGET_CALL_CLOBBERED_GP, but it's still wrong for
8761 TARGET_CALL_SAVED_GP; see mips_load_call_address.
8762 We must therefore load the address via a temporary
8763 register if mips_dangerous_for_la25_p.
8765 If we jump to the temporary register rather than $25, the assembler
8766 can use the move insn to fill the jump's delay slot. */
8767 if (TARGET_USE_PIC_FN_ADDR_REG
8768 && !mips_dangerous_for_la25_p (fnaddr))
8769 temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
8770 mips_load_call_address (temp1, fnaddr, true);
8772 if (TARGET_USE_PIC_FN_ADDR_REG
8773 && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
8774 mips_emit_move (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
8775 emit_jump_insn (gen_indirect_jump (temp1));
8779 insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
8780 SIBLING_CALL_P (insn) = 1;
8783 /* Run just enough of rest_of_compilation. This sequence was
8784 "borrowed" from alpha.c. */
8785 insn = get_insns ();
8786 insn_locators_alloc ();
8787 split_all_insns_noflow ();
8788 mips16_lay_out_constants ();
8789 shorten_branches (insn);
8790 final_start_function (insn, file, 1);
8791 final (insn, file, 1);
8792 final_end_function ();
8794 /* Clean up the vars set above. Note that final_end_function resets
8795 the global pointer for us. */
8796 reload_completed = 0;
8799 /* Implement TARGET_SELECT_RTX_SECTION. */
8802 mips_select_rtx_section (enum machine_mode mode, rtx x,
8803 unsigned HOST_WIDE_INT align)
8805 /* ??? Consider using mergeable small data sections. */
8806 if (mips_rtx_constant_in_small_data_p (mode))
8807 return get_named_section (NULL, ".sdata", 0);
8809 return default_elf_select_rtx_section (mode, x, align);
8812 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
8814 The complication here is that, with the combination TARGET_ABICALLS
8815 && !TARGET_GPWORD, jump tables will use absolute addresses, and should
8816 therefore not be included in the read-only part of a DSO. Handle such
8817 cases by selecting a normal data section instead of a read-only one.
8818 The logic apes that in default_function_rodata_section. */
8821 mips_function_rodata_section (tree decl)
8823 if (!TARGET_ABICALLS || TARGET_GPWORD)
8824 return default_function_rodata_section (decl);
8826 if (decl && DECL_SECTION_NAME (decl))
8828 const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
8829 if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
8831 char *rname = ASTRDUP (name);
8833 return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
8835 else if (flag_function_sections && flag_data_sections
8836 && strncmp (name, ".text.", 6) == 0)
8838 char *rname = ASTRDUP (name);
8839 memcpy (rname + 1, "data", 4);
8840 return get_section (rname, SECTION_WRITE, decl);
8843 return data_section;
8846 /* Implement TARGET_IN_SMALL_DATA_P. This function controls whether
8847 locally-defined objects go in a small data section. It also controls
8848 the setting of the SYMBOL_REF_SMALL_P flag, which in turn helps
8849 mips_classify_symbol decide when to use %gp_rel(...)($gp) accesses. */
8852 mips_in_small_data_p (const_tree decl)
8856 if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
8859 /* We don't yet generate small-data references for -mabicalls or
8860 VxWorks RTP code. See the related -G handling in override_options. */
8861 if (TARGET_ABICALLS || TARGET_VXWORKS_RTP)
8864 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
8868 /* Reject anything that isn't in a known small-data section. */
8869 name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
8870 if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
8873 /* If a symbol is defined externally, the assembler will use the
8874 usual -G rules when deciding how to implement macros. */
8875 if (mips_lo_relocs[SYMBOL_GP_RELATIVE] || !DECL_EXTERNAL (decl))
8878 else if (TARGET_EMBEDDED_DATA)
8880 /* Don't put constants into the small data section: we want them
8881 to be in ROM rather than RAM. */
8882 if (TREE_CODE (decl) != VAR_DECL)
8885 if (TREE_READONLY (decl)
8886 && !TREE_SIDE_EFFECTS (decl)
8887 && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
8891 /* Enforce -mlocal-sdata. */
8892 if (!TARGET_LOCAL_SDATA && !TREE_PUBLIC (decl))
8895 /* Enforce -mextern-sdata. */
8896 if (!TARGET_EXTERN_SDATA && DECL_P (decl))
8898 if (DECL_EXTERNAL (decl))
8900 if (DECL_COMMON (decl) && DECL_INITIAL (decl) == NULL)
8904 size = int_size_in_bytes (TREE_TYPE (decl));
8905 return (size > 0 && size <= mips_section_threshold);
8908 /* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
8909 anchors for small data: the GP register acts as an anchor in that
8910 case. We also don't want to use them for PC-relative accesses,
8911 where the PC acts as an anchor. */
8914 mips_use_anchors_for_symbol_p (const_rtx symbol)
8916 switch (mips_classify_symbol (symbol, SYMBOL_CONTEXT_MEM))
8918 case SYMBOL_PC_RELATIVE:
8919 case SYMBOL_GP_RELATIVE:
8927 /* See whether VALTYPE is a record whose fields should be returned in
8928 floating-point registers. If so, return the number of fields and
8929 list them in FIELDS (which should have two elements). Return 0
8932 For n32 & n64, a structure with one or two fields is returned in
8933 floating-point registers as long as every field has a floating-point
8937 mips_fpr_return_fields (const_tree valtype, tree *fields)
8945 if (TREE_CODE (valtype) != RECORD_TYPE)
8949 for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
8951 if (TREE_CODE (field) != FIELD_DECL)
8954 if (TREE_CODE (TREE_TYPE (field)) != REAL_TYPE)
8960 fields[i++] = field;
8966 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
8967 a value in the most significant part of $2/$3 if:
8969 - the target is big-endian;
8971 - the value has a structure or union type (we generalize this to
8972 cover aggregates from other languages too); and
8974 - the structure is not returned in floating-point registers. */
8977 mips_return_in_msb (const_tree valtype)
8981 return (TARGET_NEWABI
8982 && TARGET_BIG_ENDIAN
8983 && AGGREGATE_TYPE_P (valtype)
8984 && mips_fpr_return_fields (valtype, fields) == 0);
8988 /* Return a composite value in a pair of floating-point registers.
8989 MODE1 and OFFSET1 are the mode and byte offset for the first value,
8990 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
8993 For n32 & n64, $f0 always holds the first value and $f2 the second.
8994 Otherwise the values are packed together as closely as possible. */
8997 mips_return_fpr_pair (enum machine_mode mode,
8998 enum machine_mode mode1, HOST_WIDE_INT offset1,
8999 enum machine_mode mode2, HOST_WIDE_INT offset2)
9003 inc = (TARGET_NEWABI ? 2 : MAX_FPRS_PER_FMT);
9004 return gen_rtx_PARALLEL
9007 gen_rtx_EXPR_LIST (VOIDmode,
9008 gen_rtx_REG (mode1, FP_RETURN),
9010 gen_rtx_EXPR_LIST (VOIDmode,
9011 gen_rtx_REG (mode2, FP_RETURN + inc),
9012 GEN_INT (offset2))));
9017 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
9018 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
9019 VALTYPE is null and MODE is the mode of the return value. */
9022 mips_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
9023 enum machine_mode mode)
9030 mode = TYPE_MODE (valtype);
9031 unsignedp = TYPE_UNSIGNED (valtype);
9033 /* Since we define TARGET_PROMOTE_FUNCTION_RETURN that returns
9034 true, we must promote the mode just as PROMOTE_MODE does. */
9035 mode = promote_mode (valtype, mode, &unsignedp, 1);
9037 /* Handle structures whose fields are returned in $f0/$f2. */
9038 switch (mips_fpr_return_fields (valtype, fields))
9041 return gen_rtx_REG (mode, FP_RETURN);
9044 return mips_return_fpr_pair (mode,
9045 TYPE_MODE (TREE_TYPE (fields[0])),
9046 int_byte_position (fields[0]),
9047 TYPE_MODE (TREE_TYPE (fields[1])),
9048 int_byte_position (fields[1]));
9051 /* If a value is passed in the most significant part of a register, see
9052 whether we have to round the mode up to a whole number of words. */
9053 if (mips_return_in_msb (valtype))
9055 HOST_WIDE_INT size = int_size_in_bytes (valtype);
9056 if (size % UNITS_PER_WORD != 0)
9058 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
9059 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
9063 /* For EABI, the class of return register depends entirely on MODE.
9064 For example, "struct { some_type x; }" and "union { some_type x; }"
9065 are returned in the same way as a bare "some_type" would be.
9066 Other ABIs only use FPRs for scalar, complex or vector types. */
9067 if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
9068 return gen_rtx_REG (mode, GP_RETURN);
9073 /* Handle long doubles for n32 & n64. */
9075 return mips_return_fpr_pair (mode,
9077 DImode, GET_MODE_SIZE (mode) / 2);
9079 if (mips_return_mode_in_fpr_p (mode))
9081 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
9082 return mips_return_fpr_pair (mode,
9083 GET_MODE_INNER (mode), 0,
9084 GET_MODE_INNER (mode),
9085 GET_MODE_SIZE (mode) / 2);
9087 return gen_rtx_REG (mode, FP_RETURN);
9091 return gen_rtx_REG (mode, GP_RETURN);
9094 /* Return nonzero when an argument must be passed by reference. */
9097 mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
9098 enum machine_mode mode, const_tree type,
9099 bool named ATTRIBUTE_UNUSED)
9101 if (mips_abi == ABI_EABI)
9105 /* ??? How should SCmode be handled? */
9106 if (mode == DImode || mode == DFmode
9107 || mode == DQmode || mode == UDQmode
9108 || mode == DAmode || mode == UDAmode)
9111 size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
9112 return size == -1 || size > UNITS_PER_WORD;
9116 /* If we have a variable-sized parameter, we have no choice. */
9117 return targetm.calls.must_pass_in_stack (mode, type);
9122 mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
9123 enum machine_mode mode ATTRIBUTE_UNUSED,
9124 const_tree type ATTRIBUTE_UNUSED, bool named)
9126 return mips_abi == ABI_EABI && named;
9129 /* Return true if registers of class CLASS cannot change from mode FROM
9133 mips_cannot_change_mode_class (enum machine_mode from,
9134 enum machine_mode to, enum reg_class class)
9136 if (MIN (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) <= UNITS_PER_WORD
9137 && MAX (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) > UNITS_PER_WORD)
9139 if (TARGET_BIG_ENDIAN)
9141 /* When a multi-word value is stored in paired floating-point
9142 registers, the first register always holds the low word.
9143 We therefore can't allow FPRs to change between single-word
9144 and multi-word modes. */
9145 if (MAX_FPRS_PER_FMT > 1 && reg_classes_intersect_p (FP_REGS, class))
9150 /* gcc assumes that each word of a multiword register can be accessed
9151 individually using SUBREGs. This is not true for floating-point
9152 registers if they are bigger than a word. */
9153 if (UNITS_PER_FPREG > UNITS_PER_WORD
9154 && GET_MODE_SIZE (from) > UNITS_PER_WORD
9155 && GET_MODE_SIZE (to) < UNITS_PER_FPREG
9156 && reg_classes_intersect_p (FP_REGS, class))
9159 /* Loading a 32-bit value into a 64-bit floating-point register
9160 will not sign-extend the value, despite what LOAD_EXTEND_OP says.
9161 We can't allow 64-bit float registers to change from SImode to
9166 && GET_MODE_SIZE (to) >= UNITS_PER_WORD
9167 && reg_classes_intersect_p (FP_REGS, class))
9173 /* Return true if X should not be moved directly into register $25.
9174 We need this because many versions of GAS will treat "la $25,foo" as
9175 part of a call sequence and so allow a global "foo" to be lazily bound. */
9178 mips_dangerous_for_la25_p (rtx x)
9180 return (!TARGET_EXPLICIT_RELOCS
9182 && GET_CODE (x) == SYMBOL_REF
9183 && mips_global_symbol_p (x));
9186 /* Implement PREFERRED_RELOAD_CLASS. */
9189 mips_preferred_reload_class (rtx x, enum reg_class class)
9191 if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, class))
9194 if (TARGET_HARD_FLOAT
9195 && FLOAT_MODE_P (GET_MODE (x))
9196 && reg_class_subset_p (FP_REGS, class))
9199 if (reg_class_subset_p (GR_REGS, class))
9202 if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, class))
9208 /* This function returns the register class required for a secondary
9209 register when copying between one of the registers in CLASS, and X,
9210 using MODE. If IN_P is nonzero, the copy is going from X to the
9211 register, otherwise the register is the source. A return value of
9212 NO_REGS means that no secondary register is required. */
9215 mips_secondary_reload_class (enum reg_class class,
9216 enum machine_mode mode, rtx x, int in_p)
9218 enum reg_class gr_regs = TARGET_MIPS16 ? M16_REGS : GR_REGS;
9222 if (REG_P (x)|| GET_CODE (x) == SUBREG)
9223 regno = true_regnum (x);
9225 gp_reg_p = TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
9227 if (mips_dangerous_for_la25_p (x))
9230 if (TEST_HARD_REG_BIT (reg_class_contents[(int) class], 25))
9234 /* Copying from HI or LO to anywhere other than a general register
9235 requires a general register.
9236 This rule applies to both the original HI/LO pair and the new
9237 DSP accumulators. */
9238 if (reg_class_subset_p (class, ACC_REGS))
9240 if (TARGET_MIPS16 && in_p)
9242 /* We can't really copy to HI or LO at all in mips16 mode. */
9245 return gp_reg_p ? NO_REGS : gr_regs;
9247 if (ACC_REG_P (regno))
9249 if (TARGET_MIPS16 && ! in_p)
9251 /* We can't really copy to HI or LO at all in mips16 mode. */
9254 return class == gr_regs ? NO_REGS : gr_regs;
9257 /* We can only copy a value to a condition code register from a
9258 floating point register, and even then we require a scratch
9259 floating point register. We can only copy a value out of a
9260 condition code register into a general register. */
9261 if (class == ST_REGS)
9265 return gp_reg_p ? NO_REGS : gr_regs;
9267 if (ST_REG_P (regno))
9271 return class == gr_regs ? NO_REGS : gr_regs;
9274 if (class == FP_REGS)
9278 /* In this case we can use lwc1, swc1, ldc1 or sdc1. */
9281 else if (CONSTANT_P (x) && GET_MODE_CLASS (mode) == MODE_FLOAT)
9283 /* We can use the l.s and l.d macros to load floating-point
9284 constants. ??? For l.s, we could probably get better
9285 code by returning GR_REGS here. */
9288 else if (gp_reg_p || x == CONST0_RTX (mode))
9290 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
9293 else if (FP_REG_P (regno))
9295 /* In this case we can use mov.s or mov.d. */
9300 /* Otherwise, we need to reload through an integer register. */
9305 /* In mips16 mode, going between memory and anything but M16_REGS
9306 requires an M16_REG. */
9309 if (class != M16_REGS && class != M16_NA_REGS)
9317 if (class == M16_REGS || class == M16_NA_REGS)
9326 /* Implement CLASS_MAX_NREGS.
9328 - UNITS_PER_FPREG controls the number of registers needed by FP_REGS.
9330 - ST_REGS are always hold CCmode values, and CCmode values are
9331 considered to be 4 bytes wide.
9333 All other register classes are covered by UNITS_PER_WORD. Note that
9334 this is true even for unions of integer and float registers when the
9335 latter are smaller than the former. The only supported combination
9336 in which case this occurs is -mgp64 -msingle-float, which has 64-bit
9337 words but 32-bit float registers. A word-based calculation is correct
9338 in that case since -msingle-float disallows multi-FPR values. */
9341 mips_class_max_nregs (enum reg_class class ATTRIBUTE_UNUSED,
9342 enum machine_mode mode)
9344 if (class == ST_REGS)
9345 return (GET_MODE_SIZE (mode) + 3) / 4;
9346 else if (class == FP_REGS)
9347 return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
9349 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
9353 mips_valid_pointer_mode (enum machine_mode mode)
9355 return (mode == SImode || (TARGET_64BIT && mode == DImode));
9358 /* Target hook for vector_mode_supported_p. */
9361 mips_vector_mode_supported_p (enum machine_mode mode)
9366 return TARGET_PAIRED_SINGLE_FLOAT;
9383 /* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
9386 mips_scalar_mode_supported_p (enum machine_mode mode)
9388 if (ALL_FIXED_POINT_MODE_P (mode)
9389 && GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD)
9392 return default_scalar_mode_supported_p (mode);
9395 /* If we can access small data directly (using gp-relative relocation
9396 operators) return the small data pointer, otherwise return null.
9398 For each mips16 function which refers to GP relative symbols, we
9399 use a pseudo register, initialized at the start of the function, to
9400 hold the $gp value. */
9403 mips16_gp_pseudo_reg (void)
9405 if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
9406 cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
9408 /* Don't initialize the pseudo register if we are being called from
9409 the tree optimizers' cost-calculation routines. */
9410 if (!cfun->machine->initialized_mips16_gp_pseudo_p
9411 && (current_ir_type () != IR_GIMPLE || currently_expanding_to_rtl))
9415 /* We want to initialize this to a value which gcc will believe
9417 insn = gen_load_const_gp (cfun->machine->mips16_gp_pseudo_rtx);
9419 push_topmost_sequence ();
9420 /* We need to emit the initialization after the FUNCTION_BEG
9421 note, so that it will be integrated. */
9422 for (scan = get_insns (); scan != NULL_RTX; scan = NEXT_INSN (scan))
9424 && NOTE_KIND (scan) == NOTE_INSN_FUNCTION_BEG)
9426 if (scan == NULL_RTX)
9427 scan = get_insns ();
9428 insn = emit_insn_after (insn, scan);
9429 pop_topmost_sequence ();
9431 cfun->machine->initialized_mips16_gp_pseudo_p = true;
9434 return cfun->machine->mips16_gp_pseudo_rtx;
9437 /* Write out code to move floating point arguments in or out of
9438 general registers. Output the instructions to FILE. FP_CODE is
9439 the code describing which arguments are present (see the comment at
9440 the definition of CUMULATIVE_ARGS in mips.h). FROM_FP_P is nonzero if
9441 we are copying from the floating point registers. */
9444 mips16_fp_args (FILE *file, int fp_code, int from_fp_p)
9449 CUMULATIVE_ARGS cum;
9451 /* This code only works for the original 32-bit ABI and the O64 ABI. */
9452 gcc_assert (TARGET_OLDABI);
9459 init_cumulative_args (&cum, NULL, NULL);
9461 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
9463 enum machine_mode mode;
9464 struct mips_arg_info info;
9468 else if ((f & 3) == 2)
9473 mips_arg_info (&cum, mode, NULL, true, &info);
9474 gparg = mips_arg_regno (&info, false);
9475 fparg = mips_arg_regno (&info, true);
9478 fprintf (file, "\t%s\t%s,%s\n", s,
9479 reg_names[gparg], reg_names[fparg]);
9480 else if (TARGET_64BIT)
9481 fprintf (file, "\td%s\t%s,%s\n", s,
9482 reg_names[gparg], reg_names[fparg]);
9483 else if (ISA_HAS_MXHC1)
9484 /* -mips32r2 -mfp64 */
9485 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n",
9487 reg_names[gparg + (WORDS_BIG_ENDIAN ? 1 : 0)],
9489 from_fp_p ? "mfhc1" : "mthc1",
9490 reg_names[gparg + (WORDS_BIG_ENDIAN ? 0 : 1)],
9492 else if (TARGET_BIG_ENDIAN)
9493 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
9494 reg_names[gparg], reg_names[fparg + 1], s,
9495 reg_names[gparg + 1], reg_names[fparg]);
9497 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
9498 reg_names[gparg], reg_names[fparg], s,
9499 reg_names[gparg + 1], reg_names[fparg + 1]);
9501 function_arg_advance (&cum, mode, NULL, true);
9505 /* Build a mips16 function stub. This is used for functions which
9506 take arguments in the floating point registers. It is 32-bit code
9507 that moves the floating point args into the general registers, and
9508 then jumps to the 16-bit code. */
9511 build_mips16_function_stub (FILE *file)
9514 char *secname, *stubname;
9515 tree stubid, stubdecl;
9519 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
9520 secname = (char *) alloca (strlen (fnname) + 20);
9521 sprintf (secname, ".mips16.fn.%s", fnname);
9522 stubname = (char *) alloca (strlen (fnname) + 20);
9523 sprintf (stubname, "__fn_stub_%s", fnname);
9524 stubid = get_identifier (stubname);
9525 stubdecl = build_decl (FUNCTION_DECL, stubid,
9526 build_function_type (void_type_node, NULL_TREE));
9527 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
9528 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
9530 fprintf (file, "\t# Stub function for %s (", current_function_name ());
9532 for (f = (unsigned int) current_function_args_info.fp_code; f != 0; f >>= 2)
9534 fprintf (file, "%s%s",
9535 need_comma ? ", " : "",
9536 (f & 3) == 1 ? "float" : "double");
9539 fprintf (file, ")\n");
9541 fprintf (file, "\t.set\tnomips16\n");
9542 switch_to_section (function_section (stubdecl));
9543 ASM_OUTPUT_ALIGN (file, floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT));
9545 /* ??? If FUNCTION_NAME_ALREADY_DECLARED is defined, then we are
9546 within a .ent, and we cannot emit another .ent. */
9547 if (!FUNCTION_NAME_ALREADY_DECLARED)
9549 fputs ("\t.ent\t", file);
9550 assemble_name (file, stubname);
9554 assemble_name (file, stubname);
9555 fputs (":\n", file);
9557 /* We don't want the assembler to insert any nops here. */
9558 fprintf (file, "\t.set\tnoreorder\n");
9560 mips16_fp_args (file, current_function_args_info.fp_code, 1);
9562 fprintf (asm_out_file, "\t.set\tnoat\n");
9563 fprintf (asm_out_file, "\tla\t%s,", reg_names[GP_REG_FIRST + 1]);
9564 assemble_name (file, fnname);
9565 fprintf (file, "\n");
9566 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
9567 fprintf (asm_out_file, "\t.set\tat\n");
9569 /* Unfortunately, we can't fill the jump delay slot. We can't fill
9570 with one of the mfc1 instructions, because the result is not
9571 available for one instruction, so if the very first instruction
9572 in the function refers to the register, it will see the wrong
9574 fprintf (file, "\tnop\n");
9576 fprintf (file, "\t.set\treorder\n");
9578 if (!FUNCTION_NAME_ALREADY_DECLARED)
9580 fputs ("\t.end\t", file);
9581 assemble_name (file, stubname);
9585 switch_to_section (function_section (current_function_decl));
9588 /* We keep a list of functions for which we have already built stubs
9589 in build_mips16_call_stub. */
9593 struct mips16_stub *next;
9598 static struct mips16_stub *mips16_stubs;
9600 /* Emit code to return a double value from a mips16 stub. GPREG is the
9601 first GP reg to use, FPREG is the first FP reg to use. */
9604 mips16_fpret_double (int gpreg, int fpreg)
9607 fprintf (asm_out_file, "\tdmfc1\t%s,%s\n",
9608 reg_names[gpreg], reg_names[fpreg]);
9609 else if (TARGET_FLOAT64)
9611 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9612 reg_names[gpreg + WORDS_BIG_ENDIAN],
9614 fprintf (asm_out_file, "\tmfhc1\t%s,%s\n",
9615 reg_names[gpreg + !WORDS_BIG_ENDIAN],
9620 if (TARGET_BIG_ENDIAN)
9622 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9623 reg_names[gpreg + 0],
9624 reg_names[fpreg + 1]);
9625 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9626 reg_names[gpreg + 1],
9627 reg_names[fpreg + 0]);
9631 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9632 reg_names[gpreg + 0],
9633 reg_names[fpreg + 0]);
9634 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9635 reg_names[gpreg + 1],
9636 reg_names[fpreg + 1]);
9641 /* Build a call stub for a mips16 call. A stub is needed if we are
9642 passing any floating point values which should go into the floating
9643 point registers. If we are, and the call turns out to be to a
9644 32-bit function, the stub will be used to move the values into the
9645 floating point registers before calling the 32-bit function. The
9646 linker will magically adjust the function call to either the 16-bit
9647 function or the 32-bit stub, depending upon where the function call
9648 is actually defined.
9650 Similarly, we need a stub if the return value might come back in a
9651 floating point register.
9653 RETVAL is the location of the return value, or null if this is
9654 a call rather than a call_value. FN is the address of the
9655 function and ARG_SIZE is the size of the arguments. FP_CODE
9656 is the code built by function_arg. This function returns a nonzero
9657 value if it builds the call instruction itself. */
9660 build_mips16_call_stub (rtx retval, rtx fn, rtx arg_size, int fp_code)
9664 char *secname, *stubname;
9665 struct mips16_stub *l;
9666 tree stubid, stubdecl;
9671 /* We don't need to do anything if we aren't in mips16 mode, or if
9672 we were invoked with the -msoft-float option. */
9673 if (!TARGET_MIPS16 || TARGET_SOFT_FLOAT_ABI)
9676 /* Figure out whether the value might come back in a floating point
9679 fpret = mips_return_mode_in_fpr_p (GET_MODE (retval));
9681 /* We don't need to do anything if there were no floating point
9682 arguments and the value will not be returned in a floating point
9684 if (fp_code == 0 && ! fpret)
9687 /* We don't need to do anything if this is a call to a special
9688 mips16 support function. */
9689 if (GET_CODE (fn) == SYMBOL_REF
9690 && strncmp (XSTR (fn, 0), "__mips16_", 9) == 0)
9693 /* This code will only work for o32 and o64 abis. The other ABI's
9694 require more sophisticated support. */
9695 gcc_assert (TARGET_OLDABI);
9697 /* If we're calling via a function pointer, then we must always call
9698 via a stub. There are magic stubs provided in libgcc.a for each
9699 of the required cases. Each of them expects the function address
9700 to arrive in register $2. */
9702 if (GET_CODE (fn) != SYMBOL_REF)
9708 /* ??? If this code is modified to support other ABI's, we need
9709 to handle PARALLEL return values here. */
9712 sprintf (buf, "__mips16_call_stub_%s_%d",
9713 mips16_call_stub_mode_suffix (GET_MODE (retval)),
9716 sprintf (buf, "__mips16_call_stub_%d",
9719 id = get_identifier (buf);
9720 stub_fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
9722 mips_emit_move (gen_rtx_REG (Pmode, 2), fn);
9724 if (retval == NULL_RTX)
9725 insn = gen_call_internal (stub_fn, arg_size);
9727 insn = gen_call_value_internal (retval, stub_fn, arg_size);
9728 insn = emit_call_insn (insn);
9730 /* Put the register usage information on the CALL. */
9731 CALL_INSN_FUNCTION_USAGE (insn) =
9732 gen_rtx_EXPR_LIST (VOIDmode,
9733 gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 2)),
9734 CALL_INSN_FUNCTION_USAGE (insn));
9736 /* If we are handling a floating point return value, we need to
9737 save $18 in the function prologue. Putting a note on the
9738 call will mean that df_regs_ever_live_p ($18) will be true if the
9739 call is not eliminated, and we can check that in the prologue
9742 CALL_INSN_FUNCTION_USAGE (insn) =
9743 gen_rtx_EXPR_LIST (VOIDmode,
9744 gen_rtx_USE (VOIDmode,
9745 gen_rtx_REG (word_mode, 18)),
9746 CALL_INSN_FUNCTION_USAGE (insn));
9748 /* Return 1 to tell the caller that we've generated the call
9753 /* We know the function we are going to call. If we have already
9754 built a stub, we don't need to do anything further. */
9756 fnname = XSTR (fn, 0);
9757 for (l = mips16_stubs; l != NULL; l = l->next)
9758 if (strcmp (l->name, fnname) == 0)
9763 /* Build a special purpose stub. When the linker sees a
9764 function call in mips16 code, it will check where the target
9765 is defined. If the target is a 32-bit call, the linker will
9766 search for the section defined here. It can tell which
9767 symbol this section is associated with by looking at the
9768 relocation information (the name is unreliable, since this
9769 might be a static function). If such a section is found, the
9770 linker will redirect the call to the start of the magic
9773 If the function does not return a floating point value, the
9774 special stub section is named
9777 If the function does return a floating point value, the stub
9779 .mips16.call.fp.FNNAME
9782 secname = (char *) alloca (strlen (fnname) + 40);
9783 sprintf (secname, ".mips16.call.%s%s",
9786 stubname = (char *) alloca (strlen (fnname) + 20);
9787 sprintf (stubname, "__call_stub_%s%s",
9790 stubid = get_identifier (stubname);
9791 stubdecl = build_decl (FUNCTION_DECL, stubid,
9792 build_function_type (void_type_node, NULL_TREE));
9793 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
9794 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
9796 fprintf (asm_out_file, "\t# Stub function to call %s%s (",
9798 ? (GET_MODE (retval) == SFmode ? "float " : "double ")
9802 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
9804 fprintf (asm_out_file, "%s%s",
9805 need_comma ? ", " : "",
9806 (f & 3) == 1 ? "float" : "double");
9809 fprintf (asm_out_file, ")\n");
9811 fprintf (asm_out_file, "\t.set\tnomips16\n");
9812 assemble_start_function (stubdecl, stubname);
9814 if (!FUNCTION_NAME_ALREADY_DECLARED)
9816 fputs ("\t.ent\t", asm_out_file);
9817 assemble_name (asm_out_file, stubname);
9818 fputs ("\n", asm_out_file);
9820 assemble_name (asm_out_file, stubname);
9821 fputs (":\n", asm_out_file);
9824 /* We build the stub code by hand. That's the only way we can
9825 do it, since we can't generate 32-bit code during a 16-bit
9828 /* We don't want the assembler to insert any nops here. */
9829 fprintf (asm_out_file, "\t.set\tnoreorder\n");
9831 mips16_fp_args (asm_out_file, fp_code, 0);
9835 fprintf (asm_out_file, "\t.set\tnoat\n");
9836 fprintf (asm_out_file, "\tla\t%s,%s\n", reg_names[GP_REG_FIRST + 1],
9838 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
9839 fprintf (asm_out_file, "\t.set\tat\n");
9840 /* Unfortunately, we can't fill the jump delay slot. We
9841 can't fill with one of the mtc1 instructions, because the
9842 result is not available for one instruction, so if the
9843 very first instruction in the function refers to the
9844 register, it will see the wrong value. */
9845 fprintf (asm_out_file, "\tnop\n");
9849 fprintf (asm_out_file, "\tmove\t%s,%s\n",
9850 reg_names[GP_REG_FIRST + 18], reg_names[GP_REG_FIRST + 31]);
9851 fprintf (asm_out_file, "\tjal\t%s\n", fnname);
9852 /* As above, we can't fill the delay slot. */
9853 fprintf (asm_out_file, "\tnop\n");
9854 if (GET_MODE (retval) == SFmode)
9855 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9856 reg_names[GP_REG_FIRST + 2], reg_names[FP_REG_FIRST + 0]);
9857 else if (GET_MODE (retval) == SCmode)
9859 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9860 reg_names[GP_REG_FIRST + 2],
9861 reg_names[FP_REG_FIRST + 0]);
9862 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9863 reg_names[GP_REG_FIRST + 3],
9864 reg_names[FP_REG_FIRST + MAX_FPRS_PER_FMT]);
9866 else if (GET_MODE (retval) == DFmode
9867 || GET_MODE (retval) == V2SFmode)
9869 mips16_fpret_double (GP_REG_FIRST + 2, FP_REG_FIRST + 0);
9871 else if (GET_MODE (retval) == DCmode)
9873 mips16_fpret_double (GP_REG_FIRST + 2,
9875 mips16_fpret_double (GP_REG_FIRST + 4,
9876 FP_REG_FIRST + MAX_FPRS_PER_FMT);
9880 if (TARGET_BIG_ENDIAN)
9882 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9883 reg_names[GP_REG_FIRST + 2],
9884 reg_names[FP_REG_FIRST + 1]);
9885 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9886 reg_names[GP_REG_FIRST + 3],
9887 reg_names[FP_REG_FIRST + 0]);
9891 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9892 reg_names[GP_REG_FIRST + 2],
9893 reg_names[FP_REG_FIRST + 0]);
9894 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9895 reg_names[GP_REG_FIRST + 3],
9896 reg_names[FP_REG_FIRST + 1]);
9899 fprintf (asm_out_file, "\tj\t%s\n", reg_names[GP_REG_FIRST + 18]);
9900 /* As above, we can't fill the delay slot. */
9901 fprintf (asm_out_file, "\tnop\n");
9904 fprintf (asm_out_file, "\t.set\treorder\n");
9906 #ifdef ASM_DECLARE_FUNCTION_SIZE
9907 ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
9910 if (!FUNCTION_NAME_ALREADY_DECLARED)
9912 fputs ("\t.end\t", asm_out_file);
9913 assemble_name (asm_out_file, stubname);
9914 fputs ("\n", asm_out_file);
9917 /* Record this stub. */
9918 l = (struct mips16_stub *) xmalloc (sizeof *l);
9919 l->name = xstrdup (fnname);
9921 l->next = mips16_stubs;
9925 /* If we expect a floating point return value, but we've built a
9926 stub which does not expect one, then we're in trouble. We can't
9927 use the existing stub, because it won't handle the floating point
9928 value. We can't build a new stub, because the linker won't know
9929 which stub to use for the various calls in this object file.
9930 Fortunately, this case is illegal, since it means that a function
9931 was declared in two different ways in a single compilation. */
9932 if (fpret && ! l->fpret)
9933 error ("cannot handle inconsistent calls to %qs", fnname);
9935 if (retval == NULL_RTX)
9936 insn = gen_call_internal_direct (fn, arg_size);
9938 insn = gen_call_value_internal_direct (retval, fn, arg_size);
9939 insn = emit_call_insn (insn);
9941 /* If we are calling a stub which handles a floating point return
9942 value, we need to arrange to save $18 in the prologue. We do
9943 this by marking the function call as using the register. The
9944 prologue will later see that it is used, and emit code to save
9947 CALL_INSN_FUNCTION_USAGE (insn) =
9948 gen_rtx_EXPR_LIST (VOIDmode,
9949 gen_rtx_USE (VOIDmode, gen_rtx_REG (word_mode, 18)),
9950 CALL_INSN_FUNCTION_USAGE (insn));
9952 /* Return 1 to tell the caller that we've generated the call
9957 /* An entry in the mips16 constant pool. VALUE is the pool constant,
9958 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
9960 struct mips16_constant {
9961 struct mips16_constant *next;
9964 enum machine_mode mode;
9967 /* Information about an incomplete mips16 constant pool. FIRST is the
9968 first constant, HIGHEST_ADDRESS is the highest address that the first
9969 byte of the pool can have, and INSN_ADDRESS is the current instruction
9972 struct mips16_constant_pool {
9973 struct mips16_constant *first;
9974 int highest_address;
9978 /* Add constant VALUE to POOL and return its label. MODE is the
9979 value's mode (used for CONST_INTs, etc.). */
9982 add_constant (struct mips16_constant_pool *pool,
9983 rtx value, enum machine_mode mode)
9985 struct mips16_constant **p, *c;
9986 bool first_of_size_p;
9988 /* See whether the constant is already in the pool. If so, return the
9989 existing label, otherwise leave P pointing to the place where the
9990 constant should be added.
9992 Keep the pool sorted in increasing order of mode size so that we can
9993 reduce the number of alignments needed. */
9994 first_of_size_p = true;
9995 for (p = &pool->first; *p != 0; p = &(*p)->next)
9997 if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
9999 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
10001 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
10002 first_of_size_p = false;
10005 /* In the worst case, the constant needed by the earliest instruction
10006 will end up at the end of the pool. The entire pool must then be
10007 accessible from that instruction.
10009 When adding the first constant, set the pool's highest address to
10010 the address of the first out-of-range byte. Adjust this address
10011 downwards each time a new constant is added. */
10012 if (pool->first == 0)
10013 /* For pc-relative lw, addiu and daddiu instructions, the base PC value
10014 is the address of the instruction with the lowest two bits clear.
10015 The base PC value for ld has the lowest three bits clear. Assume
10016 the worst case here. */
10017 pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
10018 pool->highest_address -= GET_MODE_SIZE (mode);
10019 if (first_of_size_p)
10020 /* Take into account the worst possible padding due to alignment. */
10021 pool->highest_address -= GET_MODE_SIZE (mode) - 1;
10023 /* Create a new entry. */
10024 c = (struct mips16_constant *) xmalloc (sizeof *c);
10027 c->label = gen_label_rtx ();
10034 /* Output constant VALUE after instruction INSN and return the last
10035 instruction emitted. MODE is the mode of the constant. */
10038 dump_constants_1 (enum machine_mode mode, rtx value, rtx insn)
10040 switch (GET_MODE_CLASS (mode))
10044 rtx size = GEN_INT (GET_MODE_SIZE (mode));
10045 return emit_insn_after (gen_consttable_int (value, size), insn);
10049 return emit_insn_after (gen_consttable_float (value), insn);
10051 case MODE_VECTOR_FLOAT:
10052 case MODE_VECTOR_INT:
10055 for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
10056 insn = dump_constants_1 (GET_MODE_INNER (mode),
10057 CONST_VECTOR_ELT (value, i), insn);
10062 gcc_unreachable ();
10067 /* Dump out the constants in CONSTANTS after INSN. */
10070 dump_constants (struct mips16_constant *constants, rtx insn)
10072 struct mips16_constant *c, *next;
10076 for (c = constants; c != NULL; c = next)
10078 /* If necessary, increase the alignment of PC. */
10079 if (align < GET_MODE_SIZE (c->mode))
10081 int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
10082 insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
10084 align = GET_MODE_SIZE (c->mode);
10086 insn = emit_label_after (c->label, insn);
10087 insn = dump_constants_1 (c->mode, c->value, insn);
10093 emit_barrier_after (insn);
10096 /* Return the length of instruction INSN. */
10099 mips16_insn_length (rtx insn)
10103 rtx body = PATTERN (insn);
10104 if (GET_CODE (body) == ADDR_VEC)
10105 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
10106 if (GET_CODE (body) == ADDR_DIFF_VEC)
10107 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
10109 return get_attr_length (insn);
10112 /* If *X is a symbolic constant that refers to the constant pool, add
10113 the constant to POOL and rewrite *X to use the constant's label. */
10116 mips16_rewrite_pool_constant (struct mips16_constant_pool *pool, rtx *x)
10118 rtx base, offset, label;
10120 split_const (*x, &base, &offset);
10121 if (GET_CODE (base) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (base))
10123 label = add_constant (pool, get_pool_constant (base),
10124 get_pool_mode (base));
10125 base = gen_rtx_LABEL_REF (Pmode, label);
10126 *x = mips_unspec_address_offset (base, offset, SYMBOL_PC_RELATIVE);
10130 /* This structure is used to communicate with mips16_rewrite_pool_refs.
10131 INSN is the instruction we're rewriting and POOL points to the current
10133 struct mips16_rewrite_pool_refs_info {
10135 struct mips16_constant_pool *pool;
10138 /* Rewrite *X so that constant pool references refer to the constant's
10139 label instead. DATA points to a mips16_rewrite_pool_refs_info
10143 mips16_rewrite_pool_refs (rtx *x, void *data)
10145 struct mips16_rewrite_pool_refs_info *info = data;
10147 if (force_to_mem_operand (*x, Pmode))
10149 rtx mem = force_const_mem (GET_MODE (*x), *x);
10150 validate_change (info->insn, x, mem, false);
10155 mips16_rewrite_pool_constant (info->pool, &XEXP (*x, 0));
10159 if (TARGET_MIPS16_TEXT_LOADS)
10160 mips16_rewrite_pool_constant (info->pool, x);
10162 return GET_CODE (*x) == CONST ? -1 : 0;
10165 /* Build MIPS16 constant pools. */
10168 mips16_lay_out_constants (void)
10170 struct mips16_constant_pool pool;
10171 struct mips16_rewrite_pool_refs_info info;
10174 if (!TARGET_MIPS16_PCREL_LOADS)
10178 memset (&pool, 0, sizeof (pool));
10179 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10181 /* Rewrite constant pool references in INSN. */
10186 for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &info);
10189 pool.insn_address += mips16_insn_length (insn);
10191 if (pool.first != NULL)
10193 /* If there are no natural barriers between the first user of
10194 the pool and the highest acceptable address, we'll need to
10195 create a new instruction to jump around the constant pool.
10196 In the worst case, this instruction will be 4 bytes long.
10198 If it's too late to do this transformation after INSN,
10199 do it immediately before INSN. */
10200 if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
10204 label = gen_label_rtx ();
10206 jump = emit_jump_insn_before (gen_jump (label), insn);
10207 JUMP_LABEL (jump) = label;
10208 LABEL_NUSES (label) = 1;
10209 barrier = emit_barrier_after (jump);
10211 emit_label_after (label, barrier);
10212 pool.insn_address += 4;
10215 /* See whether the constant pool is now out of range of the first
10216 user. If so, output the constants after the previous barrier.
10217 Note that any instructions between BARRIER and INSN (inclusive)
10218 will use negative offsets to refer to the pool. */
10219 if (pool.insn_address > pool.highest_address)
10221 dump_constants (pool.first, barrier);
10225 else if (BARRIER_P (insn))
10229 dump_constants (pool.first, get_last_insn ());
10232 /* A temporary variable used by for_each_rtx callbacks, etc. */
10233 static rtx mips_sim_insn;
10235 /* A structure representing the state of the processor pipeline.
10236 Used by the mips_sim_* family of functions. */
10238 /* The maximum number of instructions that can be issued in a cycle.
10239 (Caches mips_issue_rate.) */
10240 unsigned int issue_rate;
10242 /* The current simulation time. */
10245 /* How many more instructions can be issued in the current cycle. */
10246 unsigned int insns_left;
10248 /* LAST_SET[X].INSN is the last instruction to set register X.
10249 LAST_SET[X].TIME is the time at which that instruction was issued.
10250 INSN is null if no instruction has yet set register X. */
10254 } last_set[FIRST_PSEUDO_REGISTER];
10256 /* The pipeline's current DFA state. */
10260 /* Reset STATE to the initial simulation state. */
10263 mips_sim_reset (struct mips_sim *state)
10266 state->insns_left = state->issue_rate;
10267 memset (&state->last_set, 0, sizeof (state->last_set));
10268 state_reset (state->dfa_state);
10271 /* Initialize STATE before its first use. DFA_STATE points to an
10272 allocated but uninitialized DFA state. */
10275 mips_sim_init (struct mips_sim *state, state_t dfa_state)
10277 state->issue_rate = mips_issue_rate ();
10278 state->dfa_state = dfa_state;
10279 mips_sim_reset (state);
10282 /* Advance STATE by one clock cycle. */
10285 mips_sim_next_cycle (struct mips_sim *state)
10288 state->insns_left = state->issue_rate;
10289 state_transition (state->dfa_state, 0);
10292 /* Advance simulation state STATE until instruction INSN can read
10296 mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
10300 for (i = 0; i < HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)); i++)
10301 if (state->last_set[REGNO (reg) + i].insn != 0)
10305 t = state->last_set[REGNO (reg) + i].time;
10306 t += insn_latency (state->last_set[REGNO (reg) + i].insn, insn);
10307 while (state->time < t)
10308 mips_sim_next_cycle (state);
10312 /* A for_each_rtx callback. If *X is a register, advance simulation state
10313 DATA until mips_sim_insn can read the register's value. */
10316 mips_sim_wait_regs_2 (rtx *x, void *data)
10319 mips_sim_wait_reg (data, mips_sim_insn, *x);
10323 /* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X. */
10326 mips_sim_wait_regs_1 (rtx *x, void *data)
10328 for_each_rtx (x, mips_sim_wait_regs_2, data);
10331 /* Advance simulation state STATE until all of INSN's register
10332 dependencies are satisfied. */
10335 mips_sim_wait_regs (struct mips_sim *state, rtx insn)
10337 mips_sim_insn = insn;
10338 note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
10341 /* Advance simulation state STATE until the units required by
10342 instruction INSN are available. */
10345 mips_sim_wait_units (struct mips_sim *state, rtx insn)
10349 tmp_state = alloca (state_size ());
10350 while (state->insns_left == 0
10351 || (memcpy (tmp_state, state->dfa_state, state_size ()),
10352 state_transition (tmp_state, insn) >= 0))
10353 mips_sim_next_cycle (state);
10356 /* Advance simulation state STATE until INSN is ready to issue. */
10359 mips_sim_wait_insn (struct mips_sim *state, rtx insn)
10361 mips_sim_wait_regs (state, insn);
10362 mips_sim_wait_units (state, insn);
10365 /* mips_sim_insn has just set X. Update the LAST_SET array
10366 in simulation state DATA. */
10369 mips_sim_record_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
10371 struct mips_sim *state;
10376 for (i = 0; i < HARD_REGNO_NREGS (REGNO (x), GET_MODE (x)); i++)
10378 state->last_set[REGNO (x) + i].insn = mips_sim_insn;
10379 state->last_set[REGNO (x) + i].time = state->time;
10383 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
10384 can issue immediately (i.e., that mips_sim_wait_insn has already
10388 mips_sim_issue_insn (struct mips_sim *state, rtx insn)
10390 state_transition (state->dfa_state, insn);
10391 state->insns_left--;
10393 mips_sim_insn = insn;
10394 note_stores (PATTERN (insn), mips_sim_record_set, state);
10397 /* Simulate issuing a NOP in state STATE. */
10400 mips_sim_issue_nop (struct mips_sim *state)
10402 if (state->insns_left == 0)
10403 mips_sim_next_cycle (state);
10404 state->insns_left--;
10407 /* Update simulation state STATE so that it's ready to accept the instruction
10408 after INSN. INSN should be part of the main rtl chain, not a member of a
10412 mips_sim_finish_insn (struct mips_sim *state, rtx insn)
10414 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
10416 mips_sim_issue_nop (state);
10418 switch (GET_CODE (SEQ_BEGIN (insn)))
10422 /* We can't predict the processor state after a call or label. */
10423 mips_sim_reset (state);
10427 /* The delay slots of branch likely instructions are only executed
10428 when the branch is taken. Therefore, if the caller has simulated
10429 the delay slot instruction, STATE does not really reflect the state
10430 of the pipeline for the instruction after the delay slot. Also,
10431 branch likely instructions tend to incur a penalty when not taken,
10432 so there will probably be an extra delay between the branch and
10433 the instruction after the delay slot. */
10434 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
10435 mips_sim_reset (state);
10443 /* The VR4130 pipeline issues aligned pairs of instructions together,
10444 but it stalls the second instruction if it depends on the first.
10445 In order to cut down the amount of logic required, this dependence
10446 check is not based on a full instruction decode. Instead, any non-SPECIAL
10447 instruction is assumed to modify the register specified by bits 20-16
10448 (which is usually the "rt" field).
10450 In beq, beql, bne and bnel instructions, the rt field is actually an
10451 input, so we can end up with a false dependence between the branch
10452 and its delay slot. If this situation occurs in instruction INSN,
10453 try to avoid it by swapping rs and rt. */
10456 vr4130_avoid_branch_rt_conflict (rtx insn)
10460 first = SEQ_BEGIN (insn);
10461 second = SEQ_END (insn);
10463 && NONJUMP_INSN_P (second)
10464 && GET_CODE (PATTERN (first)) == SET
10465 && GET_CODE (SET_DEST (PATTERN (first))) == PC
10466 && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
10468 /* Check for the right kind of condition. */
10469 rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
10470 if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
10471 && REG_P (XEXP (cond, 0))
10472 && REG_P (XEXP (cond, 1))
10473 && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
10474 && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
10476 /* SECOND mentions the rt register but not the rs register. */
10477 rtx tmp = XEXP (cond, 0);
10478 XEXP (cond, 0) = XEXP (cond, 1);
10479 XEXP (cond, 1) = tmp;
10484 /* Implement -mvr4130-align. Go through each basic block and simulate the
10485 processor pipeline. If we find that a pair of instructions could execute
10486 in parallel, and the first of those instruction is not 8-byte aligned,
10487 insert a nop to make it aligned. */
10490 vr4130_align_insns (void)
10492 struct mips_sim state;
10493 rtx insn, subinsn, last, last2, next;
10498 /* LAST is the last instruction before INSN to have a nonzero length.
10499 LAST2 is the last such instruction before LAST. */
10503 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
10506 mips_sim_init (&state, alloca (state_size ()));
10507 for (insn = get_insns (); insn != 0; insn = next)
10509 unsigned int length;
10511 next = NEXT_INSN (insn);
10513 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
10514 This isn't really related to the alignment pass, but we do it on
10515 the fly to avoid a separate instruction walk. */
10516 vr4130_avoid_branch_rt_conflict (insn);
10518 if (USEFUL_INSN_P (insn))
10519 FOR_EACH_SUBINSN (subinsn, insn)
10521 mips_sim_wait_insn (&state, subinsn);
10523 /* If we want this instruction to issue in parallel with the
10524 previous one, make sure that the previous instruction is
10525 aligned. There are several reasons why this isn't worthwhile
10526 when the second instruction is a call:
10528 - Calls are less likely to be performance critical,
10529 - There's a good chance that the delay slot can execute
10530 in parallel with the call.
10531 - The return address would then be unaligned.
10533 In general, if we're going to insert a nop between instructions
10534 X and Y, it's better to insert it immediately after X. That
10535 way, if the nop makes Y aligned, it will also align any labels
10536 between X and Y. */
10537 if (state.insns_left != state.issue_rate
10538 && !CALL_P (subinsn))
10540 if (subinsn == SEQ_BEGIN (insn) && aligned_p)
10542 /* SUBINSN is the first instruction in INSN and INSN is
10543 aligned. We want to align the previous instruction
10544 instead, so insert a nop between LAST2 and LAST.
10546 Note that LAST could be either a single instruction
10547 or a branch with a delay slot. In the latter case,
10548 LAST, like INSN, is already aligned, but the delay
10549 slot must have some extra delay that stops it from
10550 issuing at the same time as the branch. We therefore
10551 insert a nop before the branch in order to align its
10553 emit_insn_after (gen_nop (), last2);
10556 else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
10558 /* SUBINSN is the delay slot of INSN, but INSN is
10559 currently unaligned. Insert a nop between
10560 LAST and INSN to align it. */
10561 emit_insn_after (gen_nop (), last);
10565 mips_sim_issue_insn (&state, subinsn);
10567 mips_sim_finish_insn (&state, insn);
10569 /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
10570 length = get_attr_length (insn);
10573 /* If the instruction is an asm statement or multi-instruction
10574 mips.md patern, the length is only an estimate. Insert an
10575 8 byte alignment after it so that the following instructions
10576 can be handled correctly. */
10577 if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
10578 && (recog_memoized (insn) < 0 || length >= 8))
10580 next = emit_insn_after (gen_align (GEN_INT (3)), insn);
10581 next = NEXT_INSN (next);
10582 mips_sim_next_cycle (&state);
10585 else if (length & 4)
10586 aligned_p = !aligned_p;
10591 /* See whether INSN is an aligned label. */
10592 if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
10598 /* Subroutine of mips_reorg. If there is a hazard between INSN
10599 and a previous instruction, avoid it by inserting nops after
10602 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
10603 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
10604 before using the value of that register. *HILO_DELAY counts the
10605 number of instructions since the last hilo hazard (that is,
10606 the number of instructions since the last mflo or mfhi).
10608 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
10609 for the next instruction.
10611 LO_REG is an rtx for the LO register, used in dependence checking. */
10614 mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
10615 rtx *delayed_reg, rtx lo_reg)
10620 if (!INSN_P (insn))
10623 pattern = PATTERN (insn);
10625 /* Do not put the whole function in .set noreorder if it contains
10626 an asm statement. We don't know whether there will be hazards
10627 between the asm statement and the gcc-generated code. */
10628 if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
10629 cfun->machine->all_noreorder_p = false;
10631 /* Ignore zero-length instructions (barriers and the like). */
10632 ninsns = get_attr_length (insn) / 4;
10636 /* Work out how many nops are needed. Note that we only care about
10637 registers that are explicitly mentioned in the instruction's pattern.
10638 It doesn't matter that calls use the argument registers or that they
10639 clobber hi and lo. */
10640 if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
10641 nops = 2 - *hilo_delay;
10642 else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
10647 /* Insert the nops between this instruction and the previous one.
10648 Each new nop takes us further from the last hilo hazard. */
10649 *hilo_delay += nops;
10651 emit_insn_after (gen_hazard_nop (), after);
10653 /* Set up the state for the next instruction. */
10654 *hilo_delay += ninsns;
10656 if (INSN_CODE (insn) >= 0)
10657 switch (get_attr_hazard (insn))
10667 set = single_set (insn);
10668 gcc_assert (set != 0);
10669 *delayed_reg = SET_DEST (set);
10675 /* Go through the instruction stream and insert nops where necessary.
10676 See if the whole function can then be put into .set noreorder &
10680 mips_avoid_hazards (void)
10682 rtx insn, last_insn, lo_reg, delayed_reg;
10685 /* Force all instructions to be split into their final form. */
10686 split_all_insns_noflow ();
10688 /* Recalculate instruction lengths without taking nops into account. */
10689 cfun->machine->ignore_hazard_length_p = true;
10690 shorten_branches (get_insns ());
10692 cfun->machine->all_noreorder_p = true;
10694 /* Profiled functions can't be all noreorder because the profiler
10695 support uses assembler macros. */
10696 if (current_function_profile)
10697 cfun->machine->all_noreorder_p = false;
10699 /* Code compiled with -mfix-vr4120 can't be all noreorder because
10700 we rely on the assembler to work around some errata. */
10701 if (TARGET_FIX_VR4120)
10702 cfun->machine->all_noreorder_p = false;
10704 /* The same is true for -mfix-vr4130 if we might generate mflo or
10705 mfhi instructions. Note that we avoid using mflo and mfhi if
10706 the VR4130 macc and dmacc instructions are available instead;
10707 see the *mfhilo_{si,di}_macc patterns. */
10708 if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
10709 cfun->machine->all_noreorder_p = false;
10714 lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
10716 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
10719 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
10720 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
10721 mips_avoid_hazard (last_insn, XVECEXP (PATTERN (insn), 0, i),
10722 &hilo_delay, &delayed_reg, lo_reg);
10724 mips_avoid_hazard (last_insn, insn, &hilo_delay,
10725 &delayed_reg, lo_reg);
10732 /* Implement TARGET_MACHINE_DEPENDENT_REORG. */
10737 mips16_lay_out_constants ();
10738 if (TARGET_EXPLICIT_RELOCS)
10740 if (mips_flag_delayed_branch)
10741 dbr_schedule (get_insns ());
10742 mips_avoid_hazards ();
10743 if (TUNE_MIPS4130 && TARGET_VR4130_ALIGN)
10744 vr4130_align_insns ();
10748 /* This function does three things:
10750 - Register the special divsi3 and modsi3 functions if -mfix-vr4120.
10751 - Register the mips16 hardware floating point stubs.
10752 - Register the gofast functions if selected using --enable-gofast. */
10754 #include "config/gofast.h"
10757 mips_init_libfuncs (void)
10759 if (TARGET_FIX_VR4120)
10761 set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
10762 set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
10765 if (TARGET_MIPS16 && TARGET_HARD_FLOAT_ABI)
10767 set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
10768 set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
10769 set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
10770 set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");
10772 set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
10773 set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
10774 set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
10775 set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
10776 set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
10777 set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
10778 set_optab_libfunc (unord_optab, SFmode, "__mips16_unordsf2");
10780 set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
10781 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
10782 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__mips16_floatunsisf");
10784 if (TARGET_DOUBLE_FLOAT)
10786 set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
10787 set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
10788 set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
10789 set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");
10791 set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
10792 set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
10793 set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
10794 set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
10795 set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
10796 set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
10797 set_optab_libfunc (unord_optab, DFmode, "__mips16_unorddf2");
10799 set_conv_libfunc (sext_optab, DFmode, SFmode, "__mips16_extendsfdf2");
10800 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__mips16_truncdfsf2");
10802 set_conv_libfunc (sfix_optab, SImode, DFmode, "__mips16_fix_truncdfsi");
10803 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__mips16_floatsidf");
10804 set_conv_libfunc (ufloat_optab, DFmode, SImode, "__mips16_floatunsidf");
10808 gofast_maybe_init_libfuncs ();
10811 /* Return a number assessing the cost of moving a register in class
10812 FROM to class TO. The classes are expressed using the enumeration
10813 values such as `GENERAL_REGS'. A value of 2 is the default; other
10814 values are interpreted relative to that.
10816 It is not required that the cost always equal 2 when FROM is the
10817 same as TO; on some machines it is expensive to move between
10818 registers if they are not general registers.
10820 If reload sees an insn consisting of a single `set' between two
10821 hard registers, and if `REGISTER_MOVE_COST' applied to their
10822 classes returns a value of 2, reload does not check to ensure that
10823 the constraints of the insn are met. Setting a cost of other than
10824 2 will allow reload to verify that the constraints are met. You
10825 should do this if the `movM' pattern's constraints do not allow
10828 ??? We make the cost of moving from HI/LO into general
10829 registers the same as for one of moving general registers to
10830 HI/LO for TARGET_MIPS16 in order to prevent allocating a
10831 pseudo to HI/LO. This might hurt optimizations though, it
10832 isn't clear if it is wise. And it might not work in all cases. We
10833 could solve the DImode LO reg problem by using a multiply, just
10834 like reload_{in,out}si. We could solve the SImode/HImode HI reg
10835 problem by using divide instructions. divu puts the remainder in
10836 the HI reg, so doing a divide by -1 will move the value in the HI
10837 reg for all values except -1. We could handle that case by using a
10838 signed divide, e.g. -1 / 2 (or maybe 1 / -2?). We'd have to emit
10839 a compare/branch to test the input value to see which instruction
10840 we need to use. This gets pretty messy, but it is feasible. */
10843 mips_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
10844 enum reg_class to, enum reg_class from)
10846 if (from == M16_REGS && reg_class_subset_p (to, GENERAL_REGS))
10848 else if (from == M16_NA_REGS && reg_class_subset_p (to, GENERAL_REGS))
10850 else if (reg_class_subset_p (from, GENERAL_REGS))
10852 if (to == M16_REGS)
10854 else if (to == M16_NA_REGS)
10856 else if (reg_class_subset_p (to, GENERAL_REGS))
10863 else if (to == FP_REGS)
10865 else if (reg_class_subset_p (to, ACC_REGS))
10872 else if (reg_class_subset_p (to, ALL_COP_REGS))
10877 else if (from == FP_REGS)
10879 if (reg_class_subset_p (to, GENERAL_REGS))
10881 else if (to == FP_REGS)
10883 else if (to == ST_REGS)
10886 else if (reg_class_subset_p (from, ACC_REGS))
10888 if (reg_class_subset_p (to, GENERAL_REGS))
10896 else if (from == ST_REGS && reg_class_subset_p (to, GENERAL_REGS))
10898 else if (reg_class_subset_p (from, ALL_COP_REGS))
10904 ??? What cases are these? Shouldn't we return 2 here? */
10909 /* Return the length of INSN. LENGTH is the initial length computed by
10910 attributes in the machine-description file. */
10913 mips_adjust_insn_length (rtx insn, int length)
10915 /* A unconditional jump has an unfilled delay slot if it is not part
10916 of a sequence. A conditional jump normally has a delay slot, but
10917 does not on MIPS16. */
10918 if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
10921 /* See how many nops might be needed to avoid hardware hazards. */
10922 if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
10923 switch (get_attr_hazard (insn))
10937 /* All MIPS16 instructions are a measly two bytes. */
10945 /* Return an asm sequence to start a noat block and load the address
10946 of a label into $1. */
10949 mips_output_load_label (void)
10951 if (TARGET_EXPLICIT_RELOCS)
10955 return "%[lw\t%@,%%got_page(%0)(%+)\n\taddiu\t%@,%@,%%got_ofst(%0)";
10958 return "%[ld\t%@,%%got_page(%0)(%+)\n\tdaddiu\t%@,%@,%%got_ofst(%0)";
10961 if (ISA_HAS_LOAD_DELAY)
10962 return "%[lw\t%@,%%got(%0)(%+)%#\n\taddiu\t%@,%@,%%lo(%0)";
10963 return "%[lw\t%@,%%got(%0)(%+)\n\taddiu\t%@,%@,%%lo(%0)";
10967 if (Pmode == DImode)
10968 return "%[dla\t%@,%0";
10970 return "%[la\t%@,%0";
10974 /* Return the assembly code for INSN, which has the operands given by
10975 OPERANDS, and which branches to OPERANDS[1] if some condition is true.
10976 BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[1]
10977 is in range of a direct branch. BRANCH_IF_FALSE is an inverted
10978 version of BRANCH_IF_TRUE. */
10981 mips_output_conditional_branch (rtx insn, rtx *operands,
10982 const char *branch_if_true,
10983 const char *branch_if_false)
10985 unsigned int length;
10986 rtx taken, not_taken;
10988 length = get_attr_length (insn);
10991 /* Just a simple conditional branch. */
10992 mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
10993 return branch_if_true;
10996 /* Generate a reversed branch around a direct jump. This fallback does
10997 not use branch-likely instructions. */
10998 mips_branch_likely = false;
10999 not_taken = gen_label_rtx ();
11000 taken = operands[1];
11002 /* Generate the reversed branch to NOT_TAKEN. */
11003 operands[1] = not_taken;
11004 output_asm_insn (branch_if_false, operands);
11006 /* If INSN has a delay slot, we must provide delay slots for both the
11007 branch to NOT_TAKEN and the conditional jump. We must also ensure
11008 that INSN's delay slot is executed in the appropriate cases. */
11009 if (final_sequence)
11011 /* This first delay slot will always be executed, so use INSN's
11012 delay slot if is not annulled. */
11013 if (!INSN_ANNULLED_BRANCH_P (insn))
11015 final_scan_insn (XVECEXP (final_sequence, 0, 1),
11016 asm_out_file, optimize, 1, NULL);
11017 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
11020 output_asm_insn ("nop", 0);
11021 fprintf (asm_out_file, "\n");
11024 /* Output the unconditional branch to TAKEN. */
11026 output_asm_insn ("j\t%0%/", &taken);
11029 output_asm_insn (mips_output_load_label (), &taken);
11030 output_asm_insn ("jr\t%@%]%/", 0);
11033 /* Now deal with its delay slot; see above. */
11034 if (final_sequence)
11036 /* This delay slot will only be executed if the branch is taken.
11037 Use INSN's delay slot if is annulled. */
11038 if (INSN_ANNULLED_BRANCH_P (insn))
11040 final_scan_insn (XVECEXP (final_sequence, 0, 1),
11041 asm_out_file, optimize, 1, NULL);
11042 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
11045 output_asm_insn ("nop", 0);
11046 fprintf (asm_out_file, "\n");
11049 /* Output NOT_TAKEN. */
11050 (*targetm.asm_out.internal_label) (asm_out_file, "L",
11051 CODE_LABEL_NUMBER (not_taken));
11055 /* Return the assembly code for INSN, which branches to OPERANDS[1]
11056 if some ordered condition is true. The condition is given by
11057 OPERANDS[0] if !INVERTED_P, otherwise it is the inverse of
11058 OPERANDS[0]. OPERANDS[2] is the comparison's first operand;
11059 its second is always zero. */
11062 mips_output_order_conditional_branch (rtx insn, rtx *operands, bool inverted_p)
11064 const char *branch[2];
11066 /* Make BRANCH[1] branch to OPERANDS[1] when the condition is true.
11067 Make BRANCH[0] branch on the inverse condition. */
11068 switch (GET_CODE (operands[0]))
11070 /* These cases are equivalent to comparisons against zero. */
11072 inverted_p = !inverted_p;
11073 /* Fall through. */
11075 branch[!inverted_p] = MIPS_BRANCH ("bne", "%2,%.,%1");
11076 branch[inverted_p] = MIPS_BRANCH ("beq", "%2,%.,%1");
11079 /* These cases are always true or always false. */
11081 inverted_p = !inverted_p;
11082 /* Fall through. */
11084 branch[!inverted_p] = MIPS_BRANCH ("beq", "%.,%.,%1");
11085 branch[inverted_p] = MIPS_BRANCH ("bne", "%.,%.,%1");
11089 branch[!inverted_p] = MIPS_BRANCH ("b%C0z", "%2,%1");
11090 branch[inverted_p] = MIPS_BRANCH ("b%N0z", "%2,%1");
11093 return mips_output_conditional_branch (insn, operands, branch[1], branch[0]);
11096 /* Used to output div or ddiv instruction DIVISION, which has the operands
11097 given by OPERANDS. Add in a divide-by-zero check if needed.
11099 When working around R4000 and R4400 errata, we need to make sure that
11100 the division is not immediately followed by a shift[1][2]. We also
11101 need to stop the division from being put into a branch delay slot[3].
11102 The easiest way to avoid both problems is to add a nop after the
11103 division. When a divide-by-zero check is needed, this nop can be
11104 used to fill the branch delay slot.
11106 [1] If a double-word or a variable shift executes immediately
11107 after starting an integer division, the shift may give an
11108 incorrect result. See quotations of errata #16 and #28 from
11109 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
11110 in mips.md for details.
11112 [2] A similar bug to [1] exists for all revisions of the
11113 R4000 and the R4400 when run in an MC configuration.
11114 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
11116 "19. In this following sequence:
11118 ddiv (or ddivu or div or divu)
11119 dsll32 (or dsrl32, dsra32)
11121 if an MPT stall occurs, while the divide is slipping the cpu
11122 pipeline, then the following double shift would end up with an
11125 Workaround: The compiler needs to avoid generating any
11126 sequence with divide followed by extended double shift."
11128 This erratum is also present in "MIPS R4400MC Errata, Processor
11129 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
11130 & 3.0" as errata #10 and #4, respectively.
11132 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
11133 (also valid for MIPS R4000MC processors):
11135 "52. R4000SC: This bug does not apply for the R4000PC.
11137 There are two flavors of this bug:
11139 1) If the instruction just after divide takes an RF exception
11140 (tlb-refill, tlb-invalid) and gets an instruction cache
11141 miss (both primary and secondary) and the line which is
11142 currently in secondary cache at this index had the first
11143 data word, where the bits 5..2 are set, then R4000 would
11144 get a wrong result for the div.
11149 ------------------- # end-of page. -tlb-refill
11154 ------------------- # end-of page. -tlb-invalid
11157 2) If the divide is in the taken branch delay slot, where the
11158 target takes RF exception and gets an I-cache miss for the
11159 exception vector or where I-cache miss occurs for the
11160 target address, under the above mentioned scenarios, the
11161 div would get wrong results.
11164 j r2 # to next page mapped or unmapped
11165 div r8,r9 # this bug would be there as long
11166 # as there is an ICache miss and
11167 nop # the "data pattern" is present
11170 beq r0, r0, NextPage # to Next page
11174 This bug is present for div, divu, ddiv, and ddivu
11177 Workaround: For item 1), OS could make sure that the next page
11178 after the divide instruction is also mapped. For item 2), the
11179 compiler could make sure that the divide instruction is not in
11180 the branch delay slot."
11182 These processors have PRId values of 0x00004220 and 0x00004300 for
11183 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
11186 mips_output_division (const char *division, rtx *operands)
11191 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
11193 output_asm_insn (s, operands);
11196 if (TARGET_CHECK_ZERO_DIV)
11200 output_asm_insn (s, operands);
11201 s = "bnez\t%2,1f\n\tbreak\t7\n1:";
11203 else if (GENERATE_DIVIDE_TRAPS)
11205 output_asm_insn (s, operands);
11206 s = "teq\t%2,%.,7";
11210 output_asm_insn ("%(bne\t%2,%.,1f", operands);
11211 output_asm_insn (s, operands);
11212 s = "break\t7%)\n1:";
11218 /* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
11219 with a final "000" replaced by "k". Ignore case.
11221 Note: this function is shared between GCC and GAS. */
11224 mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
11226 while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
11227 given++, canonical++;
11229 return ((*given == 0 && *canonical == 0)
11230 || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
11234 /* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
11235 CPU name. We've traditionally allowed a lot of variation here.
11237 Note: this function is shared between GCC and GAS. */
11240 mips_matching_cpu_name_p (const char *canonical, const char *given)
11242 /* First see if the name matches exactly, or with a final "000"
11243 turned into "k". */
11244 if (mips_strict_matching_cpu_name_p (canonical, given))
11247 /* If not, try comparing based on numerical designation alone.
11248 See if GIVEN is an unadorned number, or 'r' followed by a number. */
11249 if (TOLOWER (*given) == 'r')
11251 if (!ISDIGIT (*given))
11254 /* Skip over some well-known prefixes in the canonical name,
11255 hoping to find a number there too. */
11256 if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
11258 else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
11260 else if (TOLOWER (canonical[0]) == 'r')
11263 return mips_strict_matching_cpu_name_p (canonical, given);
11267 /* Return the mips_cpu_info entry for the processor or ISA given
11268 by CPU_STRING. Return null if the string isn't recognized.
11270 A similar function exists in GAS. */
11272 static const struct mips_cpu_info *
11273 mips_parse_cpu (const char *cpu_string)
11275 const struct mips_cpu_info *p;
11278 /* In the past, we allowed upper-case CPU names, but it doesn't
11279 work well with the multilib machinery. */
11280 for (s = cpu_string; *s != 0; s++)
11283 warning (0, "the cpu name must be lower case");
11287 /* 'from-abi' selects the most compatible architecture for the given
11288 ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs. For the
11289 EABIs, we have to decide whether we're using the 32-bit or 64-bit
11290 version. Look first at the -mgp options, if given, otherwise base
11291 the choice on MASK_64BIT in TARGET_DEFAULT. */
11292 if (strcasecmp (cpu_string, "from-abi") == 0)
11293 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
11294 : ABI_NEEDS_64BIT_REGS ? 3
11295 : (TARGET_64BIT ? 3 : 1));
11297 /* 'default' has traditionally been a no-op. Probably not very useful. */
11298 if (strcasecmp (cpu_string, "default") == 0)
11301 for (p = mips_cpu_info_table; p->name != 0; p++)
11302 if (mips_matching_cpu_name_p (p->name, cpu_string))
11309 /* Return the processor associated with the given ISA level, or null
11310 if the ISA isn't valid. */
11312 static const struct mips_cpu_info *
11313 mips_cpu_info_from_isa (int isa)
11315 const struct mips_cpu_info *p;
11317 for (p = mips_cpu_info_table; p->name != 0; p++)
11324 /* Implement HARD_REGNO_NREGS. The size of FP registers is controlled
11325 by UNITS_PER_FPREG. The size of FP status registers is always 4, because
11326 they only hold condition code modes, and CCmode is always considered to
11327 be 4 bytes wide. All other registers are word sized. */
11330 mips_hard_regno_nregs (int regno, enum machine_mode mode)
11332 if (ST_REG_P (regno))
11333 return ((GET_MODE_SIZE (mode) + 3) / 4);
11334 else if (! FP_REG_P (regno))
11335 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
11337 return ((GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG);
11340 /* Implement TARGET_RETURN_IN_MEMORY. Under the old (i.e., 32 and O64 ABIs)
11341 all BLKmode objects are returned in memory. Under the new (N32 and
11342 64-bit MIPS ABIs) small structures are returned in a register.
11343 Objects with varying size must still be returned in memory, of
11347 mips_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
11350 return (TYPE_MODE (type) == BLKmode);
11352 return ((int_size_in_bytes (type) > (2 * UNITS_PER_WORD))
11353 || (int_size_in_bytes (type) == -1));
11357 mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
11359 return !TARGET_OLDABI;
11362 /* Return true if INSN is a multiply-add or multiply-subtract
11363 instruction and PREV assigns to the accumulator operand. */
11366 mips_linked_madd_p (rtx prev, rtx insn)
11370 x = single_set (insn);
11376 if (GET_CODE (x) == PLUS
11377 && GET_CODE (XEXP (x, 0)) == MULT
11378 && reg_set_p (XEXP (x, 1), prev))
11381 if (GET_CODE (x) == MINUS
11382 && GET_CODE (XEXP (x, 1)) == MULT
11383 && reg_set_p (XEXP (x, 0), prev))
11389 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
11390 that may clobber hi or lo. */
11392 static rtx mips_macc_chains_last_hilo;
11394 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
11395 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
11398 mips_macc_chains_record (rtx insn)
11400 if (get_attr_may_clobber_hilo (insn))
11401 mips_macc_chains_last_hilo = insn;
11404 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
11405 has NREADY elements, looking for a multiply-add or multiply-subtract
11406 instruction that is cumulative with mips_macc_chains_last_hilo.
11407 If there is one, promote it ahead of anything else that might
11408 clobber hi or lo. */
11411 mips_macc_chains_reorder (rtx *ready, int nready)
11415 if (mips_macc_chains_last_hilo != 0)
11416 for (i = nready - 1; i >= 0; i--)
11417 if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
11419 for (j = nready - 1; j > i; j--)
11420 if (recog_memoized (ready[j]) >= 0
11421 && get_attr_may_clobber_hilo (ready[j]))
11423 mips_promote_ready (ready, i, j);
11430 /* The last instruction to be scheduled. */
11432 static rtx vr4130_last_insn;
11434 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
11435 points to an rtx that is initially an instruction. Nullify the rtx
11436 if the instruction uses the value of register X. */
11439 vr4130_true_reg_dependence_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
11441 rtx *insn_ptr = data;
11444 && reg_referenced_p (x, PATTERN (*insn_ptr)))
11448 /* Return true if there is true register dependence between vr4130_last_insn
11452 vr4130_true_reg_dependence_p (rtx insn)
11454 note_stores (PATTERN (vr4130_last_insn),
11455 vr4130_true_reg_dependence_p_1, &insn);
11459 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
11460 the ready queue and that INSN2 is the instruction after it, return
11461 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
11462 in which INSN1 and INSN2 can probably issue in parallel, but for
11463 which (INSN2, INSN1) should be less sensitive to instruction
11464 alignment than (INSN1, INSN2). See 4130.md for more details. */
11467 vr4130_swap_insns_p (rtx insn1, rtx insn2)
11469 sd_iterator_def sd_it;
11472 /* Check for the following case:
11474 1) there is some other instruction X with an anti dependence on INSN1;
11475 2) X has a higher priority than INSN2; and
11476 3) X is an arithmetic instruction (and thus has no unit restrictions).
11478 If INSN1 is the last instruction blocking X, it would better to
11479 choose (INSN1, X) over (INSN2, INSN1). */
11480 FOR_EACH_DEP (insn1, SD_LIST_FORW, sd_it, dep)
11481 if (DEP_TYPE (dep) == REG_DEP_ANTI
11482 && INSN_PRIORITY (DEP_CON (dep)) > INSN_PRIORITY (insn2)
11483 && recog_memoized (DEP_CON (dep)) >= 0
11484 && get_attr_vr4130_class (DEP_CON (dep)) == VR4130_CLASS_ALU)
11487 if (vr4130_last_insn != 0
11488 && recog_memoized (insn1) >= 0
11489 && recog_memoized (insn2) >= 0)
11491 /* See whether INSN1 and INSN2 use different execution units,
11492 or if they are both ALU-type instructions. If so, they can
11493 probably execute in parallel. */
11494 enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
11495 enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
11496 if (class1 != class2 || class1 == VR4130_CLASS_ALU)
11498 /* If only one of the instructions has a dependence on
11499 vr4130_last_insn, prefer to schedule the other one first. */
11500 bool dep1 = vr4130_true_reg_dependence_p (insn1);
11501 bool dep2 = vr4130_true_reg_dependence_p (insn2);
11505 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
11506 is not an ALU-type instruction and if INSN1 uses the same
11507 execution unit. (Note that if this condition holds, we already
11508 know that INSN2 uses a different execution unit.) */
11509 if (class1 != VR4130_CLASS_ALU
11510 && recog_memoized (vr4130_last_insn) >= 0
11511 && class1 == get_attr_vr4130_class (vr4130_last_insn))
11518 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
11519 queue with at least two instructions. Swap the first two if
11520 vr4130_swap_insns_p says that it could be worthwhile. */
11523 vr4130_reorder (rtx *ready, int nready)
11525 if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
11526 mips_promote_ready (ready, nready - 2, nready - 1);
11529 /* Remove the instruction at index LOWER from ready queue READY and
11530 reinsert it in front of the instruction at index HIGHER. LOWER must
11534 mips_promote_ready (rtx *ready, int lower, int higher)
11539 new_head = ready[lower];
11540 for (i = lower; i < higher; i++)
11541 ready[i] = ready[i + 1];
11542 ready[i] = new_head;
11545 /* If the priority of the instruction at POS2 in the ready queue READY
11546 is within LIMIT units of that of the instruction at POS1, swap the
11547 instructions if POS2 is not already less than POS1. */
11550 mips_maybe_swap_ready (rtx *ready, int pos1, int pos2, int limit)
11553 && INSN_PRIORITY (ready[pos1]) + limit >= INSN_PRIORITY (ready[pos2]))
11556 temp = ready[pos1];
11557 ready[pos1] = ready[pos2];
11558 ready[pos2] = temp;
11562 /* Record whether last 74k AGEN instruction was a load or store. */
11564 static enum attr_type mips_last_74k_agen_insn = TYPE_UNKNOWN;
11566 /* Initialize mips_last_74k_agen_insn from INSN. A null argument
11567 resets to TYPE_UNKNOWN state. */
11570 mips_74k_agen_init (rtx insn)
11572 if (!insn || !NONJUMP_INSN_P (insn))
11573 mips_last_74k_agen_insn = TYPE_UNKNOWN;
11574 else if (USEFUL_INSN_P (insn))
11576 enum attr_type type = get_attr_type (insn);
11577 if (type == TYPE_LOAD || type == TYPE_STORE)
11578 mips_last_74k_agen_insn = type;
11582 /* A TUNE_74K helper function. The 74K AGEN pipeline likes multiple
11583 loads to be grouped together, and multiple stores to be grouped
11584 together. Swap things around in the ready queue to make this happen. */
11587 mips_74k_agen_reorder (rtx *ready, int nready)
11590 int store_pos, load_pos;
11595 for (i = nready - 1; i >= 0; i--)
11597 rtx insn = ready[i];
11598 if (USEFUL_INSN_P (insn))
11599 switch (get_attr_type (insn))
11602 if (store_pos == -1)
11607 if (load_pos == -1)
11616 if (load_pos == -1 || store_pos == -1)
11619 switch (mips_last_74k_agen_insn)
11622 /* Prefer to schedule loads since they have a higher latency. */
11624 /* Swap loads to the front of the queue. */
11625 mips_maybe_swap_ready (ready, load_pos, store_pos, 4);
11628 /* Swap stores to the front of the queue. */
11629 mips_maybe_swap_ready (ready, store_pos, load_pos, 4);
11636 /* Implement TARGET_SCHED_INIT. */
11639 mips_sched_init (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
11640 int max_ready ATTRIBUTE_UNUSED)
11642 mips_macc_chains_last_hilo = 0;
11643 vr4130_last_insn = 0;
11644 mips_74k_agen_init (NULL_RTX);
11647 /* Implement TARGET_SCHED_REORDER and TARG_SCHED_REORDER2. */
11650 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
11651 rtx *ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
11653 if (!reload_completed
11654 && TUNE_MACC_CHAINS
11656 mips_macc_chains_reorder (ready, *nreadyp);
11657 if (reload_completed
11659 && !TARGET_VR4130_ALIGN
11661 vr4130_reorder (ready, *nreadyp);
11663 mips_74k_agen_reorder (ready, *nreadyp);
11664 return mips_issue_rate ();
11667 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
11670 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
11671 rtx insn, int more)
11674 mips_74k_agen_init (insn);
11675 switch (GET_CODE (PATTERN (insn)))
11679 /* Don't count USEs and CLOBBERs against the issue rate. */
11684 if (!reload_completed && TUNE_MACC_CHAINS)
11685 mips_macc_chains_record (insn);
11686 vr4130_last_insn = insn;
11692 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
11693 dependencies have no cost, except on the 20Kc where output-dependence
11694 is treated like input-dependence. */
11697 mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
11698 rtx dep ATTRIBUTE_UNUSED, int cost)
11700 if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
11703 if (REG_NOTE_KIND (link) != 0)
11708 /* Return the number of instructions that can be issued per cycle. */
11711 mips_issue_rate (void)
11715 case PROCESSOR_74KC:
11716 case PROCESSOR_74KF2_1:
11717 case PROCESSOR_74KF1_1:
11718 case PROCESSOR_74KF3_2:
11719 /* The 74k is not strictly quad-issue cpu, but can be seen as one
11720 by the scheduler. It can issue 1 ALU, 1 AGEN and 2 FPU insns,
11721 but in reality only a maximum of 3 insns can be issued as the
11722 floating point load/stores also require a slot in the AGEN pipe. */
11725 case PROCESSOR_20KC:
11726 case PROCESSOR_R4130:
11727 case PROCESSOR_R5400:
11728 case PROCESSOR_R5500:
11729 case PROCESSOR_R7000:
11730 case PROCESSOR_R9000:
11733 case PROCESSOR_SB1:
11734 case PROCESSOR_SB1A:
11735 /* This is actually 4, but we get better performance if we claim 3.
11736 This is partly because of unwanted speculative code motion with the
11737 larger number, and partly because in most common cases we can't
11738 reach the theoretical max of 4. */
11746 /* Implements TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
11747 be as wide as the scheduling freedom in the DFA. */
11750 mips_multipass_dfa_lookahead (void)
11752 /* Can schedule up to 4 of the 6 function units in any one cycle. */
11759 /* Implements a store data bypass check. We need this because the cprestore
11760 pattern is type store, but defined using an UNSPEC. This UNSPEC causes the
11761 default routine to abort. We just return false for that case. */
11762 /* ??? Should try to give a better result here than assuming false. */
11765 mips_store_data_bypass_p (rtx out_insn, rtx in_insn)
11767 if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE)
11770 return ! store_data_bypass_p (out_insn, in_insn);
11773 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
11774 return the first operand of the associated "pref" or "prefx" insn. */
11777 mips_prefetch_cookie (rtx write, rtx locality)
11779 /* store_streamed / load_streamed. */
11780 if (INTVAL (locality) <= 0)
11781 return GEN_INT (INTVAL (write) + 4);
11783 /* store / load. */
11784 if (INTVAL (locality) <= 2)
11787 /* store_retained / load_retained. */
11788 return GEN_INT (INTVAL (write) + 6);
11791 /* MIPS builtin function support. */
11793 struct builtin_description
11795 /* The code of the main .md file instruction. See mips_builtin_type
11796 for more information. */
11797 enum insn_code icode;
11799 /* The floating-point comparison code to use with ICODE, if any. */
11800 enum mips_fp_condition cond;
11802 /* The name of the builtin function. */
11805 /* Specifies how the function should be expanded. */
11806 enum mips_builtin_type builtin_type;
11808 /* The function's prototype. */
11809 enum mips_function_type function_type;
11811 /* The target flags required for this function. */
11815 /* Define a MIPS_BUILTIN_DIRECT function for instruction CODE_FOR_mips_<INSN>.
11816 FUNCTION_TYPE and TARGET_FLAGS are builtin_description fields. */
11817 #define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
11818 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
11819 MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, TARGET_FLAGS }
11821 /* Define __builtin_mips_<INSN>_<COND>_{s,d}, both of which require
11823 #define CMP_SCALAR_BUILTINS(INSN, COND, TARGET_FLAGS) \
11824 { CODE_FOR_mips_ ## INSN ## _cond_s, MIPS_FP_COND_ ## COND, \
11825 "__builtin_mips_" #INSN "_" #COND "_s", \
11826 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, TARGET_FLAGS }, \
11827 { CODE_FOR_mips_ ## INSN ## _cond_d, MIPS_FP_COND_ ## COND, \
11828 "__builtin_mips_" #INSN "_" #COND "_d", \
11829 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, TARGET_FLAGS }
11831 /* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
11832 The lower and upper forms require TARGET_FLAGS while the any and all
11833 forms require MASK_MIPS3D. */
11834 #define CMP_PS_BUILTINS(INSN, COND, TARGET_FLAGS) \
11835 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11836 "__builtin_mips_any_" #INSN "_" #COND "_ps", \
11837 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
11838 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11839 "__builtin_mips_all_" #INSN "_" #COND "_ps", \
11840 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
11841 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11842 "__builtin_mips_lower_" #INSN "_" #COND "_ps", \
11843 MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }, \
11844 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11845 "__builtin_mips_upper_" #INSN "_" #COND "_ps", \
11846 MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }
11848 /* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s. The functions
11849 require MASK_MIPS3D. */
11850 #define CMP_4S_BUILTINS(INSN, COND) \
11851 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
11852 "__builtin_mips_any_" #INSN "_" #COND "_4s", \
11853 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11855 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
11856 "__builtin_mips_all_" #INSN "_" #COND "_4s", \
11857 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11860 /* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps. The comparison
11861 instruction requires TARGET_FLAGS. */
11862 #define MOVTF_BUILTINS(INSN, COND, TARGET_FLAGS) \
11863 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11864 "__builtin_mips_movt_" #INSN "_" #COND "_ps", \
11865 MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11867 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11868 "__builtin_mips_movf_" #INSN "_" #COND "_ps", \
11869 MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11872 /* Define all the builtins related to c.cond.fmt condition COND. */
11873 #define CMP_BUILTINS(COND) \
11874 MOVTF_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
11875 MOVTF_BUILTINS (cabs, COND, MASK_MIPS3D), \
11876 CMP_SCALAR_BUILTINS (cabs, COND, MASK_MIPS3D), \
11877 CMP_PS_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
11878 CMP_PS_BUILTINS (cabs, COND, MASK_MIPS3D), \
11879 CMP_4S_BUILTINS (c, COND), \
11880 CMP_4S_BUILTINS (cabs, COND)
11882 static const struct builtin_description mips_bdesc[] =
11884 DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11885 DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11886 DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11887 DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11888 DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, MASK_PAIRED_SINGLE_FLOAT),
11889 DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11890 DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11891 DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11893 DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
11894 MASK_PAIRED_SINGLE_FLOAT),
11895 DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11896 DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11897 DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11898 DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11900 DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
11901 DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
11902 DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11903 DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
11904 DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
11905 DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11907 DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
11908 DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
11909 DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11910 DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
11911 DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
11912 DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11914 MIPS_FP_CONDITIONS (CMP_BUILTINS)
11917 /* Builtin functions for the SB-1 processor. */
11919 #define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
11921 static const struct builtin_description sb1_bdesc[] =
11923 DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT)
11926 /* Builtin functions for DSP ASE. */
11928 #define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
11929 #define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
11930 #define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
11931 #define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
11932 #define CODE_FOR_mips_mul_ph CODE_FOR_mulv2hi3
11934 /* Define a MIPS_BUILTIN_DIRECT_NO_TARGET function for instruction
11935 CODE_FOR_mips_<INSN>. FUNCTION_TYPE and TARGET_FLAGS are
11936 builtin_description fields. */
11937 #define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
11938 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
11939 MIPS_BUILTIN_DIRECT_NO_TARGET, FUNCTION_TYPE, TARGET_FLAGS }
11941 /* Define __builtin_mips_bposge<VALUE>. <VALUE> is 32 for the MIPS32 DSP
11942 branch instruction. TARGET_FLAGS is a builtin_description field. */
11943 #define BPOSGE_BUILTIN(VALUE, TARGET_FLAGS) \
11944 { CODE_FOR_mips_bposge, 0, "__builtin_mips_bposge" #VALUE, \
11945 MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, TARGET_FLAGS }
11947 static const struct builtin_description dsp_bdesc[] =
11949 DIRECT_BUILTIN (addq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11950 DIRECT_BUILTIN (addq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11951 DIRECT_BUILTIN (addq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11952 DIRECT_BUILTIN (addu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11953 DIRECT_BUILTIN (addu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11954 DIRECT_BUILTIN (subq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11955 DIRECT_BUILTIN (subq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11956 DIRECT_BUILTIN (subq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11957 DIRECT_BUILTIN (subu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11958 DIRECT_BUILTIN (subu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11959 DIRECT_BUILTIN (addsc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11960 DIRECT_BUILTIN (addwc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11961 DIRECT_BUILTIN (modsub, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11962 DIRECT_BUILTIN (raddu_w_qb, MIPS_SI_FTYPE_V4QI, MASK_DSP),
11963 DIRECT_BUILTIN (absq_s_ph, MIPS_V2HI_FTYPE_V2HI, MASK_DSP),
11964 DIRECT_BUILTIN (absq_s_w, MIPS_SI_FTYPE_SI, MASK_DSP),
11965 DIRECT_BUILTIN (precrq_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
11966 DIRECT_BUILTIN (precrq_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
11967 DIRECT_BUILTIN (precrq_rs_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
11968 DIRECT_BUILTIN (precrqu_s_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
11969 DIRECT_BUILTIN (preceq_w_phl, MIPS_SI_FTYPE_V2HI, MASK_DSP),
11970 DIRECT_BUILTIN (preceq_w_phr, MIPS_SI_FTYPE_V2HI, MASK_DSP),
11971 DIRECT_BUILTIN (precequ_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11972 DIRECT_BUILTIN (precequ_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11973 DIRECT_BUILTIN (precequ_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11974 DIRECT_BUILTIN (precequ_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11975 DIRECT_BUILTIN (preceu_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11976 DIRECT_BUILTIN (preceu_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11977 DIRECT_BUILTIN (preceu_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11978 DIRECT_BUILTIN (preceu_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11979 DIRECT_BUILTIN (shll_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
11980 DIRECT_BUILTIN (shll_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11981 DIRECT_BUILTIN (shll_s_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11982 DIRECT_BUILTIN (shll_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11983 DIRECT_BUILTIN (shrl_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
11984 DIRECT_BUILTIN (shra_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11985 DIRECT_BUILTIN (shra_r_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11986 DIRECT_BUILTIN (shra_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11987 DIRECT_BUILTIN (muleu_s_ph_qbl, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
11988 DIRECT_BUILTIN (muleu_s_ph_qbr, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
11989 DIRECT_BUILTIN (mulq_rs_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11990 DIRECT_BUILTIN (muleq_s_w_phl, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
11991 DIRECT_BUILTIN (muleq_s_w_phr, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
11992 DIRECT_BUILTIN (bitrev, MIPS_SI_FTYPE_SI, MASK_DSP),
11993 DIRECT_BUILTIN (insv, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11994 DIRECT_BUILTIN (repl_qb, MIPS_V4QI_FTYPE_SI, MASK_DSP),
11995 DIRECT_BUILTIN (repl_ph, MIPS_V2HI_FTYPE_SI, MASK_DSP),
11996 DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
11997 DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
11998 DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
11999 DIRECT_BUILTIN (cmpgu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
12000 DIRECT_BUILTIN (cmpgu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
12001 DIRECT_BUILTIN (cmpgu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
12002 DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
12003 DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
12004 DIRECT_NO_TARGET_BUILTIN (cmp_le_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
12005 DIRECT_BUILTIN (pick_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
12006 DIRECT_BUILTIN (pick_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
12007 DIRECT_BUILTIN (packrl_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
12008 DIRECT_NO_TARGET_BUILTIN (wrdsp, MIPS_VOID_FTYPE_SI_SI, MASK_DSP),
12009 DIRECT_BUILTIN (rddsp, MIPS_SI_FTYPE_SI, MASK_DSP),
12010 DIRECT_BUILTIN (lbux, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
12011 DIRECT_BUILTIN (lhx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
12012 DIRECT_BUILTIN (lwx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
12013 BPOSGE_BUILTIN (32, MASK_DSP),
12015 /* The following are for the MIPS DSP ASE REV 2. */
12016 DIRECT_BUILTIN (absq_s_qb, MIPS_V4QI_FTYPE_V4QI, MASK_DSPR2),
12017 DIRECT_BUILTIN (addu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12018 DIRECT_BUILTIN (addu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12019 DIRECT_BUILTIN (adduh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
12020 DIRECT_BUILTIN (adduh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
12021 DIRECT_BUILTIN (append, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
12022 DIRECT_BUILTIN (balign, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
12023 DIRECT_BUILTIN (cmpgdu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
12024 DIRECT_BUILTIN (cmpgdu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
12025 DIRECT_BUILTIN (cmpgdu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
12026 DIRECT_BUILTIN (mul_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12027 DIRECT_BUILTIN (mul_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12028 DIRECT_BUILTIN (mulq_rs_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
12029 DIRECT_BUILTIN (mulq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12030 DIRECT_BUILTIN (mulq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
12031 DIRECT_BUILTIN (precr_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12032 DIRECT_BUILTIN (precr_sra_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
12033 DIRECT_BUILTIN (precr_sra_r_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
12034 DIRECT_BUILTIN (prepend, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
12035 DIRECT_BUILTIN (shra_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
12036 DIRECT_BUILTIN (shra_r_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
12037 DIRECT_BUILTIN (shrl_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSPR2),
12038 DIRECT_BUILTIN (subu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12039 DIRECT_BUILTIN (subu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12040 DIRECT_BUILTIN (subuh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
12041 DIRECT_BUILTIN (subuh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
12042 DIRECT_BUILTIN (addqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12043 DIRECT_BUILTIN (addqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12044 DIRECT_BUILTIN (addqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
12045 DIRECT_BUILTIN (addqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
12046 DIRECT_BUILTIN (subqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12047 DIRECT_BUILTIN (subqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12048 DIRECT_BUILTIN (subqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
12049 DIRECT_BUILTIN (subqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2)
12052 static const struct builtin_description dsp_32only_bdesc[] =
12054 DIRECT_BUILTIN (dpau_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
12055 DIRECT_BUILTIN (dpau_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
12056 DIRECT_BUILTIN (dpsu_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
12057 DIRECT_BUILTIN (dpsu_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
12058 DIRECT_BUILTIN (dpaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
12059 DIRECT_BUILTIN (dpsq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
12060 DIRECT_BUILTIN (mulsaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
12061 DIRECT_BUILTIN (dpaq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
12062 DIRECT_BUILTIN (dpsq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
12063 DIRECT_BUILTIN (maq_s_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
12064 DIRECT_BUILTIN (maq_s_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
12065 DIRECT_BUILTIN (maq_sa_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
12066 DIRECT_BUILTIN (maq_sa_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
12067 DIRECT_BUILTIN (extr_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
12068 DIRECT_BUILTIN (extr_r_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
12069 DIRECT_BUILTIN (extr_rs_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
12070 DIRECT_BUILTIN (extr_s_h, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
12071 DIRECT_BUILTIN (extp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
12072 DIRECT_BUILTIN (extpdp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
12073 DIRECT_BUILTIN (shilo, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
12074 DIRECT_BUILTIN (mthlip, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
12076 /* The following are for the MIPS DSP ASE REV 2. */
12077 DIRECT_BUILTIN (dpa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12078 DIRECT_BUILTIN (dps_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12079 DIRECT_BUILTIN (madd, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
12080 DIRECT_BUILTIN (maddu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
12081 DIRECT_BUILTIN (msub, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
12082 DIRECT_BUILTIN (msubu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
12083 DIRECT_BUILTIN (mulsa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12084 DIRECT_BUILTIN (mult, MIPS_DI_FTYPE_SI_SI, MASK_DSPR2),
12085 DIRECT_BUILTIN (multu, MIPS_DI_FTYPE_USI_USI, MASK_DSPR2),
12086 DIRECT_BUILTIN (dpax_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12087 DIRECT_BUILTIN (dpsx_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12088 DIRECT_BUILTIN (dpaqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12089 DIRECT_BUILTIN (dpaqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12090 DIRECT_BUILTIN (dpsqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12091 DIRECT_BUILTIN (dpsqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2)
12094 /* This helps provide a mapping from builtin function codes to bdesc
12099 /* The builtin function table that this entry describes. */
12100 const struct builtin_description *bdesc;
12102 /* The number of entries in the builtin function table. */
12105 /* The target processor that supports these builtin functions.
12106 PROCESSOR_MAX means we enable them for all processors. */
12107 enum processor_type proc;
12109 /* If the target has these flags, this builtin function table
12110 will not be supported. */
12111 int unsupported_target_flags;
12114 static const struct bdesc_map bdesc_arrays[] =
12116 { mips_bdesc, ARRAY_SIZE (mips_bdesc), PROCESSOR_MAX, 0 },
12117 { sb1_bdesc, ARRAY_SIZE (sb1_bdesc), PROCESSOR_SB1, 0 },
12118 { dsp_bdesc, ARRAY_SIZE (dsp_bdesc), PROCESSOR_MAX, 0 },
12119 { dsp_32only_bdesc, ARRAY_SIZE (dsp_32only_bdesc), PROCESSOR_MAX,
12123 /* Take the argument ARGNUM of the arglist of EXP and convert it into a form
12124 suitable for input operand OP of instruction ICODE. Return the value. */
12127 mips_prepare_builtin_arg (enum insn_code icode,
12128 unsigned int op, tree exp, unsigned int argnum)
12131 enum machine_mode mode;
12133 value = expand_normal (CALL_EXPR_ARG (exp, argnum));
12134 mode = insn_data[icode].operand[op].mode;
12135 if (!insn_data[icode].operand[op].predicate (value, mode))
12137 value = copy_to_mode_reg (mode, value);
12138 /* Check the predicate again. */
12139 if (!insn_data[icode].operand[op].predicate (value, mode))
12141 error ("invalid argument to builtin function");
12149 /* Return an rtx suitable for output operand OP of instruction ICODE.
12150 If TARGET is non-null, try to use it where possible. */
12153 mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
12155 enum machine_mode mode;
12157 mode = insn_data[icode].operand[op].mode;
12158 if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
12159 target = gen_reg_rtx (mode);
12164 /* Expand builtin functions. This is called from TARGET_EXPAND_BUILTIN. */
12167 mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
12168 enum machine_mode mode ATTRIBUTE_UNUSED,
12169 int ignore ATTRIBUTE_UNUSED)
12171 enum insn_code icode;
12172 enum mips_builtin_type type;
12174 unsigned int fcode;
12175 const struct builtin_description *bdesc;
12176 const struct bdesc_map *m;
12178 fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
12179 fcode = DECL_FUNCTION_CODE (fndecl);
12183 error ("built-in function %qs not supported for MIPS16",
12184 IDENTIFIER_POINTER (DECL_NAME (fndecl)));
12189 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
12191 if (fcode < m->size)
12194 icode = bdesc[fcode].icode;
12195 type = bdesc[fcode].builtin_type;
12205 case MIPS_BUILTIN_DIRECT:
12206 return mips_expand_builtin_direct (icode, target, exp, true);
12208 case MIPS_BUILTIN_DIRECT_NO_TARGET:
12209 return mips_expand_builtin_direct (icode, target, exp, false);
12211 case MIPS_BUILTIN_MOVT:
12212 case MIPS_BUILTIN_MOVF:
12213 return mips_expand_builtin_movtf (type, icode, bdesc[fcode].cond,
12216 case MIPS_BUILTIN_CMP_ANY:
12217 case MIPS_BUILTIN_CMP_ALL:
12218 case MIPS_BUILTIN_CMP_UPPER:
12219 case MIPS_BUILTIN_CMP_LOWER:
12220 case MIPS_BUILTIN_CMP_SINGLE:
12221 return mips_expand_builtin_compare (type, icode, bdesc[fcode].cond,
12224 case MIPS_BUILTIN_BPOSGE32:
12225 return mips_expand_builtin_bposge (type, target);
12232 /* Init builtin functions. This is called from TARGET_INIT_BUILTIN. */
12235 mips_init_builtins (void)
12237 const struct builtin_description *d;
12238 const struct bdesc_map *m;
12239 tree types[(int) MIPS_MAX_FTYPE_MAX];
12240 tree V2SF_type_node;
12241 tree V2HI_type_node;
12242 tree V4QI_type_node;
12243 unsigned int offset;
12245 /* We have only builtins for -mpaired-single, -mips3d and -mdsp. */
12246 if (!TARGET_PAIRED_SINGLE_FLOAT && !TARGET_DSP)
12249 if (TARGET_PAIRED_SINGLE_FLOAT)
12251 V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
12253 types[MIPS_V2SF_FTYPE_V2SF]
12254 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
12256 types[MIPS_V2SF_FTYPE_V2SF_V2SF]
12257 = build_function_type_list (V2SF_type_node,
12258 V2SF_type_node, V2SF_type_node, NULL_TREE);
12260 types[MIPS_V2SF_FTYPE_V2SF_V2SF_INT]
12261 = build_function_type_list (V2SF_type_node,
12262 V2SF_type_node, V2SF_type_node,
12263 integer_type_node, NULL_TREE);
12265 types[MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF]
12266 = build_function_type_list (V2SF_type_node,
12267 V2SF_type_node, V2SF_type_node,
12268 V2SF_type_node, V2SF_type_node, NULL_TREE);
12270 types[MIPS_V2SF_FTYPE_SF_SF]
12271 = build_function_type_list (V2SF_type_node,
12272 float_type_node, float_type_node, NULL_TREE);
12274 types[MIPS_INT_FTYPE_V2SF_V2SF]
12275 = build_function_type_list (integer_type_node,
12276 V2SF_type_node, V2SF_type_node, NULL_TREE);
12278 types[MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF]
12279 = build_function_type_list (integer_type_node,
12280 V2SF_type_node, V2SF_type_node,
12281 V2SF_type_node, V2SF_type_node, NULL_TREE);
12283 types[MIPS_INT_FTYPE_SF_SF]
12284 = build_function_type_list (integer_type_node,
12285 float_type_node, float_type_node, NULL_TREE);
12287 types[MIPS_INT_FTYPE_DF_DF]
12288 = build_function_type_list (integer_type_node,
12289 double_type_node, double_type_node, NULL_TREE);
12291 types[MIPS_SF_FTYPE_V2SF]
12292 = build_function_type_list (float_type_node, V2SF_type_node, NULL_TREE);
12294 types[MIPS_SF_FTYPE_SF]
12295 = build_function_type_list (float_type_node,
12296 float_type_node, NULL_TREE);
12298 types[MIPS_SF_FTYPE_SF_SF]
12299 = build_function_type_list (float_type_node,
12300 float_type_node, float_type_node, NULL_TREE);
12302 types[MIPS_DF_FTYPE_DF]
12303 = build_function_type_list (double_type_node,
12304 double_type_node, NULL_TREE);
12306 types[MIPS_DF_FTYPE_DF_DF]
12307 = build_function_type_list (double_type_node,
12308 double_type_node, double_type_node, NULL_TREE);
12313 V2HI_type_node = build_vector_type_for_mode (intHI_type_node, V2HImode);
12314 V4QI_type_node = build_vector_type_for_mode (intQI_type_node, V4QImode);
12316 types[MIPS_V2HI_FTYPE_V2HI_V2HI]
12317 = build_function_type_list (V2HI_type_node,
12318 V2HI_type_node, V2HI_type_node,
12321 types[MIPS_SI_FTYPE_SI_SI]
12322 = build_function_type_list (intSI_type_node,
12323 intSI_type_node, intSI_type_node,
12326 types[MIPS_V4QI_FTYPE_V4QI_V4QI]
12327 = build_function_type_list (V4QI_type_node,
12328 V4QI_type_node, V4QI_type_node,
12331 types[MIPS_SI_FTYPE_V4QI]
12332 = build_function_type_list (intSI_type_node,
12336 types[MIPS_V2HI_FTYPE_V2HI]
12337 = build_function_type_list (V2HI_type_node,
12341 types[MIPS_SI_FTYPE_SI]
12342 = build_function_type_list (intSI_type_node,
12346 types[MIPS_V4QI_FTYPE_V2HI_V2HI]
12347 = build_function_type_list (V4QI_type_node,
12348 V2HI_type_node, V2HI_type_node,
12351 types[MIPS_V2HI_FTYPE_SI_SI]
12352 = build_function_type_list (V2HI_type_node,
12353 intSI_type_node, intSI_type_node,
12356 types[MIPS_SI_FTYPE_V2HI]
12357 = build_function_type_list (intSI_type_node,
12361 types[MIPS_V2HI_FTYPE_V4QI]
12362 = build_function_type_list (V2HI_type_node,
12366 types[MIPS_V4QI_FTYPE_V4QI_SI]
12367 = build_function_type_list (V4QI_type_node,
12368 V4QI_type_node, intSI_type_node,
12371 types[MIPS_V2HI_FTYPE_V2HI_SI]
12372 = build_function_type_list (V2HI_type_node,
12373 V2HI_type_node, intSI_type_node,
12376 types[MIPS_V2HI_FTYPE_V4QI_V2HI]
12377 = build_function_type_list (V2HI_type_node,
12378 V4QI_type_node, V2HI_type_node,
12381 types[MIPS_SI_FTYPE_V2HI_V2HI]
12382 = build_function_type_list (intSI_type_node,
12383 V2HI_type_node, V2HI_type_node,
12386 types[MIPS_DI_FTYPE_DI_V4QI_V4QI]
12387 = build_function_type_list (intDI_type_node,
12388 intDI_type_node, V4QI_type_node, V4QI_type_node,
12391 types[MIPS_DI_FTYPE_DI_V2HI_V2HI]
12392 = build_function_type_list (intDI_type_node,
12393 intDI_type_node, V2HI_type_node, V2HI_type_node,
12396 types[MIPS_DI_FTYPE_DI_SI_SI]
12397 = build_function_type_list (intDI_type_node,
12398 intDI_type_node, intSI_type_node, intSI_type_node,
12401 types[MIPS_V4QI_FTYPE_SI]
12402 = build_function_type_list (V4QI_type_node,
12406 types[MIPS_V2HI_FTYPE_SI]
12407 = build_function_type_list (V2HI_type_node,
12411 types[MIPS_VOID_FTYPE_V4QI_V4QI]
12412 = build_function_type_list (void_type_node,
12413 V4QI_type_node, V4QI_type_node,
12416 types[MIPS_SI_FTYPE_V4QI_V4QI]
12417 = build_function_type_list (intSI_type_node,
12418 V4QI_type_node, V4QI_type_node,
12421 types[MIPS_VOID_FTYPE_V2HI_V2HI]
12422 = build_function_type_list (void_type_node,
12423 V2HI_type_node, V2HI_type_node,
12426 types[MIPS_SI_FTYPE_DI_SI]
12427 = build_function_type_list (intSI_type_node,
12428 intDI_type_node, intSI_type_node,
12431 types[MIPS_DI_FTYPE_DI_SI]
12432 = build_function_type_list (intDI_type_node,
12433 intDI_type_node, intSI_type_node,
12436 types[MIPS_VOID_FTYPE_SI_SI]
12437 = build_function_type_list (void_type_node,
12438 intSI_type_node, intSI_type_node,
12441 types[MIPS_SI_FTYPE_PTR_SI]
12442 = build_function_type_list (intSI_type_node,
12443 ptr_type_node, intSI_type_node,
12446 types[MIPS_SI_FTYPE_VOID]
12447 = build_function_type (intSI_type_node, void_list_node);
12451 types[MIPS_V4QI_FTYPE_V4QI]
12452 = build_function_type_list (V4QI_type_node,
12456 types[MIPS_SI_FTYPE_SI_SI_SI]
12457 = build_function_type_list (intSI_type_node,
12458 intSI_type_node, intSI_type_node,
12459 intSI_type_node, NULL_TREE);
12461 types[MIPS_DI_FTYPE_DI_USI_USI]
12462 = build_function_type_list (intDI_type_node,
12464 unsigned_intSI_type_node,
12465 unsigned_intSI_type_node, NULL_TREE);
12467 types[MIPS_DI_FTYPE_SI_SI]
12468 = build_function_type_list (intDI_type_node,
12469 intSI_type_node, intSI_type_node,
12472 types[MIPS_DI_FTYPE_USI_USI]
12473 = build_function_type_list (intDI_type_node,
12474 unsigned_intSI_type_node,
12475 unsigned_intSI_type_node, NULL_TREE);
12477 types[MIPS_V2HI_FTYPE_SI_SI_SI]
12478 = build_function_type_list (V2HI_type_node,
12479 intSI_type_node, intSI_type_node,
12480 intSI_type_node, NULL_TREE);
12485 /* Iterate through all of the bdesc arrays, initializing all of the
12486 builtin functions. */
12489 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
12491 if ((m->proc == PROCESSOR_MAX || (m->proc == mips_arch))
12492 && (m->unsupported_target_flags & target_flags) == 0)
12493 for (d = m->bdesc; d < &m->bdesc[m->size]; d++)
12494 if ((d->target_flags & target_flags) == d->target_flags)
12495 add_builtin_function (d->name, types[d->function_type],
12496 d - m->bdesc + offset,
12497 BUILT_IN_MD, NULL, NULL);
12502 /* Expand a MIPS_BUILTIN_DIRECT function. ICODE is the code of the
12503 .md pattern and CALL is the function expr with arguments. TARGET,
12504 if nonnull, suggests a good place to put the result.
12505 HAS_TARGET indicates the function must return something. */
12508 mips_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
12511 rtx ops[MAX_RECOG_OPERANDS];
12517 /* We save target to ops[0]. */
12518 ops[0] = mips_prepare_builtin_target (icode, 0, target);
12522 /* We need to test if the arglist is not zero. Some instructions have extra
12523 clobber registers. */
12524 for (; i < insn_data[icode].n_operands && i <= call_expr_nargs (exp); i++, j++)
12525 ops[i] = mips_prepare_builtin_arg (icode, i, exp, j);
12530 emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
12534 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
12538 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
12542 gcc_unreachable ();
12547 /* Expand a __builtin_mips_movt_*_ps() or __builtin_mips_movf_*_ps()
12548 function (TYPE says which). EXP is the tree for the function
12549 function, ICODE is the instruction that should be used to compare
12550 the first two arguments, and COND is the condition it should test.
12551 TARGET, if nonnull, suggests a good place to put the result. */
12554 mips_expand_builtin_movtf (enum mips_builtin_type type,
12555 enum insn_code icode, enum mips_fp_condition cond,
12556 rtx target, tree exp)
12558 rtx cmp_result, op0, op1;
12560 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
12561 op0 = mips_prepare_builtin_arg (icode, 1, exp, 0);
12562 op1 = mips_prepare_builtin_arg (icode, 2, exp, 1);
12563 emit_insn (GEN_FCN (icode) (cmp_result, op0, op1, GEN_INT (cond)));
12565 icode = CODE_FOR_mips_cond_move_tf_ps;
12566 target = mips_prepare_builtin_target (icode, 0, target);
12567 if (type == MIPS_BUILTIN_MOVT)
12569 op1 = mips_prepare_builtin_arg (icode, 2, exp, 2);
12570 op0 = mips_prepare_builtin_arg (icode, 1, exp, 3);
12574 op0 = mips_prepare_builtin_arg (icode, 1, exp, 2);
12575 op1 = mips_prepare_builtin_arg (icode, 2, exp, 3);
12577 emit_insn (gen_mips_cond_move_tf_ps (target, op0, op1, cmp_result));
12581 /* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
12582 into TARGET otherwise. Return TARGET. */
12585 mips_builtin_branch_and_move (rtx condition, rtx target,
12586 rtx value_if_true, rtx value_if_false)
12588 rtx true_label, done_label;
12590 true_label = gen_label_rtx ();
12591 done_label = gen_label_rtx ();
12593 /* First assume that CONDITION is false. */
12594 mips_emit_move (target, value_if_false);
12596 /* Branch to TRUE_LABEL if CONDITION is true and DONE_LABEL otherwise. */
12597 emit_jump_insn (gen_condjump (condition, true_label));
12598 emit_jump_insn (gen_jump (done_label));
12601 /* Fix TARGET if CONDITION is true. */
12602 emit_label (true_label);
12603 mips_emit_move (target, value_if_true);
12605 emit_label (done_label);
12609 /* Expand a comparison builtin of type BUILTIN_TYPE. ICODE is the code
12610 of the comparison instruction and COND is the condition it should test.
12611 EXP is the function call and arguments and TARGET, if nonnull,
12612 suggests a good place to put the boolean result. */
12615 mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
12616 enum insn_code icode, enum mips_fp_condition cond,
12617 rtx target, tree exp)
12619 rtx offset, condition, cmp_result, ops[MAX_RECOG_OPERANDS];
12623 if (target == 0 || GET_MODE (target) != SImode)
12624 target = gen_reg_rtx (SImode);
12626 /* Prepare the operands to the comparison. */
12627 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
12628 for (i = 1; i < insn_data[icode].n_operands - 1; i++, j++)
12629 ops[i] = mips_prepare_builtin_arg (icode, i, exp, j);
12631 switch (insn_data[icode].n_operands)
12634 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2], GEN_INT (cond)));
12638 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2],
12639 ops[3], ops[4], GEN_INT (cond)));
12643 gcc_unreachable ();
12646 /* If the comparison sets more than one register, we define the result
12647 to be 0 if all registers are false and -1 if all registers are true.
12648 The value of the complete result is indeterminate otherwise. */
12649 switch (builtin_type)
12651 case MIPS_BUILTIN_CMP_ALL:
12652 condition = gen_rtx_NE (VOIDmode, cmp_result, constm1_rtx);
12653 return mips_builtin_branch_and_move (condition, target,
12654 const0_rtx, const1_rtx);
12656 case MIPS_BUILTIN_CMP_UPPER:
12657 case MIPS_BUILTIN_CMP_LOWER:
12658 offset = GEN_INT (builtin_type == MIPS_BUILTIN_CMP_UPPER);
12659 condition = gen_single_cc (cmp_result, offset);
12660 return mips_builtin_branch_and_move (condition, target,
12661 const1_rtx, const0_rtx);
12664 condition = gen_rtx_NE (VOIDmode, cmp_result, const0_rtx);
12665 return mips_builtin_branch_and_move (condition, target,
12666 const1_rtx, const0_rtx);
12670 /* Expand a bposge builtin of type BUILTIN_TYPE. TARGET, if nonnull,
12671 suggests a good place to put the boolean result. */
12674 mips_expand_builtin_bposge (enum mips_builtin_type builtin_type, rtx target)
12676 rtx condition, cmp_result;
12679 if (target == 0 || GET_MODE (target) != SImode)
12680 target = gen_reg_rtx (SImode);
12682 cmp_result = gen_rtx_REG (CCDSPmode, CCDSP_PO_REGNUM);
12684 if (builtin_type == MIPS_BUILTIN_BPOSGE32)
12689 condition = gen_rtx_GE (VOIDmode, cmp_result, GEN_INT (cmp_value));
12690 return mips_builtin_branch_and_move (condition, target,
12691 const1_rtx, const0_rtx);
12694 /* Return true if we should force MIPS16 mode for the function named by
12695 the SYMBOL_REF SYMBOL, which belongs to DECL and has type TYPE.
12696 FIRST is true if this is the first time handling this decl. */
12699 mips_use_mips16_mode_p (rtx symbol, tree decl, int first, tree type)
12703 /* Explicit function attributes take precedence. */
12704 if (mips_mips16_type_p (type))
12706 if (mips_nomips16_type_p (type))
12709 /* A nested function should inherit the MIPS16 setting from its parent. */
12710 parent = decl_function_context (decl);
12712 return SYMBOL_REF_MIPS16_FUNC_P (XEXP (DECL_RTL (parent), 0));
12714 /* Handle -mflip-mips16. */
12715 if (TARGET_FLIP_MIPS16
12716 && !DECL_BUILT_IN (decl)
12717 && !DECL_ARTIFICIAL (decl))
12720 /* Use the setting we picked first time around. */
12721 return SYMBOL_REF_MIPS16_FUNC_P (symbol);
12723 mips16_flipper = !mips16_flipper;
12724 if (mips16_flipper)
12725 return !mips_base_mips16;
12728 return mips_base_mips16;
12731 /* Set SYMBOL_REF_FLAGS for the SYMBOL_REF inside RTL, which belongs to DECL.
12732 FIRST is true if this is the first time handling this decl. */
12735 mips_encode_section_info (tree decl, rtx rtl, int first)
12737 default_encode_section_info (decl, rtl, first);
12739 if (TREE_CODE (decl) == FUNCTION_DECL)
12741 rtx symbol = XEXP (rtl, 0);
12742 tree type = TREE_TYPE (decl);
12744 if ((TARGET_LONG_CALLS && !mips_near_type_p (type))
12745 || mips_far_type_p (type))
12746 SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
12748 if (mips_use_mips16_mode_p (symbol, decl, first, type))
12750 if (flag_pic || TARGET_ABICALLS)
12751 sorry ("MIPS16 PIC");
12753 SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_MIPS16_FUNC;
12758 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. Some code models use the incoming
12759 value of PIC_FUNCTION_ADDR_REGNUM to set up the global pointer. */
12762 mips_extra_live_on_entry (bitmap regs)
12764 if (TARGET_USE_GOT && !TARGET_ABSOLUTE_ABICALLS)
12765 bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
12768 /* SImode values are represented as sign-extended to DImode. */
12771 mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
12773 if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
12774 return SIGN_EXTEND;
12779 /* MIPS implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
12782 mips_output_dwarf_dtprel (FILE *file, int size, rtx x)
12787 fputs ("\t.dtprelword\t", file);
12791 fputs ("\t.dtpreldword\t", file);
12795 gcc_unreachable ();
12797 output_addr_const (file, x);
12798 fputs ("+0x8000", file);
12801 #include "gt-mips.h"