1 /* Subroutines used for MIPS code generation.
2 Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
5 Contributed by A. Lichnewsky, lich@inria.inria.fr.
6 Changes by Michael Meissner, meissner@osf.org.
7 64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
8 Brendan Eich, brendan@microunity.com.
10 This file is part of GCC.
12 GCC is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 3, or (at your option)
17 GCC is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with GCC; see the file COPYING3. If not see
24 <http://www.gnu.org/licenses/>. */
28 #include "coretypes.h"
33 #include "hard-reg-set.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "insn-attr.h"
53 #include "target-def.h"
54 #include "integrate.h"
55 #include "langhooks.h"
56 #include "cfglayout.h"
57 #include "sched-int.h"
58 #include "tree-gimple.h"
60 #include "diagnostic.h"
62 /* True if X is an unspec wrapper around a SYMBOL_REF or LABEL_REF. */
63 #define UNSPEC_ADDRESS_P(X) \
64 (GET_CODE (X) == UNSPEC \
65 && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
66 && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
68 /* Extract the symbol or label from UNSPEC wrapper X. */
69 #define UNSPEC_ADDRESS(X) \
72 /* Extract the symbol type from UNSPEC wrapper X. */
73 #define UNSPEC_ADDRESS_TYPE(X) \
74 ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
76 /* The maximum distance between the top of the stack frame and the
77 value $sp has when we save and restore registers.
79 The value for normal-mode code must be a SMALL_OPERAND and must
80 preserve the maximum stack alignment. We therefore use a value
81 of 0x7ff0 in this case.
83 MIPS16e SAVE and RESTORE instructions can adjust the stack pointer by
84 up to 0x7f8 bytes and can usually save or restore all the registers
85 that we need to save or restore. (Note that we can only use these
86 instructions for o32, for which the stack alignment is 8 bytes.)
88 We use a maximum gap of 0x100 or 0x400 for MIPS16 code when SAVE and
89 RESTORE are not available. We can then use unextended instructions
90 to save and restore registers, and to allocate and deallocate the top
92 #define MIPS_MAX_FIRST_STACK_STEP \
93 (!TARGET_MIPS16 ? 0x7ff0 \
94 : GENERATE_MIPS16E_SAVE_RESTORE ? 0x7f8 \
95 : TARGET_64BIT ? 0x100 : 0x400)
97 /* True if INSN is a mips.md pattern or asm statement. */
98 #define USEFUL_INSN_P(INSN) \
100 && GET_CODE (PATTERN (INSN)) != USE \
101 && GET_CODE (PATTERN (INSN)) != CLOBBER \
102 && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
103 && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
105 /* If INSN is a delayed branch sequence, return the first instruction
106 in the sequence, otherwise return INSN itself. */
107 #define SEQ_BEGIN(INSN) \
108 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
109 ? XVECEXP (PATTERN (INSN), 0, 0) \
112 /* Likewise for the last instruction in a delayed branch sequence. */
113 #define SEQ_END(INSN) \
114 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
115 ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1) \
118 /* Execute the following loop body with SUBINSN set to each instruction
119 between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive. */
120 #define FOR_EACH_SUBINSN(SUBINSN, INSN) \
121 for ((SUBINSN) = SEQ_BEGIN (INSN); \
122 (SUBINSN) != NEXT_INSN (SEQ_END (INSN)); \
123 (SUBINSN) = NEXT_INSN (SUBINSN))
125 /* True if bit BIT is set in VALUE. */
126 #define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0)
128 /* Classifies an address.
131 A natural register + offset address. The register satisfies
132 mips_valid_base_register_p and the offset is a const_arith_operand.
135 A LO_SUM rtx. The first operand is a valid base register and
136 the second operand is a symbolic address.
139 A signed 16-bit constant address.
142 A constant symbolic address (equivalent to CONSTANT_SYMBOLIC). */
143 enum mips_address_type {
150 /* Classifies the prototype of a builtin function. */
151 enum mips_function_type
153 MIPS_V2SF_FTYPE_V2SF,
154 MIPS_V2SF_FTYPE_V2SF_V2SF,
155 MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
156 MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,
157 MIPS_V2SF_FTYPE_SF_SF,
158 MIPS_INT_FTYPE_V2SF_V2SF,
159 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF,
160 MIPS_INT_FTYPE_SF_SF,
161 MIPS_INT_FTYPE_DF_DF,
168 /* For MIPS DSP ASE */
170 MIPS_DI_FTYPE_DI_SI_SI,
171 MIPS_DI_FTYPE_DI_V2HI_V2HI,
172 MIPS_DI_FTYPE_DI_V4QI_V4QI,
174 MIPS_SI_FTYPE_PTR_SI,
178 MIPS_SI_FTYPE_V2HI_V2HI,
180 MIPS_SI_FTYPE_V4QI_V4QI,
183 MIPS_V2HI_FTYPE_SI_SI,
184 MIPS_V2HI_FTYPE_V2HI,
185 MIPS_V2HI_FTYPE_V2HI_SI,
186 MIPS_V2HI_FTYPE_V2HI_V2HI,
187 MIPS_V2HI_FTYPE_V4QI,
188 MIPS_V2HI_FTYPE_V4QI_V2HI,
190 MIPS_V4QI_FTYPE_V2HI_V2HI,
191 MIPS_V4QI_FTYPE_V4QI_SI,
192 MIPS_V4QI_FTYPE_V4QI_V4QI,
193 MIPS_VOID_FTYPE_SI_SI,
194 MIPS_VOID_FTYPE_V2HI_V2HI,
195 MIPS_VOID_FTYPE_V4QI_V4QI,
197 /* For MIPS DSP REV 2 ASE. */
198 MIPS_V4QI_FTYPE_V4QI,
199 MIPS_SI_FTYPE_SI_SI_SI,
200 MIPS_DI_FTYPE_DI_USI_USI,
202 MIPS_DI_FTYPE_USI_USI,
203 MIPS_V2HI_FTYPE_SI_SI_SI,
209 /* Specifies how a builtin function should be converted into rtl. */
210 enum mips_builtin_type
212 /* The builtin corresponds directly to an .md pattern. The return
213 value is mapped to operand 0 and the arguments are mapped to
214 operands 1 and above. */
217 /* The builtin corresponds directly to an .md pattern. There is no return
218 value and the arguments are mapped to operands 0 and above. */
219 MIPS_BUILTIN_DIRECT_NO_TARGET,
221 /* The builtin corresponds to a comparison instruction followed by
222 a mips_cond_move_tf_ps pattern. The first two arguments are the
223 values to compare and the second two arguments are the vector
224 operands for the movt.ps or movf.ps instruction (in assembly order). */
228 /* The builtin corresponds to a V2SF comparison instruction. Operand 0
229 of this instruction is the result of the comparison, which has mode
230 CCV2 or CCV4. The function arguments are mapped to operands 1 and
231 above. The function's return value is an SImode boolean that is
232 true under the following conditions:
234 MIPS_BUILTIN_CMP_ANY: one of the registers is true
235 MIPS_BUILTIN_CMP_ALL: all of the registers are true
236 MIPS_BUILTIN_CMP_LOWER: the first register is true
237 MIPS_BUILTIN_CMP_UPPER: the second register is true. */
238 MIPS_BUILTIN_CMP_ANY,
239 MIPS_BUILTIN_CMP_ALL,
240 MIPS_BUILTIN_CMP_UPPER,
241 MIPS_BUILTIN_CMP_LOWER,
243 /* As above, but the instruction only sets a single $fcc register. */
244 MIPS_BUILTIN_CMP_SINGLE,
246 /* For generating bposge32 branch instructions in MIPS32 DSP ASE. */
247 MIPS_BUILTIN_BPOSGE32
250 /* Invokes MACRO (COND) for each c.cond.fmt condition. */
251 #define MIPS_FP_CONDITIONS(MACRO) \
269 /* Enumerates the codes above as MIPS_FP_COND_<X>. */
270 #define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
271 enum mips_fp_condition {
272 MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
275 /* Index X provides the string representation of MIPS_FP_COND_<X>. */
276 #define STRINGIFY(X) #X
277 static const char *const mips_fp_conditions[] = {
278 MIPS_FP_CONDITIONS (STRINGIFY)
281 /* A function to save or store a register. The first argument is the
282 register and the second is the stack slot. */
283 typedef void (*mips_save_restore_fn) (rtx, rtx);
285 struct mips16_constant;
286 struct mips_arg_info;
287 struct mips_address_info;
288 struct mips_integer_op;
291 static bool mips_valid_base_register_p (rtx, enum machine_mode, int);
292 static bool mips_classify_address (struct mips_address_info *, rtx,
293 enum machine_mode, int);
294 static bool mips_cannot_force_const_mem (rtx);
295 static bool mips_use_blocks_for_constant_p (enum machine_mode, const_rtx);
296 static int mips_symbol_insns (enum mips_symbol_type, enum machine_mode);
297 static bool mips16_unextended_reference_p (enum machine_mode mode, rtx, rtx);
298 static rtx mips_force_temporary (rtx, rtx);
299 static rtx mips_unspec_offset_high (rtx, rtx, rtx, enum mips_symbol_type);
300 static rtx mips_add_offset (rtx, rtx, HOST_WIDE_INT);
301 static unsigned int mips_build_shift (struct mips_integer_op *, HOST_WIDE_INT);
302 static unsigned int mips_build_lower (struct mips_integer_op *,
303 unsigned HOST_WIDE_INT);
304 static unsigned int mips_build_integer (struct mips_integer_op *,
305 unsigned HOST_WIDE_INT);
306 static void mips_legitimize_const_move (enum machine_mode, rtx, rtx);
307 static int m16_check_op (rtx, int, int, int);
308 static bool mips_rtx_costs (rtx, int, int, int *);
309 static int mips_address_cost (rtx);
310 static void mips_emit_compare (enum rtx_code *, rtx *, rtx *, bool);
311 static void mips_load_call_address (rtx, rtx, int);
312 static bool mips_function_ok_for_sibcall (tree, tree);
313 static void mips_block_move_straight (rtx, rtx, HOST_WIDE_INT);
314 static void mips_adjust_block_mem (rtx, HOST_WIDE_INT, rtx *, rtx *);
315 static void mips_block_move_loop (rtx, rtx, HOST_WIDE_INT);
316 static void mips_arg_info (const CUMULATIVE_ARGS *, enum machine_mode,
317 tree, int, struct mips_arg_info *);
318 static bool mips_get_unaligned_mem (rtx *, unsigned int, int, rtx *, rtx *);
319 static void mips_set_architecture (const struct mips_cpu_info *);
320 static void mips_set_tune (const struct mips_cpu_info *);
321 static bool mips_handle_option (size_t, const char *, int);
322 static struct machine_function *mips_init_machine_status (void);
323 static void print_operand_reloc (FILE *, rtx, enum mips_symbol_context,
325 static void mips_file_start (void);
326 static int mips_small_data_pattern_1 (rtx *, void *);
327 static int mips_rewrite_small_data_1 (rtx *, void *);
328 static bool mips_function_has_gp_insn (void);
329 static unsigned int mips_global_pointer (void);
330 static bool mips_save_reg_p (unsigned int);
331 static void mips_save_restore_reg (enum machine_mode, int, HOST_WIDE_INT,
332 mips_save_restore_fn);
333 static void mips_for_each_saved_reg (HOST_WIDE_INT, mips_save_restore_fn);
334 static void mips_output_cplocal (void);
335 static void mips_emit_loadgp (void);
336 static void mips_output_function_prologue (FILE *, HOST_WIDE_INT);
337 static void mips_set_frame_expr (rtx);
338 static rtx mips_frame_set (rtx, rtx);
339 static void mips_save_reg (rtx, rtx);
340 static void mips_output_function_epilogue (FILE *, HOST_WIDE_INT);
341 static void mips_restore_reg (rtx, rtx);
342 static void mips_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
343 HOST_WIDE_INT, tree);
344 static section *mips_select_rtx_section (enum machine_mode, rtx,
345 unsigned HOST_WIDE_INT);
346 static section *mips_function_rodata_section (tree);
347 static bool mips_in_small_data_p (const_tree);
348 static bool mips_use_anchors_for_symbol_p (const_rtx);
349 static int mips_fpr_return_fields (const_tree, tree *);
350 static bool mips_return_in_msb (const_tree);
351 static rtx mips_return_fpr_pair (enum machine_mode mode,
352 enum machine_mode mode1, HOST_WIDE_INT,
353 enum machine_mode mode2, HOST_WIDE_INT);
354 static rtx mips16_gp_pseudo_reg (void);
355 static void mips16_fp_args (FILE *, int, int);
356 static void build_mips16_function_stub (FILE *);
357 static rtx dump_constants_1 (enum machine_mode, rtx, rtx);
358 static void dump_constants (struct mips16_constant *, rtx);
359 static int mips16_insn_length (rtx);
360 static int mips16_rewrite_pool_refs (rtx *, void *);
361 static void mips16_lay_out_constants (void);
362 static void mips_sim_reset (struct mips_sim *);
363 static void mips_sim_init (struct mips_sim *, state_t);
364 static void mips_sim_next_cycle (struct mips_sim *);
365 static void mips_sim_wait_reg (struct mips_sim *, rtx, rtx);
366 static int mips_sim_wait_regs_2 (rtx *, void *);
367 static void mips_sim_wait_regs_1 (rtx *, void *);
368 static void mips_sim_wait_regs (struct mips_sim *, rtx);
369 static void mips_sim_wait_units (struct mips_sim *, rtx);
370 static void mips_sim_wait_insn (struct mips_sim *, rtx);
371 static void mips_sim_record_set (rtx, const_rtx, void *);
372 static void mips_sim_issue_insn (struct mips_sim *, rtx);
373 static void mips_sim_issue_nop (struct mips_sim *);
374 static void mips_sim_finish_insn (struct mips_sim *, rtx);
375 static void vr4130_avoid_branch_rt_conflict (rtx);
376 static void vr4130_align_insns (void);
377 static void mips_avoid_hazard (rtx, rtx, int *, rtx *, rtx);
378 static void mips_avoid_hazards (void);
379 static void mips_reorg (void);
380 static bool mips_strict_matching_cpu_name_p (const char *, const char *);
381 static bool mips_matching_cpu_name_p (const char *, const char *);
382 static const struct mips_cpu_info *mips_parse_cpu (const char *);
383 static const struct mips_cpu_info *mips_cpu_info_from_isa (int);
384 static bool mips_return_in_memory (const_tree, const_tree);
385 static bool mips_strict_argument_naming (CUMULATIVE_ARGS *);
386 static void mips_macc_chains_record (rtx);
387 static void mips_macc_chains_reorder (rtx *, int);
388 static void vr4130_true_reg_dependence_p_1 (rtx, const_rtx, void *);
389 static bool vr4130_true_reg_dependence_p (rtx);
390 static bool vr4130_swap_insns_p (rtx, rtx);
391 static void vr4130_reorder (rtx *, int);
392 static void mips_promote_ready (rtx *, int, int);
393 static void mips_sched_init (FILE *, int, int);
394 static int mips_sched_reorder (FILE *, int, rtx *, int *, int);
395 static int mips_variable_issue (FILE *, int, rtx, int);
396 static int mips_adjust_cost (rtx, rtx, rtx, int);
397 static int mips_issue_rate (void);
398 static int mips_multipass_dfa_lookahead (void);
399 static void mips_init_libfuncs (void);
400 static void mips_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
402 static tree mips_build_builtin_va_list (void);
403 static tree mips_gimplify_va_arg_expr (tree, tree, tree *, tree *);
404 static bool mips_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode mode,
406 static bool mips_callee_copies (CUMULATIVE_ARGS *, enum machine_mode mode,
408 static int mips_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode mode,
410 static bool mips_valid_pointer_mode (enum machine_mode);
411 static bool mips_scalar_mode_supported_p (enum machine_mode);
412 static bool mips_vector_mode_supported_p (enum machine_mode);
413 static rtx mips_prepare_builtin_arg (enum insn_code, unsigned int, tree, unsigned int);
414 static rtx mips_prepare_builtin_target (enum insn_code, unsigned int, rtx);
415 static rtx mips_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
416 static void mips_init_builtins (void);
417 static rtx mips_expand_builtin_direct (enum insn_code, rtx, tree, bool);
418 static rtx mips_expand_builtin_movtf (enum mips_builtin_type,
419 enum insn_code, enum mips_fp_condition,
421 static rtx mips_expand_builtin_compare (enum mips_builtin_type,
422 enum insn_code, enum mips_fp_condition,
424 static rtx mips_expand_builtin_bposge (enum mips_builtin_type, rtx);
425 static void mips_encode_section_info (tree, rtx, int);
426 static void mips_extra_live_on_entry (bitmap);
427 static int mips_comp_type_attributes (const_tree, const_tree);
428 static void mips_set_mips16_mode (int);
429 static void mips_set_current_function (tree);
430 static int mips_mode_rep_extended (enum machine_mode, enum machine_mode);
431 static bool mips_offset_within_alignment_p (rtx, HOST_WIDE_INT);
432 static void mips_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
434 /* Structure to be filled in by compute_frame_size with register
435 save masks, and offsets for the current function. */
437 struct mips_frame_info GTY(())
439 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
440 HOST_WIDE_INT var_size; /* # bytes that variables take up */
441 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
442 HOST_WIDE_INT cprestore_size; /* # bytes that the .cprestore slot takes up */
443 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
444 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
445 unsigned int mask; /* mask of saved gp registers */
446 unsigned int fmask; /* mask of saved fp registers */
447 HOST_WIDE_INT gp_save_offset; /* offset from vfp to store gp registers */
448 HOST_WIDE_INT fp_save_offset; /* offset from vfp to store fp registers */
449 HOST_WIDE_INT gp_sp_offset; /* offset from new sp to store gp registers */
450 HOST_WIDE_INT fp_sp_offset; /* offset from new sp to store fp registers */
451 bool initialized; /* true if frame size already calculated */
452 int num_gp; /* number of gp registers saved */
453 int num_fp; /* number of fp registers saved */
456 struct machine_function GTY(()) {
457 /* Pseudo-reg holding the value of $28 in a mips16 function which
458 refers to GP relative global variables. */
459 rtx mips16_gp_pseudo_rtx;
461 /* The number of extra stack bytes taken up by register varargs.
462 This area is allocated by the callee at the very top of the frame. */
465 /* Current frame information, calculated by compute_frame_size. */
466 struct mips_frame_info frame;
468 /* The register to use as the global pointer within this function. */
469 unsigned int global_pointer;
471 /* True if mips_adjust_insn_length should ignore an instruction's
473 bool ignore_hazard_length_p;
475 /* True if the whole function is suitable for .set noreorder and
477 bool all_noreorder_p;
479 /* True if the function is known to have an instruction that needs $gp. */
482 /* True if we have emitted an instruction to initialize
483 mips16_gp_pseudo_rtx. */
484 bool initialized_mips16_gp_pseudo_p;
487 /* Information about a single argument. */
490 /* True if the argument is passed in a floating-point register, or
491 would have been if we hadn't run out of registers. */
494 /* The number of words passed in registers, rounded up. */
495 unsigned int reg_words;
497 /* For EABI, the offset of the first register from GP_ARG_FIRST or
498 FP_ARG_FIRST. For other ABIs, the offset of the first register from
499 the start of the ABI's argument structure (see the CUMULATIVE_ARGS
500 comment for details).
502 The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
504 unsigned int reg_offset;
506 /* The number of words that must be passed on the stack, rounded up. */
507 unsigned int stack_words;
509 /* The offset from the start of the stack overflow area of the argument's
510 first stack word. Only meaningful when STACK_WORDS is nonzero. */
511 unsigned int stack_offset;
515 /* Information about an address described by mips_address_type.
521 REG is the base register and OFFSET is the constant offset.
524 REG is the register that contains the high part of the address,
525 OFFSET is the symbolic address being referenced and SYMBOL_TYPE
526 is the type of OFFSET's symbol.
529 SYMBOL_TYPE is the type of symbol being referenced. */
531 struct mips_address_info
533 enum mips_address_type type;
536 enum mips_symbol_type symbol_type;
540 /* One stage in a constant building sequence. These sequences have
544 A = A CODE[1] VALUE[1]
545 A = A CODE[2] VALUE[2]
548 where A is an accumulator, each CODE[i] is a binary rtl operation
549 and each VALUE[i] is a constant integer. */
550 struct mips_integer_op {
552 unsigned HOST_WIDE_INT value;
556 /* The largest number of operations needed to load an integer constant.
557 The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
558 When the lowest bit is clear, we can try, but reject a sequence with
559 an extra SLL at the end. */
560 #define MIPS_MAX_INTEGER_OPS 7
562 /* Information about a MIPS16e SAVE or RESTORE instruction. */
563 struct mips16e_save_restore_info {
564 /* The number of argument registers saved by a SAVE instruction.
565 0 for RESTORE instructions. */
568 /* Bit X is set if the instruction saves or restores GPR X. */
571 /* The total number of bytes to allocate. */
575 /* Global variables for machine-dependent things. */
577 /* Threshold for data being put into the small data/bss area, instead
578 of the normal data area. */
579 int mips_section_threshold = -1;
581 /* Count the number of .file directives, so that .loc is up to date. */
582 int num_source_filenames = 0;
584 /* Count the number of sdb related labels are generated (to find block
585 start and end boundaries). */
586 int sdb_label_count = 0;
588 /* Next label # for each statement for Silicon Graphics IRIS systems. */
591 /* Name of the file containing the current function. */
592 const char *current_function_file = "";
594 /* Number of nested .set noreorder, noat, nomacro, and volatile requests. */
600 /* The next branch instruction is a branch likely, not branch normal. */
601 int mips_branch_likely;
603 /* The operands passed to the last cmpMM expander. */
606 /* The target cpu for code generation. */
607 enum processor_type mips_arch;
608 const struct mips_cpu_info *mips_arch_info;
610 /* The target cpu for optimization and scheduling. */
611 enum processor_type mips_tune;
612 const struct mips_cpu_info *mips_tune_info;
614 /* Which instruction set architecture to use. */
617 /* Which ABI to use. */
618 int mips_abi = MIPS_ABI_DEFAULT;
620 /* Cost information to use. */
621 const struct mips_rtx_cost_data *mips_cost;
623 /* Remember the ambient target flags, excluding mips16. */
624 static int mips_base_target_flags;
625 /* The mips16 command-line target flags only. */
626 static bool mips_base_mips16;
627 /* Similar copies of option settings. */
628 static int mips_base_schedule_insns; /* flag_schedule_insns */
629 static int mips_base_reorder_blocks_and_partition; /* flag_reorder... */
630 static int mips_base_move_loop_invariants; /* flag_move_loop_invariants */
631 static int mips_base_align_loops; /* align_loops */
632 static int mips_base_align_jumps; /* align_jumps */
633 static int mips_base_align_functions; /* align_functions */
634 static GTY(()) int mips16_flipper;
636 /* The -mtext-loads setting. */
637 enum mips_code_readable_setting mips_code_readable = CODE_READABLE_YES;
639 /* The architecture selected by -mipsN. */
640 static const struct mips_cpu_info *mips_isa_info;
642 /* If TRUE, we split addresses into their high and low parts in the RTL. */
643 int mips_split_addresses;
645 /* Mode used for saving/restoring general purpose registers. */
646 static enum machine_mode gpr_mode;
648 /* Array giving truth value on whether or not a given hard register
649 can support a given mode. */
650 char mips_hard_regno_mode_ok[(int)MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
652 /* List of all MIPS punctuation characters used by print_operand. */
653 char mips_print_operand_punct[256];
655 /* Map GCC register number to debugger register number. */
656 int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
657 int mips_dwarf_regno[FIRST_PSEUDO_REGISTER];
659 /* A copy of the original flag_delayed_branch: see override_options. */
660 static int mips_flag_delayed_branch;
662 static GTY (()) int mips_output_filename_first_time = 1;
664 /* mips_split_p[X] is true if symbols of type X can be split by
665 mips_split_symbol(). */
666 bool mips_split_p[NUM_SYMBOL_TYPES];
668 /* mips_lo_relocs[X] is the relocation to use when a symbol of type X
669 appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
670 if they are matched by a special .md file pattern. */
671 static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];
673 /* Likewise for HIGHs. */
674 static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
676 /* Map hard register number to register class */
677 const enum reg_class mips_regno_to_class[] =
679 LEA_REGS, LEA_REGS, M16_NA_REGS, V1_REG,
680 M16_REGS, M16_REGS, M16_REGS, M16_REGS,
681 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
682 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
683 M16_NA_REGS, M16_NA_REGS, LEA_REGS, LEA_REGS,
684 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
685 T_REG, PIC_FN_ADDR_REG, LEA_REGS, LEA_REGS,
686 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
687 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
688 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
689 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
690 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
691 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
692 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
693 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
694 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
695 MD0_REG, MD1_REG, NO_REGS, ST_REGS,
696 ST_REGS, ST_REGS, ST_REGS, ST_REGS,
697 ST_REGS, ST_REGS, ST_REGS, NO_REGS,
698 NO_REGS, ALL_REGS, ALL_REGS, NO_REGS,
699 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
700 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
701 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
702 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
703 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
704 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
705 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
706 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
707 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
708 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
709 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
710 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
711 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
712 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
713 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
714 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
715 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
716 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
717 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
718 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
719 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
720 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
721 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
722 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
723 DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS,
724 DSP_ACC_REGS, DSP_ACC_REGS, ALL_REGS, ALL_REGS,
725 ALL_REGS, ALL_REGS, ALL_REGS, ALL_REGS
728 /* Table of machine dependent attributes. */
729 const struct attribute_spec mips_attribute_table[] =
731 { "long_call", 0, 0, false, true, true, NULL },
732 { "far", 0, 0, false, true, true, NULL },
733 { "near", 0, 0, false, true, true, NULL },
734 /* Switch MIPS16 ASE on and off per-function. */
735 { "mips16", 0, 0, false, true, true, NULL },
736 { "nomips16", 0, 0, false, true, true, NULL },
737 { NULL, 0, 0, false, false, false, NULL }
740 /* A table describing all the processors gcc knows about. Names are
741 matched in the order listed. The first mention of an ISA level is
742 taken as the canonical name for that ISA.
744 To ease comparison, please keep this table in the same order
745 as gas's mips_cpu_info_table[]. Please also make sure that
746 MIPS_ISA_LEVEL_SPEC and MIPS_ARCH_FLOAT_SPEC handle all -march
747 options correctly. */
748 const struct mips_cpu_info mips_cpu_info_table[] = {
749 /* Entries for generic ISAs */
750 { "mips1", PROCESSOR_R3000, 1 },
751 { "mips2", PROCESSOR_R6000, 2 },
752 { "mips3", PROCESSOR_R4000, 3 },
753 { "mips4", PROCESSOR_R8000, 4 },
754 { "mips32", PROCESSOR_4KC, 32 },
755 { "mips32r2", PROCESSOR_M4K, 33 },
756 { "mips64", PROCESSOR_5KC, 64 },
759 { "r3000", PROCESSOR_R3000, 1 },
760 { "r2000", PROCESSOR_R3000, 1 }, /* = r3000 */
761 { "r3900", PROCESSOR_R3900, 1 },
764 { "r6000", PROCESSOR_R6000, 2 },
767 { "r4000", PROCESSOR_R4000, 3 },
768 { "vr4100", PROCESSOR_R4100, 3 },
769 { "vr4111", PROCESSOR_R4111, 3 },
770 { "vr4120", PROCESSOR_R4120, 3 },
771 { "vr4130", PROCESSOR_R4130, 3 },
772 { "vr4300", PROCESSOR_R4300, 3 },
773 { "r4400", PROCESSOR_R4000, 3 }, /* = r4000 */
774 { "r4600", PROCESSOR_R4600, 3 },
775 { "orion", PROCESSOR_R4600, 3 }, /* = r4600 */
776 { "r4650", PROCESSOR_R4650, 3 },
779 { "r8000", PROCESSOR_R8000, 4 },
780 { "vr5000", PROCESSOR_R5000, 4 },
781 { "vr5400", PROCESSOR_R5400, 4 },
782 { "vr5500", PROCESSOR_R5500, 4 },
783 { "rm7000", PROCESSOR_R7000, 4 },
784 { "rm9000", PROCESSOR_R9000, 4 },
787 { "4kc", PROCESSOR_4KC, 32 },
788 { "4km", PROCESSOR_4KC, 32 }, /* = 4kc */
789 { "4kp", PROCESSOR_4KP, 32 },
790 { "4ksc", PROCESSOR_4KC, 32 },
792 /* MIPS32 Release 2 */
793 { "m4k", PROCESSOR_M4K, 33 },
794 { "4kec", PROCESSOR_4KC, 33 },
795 { "4kem", PROCESSOR_4KC, 33 },
796 { "4kep", PROCESSOR_4KP, 33 },
797 { "4ksd", PROCESSOR_4KC, 33 },
799 { "24kc", PROCESSOR_24KC, 33 },
800 { "24kf2_1", PROCESSOR_24KF2_1, 33 },
801 { "24kf", PROCESSOR_24KF2_1, 33 },
802 { "24kf1_1", PROCESSOR_24KF1_1, 33 },
803 { "24kfx", PROCESSOR_24KF1_1, 33 },
804 { "24kx", PROCESSOR_24KF1_1, 33 },
806 { "24kec", PROCESSOR_24KC, 33 }, /* 24K with DSP */
807 { "24kef2_1", PROCESSOR_24KF2_1, 33 },
808 { "24kef", PROCESSOR_24KF2_1, 33 },
809 { "24kef1_1", PROCESSOR_24KF1_1, 33 },
810 { "24kefx", PROCESSOR_24KF1_1, 33 },
811 { "24kex", PROCESSOR_24KF1_1, 33 },
813 { "34kc", PROCESSOR_24KC, 33 }, /* 34K with MT/DSP */
814 { "34kf2_1", PROCESSOR_24KF2_1, 33 },
815 { "34kf", PROCESSOR_24KF2_1, 33 },
816 { "34kf1_1", PROCESSOR_24KF1_1, 33 },
817 { "34kfx", PROCESSOR_24KF1_1, 33 },
818 { "34kx", PROCESSOR_24KF1_1, 33 },
820 { "74kc", PROCESSOR_74KC, 33 }, /* 74K with DSPr2 */
821 { "74kf2_1", PROCESSOR_74KF2_1, 33 },
822 { "74kf", PROCESSOR_74KF2_1, 33 },
823 { "74kf1_1", PROCESSOR_74KF1_1, 33 },
824 { "74kfx", PROCESSOR_74KF1_1, 33 },
825 { "74kx", PROCESSOR_74KF1_1, 33 },
826 { "74kf3_2", PROCESSOR_74KF3_2, 33 },
829 { "5kc", PROCESSOR_5KC, 64 },
830 { "5kf", PROCESSOR_5KF, 64 },
831 { "20kc", PROCESSOR_20KC, 64 },
832 { "sb1", PROCESSOR_SB1, 64 },
833 { "sb1a", PROCESSOR_SB1A, 64 },
834 { "sr71000", PROCESSOR_SR71000, 64 },
840 /* Default costs. If these are used for a processor we should look
841 up the actual costs. */
842 #define DEFAULT_COSTS COSTS_N_INSNS (6), /* fp_add */ \
843 COSTS_N_INSNS (7), /* fp_mult_sf */ \
844 COSTS_N_INSNS (8), /* fp_mult_df */ \
845 COSTS_N_INSNS (23), /* fp_div_sf */ \
846 COSTS_N_INSNS (36), /* fp_div_df */ \
847 COSTS_N_INSNS (10), /* int_mult_si */ \
848 COSTS_N_INSNS (10), /* int_mult_di */ \
849 COSTS_N_INSNS (69), /* int_div_si */ \
850 COSTS_N_INSNS (69), /* int_div_di */ \
851 2, /* branch_cost */ \
852 4 /* memory_latency */
854 /* Need to replace these with the costs of calling the appropriate
856 #define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */ \
857 COSTS_N_INSNS (256), /* fp_mult_sf */ \
858 COSTS_N_INSNS (256), /* fp_mult_df */ \
859 COSTS_N_INSNS (256), /* fp_div_sf */ \
860 COSTS_N_INSNS (256) /* fp_div_df */
862 static struct mips_rtx_cost_data const mips_rtx_cost_optimize_size =
864 COSTS_N_INSNS (1), /* fp_add */
865 COSTS_N_INSNS (1), /* fp_mult_sf */
866 COSTS_N_INSNS (1), /* fp_mult_df */
867 COSTS_N_INSNS (1), /* fp_div_sf */
868 COSTS_N_INSNS (1), /* fp_div_df */
869 COSTS_N_INSNS (1), /* int_mult_si */
870 COSTS_N_INSNS (1), /* int_mult_di */
871 COSTS_N_INSNS (1), /* int_div_si */
872 COSTS_N_INSNS (1), /* int_div_di */
874 4 /* memory_latency */
877 static struct mips_rtx_cost_data const mips_rtx_cost_data[PROCESSOR_MAX] =
880 COSTS_N_INSNS (2), /* fp_add */
881 COSTS_N_INSNS (4), /* fp_mult_sf */
882 COSTS_N_INSNS (5), /* fp_mult_df */
883 COSTS_N_INSNS (12), /* fp_div_sf */
884 COSTS_N_INSNS (19), /* fp_div_df */
885 COSTS_N_INSNS (12), /* int_mult_si */
886 COSTS_N_INSNS (12), /* int_mult_di */
887 COSTS_N_INSNS (35), /* int_div_si */
888 COSTS_N_INSNS (35), /* int_div_di */
890 4 /* memory_latency */
895 COSTS_N_INSNS (6), /* int_mult_si */
896 COSTS_N_INSNS (6), /* int_mult_di */
897 COSTS_N_INSNS (36), /* int_div_si */
898 COSTS_N_INSNS (36), /* int_div_di */
900 4 /* memory_latency */
904 COSTS_N_INSNS (36), /* int_mult_si */
905 COSTS_N_INSNS (36), /* int_mult_di */
906 COSTS_N_INSNS (37), /* int_div_si */
907 COSTS_N_INSNS (37), /* int_div_di */
909 4 /* memory_latency */
913 COSTS_N_INSNS (4), /* int_mult_si */
914 COSTS_N_INSNS (11), /* int_mult_di */
915 COSTS_N_INSNS (36), /* int_div_si */
916 COSTS_N_INSNS (68), /* int_div_di */
918 4 /* memory_latency */
921 COSTS_N_INSNS (4), /* fp_add */
922 COSTS_N_INSNS (4), /* fp_mult_sf */
923 COSTS_N_INSNS (5), /* fp_mult_df */
924 COSTS_N_INSNS (17), /* fp_div_sf */
925 COSTS_N_INSNS (32), /* fp_div_df */
926 COSTS_N_INSNS (4), /* int_mult_si */
927 COSTS_N_INSNS (11), /* int_mult_di */
928 COSTS_N_INSNS (36), /* int_div_si */
929 COSTS_N_INSNS (68), /* int_div_di */
931 4 /* memory_latency */
934 COSTS_N_INSNS (4), /* fp_add */
935 COSTS_N_INSNS (4), /* fp_mult_sf */
936 COSTS_N_INSNS (5), /* fp_mult_df */
937 COSTS_N_INSNS (17), /* fp_div_sf */
938 COSTS_N_INSNS (32), /* fp_div_df */
939 COSTS_N_INSNS (4), /* int_mult_si */
940 COSTS_N_INSNS (7), /* int_mult_di */
941 COSTS_N_INSNS (42), /* int_div_si */
942 COSTS_N_INSNS (72), /* int_div_di */
944 4 /* memory_latency */
948 COSTS_N_INSNS (5), /* int_mult_si */
949 COSTS_N_INSNS (5), /* int_mult_di */
950 COSTS_N_INSNS (41), /* int_div_si */
951 COSTS_N_INSNS (41), /* int_div_di */
953 4 /* memory_latency */
956 COSTS_N_INSNS (8), /* fp_add */
957 COSTS_N_INSNS (8), /* fp_mult_sf */
958 COSTS_N_INSNS (10), /* fp_mult_df */
959 COSTS_N_INSNS (34), /* fp_div_sf */
960 COSTS_N_INSNS (64), /* fp_div_df */
961 COSTS_N_INSNS (5), /* int_mult_si */
962 COSTS_N_INSNS (5), /* int_mult_di */
963 COSTS_N_INSNS (41), /* int_div_si */
964 COSTS_N_INSNS (41), /* int_div_di */
966 4 /* memory_latency */
969 COSTS_N_INSNS (4), /* fp_add */
970 COSTS_N_INSNS (4), /* fp_mult_sf */
971 COSTS_N_INSNS (5), /* fp_mult_df */
972 COSTS_N_INSNS (17), /* fp_div_sf */
973 COSTS_N_INSNS (32), /* fp_div_df */
974 COSTS_N_INSNS (5), /* int_mult_si */
975 COSTS_N_INSNS (5), /* int_mult_di */
976 COSTS_N_INSNS (41), /* int_div_si */
977 COSTS_N_INSNS (41), /* int_div_di */
979 4 /* memory_latency */
983 COSTS_N_INSNS (5), /* int_mult_si */
984 COSTS_N_INSNS (5), /* int_mult_di */
985 COSTS_N_INSNS (41), /* int_div_si */
986 COSTS_N_INSNS (41), /* int_div_di */
988 4 /* memory_latency */
991 COSTS_N_INSNS (8), /* fp_add */
992 COSTS_N_INSNS (8), /* fp_mult_sf */
993 COSTS_N_INSNS (10), /* fp_mult_df */
994 COSTS_N_INSNS (34), /* fp_div_sf */
995 COSTS_N_INSNS (64), /* fp_div_df */
996 COSTS_N_INSNS (5), /* int_mult_si */
997 COSTS_N_INSNS (5), /* int_mult_di */
998 COSTS_N_INSNS (41), /* int_div_si */
999 COSTS_N_INSNS (41), /* int_div_di */
1000 1, /* branch_cost */
1001 4 /* memory_latency */
1004 COSTS_N_INSNS (4), /* fp_add */
1005 COSTS_N_INSNS (4), /* fp_mult_sf */
1006 COSTS_N_INSNS (5), /* fp_mult_df */
1007 COSTS_N_INSNS (17), /* fp_div_sf */
1008 COSTS_N_INSNS (32), /* fp_div_df */
1009 COSTS_N_INSNS (5), /* int_mult_si */
1010 COSTS_N_INSNS (5), /* int_mult_di */
1011 COSTS_N_INSNS (41), /* int_div_si */
1012 COSTS_N_INSNS (41), /* int_div_di */
1013 1, /* branch_cost */
1014 4 /* memory_latency */
1017 COSTS_N_INSNS (6), /* fp_add */
1018 COSTS_N_INSNS (6), /* fp_mult_sf */
1019 COSTS_N_INSNS (7), /* fp_mult_df */
1020 COSTS_N_INSNS (25), /* fp_div_sf */
1021 COSTS_N_INSNS (48), /* fp_div_df */
1022 COSTS_N_INSNS (5), /* int_mult_si */
1023 COSTS_N_INSNS (5), /* int_mult_di */
1024 COSTS_N_INSNS (41), /* int_div_si */
1025 COSTS_N_INSNS (41), /* int_div_di */
1026 1, /* branch_cost */
1027 4 /* memory_latency */
1033 COSTS_N_INSNS (2), /* fp_add */
1034 COSTS_N_INSNS (4), /* fp_mult_sf */
1035 COSTS_N_INSNS (5), /* fp_mult_df */
1036 COSTS_N_INSNS (12), /* fp_div_sf */
1037 COSTS_N_INSNS (19), /* fp_div_df */
1038 COSTS_N_INSNS (2), /* int_mult_si */
1039 COSTS_N_INSNS (2), /* int_mult_di */
1040 COSTS_N_INSNS (35), /* int_div_si */
1041 COSTS_N_INSNS (35), /* int_div_di */
1042 1, /* branch_cost */
1043 4 /* memory_latency */
1046 COSTS_N_INSNS (3), /* fp_add */
1047 COSTS_N_INSNS (5), /* fp_mult_sf */
1048 COSTS_N_INSNS (6), /* fp_mult_df */
1049 COSTS_N_INSNS (15), /* fp_div_sf */
1050 COSTS_N_INSNS (16), /* fp_div_df */
1051 COSTS_N_INSNS (17), /* int_mult_si */
1052 COSTS_N_INSNS (17), /* int_mult_di */
1053 COSTS_N_INSNS (38), /* int_div_si */
1054 COSTS_N_INSNS (38), /* int_div_di */
1055 2, /* branch_cost */
1056 6 /* memory_latency */
1059 COSTS_N_INSNS (6), /* fp_add */
1060 COSTS_N_INSNS (7), /* fp_mult_sf */
1061 COSTS_N_INSNS (8), /* fp_mult_df */
1062 COSTS_N_INSNS (23), /* fp_div_sf */
1063 COSTS_N_INSNS (36), /* fp_div_df */
1064 COSTS_N_INSNS (10), /* int_mult_si */
1065 COSTS_N_INSNS (10), /* int_mult_di */
1066 COSTS_N_INSNS (69), /* int_div_si */
1067 COSTS_N_INSNS (69), /* int_div_di */
1068 2, /* branch_cost */
1069 6 /* memory_latency */
1081 /* The only costs that appear to be updated here are
1082 integer multiplication. */
1084 COSTS_N_INSNS (4), /* int_mult_si */
1085 COSTS_N_INSNS (6), /* int_mult_di */
1086 COSTS_N_INSNS (69), /* int_div_si */
1087 COSTS_N_INSNS (69), /* int_div_di */
1088 1, /* branch_cost */
1089 4 /* memory_latency */
1101 COSTS_N_INSNS (6), /* fp_add */
1102 COSTS_N_INSNS (4), /* fp_mult_sf */
1103 COSTS_N_INSNS (5), /* fp_mult_df */
1104 COSTS_N_INSNS (23), /* fp_div_sf */
1105 COSTS_N_INSNS (36), /* fp_div_df */
1106 COSTS_N_INSNS (5), /* int_mult_si */
1107 COSTS_N_INSNS (5), /* int_mult_di */
1108 COSTS_N_INSNS (36), /* int_div_si */
1109 COSTS_N_INSNS (36), /* int_div_di */
1110 1, /* branch_cost */
1111 4 /* memory_latency */
1114 COSTS_N_INSNS (6), /* fp_add */
1115 COSTS_N_INSNS (5), /* fp_mult_sf */
1116 COSTS_N_INSNS (6), /* fp_mult_df */
1117 COSTS_N_INSNS (30), /* fp_div_sf */
1118 COSTS_N_INSNS (59), /* fp_div_df */
1119 COSTS_N_INSNS (3), /* int_mult_si */
1120 COSTS_N_INSNS (4), /* int_mult_di */
1121 COSTS_N_INSNS (42), /* int_div_si */
1122 COSTS_N_INSNS (74), /* int_div_di */
1123 1, /* branch_cost */
1124 4 /* memory_latency */
1127 COSTS_N_INSNS (6), /* fp_add */
1128 COSTS_N_INSNS (5), /* fp_mult_sf */
1129 COSTS_N_INSNS (6), /* fp_mult_df */
1130 COSTS_N_INSNS (30), /* fp_div_sf */
1131 COSTS_N_INSNS (59), /* fp_div_df */
1132 COSTS_N_INSNS (5), /* int_mult_si */
1133 COSTS_N_INSNS (9), /* int_mult_di */
1134 COSTS_N_INSNS (42), /* int_div_si */
1135 COSTS_N_INSNS (74), /* int_div_di */
1136 1, /* branch_cost */
1137 4 /* memory_latency */
1140 /* The only costs that are changed here are
1141 integer multiplication. */
1142 COSTS_N_INSNS (6), /* fp_add */
1143 COSTS_N_INSNS (7), /* fp_mult_sf */
1144 COSTS_N_INSNS (8), /* fp_mult_df */
1145 COSTS_N_INSNS (23), /* fp_div_sf */
1146 COSTS_N_INSNS (36), /* fp_div_df */
1147 COSTS_N_INSNS (5), /* int_mult_si */
1148 COSTS_N_INSNS (9), /* int_mult_di */
1149 COSTS_N_INSNS (69), /* int_div_si */
1150 COSTS_N_INSNS (69), /* int_div_di */
1151 1, /* branch_cost */
1152 4 /* memory_latency */
1158 /* The only costs that are changed here are
1159 integer multiplication. */
1160 COSTS_N_INSNS (6), /* fp_add */
1161 COSTS_N_INSNS (7), /* fp_mult_sf */
1162 COSTS_N_INSNS (8), /* fp_mult_df */
1163 COSTS_N_INSNS (23), /* fp_div_sf */
1164 COSTS_N_INSNS (36), /* fp_div_df */
1165 COSTS_N_INSNS (3), /* int_mult_si */
1166 COSTS_N_INSNS (8), /* int_mult_di */
1167 COSTS_N_INSNS (69), /* int_div_si */
1168 COSTS_N_INSNS (69), /* int_div_di */
1169 1, /* branch_cost */
1170 4 /* memory_latency */
1173 /* These costs are the same as the SB-1A below. */
1174 COSTS_N_INSNS (4), /* fp_add */
1175 COSTS_N_INSNS (4), /* fp_mult_sf */
1176 COSTS_N_INSNS (4), /* fp_mult_df */
1177 COSTS_N_INSNS (24), /* fp_div_sf */
1178 COSTS_N_INSNS (32), /* fp_div_df */
1179 COSTS_N_INSNS (3), /* int_mult_si */
1180 COSTS_N_INSNS (4), /* int_mult_di */
1181 COSTS_N_INSNS (36), /* int_div_si */
1182 COSTS_N_INSNS (68), /* int_div_di */
1183 1, /* branch_cost */
1184 4 /* memory_latency */
1187 /* These costs are the same as the SB-1 above. */
1188 COSTS_N_INSNS (4), /* fp_add */
1189 COSTS_N_INSNS (4), /* fp_mult_sf */
1190 COSTS_N_INSNS (4), /* fp_mult_df */
1191 COSTS_N_INSNS (24), /* fp_div_sf */
1192 COSTS_N_INSNS (32), /* fp_div_df */
1193 COSTS_N_INSNS (3), /* int_mult_si */
1194 COSTS_N_INSNS (4), /* int_mult_di */
1195 COSTS_N_INSNS (36), /* int_div_si */
1196 COSTS_N_INSNS (68), /* int_div_di */
1197 1, /* branch_cost */
1198 4 /* memory_latency */
1205 /* If a MIPS16e SAVE or RESTORE instruction saves or restores register
1206 mips16e_s2_s8_regs[X], it must also save the registers in indexes
1207 X + 1 onwards. Likewise mips16e_a0_a3_regs. */
1208 static const unsigned char mips16e_s2_s8_regs[] = {
1209 30, 23, 22, 21, 20, 19, 18
1211 static const unsigned char mips16e_a0_a3_regs[] = {
1215 /* A list of the registers that can be saved by the MIPS16e SAVE instruction,
1216 ordered from the uppermost in memory to the lowest in memory. */
1217 static const unsigned char mips16e_save_restore_regs[] = {
1218 31, 30, 23, 22, 21, 20, 19, 18, 17, 16, 7, 6, 5, 4
1221 /* Initialize the GCC target structure. */
1222 #undef TARGET_ASM_ALIGNED_HI_OP
1223 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
1224 #undef TARGET_ASM_ALIGNED_SI_OP
1225 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
1226 #undef TARGET_ASM_ALIGNED_DI_OP
1227 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
1229 #undef TARGET_ASM_FUNCTION_PROLOGUE
1230 #define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
1231 #undef TARGET_ASM_FUNCTION_EPILOGUE
1232 #define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
1233 #undef TARGET_ASM_SELECT_RTX_SECTION
1234 #define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
1235 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
1236 #define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
1238 #undef TARGET_SCHED_INIT
1239 #define TARGET_SCHED_INIT mips_sched_init
1240 #undef TARGET_SCHED_REORDER
1241 #define TARGET_SCHED_REORDER mips_sched_reorder
1242 #undef TARGET_SCHED_REORDER2
1243 #define TARGET_SCHED_REORDER2 mips_sched_reorder
1244 #undef TARGET_SCHED_VARIABLE_ISSUE
1245 #define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
1246 #undef TARGET_SCHED_ADJUST_COST
1247 #define TARGET_SCHED_ADJUST_COST mips_adjust_cost
1248 #undef TARGET_SCHED_ISSUE_RATE
1249 #define TARGET_SCHED_ISSUE_RATE mips_issue_rate
1250 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1251 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
1252 mips_multipass_dfa_lookahead
1254 #undef TARGET_DEFAULT_TARGET_FLAGS
1255 #define TARGET_DEFAULT_TARGET_FLAGS \
1257 | TARGET_CPU_DEFAULT \
1258 | TARGET_ENDIAN_DEFAULT \
1259 | TARGET_FP_EXCEPTIONS_DEFAULT \
1260 | MASK_CHECK_ZERO_DIV \
1262 #undef TARGET_HANDLE_OPTION
1263 #define TARGET_HANDLE_OPTION mips_handle_option
1265 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1266 #define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
1268 #undef TARGET_SET_CURRENT_FUNCTION
1269 #define TARGET_SET_CURRENT_FUNCTION mips_set_current_function
1271 #undef TARGET_VALID_POINTER_MODE
1272 #define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
1273 #undef TARGET_RTX_COSTS
1274 #define TARGET_RTX_COSTS mips_rtx_costs
1275 #undef TARGET_ADDRESS_COST
1276 #define TARGET_ADDRESS_COST mips_address_cost
1278 #undef TARGET_IN_SMALL_DATA_P
1279 #define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
1281 #undef TARGET_MACHINE_DEPENDENT_REORG
1282 #define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
1284 #undef TARGET_ASM_FILE_START
1285 #define TARGET_ASM_FILE_START mips_file_start
1286 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
1287 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
1289 #undef TARGET_INIT_LIBFUNCS
1290 #define TARGET_INIT_LIBFUNCS mips_init_libfuncs
1292 #undef TARGET_BUILD_BUILTIN_VA_LIST
1293 #define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
1294 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1295 #define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
1297 #undef TARGET_PROMOTE_FUNCTION_ARGS
1298 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
1299 #undef TARGET_PROMOTE_FUNCTION_RETURN
1300 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
1301 #undef TARGET_PROMOTE_PROTOTYPES
1302 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
1304 #undef TARGET_RETURN_IN_MEMORY
1305 #define TARGET_RETURN_IN_MEMORY mips_return_in_memory
1306 #undef TARGET_RETURN_IN_MSB
1307 #define TARGET_RETURN_IN_MSB mips_return_in_msb
1309 #undef TARGET_ASM_OUTPUT_MI_THUNK
1310 #define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
1311 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1312 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1314 #undef TARGET_SETUP_INCOMING_VARARGS
1315 #define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
1316 #undef TARGET_STRICT_ARGUMENT_NAMING
1317 #define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
1318 #undef TARGET_MUST_PASS_IN_STACK
1319 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
1320 #undef TARGET_PASS_BY_REFERENCE
1321 #define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
1322 #undef TARGET_CALLEE_COPIES
1323 #define TARGET_CALLEE_COPIES mips_callee_copies
1324 #undef TARGET_ARG_PARTIAL_BYTES
1325 #define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
1327 #undef TARGET_MODE_REP_EXTENDED
1328 #define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended
1330 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1331 #define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
1333 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1334 #define TARGET_SCALAR_MODE_SUPPORTED_P mips_scalar_mode_supported_p
1336 #undef TARGET_INIT_BUILTINS
1337 #define TARGET_INIT_BUILTINS mips_init_builtins
1338 #undef TARGET_EXPAND_BUILTIN
1339 #define TARGET_EXPAND_BUILTIN mips_expand_builtin
1341 #undef TARGET_HAVE_TLS
1342 #define TARGET_HAVE_TLS HAVE_AS_TLS
1344 #undef TARGET_CANNOT_FORCE_CONST_MEM
1345 #define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem
1347 #undef TARGET_ENCODE_SECTION_INFO
1348 #define TARGET_ENCODE_SECTION_INFO mips_encode_section_info
1350 #undef TARGET_ATTRIBUTE_TABLE
1351 #define TARGET_ATTRIBUTE_TABLE mips_attribute_table
1353 #undef TARGET_EXTRA_LIVE_ON_ENTRY
1354 #define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry
1356 #undef TARGET_MIN_ANCHOR_OFFSET
1357 #define TARGET_MIN_ANCHOR_OFFSET -32768
1358 #undef TARGET_MAX_ANCHOR_OFFSET
1359 #define TARGET_MAX_ANCHOR_OFFSET 32767
1360 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1361 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
1362 #undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
1363 #define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p
1365 #undef TARGET_COMP_TYPE_ATTRIBUTES
1366 #define TARGET_COMP_TYPE_ATTRIBUTES mips_comp_type_attributes
1368 #ifdef HAVE_AS_DTPRELWORD
1369 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1370 #define TARGET_ASM_OUTPUT_DWARF_DTPREL mips_output_dwarf_dtprel
1373 struct gcc_target targetm = TARGET_INITIALIZER;
1376 /* Predicates to test for presence of "near" and "far"/"long_call"
1377 attributes on the given TYPE. */
1380 mips_near_type_p (const_tree type)
1382 return lookup_attribute ("near", TYPE_ATTRIBUTES (type)) != NULL;
1386 mips_far_type_p (const_tree type)
1388 return (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type)) != NULL
1389 || lookup_attribute ("far", TYPE_ATTRIBUTES (type)) != NULL);
1392 /* Similar predicates for "mips16"/"nomips16" attributes. */
1395 mips_mips16_type_p (const_tree type)
1397 return lookup_attribute ("mips16", TYPE_ATTRIBUTES (type)) != NULL;
1401 mips_nomips16_type_p (const_tree type)
1403 return lookup_attribute ("nomips16", TYPE_ATTRIBUTES (type)) != NULL;
1406 /* Return 0 if the attributes for two types are incompatible, 1 if they
1407 are compatible, and 2 if they are nearly compatible (which causes a
1408 warning to be generated). */
1411 mips_comp_type_attributes (const_tree type1, const_tree type2)
1413 /* Check for mismatch of non-default calling convention. */
1414 if (TREE_CODE (type1) != FUNCTION_TYPE)
1417 /* Disallow mixed near/far attributes. */
1418 if (mips_far_type_p (type1) && mips_near_type_p (type2))
1420 if (mips_near_type_p (type1) && mips_far_type_p (type2))
1423 /* Mips16/nomips16 attributes must match exactly. */
1424 if (mips_nomips16_type_p (type1) != mips_nomips16_type_p (type2)
1425 || mips_mips16_type_p (type1) != mips_mips16_type_p (type2))
1431 /* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
1432 and *OFFSET_PTR. Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise. */
1435 mips_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr)
1437 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
1439 *base_ptr = XEXP (x, 0);
1440 *offset_ptr = INTVAL (XEXP (x, 1));
1449 /* Return true if SYMBOL_REF X is associated with a global symbol
1450 (in the STB_GLOBAL sense). */
1453 mips_global_symbol_p (const_rtx x)
1455 const_tree const decl = SYMBOL_REF_DECL (x);
1458 return !SYMBOL_REF_LOCAL_P (x);
1460 /* Weakref symbols are not TREE_PUBLIC, but their targets are global
1461 or weak symbols. Relocations in the object file will be against
1462 the target symbol, so it's that symbol's binding that matters here. */
1463 return DECL_P (decl) && (TREE_PUBLIC (decl) || DECL_WEAK (decl));
1466 /* Return true if SYMBOL_REF X binds locally. */
1469 mips_symbol_binds_local_p (const_rtx x)
1471 return (SYMBOL_REF_DECL (x)
1472 ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
1473 : SYMBOL_REF_LOCAL_P (x));
1476 /* Return true if rtx constants of mode MODE should be put into a small
1480 mips_rtx_constant_in_small_data_p (enum machine_mode mode)
1482 return (!TARGET_EMBEDDED_DATA
1483 && TARGET_LOCAL_SDATA
1484 && GET_MODE_SIZE (mode) <= mips_section_threshold);
1487 /* Return the method that should be used to access SYMBOL_REF or
1488 LABEL_REF X in context CONTEXT. */
1490 static enum mips_symbol_type
1491 mips_classify_symbol (const_rtx x, enum mips_symbol_context context)
1494 return SYMBOL_GOT_DISP;
1496 if (GET_CODE (x) == LABEL_REF)
1498 /* LABEL_REFs are used for jump tables as well as text labels.
1499 Only return SYMBOL_PC_RELATIVE if we know the label is in
1500 the text section. */
1501 if (TARGET_MIPS16_SHORT_JUMP_TABLES)
1502 return SYMBOL_PC_RELATIVE;
1503 if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
1504 return SYMBOL_GOT_PAGE_OFST;
1505 return SYMBOL_ABSOLUTE;
1508 gcc_assert (GET_CODE (x) == SYMBOL_REF);
1510 if (SYMBOL_REF_TLS_MODEL (x))
1513 if (CONSTANT_POOL_ADDRESS_P (x))
1515 if (TARGET_MIPS16_TEXT_LOADS)
1516 return SYMBOL_PC_RELATIVE;
1518 if (TARGET_MIPS16_PCREL_LOADS && context == SYMBOL_CONTEXT_MEM)
1519 return SYMBOL_PC_RELATIVE;
1521 if (mips_rtx_constant_in_small_data_p (get_pool_mode (x)))
1522 return SYMBOL_GP_RELATIVE;
1525 /* Do not use small-data accesses for weak symbols; they may end up
1528 && SYMBOL_REF_SMALL_P (x)
1529 && !SYMBOL_REF_WEAK (x))
1530 return SYMBOL_GP_RELATIVE;
1532 /* Don't use GOT accesses for locally-binding symbols when -mno-shared
1535 && !(TARGET_ABSOLUTE_ABICALLS && mips_symbol_binds_local_p (x)))
1537 /* There are three cases to consider:
1539 - o32 PIC (either with or without explicit relocs)
1540 - n32/n64 PIC without explicit relocs
1541 - n32/n64 PIC with explicit relocs
1543 In the first case, both local and global accesses will use an
1544 R_MIPS_GOT16 relocation. We must correctly predict which of
1545 the two semantics (local or global) the assembler and linker
1546 will apply. The choice depends on the symbol's binding rather
1547 than its visibility.
1549 In the second case, the assembler will not use R_MIPS_GOT16
1550 relocations, but it chooses between local and global accesses
1551 in the same way as for o32 PIC.
1553 In the third case we have more freedom since both forms of
1554 access will work for any kind of symbol. However, there seems
1555 little point in doing things differently. */
1556 if (mips_global_symbol_p (x))
1557 return SYMBOL_GOT_DISP;
1559 return SYMBOL_GOT_PAGE_OFST;
1562 if (TARGET_MIPS16_PCREL_LOADS && context != SYMBOL_CONTEXT_CALL)
1563 return SYMBOL_FORCE_TO_MEM;
1564 return SYMBOL_ABSOLUTE;
1567 /* Classify symbolic expression X, given that it appears in context
1570 static enum mips_symbol_type
1571 mips_classify_symbolic_expression (rtx x, enum mips_symbol_context context)
1575 split_const (x, &x, &offset);
1576 if (UNSPEC_ADDRESS_P (x))
1577 return UNSPEC_ADDRESS_TYPE (x);
1579 return mips_classify_symbol (x, context);
1582 /* Return true if OFFSET is within the range [0, ALIGN), where ALIGN
1583 is the alignment (in bytes) of SYMBOL_REF X. */
1586 mips_offset_within_alignment_p (rtx x, HOST_WIDE_INT offset)
1588 /* If for some reason we can't get the alignment for the
1589 symbol, initializing this to one means we will only accept
1591 HOST_WIDE_INT align = 1;
1594 /* Get the alignment of the symbol we're referring to. */
1595 t = SYMBOL_REF_DECL (x);
1597 align = DECL_ALIGN_UNIT (t);
1599 return offset >= 0 && offset < align;
1602 /* Return true if X is a symbolic constant that can be used in context
1603 CONTEXT. If it is, store the type of the symbol in *SYMBOL_TYPE. */
1606 mips_symbolic_constant_p (rtx x, enum mips_symbol_context context,
1607 enum mips_symbol_type *symbol_type)
1611 split_const (x, &x, &offset);
1612 if (UNSPEC_ADDRESS_P (x))
1614 *symbol_type = UNSPEC_ADDRESS_TYPE (x);
1615 x = UNSPEC_ADDRESS (x);
1617 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1619 *symbol_type = mips_classify_symbol (x, context);
1620 if (*symbol_type == SYMBOL_TLS)
1626 if (offset == const0_rtx)
1629 /* Check whether a nonzero offset is valid for the underlying
1631 switch (*symbol_type)
1633 case SYMBOL_ABSOLUTE:
1634 case SYMBOL_FORCE_TO_MEM:
1635 case SYMBOL_32_HIGH:
1636 case SYMBOL_64_HIGH:
1639 /* If the target has 64-bit pointers and the object file only
1640 supports 32-bit symbols, the values of those symbols will be
1641 sign-extended. In this case we can't allow an arbitrary offset
1642 in case the 32-bit value X + OFFSET has a different sign from X. */
1643 if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
1644 return offset_within_block_p (x, INTVAL (offset));
1646 /* In other cases the relocations can handle any offset. */
1649 case SYMBOL_PC_RELATIVE:
1650 /* Allow constant pool references to be converted to LABEL+CONSTANT.
1651 In this case, we no longer have access to the underlying constant,
1652 but the original symbol-based access was known to be valid. */
1653 if (GET_CODE (x) == LABEL_REF)
1658 case SYMBOL_GP_RELATIVE:
1659 /* Make sure that the offset refers to something within the
1660 same object block. This should guarantee that the final
1661 PC- or GP-relative offset is within the 16-bit limit. */
1662 return offset_within_block_p (x, INTVAL (offset));
1664 case SYMBOL_GOT_PAGE_OFST:
1665 case SYMBOL_GOTOFF_PAGE:
1666 /* If the symbol is global, the GOT entry will contain the symbol's
1667 address, and we will apply a 16-bit offset after loading it.
1668 If the symbol is local, the linker should provide enough local
1669 GOT entries for a 16-bit offset, but larger offsets may lead
1671 return SMALL_INT (offset);
1675 /* There is no carry between the HI and LO REL relocations, so the
1676 offset is only valid if we know it won't lead to such a carry. */
1677 return mips_offset_within_alignment_p (x, INTVAL (offset));
1679 case SYMBOL_GOT_DISP:
1680 case SYMBOL_GOTOFF_DISP:
1681 case SYMBOL_GOTOFF_CALL:
1682 case SYMBOL_GOTOFF_LOADGP:
1685 case SYMBOL_GOTTPREL:
1694 /* This function is used to implement REG_MODE_OK_FOR_BASE_P. */
1697 mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode, int strict)
1699 if (!HARD_REGISTER_NUM_P (regno))
1703 regno = reg_renumber[regno];
1706 /* These fake registers will be eliminated to either the stack or
1707 hard frame pointer, both of which are usually valid base registers.
1708 Reload deals with the cases where the eliminated form isn't valid. */
1709 if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
1712 /* In mips16 mode, the stack pointer can only address word and doubleword
1713 values, nothing smaller. There are two problems here:
1715 (a) Instantiating virtual registers can introduce new uses of the
1716 stack pointer. If these virtual registers are valid addresses,
1717 the stack pointer should be too.
1719 (b) Most uses of the stack pointer are not made explicit until
1720 FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
1721 We don't know until that stage whether we'll be eliminating to the
1722 stack pointer (which needs the restriction) or the hard frame
1723 pointer (which doesn't).
1725 All in all, it seems more consistent to only enforce this restriction
1726 during and after reload. */
1727 if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
1728 return !strict || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1730 return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
1734 /* Return true if X is a valid base register for the given mode.
1735 Allow only hard registers if STRICT. */
1738 mips_valid_base_register_p (rtx x, enum machine_mode mode, int strict)
1740 if (!strict && GET_CODE (x) == SUBREG)
1744 && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict));
1748 /* Return true if X is a valid address for machine mode MODE. If it is,
1749 fill in INFO appropriately. STRICT is true if we should only accept
1750 hard base registers. */
1753 mips_classify_address (struct mips_address_info *info, rtx x,
1754 enum machine_mode mode, int strict)
1756 switch (GET_CODE (x))
1760 info->type = ADDRESS_REG;
1762 info->offset = const0_rtx;
1763 return mips_valid_base_register_p (info->reg, mode, strict);
1766 info->type = ADDRESS_REG;
1767 info->reg = XEXP (x, 0);
1768 info->offset = XEXP (x, 1);
1769 return (mips_valid_base_register_p (info->reg, mode, strict)
1770 && const_arith_operand (info->offset, VOIDmode));
1773 info->type = ADDRESS_LO_SUM;
1774 info->reg = XEXP (x, 0);
1775 info->offset = XEXP (x, 1);
1776 /* We have to trust the creator of the LO_SUM to do something vaguely
1777 sane. Target-independent code that creates a LO_SUM should also
1778 create and verify the matching HIGH. Target-independent code that
1779 adds an offset to a LO_SUM must prove that the offset will not
1780 induce a carry. Failure to do either of these things would be
1781 a bug, and we are not required to check for it here. The MIPS
1782 backend itself should only create LO_SUMs for valid symbolic
1783 constants, with the high part being either a HIGH or a copy
1786 = mips_classify_symbolic_expression (info->offset, SYMBOL_CONTEXT_MEM);
1787 return (mips_valid_base_register_p (info->reg, mode, strict)
1788 && mips_symbol_insns (info->symbol_type, mode) > 0
1789 && mips_lo_relocs[info->symbol_type] != 0);
1792 /* Small-integer addresses don't occur very often, but they
1793 are legitimate if $0 is a valid base register. */
1794 info->type = ADDRESS_CONST_INT;
1795 return !TARGET_MIPS16 && SMALL_INT (x);
1800 info->type = ADDRESS_SYMBOLIC;
1801 return (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_MEM,
1803 && mips_symbol_insns (info->symbol_type, mode) > 0
1804 && !mips_split_p[info->symbol_type]);
1811 /* Return true if X is a thread-local symbol. */
1814 mips_tls_operand_p (rtx x)
1816 return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
1819 /* Return true if X can not be forced into a constant pool. */
1822 mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
1824 return mips_tls_operand_p (*x);
1827 /* Return true if X can not be forced into a constant pool. */
1830 mips_cannot_force_const_mem (rtx x)
1836 /* As an optimization, reject constants that mips_legitimize_move
1839 Suppose we have a multi-instruction sequence that loads constant C
1840 into register R. If R does not get allocated a hard register, and
1841 R is used in an operand that allows both registers and memory
1842 references, reload will consider forcing C into memory and using
1843 one of the instruction's memory alternatives. Returning false
1844 here will force it to use an input reload instead. */
1845 if (GET_CODE (x) == CONST_INT)
1848 split_const (x, &base, &offset);
1849 if (symbolic_operand (base, VOIDmode) && SMALL_INT (offset))
1853 if (TARGET_HAVE_TLS && for_each_rtx (&x, &mips_tls_symbol_ref_1, 0))
1859 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. We can't use blocks for
1860 constants when we're using a per-function constant pool. */
1863 mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
1864 const_rtx x ATTRIBUTE_UNUSED)
1866 return !TARGET_MIPS16_PCREL_LOADS;
1869 /* Like mips_symbol_insns, but treat extended MIPS16 instructions as a
1870 single instruction. We rely on the fact that, in the worst case,
1871 all instructions involved in a MIPS16 address calculation are usually
1875 mips_symbol_insns_1 (enum mips_symbol_type type, enum machine_mode mode)
1879 case SYMBOL_ABSOLUTE:
1880 /* When using 64-bit symbols, we need 5 preparatory instructions,
1883 lui $at,%highest(symbol)
1884 daddiu $at,$at,%higher(symbol)
1886 daddiu $at,$at,%hi(symbol)
1889 The final address is then $at + %lo(symbol). With 32-bit
1890 symbols we just need a preparatory lui for normal mode and
1891 a preparatory "li; sll" for MIPS16. */
1892 return ABI_HAS_64BIT_SYMBOLS ? 6 : TARGET_MIPS16 ? 3 : 2;
1894 case SYMBOL_GP_RELATIVE:
1895 /* Treat GP-relative accesses as taking a single instruction on
1896 MIPS16 too; the copy of $gp can often be shared. */
1899 case SYMBOL_PC_RELATIVE:
1900 /* PC-relative constants can be only be used with addiupc,
1902 if (mode == MAX_MACHINE_MODE
1903 || GET_MODE_SIZE (mode) == 4
1904 || GET_MODE_SIZE (mode) == 8)
1907 /* The constant must be loaded using addiupc first. */
1910 case SYMBOL_FORCE_TO_MEM:
1911 /* The constant must be loaded from the constant pool. */
1914 case SYMBOL_GOT_DISP:
1915 /* The constant will have to be loaded from the GOT before it
1916 is used in an address. */
1917 if (mode != MAX_MACHINE_MODE)
1922 case SYMBOL_GOT_PAGE_OFST:
1923 /* Unless -funit-at-a-time is in effect, we can't be sure whether
1924 the local/global classification is accurate. See override_options
1927 The worst cases are:
1929 (1) For local symbols when generating o32 or o64 code. The assembler
1935 ...and the final address will be $at + %lo(symbol).
1937 (2) For global symbols when -mxgot. The assembler will use:
1939 lui $at,%got_hi(symbol)
1942 ...and the final address will be $at + %got_lo(symbol). */
1945 case SYMBOL_GOTOFF_PAGE:
1946 case SYMBOL_GOTOFF_DISP:
1947 case SYMBOL_GOTOFF_CALL:
1948 case SYMBOL_GOTOFF_LOADGP:
1949 case SYMBOL_32_HIGH:
1950 case SYMBOL_64_HIGH:
1956 case SYMBOL_GOTTPREL:
1959 /* A 16-bit constant formed by a single relocation, or a 32-bit
1960 constant formed from a high 16-bit relocation and a low 16-bit
1961 relocation. Use mips_split_p to determine which. */
1962 return !mips_split_p[type] ? 1 : TARGET_MIPS16 ? 3 : 2;
1965 /* We don't treat a bare TLS symbol as a constant. */
1971 /* If MODE is MAX_MACHINE_MODE, return the number of instructions needed
1972 to load symbols of type TYPE into a register. Return 0 if the given
1973 type of symbol cannot be used as an immediate operand.
1975 Otherwise, return the number of instructions needed to load or store
1976 values of mode MODE to or from addresses of type TYPE. Return 0 if
1977 the given type of symbol is not valid in addresses.
1979 In both cases, treat extended MIPS16 instructions as two instructions. */
1982 mips_symbol_insns (enum mips_symbol_type type, enum machine_mode mode)
1984 return mips_symbol_insns_1 (type, mode) * (TARGET_MIPS16 ? 2 : 1);
1987 /* Return true if X is a legitimate $sp-based address for mode MDOE. */
1990 mips_stack_address_p (rtx x, enum machine_mode mode)
1992 struct mips_address_info addr;
1994 return (mips_classify_address (&addr, x, mode, false)
1995 && addr.type == ADDRESS_REG
1996 && addr.reg == stack_pointer_rtx);
1999 /* Return true if a value at OFFSET bytes from BASE can be accessed
2000 using an unextended mips16 instruction. MODE is the mode of the
2003 Usually the offset in an unextended instruction is a 5-bit field.
2004 The offset is unsigned and shifted left once for HIs, twice
2005 for SIs, and so on. An exception is SImode accesses off the
2006 stack pointer, which have an 8-bit immediate field. */
2009 mips16_unextended_reference_p (enum machine_mode mode, rtx base, rtx offset)
2012 && GET_CODE (offset) == CONST_INT
2013 && INTVAL (offset) >= 0
2014 && (INTVAL (offset) & (GET_MODE_SIZE (mode) - 1)) == 0)
2016 if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
2017 return INTVAL (offset) < 256 * GET_MODE_SIZE (mode);
2018 return INTVAL (offset) < 32 * GET_MODE_SIZE (mode);
2024 /* Return the number of instructions needed to load or store a value
2025 of mode MODE at X. Return 0 if X isn't valid for MODE. Assume that
2026 multiword moves may need to be split into word moves if MIGHT_SPLIT_P,
2027 otherwise assume that a single load or store is enough.
2029 For mips16 code, count extended instructions as two instructions. */
2032 mips_address_insns (rtx x, enum machine_mode mode, bool might_split_p)
2034 struct mips_address_info addr;
2037 /* BLKmode is used for single unaligned loads and stores and should
2038 not count as a multiword mode. (GET_MODE_SIZE (BLKmode) is pretty
2039 meaningless, so we have to single it out as a special case one way
2041 if (mode != BLKmode && might_split_p)
2042 factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2046 if (mips_classify_address (&addr, x, mode, false))
2051 && !mips16_unextended_reference_p (mode, addr.reg, addr.offset))
2055 case ADDRESS_LO_SUM:
2056 return (TARGET_MIPS16 ? factor * 2 : factor);
2058 case ADDRESS_CONST_INT:
2061 case ADDRESS_SYMBOLIC:
2062 return factor * mips_symbol_insns (addr.symbol_type, mode);
2068 /* Likewise for constant X. */
2071 mips_const_insns (rtx x)
2073 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2074 enum mips_symbol_type symbol_type;
2077 switch (GET_CODE (x))
2080 if (!mips_symbolic_constant_p (XEXP (x, 0), SYMBOL_CONTEXT_LEA,
2082 || !mips_split_p[symbol_type])
2085 /* This is simply an lui for normal mode. It is an extended
2086 "li" followed by an extended "sll" for MIPS16. */
2087 return TARGET_MIPS16 ? 4 : 1;
2091 /* Unsigned 8-bit constants can be loaded using an unextended
2092 LI instruction. Unsigned 16-bit constants can be loaded
2093 using an extended LI. Negative constants must be loaded
2094 using LI and then negated. */
2095 return (INTVAL (x) >= 0 && INTVAL (x) < 256 ? 1
2096 : SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
2097 : INTVAL (x) > -256 && INTVAL (x) < 0 ? 2
2098 : SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
2101 return mips_build_integer (codes, INTVAL (x));
2105 return (!TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0);
2111 /* See if we can refer to X directly. */
2112 if (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_LEA, &symbol_type))
2113 return mips_symbol_insns (symbol_type, MAX_MACHINE_MODE);
2115 /* Otherwise try splitting the constant into a base and offset.
2116 16-bit offsets can be added using an extra addiu. Larger offsets
2117 must be calculated separately and then added to the base. */
2118 split_const (x, &x, &offset);
2121 int n = mips_const_insns (x);
2124 if (SMALL_INT (offset))
2127 return n + 1 + mips_build_integer (codes, INTVAL (offset));
2134 return mips_symbol_insns (mips_classify_symbol (x, SYMBOL_CONTEXT_LEA),
2143 /* Return the number of instructions needed to implement INSN,
2144 given that it loads from or stores to MEM. Count extended
2145 mips16 instructions as two instructions. */
2148 mips_load_store_insns (rtx mem, rtx insn)
2150 enum machine_mode mode;
2154 gcc_assert (MEM_P (mem));
2155 mode = GET_MODE (mem);
2157 /* Try to prove that INSN does not need to be split. */
2158 might_split_p = true;
2159 if (GET_MODE_BITSIZE (mode) == 64)
2161 set = single_set (insn);
2162 if (set && !mips_split_64bit_move_p (SET_DEST (set), SET_SRC (set)))
2163 might_split_p = false;
2166 return mips_address_insns (XEXP (mem, 0), mode, might_split_p);
2170 /* Return the number of instructions needed for an integer division. */
2173 mips_idiv_insns (void)
2178 if (TARGET_CHECK_ZERO_DIV)
2180 if (GENERATE_DIVIDE_TRAPS)
2186 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
2191 /* This function is used to implement GO_IF_LEGITIMATE_ADDRESS. It
2192 returns a nonzero value if X is a legitimate address for a memory
2193 operand of the indicated MODE. STRICT is nonzero if this function
2194 is called during reload. */
2197 mips_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
2199 struct mips_address_info addr;
2201 return mips_classify_address (&addr, x, mode, strict);
2204 /* Emit a move from SRC to DEST. Assume that the move expanders can
2205 handle all moves if !can_create_pseudo_p (). The distinction is
2206 important because, unlike emit_move_insn, the move expanders know
2207 how to force Pmode objects into the constant pool even when the
2208 constant pool address is not itself legitimate. */
2211 mips_emit_move (rtx dest, rtx src)
2213 return (can_create_pseudo_p ()
2214 ? emit_move_insn (dest, src)
2215 : emit_move_insn_1 (dest, src));
2218 /* Copy VALUE to a register and return that register. If new psuedos
2219 are allowed, copy it into a new register, otherwise use DEST. */
2222 mips_force_temporary (rtx dest, rtx value)
2224 if (can_create_pseudo_p ())
2225 return force_reg (Pmode, value);
2228 mips_emit_move (copy_rtx (dest), value);
2234 /* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
2235 it appears in a MEM of that mode. Return true if ADDR is a legitimate
2236 constant in that context and can be split into a high part and a LO_SUM.
2237 If so, and if LO_SUM_OUT is nonnull, emit the high part and return
2238 the LO_SUM in *LO_SUM_OUT. Leave *LO_SUM_OUT unchanged otherwise.
2240 TEMP is as for mips_force_temporary and is used to load the high
2241 part into a register. */
2244 mips_split_symbol (rtx temp, rtx addr, enum machine_mode mode, rtx *lo_sum_out)
2246 enum mips_symbol_context context;
2247 enum mips_symbol_type symbol_type;
2250 context = (mode == MAX_MACHINE_MODE
2251 ? SYMBOL_CONTEXT_LEA
2252 : SYMBOL_CONTEXT_MEM);
2253 if (!mips_symbolic_constant_p (addr, context, &symbol_type)
2254 || mips_symbol_insns (symbol_type, mode) == 0
2255 || !mips_split_p[symbol_type])
2260 if (symbol_type == SYMBOL_GP_RELATIVE)
2262 if (!can_create_pseudo_p ())
2264 emit_insn (gen_load_const_gp (copy_rtx (temp)));
2268 high = mips16_gp_pseudo_reg ();
2272 high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
2273 high = mips_force_temporary (temp, high);
2275 *lo_sum_out = gen_rtx_LO_SUM (Pmode, high, addr);
2281 /* Wrap symbol or label BASE in an unspec address of type SYMBOL_TYPE
2282 and add CONST_INT OFFSET to the result. */
2285 mips_unspec_address_offset (rtx base, rtx offset,
2286 enum mips_symbol_type symbol_type)
2288 base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
2289 UNSPEC_ADDRESS_FIRST + symbol_type);
2290 if (offset != const0_rtx)
2291 base = gen_rtx_PLUS (Pmode, base, offset);
2292 return gen_rtx_CONST (Pmode, base);
2295 /* Return an UNSPEC address with underlying address ADDRESS and symbol
2296 type SYMBOL_TYPE. */
2299 mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
2303 split_const (address, &base, &offset);
2304 return mips_unspec_address_offset (base, offset, symbol_type);
2308 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
2309 high part to BASE and return the result. Just return BASE otherwise.
2310 TEMP is available as a temporary register if needed.
2312 The returned expression can be used as the first operand to a LO_SUM. */
2315 mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
2316 enum mips_symbol_type symbol_type)
2318 if (mips_split_p[symbol_type])
2320 addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
2321 addr = mips_force_temporary (temp, addr);
2322 return mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
2328 /* Return a legitimate address for REG + OFFSET. TEMP is as for
2329 mips_force_temporary; it is only needed when OFFSET is not a
2333 mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
2335 if (!SMALL_OPERAND (offset))
2340 /* Load the full offset into a register so that we can use
2341 an unextended instruction for the address itself. */
2342 high = GEN_INT (offset);
2347 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH. */
2348 high = GEN_INT (CONST_HIGH_PART (offset));
2349 offset = CONST_LOW_PART (offset);
2351 high = mips_force_temporary (temp, high);
2352 reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
2354 return plus_constant (reg, offset);
2357 /* Emit a call to __tls_get_addr. SYM is the TLS symbol we are
2358 referencing, and TYPE is the symbol type to use (either global
2359 dynamic or local dynamic). V0 is an RTX for the return value
2360 location. The entire insn sequence is returned. */
2362 static GTY(()) rtx mips_tls_symbol;
2365 mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
2367 rtx insn, loc, tga, a0;
2369 a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
2371 if (!mips_tls_symbol)
2372 mips_tls_symbol = init_one_libfunc ("__tls_get_addr");
2374 loc = mips_unspec_address (sym, type);
2378 emit_insn (gen_rtx_SET (Pmode, a0,
2379 gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc)));
2380 tga = gen_rtx_MEM (Pmode, mips_tls_symbol);
2381 insn = emit_call_insn (gen_call_value (v0, tga, const0_rtx, const0_rtx));
2382 CONST_OR_PURE_CALL_P (insn) = 1;
2383 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), v0);
2384 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
2385 insn = get_insns ();
2392 /* Generate the code to access LOC, a thread local SYMBOL_REF. The
2393 return value will be a valid address and move_operand (either a REG
2397 mips_legitimize_tls_address (rtx loc)
2399 rtx dest, insn, v0, v1, tmp1, tmp2, eqv;
2400 enum tls_model model;
2404 sorry ("MIPS16 TLS");
2405 return gen_reg_rtx (Pmode);
2408 v0 = gen_rtx_REG (Pmode, GP_RETURN);
2409 v1 = gen_rtx_REG (Pmode, GP_RETURN + 1);
2411 model = SYMBOL_REF_TLS_MODEL (loc);
2412 /* Only TARGET_ABICALLS code can have more than one module; other
2413 code must be be static and should not use a GOT. All TLS models
2414 reduce to local exec in this situation. */
2415 if (!TARGET_ABICALLS)
2416 model = TLS_MODEL_LOCAL_EXEC;
2420 case TLS_MODEL_GLOBAL_DYNAMIC:
2421 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSGD, v0);
2422 dest = gen_reg_rtx (Pmode);
2423 emit_libcall_block (insn, dest, v0, loc);
2426 case TLS_MODEL_LOCAL_DYNAMIC:
2427 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSLDM, v0);
2428 tmp1 = gen_reg_rtx (Pmode);
2430 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2431 share the LDM result with other LD model accesses. */
2432 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2434 emit_libcall_block (insn, tmp1, v0, eqv);
2436 tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_DTPREL);
2437 dest = gen_rtx_LO_SUM (Pmode, tmp2,
2438 mips_unspec_address (loc, SYMBOL_DTPREL));
2441 case TLS_MODEL_INITIAL_EXEC:
2442 tmp1 = gen_reg_rtx (Pmode);
2443 tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL);
2444 if (Pmode == DImode)
2446 emit_insn (gen_tls_get_tp_di (v1));
2447 emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2));
2451 emit_insn (gen_tls_get_tp_si (v1));
2452 emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2));
2454 dest = gen_reg_rtx (Pmode);
2455 emit_insn (gen_add3_insn (dest, tmp1, v1));
2458 case TLS_MODEL_LOCAL_EXEC:
2459 if (Pmode == DImode)
2460 emit_insn (gen_tls_get_tp_di (v1));
2462 emit_insn (gen_tls_get_tp_si (v1));
2464 tmp1 = mips_unspec_offset_high (NULL, v1, loc, SYMBOL_TPREL);
2465 dest = gen_rtx_LO_SUM (Pmode, tmp1,
2466 mips_unspec_address (loc, SYMBOL_TPREL));
2476 /* This function is used to implement LEGITIMIZE_ADDRESS. If *XLOC can
2477 be legitimized in a way that the generic machinery might not expect,
2478 put the new address in *XLOC and return true. MODE is the mode of
2479 the memory being accessed. */
2482 mips_legitimize_address (rtx *xloc, enum machine_mode mode)
2484 if (mips_tls_operand_p (*xloc))
2486 *xloc = mips_legitimize_tls_address (*xloc);
2490 /* See if the address can split into a high part and a LO_SUM. */
2491 if (mips_split_symbol (NULL, *xloc, mode, xloc))
2494 if (GET_CODE (*xloc) == PLUS && GET_CODE (XEXP (*xloc, 1)) == CONST_INT)
2496 /* Handle REG + CONSTANT using mips_add_offset. */
2499 reg = XEXP (*xloc, 0);
2500 if (!mips_valid_base_register_p (reg, mode, 0))
2501 reg = copy_to_mode_reg (Pmode, reg);
2502 *xloc = mips_add_offset (0, reg, INTVAL (XEXP (*xloc, 1)));
2510 /* Subroutine of mips_build_integer (with the same interface).
2511 Assume that the final action in the sequence should be a left shift. */
2514 mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
2516 unsigned int i, shift;
2518 /* Shift VALUE right until its lowest bit is set. Shift arithmetically
2519 since signed numbers are easier to load than unsigned ones. */
2521 while ((value & 1) == 0)
2522 value /= 2, shift++;
2524 i = mips_build_integer (codes, value);
2525 codes[i].code = ASHIFT;
2526 codes[i].value = shift;
2531 /* As for mips_build_shift, but assume that the final action will be
2532 an IOR or PLUS operation. */
2535 mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
2537 unsigned HOST_WIDE_INT high;
2540 high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
2541 if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
2543 /* The constant is too complex to load with a simple lui/ori pair
2544 so our goal is to clear as many trailing zeros as possible.
2545 In this case, we know bit 16 is set and that the low 16 bits
2546 form a negative number. If we subtract that number from VALUE,
2547 we will clear at least the lowest 17 bits, maybe more. */
2548 i = mips_build_integer (codes, CONST_HIGH_PART (value));
2549 codes[i].code = PLUS;
2550 codes[i].value = CONST_LOW_PART (value);
2554 i = mips_build_integer (codes, high);
2555 codes[i].code = IOR;
2556 codes[i].value = value & 0xffff;
2562 /* Fill CODES with a sequence of rtl operations to load VALUE.
2563 Return the number of operations needed. */
2566 mips_build_integer (struct mips_integer_op *codes,
2567 unsigned HOST_WIDE_INT value)
2569 if (SMALL_OPERAND (value)
2570 || SMALL_OPERAND_UNSIGNED (value)
2571 || LUI_OPERAND (value))
2573 /* The value can be loaded with a single instruction. */
2574 codes[0].code = UNKNOWN;
2575 codes[0].value = value;
2578 else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
2580 /* Either the constant is a simple LUI/ORI combination or its
2581 lowest bit is set. We don't want to shift in this case. */
2582 return mips_build_lower (codes, value);
2584 else if ((value & 0xffff) == 0)
2586 /* The constant will need at least three actions. The lowest
2587 16 bits are clear, so the final action will be a shift. */
2588 return mips_build_shift (codes, value);
2592 /* The final action could be a shift, add or inclusive OR.
2593 Rather than use a complex condition to select the best
2594 approach, try both mips_build_shift and mips_build_lower
2595 and pick the one that gives the shortest sequence.
2596 Note that this case is only used once per constant. */
2597 struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
2598 unsigned int cost, alt_cost;
2600 cost = mips_build_shift (codes, value);
2601 alt_cost = mips_build_lower (alt_codes, value);
2602 if (alt_cost < cost)
2604 memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
2612 /* Load VALUE into DEST, using TEMP as a temporary register if need be. */
2615 mips_move_integer (rtx dest, rtx temp, unsigned HOST_WIDE_INT value)
2617 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2618 enum machine_mode mode;
2619 unsigned int i, cost;
2622 mode = GET_MODE (dest);
2623 cost = mips_build_integer (codes, value);
2625 /* Apply each binary operation to X. Invariant: X is a legitimate
2626 source operand for a SET pattern. */
2627 x = GEN_INT (codes[0].value);
2628 for (i = 1; i < cost; i++)
2630 if (!can_create_pseudo_p ())
2632 emit_insn (gen_rtx_SET (VOIDmode, temp, x));
2636 x = force_reg (mode, x);
2637 x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
2640 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
2644 /* Subroutine of mips_legitimize_move. Move constant SRC into register
2645 DEST given that SRC satisfies immediate_operand but doesn't satisfy
2649 mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
2653 /* Split moves of big integers into smaller pieces. */
2654 if (splittable_const_int_operand (src, mode))
2656 mips_move_integer (dest, dest, INTVAL (src));
2660 /* Split moves of symbolic constants into high/low pairs. */
2661 if (mips_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
2663 emit_insn (gen_rtx_SET (VOIDmode, dest, src));
2667 if (mips_tls_operand_p (src))
2669 mips_emit_move (dest, mips_legitimize_tls_address (src));
2673 /* If we have (const (plus symbol offset)), and that expression cannot
2674 be forced into memory, load the symbol first and add in the offset.
2675 In non-MIPS16 mode, prefer to do this even if the constant _can_ be
2676 forced into memory, as it usually produces better code. */
2677 split_const (src, &base, &offset);
2678 if (offset != const0_rtx
2679 && (targetm.cannot_force_const_mem (src)
2680 || (!TARGET_MIPS16 && can_create_pseudo_p ())))
2682 base = mips_force_temporary (dest, base);
2683 mips_emit_move (dest, mips_add_offset (0, base, INTVAL (offset)));
2687 src = force_const_mem (mode, src);
2689 /* When using explicit relocs, constant pool references are sometimes
2690 not legitimate addresses. */
2691 mips_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0));
2692 mips_emit_move (dest, src);
2696 /* If (set DEST SRC) is not a valid instruction, emit an equivalent
2697 sequence that is valid. */
2700 mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
2702 if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
2704 mips_emit_move (dest, force_reg (mode, src));
2708 /* Check for individual, fully-reloaded mflo and mfhi instructions. */
2709 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
2710 && REG_P (src) && MD_REG_P (REGNO (src))
2711 && REG_P (dest) && GP_REG_P (REGNO (dest)))
2713 int other_regno = REGNO (src) == HI_REGNUM ? LO_REGNUM : HI_REGNUM;
2714 if (GET_MODE_SIZE (mode) <= 4)
2715 emit_insn (gen_mfhilo_si (gen_rtx_REG (SImode, REGNO (dest)),
2716 gen_rtx_REG (SImode, REGNO (src)),
2717 gen_rtx_REG (SImode, other_regno)));
2719 emit_insn (gen_mfhilo_di (gen_rtx_REG (DImode, REGNO (dest)),
2720 gen_rtx_REG (DImode, REGNO (src)),
2721 gen_rtx_REG (DImode, other_regno)));
2725 /* We need to deal with constants that would be legitimate
2726 immediate_operands but not legitimate move_operands. */
2727 if (CONSTANT_P (src) && !move_operand (src, mode))
2729 mips_legitimize_const_move (mode, dest, src);
2730 set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
2736 /* We need a lot of little routines to check constant values on the
2737 mips16. These are used to figure out how long the instruction will
2738 be. It would be much better to do this using constraints, but
2739 there aren't nearly enough letters available. */
2742 m16_check_op (rtx op, int low, int high, int mask)
2744 return (GET_CODE (op) == CONST_INT
2745 && INTVAL (op) >= low
2746 && INTVAL (op) <= high
2747 && (INTVAL (op) & mask) == 0);
2751 m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2753 return m16_check_op (op, 0x1, 0x8, 0);
2757 m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2759 return m16_check_op (op, - 0x8, 0x7, 0);
2763 m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2765 return m16_check_op (op, - 0x7, 0x8, 0);
2769 m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2771 return m16_check_op (op, - 0x10, 0xf, 0);
2775 m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2777 return m16_check_op (op, - 0xf, 0x10, 0);
2781 m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2783 return m16_check_op (op, (- 0x10) << 2, 0xf << 2, 3);
2787 m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2789 return m16_check_op (op, (- 0xf) << 2, 0x10 << 2, 3);
2793 m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2795 return m16_check_op (op, - 0x80, 0x7f, 0);
2799 m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2801 return m16_check_op (op, - 0x7f, 0x80, 0);
2805 m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2807 return m16_check_op (op, 0x0, 0xff, 0);
2811 m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2813 return m16_check_op (op, - 0xff, 0x0, 0);
2817 m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2819 return m16_check_op (op, - 0x1, 0xfe, 0);
2823 m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2825 return m16_check_op (op, 0x0, 0xff << 2, 3);
2829 m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2831 return m16_check_op (op, (- 0xff) << 2, 0x0, 3);
2835 m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2837 return m16_check_op (op, (- 0x80) << 3, 0x7f << 3, 7);
2841 m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2843 return m16_check_op (op, (- 0x7f) << 3, 0x80 << 3, 7);
2846 /* Return true if ADDR matches the pattern for the lwxs load scaled indexed
2847 address instruction. */
2850 mips_lwxs_address_p (rtx addr)
2853 && GET_CODE (addr) == PLUS
2854 && REG_P (XEXP (addr, 1)))
2856 rtx offset = XEXP (addr, 0);
2857 if (GET_CODE (offset) == MULT
2858 && REG_P (XEXP (offset, 0))
2859 && GET_CODE (XEXP (offset, 1)) == CONST_INT
2860 && INTVAL (XEXP (offset, 1)) == 4)
2866 /* The cost of loading values from the constant pool. It should be
2867 larger than the cost of any constant we want to synthesize inline. */
2869 #define CONSTANT_POOL_COST COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 8)
2871 /* Return the cost of X when used as an operand to the MIPS16 instruction
2872 that implements CODE. Return -1 if there is no such instruction, or if
2873 X is not a valid immediate operand for it. */
2876 mips16_constant_cost (int code, HOST_WIDE_INT x)
2883 /* Shifts by between 1 and 8 bits (inclusive) are unextended,
2884 other shifts are extended. The shift patterns truncate the shift
2885 count to the right size, so there are no out-of-range values. */
2886 if (IN_RANGE (x, 1, 8))
2888 return COSTS_N_INSNS (1);
2891 if (IN_RANGE (x, -128, 127))
2893 if (SMALL_OPERAND (x))
2894 return COSTS_N_INSNS (1);
2898 /* Like LE, but reject the always-true case. */
2902 /* We add 1 to the immediate and use SLT. */
2905 /* We can use CMPI for an xor with an unsigned 16-bit X. */
2908 if (IN_RANGE (x, 0, 255))
2910 if (SMALL_OPERAND_UNSIGNED (x))
2911 return COSTS_N_INSNS (1);
2916 /* Equality comparisons with 0 are cheap. */
2926 /* Return true if there is a non-MIPS16 instruction that implements CODE
2927 and if that instruction accepts X as an immediate operand. */
2930 mips_immediate_operand_p (int code, HOST_WIDE_INT x)
2937 /* All shift counts are truncated to a valid constant. */
2942 /* Likewise rotates, if the target supports rotates at all. */
2948 /* These instructions take 16-bit unsigned immediates. */
2949 return SMALL_OPERAND_UNSIGNED (x);
2954 /* These instructions take 16-bit signed immediates. */
2955 return SMALL_OPERAND (x);
2961 /* The "immediate" forms of these instructions are really
2962 implemented as comparisons with register 0. */
2967 /* Likewise, meaning that the only valid immediate operand is 1. */
2971 /* We add 1 to the immediate and use SLT. */
2972 return SMALL_OPERAND (x + 1);
2975 /* Likewise SLTU, but reject the always-true case. */
2976 return SMALL_OPERAND (x + 1) && x + 1 != 0;
2980 /* The bit position and size are immediate operands. */
2981 return ISA_HAS_EXT_INS;
2984 /* By default assume that $0 can be used for 0. */
2989 /* Return the cost of binary operation X, given that the instruction
2990 sequence for a word-sized or smaller operation has cost SINGLE_COST
2991 and that the sequence of a double-word operation has cost DOUBLE_COST. */
2994 mips_binary_cost (rtx x, int single_cost, int double_cost)
2998 if (GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD * 2)
3003 + rtx_cost (XEXP (x, 0), 0)
3004 + rtx_cost (XEXP (x, 1), GET_CODE (x)));
3007 /* Return the cost of floating-point multiplications of mode MODE. */
3010 mips_fp_mult_cost (enum machine_mode mode)
3012 return mode == DFmode ? mips_cost->fp_mult_df : mips_cost->fp_mult_sf;
3015 /* Return the cost of floating-point divisions of mode MODE. */
3018 mips_fp_div_cost (enum machine_mode mode)
3020 return mode == DFmode ? mips_cost->fp_div_df : mips_cost->fp_div_sf;
3023 /* Return the cost of sign-extending OP to mode MODE, not including the
3024 cost of OP itself. */
3027 mips_sign_extend_cost (enum machine_mode mode, rtx op)
3030 /* Extended loads are as cheap as unextended ones. */
3033 if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
3034 /* A sign extension from SImode to DImode in 64-bit mode is free. */
3037 if (ISA_HAS_SEB_SEH || GENERATE_MIPS16E)
3038 /* We can use SEB or SEH. */
3039 return COSTS_N_INSNS (1);
3041 /* We need to use a shift left and a shift right. */
3042 return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
3045 /* Return the cost of zero-extending OP to mode MODE, not including the
3046 cost of OP itself. */
3049 mips_zero_extend_cost (enum machine_mode mode, rtx op)
3052 /* Extended loads are as cheap as unextended ones. */
3055 if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
3056 /* We need a shift left by 32 bits and a shift right by 32 bits. */
3057 return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
3059 if (GENERATE_MIPS16E)
3060 /* We can use ZEB or ZEH. */
3061 return COSTS_N_INSNS (1);
3064 /* We need to load 0xff or 0xffff into a register and use AND. */
3065 return COSTS_N_INSNS (GET_MODE (op) == QImode ? 2 : 3);
3067 /* We can use ANDI. */
3068 return COSTS_N_INSNS (1);
3071 /* Implement TARGET_RTX_COSTS. */
3074 mips_rtx_costs (rtx x, int code, int outer_code, int *total)
3076 enum machine_mode mode = GET_MODE (x);
3077 bool float_mode_p = FLOAT_MODE_P (mode);
3081 /* The cost of a COMPARE is hard to define for MIPS. COMPAREs don't
3082 appear in the instruction stream, and the cost of a comparison is
3083 really the cost of the branch or scc condition. At the time of
3084 writing, gcc only uses an explicit outer COMPARE code when optabs
3085 is testing whether a constant is expensive enough to force into a
3086 register. We want optabs to pass such constants through the MIPS
3087 expanders instead, so make all constants very cheap here. */
3088 if (outer_code == COMPARE)
3090 gcc_assert (CONSTANT_P (x));
3098 /* Treat *clear_upper32-style ANDs as having zero cost in the
3099 second operand. The cost is entirely in the first operand.
3101 ??? This is needed because we would otherwise try to CSE
3102 the constant operand. Although that's the right thing for
3103 instructions that continue to be a register operation throughout
3104 compilation, it is disastrous for instructions that could
3105 later be converted into a memory operation. */
3107 && outer_code == AND
3108 && UINTVAL (x) == 0xffffffff)
3116 cost = mips16_constant_cost (outer_code, INTVAL (x));
3125 /* When not optimizing for size, we care more about the cost
3126 of hot code, and hot code is often in a loop. If a constant
3127 operand needs to be forced into a register, we will often be
3128 able to hoist the constant load out of the loop, so the load
3129 should not contribute to the cost. */
3131 || mips_immediate_operand_p (outer_code, INTVAL (x)))
3143 cost = mips_const_insns (x);
3146 /* If the constant is likely to be stored in a GPR, SETs of
3147 single-insn constants are as cheap as register sets; we
3148 never want to CSE them.
3150 Don't reduce the cost of storing a floating-point zero in
3151 FPRs. If we have a zero in an FPR for other reasons, we
3152 can get better cfg-cleanup and delayed-branch results by
3153 using it consistently, rather than using $0 sometimes and
3154 an FPR at other times. Also, moves between floating-point
3155 registers are sometimes cheaper than (D)MTC1 $0. */
3157 && outer_code == SET
3158 && !(float_mode_p && TARGET_HARD_FLOAT))
3160 /* When non-MIPS16 code loads a constant N>1 times, we rarely
3161 want to CSE the constant itself. It is usually better to
3162 have N copies of the last operation in the sequence and one
3163 shared copy of the other operations. (Note that this is
3164 not true for MIPS16 code, where the final operation in the
3165 sequence is often an extended instruction.)
3167 Also, if we have a CONST_INT, we don't know whether it is
3168 for a word or doubleword operation, so we cannot rely on
3169 the result of mips_build_integer. */
3170 else if (!TARGET_MIPS16
3171 && (outer_code == SET || mode == VOIDmode))
3173 *total = COSTS_N_INSNS (cost);
3176 /* The value will need to be fetched from the constant pool. */
3177 *total = CONSTANT_POOL_COST;
3181 /* If the address is legitimate, return the number of
3182 instructions it needs. */
3184 cost = mips_address_insns (addr, mode, true);
3187 *total = COSTS_N_INSNS (cost + 1);
3190 /* Check for a scaled indexed address. */
3191 if (mips_lwxs_address_p (addr))
3193 *total = COSTS_N_INSNS (2);
3196 /* Otherwise use the default handling. */
3200 *total = COSTS_N_INSNS (6);
3204 *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1);
3208 /* Check for a *clear_upper32 pattern and treat it like a zero
3209 extension. See the pattern's comment for details. */
3212 && CONST_INT_P (XEXP (x, 1))
3213 && UINTVAL (XEXP (x, 1)) == 0xffffffff)
3215 *total = (mips_zero_extend_cost (mode, XEXP (x, 0))
3216 + rtx_cost (XEXP (x, 0), 0));
3223 /* Double-word operations use two single-word operations. */
3224 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (2));
3232 if (CONSTANT_P (XEXP (x, 1)))
3233 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4));
3235 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (12));
3240 *total = mips_cost->fp_add;
3242 *total = COSTS_N_INSNS (4);
3246 /* Low-part immediates need an extended MIPS16 instruction. */
3247 *total = (COSTS_N_INSNS (TARGET_MIPS16 ? 2 : 1)
3248 + rtx_cost (XEXP (x, 0), 0));
3263 /* Branch comparisons have VOIDmode, so use the first operand's
3265 mode = GET_MODE (XEXP (x, 0));
3266 if (FLOAT_MODE_P (mode))
3268 *total = mips_cost->fp_add;
3271 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4));
3276 && ISA_HAS_NMADD_NMSUB
3277 && TARGET_FUSED_MADD
3278 && !HONOR_NANS (mode)
3279 && !HONOR_SIGNED_ZEROS (mode))
3281 /* See if we can use NMADD or NMSUB. See mips.md for the
3282 associated patterns. */
3283 rtx op0 = XEXP (x, 0);
3284 rtx op1 = XEXP (x, 1);
3285 if (GET_CODE (op0) == MULT && GET_CODE (XEXP (op0, 0)) == NEG)
3287 *total = (mips_fp_mult_cost (mode)
3288 + rtx_cost (XEXP (XEXP (op0, 0), 0), 0)
3289 + rtx_cost (XEXP (op0, 1), 0)
3290 + rtx_cost (op1, 0));
3293 if (GET_CODE (op1) == MULT)
3295 *total = (mips_fp_mult_cost (mode)
3297 + rtx_cost (XEXP (op1, 0), 0)
3298 + rtx_cost (XEXP (op1, 1), 0));
3308 && TARGET_FUSED_MADD
3309 && GET_CODE (XEXP (x, 0)) == MULT)
3312 *total = mips_cost->fp_add;
3316 /* Double-word operations require three single-word operations and
3317 an SLTU. The MIPS16 version then needs to move the result of
3318 the SLTU from $24 to a MIPS16 register. */
3319 *total = mips_binary_cost (x, COSTS_N_INSNS (1),
3320 COSTS_N_INSNS (TARGET_MIPS16 ? 5 : 4));
3325 && ISA_HAS_NMADD_NMSUB
3326 && TARGET_FUSED_MADD
3327 && !HONOR_NANS (mode)
3328 && HONOR_SIGNED_ZEROS (mode))
3330 /* See if we can use NMADD or NMSUB. See mips.md for the
3331 associated patterns. */
3332 rtx op = XEXP (x, 0);
3333 if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
3334 && GET_CODE (XEXP (op, 0)) == MULT)
3336 *total = (mips_fp_mult_cost (mode)
3337 + rtx_cost (XEXP (XEXP (op, 0), 0), 0)
3338 + rtx_cost (XEXP (XEXP (op, 0), 1), 0)
3339 + rtx_cost (XEXP (op, 1), 0));
3345 *total = mips_cost->fp_add;
3347 *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 4 : 1);
3352 *total = mips_fp_mult_cost (mode);
3353 else if (mode == DImode && !TARGET_64BIT)
3354 /* Synthesized from 2 mulsi3s, 1 mulsidi3 and two additions,
3355 where the mulsidi3 always includes an MFHI and an MFLO. */
3356 *total = (optimize_size
3357 ? COSTS_N_INSNS (ISA_HAS_MUL3 ? 7 : 9)
3358 : mips_cost->int_mult_si * 3 + 6);
3359 else if (optimize_size)
3360 *total = (ISA_HAS_MUL3 ? 1 : 2);
3361 else if (mode == DImode)
3362 *total = mips_cost->int_mult_di;
3364 *total = mips_cost->int_mult_si;
3368 /* Check for a reciprocal. */
3369 if (float_mode_p && XEXP (x, 0) == CONST1_RTX (mode))
3372 && flag_unsafe_math_optimizations
3373 && (outer_code == SQRT || GET_CODE (XEXP (x, 1)) == SQRT))
3375 /* An rsqrt<mode>a or rsqrt<mode>b pattern. Count the
3376 division as being free. */
3377 *total = rtx_cost (XEXP (x, 1), 0);
3382 *total = mips_fp_div_cost (mode) + rtx_cost (XEXP (x, 1), 0);
3392 *total = mips_fp_div_cost (mode);
3401 /* It is our responsibility to make division by a power of 2
3402 as cheap as 2 register additions if we want the division
3403 expanders to be used for such operations; see the setting
3404 of sdiv_pow2_cheap in optabs.c. Using (D)DIV for MIPS16
3405 should always produce shorter code than using
3406 expand_sdiv2_pow2. */
3408 && CONST_INT_P (XEXP (x, 1))
3409 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
3411 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), 0);
3414 *total = COSTS_N_INSNS (mips_idiv_insns ());
3416 else if (mode == DImode)
3417 *total = mips_cost->int_div_di;
3419 *total = mips_cost->int_div_si;
3423 *total = mips_sign_extend_cost (mode, XEXP (x, 0));
3427 *total = mips_zero_extend_cost (mode, XEXP (x, 0));
3431 case UNSIGNED_FLOAT:
3434 case FLOAT_TRUNCATE:
3435 *total = mips_cost->fp_add;
3443 /* Provide the costs of an addressing mode that contains ADDR.
3444 If ADDR is not a valid address, its cost is irrelevant. */
3447 mips_address_cost (rtx addr)
3449 return mips_address_insns (addr, SImode, false);
3452 /* Return one word of double-word value OP, taking into account the fixed
3453 endianness of certain registers. HIGH_P is true to select the high part,
3454 false to select the low part. */
3457 mips_subword (rtx op, int high_p)
3460 enum machine_mode mode;
3462 mode = GET_MODE (op);
3463 if (mode == VOIDmode)
3466 if (TARGET_BIG_ENDIAN ? !high_p : high_p)
3467 byte = UNITS_PER_WORD;
3471 if (FP_REG_RTX_P (op))
3472 return gen_rtx_REG (word_mode, high_p ? REGNO (op) + 1 : REGNO (op));
3475 return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
3477 return simplify_gen_subreg (word_mode, op, mode, byte);
3481 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
3484 mips_split_64bit_move_p (rtx dest, rtx src)
3489 /* FP->FP moves can be done in a single instruction. */
3490 if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
3493 /* Check for floating-point loads and stores. They can be done using
3494 ldc1 and sdc1 on MIPS II and above. */
3497 if (FP_REG_RTX_P (dest) && MEM_P (src))
3499 if (FP_REG_RTX_P (src) && MEM_P (dest))
3506 /* Split a 64-bit move from SRC to DEST assuming that
3507 mips_split_64bit_move_p holds.
3509 Moves into and out of FPRs cause some difficulty here. Such moves
3510 will always be DFmode, since paired FPRs are not allowed to store
3511 DImode values. The most natural representation would be two separate
3512 32-bit moves, such as:
3514 (set (reg:SI $f0) (mem:SI ...))
3515 (set (reg:SI $f1) (mem:SI ...))
3517 However, the second insn is invalid because odd-numbered FPRs are
3518 not allowed to store independent values. Use the patterns load_df_low,
3519 load_df_high and store_df_high instead. */
3522 mips_split_64bit_move (rtx dest, rtx src)
3524 if (FP_REG_RTX_P (dest))
3526 /* Loading an FPR from memory or from GPRs. */
3529 dest = gen_lowpart (DFmode, dest);
3530 emit_insn (gen_load_df_low (dest, mips_subword (src, 0)));
3531 emit_insn (gen_mthc1 (dest, mips_subword (src, 1),
3536 emit_insn (gen_load_df_low (copy_rtx (dest),
3537 mips_subword (src, 0)));
3538 emit_insn (gen_load_df_high (dest, mips_subword (src, 1),
3542 else if (FP_REG_RTX_P (src))
3544 /* Storing an FPR into memory or GPRs. */
3547 src = gen_lowpart (DFmode, src);
3548 mips_emit_move (mips_subword (dest, 0), mips_subword (src, 0));
3549 emit_insn (gen_mfhc1 (mips_subword (dest, 1), src));
3553 mips_emit_move (mips_subword (dest, 0), mips_subword (src, 0));
3554 emit_insn (gen_store_df_high (mips_subword (dest, 1), src));
3559 /* The operation can be split into two normal moves. Decide in
3560 which order to do them. */
3563 low_dest = mips_subword (dest, 0);
3564 if (REG_P (low_dest)
3565 && reg_overlap_mentioned_p (low_dest, src))
3567 mips_emit_move (mips_subword (dest, 1), mips_subword (src, 1));
3568 mips_emit_move (low_dest, mips_subword (src, 0));
3572 mips_emit_move (low_dest, mips_subword (src, 0));
3573 mips_emit_move (mips_subword (dest, 1), mips_subword (src, 1));
3578 /* Return the appropriate instructions to move SRC into DEST. Assume
3579 that SRC is operand 1 and DEST is operand 0. */
3582 mips_output_move (rtx dest, rtx src)
3584 enum rtx_code dest_code, src_code;
3585 enum mips_symbol_type symbol_type;
3588 dest_code = GET_CODE (dest);
3589 src_code = GET_CODE (src);
3590 dbl_p = (GET_MODE_SIZE (GET_MODE (dest)) == 8);
3592 if (dbl_p && mips_split_64bit_move_p (dest, src))
3595 if ((src_code == REG && GP_REG_P (REGNO (src)))
3596 || (!TARGET_MIPS16 && src == CONST0_RTX (GET_MODE (dest))))
3598 if (dest_code == REG)
3600 if (GP_REG_P (REGNO (dest)))
3601 return "move\t%0,%z1";
3603 if (MD_REG_P (REGNO (dest)))
3606 if (DSP_ACC_REG_P (REGNO (dest)))
3608 static char retval[] = "mt__\t%z1,%q0";
3609 retval[2] = reg_names[REGNO (dest)][4];
3610 retval[3] = reg_names[REGNO (dest)][5];
3614 if (FP_REG_P (REGNO (dest)))
3615 return (dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0");
3617 if (ALL_COP_REG_P (REGNO (dest)))
3619 static char retval[] = "dmtc_\t%z1,%0";
3621 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3622 return (dbl_p ? retval : retval + 1);
3625 if (dest_code == MEM)
3626 return (dbl_p ? "sd\t%z1,%0" : "sw\t%z1,%0");
3628 if (dest_code == REG && GP_REG_P (REGNO (dest)))
3630 if (src_code == REG)
3632 if (DSP_ACC_REG_P (REGNO (src)))
3634 static char retval[] = "mf__\t%0,%q1";
3635 retval[2] = reg_names[REGNO (src)][4];
3636 retval[3] = reg_names[REGNO (src)][5];
3640 if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
3641 return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
3643 if (FP_REG_P (REGNO (src)))
3644 return (dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1");
3646 if (ALL_COP_REG_P (REGNO (src)))
3648 static char retval[] = "dmfc_\t%0,%1";
3650 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3651 return (dbl_p ? retval : retval + 1);
3655 if (src_code == MEM)
3656 return (dbl_p ? "ld\t%0,%1" : "lw\t%0,%1");
3658 if (src_code == CONST_INT)
3660 /* Don't use the X format, because that will give out of
3661 range numbers for 64-bit hosts and 32-bit targets. */
3663 return "li\t%0,%1\t\t\t# %X1";
3665 if (INTVAL (src) >= 0 && INTVAL (src) <= 0xffff)
3668 if (INTVAL (src) < 0 && INTVAL (src) >= -0xffff)
3672 if (src_code == HIGH)
3673 return TARGET_MIPS16 ? "#" : "lui\t%0,%h1";
3675 if (CONST_GP_P (src))
3676 return "move\t%0,%1";
3678 if (mips_symbolic_constant_p (src, SYMBOL_CONTEXT_LEA, &symbol_type)
3679 && mips_lo_relocs[symbol_type] != 0)
3681 /* A signed 16-bit constant formed by applying a relocation
3682 operator to a symbolic address. */
3683 gcc_assert (!mips_split_p[symbol_type]);
3684 return "li\t%0,%R1";
3687 if (symbolic_operand (src, VOIDmode))
3689 gcc_assert (TARGET_MIPS16
3690 ? TARGET_MIPS16_TEXT_LOADS
3691 : !TARGET_EXPLICIT_RELOCS);
3692 return (dbl_p ? "dla\t%0,%1" : "la\t%0,%1");
3695 if (src_code == REG && FP_REG_P (REGNO (src)))
3697 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3699 if (GET_MODE (dest) == V2SFmode)
3700 return "mov.ps\t%0,%1";
3702 return (dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1");
3705 if (dest_code == MEM)
3706 return (dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0");
3708 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3710 if (src_code == MEM)
3711 return (dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1");
3713 if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
3715 static char retval[] = "l_c_\t%0,%1";
3717 retval[1] = (dbl_p ? 'd' : 'w');
3718 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3721 if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
3723 static char retval[] = "s_c_\t%1,%0";
3725 retval[1] = (dbl_p ? 'd' : 'w');
3726 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3732 /* Restore $gp from its save slot. Valid only when using o32 or
3736 mips_restore_gp (void)
3740 gcc_assert (TARGET_ABICALLS && TARGET_OLDABI);
3742 address = mips_add_offset (pic_offset_table_rtx,
3743 frame_pointer_needed
3744 ? hard_frame_pointer_rtx
3745 : stack_pointer_rtx,
3746 current_function_outgoing_args_size);
3747 slot = gen_rtx_MEM (Pmode, address);
3749 mips_emit_move (pic_offset_table_rtx, slot);
3750 if (!TARGET_EXPLICIT_RELOCS)
3751 emit_insn (gen_blockage ());
3754 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
3757 mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
3759 emit_insn (gen_rtx_SET (VOIDmode, target,
3760 gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
3763 /* Return true if CMP1 is a suitable second operand for relational
3764 operator CODE. See also the *sCC patterns in mips.md. */
3767 mips_relational_operand_ok_p (enum rtx_code code, rtx cmp1)
3773 return reg_or_0_operand (cmp1, VOIDmode);
3777 return !TARGET_MIPS16 && cmp1 == const1_rtx;
3781 return arith_operand (cmp1, VOIDmode);
3784 return sle_operand (cmp1, VOIDmode);
3787 return sleu_operand (cmp1, VOIDmode);
3794 /* Canonicalize LE or LEU comparisons into LT comparisons when
3795 possible to avoid extra instructions or inverting the
3799 mips_canonicalize_comparison (enum rtx_code *code, rtx *cmp1,
3800 enum machine_mode mode)
3802 HOST_WIDE_INT original, plus_one;
3804 if (GET_CODE (*cmp1) != CONST_INT)
3807 original = INTVAL (*cmp1);
3808 plus_one = trunc_int_for_mode ((unsigned HOST_WIDE_INT) original + 1, mode);
3813 if (original < plus_one)
3816 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3825 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3838 /* Compare CMP0 and CMP1 using relational operator CODE and store the
3839 result in TARGET. CMP0 and TARGET are register_operands that have
3840 the same integer mode. If INVERT_PTR is nonnull, it's OK to set
3841 TARGET to the inverse of the result and flip *INVERT_PTR instead. */
3844 mips_emit_int_relational (enum rtx_code code, bool *invert_ptr,
3845 rtx target, rtx cmp0, rtx cmp1)
3847 /* First see if there is a MIPS instruction that can do this operation
3848 with CMP1 in its current form. If not, try to canonicalize the
3849 comparison to LT. If that fails, try doing the same for the
3850 inverse operation. If that also fails, force CMP1 into a register
3852 if (mips_relational_operand_ok_p (code, cmp1))
3853 mips_emit_binary (code, target, cmp0, cmp1);
3854 else if (mips_canonicalize_comparison (&code, &cmp1, GET_MODE (target)))
3855 mips_emit_binary (code, target, cmp0, cmp1);
3858 enum rtx_code inv_code = reverse_condition (code);
3859 if (!mips_relational_operand_ok_p (inv_code, cmp1))
3861 cmp1 = force_reg (GET_MODE (cmp0), cmp1);
3862 mips_emit_int_relational (code, invert_ptr, target, cmp0, cmp1);
3864 else if (invert_ptr == 0)
3866 rtx inv_target = gen_reg_rtx (GET_MODE (target));
3867 mips_emit_binary (inv_code, inv_target, cmp0, cmp1);
3868 mips_emit_binary (XOR, target, inv_target, const1_rtx);
3872 *invert_ptr = !*invert_ptr;
3873 mips_emit_binary (inv_code, target, cmp0, cmp1);
3878 /* Return a register that is zero iff CMP0 and CMP1 are equal.
3879 The register will have the same mode as CMP0. */
3882 mips_zero_if_equal (rtx cmp0, rtx cmp1)
3884 if (cmp1 == const0_rtx)
3887 if (uns_arith_operand (cmp1, VOIDmode))
3888 return expand_binop (GET_MODE (cmp0), xor_optab,
3889 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3891 return expand_binop (GET_MODE (cmp0), sub_optab,
3892 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3895 /* Convert *CODE into a code that can be used in a floating-point
3896 scc instruction (c.<cond>.<fmt>). Return true if the values of
3897 the condition code registers will be inverted, with 0 indicating
3898 that the condition holds. */
3901 mips_reverse_fp_cond_p (enum rtx_code *code)
3908 *code = reverse_condition_maybe_unordered (*code);
3916 /* Convert a comparison into something that can be used in a branch or
3917 conditional move. cmp_operands[0] and cmp_operands[1] are the values
3918 being compared and *CODE is the code used to compare them.
3920 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
3921 If NEED_EQ_NE_P, then only EQ/NE comparisons against zero are possible,
3922 otherwise any standard branch condition can be used. The standard branch
3925 - EQ/NE between two registers.
3926 - any comparison between a register and zero. */
3929 mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
3931 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) == MODE_INT)
3933 if (!need_eq_ne_p && cmp_operands[1] == const0_rtx)
3935 *op0 = cmp_operands[0];
3936 *op1 = cmp_operands[1];
3938 else if (*code == EQ || *code == NE)
3942 *op0 = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3947 *op0 = cmp_operands[0];
3948 *op1 = force_reg (GET_MODE (*op0), cmp_operands[1]);
3953 /* The comparison needs a separate scc instruction. Store the
3954 result of the scc in *OP0 and compare it against zero. */
3955 bool invert = false;
3956 *op0 = gen_reg_rtx (GET_MODE (cmp_operands[0]));
3958 mips_emit_int_relational (*code, &invert, *op0,
3959 cmp_operands[0], cmp_operands[1]);
3960 *code = (invert ? EQ : NE);
3963 else if (ALL_FIXED_POINT_MODE_P (GET_MODE (cmp_operands[0])))
3965 *op0 = gen_rtx_REG (CCDSPmode, CCDSP_CC_REGNUM);
3966 mips_emit_binary (*code, *op0, cmp_operands[0], cmp_operands[1]);
3972 enum rtx_code cmp_code;
3974 /* Floating-point tests use a separate c.cond.fmt comparison to
3975 set a condition code register. The branch or conditional move
3976 will then compare that register against zero.
3978 Set CMP_CODE to the code of the comparison instruction and
3979 *CODE to the code that the branch or move should use. */
3981 *code = mips_reverse_fp_cond_p (&cmp_code) ? EQ : NE;
3983 ? gen_reg_rtx (CCmode)
3984 : gen_rtx_REG (CCmode, FPSW_REGNUM));
3986 mips_emit_binary (cmp_code, *op0, cmp_operands[0], cmp_operands[1]);
3990 /* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE.
3991 Store the result in TARGET and return true if successful.
3993 On 64-bit targets, TARGET may be wider than cmp_operands[0]. */
3996 mips_emit_scc (enum rtx_code code, rtx target)
3998 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) != MODE_INT)
4001 target = gen_lowpart (GET_MODE (cmp_operands[0]), target);
4002 if (code == EQ || code == NE)
4004 rtx zie = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
4005 mips_emit_binary (code, target, zie, const0_rtx);
4008 mips_emit_int_relational (code, 0, target,
4009 cmp_operands[0], cmp_operands[1]);
4013 /* Emit the common code for doing conditional branches.
4014 operand[0] is the label to jump to.
4015 The comparison operands are saved away by cmp{si,di,sf,df}. */
4018 gen_conditional_branch (rtx *operands, enum rtx_code code)
4020 rtx op0, op1, condition;
4022 mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
4023 condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
4024 emit_jump_insn (gen_condjump (condition, operands[0]));
4029 (set temp (COND:CCV2 CMP_OP0 CMP_OP1))
4030 (set DEST (unspec [TRUE_SRC FALSE_SRC temp] UNSPEC_MOVE_TF_PS)) */
4033 mips_expand_vcondv2sf (rtx dest, rtx true_src, rtx false_src,
4034 enum rtx_code cond, rtx cmp_op0, rtx cmp_op1)
4039 reversed_p = mips_reverse_fp_cond_p (&cond);
4040 cmp_result = gen_reg_rtx (CCV2mode);
4041 emit_insn (gen_scc_ps (cmp_result,
4042 gen_rtx_fmt_ee (cond, VOIDmode, cmp_op0, cmp_op1)));
4044 emit_insn (gen_mips_cond_move_tf_ps (dest, false_src, true_src,
4047 emit_insn (gen_mips_cond_move_tf_ps (dest, true_src, false_src,
4051 /* Emit the common code for conditional moves. OPERANDS is the array
4052 of operands passed to the conditional move define_expand. */
4055 gen_conditional_move (rtx *operands)
4060 code = GET_CODE (operands[1]);
4061 mips_emit_compare (&code, &op0, &op1, true);
4062 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
4063 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
4064 gen_rtx_fmt_ee (code,
4067 operands[2], operands[3])));
4070 /* Emit a conditional trap. OPERANDS is the array of operands passed to
4071 the conditional_trap expander. */
4074 mips_gen_conditional_trap (rtx *operands)
4077 enum rtx_code cmp_code = GET_CODE (operands[0]);
4078 enum machine_mode mode = GET_MODE (cmp_operands[0]);
4080 /* MIPS conditional trap machine instructions don't have GT or LE
4081 flavors, so we must invert the comparison and convert to LT and
4082 GE, respectively. */
4085 case GT: cmp_code = LT; break;
4086 case LE: cmp_code = GE; break;
4087 case GTU: cmp_code = LTU; break;
4088 case LEU: cmp_code = GEU; break;
4091 if (cmp_code == GET_CODE (operands[0]))
4093 op0 = cmp_operands[0];
4094 op1 = cmp_operands[1];
4098 op0 = cmp_operands[1];
4099 op1 = cmp_operands[0];
4101 op0 = force_reg (mode, op0);
4102 if (!arith_operand (op1, mode))
4103 op1 = force_reg (mode, op1);
4105 emit_insn (gen_rtx_TRAP_IF (VOIDmode,
4106 gen_rtx_fmt_ee (cmp_code, mode, op0, op1),
4110 /* Return true if calls to X can use R_MIPS_CALL* relocations. */
4113 mips_ok_for_lazy_binding_p (rtx x)
4115 return (TARGET_USE_GOT
4116 && GET_CODE (x) == SYMBOL_REF
4117 && !mips_symbol_binds_local_p (x));
4120 /* Load function address ADDR into register DEST. SIBCALL_P is true
4121 if the address is needed for a sibling call. */
4124 mips_load_call_address (rtx dest, rtx addr, int sibcall_p)
4126 /* If we're generating PIC, and this call is to a global function,
4127 try to allow its address to be resolved lazily. This isn't
4128 possible if TARGET_CALL_SAVED_GP since the value of $gp on entry
4129 to the stub would be our caller's gp, not ours. */
4130 if (TARGET_EXPLICIT_RELOCS
4131 && !(sibcall_p && TARGET_CALL_SAVED_GP)
4132 && mips_ok_for_lazy_binding_p (addr))
4134 rtx high, lo_sum_symbol;
4136 high = mips_unspec_offset_high (dest, pic_offset_table_rtx,
4137 addr, SYMBOL_GOTOFF_CALL);
4138 lo_sum_symbol = mips_unspec_address (addr, SYMBOL_GOTOFF_CALL);
4139 if (Pmode == SImode)
4140 emit_insn (gen_load_callsi (dest, high, lo_sum_symbol));
4142 emit_insn (gen_load_calldi (dest, high, lo_sum_symbol));
4145 mips_emit_move (dest, addr);
4149 /* Expand a call or call_value instruction. RESULT is where the
4150 result will go (null for calls), ADDR is the address of the
4151 function, ARGS_SIZE is the size of the arguments and AUX is
4152 the value passed to us by mips_function_arg. SIBCALL_P is true
4153 if we are expanding a sibling call, false if we're expanding
4157 mips_expand_call (rtx result, rtx addr, rtx args_size, rtx aux, int sibcall_p)
4159 rtx orig_addr, pattern, insn;
4162 if (!call_insn_operand (addr, VOIDmode))
4164 addr = gen_reg_rtx (Pmode);
4165 mips_load_call_address (addr, orig_addr, sibcall_p);
4169 && TARGET_HARD_FLOAT_ABI
4170 && build_mips16_call_stub (result, addr, args_size,
4171 aux == 0 ? 0 : (int) GET_MODE (aux)))
4175 pattern = (sibcall_p
4176 ? gen_sibcall_internal (addr, args_size)
4177 : gen_call_internal (addr, args_size));
4178 else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
4182 reg1 = XEXP (XVECEXP (result, 0, 0), 0);
4183 reg2 = XEXP (XVECEXP (result, 0, 1), 0);
4186 ? gen_sibcall_value_multiple_internal (reg1, addr, args_size, reg2)
4187 : gen_call_value_multiple_internal (reg1, addr, args_size, reg2));
4190 pattern = (sibcall_p
4191 ? gen_sibcall_value_internal (result, addr, args_size)
4192 : gen_call_value_internal (result, addr, args_size));
4194 insn = emit_call_insn (pattern);
4196 /* Lazy-binding stubs require $gp to be valid on entry. */
4197 if (mips_ok_for_lazy_binding_p (orig_addr))
4198 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
4202 /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */
4205 mips_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
4207 if (!TARGET_SIBCALLS)
4210 /* We can't do a sibcall if the called function is a MIPS16 function
4211 because there is no direct "jx" instruction equivalent to "jalx" to
4212 switch the ISA mode. */
4213 if (decl && SYMBOL_REF_MIPS16_FUNC_P (XEXP (DECL_RTL (decl), 0)))
4220 /* Emit code to move general operand SRC into condition-code
4221 register DEST. SCRATCH is a scratch TFmode float register.
4228 where FP1 and FP2 are single-precision float registers
4229 taken from SCRATCH. */
4232 mips_emit_fcc_reload (rtx dest, rtx src, rtx scratch)
4236 /* Change the source to SFmode. */
4238 src = adjust_address (src, SFmode, 0);
4239 else if (REG_P (src) || GET_CODE (src) == SUBREG)
4240 src = gen_rtx_REG (SFmode, true_regnum (src));
4242 fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
4243 fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + MAX_FPRS_PER_FMT);
4245 mips_emit_move (copy_rtx (fp1), src);
4246 mips_emit_move (copy_rtx (fp2), CONST0_RTX (SFmode));
4247 emit_insn (gen_slt_sf (dest, fp2, fp1));
4250 /* Emit code to change the current function's return address to
4251 ADDRESS. SCRATCH is available as a scratch register, if needed.
4252 ADDRESS and SCRATCH are both word-mode GPRs. */
4255 mips_set_return_address (rtx address, rtx scratch)
4259 compute_frame_size (get_frame_size ());
4260 gcc_assert ((cfun->machine->frame.mask >> 31) & 1);
4261 slot_address = mips_add_offset (scratch, stack_pointer_rtx,
4262 cfun->machine->frame.gp_sp_offset);
4264 mips_emit_move (gen_rtx_MEM (GET_MODE (address), slot_address), address);
4267 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
4268 Assume that the areas do not overlap. */
4271 mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
4273 HOST_WIDE_INT offset, delta;
4274 unsigned HOST_WIDE_INT bits;
4276 enum machine_mode mode;
4279 /* Work out how many bits to move at a time. If both operands have
4280 half-word alignment, it is usually better to move in half words.
4281 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
4282 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
4283 Otherwise move word-sized chunks. */
4284 if (MEM_ALIGN (src) == BITS_PER_WORD / 2
4285 && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
4286 bits = BITS_PER_WORD / 2;
4288 bits = BITS_PER_WORD;
4290 mode = mode_for_size (bits, MODE_INT, 0);
4291 delta = bits / BITS_PER_UNIT;
4293 /* Allocate a buffer for the temporary registers. */
4294 regs = alloca (sizeof (rtx) * length / delta);
4296 /* Load as many BITS-sized chunks as possible. Use a normal load if
4297 the source has enough alignment, otherwise use left/right pairs. */
4298 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
4300 regs[i] = gen_reg_rtx (mode);
4301 if (MEM_ALIGN (src) >= bits)
4302 mips_emit_move (regs[i], adjust_address (src, mode, offset));
4305 rtx part = adjust_address (src, BLKmode, offset);
4306 if (!mips_expand_unaligned_load (regs[i], part, bits, 0))
4311 /* Copy the chunks to the destination. */
4312 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
4313 if (MEM_ALIGN (dest) >= bits)
4314 mips_emit_move (adjust_address (dest, mode, offset), regs[i]);
4317 rtx part = adjust_address (dest, BLKmode, offset);
4318 if (!mips_expand_unaligned_store (part, regs[i], bits, 0))
4322 /* Mop up any left-over bytes. */
4323 if (offset < length)
4325 src = adjust_address (src, BLKmode, offset);
4326 dest = adjust_address (dest, BLKmode, offset);
4327 move_by_pieces (dest, src, length - offset,
4328 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
4332 #define MAX_MOVE_REGS 4
4333 #define MAX_MOVE_BYTES (MAX_MOVE_REGS * UNITS_PER_WORD)
4336 /* Helper function for doing a loop-based block operation on memory
4337 reference MEM. Each iteration of the loop will operate on LENGTH
4340 Create a new base register for use within the loop and point it to
4341 the start of MEM. Create a new memory reference that uses this
4342 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
4345 mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
4346 rtx *loop_reg, rtx *loop_mem)
4348 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
4350 /* Although the new mem does not refer to a known location,
4351 it does keep up to LENGTH bytes of alignment. */
4352 *loop_mem = change_address (mem, BLKmode, *loop_reg);
4353 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
4357 /* Move LENGTH bytes from SRC to DEST using a loop that moves MAX_MOVE_BYTES
4358 per iteration. LENGTH must be at least MAX_MOVE_BYTES. Assume that the
4359 memory regions do not overlap. */
4362 mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length)
4364 rtx label, src_reg, dest_reg, final_src;
4365 HOST_WIDE_INT leftover;
4367 leftover = length % MAX_MOVE_BYTES;
4370 /* Create registers and memory references for use within the loop. */
4371 mips_adjust_block_mem (src, MAX_MOVE_BYTES, &src_reg, &src);
4372 mips_adjust_block_mem (dest, MAX_MOVE_BYTES, &dest_reg, &dest);
4374 /* Calculate the value that SRC_REG should have after the last iteration
4376 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
4379 /* Emit the start of the loop. */
4380 label = gen_label_rtx ();
4383 /* Emit the loop body. */
4384 mips_block_move_straight (dest, src, MAX_MOVE_BYTES);
4386 /* Move on to the next block. */
4387 mips_emit_move (src_reg, plus_constant (src_reg, MAX_MOVE_BYTES));
4388 mips_emit_move (dest_reg, plus_constant (dest_reg, MAX_MOVE_BYTES));
4390 /* Emit the loop condition. */
4391 if (Pmode == DImode)
4392 emit_insn (gen_cmpdi (src_reg, final_src));
4394 emit_insn (gen_cmpsi (src_reg, final_src));
4395 emit_jump_insn (gen_bne (label));
4397 /* Mop up any left-over bytes. */
4399 mips_block_move_straight (dest, src, leftover);
4403 /* Expand a loop of synci insns for the address range [BEGIN, END). */
4406 mips_expand_synci_loop (rtx begin, rtx end)
4408 rtx inc, label, cmp, cmp_result;
4410 /* Load INC with the cache line size (rdhwr INC,$1). */
4411 inc = gen_reg_rtx (SImode);
4412 emit_insn (gen_rdhwr (inc, const1_rtx));
4414 /* Loop back to here. */
4415 label = gen_label_rtx ();
4418 emit_insn (gen_synci (begin));
4420 cmp = gen_reg_rtx (Pmode);
4421 mips_emit_binary (GTU, cmp, begin, end);
4423 mips_emit_binary (PLUS, begin, begin, inc);
4425 cmp_result = gen_rtx_EQ (VOIDmode, cmp, const0_rtx);
4426 emit_jump_insn (gen_condjump (cmp_result, label));
4429 /* Expand a movmemsi instruction. */
4432 mips_expand_block_move (rtx dest, rtx src, rtx length)
4434 if (GET_CODE (length) == CONST_INT)
4436 if (INTVAL (length) <= 2 * MAX_MOVE_BYTES)
4438 mips_block_move_straight (dest, src, INTVAL (length));
4443 mips_block_move_loop (dest, src, INTVAL (length));
4450 /* Argument support functions. */
4452 /* Initialize CUMULATIVE_ARGS for a function. */
4455 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
4456 rtx libname ATTRIBUTE_UNUSED)
4458 static CUMULATIVE_ARGS zero_cum;
4459 tree param, next_param;
4462 cum->prototype = (fntype && TYPE_ARG_TYPES (fntype));
4464 /* Determine if this function has variable arguments. This is
4465 indicated by the last argument being 'void_type_mode' if there
4466 are no variable arguments. The standard MIPS calling sequence
4467 passes all arguments in the general purpose registers in this case. */
4469 for (param = fntype ? TYPE_ARG_TYPES (fntype) : 0;
4470 param != 0; param = next_param)
4472 next_param = TREE_CHAIN (param);
4473 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
4474 cum->gp_reg_found = 1;
4479 /* Fill INFO with information about a single argument. CUM is the
4480 cumulative state for earlier arguments. MODE is the mode of this
4481 argument and TYPE is its type (if known). NAMED is true if this
4482 is a named (fixed) argument rather than a variable one. */
4485 mips_arg_info (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4486 tree type, int named, struct mips_arg_info *info)
4488 bool doubleword_aligned_p;
4489 unsigned int num_bytes, num_words, max_regs;
4491 /* Work out the size of the argument. */
4492 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
4493 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4495 /* Decide whether it should go in a floating-point register, assuming
4496 one is free. Later code checks for availability.
4498 The checks against UNITS_PER_FPVALUE handle the soft-float and
4499 single-float cases. */
4503 /* The EABI conventions have traditionally been defined in terms
4504 of TYPE_MODE, regardless of the actual type. */
4505 info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
4506 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4507 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4512 /* Only leading floating-point scalars are passed in
4513 floating-point registers. We also handle vector floats the same
4514 say, which is OK because they are not covered by the standard ABI. */
4515 info->fpr_p = (!cum->gp_reg_found
4516 && cum->arg_number < 2
4517 && (type == 0 || SCALAR_FLOAT_TYPE_P (type)
4518 || VECTOR_FLOAT_TYPE_P (type))
4519 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4520 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4521 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4526 /* Scalar and complex floating-point types are passed in
4527 floating-point registers. */
4528 info->fpr_p = (named
4529 && (type == 0 || FLOAT_TYPE_P (type))
4530 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4531 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4532 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4533 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
4535 /* ??? According to the ABI documentation, the real and imaginary
4536 parts of complex floats should be passed in individual registers.
4537 The real and imaginary parts of stack arguments are supposed
4538 to be contiguous and there should be an extra word of padding
4541 This has two problems. First, it makes it impossible to use a
4542 single "void *" va_list type, since register and stack arguments
4543 are passed differently. (At the time of writing, MIPSpro cannot
4544 handle complex float varargs correctly.) Second, it's unclear
4545 what should happen when there is only one register free.
4547 For now, we assume that named complex floats should go into FPRs
4548 if there are two FPRs free, otherwise they should be passed in the
4549 same way as a struct containing two floats. */
4551 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4552 && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
4554 if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
4555 info->fpr_p = false;
4565 /* See whether the argument has doubleword alignment. */
4566 doubleword_aligned_p = FUNCTION_ARG_BOUNDARY (mode, type) > BITS_PER_WORD;
4568 /* Set REG_OFFSET to the register count we're interested in.
4569 The EABI allocates the floating-point registers separately,
4570 but the other ABIs allocate them like integer registers. */
4571 info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
4575 /* Advance to an even register if the argument is doubleword-aligned. */
4576 if (doubleword_aligned_p)
4577 info->reg_offset += info->reg_offset & 1;
4579 /* Work out the offset of a stack argument. */
4580 info->stack_offset = cum->stack_words;
4581 if (doubleword_aligned_p)
4582 info->stack_offset += info->stack_offset & 1;
4584 max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
4586 /* Partition the argument between registers and stack. */
4587 info->reg_words = MIN (num_words, max_regs);
4588 info->stack_words = num_words - info->reg_words;
4592 /* INFO describes an argument that is passed in a single-register value.
4593 Return the register it uses, assuming that FPRs are available if
4597 mips_arg_regno (const struct mips_arg_info *info, bool hard_float_p)
4599 if (!info->fpr_p || !hard_float_p)
4600 return GP_ARG_FIRST + info->reg_offset;
4601 else if (mips_abi == ABI_32 && TARGET_DOUBLE_FLOAT && info->reg_offset > 0)
4602 /* In o32, the second argument is always passed in $f14
4603 for TARGET_DOUBLE_FLOAT, regardless of whether the
4604 first argument was a word or doubleword. */
4605 return FP_ARG_FIRST + 2;
4607 return FP_ARG_FIRST + info->reg_offset;
4610 /* Implement FUNCTION_ARG_ADVANCE. */
4613 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4614 tree type, int named)
4616 struct mips_arg_info info;
4618 mips_arg_info (cum, mode, type, named, &info);
4621 cum->gp_reg_found = true;
4623 /* See the comment above the cumulative args structure in mips.h
4624 for an explanation of what this code does. It assumes the O32
4625 ABI, which passes at most 2 arguments in float registers. */
4626 if (cum->arg_number < 2 && info.fpr_p)
4627 cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);
4629 if (mips_abi != ABI_EABI || !info.fpr_p)
4630 cum->num_gprs = info.reg_offset + info.reg_words;
4631 else if (info.reg_words > 0)
4632 cum->num_fprs += MAX_FPRS_PER_FMT;
4634 if (info.stack_words > 0)
4635 cum->stack_words = info.stack_offset + info.stack_words;
4640 /* Implement FUNCTION_ARG. */
4643 function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4644 tree type, int named)
4646 struct mips_arg_info info;
4648 /* We will be called with a mode of VOIDmode after the last argument
4649 has been seen. Whatever we return will be passed to the call
4650 insn. If we need a mips16 fp_code, return a REG with the code
4651 stored as the mode. */
4652 if (mode == VOIDmode)
4654 if (TARGET_MIPS16 && cum->fp_code != 0)
4655 return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
4661 mips_arg_info (cum, mode, type, named, &info);
4663 /* Return straight away if the whole argument is passed on the stack. */
4664 if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
4668 && TREE_CODE (type) == RECORD_TYPE
4670 && TYPE_SIZE_UNIT (type)
4671 && host_integerp (TYPE_SIZE_UNIT (type), 1)
4674 /* The Irix 6 n32/n64 ABIs say that if any 64-bit chunk of the
4675 structure contains a double in its entirety, then that 64-bit
4676 chunk is passed in a floating point register. */
4679 /* First check to see if there is any such field. */
4680 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4681 if (TREE_CODE (field) == FIELD_DECL
4682 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
4683 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
4684 && host_integerp (bit_position (field), 0)
4685 && int_bit_position (field) % BITS_PER_WORD == 0)
4690 /* Now handle the special case by returning a PARALLEL
4691 indicating where each 64-bit chunk goes. INFO.REG_WORDS
4692 chunks are passed in registers. */
4694 HOST_WIDE_INT bitpos;
4697 /* assign_parms checks the mode of ENTRY_PARM, so we must
4698 use the actual mode here. */
4699 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
4702 field = TYPE_FIELDS (type);
4703 for (i = 0; i < info.reg_words; i++)
4707 for (; field; field = TREE_CHAIN (field))
4708 if (TREE_CODE (field) == FIELD_DECL
4709 && int_bit_position (field) >= bitpos)
4713 && int_bit_position (field) == bitpos
4714 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
4715 && !TARGET_SOFT_FLOAT
4716 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
4717 reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
4719 reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
4722 = gen_rtx_EXPR_LIST (VOIDmode, reg,
4723 GEN_INT (bitpos / BITS_PER_UNIT));
4725 bitpos += BITS_PER_WORD;
4731 /* Handle the n32/n64 conventions for passing complex floating-point
4732 arguments in FPR pairs. The real part goes in the lower register
4733 and the imaginary part goes in the upper register. */
4736 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4739 enum machine_mode inner;
4742 inner = GET_MODE_INNER (mode);
4743 reg = FP_ARG_FIRST + info.reg_offset;
4744 if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
4746 /* Real part in registers, imaginary part on stack. */
4747 gcc_assert (info.stack_words == info.reg_words);
4748 return gen_rtx_REG (inner, reg);
4752 gcc_assert (info.stack_words == 0);
4753 real = gen_rtx_EXPR_LIST (VOIDmode,
4754 gen_rtx_REG (inner, reg),
4756 imag = gen_rtx_EXPR_LIST (VOIDmode,
4758 reg + info.reg_words / 2),
4759 GEN_INT (GET_MODE_SIZE (inner)));
4760 return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
4764 return gen_rtx_REG (mode, mips_arg_regno (&info, TARGET_HARD_FLOAT));
4768 /* Implement TARGET_ARG_PARTIAL_BYTES. */
4771 mips_arg_partial_bytes (CUMULATIVE_ARGS *cum,
4772 enum machine_mode mode, tree type, bool named)
4774 struct mips_arg_info info;
4776 mips_arg_info (cum, mode, type, named, &info);
4777 return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
4781 /* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
4782 PARM_BOUNDARY bits of alignment, but will be given anything up
4783 to STACK_BOUNDARY bits if the type requires it. */
4786 function_arg_boundary (enum machine_mode mode, tree type)
4788 unsigned int alignment;
4790 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
4791 if (alignment < PARM_BOUNDARY)
4792 alignment = PARM_BOUNDARY;
4793 if (alignment > STACK_BOUNDARY)
4794 alignment = STACK_BOUNDARY;
4798 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
4799 upward rather than downward. In other words, return true if the
4800 first byte of the stack slot has useful data, false if the last
4804 mips_pad_arg_upward (enum machine_mode mode, const_tree type)
4806 /* On little-endian targets, the first byte of every stack argument
4807 is passed in the first byte of the stack slot. */
4808 if (!BYTES_BIG_ENDIAN)
4811 /* Otherwise, integral types are padded downward: the last byte of a
4812 stack argument is passed in the last byte of the stack slot. */
4814 ? (INTEGRAL_TYPE_P (type)
4815 || POINTER_TYPE_P (type)
4816 || FIXED_POINT_TYPE_P (type))
4817 : (GET_MODE_CLASS (mode) == MODE_INT
4818 || ALL_SCALAR_FIXED_POINT_MODE_P (mode)))
4821 /* Big-endian o64 pads floating-point arguments downward. */
4822 if (mips_abi == ABI_O64)
4823 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4826 /* Other types are padded upward for o32, o64, n32 and n64. */
4827 if (mips_abi != ABI_EABI)
4830 /* Arguments smaller than a stack slot are padded downward. */
4831 if (mode != BLKmode)
4832 return (GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY);
4834 return (int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT));
4838 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
4839 if the least significant byte of the register has useful data. Return
4840 the opposite if the most significant byte does. */
4843 mips_pad_reg_upward (enum machine_mode mode, tree type)
4845 /* No shifting is required for floating-point arguments. */
4846 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4847 return !BYTES_BIG_ENDIAN;
4849 /* Otherwise, apply the same padding to register arguments as we do
4850 to stack arguments. */
4851 return mips_pad_arg_upward (mode, type);
4855 mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4856 tree type, int *pretend_size ATTRIBUTE_UNUSED,
4859 CUMULATIVE_ARGS local_cum;
4860 int gp_saved, fp_saved;
4862 /* The caller has advanced CUM up to, but not beyond, the last named
4863 argument. Advance a local copy of CUM past the last "real" named
4864 argument, to find out how many registers are left over. */
4867 FUNCTION_ARG_ADVANCE (local_cum, mode, type, 1);
4869 /* Found out how many registers we need to save. */
4870 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
4871 fp_saved = (EABI_FLOAT_VARARGS_P
4872 ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
4881 ptr = plus_constant (virtual_incoming_args_rtx,
4882 REG_PARM_STACK_SPACE (cfun->decl)
4883 - gp_saved * UNITS_PER_WORD);
4884 mem = gen_rtx_MEM (BLKmode, ptr);
4885 set_mem_alias_set (mem, get_varargs_alias_set ());
4887 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
4892 /* We can't use move_block_from_reg, because it will use
4894 enum machine_mode mode;
4897 /* Set OFF to the offset from virtual_incoming_args_rtx of
4898 the first float register. The FP save area lies below
4899 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
4900 off = -gp_saved * UNITS_PER_WORD;
4901 off &= ~(UNITS_PER_FPVALUE - 1);
4902 off -= fp_saved * UNITS_PER_FPREG;
4904 mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
4906 for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS;
4907 i += MAX_FPRS_PER_FMT)
4911 ptr = plus_constant (virtual_incoming_args_rtx, off);
4912 mem = gen_rtx_MEM (mode, ptr);
4913 set_mem_alias_set (mem, get_varargs_alias_set ());
4914 mips_emit_move (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
4915 off += UNITS_PER_HWFPVALUE;
4919 if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
4920 cfun->machine->varargs_size = (gp_saved * UNITS_PER_WORD
4921 + fp_saved * UNITS_PER_FPREG);
4924 /* Create the va_list data type.
4925 We keep 3 pointers, and two offsets.
4926 Two pointers are to the overflow area, which starts at the CFA.
4927 One of these is constant, for addressing into the GPR save area below it.
4928 The other is advanced up the stack through the overflow region.
4929 The third pointer is to the GPR save area. Since the FPR save area
4930 is just below it, we can address FPR slots off this pointer.
4931 We also keep two one-byte offsets, which are to be subtracted from the
4932 constant pointers to yield addresses in the GPR and FPR save areas.
4933 These are downcounted as float or non-float arguments are used,
4934 and when they get to zero, the argument must be obtained from the
4936 If !EABI_FLOAT_VARARGS_P, then no FPR save area exists, and a single
4937 pointer is enough. It's started at the GPR save area, and is
4939 Note that the GPR save area is not constant size, due to optimization
4940 in the prologue. Hence, we can't use a design with two pointers
4941 and two offsets, although we could have designed this with two pointers
4942 and three offsets. */
4945 mips_build_builtin_va_list (void)
4947 if (EABI_FLOAT_VARARGS_P)
4949 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
4952 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
4954 f_ovfl = build_decl (FIELD_DECL, get_identifier ("__overflow_argptr"),
4956 f_gtop = build_decl (FIELD_DECL, get_identifier ("__gpr_top"),
4958 f_ftop = build_decl (FIELD_DECL, get_identifier ("__fpr_top"),
4960 f_goff = build_decl (FIELD_DECL, get_identifier ("__gpr_offset"),
4961 unsigned_char_type_node);
4962 f_foff = build_decl (FIELD_DECL, get_identifier ("__fpr_offset"),
4963 unsigned_char_type_node);
4964 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
4965 warn on every user file. */
4966 index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
4967 array = build_array_type (unsigned_char_type_node,
4968 build_index_type (index));
4969 f_res = build_decl (FIELD_DECL, get_identifier ("__reserved"), array);
4971 DECL_FIELD_CONTEXT (f_ovfl) = record;
4972 DECL_FIELD_CONTEXT (f_gtop) = record;
4973 DECL_FIELD_CONTEXT (f_ftop) = record;
4974 DECL_FIELD_CONTEXT (f_goff) = record;
4975 DECL_FIELD_CONTEXT (f_foff) = record;
4976 DECL_FIELD_CONTEXT (f_res) = record;
4978 TYPE_FIELDS (record) = f_ovfl;
4979 TREE_CHAIN (f_ovfl) = f_gtop;
4980 TREE_CHAIN (f_gtop) = f_ftop;
4981 TREE_CHAIN (f_ftop) = f_goff;
4982 TREE_CHAIN (f_goff) = f_foff;
4983 TREE_CHAIN (f_foff) = f_res;
4985 layout_type (record);
4988 else if (TARGET_IRIX && TARGET_IRIX6)
4989 /* On IRIX 6, this type is 'char *'. */
4990 return build_pointer_type (char_type_node);
4992 /* Otherwise, we use 'void *'. */
4993 return ptr_type_node;
4996 /* Implement va_start. */
4999 mips_va_start (tree valist, rtx nextarg)
5001 if (EABI_FLOAT_VARARGS_P)
5003 const CUMULATIVE_ARGS *cum;
5004 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
5005 tree ovfl, gtop, ftop, goff, foff;
5007 int gpr_save_area_size;
5008 int fpr_save_area_size;
5011 cum = ¤t_function_args_info;
5013 = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
5015 = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
5017 f_ovfl = TYPE_FIELDS (va_list_type_node);
5018 f_gtop = TREE_CHAIN (f_ovfl);
5019 f_ftop = TREE_CHAIN (f_gtop);
5020 f_goff = TREE_CHAIN (f_ftop);
5021 f_foff = TREE_CHAIN (f_goff);
5023 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
5025 gtop = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
5027 ftop = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
5029 goff = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
5031 foff = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
5034 /* Emit code to initialize OVFL, which points to the next varargs
5035 stack argument. CUM->STACK_WORDS gives the number of stack
5036 words used by named arguments. */
5037 t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
5038 if (cum->stack_words > 0)
5039 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), t,
5040 size_int (cum->stack_words * UNITS_PER_WORD));
5041 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
5042 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5044 /* Emit code to initialize GTOP, the top of the GPR save area. */
5045 t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
5046 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (gtop), gtop, t);
5047 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5049 /* Emit code to initialize FTOP, the top of the FPR save area.
5050 This address is gpr_save_area_bytes below GTOP, rounded
5051 down to the next fp-aligned boundary. */
5052 t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
5053 fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
5054 fpr_offset &= ~(UNITS_PER_FPVALUE - 1);
5056 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ftop), t,
5057 size_int (-fpr_offset));
5058 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ftop), ftop, t);
5059 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5061 /* Emit code to initialize GOFF, the offset from GTOP of the
5062 next GPR argument. */
5063 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (goff), goff,
5064 build_int_cst (NULL_TREE, gpr_save_area_size));
5065 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5067 /* Likewise emit code to initialize FOFF, the offset from FTOP
5068 of the next FPR argument. */
5069 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (foff), foff,
5070 build_int_cst (NULL_TREE, fpr_save_area_size));
5071 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5075 nextarg = plus_constant (nextarg, -cfun->machine->varargs_size);
5076 std_expand_builtin_va_start (valist, nextarg);
5080 /* Implement va_arg. */
5083 mips_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
5085 HOST_WIDE_INT size, rsize;
5089 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
5092 type = build_pointer_type (type);
5094 size = int_size_in_bytes (type);
5095 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
5097 if (mips_abi != ABI_EABI || !EABI_FLOAT_VARARGS_P)
5098 addr = std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
5101 /* Not a simple merged stack. */
5103 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
5104 tree ovfl, top, off, align;
5105 HOST_WIDE_INT osize;
5108 f_ovfl = TYPE_FIELDS (va_list_type_node);
5109 f_gtop = TREE_CHAIN (f_ovfl);
5110 f_ftop = TREE_CHAIN (f_gtop);
5111 f_goff = TREE_CHAIN (f_ftop);
5112 f_foff = TREE_CHAIN (f_goff);
5114 /* We maintain separate pointers and offsets for floating-point
5115 and integer arguments, but we need similar code in both cases.
5118 TOP be the top of the register save area;
5119 OFF be the offset from TOP of the next register;
5120 ADDR_RTX be the address of the argument;
5121 RSIZE be the number of bytes used to store the argument
5122 when it's in the register save area;
5123 OSIZE be the number of bytes used to store it when it's
5124 in the stack overflow area; and
5125 PADDING be (BYTES_BIG_ENDIAN ? OSIZE - RSIZE : 0)
5127 The code we want is:
5129 1: off &= -rsize; // round down
5132 4: addr_rtx = top - off;
5137 9: ovfl += ((intptr_t) ovfl + osize - 1) & -osize;
5138 10: addr_rtx = ovfl + PADDING;
5142 [1] and [9] can sometimes be optimized away. */
5144 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
5147 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
5148 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
5150 top = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
5152 off = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
5155 /* When floating-point registers are saved to the stack,
5156 each one will take up UNITS_PER_HWFPVALUE bytes, regardless
5157 of the float's precision. */
5158 rsize = UNITS_PER_HWFPVALUE;
5160 /* Overflow arguments are padded to UNITS_PER_WORD bytes
5161 (= PARM_BOUNDARY bits). This can be different from RSIZE
5164 (1) On 32-bit targets when TYPE is a structure such as:
5166 struct s { float f; };
5168 Such structures are passed in paired FPRs, so RSIZE
5169 will be 8 bytes. However, the structure only takes
5170 up 4 bytes of memory, so OSIZE will only be 4.
5172 (2) In combinations such as -mgp64 -msingle-float
5173 -fshort-double. Doubles passed in registers
5174 will then take up 4 (UNITS_PER_HWFPVALUE) bytes,
5175 but those passed on the stack take up
5176 UNITS_PER_WORD bytes. */
5177 osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
5181 top = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
5183 off = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
5185 if (rsize > UNITS_PER_WORD)
5187 /* [1] Emit code for: off &= -rsize. */
5188 t = build2 (BIT_AND_EXPR, TREE_TYPE (off), off,
5189 build_int_cst (NULL_TREE, -rsize));
5190 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (off), off, t);
5191 gimplify_and_add (t, pre_p);
5196 /* [2] Emit code to branch if off == 0. */
5197 t = build2 (NE_EXPR, boolean_type_node, off,
5198 build_int_cst (TREE_TYPE (off), 0));
5199 addr = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
5201 /* [5] Emit code for: off -= rsize. We do this as a form of
5202 post-increment not available to C. Also widen for the
5203 coming pointer arithmetic. */
5204 t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
5205 t = build2 (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
5206 t = fold_convert (sizetype, t);
5207 t = fold_build1 (NEGATE_EXPR, sizetype, t);
5209 /* [4] Emit code for: addr_rtx = top - off. On big endian machines,
5210 the argument has RSIZE - SIZE bytes of leading padding. */
5211 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (top), top, t);
5212 if (BYTES_BIG_ENDIAN && rsize > size)
5214 u = size_int (rsize - size);
5215 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
5217 COND_EXPR_THEN (addr) = t;
5219 if (osize > UNITS_PER_WORD)
5221 /* [9] Emit: ovfl += ((intptr_t) ovfl + osize - 1) & -osize. */
5222 u = size_int (osize - 1);
5223 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), ovfl, u);
5224 t = fold_convert (sizetype, t);
5225 u = size_int (-osize);
5226 t = build2 (BIT_AND_EXPR, sizetype, t, u);
5227 t = fold_convert (TREE_TYPE (ovfl), t);
5228 align = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
5233 /* [10, 11]. Emit code to store ovfl in addr_rtx, then
5234 post-increment ovfl by osize. On big-endian machines,
5235 the argument has OSIZE - SIZE bytes of leading padding. */
5236 u = fold_convert (TREE_TYPE (ovfl),
5237 build_int_cst (NULL_TREE, osize));
5238 t = build2 (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
5239 if (BYTES_BIG_ENDIAN && osize > size)
5241 u = size_int (osize - size);
5242 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
5245 /* String [9] and [10,11] together. */
5247 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), align, t);
5248 COND_EXPR_ELSE (addr) = t;
5250 addr = fold_convert (build_pointer_type (type), addr);
5251 addr = build_va_arg_indirect_ref (addr);
5255 addr = build_va_arg_indirect_ref (addr);
5260 /* Return true if it is possible to use left/right accesses for a
5261 bitfield of WIDTH bits starting BITPOS bits into *OP. When
5262 returning true, update *OP, *LEFT and *RIGHT as follows:
5264 *OP is a BLKmode reference to the whole field.
5266 *LEFT is a QImode reference to the first byte if big endian or
5267 the last byte if little endian. This address can be used in the
5268 left-side instructions (lwl, swl, ldl, sdl).
5270 *RIGHT is a QImode reference to the opposite end of the field and
5271 can be used in the patterning right-side instruction. */
5274 mips_get_unaligned_mem (rtx *op, unsigned int width, int bitpos,
5275 rtx *left, rtx *right)
5279 /* Check that the operand really is a MEM. Not all the extv and
5280 extzv predicates are checked. */
5284 /* Check that the size is valid. */
5285 if (width != 32 && (!TARGET_64BIT || width != 64))
5288 /* We can only access byte-aligned values. Since we are always passed
5289 a reference to the first byte of the field, it is not necessary to
5290 do anything with BITPOS after this check. */
5291 if (bitpos % BITS_PER_UNIT != 0)
5294 /* Reject aligned bitfields: we want to use a normal load or store
5295 instead of a left/right pair. */
5296 if (MEM_ALIGN (*op) >= width)
5299 /* Adjust *OP to refer to the whole field. This also has the effect
5300 of legitimizing *OP's address for BLKmode, possibly simplifying it. */
5301 *op = adjust_address (*op, BLKmode, 0);
5302 set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
5304 /* Get references to both ends of the field. We deliberately don't
5305 use the original QImode *OP for FIRST since the new BLKmode one
5306 might have a simpler address. */
5307 first = adjust_address (*op, QImode, 0);
5308 last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
5310 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
5311 be the upper word and RIGHT the lower word. */
5312 if (TARGET_BIG_ENDIAN)
5313 *left = first, *right = last;
5315 *left = last, *right = first;
5321 /* Try to emit the equivalent of (set DEST (zero_extract SRC WIDTH BITPOS)).
5322 Return true on success. We only handle cases where zero_extract is
5323 equivalent to sign_extract. */
5326 mips_expand_unaligned_load (rtx dest, rtx src, unsigned int width, int bitpos)
5328 rtx left, right, temp;
5330 /* If TARGET_64BIT, the destination of a 32-bit load will be a
5331 paradoxical word_mode subreg. This is the only case in which
5332 we allow the destination to be larger than the source. */
5333 if (GET_CODE (dest) == SUBREG
5334 && GET_MODE (dest) == DImode
5335 && SUBREG_BYTE (dest) == 0
5336 && GET_MODE (SUBREG_REG (dest)) == SImode)
5337 dest = SUBREG_REG (dest);
5339 /* After the above adjustment, the destination must be the same
5340 width as the source. */
5341 if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
5344 if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
5347 temp = gen_reg_rtx (GET_MODE (dest));
5348 if (GET_MODE (dest) == DImode)
5350 emit_insn (gen_mov_ldl (temp, src, left));
5351 emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
5355 emit_insn (gen_mov_lwl (temp, src, left));
5356 emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
5362 /* Try to expand (set (zero_extract DEST WIDTH BITPOS) SRC). Return
5366 mips_expand_unaligned_store (rtx dest, rtx src, unsigned int width, int bitpos)
5369 enum machine_mode mode;
5371 if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
5374 mode = mode_for_size (width, MODE_INT, 0);
5375 src = gen_lowpart (mode, src);
5379 emit_insn (gen_mov_sdl (dest, src, left));
5380 emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
5384 emit_insn (gen_mov_swl (dest, src, left));
5385 emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
5390 /* Return true if X is a MEM with the same size as MODE. */
5393 mips_mem_fits_mode_p (enum machine_mode mode, rtx x)
5400 size = MEM_SIZE (x);
5401 return size && INTVAL (size) == GET_MODE_SIZE (mode);
5404 /* Return true if (zero_extract OP SIZE POSITION) can be used as the
5405 source of an "ext" instruction or the destination of an "ins"
5406 instruction. OP must be a register operand and the following
5407 conditions must hold:
5409 0 <= POSITION < GET_MODE_BITSIZE (GET_MODE (op))
5410 0 < SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
5411 0 < POSITION + SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
5413 Also reject lengths equal to a word as they are better handled
5414 by the move patterns. */
5417 mips_use_ins_ext_p (rtx op, rtx size, rtx position)
5419 HOST_WIDE_INT len, pos;
5421 if (!ISA_HAS_EXT_INS
5422 || !register_operand (op, VOIDmode)
5423 || GET_MODE_BITSIZE (GET_MODE (op)) > BITS_PER_WORD)
5426 len = INTVAL (size);
5427 pos = INTVAL (position);
5429 if (len <= 0 || len >= GET_MODE_BITSIZE (GET_MODE (op))
5430 || pos < 0 || pos + len > GET_MODE_BITSIZE (GET_MODE (op)))
5436 /* Set up globals to generate code for the ISA or processor
5437 described by INFO. */
5440 mips_set_architecture (const struct mips_cpu_info *info)
5444 mips_arch_info = info;
5445 mips_arch = info->cpu;
5446 mips_isa = info->isa;
5451 /* Likewise for tuning. */
5454 mips_set_tune (const struct mips_cpu_info *info)
5458 mips_tune_info = info;
5459 mips_tune = info->cpu;
5463 /* Initialize mips_split_addresses from the associated command-line
5466 mips_split_addresses is a half-way house between explicit
5467 relocations and the traditional assembler macros. It can
5468 split absolute 32-bit symbolic constants into a high/lo_sum
5469 pair but uses macros for other sorts of access.
5471 Like explicit relocation support for REL targets, it relies
5472 on GNU extensions in the assembler and the linker.
5474 Although this code should work for -O0, it has traditionally
5475 been treated as an optimization. */
5478 mips_init_split_addresses (void)
5480 if (!TARGET_MIPS16 && TARGET_SPLIT_ADDRESSES
5481 && optimize && !flag_pic
5482 && !ABI_HAS_64BIT_SYMBOLS)
5483 mips_split_addresses = 1;
5485 mips_split_addresses = 0;
5488 /* (Re-)Initialize information about relocs. */
5491 mips_init_relocs (void)
5493 memset (mips_split_p, '\0', sizeof (mips_split_p));
5494 memset (mips_hi_relocs, '\0', sizeof (mips_hi_relocs));
5495 memset (mips_lo_relocs, '\0', sizeof (mips_lo_relocs));
5497 if (ABI_HAS_64BIT_SYMBOLS)
5499 if (TARGET_EXPLICIT_RELOCS)
5501 mips_split_p[SYMBOL_64_HIGH] = true;
5502 mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
5503 mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
5505 mips_split_p[SYMBOL_64_MID] = true;
5506 mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
5507 mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
5509 mips_split_p[SYMBOL_64_LOW] = true;
5510 mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
5511 mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
5513 mips_split_p[SYMBOL_ABSOLUTE] = true;
5514 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
5519 if (TARGET_EXPLICIT_RELOCS || mips_split_addresses || TARGET_MIPS16)
5521 mips_split_p[SYMBOL_ABSOLUTE] = true;
5522 mips_hi_relocs[SYMBOL_ABSOLUTE] = "%hi(";
5523 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
5525 mips_lo_relocs[SYMBOL_32_HIGH] = "%hi(";
5531 /* The high part is provided by a pseudo copy of $gp. */
5532 mips_split_p[SYMBOL_GP_RELATIVE] = true;
5533 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gprel(";
5536 if (TARGET_EXPLICIT_RELOCS)
5538 /* Small data constants are kept whole until after reload,
5539 then lowered by mips_rewrite_small_data. */
5540 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gp_rel(";
5542 mips_split_p[SYMBOL_GOT_PAGE_OFST] = true;
5545 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
5546 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%got_ofst(";
5550 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
5551 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%lo(";
5556 /* The HIGH and LO_SUM are matched by special .md patterns. */
5557 mips_split_p[SYMBOL_GOT_DISP] = true;
5559 mips_split_p[SYMBOL_GOTOFF_DISP] = true;
5560 mips_hi_relocs[SYMBOL_GOTOFF_DISP] = "%got_hi(";
5561 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_lo(";
5563 mips_split_p[SYMBOL_GOTOFF_CALL] = true;
5564 mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
5565 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
5570 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_disp(";
5572 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got(";
5573 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
5579 mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
5580 mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
5581 mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
5584 /* Thread-local relocation operators. */
5585 mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
5586 mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";
5587 mips_split_p[SYMBOL_DTPREL] = 1;
5588 mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
5589 mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";
5590 mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";
5591 mips_split_p[SYMBOL_TPREL] = 1;
5592 mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
5593 mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
5595 mips_lo_relocs[SYMBOL_HALF] = "%half(";
5598 static GTY(()) int was_mips16_p = -1;
5600 /* Set up the target-dependent global state so that it matches the
5601 current function's ISA mode. */
5604 mips_set_mips16_mode (int mips16_p)
5606 if (mips16_p == was_mips16_p)
5609 /* Restore base settings of various flags. */
5610 target_flags = mips_base_target_flags;
5611 align_loops = mips_base_align_loops;
5612 align_jumps = mips_base_align_jumps;
5613 align_functions = mips_base_align_functions;
5614 flag_schedule_insns = mips_base_schedule_insns;
5615 flag_reorder_blocks_and_partition = mips_base_reorder_blocks_and_partition;
5616 flag_move_loop_invariants = mips_base_move_loop_invariants;
5617 flag_delayed_branch = mips_flag_delayed_branch;
5621 /* Select mips16 instruction set. */
5622 target_flags |= MASK_MIPS16;
5624 /* Don't run the scheduler before reload, since it tends to
5625 increase register pressure. */
5626 flag_schedule_insns = 0;
5628 /* Don't do hot/cold partitioning. The constant layout code expects
5629 the whole function to be in a single section. */
5630 flag_reorder_blocks_and_partition = 0;
5632 /* Don't move loop invariants, because it tends to increase
5633 register pressure. It also introduces an extra move in cases
5634 where the constant is the first operand in a two-operand binary
5635 instruction, or when it forms a register argument to a functon
5637 flag_move_loop_invariants = 0;
5639 /* Silently disable -mexplicit-relocs since it doesn't apply
5640 to mips16 code. Even so, it would overly pedantic to warn
5641 about "-mips16 -mexplicit-relocs", especially given that
5642 we use a %gprel() operator. */
5643 target_flags &= ~MASK_EXPLICIT_RELOCS;
5645 /* Silently disable DSP extensions. */
5646 target_flags &= ~MASK_DSP;
5647 target_flags &= ~MASK_DSPR2;
5651 /* Reset to select base non-mips16 ISA. */
5652 target_flags &= ~MASK_MIPS16;
5654 /* When using explicit relocs, we call dbr_schedule from within
5656 if (TARGET_EXPLICIT_RELOCS)
5657 flag_delayed_branch = 0;
5659 /* Provide default values for align_* for 64-bit targets. */
5662 if (align_loops == 0)
5664 if (align_jumps == 0)
5666 if (align_functions == 0)
5667 align_functions = 8;
5671 /* (Re)initialize mips target internals for new ISA. */
5672 mips_init_split_addresses ();
5673 mips_init_relocs ();
5675 if (was_mips16_p >= 0)
5676 /* Reinitialize target-dependent state. */
5679 was_mips16_p = TARGET_MIPS16;
5682 /* Implement TARGET_SET_CURRENT_FUNCTION. Decide whether the current
5683 function should use the MIPS16 ISA and switch modes accordingly. */
5686 mips_set_current_function (tree fndecl ATTRIBUTE_UNUSED)
5690 /* Implement TARGET_HANDLE_OPTION. */
5693 mips_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
5698 if (strcmp (arg, "32") == 0)
5700 else if (strcmp (arg, "o64") == 0)
5702 else if (strcmp (arg, "n32") == 0)
5704 else if (strcmp (arg, "64") == 0)
5706 else if (strcmp (arg, "eabi") == 0)
5707 mips_abi = ABI_EABI;
5714 return mips_parse_cpu (arg) != 0;
5717 mips_isa_info = mips_parse_cpu (ACONCAT (("mips", arg, NULL)));
5718 return mips_isa_info != 0;
5720 case OPT_mno_flush_func:
5721 mips_cache_flush_func = NULL;
5724 case OPT_mcode_readable_:
5725 if (strcmp (arg, "yes") == 0)
5726 mips_code_readable = CODE_READABLE_YES;
5727 else if (strcmp (arg, "pcrel") == 0)
5728 mips_code_readable = CODE_READABLE_PCREL;
5729 else if (strcmp (arg, "no") == 0)
5730 mips_code_readable = CODE_READABLE_NO;
5740 /* Set up the threshold for data to go into the small data area, instead
5741 of the normal data area, and detect any conflicts in the switches. */
5744 override_options (void)
5746 int i, start, regno;
5747 enum machine_mode mode;
5749 #ifdef SUBTARGET_OVERRIDE_OPTIONS
5750 SUBTARGET_OVERRIDE_OPTIONS;
5753 mips_section_threshold = g_switch_set ? g_switch_value : MIPS_DEFAULT_GVALUE;
5755 /* The following code determines the architecture and register size.
5756 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
5757 The GAS and GCC code should be kept in sync as much as possible. */
5759 if (mips_arch_string != 0)
5760 mips_set_architecture (mips_parse_cpu (mips_arch_string));
5762 if (mips_isa_info != 0)
5764 if (mips_arch_info == 0)
5765 mips_set_architecture (mips_isa_info);
5766 else if (mips_arch_info->isa != mips_isa_info->isa)
5767 error ("-%s conflicts with the other architecture options, "
5768 "which specify a %s processor",
5769 mips_isa_info->name,
5770 mips_cpu_info_from_isa (mips_arch_info->isa)->name);
5773 if (mips_arch_info == 0)
5775 #ifdef MIPS_CPU_STRING_DEFAULT
5776 mips_set_architecture (mips_parse_cpu (MIPS_CPU_STRING_DEFAULT));
5778 mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
5782 if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
5783 error ("-march=%s is not compatible with the selected ABI",
5784 mips_arch_info->name);
5786 /* Optimize for mips_arch, unless -mtune selects a different processor. */
5787 if (mips_tune_string != 0)
5788 mips_set_tune (mips_parse_cpu (mips_tune_string));
5790 if (mips_tune_info == 0)
5791 mips_set_tune (mips_arch_info);
5793 /* Set cost structure for the processor. */
5795 mips_cost = &mips_rtx_cost_optimize_size;
5797 mips_cost = &mips_rtx_cost_data[mips_tune];
5799 /* If the user hasn't specified a branch cost, use the processor's
5801 if (mips_branch_cost == 0)
5802 mips_branch_cost = mips_cost->branch_cost;
5804 if ((target_flags_explicit & MASK_64BIT) != 0)
5806 /* The user specified the size of the integer registers. Make sure
5807 it agrees with the ABI and ISA. */
5808 if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
5809 error ("-mgp64 used with a 32-bit processor");
5810 else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
5811 error ("-mgp32 used with a 64-bit ABI");
5812 else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
5813 error ("-mgp64 used with a 32-bit ABI");
5817 /* Infer the integer register size from the ABI and processor.
5818 Restrict ourselves to 32-bit registers if that's all the
5819 processor has, or if the ABI cannot handle 64-bit registers. */
5820 if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
5821 target_flags &= ~MASK_64BIT;
5823 target_flags |= MASK_64BIT;
5826 if ((target_flags_explicit & MASK_FLOAT64) != 0)
5828 /* Really, -mfp32 and -mfp64 are ornamental options. There's
5829 only one right answer here. */
5830 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
5831 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
5832 else if (!TARGET_64BIT && TARGET_FLOAT64
5833 && !(ISA_HAS_MXHC1 && mips_abi == ABI_32))
5834 error ("-mgp32 and -mfp64 can only be combined if the target"
5835 " supports the mfhc1 and mthc1 instructions");
5836 else if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
5837 error ("unsupported combination: %s", "-mfp64 -msingle-float");
5841 /* -msingle-float selects 32-bit float registers. Otherwise the
5842 float registers should be the same size as the integer ones. */
5843 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
5844 target_flags |= MASK_FLOAT64;
5846 target_flags &= ~MASK_FLOAT64;
5849 /* End of code shared with GAS. */
5851 if ((target_flags_explicit & MASK_LONG64) == 0)
5853 if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
5854 target_flags |= MASK_LONG64;
5856 target_flags &= ~MASK_LONG64;
5860 flag_pcc_struct_return = 0;
5862 if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
5864 /* If neither -mbranch-likely nor -mno-branch-likely was given
5865 on the command line, set MASK_BRANCHLIKELY based on the target
5868 By default, we enable use of Branch Likely instructions on
5869 all architectures which support them with the following
5870 exceptions: when creating MIPS32 or MIPS64 code, and when
5871 tuning for architectures where their use tends to hurt
5874 The MIPS32 and MIPS64 architecture specifications say "Software
5875 is strongly encouraged to avoid use of Branch Likely
5876 instructions, as they will be removed from a future revision
5877 of the [MIPS32 and MIPS64] architecture." Therefore, we do not
5878 issue those instructions unless instructed to do so by
5880 if (ISA_HAS_BRANCHLIKELY
5881 && !(ISA_MIPS32 || ISA_MIPS32R2 || ISA_MIPS64)
5882 && !(TUNE_MIPS5500 || TUNE_SB1))
5883 target_flags |= MASK_BRANCHLIKELY;
5885 target_flags &= ~MASK_BRANCHLIKELY;
5887 if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
5888 warning (0, "generation of Branch Likely instructions enabled, but not supported by architecture");
5890 /* The effect of -mabicalls isn't defined for the EABI. */
5891 if (mips_abi == ABI_EABI && TARGET_ABICALLS)
5893 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
5894 target_flags &= ~MASK_ABICALLS;
5897 /* MIPS16 cannot generate PIC yet. */
5898 if (TARGET_MIPS16 && (flag_pic || TARGET_ABICALLS))
5900 sorry ("MIPS16 PIC");
5901 target_flags &= ~MASK_ABICALLS;
5902 flag_pic = flag_pie = flag_shlib = 0;
5905 if (TARGET_ABICALLS)
5906 /* We need to set flag_pic for executables as well as DSOs
5907 because we may reference symbols that are not defined in
5908 the final executable. (MIPS does not use things like
5909 copy relocs, for example.)
5911 Also, there is a body of code that uses __PIC__ to distinguish
5912 between -mabicalls and -mno-abicalls code. */
5915 /* -mvr4130-align is a "speed over size" optimization: it usually produces
5916 faster code, but at the expense of more nops. Enable it at -O3 and
5918 if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
5919 target_flags |= MASK_VR4130_ALIGN;
5921 /* Prefer a call to memcpy over inline code when optimizing for size,
5922 though see MOVE_RATIO in mips.h. */
5923 if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0)
5924 target_flags |= MASK_MEMCPY;
5926 /* If we have a nonzero small-data limit, check that the -mgpopt
5927 setting is consistent with the other target flags. */
5928 if (mips_section_threshold > 0)
5932 if (!TARGET_MIPS16 && !TARGET_EXPLICIT_RELOCS)
5933 error ("%<-mno-gpopt%> needs %<-mexplicit-relocs%>");
5935 TARGET_LOCAL_SDATA = false;
5936 TARGET_EXTERN_SDATA = false;
5940 if (TARGET_VXWORKS_RTP)
5941 warning (0, "cannot use small-data accesses for %qs", "-mrtp");
5943 if (TARGET_ABICALLS)
5944 warning (0, "cannot use small-data accesses for %qs",
5949 #ifdef MIPS_TFMODE_FORMAT
5950 REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
5953 /* Make sure that the user didn't turn off paired single support when
5954 MIPS-3D support is requested. */
5955 if (TARGET_MIPS3D && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
5956 && !TARGET_PAIRED_SINGLE_FLOAT)
5957 error ("-mips3d requires -mpaired-single");
5959 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
5961 target_flags |= MASK_PAIRED_SINGLE_FLOAT;
5963 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
5964 and TARGET_HARD_FLOAT are both true. */
5965 if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT))
5966 error ("-mips3d/-mpaired-single must be used with -mfp64 -mhard-float");
5968 /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
5970 if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_MIPS64)
5971 error ("-mips3d/-mpaired-single must be used with -mips64");
5973 /* If TARGET_DSPR2, enable MASK_DSP. */
5975 target_flags |= MASK_DSP;
5977 mips_print_operand_punct['?'] = 1;
5978 mips_print_operand_punct['#'] = 1;
5979 mips_print_operand_punct['/'] = 1;
5980 mips_print_operand_punct['&'] = 1;
5981 mips_print_operand_punct['!'] = 1;
5982 mips_print_operand_punct['*'] = 1;
5983 mips_print_operand_punct['@'] = 1;
5984 mips_print_operand_punct['.'] = 1;
5985 mips_print_operand_punct['('] = 1;
5986 mips_print_operand_punct[')'] = 1;
5987 mips_print_operand_punct['['] = 1;
5988 mips_print_operand_punct[']'] = 1;
5989 mips_print_operand_punct['<'] = 1;
5990 mips_print_operand_punct['>'] = 1;
5991 mips_print_operand_punct['{'] = 1;
5992 mips_print_operand_punct['}'] = 1;
5993 mips_print_operand_punct['^'] = 1;
5994 mips_print_operand_punct['$'] = 1;
5995 mips_print_operand_punct['+'] = 1;
5996 mips_print_operand_punct['~'] = 1;
5998 /* Set up array to map GCC register number to debug register number.
5999 Ignore the special purpose register numbers. */
6001 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
6003 mips_dbx_regno[i] = INVALID_REGNUM;
6004 if (GP_REG_P (i) || FP_REG_P (i) || ALL_COP_REG_P (i))
6005 mips_dwarf_regno[i] = i;
6007 mips_dwarf_regno[i] = INVALID_REGNUM;
6010 start = GP_DBX_FIRST - GP_REG_FIRST;
6011 for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
6012 mips_dbx_regno[i] = i + start;
6014 start = FP_DBX_FIRST - FP_REG_FIRST;
6015 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
6016 mips_dbx_regno[i] = i + start;
6018 /* HI and LO debug registers use big-endian ordering. */
6019 mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
6020 mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
6021 mips_dwarf_regno[HI_REGNUM] = MD_REG_FIRST + 0;
6022 mips_dwarf_regno[LO_REGNUM] = MD_REG_FIRST + 1;
6023 for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
6025 mips_dwarf_regno[i + TARGET_LITTLE_ENDIAN] = i;
6026 mips_dwarf_regno[i + TARGET_BIG_ENDIAN] = i + 1;
6029 /* Set up array giving whether a given register can hold a given mode. */
6031 for (mode = VOIDmode;
6032 mode != MAX_MACHINE_MODE;
6033 mode = (enum machine_mode) ((int)mode + 1))
6035 register int size = GET_MODE_SIZE (mode);
6036 register enum mode_class class = GET_MODE_CLASS (mode);
6038 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
6042 if (mode == CCV2mode)
6045 && (regno - ST_REG_FIRST) % 2 == 0);
6047 else if (mode == CCV4mode)
6050 && (regno - ST_REG_FIRST) % 4 == 0);
6052 else if (mode == CCmode)
6055 temp = (regno == FPSW_REGNUM);
6057 temp = (ST_REG_P (regno) || GP_REG_P (regno)
6058 || FP_REG_P (regno));
6061 else if (GP_REG_P (regno))
6062 temp = ((regno & 1) == 0 || size <= UNITS_PER_WORD);
6064 else if (FP_REG_P (regno))
6065 temp = ((((regno % MAX_FPRS_PER_FMT) == 0)
6066 || (MIN_FPRS_PER_FMT == 1
6067 && size <= UNITS_PER_FPREG))
6068 && (((class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT
6069 || class == MODE_VECTOR_FLOAT)
6070 && size <= UNITS_PER_FPVALUE)
6071 /* Allow integer modes that fit into a single
6072 register. We need to put integers into FPRs
6073 when using instructions like cvt and trunc.
6074 We can't allow sizes smaller than a word,
6075 the FPU has no appropriate load/store
6076 instructions for those. */
6077 || (class == MODE_INT
6078 && size >= MIN_UNITS_PER_WORD
6079 && size <= UNITS_PER_FPREG)
6080 /* Allow TFmode for CCmode reloads. */
6081 || (ISA_HAS_8CC && mode == TFmode)));
6083 else if (ACC_REG_P (regno))
6084 temp = ((INTEGRAL_MODE_P (mode) || ALL_FIXED_POINT_MODE_P (mode))
6085 && size <= UNITS_PER_WORD * 2
6086 && (size <= UNITS_PER_WORD
6087 || regno == MD_REG_FIRST
6088 || (DSP_ACC_REG_P (regno)
6089 && ((regno - DSP_ACC_REG_FIRST) & 1) == 0)));
6091 else if (ALL_COP_REG_P (regno))
6092 temp = (class == MODE_INT && size <= UNITS_PER_WORD);
6096 mips_hard_regno_mode_ok[(int)mode][regno] = temp;
6100 /* Save GPR registers in word_mode sized hunks. word_mode hasn't been
6101 initialized yet, so we can't use that here. */
6102 gpr_mode = TARGET_64BIT ? DImode : SImode;
6104 /* Function to allocate machine-dependent function status. */
6105 init_machine_status = &mips_init_machine_status;
6107 /* Default to working around R4000 errata only if the processor
6108 was selected explicitly. */
6109 if ((target_flags_explicit & MASK_FIX_R4000) == 0
6110 && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
6111 target_flags |= MASK_FIX_R4000;
6113 /* Default to working around R4400 errata only if the processor
6114 was selected explicitly. */
6115 if ((target_flags_explicit & MASK_FIX_R4400) == 0
6116 && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
6117 target_flags |= MASK_FIX_R4400;
6119 /* Save base state of options. */
6120 mips_base_mips16 = TARGET_MIPS16;
6121 mips_base_target_flags = target_flags;
6122 mips_base_schedule_insns = flag_schedule_insns;
6123 mips_base_reorder_blocks_and_partition = flag_reorder_blocks_and_partition;
6124 mips_base_move_loop_invariants = flag_move_loop_invariants;
6125 mips_base_align_loops = align_loops;
6126 mips_base_align_jumps = align_jumps;
6127 mips_base_align_functions = align_functions;
6128 mips_flag_delayed_branch = flag_delayed_branch;
6130 /* Now select the mips16 or 32-bit instruction set, as requested. */
6131 mips_set_mips16_mode (mips_base_mips16);
6134 /* Swap the register information for registers I and I + 1, which
6135 currently have the wrong endianness. Note that the registers'
6136 fixedness and call-clobberedness might have been set on the
6140 mips_swap_registers (unsigned int i)
6145 #define SWAP_INT(X, Y) (tmpi = (X), (X) = (Y), (Y) = tmpi)
6146 #define SWAP_STRING(X, Y) (tmps = (X), (X) = (Y), (Y) = tmps)
6148 SWAP_INT (fixed_regs[i], fixed_regs[i + 1]);
6149 SWAP_INT (call_used_regs[i], call_used_regs[i + 1]);
6150 SWAP_INT (call_really_used_regs[i], call_really_used_regs[i + 1]);
6151 SWAP_STRING (reg_names[i], reg_names[i + 1]);
6157 /* Implement CONDITIONAL_REGISTER_USAGE. */
6160 mips_conditional_register_usage (void)
6166 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
6167 fixed_regs[regno] = call_used_regs[regno] = 1;
6169 if (!TARGET_HARD_FLOAT)
6173 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
6174 fixed_regs[regno] = call_used_regs[regno] = 1;
6175 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
6176 fixed_regs[regno] = call_used_regs[regno] = 1;
6178 else if (! ISA_HAS_8CC)
6182 /* We only have a single condition code register. We
6183 implement this by hiding all the condition code registers,
6184 and generating RTL that refers directly to ST_REG_FIRST. */
6185 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
6186 fixed_regs[regno] = call_used_regs[regno] = 1;
6188 /* In mips16 mode, we permit the $t temporary registers to be used
6189 for reload. We prohibit the unused $s registers, since they
6190 are caller saved, and saving them via a mips16 register would
6191 probably waste more time than just reloading the value. */
6194 fixed_regs[18] = call_used_regs[18] = 1;
6195 fixed_regs[19] = call_used_regs[19] = 1;
6196 fixed_regs[20] = call_used_regs[20] = 1;
6197 fixed_regs[21] = call_used_regs[21] = 1;
6198 fixed_regs[22] = call_used_regs[22] = 1;
6199 fixed_regs[23] = call_used_regs[23] = 1;
6200 fixed_regs[26] = call_used_regs[26] = 1;
6201 fixed_regs[27] = call_used_regs[27] = 1;
6202 fixed_regs[30] = call_used_regs[30] = 1;
6204 /* fp20-23 are now caller saved. */
6205 if (mips_abi == ABI_64)
6208 for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
6209 call_really_used_regs[regno] = call_used_regs[regno] = 1;
6211 /* Odd registers from fp21 to fp31 are now caller saved. */
6212 if (mips_abi == ABI_N32)
6215 for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
6216 call_really_used_regs[regno] = call_used_regs[regno] = 1;
6218 /* Make sure that double-register accumulator values are correctly
6219 ordered for the current endianness. */
6220 if (TARGET_LITTLE_ENDIAN)
6223 mips_swap_registers (MD_REG_FIRST);
6224 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno += 2)
6225 mips_swap_registers (regno);
6229 /* Allocate a chunk of memory for per-function machine-dependent data. */
6230 static struct machine_function *
6231 mips_init_machine_status (void)
6233 return ((struct machine_function *)
6234 ggc_alloc_cleared (sizeof (struct machine_function)));
6237 /* On the mips16, we want to allocate $24 (T_REG) before other
6238 registers for instructions for which it is possible. This helps
6239 avoid shuffling registers around in order to set up for an xor,
6240 encouraging the compiler to use a cmp instead. */
6243 mips_order_regs_for_local_alloc (void)
6247 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
6248 reg_alloc_order[i] = i;
6252 /* It really doesn't matter where we put register 0, since it is
6253 a fixed register anyhow. */
6254 reg_alloc_order[0] = 24;
6255 reg_alloc_order[24] = 0;
6260 /* The MIPS debug format wants all automatic variables and arguments
6261 to be in terms of the virtual frame pointer (stack pointer before
6262 any adjustment in the function), while the MIPS 3.0 linker wants
6263 the frame pointer to be the stack pointer after the initial
6264 adjustment. So, we do the adjustment here. The arg pointer (which
6265 is eliminated) points to the virtual frame pointer, while the frame
6266 pointer (which may be eliminated) points to the stack pointer after
6267 the initial adjustments. */
6270 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
6272 rtx offset2 = const0_rtx;
6273 rtx reg = eliminate_constant_term (addr, &offset2);
6276 offset = INTVAL (offset2);
6278 if (reg == stack_pointer_rtx || reg == frame_pointer_rtx
6279 || reg == hard_frame_pointer_rtx)
6281 HOST_WIDE_INT frame_size = (!cfun->machine->frame.initialized)
6282 ? compute_frame_size (get_frame_size ())
6283 : cfun->machine->frame.total_size;
6285 /* MIPS16 frame is smaller */
6286 if (frame_pointer_needed && TARGET_MIPS16)
6287 frame_size -= cfun->machine->frame.args_size;
6289 offset = offset - frame_size;
6292 /* sdbout_parms does not want this to crash for unrecognized cases. */
6294 else if (reg != arg_pointer_rtx)
6295 fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
6302 /* If OP is an UNSPEC address, return the address to which it refers,
6303 otherwise return OP itself. */
6306 mips_strip_unspec_address (rtx op)
6310 split_const (op, &base, &offset);
6311 if (UNSPEC_ADDRESS_P (base))
6312 op = plus_constant (UNSPEC_ADDRESS (base), INTVAL (offset));
6316 /* Implement the PRINT_OPERAND macro. The MIPS-specific operand codes are:
6318 'X' OP is CONST_INT, prints 32 bits in hexadecimal format = "0x%08x",
6319 'x' OP is CONST_INT, prints 16 bits in hexadecimal format = "0x%04x",
6320 'h' OP is HIGH, prints %hi(X),
6321 'd' output integer constant in decimal,
6322 'z' if the operand is 0, use $0 instead of normal operand.
6323 'D' print second part of double-word register or memory operand.
6324 'L' print low-order register of double-word register operand.
6325 'M' print high-order register of double-word register operand.
6326 'C' print part of opcode for a branch condition.
6327 'F' print part of opcode for a floating-point branch condition.
6328 'N' print part of opcode for a branch condition, inverted.
6329 'W' print part of opcode for a floating-point branch condition, inverted.
6330 'T' print 'f' for (eq:CC ...), 't' for (ne:CC ...),
6331 'z' for (eq:?I ...), 'n' for (ne:?I ...).
6332 't' like 'T', but with the EQ/NE cases reversed
6333 'Y' for a CONST_INT X, print mips_fp_conditions[X]
6334 'Z' print the operand and a comma for ISA_HAS_8CC, otherwise print nothing
6335 'R' print the reloc associated with LO_SUM
6336 'q' print DSP accumulator registers
6338 The punctuation characters are:
6340 '(' Turn on .set noreorder
6341 ')' Turn on .set reorder
6342 '[' Turn on .set noat
6344 '<' Turn on .set nomacro
6345 '>' Turn on .set macro
6346 '{' Turn on .set volatile (not GAS)
6347 '}' Turn on .set novolatile (not GAS)
6348 '&' Turn on .set noreorder if filling delay slots
6349 '*' Turn on both .set noreorder and .set nomacro if filling delay slots
6350 '!' Turn on .set nomacro if filling delay slots
6351 '#' Print nop if in a .set noreorder section.
6352 '/' Like '#', but does nothing within a delayed branch sequence
6353 '?' Print 'l' if we are to use a branch likely instead of normal branch.
6354 '@' Print the name of the assembler temporary register (at or $1).
6355 '.' Print the name of the register with a hard-wired zero (zero or $0).
6356 '^' Print the name of the pic call-through register (t9 or $25).
6357 '$' Print the name of the stack pointer register (sp or $29).
6358 '+' Print the name of the gp register (usually gp or $28).
6359 '~' Output a branch alignment to LABEL_ALIGN(NULL). */
6362 print_operand (FILE *file, rtx op, int letter)
6364 register enum rtx_code code;
6366 if (PRINT_OPERAND_PUNCT_VALID_P (letter))
6371 if (mips_branch_likely)
6376 fputs (reg_names [GP_REG_FIRST + 1], file);
6380 fputs (reg_names [PIC_FUNCTION_ADDR_REGNUM], file);
6384 fputs (reg_names [GP_REG_FIRST + 0], file);
6388 fputs (reg_names[STACK_POINTER_REGNUM], file);
6392 fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
6396 if (final_sequence != 0 && set_noreorder++ == 0)
6397 fputs (".set\tnoreorder\n\t", file);
6401 if (final_sequence != 0)
6403 if (set_noreorder++ == 0)
6404 fputs (".set\tnoreorder\n\t", file);
6406 if (set_nomacro++ == 0)
6407 fputs (".set\tnomacro\n\t", file);
6412 if (final_sequence != 0 && set_nomacro++ == 0)
6413 fputs ("\n\t.set\tnomacro", file);
6417 if (set_noreorder != 0)
6418 fputs ("\n\tnop", file);
6422 /* Print an extra newline so that the delayed insn is separated
6423 from the following ones. This looks neater and is consistent
6424 with non-nop delayed sequences. */
6425 if (set_noreorder != 0 && final_sequence == 0)
6426 fputs ("\n\tnop\n", file);
6430 if (set_noreorder++ == 0)
6431 fputs (".set\tnoreorder\n\t", file);
6435 if (set_noreorder == 0)
6436 error ("internal error: %%) found without a %%( in assembler pattern");
6438 else if (--set_noreorder == 0)
6439 fputs ("\n\t.set\treorder", file);
6444 if (set_noat++ == 0)
6445 fputs (".set\tnoat\n\t", file);
6450 error ("internal error: %%] found without a %%[ in assembler pattern");
6451 else if (--set_noat == 0)
6452 fputs ("\n\t.set\tat", file);
6457 if (set_nomacro++ == 0)
6458 fputs (".set\tnomacro\n\t", file);
6462 if (set_nomacro == 0)
6463 error ("internal error: %%> found without a %%< in assembler pattern");
6464 else if (--set_nomacro == 0)
6465 fputs ("\n\t.set\tmacro", file);
6470 if (set_volatile++ == 0)
6471 fputs ("#.set\tvolatile\n\t", file);
6475 if (set_volatile == 0)
6476 error ("internal error: %%} found without a %%{ in assembler pattern");
6477 else if (--set_volatile == 0)
6478 fputs ("\n\t#.set\tnovolatile", file);
6484 if (align_labels_log > 0)
6485 ASM_OUTPUT_ALIGN (file, align_labels_log);
6490 error ("PRINT_OPERAND: unknown punctuation '%c'", letter);
6499 error ("PRINT_OPERAND null pointer");
6503 code = GET_CODE (op);
6508 case EQ: fputs ("eq", file); break;
6509 case NE: fputs ("ne", file); break;
6510 case GT: fputs ("gt", file); break;
6511 case GE: fputs ("ge", file); break;
6512 case LT: fputs ("lt", file); break;
6513 case LE: fputs ("le", file); break;
6514 case GTU: fputs ("gtu", file); break;
6515 case GEU: fputs ("geu", file); break;
6516 case LTU: fputs ("ltu", file); break;
6517 case LEU: fputs ("leu", file); break;
6519 fatal_insn ("PRINT_OPERAND, invalid insn for %%C", op);
6522 else if (letter == 'N')
6525 case EQ: fputs ("ne", file); break;
6526 case NE: fputs ("eq", file); break;
6527 case GT: fputs ("le", file); break;
6528 case GE: fputs ("lt", file); break;
6529 case LT: fputs ("ge", file); break;
6530 case LE: fputs ("gt", file); break;
6531 case GTU: fputs ("leu", file); break;
6532 case GEU: fputs ("ltu", file); break;
6533 case LTU: fputs ("geu", file); break;
6534 case LEU: fputs ("gtu", file); break;
6536 fatal_insn ("PRINT_OPERAND, invalid insn for %%N", op);
6539 else if (letter == 'F')
6542 case EQ: fputs ("c1f", file); break;
6543 case NE: fputs ("c1t", file); break;
6545 fatal_insn ("PRINT_OPERAND, invalid insn for %%F", op);
6548 else if (letter == 'W')
6551 case EQ: fputs ("c1t", file); break;
6552 case NE: fputs ("c1f", file); break;
6554 fatal_insn ("PRINT_OPERAND, invalid insn for %%W", op);
6557 else if (letter == 'h')
6559 if (GET_CODE (op) == HIGH)
6562 print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_hi_relocs);
6565 else if (letter == 'R')
6566 print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_lo_relocs);
6568 else if (letter == 'Y')
6570 if (GET_CODE (op) == CONST_INT
6571 && ((unsigned HOST_WIDE_INT) INTVAL (op)
6572 < ARRAY_SIZE (mips_fp_conditions)))
6573 fputs (mips_fp_conditions[INTVAL (op)], file);
6575 output_operand_lossage ("invalid %%Y value");
6578 else if (letter == 'Z')
6582 print_operand (file, op, 0);
6587 else if (letter == 'q')
6592 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
6594 regnum = REGNO (op);
6595 if (MD_REG_P (regnum))
6596 fprintf (file, "$ac0");
6597 else if (DSP_ACC_REG_P (regnum))
6598 fprintf (file, "$ac%c", reg_names[regnum][3]);
6600 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
6603 else if (code == REG || code == SUBREG)
6605 register int regnum;
6608 regnum = REGNO (op);
6610 regnum = true_regnum (op);
6612 if ((letter == 'M' && ! WORDS_BIG_ENDIAN)
6613 || (letter == 'L' && WORDS_BIG_ENDIAN)
6617 fprintf (file, "%s", reg_names[regnum]);
6620 else if (code == MEM)
6623 output_address (plus_constant (XEXP (op, 0), 4));
6625 output_address (XEXP (op, 0));
6628 else if (letter == 'x' && GET_CODE (op) == CONST_INT)
6629 fprintf (file, HOST_WIDE_INT_PRINT_HEX, 0xffff & INTVAL(op));
6631 else if (letter == 'X' && GET_CODE(op) == CONST_INT)
6632 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
6634 else if (letter == 'd' && GET_CODE(op) == CONST_INT)
6635 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (INTVAL(op)));
6637 else if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
6638 fputs (reg_names[GP_REG_FIRST], file);
6640 else if (letter == 'd' || letter == 'x' || letter == 'X')
6641 output_operand_lossage ("invalid use of %%d, %%x, or %%X");
6643 else if (letter == 'T' || letter == 't')
6645 int truth = (code == NE) == (letter == 'T');
6646 fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
6649 else if (CONST_GP_P (op))
6650 fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
6653 output_addr_const (file, mips_strip_unspec_address (op));
6657 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM
6658 in context CONTEXT. RELOCS is the array of relocations to use. */
6661 print_operand_reloc (FILE *file, rtx op, enum mips_symbol_context context,
6662 const char **relocs)
6664 enum mips_symbol_type symbol_type;
6667 symbol_type = mips_classify_symbolic_expression (op, context);
6668 if (relocs[symbol_type] == 0)
6669 fatal_insn ("PRINT_OPERAND, invalid operand for relocation", op);
6671 fputs (relocs[symbol_type], file);
6672 output_addr_const (file, mips_strip_unspec_address (op));
6673 for (p = relocs[symbol_type]; *p != 0; p++)
6678 /* Output address operand X to FILE. */
6681 print_operand_address (FILE *file, rtx x)
6683 struct mips_address_info addr;
6685 if (mips_classify_address (&addr, x, word_mode, true))
6689 print_operand (file, addr.offset, 0);
6690 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
6693 case ADDRESS_LO_SUM:
6694 print_operand_reloc (file, addr.offset, SYMBOL_CONTEXT_MEM,
6696 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
6699 case ADDRESS_CONST_INT:
6700 output_addr_const (file, x);
6701 fprintf (file, "(%s)", reg_names[0]);
6704 case ADDRESS_SYMBOLIC:
6705 output_addr_const (file, mips_strip_unspec_address (x));
6711 /* When using assembler macros, keep track of all of small-data externs
6712 so that mips_file_end can emit the appropriate declarations for them.
6714 In most cases it would be safe (though pointless) to emit .externs
6715 for other symbols too. One exception is when an object is within
6716 the -G limit but declared by the user to be in a section other
6717 than .sbss or .sdata. */
6720 mips_output_external (FILE *file, tree decl, const char *name)
6722 default_elf_asm_output_external (file, decl, name);
6724 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
6725 set in order to avoid putting out names that are never really
6727 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
6729 if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
6731 fputs ("\t.extern\t", file);
6732 assemble_name (file, name);
6733 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC "\n",
6734 int_size_in_bytes (TREE_TYPE (decl)));
6736 else if (TARGET_IRIX
6737 && mips_abi == ABI_32
6738 && TREE_CODE (decl) == FUNCTION_DECL)
6740 /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
6741 `.global name .text' directive for every used but
6742 undefined function. If we don't, the linker may perform
6743 an optimization (skipping over the insns that set $gp)
6744 when it is unsafe. */
6745 fputs ("\t.globl ", file);
6746 assemble_name (file, name);
6747 fputs (" .text\n", file);
6752 /* Emit a new filename to a stream. If we are smuggling stabs, try to
6753 put out a MIPS ECOFF file and a stab. */
6756 mips_output_filename (FILE *stream, const char *name)
6759 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
6761 if (write_symbols == DWARF2_DEBUG)
6763 else if (mips_output_filename_first_time)
6765 mips_output_filename_first_time = 0;
6766 num_source_filenames += 1;
6767 current_function_file = name;
6768 fprintf (stream, "\t.file\t%d ", num_source_filenames);
6769 output_quoted_string (stream, name);
6770 putc ('\n', stream);
6773 /* If we are emitting stabs, let dbxout.c handle this (except for
6774 the mips_output_filename_first_time case). */
6775 else if (write_symbols == DBX_DEBUG)
6778 else if (name != current_function_file
6779 && strcmp (name, current_function_file) != 0)
6781 num_source_filenames += 1;
6782 current_function_file = name;
6783 fprintf (stream, "\t.file\t%d ", num_source_filenames);
6784 output_quoted_string (stream, name);
6785 putc ('\n', stream);
6789 /* Output an ASCII string, in a space-saving way. PREFIX is the string
6790 that should be written before the opening quote, such as "\t.ascii\t"
6791 for real string data or "\t# " for a comment. */
6794 mips_output_ascii (FILE *stream, const char *string_param, size_t len,
6799 register const unsigned char *string =
6800 (const unsigned char *)string_param;
6802 fprintf (stream, "%s\"", prefix);
6803 for (i = 0; i < len; i++)
6805 register int c = string[i];
6809 if (c == '\\' || c == '\"')
6811 putc ('\\', stream);
6819 fprintf (stream, "\\%03o", c);
6823 if (cur_pos > 72 && i+1 < len)
6826 fprintf (stream, "\"\n%s\"", prefix);
6829 fprintf (stream, "\"\n");
6832 /* Implement TARGET_ASM_FILE_START. */
6835 mips_file_start (void)
6837 default_file_start ();
6841 /* Generate a special section to describe the ABI switches used to
6842 produce the resultant binary. This used to be done by the assembler
6843 setting bits in the ELF header's flags field, but we have run out of
6844 bits. GDB needs this information in order to be able to correctly
6845 debug these binaries. See the function mips_gdbarch_init() in
6846 gdb/mips-tdep.c. This is unnecessary for the IRIX 5/6 ABIs and
6847 causes unnecessary IRIX 6 ld warnings. */
6848 const char * abi_string = NULL;
6852 case ABI_32: abi_string = "abi32"; break;
6853 case ABI_N32: abi_string = "abiN32"; break;
6854 case ABI_64: abi_string = "abi64"; break;
6855 case ABI_O64: abi_string = "abiO64"; break;
6856 case ABI_EABI: abi_string = TARGET_64BIT ? "eabi64" : "eabi32"; break;
6860 /* Note - we use fprintf directly rather than calling switch_to_section
6861 because in this way we can avoid creating an allocated section. We
6862 do not want this section to take up any space in the running
6864 fprintf (asm_out_file, "\t.section .mdebug.%s\n", abi_string);
6866 /* There is no ELF header flag to distinguish long32 forms of the
6867 EABI from long64 forms. Emit a special section to help tools
6868 such as GDB. Do the same for o64, which is sometimes used with
6870 if (mips_abi == ABI_EABI || mips_abi == ABI_O64)
6871 fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n",
6872 TARGET_LONG64 ? 64 : 32);
6874 /* Restore the default section. */
6875 fprintf (asm_out_file, "\t.previous\n");
6877 #ifdef HAVE_AS_GNU_ATTRIBUTE
6878 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
6879 TARGET_HARD_FLOAT_ABI ? (TARGET_DOUBLE_FLOAT ? 1 : 2) : 3);
6883 /* Generate the pseudo ops that System V.4 wants. */
6884 if (TARGET_ABICALLS)
6885 fprintf (asm_out_file, "\t.abicalls\n");
6887 if (flag_verbose_asm)
6888 fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
6890 mips_section_threshold, mips_arch_info->name, mips_isa);
6893 #ifdef BSS_SECTION_ASM_OP
6894 /* Implement ASM_OUTPUT_ALIGNED_BSS. This differs from the default only
6895 in the use of sbss. */
6898 mips_output_aligned_bss (FILE *stream, tree decl, const char *name,
6899 unsigned HOST_WIDE_INT size, int align)
6901 extern tree last_assemble_variable_decl;
6903 if (mips_in_small_data_p (decl))
6904 switch_to_section (get_named_section (NULL, ".sbss", 0));
6906 switch_to_section (bss_section);
6907 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
6908 last_assemble_variable_decl = decl;
6909 ASM_DECLARE_OBJECT_NAME (stream, name, decl);
6910 ASM_OUTPUT_SKIP (stream, size != 0 ? size : 1);
6914 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
6915 elfos.h version, but we also need to handle -muninit-const-in-rodata. */
6918 mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
6919 unsigned HOST_WIDE_INT size,
6922 /* If the target wants uninitialized const declarations in
6923 .rdata then don't put them in .comm. */
6924 if (TARGET_EMBEDDED_DATA && TARGET_UNINIT_CONST_IN_RODATA
6925 && TREE_CODE (decl) == VAR_DECL && TREE_READONLY (decl)
6926 && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
6928 if (TREE_PUBLIC (decl) && DECL_NAME (decl))
6929 targetm.asm_out.globalize_label (stream, name);
6931 switch_to_section (readonly_data_section);
6932 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
6933 mips_declare_object (stream, name, "",
6934 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
6938 mips_declare_common_object (stream, name, "\n\t.comm\t",
6942 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
6943 NAME is the name of the object and ALIGN is the required alignment
6944 in bytes. TAKES_ALIGNMENT_P is true if the directive takes a third
6945 alignment argument. */
6948 mips_declare_common_object (FILE *stream, const char *name,
6949 const char *init_string,
6950 unsigned HOST_WIDE_INT size,
6951 unsigned int align, bool takes_alignment_p)
6953 if (!takes_alignment_p)
6955 size += (align / BITS_PER_UNIT) - 1;
6956 size -= size % (align / BITS_PER_UNIT);
6957 mips_declare_object (stream, name, init_string,
6958 "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
6961 mips_declare_object (stream, name, init_string,
6962 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
6963 size, align / BITS_PER_UNIT);
6966 /* Emit either a label, .comm, or .lcomm directive. When using assembler
6967 macros, mark the symbol as written so that mips_file_end won't emit an
6968 .extern for it. STREAM is the output file, NAME is the name of the
6969 symbol, INIT_STRING is the string that should be written before the
6970 symbol and FINAL_STRING is the string that should be written after it.
6971 FINAL_STRING is a printf() format that consumes the remaining arguments. */
6974 mips_declare_object (FILE *stream, const char *name, const char *init_string,
6975 const char *final_string, ...)
6979 fputs (init_string, stream);
6980 assemble_name (stream, name);
6981 va_start (ap, final_string);
6982 vfprintf (stream, final_string, ap);
6985 if (!TARGET_EXPLICIT_RELOCS)
6987 tree name_tree = get_identifier (name);
6988 TREE_ASM_WRITTEN (name_tree) = 1;
6992 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
6993 extern int size_directive_output;
6995 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
6996 definitions except that it uses mips_declare_object() to emit the label. */
6999 mips_declare_object_name (FILE *stream, const char *name,
7000 tree decl ATTRIBUTE_UNUSED)
7002 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
7003 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
7006 size_directive_output = 0;
7007 if (!flag_inhibit_size_directive && DECL_SIZE (decl))
7011 size_directive_output = 1;
7012 size = int_size_in_bytes (TREE_TYPE (decl));
7013 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
7016 mips_declare_object (stream, name, "", ":\n");
7019 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */
7022 mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
7026 name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
7027 if (!flag_inhibit_size_directive
7028 && DECL_SIZE (decl) != 0
7029 && !at_end && top_level
7030 && DECL_INITIAL (decl) == error_mark_node
7031 && !size_directive_output)
7035 size_directive_output = 1;
7036 size = int_size_in_bytes (TREE_TYPE (decl));
7037 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
7042 /* Return true if X in context CONTEXT is a small data address that can
7043 be rewritten as a LO_SUM. */
7046 mips_rewrite_small_data_p (rtx x, enum mips_symbol_context context)
7048 enum mips_symbol_type symbol_type;
7050 return (TARGET_EXPLICIT_RELOCS
7051 && mips_symbolic_constant_p (x, context, &symbol_type)
7052 && symbol_type == SYMBOL_GP_RELATIVE);
7056 /* A for_each_rtx callback for mips_small_data_pattern_p. DATA is the
7057 containing MEM, or null if none. */
7060 mips_small_data_pattern_1 (rtx *loc, void *data)
7062 enum mips_symbol_context context;
7064 if (GET_CODE (*loc) == LO_SUM)
7069 if (for_each_rtx (&XEXP (*loc, 0), mips_small_data_pattern_1, *loc))
7074 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
7075 return mips_rewrite_small_data_p (*loc, context);
7078 /* Return true if OP refers to small data symbols directly, not through
7082 mips_small_data_pattern_p (rtx op)
7084 return for_each_rtx (&op, mips_small_data_pattern_1, 0);
7087 /* A for_each_rtx callback, used by mips_rewrite_small_data.
7088 DATA is the containing MEM, or null if none. */
7091 mips_rewrite_small_data_1 (rtx *loc, void *data)
7093 enum mips_symbol_context context;
7097 for_each_rtx (&XEXP (*loc, 0), mips_rewrite_small_data_1, *loc);
7101 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
7102 if (mips_rewrite_small_data_p (*loc, context))
7103 *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
7105 if (GET_CODE (*loc) == LO_SUM)
7111 /* If possible, rewrite OP so that it refers to small data using
7112 explicit relocations. */
7115 mips_rewrite_small_data (rtx op)
7117 op = copy_insn (op);
7118 for_each_rtx (&op, mips_rewrite_small_data_1, 0);
7122 /* Return true if the current function has an insn that implicitly
7126 mips_function_has_gp_insn (void)
7128 /* Don't bother rechecking if we found one last time. */
7129 if (!cfun->machine->has_gp_insn_p)
7133 push_topmost_sequence ();
7134 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7136 && GET_CODE (PATTERN (insn)) != USE
7137 && GET_CODE (PATTERN (insn)) != CLOBBER
7138 && (get_attr_got (insn) != GOT_UNSET
7139 || small_data_pattern (PATTERN (insn), VOIDmode)))
7141 pop_topmost_sequence ();
7143 cfun->machine->has_gp_insn_p = (insn != 0);
7145 return cfun->machine->has_gp_insn_p;
7149 /* Return the register that should be used as the global pointer
7150 within this function. Return 0 if the function doesn't need
7151 a global pointer. */
7154 mips_global_pointer (void)
7158 /* $gp is always available unless we're using a GOT. */
7159 if (!TARGET_USE_GOT)
7160 return GLOBAL_POINTER_REGNUM;
7162 /* We must always provide $gp when it is used implicitly. */
7163 if (!TARGET_EXPLICIT_RELOCS)
7164 return GLOBAL_POINTER_REGNUM;
7166 /* FUNCTION_PROFILER includes a jal macro, so we need to give it
7168 if (current_function_profile)
7169 return GLOBAL_POINTER_REGNUM;
7171 /* If the function has a nonlocal goto, $gp must hold the correct
7172 global pointer for the target function. */
7173 if (current_function_has_nonlocal_goto)
7174 return GLOBAL_POINTER_REGNUM;
7176 /* If the gp is never referenced, there's no need to initialize it.
7177 Note that reload can sometimes introduce constant pool references
7178 into a function that otherwise didn't need them. For example,
7179 suppose we have an instruction like:
7181 (set (reg:DF R1) (float:DF (reg:SI R2)))
7183 If R2 turns out to be constant such as 1, the instruction may have a
7184 REG_EQUAL note saying that R1 == 1.0. Reload then has the option of
7185 using this constant if R2 doesn't get allocated to a register.
7187 In cases like these, reload will have added the constant to the pool
7188 but no instruction will yet refer to it. */
7189 if (!df_regs_ever_live_p (GLOBAL_POINTER_REGNUM)
7190 && !current_function_uses_const_pool
7191 && !mips_function_has_gp_insn ())
7194 /* We need a global pointer, but perhaps we can use a call-clobbered
7195 register instead of $gp. */
7196 if (TARGET_CALL_SAVED_GP && current_function_is_leaf)
7197 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
7198 if (!df_regs_ever_live_p (regno)
7199 && call_really_used_regs[regno]
7200 && !fixed_regs[regno]
7201 && regno != PIC_FUNCTION_ADDR_REGNUM)
7204 return GLOBAL_POINTER_REGNUM;
7208 /* Return true if the function return value MODE will get returned in a
7209 floating-point register. */
7212 mips_return_mode_in_fpr_p (enum machine_mode mode)
7214 return ((GET_MODE_CLASS (mode) == MODE_FLOAT
7215 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
7216 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
7217 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);
7220 /* Return a two-character string representing a function floating-point
7221 return mode, used to name MIPS16 function stubs. */
7224 mips16_call_stub_mode_suffix (enum machine_mode mode)
7228 else if (mode == DFmode)
7230 else if (mode == SCmode)
7232 else if (mode == DCmode)
7234 else if (mode == V2SFmode)
7240 /* Return true if the current function returns its value in a floating-point
7241 register in MIPS16 mode. */
7244 mips16_cfun_returns_in_fpr_p (void)
7246 tree return_type = DECL_RESULT (current_function_decl);
7247 return (TARGET_MIPS16
7248 && TARGET_HARD_FLOAT_ABI
7249 && !aggregate_value_p (return_type, current_function_decl)
7250 && mips_return_mode_in_fpr_p (DECL_MODE (return_type)));
7254 /* Return true if the current function must save REGNO. */
7257 mips_save_reg_p (unsigned int regno)
7259 /* We only need to save $gp if TARGET_CALL_SAVED_GP and only then
7260 if we have not chosen a call-clobbered substitute. */
7261 if (regno == GLOBAL_POINTER_REGNUM)
7262 return TARGET_CALL_SAVED_GP && cfun->machine->global_pointer == regno;
7264 /* Check call-saved registers. */
7265 if ((current_function_saves_all_registers || df_regs_ever_live_p (regno))
7266 && !call_really_used_regs[regno])
7269 /* Save both registers in an FPR pair if either one is used. This is
7270 needed for the case when MIN_FPRS_PER_FMT == 1, which allows the odd
7271 register to be used without the even register. */
7272 if (FP_REG_P (regno)
7273 && MAX_FPRS_PER_FMT == 2
7274 && df_regs_ever_live_p (regno + 1)
7275 && !call_really_used_regs[regno + 1])
7278 /* We need to save the old frame pointer before setting up a new one. */
7279 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
7282 /* We need to save the incoming return address if it is ever clobbered
7283 within the function, if __builtin_eh_return is being used to set a
7284 different return address, or if a stub is being used to return a
7286 if (regno == GP_REG_FIRST + 31
7287 && (df_regs_ever_live_p (regno)
7288 || current_function_calls_eh_return
7289 || mips16_cfun_returns_in_fpr_p ()))
7295 /* Return the index of the lowest X in the range [0, SIZE) for which
7296 bit REGS[X] is set in MASK. Return SIZE if there is no such X. */
7299 mips16e_find_first_register (unsigned int mask, const unsigned char *regs,
7304 for (i = 0; i < size; i++)
7305 if (BITSET_P (mask, regs[i]))
7311 /* *MASK_PTR is a mask of general purpose registers and *GP_REG_SIZE_PTR
7312 is the number of bytes that they occupy. If *MASK_PTR contains REGS[X]
7313 for some X in [0, SIZE), adjust *MASK_PTR and *GP_REG_SIZE_PTR so that
7314 the same is true for all indexes (X, SIZE). */
7317 mips16e_mask_registers (unsigned int *mask_ptr, const unsigned char *regs,
7318 unsigned int size, HOST_WIDE_INT *gp_reg_size_ptr)
7322 i = mips16e_find_first_register (*mask_ptr, regs, size);
7323 for (i++; i < size; i++)
7324 if (!BITSET_P (*mask_ptr, regs[i]))
7326 *gp_reg_size_ptr += GET_MODE_SIZE (gpr_mode);
7327 *mask_ptr |= 1 << regs[i];
7331 /* Return the bytes needed to compute the frame pointer from the current
7332 stack pointer. SIZE is the size (in bytes) of the local variables.
7334 MIPS stack frames look like:
7336 Before call After call
7337 high +-----------------------+ +-----------------------+
7339 | caller's temps. | | caller's temps. |
7341 +-----------------------+ +-----------------------+
7343 | arguments on stack. | | arguments on stack. |
7345 +-----------------------+ +-----------------------+
7346 | 4 words to save | | 4 words to save |
7347 | arguments passed | | arguments passed |
7348 | in registers, even | | in registers, even |
7349 | if not passed. | | if not passed. |
7350 SP->+-----------------------+ VFP->+-----------------------+
7351 (VFP = SP+fp_sp_offset) | |\
7352 | fp register save | | fp_reg_size
7354 SP+gp_sp_offset->+-----------------------+
7356 | | gp register save | | gp_reg_size
7357 gp_reg_rounded | | |/
7358 | +-----------------------+
7359 \| alignment padding |
7360 +-----------------------+
7362 | local variables | | var_size
7364 +-----------------------+
7366 | alloca allocations |
7368 +-----------------------+
7370 cprestore_size | | GP save for V.4 abi |
7372 +-----------------------+
7374 | arguments on stack | |
7376 +-----------------------+ |
7377 | 4 words to save | | args_size
7378 | arguments passed | |
7379 | in registers, even | |
7380 | if not passed. | |
7381 low | (TARGET_OLDABI only) |/
7382 memory SP->+-----------------------+
7387 compute_frame_size (HOST_WIDE_INT size)
7390 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
7391 HOST_WIDE_INT var_size; /* # bytes that variables take up */
7392 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
7393 HOST_WIDE_INT cprestore_size; /* # bytes that the cprestore slot takes up */
7394 HOST_WIDE_INT gp_reg_rounded; /* # bytes needed to store gp after rounding */
7395 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
7396 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
7397 unsigned int mask; /* mask of saved gp registers */
7398 unsigned int fmask; /* mask of saved fp registers */
7400 cfun->machine->global_pointer = mips_global_pointer ();
7406 var_size = MIPS_STACK_ALIGN (size);
7407 args_size = current_function_outgoing_args_size;
7408 cprestore_size = MIPS_STACK_ALIGN (STARTING_FRAME_OFFSET) - args_size;
7410 /* The space set aside by STARTING_FRAME_OFFSET isn't needed in leaf
7411 functions. If the function has local variables, we're committed
7412 to allocating it anyway. Otherwise reclaim it here. */
7413 if (var_size == 0 && current_function_is_leaf)
7414 cprestore_size = args_size = 0;
7416 /* The MIPS 3.0 linker does not like functions that dynamically
7417 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
7418 looks like we are trying to create a second frame pointer to the
7419 function, so allocate some stack space to make it happy. */
7421 if (args_size == 0 && current_function_calls_alloca)
7422 args_size = 4 * UNITS_PER_WORD;
7424 total_size = var_size + args_size + cprestore_size;
7426 /* Calculate space needed for gp registers. */
7427 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
7428 if (mips_save_reg_p (regno))
7430 gp_reg_size += GET_MODE_SIZE (gpr_mode);
7431 mask |= 1 << (regno - GP_REG_FIRST);
7434 /* We need to restore these for the handler. */
7435 if (current_function_calls_eh_return)
7440 regno = EH_RETURN_DATA_REGNO (i);
7441 if (regno == INVALID_REGNUM)
7443 gp_reg_size += GET_MODE_SIZE (gpr_mode);
7444 mask |= 1 << (regno - GP_REG_FIRST);
7448 /* The MIPS16e SAVE and RESTORE instructions have two ranges of registers:
7449 $a3-$a0 and $s2-$s8. If we save one register in the range, we must
7450 save all later registers too. */
7451 if (GENERATE_MIPS16E_SAVE_RESTORE)
7453 mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
7454 ARRAY_SIZE (mips16e_s2_s8_regs), &gp_reg_size);
7455 mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
7456 ARRAY_SIZE (mips16e_a0_a3_regs), &gp_reg_size);
7459 /* This loop must iterate over the same space as its companion in
7460 mips_for_each_saved_reg. */
7461 if (TARGET_HARD_FLOAT)
7462 for (regno = (FP_REG_LAST - MAX_FPRS_PER_FMT + 1);
7463 regno >= FP_REG_FIRST;
7464 regno -= MAX_FPRS_PER_FMT)
7465 if (mips_save_reg_p (regno))
7467 fp_reg_size += MAX_FPRS_PER_FMT * UNITS_PER_FPREG;
7468 fmask |= ((1 << MAX_FPRS_PER_FMT) - 1) << (regno - FP_REG_FIRST);
7471 gp_reg_rounded = MIPS_STACK_ALIGN (gp_reg_size);
7472 total_size += gp_reg_rounded + MIPS_STACK_ALIGN (fp_reg_size);
7474 /* Add in the space required for saving incoming register arguments. */
7475 total_size += current_function_pretend_args_size;
7476 total_size += MIPS_STACK_ALIGN (cfun->machine->varargs_size);
7478 /* Save other computed information. */
7479 cfun->machine->frame.total_size = total_size;
7480 cfun->machine->frame.var_size = var_size;
7481 cfun->machine->frame.args_size = args_size;
7482 cfun->machine->frame.cprestore_size = cprestore_size;
7483 cfun->machine->frame.gp_reg_size = gp_reg_size;
7484 cfun->machine->frame.fp_reg_size = fp_reg_size;
7485 cfun->machine->frame.mask = mask;
7486 cfun->machine->frame.fmask = fmask;
7487 cfun->machine->frame.initialized = reload_completed;
7488 cfun->machine->frame.num_gp = gp_reg_size / UNITS_PER_WORD;
7489 cfun->machine->frame.num_fp = (fp_reg_size
7490 / (MAX_FPRS_PER_FMT * UNITS_PER_FPREG));
7494 HOST_WIDE_INT offset;
7496 if (GENERATE_MIPS16E_SAVE_RESTORE)
7497 /* MIPS16e SAVE and RESTORE instructions require the GP save area
7498 to be aligned at the high end with any padding at the low end.
7499 It is only safe to use this calculation for o32, where we never
7500 have pretend arguments, and where any varargs will be saved in
7501 the caller-allocated area rather than at the top of the frame. */
7502 offset = (total_size - GET_MODE_SIZE (gpr_mode));
7504 offset = (args_size + cprestore_size + var_size
7505 + gp_reg_size - GET_MODE_SIZE (gpr_mode));
7506 cfun->machine->frame.gp_sp_offset = offset;
7507 cfun->machine->frame.gp_save_offset = offset - total_size;
7511 cfun->machine->frame.gp_sp_offset = 0;
7512 cfun->machine->frame.gp_save_offset = 0;
7517 HOST_WIDE_INT offset;
7519 offset = (args_size + cprestore_size + var_size
7520 + gp_reg_rounded + fp_reg_size
7521 - MAX_FPRS_PER_FMT * UNITS_PER_FPREG);
7522 cfun->machine->frame.fp_sp_offset = offset;
7523 cfun->machine->frame.fp_save_offset = offset - total_size;
7527 cfun->machine->frame.fp_sp_offset = 0;
7528 cfun->machine->frame.fp_save_offset = 0;
7531 /* Ok, we're done. */
7535 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame
7536 pointer or argument pointer. TO is either the stack pointer or
7537 hard frame pointer. */
7540 mips_initial_elimination_offset (int from, int to)
7542 HOST_WIDE_INT offset;
7544 compute_frame_size (get_frame_size ());
7546 /* Set OFFSET to the offset from the stack pointer. */
7549 case FRAME_POINTER_REGNUM:
7553 case ARG_POINTER_REGNUM:
7554 offset = (cfun->machine->frame.total_size
7555 - current_function_pretend_args_size);
7562 if (TARGET_MIPS16 && to == HARD_FRAME_POINTER_REGNUM)
7563 offset -= cfun->machine->frame.args_size;
7568 /* Implement RETURN_ADDR_RTX. Note, we do not support moving
7569 back to a previous frame. */
7571 mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
7576 return get_hard_reg_initial_val (Pmode, GP_REG_FIRST + 31);
7579 /* Use FN to save or restore register REGNO. MODE is the register's
7580 mode and OFFSET is the offset of its save slot from the current
7584 mips_save_restore_reg (enum machine_mode mode, int regno,
7585 HOST_WIDE_INT offset, mips_save_restore_fn fn)
7589 mem = gen_frame_mem (mode, plus_constant (stack_pointer_rtx, offset));
7591 fn (gen_rtx_REG (mode, regno), mem);
7595 /* Call FN for each register that is saved by the current function.
7596 SP_OFFSET is the offset of the current stack pointer from the start
7600 mips_for_each_saved_reg (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
7602 enum machine_mode fpr_mode;
7603 HOST_WIDE_INT offset;
7606 /* Save registers starting from high to low. The debuggers prefer at least
7607 the return register be stored at func+4, and also it allows us not to
7608 need a nop in the epilogue if at least one register is reloaded in
7609 addition to return address. */
7610 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
7611 for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
7612 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
7614 mips_save_restore_reg (gpr_mode, regno, offset, fn);
7615 offset -= GET_MODE_SIZE (gpr_mode);
7618 /* This loop must iterate over the same space as its companion in
7619 compute_frame_size. */
7620 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
7621 fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
7622 for (regno = (FP_REG_LAST - MAX_FPRS_PER_FMT + 1);
7623 regno >= FP_REG_FIRST;
7624 regno -= MAX_FPRS_PER_FMT)
7625 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
7627 mips_save_restore_reg (fpr_mode, regno, offset, fn);
7628 offset -= GET_MODE_SIZE (fpr_mode);
7632 /* If we're generating n32 or n64 abicalls, and the current function
7633 does not use $28 as its global pointer, emit a cplocal directive.
7634 Use pic_offset_table_rtx as the argument to the directive. */
7637 mips_output_cplocal (void)
7639 if (!TARGET_EXPLICIT_RELOCS
7640 && cfun->machine->global_pointer > 0
7641 && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
7642 output_asm_insn (".cplocal %+", 0);
7645 /* Return the style of GP load sequence that is being used for the
7646 current function. */
7648 enum mips_loadgp_style
7649 mips_current_loadgp_style (void)
7651 if (!TARGET_USE_GOT || cfun->machine->global_pointer == 0)
7657 if (TARGET_ABSOLUTE_ABICALLS)
7658 return LOADGP_ABSOLUTE;
7660 return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI;
7663 /* The __gnu_local_gp symbol. */
7665 static GTY(()) rtx mips_gnu_local_gp;
7667 /* If we're generating n32 or n64 abicalls, emit instructions
7668 to set up the global pointer. */
7671 mips_emit_loadgp (void)
7673 rtx addr, offset, incoming_address, base, index;
7675 switch (mips_current_loadgp_style ())
7677 case LOADGP_ABSOLUTE:
7678 if (mips_gnu_local_gp == NULL)
7680 mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
7681 SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
7683 emit_insn (gen_loadgp_absolute (mips_gnu_local_gp));
7687 addr = XEXP (DECL_RTL (current_function_decl), 0);
7688 offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
7689 incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
7690 emit_insn (gen_loadgp_newabi (offset, incoming_address));
7691 if (!TARGET_EXPLICIT_RELOCS)
7692 emit_insn (gen_loadgp_blockage ());
7696 base = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_BASE));
7697 index = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_INDEX));
7698 emit_insn (gen_loadgp_rtp (base, index));
7699 if (!TARGET_EXPLICIT_RELOCS)
7700 emit_insn (gen_loadgp_blockage ());
7708 /* Set up the stack and frame (if desired) for the function. */
7711 mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
7714 HOST_WIDE_INT tsize = cfun->machine->frame.total_size;
7716 #ifdef SDB_DEBUGGING_INFO
7717 if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
7718 SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));
7721 /* In mips16 mode, we may need to generate a 32 bit to handle
7722 floating point arguments. The linker will arrange for any 32-bit
7723 functions to call this stub, which will then jump to the 16-bit
7726 && TARGET_HARD_FLOAT_ABI
7727 && current_function_args_info.fp_code != 0)
7728 build_mips16_function_stub (file);
7730 /* Select the mips16 mode for this function. */
7732 fprintf (file, "\t.set\tmips16\n");
7734 fprintf (file, "\t.set\tnomips16\n");
7736 if (!FUNCTION_NAME_ALREADY_DECLARED)
7738 /* Get the function name the same way that toplev.c does before calling
7739 assemble_start_function. This is needed so that the name used here
7740 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
7741 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
7743 if (!flag_inhibit_size_directive)
7745 fputs ("\t.ent\t", file);
7746 assemble_name (file, fnname);
7750 assemble_name (file, fnname);
7751 fputs (":\n", file);
7754 /* Stop mips_file_end from treating this function as external. */
7755 if (TARGET_IRIX && mips_abi == ABI_32)
7756 TREE_ASM_WRITTEN (DECL_NAME (cfun->decl)) = 1;
7758 if (!flag_inhibit_size_directive)
7760 /* .frame FRAMEREG, FRAMESIZE, RETREG */
7762 "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
7763 "# vars= " HOST_WIDE_INT_PRINT_DEC ", regs= %d/%d"
7764 ", args= " HOST_WIDE_INT_PRINT_DEC
7765 ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
7766 (reg_names[(frame_pointer_needed)
7767 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM]),
7768 ((frame_pointer_needed && TARGET_MIPS16)
7769 ? tsize - cfun->machine->frame.args_size
7771 reg_names[GP_REG_FIRST + 31],
7772 cfun->machine->frame.var_size,
7773 cfun->machine->frame.num_gp,
7774 cfun->machine->frame.num_fp,
7775 cfun->machine->frame.args_size,
7776 cfun->machine->frame.cprestore_size);
7778 /* .mask MASK, GPOFFSET; .fmask FPOFFSET */
7779 fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
7780 cfun->machine->frame.mask,
7781 cfun->machine->frame.gp_save_offset);
7782 fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
7783 cfun->machine->frame.fmask,
7784 cfun->machine->frame.fp_save_offset);
7787 OLD_SP == *FRAMEREG + FRAMESIZE => can find old_sp from nominated FP reg.
7788 HIGHEST_GP_SAVED == *FRAMEREG + FRAMESIZE + GPOFFSET => can find saved regs. */
7791 if (mips_current_loadgp_style () == LOADGP_OLDABI)
7793 /* Handle the initialization of $gp for SVR4 PIC. */
7794 if (!cfun->machine->all_noreorder_p)
7795 output_asm_insn ("%(.cpload\t%^%)", 0);
7797 output_asm_insn ("%(.cpload\t%^\n\t%<", 0);
7799 else if (cfun->machine->all_noreorder_p)
7800 output_asm_insn ("%(%<", 0);
7802 /* Tell the assembler which register we're using as the global
7803 pointer. This is needed for thunks, since they can use either
7804 explicit relocs or assembler macros. */
7805 mips_output_cplocal ();
7808 /* Make the last instruction frame related and note that it performs
7809 the operation described by FRAME_PATTERN. */
7812 mips_set_frame_expr (rtx frame_pattern)
7816 insn = get_last_insn ();
7817 RTX_FRAME_RELATED_P (insn) = 1;
7818 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7824 /* Return a frame-related rtx that stores REG at MEM.
7825 REG must be a single register. */
7828 mips_frame_set (rtx mem, rtx reg)
7832 /* If we're saving the return address register and the dwarf return
7833 address column differs from the hard register number, adjust the
7834 note reg to refer to the former. */
7835 if (REGNO (reg) == GP_REG_FIRST + 31
7836 && DWARF_FRAME_RETURN_COLUMN != GP_REG_FIRST + 31)
7837 reg = gen_rtx_REG (GET_MODE (reg), DWARF_FRAME_RETURN_COLUMN);
7839 set = gen_rtx_SET (VOIDmode, mem, reg);
7840 RTX_FRAME_RELATED_P (set) = 1;
7846 /* Save register REG to MEM. Make the instruction frame-related. */
7849 mips_save_reg (rtx reg, rtx mem)
7851 if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
7855 if (mips_split_64bit_move_p (mem, reg))
7856 mips_split_64bit_move (mem, reg);
7858 mips_emit_move (mem, reg);
7860 x1 = mips_frame_set (mips_subword (mem, 0), mips_subword (reg, 0));
7861 x2 = mips_frame_set (mips_subword (mem, 1), mips_subword (reg, 1));
7862 mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
7867 && REGNO (reg) != GP_REG_FIRST + 31
7868 && !M16_REG_P (REGNO (reg)))
7870 /* Save a non-mips16 register by moving it through a temporary.
7871 We don't need to do this for $31 since there's a special
7872 instruction for it. */
7873 mips_emit_move (MIPS_PROLOGUE_TEMP (GET_MODE (reg)), reg);
7874 mips_emit_move (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
7877 mips_emit_move (mem, reg);
7879 mips_set_frame_expr (mips_frame_set (mem, reg));
7883 /* Return a move between register REGNO and memory location SP + OFFSET.
7884 Make the move a load if RESTORE_P, otherwise make it a frame-related
7888 mips16e_save_restore_reg (bool restore_p, HOST_WIDE_INT offset,
7893 mem = gen_frame_mem (SImode, plus_constant (stack_pointer_rtx, offset));
7894 reg = gen_rtx_REG (SImode, regno);
7896 ? gen_rtx_SET (VOIDmode, reg, mem)
7897 : mips_frame_set (mem, reg));
7900 /* Return RTL for a MIPS16e SAVE or RESTORE instruction; RESTORE_P says which.
7901 The instruction must:
7903 - Allocate or deallocate SIZE bytes in total; SIZE is known
7906 - Save or restore as many registers in *MASK_PTR as possible.
7907 The instruction saves the first registers at the top of the
7908 allocated area, with the other registers below it.
7910 - Save NARGS argument registers above the allocated area.
7912 (NARGS is always zero if RESTORE_P.)
7914 The SAVE and RESTORE instructions cannot save and restore all general
7915 registers, so there may be some registers left over for the caller to
7916 handle. Destructively modify *MASK_PTR so that it contains the registers
7917 that still need to be saved or restored. The caller can save these
7918 registers in the memory immediately below *OFFSET_PTR, which is a
7919 byte offset from the bottom of the allocated stack area. */
7922 mips16e_build_save_restore (bool restore_p, unsigned int *mask_ptr,
7923 HOST_WIDE_INT *offset_ptr, unsigned int nargs,
7927 HOST_WIDE_INT offset, top_offset;
7928 unsigned int i, regno;
7931 gcc_assert (cfun->machine->frame.fp_reg_size == 0);
7933 /* Calculate the number of elements in the PARALLEL. We need one element
7934 for the stack adjustment, one for each argument register save, and one
7935 for each additional register move. */
7937 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
7938 if (BITSET_P (*mask_ptr, mips16e_save_restore_regs[i]))
7941 /* Create the final PARALLEL. */
7942 pattern = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (n));
7945 /* Add the stack pointer adjustment. */
7946 set = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7947 plus_constant (stack_pointer_rtx,
7948 restore_p ? size : -size));
7949 RTX_FRAME_RELATED_P (set) = 1;
7950 XVECEXP (pattern, 0, n++) = set;
7952 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
7953 top_offset = restore_p ? size : 0;
7955 /* Save the arguments. */
7956 for (i = 0; i < nargs; i++)
7958 offset = top_offset + i * GET_MODE_SIZE (gpr_mode);
7959 set = mips16e_save_restore_reg (restore_p, offset, GP_ARG_FIRST + i);
7960 XVECEXP (pattern, 0, n++) = set;
7963 /* Then fill in the other register moves. */
7964 offset = top_offset;
7965 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
7967 regno = mips16e_save_restore_regs[i];
7968 if (BITSET_P (*mask_ptr, regno))
7970 offset -= UNITS_PER_WORD;
7971 set = mips16e_save_restore_reg (restore_p, offset, regno);
7972 XVECEXP (pattern, 0, n++) = set;
7973 *mask_ptr &= ~(1 << regno);
7977 /* Tell the caller what offset it should use for the remaining registers. */
7978 *offset_ptr = size + (offset - top_offset) + size;
7980 gcc_assert (n == XVECLEN (pattern, 0));
7985 /* PATTERN is a PARALLEL whose first element adds ADJUST to the stack
7986 pointer. Return true if PATTERN matches the kind of instruction
7987 generated by mips16e_build_save_restore. If INFO is nonnull,
7988 initialize it when returning true. */
7991 mips16e_save_restore_pattern_p (rtx pattern, HOST_WIDE_INT adjust,
7992 struct mips16e_save_restore_info *info)
7994 unsigned int i, nargs, mask;
7995 HOST_WIDE_INT top_offset, save_offset, offset, extra;
7996 rtx set, reg, mem, base;
7999 if (!GENERATE_MIPS16E_SAVE_RESTORE)
8002 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
8003 top_offset = adjust > 0 ? adjust : 0;
8005 /* Interpret all other members of the PARALLEL. */
8006 save_offset = top_offset - GET_MODE_SIZE (gpr_mode);
8010 for (n = 1; n < XVECLEN (pattern, 0); n++)
8012 /* Check that we have a SET. */
8013 set = XVECEXP (pattern, 0, n);
8014 if (GET_CODE (set) != SET)
8017 /* Check that the SET is a load (if restoring) or a store
8019 mem = adjust > 0 ? SET_SRC (set) : SET_DEST (set);
8023 /* Check that the address is the sum of the stack pointer and a
8024 possibly-zero constant offset. */
8025 mips_split_plus (XEXP (mem, 0), &base, &offset);
8026 if (base != stack_pointer_rtx)
8029 /* Check that SET's other operand is a register. */
8030 reg = adjust > 0 ? SET_DEST (set) : SET_SRC (set);
8034 /* Check for argument saves. */
8035 if (offset == top_offset + nargs * GET_MODE_SIZE (gpr_mode)
8036 && REGNO (reg) == GP_ARG_FIRST + nargs)
8038 else if (offset == save_offset)
8040 while (mips16e_save_restore_regs[i++] != REGNO (reg))
8041 if (i == ARRAY_SIZE (mips16e_save_restore_regs))
8044 mask |= 1 << REGNO (reg);
8045 save_offset -= GET_MODE_SIZE (gpr_mode);
8051 /* Check that the restrictions on register ranges are met. */
8053 mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
8054 ARRAY_SIZE (mips16e_s2_s8_regs), &extra);
8055 mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
8056 ARRAY_SIZE (mips16e_a0_a3_regs), &extra);
8060 /* Make sure that the topmost argument register is not saved twice.
8061 The checks above ensure that the same is then true for the other
8062 argument registers. */
8063 if (nargs > 0 && BITSET_P (mask, GP_ARG_FIRST + nargs - 1))
8066 /* Pass back information, if requested. */
8069 info->nargs = nargs;
8071 info->size = (adjust > 0 ? adjust : -adjust);
8077 /* Add a MIPS16e SAVE or RESTORE register-range argument to string S
8078 for the register range [MIN_REG, MAX_REG]. Return a pointer to
8079 the null terminator. */
8082 mips16e_add_register_range (char *s, unsigned int min_reg,
8083 unsigned int max_reg)
8085 if (min_reg != max_reg)
8086 s += sprintf (s, ",%s-%s", reg_names[min_reg], reg_names[max_reg]);
8088 s += sprintf (s, ",%s", reg_names[min_reg]);
8092 /* Return the assembly instruction for a MIPS16e SAVE or RESTORE instruction.
8093 PATTERN and ADJUST are as for mips16e_save_restore_pattern_p. */
8096 mips16e_output_save_restore (rtx pattern, HOST_WIDE_INT adjust)
8098 static char buffer[300];
8100 struct mips16e_save_restore_info info;
8101 unsigned int i, end;
8104 /* Parse the pattern. */
8105 if (!mips16e_save_restore_pattern_p (pattern, adjust, &info))
8108 /* Add the mnemonic. */
8109 s = strcpy (buffer, adjust > 0 ? "restore\t" : "save\t");
8112 /* Save the arguments. */
8114 s += sprintf (s, "%s-%s,", reg_names[GP_ARG_FIRST],
8115 reg_names[GP_ARG_FIRST + info.nargs - 1]);
8116 else if (info.nargs == 1)
8117 s += sprintf (s, "%s,", reg_names[GP_ARG_FIRST]);
8119 /* Emit the amount of stack space to allocate or deallocate. */
8120 s += sprintf (s, "%d", (int) info.size);
8122 /* Save or restore $16. */
8123 if (BITSET_P (info.mask, 16))
8124 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 16]);
8126 /* Save or restore $17. */
8127 if (BITSET_P (info.mask, 17))
8128 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 17]);
8130 /* Save or restore registers in the range $s2...$s8, which
8131 mips16e_s2_s8_regs lists in decreasing order. Note that this
8132 is a software register range; the hardware registers are not
8133 numbered consecutively. */
8134 end = ARRAY_SIZE (mips16e_s2_s8_regs);
8135 i = mips16e_find_first_register (info.mask, mips16e_s2_s8_regs, end);
8137 s = mips16e_add_register_range (s, mips16e_s2_s8_regs[end - 1],
8138 mips16e_s2_s8_regs[i]);
8140 /* Save or restore registers in the range $a0...$a3. */
8141 end = ARRAY_SIZE (mips16e_a0_a3_regs);
8142 i = mips16e_find_first_register (info.mask, mips16e_a0_a3_regs, end);
8144 s = mips16e_add_register_range (s, mips16e_a0_a3_regs[i],
8145 mips16e_a0_a3_regs[end - 1]);
8147 /* Save or restore $31. */
8148 if (BITSET_P (info.mask, 31))
8149 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 31]);
8154 /* Return a simplified form of X using the register values in REG_VALUES.
8155 REG_VALUES[R] is the last value assigned to hard register R, or null
8156 if R has not been modified.
8158 This function is rather limited, but is good enough for our purposes. */
8161 mips16e_collect_propagate_value (rtx x, rtx *reg_values)
8165 x = avoid_constant_pool_reference (x);
8169 x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
8170 return simplify_gen_unary (GET_CODE (x), GET_MODE (x),
8171 x0, GET_MODE (XEXP (x, 0)));
8174 if (ARITHMETIC_P (x))
8176 x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
8177 x1 = mips16e_collect_propagate_value (XEXP (x, 1), reg_values);
8178 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), x0, x1);
8182 && reg_values[REGNO (x)]
8183 && !rtx_unstable_p (reg_values[REGNO (x)]))
8184 return reg_values[REGNO (x)];
8189 /* Return true if (set DEST SRC) stores an argument register into its
8190 caller-allocated save slot, storing the number of that argument
8191 register in *REGNO_PTR if so. REG_VALUES is as for
8192 mips16e_collect_propagate_value. */
8195 mips16e_collect_argument_save_p (rtx dest, rtx src, rtx *reg_values,
8196 unsigned int *regno_ptr)
8198 unsigned int argno, regno;
8199 HOST_WIDE_INT offset, required_offset;
8202 /* Check that this is a word-mode store. */
8203 if (!MEM_P (dest) || !REG_P (src) || GET_MODE (dest) != word_mode)
8206 /* Check that the register being saved is an unmodified argument
8208 regno = REGNO (src);
8209 if (regno < GP_ARG_FIRST || regno > GP_ARG_LAST || reg_values[regno])
8211 argno = regno - GP_ARG_FIRST;
8213 /* Check whether the address is an appropriate stack pointer or
8214 frame pointer access. The frame pointer is offset from the
8215 stack pointer by the size of the outgoing arguments. */
8216 addr = mips16e_collect_propagate_value (XEXP (dest, 0), reg_values);
8217 mips_split_plus (addr, &base, &offset);
8218 required_offset = cfun->machine->frame.total_size + argno * UNITS_PER_WORD;
8219 if (base == hard_frame_pointer_rtx)
8220 required_offset -= cfun->machine->frame.args_size;
8221 else if (base != stack_pointer_rtx)
8223 if (offset != required_offset)
8230 /* A subroutine of mips_expand_prologue, called only when generating
8231 MIPS16e SAVE instructions. Search the start of the function for any
8232 instructions that save argument registers into their caller-allocated
8233 save slots. Delete such instructions and return a value N such that
8234 saving [GP_ARG_FIRST, GP_ARG_FIRST + N) would make all the deleted
8235 instructions redundant. */
8238 mips16e_collect_argument_saves (void)
8240 rtx reg_values[FIRST_PSEUDO_REGISTER];
8241 rtx insn, next, set, dest, src;
8242 unsigned int nargs, regno;
8244 push_topmost_sequence ();
8246 memset (reg_values, 0, sizeof (reg_values));
8247 for (insn = get_insns (); insn; insn = next)
8249 next = NEXT_INSN (insn);
8256 set = PATTERN (insn);
8257 if (GET_CODE (set) != SET)
8260 dest = SET_DEST (set);
8261 src = SET_SRC (set);
8262 if (mips16e_collect_argument_save_p (dest, src, reg_values, ®no))
8264 if (!BITSET_P (cfun->machine->frame.mask, regno))
8267 nargs = MAX (nargs, (regno - GP_ARG_FIRST) + 1);
8270 else if (REG_P (dest) && GET_MODE (dest) == word_mode)
8271 reg_values[REGNO (dest)]
8272 = mips16e_collect_propagate_value (src, reg_values);
8276 pop_topmost_sequence ();
8281 /* Expand the prologue into a bunch of separate insns. */
8284 mips_expand_prologue (void)
8290 if (cfun->machine->global_pointer > 0)
8291 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
8293 size = compute_frame_size (get_frame_size ());
8295 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
8296 bytes beforehand; this is enough to cover the register save area
8297 without going out of range. */
8298 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
8300 HOST_WIDE_INT step1;
8302 step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
8304 if (GENERATE_MIPS16E_SAVE_RESTORE)
8306 HOST_WIDE_INT offset;
8307 unsigned int mask, regno;
8309 /* Try to merge argument stores into the save instruction. */
8310 nargs = mips16e_collect_argument_saves ();
8312 /* Build the save instruction. */
8313 mask = cfun->machine->frame.mask;
8314 insn = mips16e_build_save_restore (false, &mask, &offset,
8316 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
8319 /* Check if we need to save other registers. */
8320 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
8321 if (BITSET_P (mask, regno - GP_REG_FIRST))
8323 offset -= GET_MODE_SIZE (gpr_mode);
8324 mips_save_restore_reg (gpr_mode, regno, offset, mips_save_reg);
8329 insn = gen_add3_insn (stack_pointer_rtx,
8332 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
8334 mips_for_each_saved_reg (size, mips_save_reg);
8338 /* Allocate the rest of the frame. */
8341 if (SMALL_OPERAND (-size))
8342 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
8344 GEN_INT (-size)))) = 1;
8347 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
8350 /* There are no instructions to add or subtract registers
8351 from the stack pointer, so use the frame pointer as a
8352 temporary. We should always be using a frame pointer
8353 in this case anyway. */
8354 gcc_assert (frame_pointer_needed);
8355 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
8356 emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
8357 hard_frame_pointer_rtx,
8358 MIPS_PROLOGUE_TEMP (Pmode)));
8359 mips_emit_move (stack_pointer_rtx, hard_frame_pointer_rtx);
8362 emit_insn (gen_sub3_insn (stack_pointer_rtx,
8364 MIPS_PROLOGUE_TEMP (Pmode)));
8366 /* Describe the combined effect of the previous instructions. */
8368 (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8369 plus_constant (stack_pointer_rtx, -size)));
8373 /* Set up the frame pointer, if we're using one. In mips16 code,
8374 we point the frame pointer ahead of the outgoing argument area.
8375 This should allow more variables & incoming arguments to be
8376 accessed with unextended instructions. */
8377 if (frame_pointer_needed)
8379 if (TARGET_MIPS16 && cfun->machine->frame.args_size != 0)
8381 rtx offset = GEN_INT (cfun->machine->frame.args_size);
8382 if (SMALL_OPERAND (cfun->machine->frame.args_size))
8384 (emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
8389 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), offset);
8390 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
8391 emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
8392 hard_frame_pointer_rtx,
8393 MIPS_PROLOGUE_TEMP (Pmode)));
8395 (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
8396 plus_constant (stack_pointer_rtx,
8397 cfun->machine->frame.args_size)));
8401 RTX_FRAME_RELATED_P (mips_emit_move (hard_frame_pointer_rtx,
8402 stack_pointer_rtx)) = 1;
8405 mips_emit_loadgp ();
8407 /* If generating o32/o64 abicalls, save $gp on the stack. */
8408 if (TARGET_ABICALLS && TARGET_OLDABI && !current_function_is_leaf)
8409 emit_insn (gen_cprestore (GEN_INT (current_function_outgoing_args_size)));
8411 /* If we are profiling, make sure no instructions are scheduled before
8412 the call to mcount. */
8414 if (current_function_profile)
8415 emit_insn (gen_blockage ());
8418 /* Do any necessary cleanup after a function to restore stack, frame,
8421 #define RA_MASK BITMASK_HIGH /* 1 << 31 */
8424 mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
8425 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
8427 /* Reinstate the normal $gp. */
8428 SET_REGNO (pic_offset_table_rtx, GLOBAL_POINTER_REGNUM);
8429 mips_output_cplocal ();
8431 if (cfun->machine->all_noreorder_p)
8433 /* Avoid using %>%) since it adds excess whitespace. */
8434 output_asm_insn (".set\tmacro", 0);
8435 output_asm_insn (".set\treorder", 0);
8436 set_noreorder = set_nomacro = 0;
8439 if (!FUNCTION_NAME_ALREADY_DECLARED && !flag_inhibit_size_directive)
8443 /* Get the function name the same way that toplev.c does before calling
8444 assemble_start_function. This is needed so that the name used here
8445 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
8446 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
8447 fputs ("\t.end\t", file);
8448 assemble_name (file, fnname);
8453 /* Emit instructions to restore register REG from slot MEM. */
8456 mips_restore_reg (rtx reg, rtx mem)
8458 /* There's no mips16 instruction to load $31 directly. Load into
8459 $7 instead and adjust the return insn appropriately. */
8460 if (TARGET_MIPS16 && REGNO (reg) == GP_REG_FIRST + 31)
8461 reg = gen_rtx_REG (GET_MODE (reg), 7);
8463 if (TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
8465 /* Can't restore directly; move through a temporary. */
8466 mips_emit_move (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
8467 mips_emit_move (reg, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
8470 mips_emit_move (reg, mem);
8474 /* Expand the epilogue into a bunch of separate insns. SIBCALL_P is true
8475 if this epilogue precedes a sibling call, false if it is for a normal
8476 "epilogue" pattern. */
8479 mips_expand_epilogue (int sibcall_p)
8481 HOST_WIDE_INT step1, step2;
8484 if (!sibcall_p && mips_can_use_return_insn ())
8486 emit_jump_insn (gen_return ());
8490 /* In mips16 mode, if the return value should go into a floating-point
8491 register, we need to call a helper routine to copy it over. */
8492 if (mips16_cfun_returns_in_fpr_p ())
8501 enum machine_mode return_mode;
8503 return_type = DECL_RESULT (current_function_decl);
8504 return_mode = DECL_MODE (return_type);
8506 name = ACONCAT (("__mips16_ret_",
8507 mips16_call_stub_mode_suffix (return_mode),
8509 id = get_identifier (name);
8510 func = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
8511 retval = gen_rtx_REG (return_mode, GP_RETURN);
8512 call = gen_call_value_internal (retval, func, const0_rtx);
8513 insn = emit_call_insn (call);
8514 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), retval);
8517 /* Split the frame into two. STEP1 is the amount of stack we should
8518 deallocate before restoring the registers. STEP2 is the amount we
8519 should deallocate afterwards.
8521 Start off by assuming that no registers need to be restored. */
8522 step1 = cfun->machine->frame.total_size;
8525 /* Work out which register holds the frame address. Account for the
8526 frame pointer offset used by mips16 code. */
8527 if (!frame_pointer_needed)
8528 base = stack_pointer_rtx;
8531 base = hard_frame_pointer_rtx;
8533 step1 -= cfun->machine->frame.args_size;
8536 /* If we need to restore registers, deallocate as much stack as
8537 possible in the second step without going out of range. */
8538 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
8540 step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
8544 /* Set TARGET to BASE + STEP1. */
8550 /* Get an rtx for STEP1 that we can add to BASE. */
8551 adjust = GEN_INT (step1);
8552 if (!SMALL_OPERAND (step1))
8554 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), adjust);
8555 adjust = MIPS_EPILOGUE_TEMP (Pmode);
8558 /* Normal mode code can copy the result straight into $sp. */
8560 target = stack_pointer_rtx;
8562 emit_insn (gen_add3_insn (target, base, adjust));
8565 /* Copy TARGET into the stack pointer. */
8566 if (target != stack_pointer_rtx)
8567 mips_emit_move (stack_pointer_rtx, target);
8569 /* If we're using addressing macros, $gp is implicitly used by all
8570 SYMBOL_REFs. We must emit a blockage insn before restoring $gp
8572 if (TARGET_CALL_SAVED_GP && !TARGET_EXPLICIT_RELOCS)
8573 emit_insn (gen_blockage ());
8575 if (GENERATE_MIPS16E_SAVE_RESTORE && cfun->machine->frame.mask != 0)
8577 unsigned int regno, mask;
8578 HOST_WIDE_INT offset;
8581 /* Generate the restore instruction. */
8582 mask = cfun->machine->frame.mask;
8583 restore = mips16e_build_save_restore (true, &mask, &offset, 0, step2);
8585 /* Restore any other registers manually. */
8586 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
8587 if (BITSET_P (mask, regno - GP_REG_FIRST))
8589 offset -= GET_MODE_SIZE (gpr_mode);
8590 mips_save_restore_reg (gpr_mode, regno, offset, mips_restore_reg);
8593 /* Restore the remaining registers and deallocate the final bit
8595 emit_insn (restore);
8599 /* Restore the registers. */
8600 mips_for_each_saved_reg (cfun->machine->frame.total_size - step2,
8603 /* Deallocate the final bit of the frame. */
8605 emit_insn (gen_add3_insn (stack_pointer_rtx,
8610 /* Add in the __builtin_eh_return stack adjustment. We need to
8611 use a temporary in mips16 code. */
8612 if (current_function_calls_eh_return)
8616 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
8617 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
8618 MIPS_EPILOGUE_TEMP (Pmode),
8619 EH_RETURN_STACKADJ_RTX));
8620 mips_emit_move (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
8623 emit_insn (gen_add3_insn (stack_pointer_rtx,
8625 EH_RETURN_STACKADJ_RTX));
8630 /* When generating MIPS16 code, the normal mips_for_each_saved_reg
8631 path will restore the return address into $7 rather than $31. */
8633 && !GENERATE_MIPS16E_SAVE_RESTORE
8634 && (cfun->machine->frame.mask & RA_MASK) != 0)
8635 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
8636 GP_REG_FIRST + 7)));
8638 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
8639 GP_REG_FIRST + 31)));
8643 /* Return nonzero if this function is known to have a null epilogue.
8644 This allows the optimizer to omit jumps to jumps if no stack
8648 mips_can_use_return_insn (void)
8650 if (! reload_completed)
8653 if (df_regs_ever_live_p (31) || current_function_profile)
8656 /* In mips16 mode, a function that returns a floating point value
8657 needs to arrange to copy the return value into the floating point
8659 if (mips16_cfun_returns_in_fpr_p ())
8662 if (cfun->machine->frame.initialized)
8663 return cfun->machine->frame.total_size == 0;
8665 return compute_frame_size (get_frame_size ()) == 0;
8668 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
8669 in order to avoid duplicating too much logic from elsewhere. */
8672 mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8673 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8676 rtx this, temp1, temp2, insn, fnaddr;
8678 /* Pretend to be a post-reload pass while generating rtl. */
8679 reload_completed = 1;
8681 /* Mark the end of the (empty) prologue. */
8682 emit_note (NOTE_INSN_PROLOGUE_END);
8684 /* Pick a global pointer. Use a call-clobbered register if
8685 TARGET_CALL_SAVED_GP, so that we can use a sibcall. */
8688 cfun->machine->global_pointer =
8689 TARGET_CALL_SAVED_GP ? 15 : GLOBAL_POINTER_REGNUM;
8691 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
8694 /* Set up the global pointer for n32 or n64 abicalls. If
8695 LOADGP_ABSOLUTE then the thunk does not use the gp and there is
8696 no need to load it.*/
8697 if (mips_current_loadgp_style () != LOADGP_ABSOLUTE
8698 || !targetm.binds_local_p (function))
8699 mips_emit_loadgp ();
8701 /* We need two temporary registers in some cases. */
8702 temp1 = gen_rtx_REG (Pmode, 2);
8703 temp2 = gen_rtx_REG (Pmode, 3);
8705 /* Find out which register contains the "this" pointer. */
8706 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8707 this = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
8709 this = gen_rtx_REG (Pmode, GP_ARG_FIRST);
8711 /* Add DELTA to THIS. */
8714 rtx offset = GEN_INT (delta);
8715 if (!SMALL_OPERAND (delta))
8717 mips_emit_move (temp1, offset);
8720 emit_insn (gen_add3_insn (this, this, offset));
8723 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
8724 if (vcall_offset != 0)
8728 /* Set TEMP1 to *THIS. */
8729 mips_emit_move (temp1, gen_rtx_MEM (Pmode, this));
8731 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
8732 addr = mips_add_offset (temp2, temp1, vcall_offset);
8734 /* Load the offset and add it to THIS. */
8735 mips_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
8736 emit_insn (gen_add3_insn (this, this, temp1));
8739 /* Jump to the target function. Use a sibcall if direct jumps are
8740 allowed, otherwise load the address into a register first. */
8741 fnaddr = XEXP (DECL_RTL (function), 0);
8742 if (TARGET_MIPS16 || TARGET_USE_GOT || SYMBOL_REF_LONG_CALL_P (fnaddr)
8743 || SYMBOL_REF_MIPS16_FUNC_P (fnaddr))
8745 /* This is messy. gas treats "la $25,foo" as part of a call
8746 sequence and may allow a global "foo" to be lazily bound.
8747 The general move patterns therefore reject this combination.
8749 In this context, lazy binding would actually be OK
8750 for TARGET_CALL_CLOBBERED_GP, but it's still wrong for
8751 TARGET_CALL_SAVED_GP; see mips_load_call_address.
8752 We must therefore load the address via a temporary
8753 register if mips_dangerous_for_la25_p.
8755 If we jump to the temporary register rather than $25, the assembler
8756 can use the move insn to fill the jump's delay slot. */
8757 if (TARGET_USE_PIC_FN_ADDR_REG
8758 && !mips_dangerous_for_la25_p (fnaddr))
8759 temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
8760 mips_load_call_address (temp1, fnaddr, true);
8762 if (TARGET_USE_PIC_FN_ADDR_REG
8763 && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
8764 mips_emit_move (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
8765 emit_jump_insn (gen_indirect_jump (temp1));
8769 insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
8770 SIBLING_CALL_P (insn) = 1;
8773 /* Run just enough of rest_of_compilation. This sequence was
8774 "borrowed" from alpha.c. */
8775 insn = get_insns ();
8776 insn_locators_alloc ();
8777 split_all_insns_noflow ();
8778 mips16_lay_out_constants ();
8779 shorten_branches (insn);
8780 final_start_function (insn, file, 1);
8781 final (insn, file, 1);
8782 final_end_function ();
8784 /* Clean up the vars set above. Note that final_end_function resets
8785 the global pointer for us. */
8786 reload_completed = 0;
8789 /* Implement TARGET_SELECT_RTX_SECTION. */
8792 mips_select_rtx_section (enum machine_mode mode, rtx x,
8793 unsigned HOST_WIDE_INT align)
8795 /* ??? Consider using mergeable small data sections. */
8796 if (mips_rtx_constant_in_small_data_p (mode))
8797 return get_named_section (NULL, ".sdata", 0);
8799 return default_elf_select_rtx_section (mode, x, align);
8802 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
8804 The complication here is that, with the combination TARGET_ABICALLS
8805 && !TARGET_GPWORD, jump tables will use absolute addresses, and should
8806 therefore not be included in the read-only part of a DSO. Handle such
8807 cases by selecting a normal data section instead of a read-only one.
8808 The logic apes that in default_function_rodata_section. */
8811 mips_function_rodata_section (tree decl)
8813 if (!TARGET_ABICALLS || TARGET_GPWORD)
8814 return default_function_rodata_section (decl);
8816 if (decl && DECL_SECTION_NAME (decl))
8818 const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
8819 if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
8821 char *rname = ASTRDUP (name);
8823 return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
8825 else if (flag_function_sections && flag_data_sections
8826 && strncmp (name, ".text.", 6) == 0)
8828 char *rname = ASTRDUP (name);
8829 memcpy (rname + 1, "data", 4);
8830 return get_section (rname, SECTION_WRITE, decl);
8833 return data_section;
8836 /* Implement TARGET_IN_SMALL_DATA_P. This function controls whether
8837 locally-defined objects go in a small data section. It also controls
8838 the setting of the SYMBOL_REF_SMALL_P flag, which in turn helps
8839 mips_classify_symbol decide when to use %gp_rel(...)($gp) accesses. */
8842 mips_in_small_data_p (const_tree decl)
8846 if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
8849 /* We don't yet generate small-data references for -mabicalls or
8850 VxWorks RTP code. See the related -G handling in override_options. */
8851 if (TARGET_ABICALLS || TARGET_VXWORKS_RTP)
8854 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
8858 /* Reject anything that isn't in a known small-data section. */
8859 name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
8860 if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
8863 /* If a symbol is defined externally, the assembler will use the
8864 usual -G rules when deciding how to implement macros. */
8865 if (mips_lo_relocs[SYMBOL_GP_RELATIVE] || !DECL_EXTERNAL (decl))
8868 else if (TARGET_EMBEDDED_DATA)
8870 /* Don't put constants into the small data section: we want them
8871 to be in ROM rather than RAM. */
8872 if (TREE_CODE (decl) != VAR_DECL)
8875 if (TREE_READONLY (decl)
8876 && !TREE_SIDE_EFFECTS (decl)
8877 && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
8881 /* Enforce -mlocal-sdata. */
8882 if (!TARGET_LOCAL_SDATA && !TREE_PUBLIC (decl))
8885 /* Enforce -mextern-sdata. */
8886 if (!TARGET_EXTERN_SDATA && DECL_P (decl))
8888 if (DECL_EXTERNAL (decl))
8890 if (DECL_COMMON (decl) && DECL_INITIAL (decl) == NULL)
8894 size = int_size_in_bytes (TREE_TYPE (decl));
8895 return (size > 0 && size <= mips_section_threshold);
8898 /* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
8899 anchors for small data: the GP register acts as an anchor in that
8900 case. We also don't want to use them for PC-relative accesses,
8901 where the PC acts as an anchor. */
8904 mips_use_anchors_for_symbol_p (const_rtx symbol)
8906 switch (mips_classify_symbol (symbol, SYMBOL_CONTEXT_MEM))
8908 case SYMBOL_PC_RELATIVE:
8909 case SYMBOL_GP_RELATIVE:
8917 /* See whether VALTYPE is a record whose fields should be returned in
8918 floating-point registers. If so, return the number of fields and
8919 list them in FIELDS (which should have two elements). Return 0
8922 For n32 & n64, a structure with one or two fields is returned in
8923 floating-point registers as long as every field has a floating-point
8927 mips_fpr_return_fields (const_tree valtype, tree *fields)
8935 if (TREE_CODE (valtype) != RECORD_TYPE)
8939 for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
8941 if (TREE_CODE (field) != FIELD_DECL)
8944 if (TREE_CODE (TREE_TYPE (field)) != REAL_TYPE)
8950 fields[i++] = field;
8956 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
8957 a value in the most significant part of $2/$3 if:
8959 - the target is big-endian;
8961 - the value has a structure or union type (we generalize this to
8962 cover aggregates from other languages too); and
8964 - the structure is not returned in floating-point registers. */
8967 mips_return_in_msb (const_tree valtype)
8971 return (TARGET_NEWABI
8972 && TARGET_BIG_ENDIAN
8973 && AGGREGATE_TYPE_P (valtype)
8974 && mips_fpr_return_fields (valtype, fields) == 0);
8978 /* Return a composite value in a pair of floating-point registers.
8979 MODE1 and OFFSET1 are the mode and byte offset for the first value,
8980 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
8983 For n32 & n64, $f0 always holds the first value and $f2 the second.
8984 Otherwise the values are packed together as closely as possible. */
8987 mips_return_fpr_pair (enum machine_mode mode,
8988 enum machine_mode mode1, HOST_WIDE_INT offset1,
8989 enum machine_mode mode2, HOST_WIDE_INT offset2)
8993 inc = (TARGET_NEWABI ? 2 : MAX_FPRS_PER_FMT);
8994 return gen_rtx_PARALLEL
8997 gen_rtx_EXPR_LIST (VOIDmode,
8998 gen_rtx_REG (mode1, FP_RETURN),
9000 gen_rtx_EXPR_LIST (VOIDmode,
9001 gen_rtx_REG (mode2, FP_RETURN + inc),
9002 GEN_INT (offset2))));
9007 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
9008 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
9009 VALTYPE is null and MODE is the mode of the return value. */
9012 mips_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
9013 enum machine_mode mode)
9020 mode = TYPE_MODE (valtype);
9021 unsignedp = TYPE_UNSIGNED (valtype);
9023 /* Since we define TARGET_PROMOTE_FUNCTION_RETURN that returns
9024 true, we must promote the mode just as PROMOTE_MODE does. */
9025 mode = promote_mode (valtype, mode, &unsignedp, 1);
9027 /* Handle structures whose fields are returned in $f0/$f2. */
9028 switch (mips_fpr_return_fields (valtype, fields))
9031 return gen_rtx_REG (mode, FP_RETURN);
9034 return mips_return_fpr_pair (mode,
9035 TYPE_MODE (TREE_TYPE (fields[0])),
9036 int_byte_position (fields[0]),
9037 TYPE_MODE (TREE_TYPE (fields[1])),
9038 int_byte_position (fields[1]));
9041 /* If a value is passed in the most significant part of a register, see
9042 whether we have to round the mode up to a whole number of words. */
9043 if (mips_return_in_msb (valtype))
9045 HOST_WIDE_INT size = int_size_in_bytes (valtype);
9046 if (size % UNITS_PER_WORD != 0)
9048 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
9049 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
9053 /* For EABI, the class of return register depends entirely on MODE.
9054 For example, "struct { some_type x; }" and "union { some_type x; }"
9055 are returned in the same way as a bare "some_type" would be.
9056 Other ABIs only use FPRs for scalar, complex or vector types. */
9057 if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
9058 return gen_rtx_REG (mode, GP_RETURN);
9063 /* Handle long doubles for n32 & n64. */
9065 return mips_return_fpr_pair (mode,
9067 DImode, GET_MODE_SIZE (mode) / 2);
9069 if (mips_return_mode_in_fpr_p (mode))
9071 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
9072 return mips_return_fpr_pair (mode,
9073 GET_MODE_INNER (mode), 0,
9074 GET_MODE_INNER (mode),
9075 GET_MODE_SIZE (mode) / 2);
9077 return gen_rtx_REG (mode, FP_RETURN);
9081 return gen_rtx_REG (mode, GP_RETURN);
9084 /* Return nonzero when an argument must be passed by reference. */
9087 mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
9088 enum machine_mode mode, const_tree type,
9089 bool named ATTRIBUTE_UNUSED)
9091 if (mips_abi == ABI_EABI)
9095 /* ??? How should SCmode be handled? */
9096 if (mode == DImode || mode == DFmode
9097 || mode == DQmode || mode == UDQmode
9098 || mode == DAmode || mode == UDAmode)
9101 size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
9102 return size == -1 || size > UNITS_PER_WORD;
9106 /* If we have a variable-sized parameter, we have no choice. */
9107 return targetm.calls.must_pass_in_stack (mode, type);
9112 mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
9113 enum machine_mode mode ATTRIBUTE_UNUSED,
9114 const_tree type ATTRIBUTE_UNUSED, bool named)
9116 return mips_abi == ABI_EABI && named;
9119 /* Return true if registers of class CLASS cannot change from mode FROM
9123 mips_cannot_change_mode_class (enum machine_mode from,
9124 enum machine_mode to, enum reg_class class)
9126 if (MIN (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) <= UNITS_PER_WORD
9127 && MAX (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) > UNITS_PER_WORD)
9129 if (TARGET_BIG_ENDIAN)
9131 /* When a multi-word value is stored in paired floating-point
9132 registers, the first register always holds the low word.
9133 We therefore can't allow FPRs to change between single-word
9134 and multi-word modes. */
9135 if (MAX_FPRS_PER_FMT > 1 && reg_classes_intersect_p (FP_REGS, class))
9140 /* gcc assumes that each word of a multiword register can be accessed
9141 individually using SUBREGs. This is not true for floating-point
9142 registers if they are bigger than a word. */
9143 if (UNITS_PER_FPREG > UNITS_PER_WORD
9144 && GET_MODE_SIZE (from) > UNITS_PER_WORD
9145 && GET_MODE_SIZE (to) < UNITS_PER_FPREG
9146 && reg_classes_intersect_p (FP_REGS, class))
9149 /* Loading a 32-bit value into a 64-bit floating-point register
9150 will not sign-extend the value, despite what LOAD_EXTEND_OP says.
9151 We can't allow 64-bit float registers to change from SImode to
9156 && GET_MODE_SIZE (to) >= UNITS_PER_WORD
9157 && reg_classes_intersect_p (FP_REGS, class))
9163 /* Return true if X should not be moved directly into register $25.
9164 We need this because many versions of GAS will treat "la $25,foo" as
9165 part of a call sequence and so allow a global "foo" to be lazily bound. */
9168 mips_dangerous_for_la25_p (rtx x)
9170 return (!TARGET_EXPLICIT_RELOCS
9172 && GET_CODE (x) == SYMBOL_REF
9173 && mips_global_symbol_p (x));
9176 /* Implement PREFERRED_RELOAD_CLASS. */
9179 mips_preferred_reload_class (rtx x, enum reg_class class)
9181 if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, class))
9184 if (TARGET_HARD_FLOAT
9185 && FLOAT_MODE_P (GET_MODE (x))
9186 && reg_class_subset_p (FP_REGS, class))
9189 if (reg_class_subset_p (GR_REGS, class))
9192 if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, class))
9198 /* This function returns the register class required for a secondary
9199 register when copying between one of the registers in CLASS, and X,
9200 using MODE. If IN_P is nonzero, the copy is going from X to the
9201 register, otherwise the register is the source. A return value of
9202 NO_REGS means that no secondary register is required. */
9205 mips_secondary_reload_class (enum reg_class class,
9206 enum machine_mode mode, rtx x, int in_p)
9208 enum reg_class gr_regs = TARGET_MIPS16 ? M16_REGS : GR_REGS;
9212 if (REG_P (x)|| GET_CODE (x) == SUBREG)
9213 regno = true_regnum (x);
9215 gp_reg_p = TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
9217 if (mips_dangerous_for_la25_p (x))
9220 if (TEST_HARD_REG_BIT (reg_class_contents[(int) class], 25))
9224 /* Copying from HI or LO to anywhere other than a general register
9225 requires a general register.
9226 This rule applies to both the original HI/LO pair and the new
9227 DSP accumulators. */
9228 if (reg_class_subset_p (class, ACC_REGS))
9230 if (TARGET_MIPS16 && in_p)
9232 /* We can't really copy to HI or LO at all in mips16 mode. */
9235 return gp_reg_p ? NO_REGS : gr_regs;
9237 if (ACC_REG_P (regno))
9239 if (TARGET_MIPS16 && ! in_p)
9241 /* We can't really copy to HI or LO at all in mips16 mode. */
9244 return class == gr_regs ? NO_REGS : gr_regs;
9247 /* We can only copy a value to a condition code register from a
9248 floating point register, and even then we require a scratch
9249 floating point register. We can only copy a value out of a
9250 condition code register into a general register. */
9251 if (class == ST_REGS)
9255 return gp_reg_p ? NO_REGS : gr_regs;
9257 if (ST_REG_P (regno))
9261 return class == gr_regs ? NO_REGS : gr_regs;
9264 if (class == FP_REGS)
9268 /* In this case we can use lwc1, swc1, ldc1 or sdc1. */
9271 else if (CONSTANT_P (x) && GET_MODE_CLASS (mode) == MODE_FLOAT)
9273 /* We can use the l.s and l.d macros to load floating-point
9274 constants. ??? For l.s, we could probably get better
9275 code by returning GR_REGS here. */
9278 else if (gp_reg_p || x == CONST0_RTX (mode))
9280 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
9283 else if (FP_REG_P (regno))
9285 /* In this case we can use mov.s or mov.d. */
9290 /* Otherwise, we need to reload through an integer register. */
9295 /* In mips16 mode, going between memory and anything but M16_REGS
9296 requires an M16_REG. */
9299 if (class != M16_REGS && class != M16_NA_REGS)
9307 if (class == M16_REGS || class == M16_NA_REGS)
9316 /* Implement CLASS_MAX_NREGS.
9318 - UNITS_PER_FPREG controls the number of registers needed by FP_REGS.
9320 - ST_REGS are always hold CCmode values, and CCmode values are
9321 considered to be 4 bytes wide.
9323 All other register classes are covered by UNITS_PER_WORD. Note that
9324 this is true even for unions of integer and float registers when the
9325 latter are smaller than the former. The only supported combination
9326 in which case this occurs is -mgp64 -msingle-float, which has 64-bit
9327 words but 32-bit float registers. A word-based calculation is correct
9328 in that case since -msingle-float disallows multi-FPR values. */
9331 mips_class_max_nregs (enum reg_class class ATTRIBUTE_UNUSED,
9332 enum machine_mode mode)
9334 if (class == ST_REGS)
9335 return (GET_MODE_SIZE (mode) + 3) / 4;
9336 else if (class == FP_REGS)
9337 return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
9339 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
9343 mips_valid_pointer_mode (enum machine_mode mode)
9345 return (mode == SImode || (TARGET_64BIT && mode == DImode));
9348 /* Target hook for vector_mode_supported_p. */
9351 mips_vector_mode_supported_p (enum machine_mode mode)
9356 return TARGET_PAIRED_SINGLE_FLOAT;
9373 /* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
9376 mips_scalar_mode_supported_p (enum machine_mode mode)
9378 if (ALL_FIXED_POINT_MODE_P (mode)
9379 && GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD)
9382 return default_scalar_mode_supported_p (mode);
9385 /* If we can access small data directly (using gp-relative relocation
9386 operators) return the small data pointer, otherwise return null.
9388 For each mips16 function which refers to GP relative symbols, we
9389 use a pseudo register, initialized at the start of the function, to
9390 hold the $gp value. */
9393 mips16_gp_pseudo_reg (void)
9395 if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
9396 cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
9398 /* Don't initialize the pseudo register if we are being called from
9399 the tree optimizers' cost-calculation routines. */
9400 if (!cfun->machine->initialized_mips16_gp_pseudo_p
9401 && (current_ir_type () != IR_GIMPLE || currently_expanding_to_rtl))
9405 /* We want to initialize this to a value which gcc will believe
9407 insn = gen_load_const_gp (cfun->machine->mips16_gp_pseudo_rtx);
9409 push_topmost_sequence ();
9410 /* We need to emit the initialization after the FUNCTION_BEG
9411 note, so that it will be integrated. */
9412 for (scan = get_insns (); scan != NULL_RTX; scan = NEXT_INSN (scan))
9414 && NOTE_KIND (scan) == NOTE_INSN_FUNCTION_BEG)
9416 if (scan == NULL_RTX)
9417 scan = get_insns ();
9418 insn = emit_insn_after (insn, scan);
9419 pop_topmost_sequence ();
9421 cfun->machine->initialized_mips16_gp_pseudo_p = true;
9424 return cfun->machine->mips16_gp_pseudo_rtx;
9427 /* Write out code to move floating point arguments in or out of
9428 general registers. Output the instructions to FILE. FP_CODE is
9429 the code describing which arguments are present (see the comment at
9430 the definition of CUMULATIVE_ARGS in mips.h). FROM_FP_P is nonzero if
9431 we are copying from the floating point registers. */
9434 mips16_fp_args (FILE *file, int fp_code, int from_fp_p)
9439 CUMULATIVE_ARGS cum;
9441 /* This code only works for the original 32-bit ABI and the O64 ABI. */
9442 gcc_assert (TARGET_OLDABI);
9449 init_cumulative_args (&cum, NULL, NULL);
9451 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
9453 enum machine_mode mode;
9454 struct mips_arg_info info;
9458 else if ((f & 3) == 2)
9463 mips_arg_info (&cum, mode, NULL, true, &info);
9464 gparg = mips_arg_regno (&info, false);
9465 fparg = mips_arg_regno (&info, true);
9468 fprintf (file, "\t%s\t%s,%s\n", s,
9469 reg_names[gparg], reg_names[fparg]);
9470 else if (TARGET_64BIT)
9471 fprintf (file, "\td%s\t%s,%s\n", s,
9472 reg_names[gparg], reg_names[fparg]);
9473 else if (ISA_HAS_MXHC1)
9474 /* -mips32r2 -mfp64 */
9475 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n",
9477 reg_names[gparg + (WORDS_BIG_ENDIAN ? 1 : 0)],
9479 from_fp_p ? "mfhc1" : "mthc1",
9480 reg_names[gparg + (WORDS_BIG_ENDIAN ? 0 : 1)],
9482 else if (TARGET_BIG_ENDIAN)
9483 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
9484 reg_names[gparg], reg_names[fparg + 1], s,
9485 reg_names[gparg + 1], reg_names[fparg]);
9487 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
9488 reg_names[gparg], reg_names[fparg], s,
9489 reg_names[gparg + 1], reg_names[fparg + 1]);
9491 function_arg_advance (&cum, mode, NULL, true);
9495 /* Build a mips16 function stub. This is used for functions which
9496 take arguments in the floating point registers. It is 32-bit code
9497 that moves the floating point args into the general registers, and
9498 then jumps to the 16-bit code. */
9501 build_mips16_function_stub (FILE *file)
9504 char *secname, *stubname;
9505 tree stubid, stubdecl;
9509 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
9510 secname = (char *) alloca (strlen (fnname) + 20);
9511 sprintf (secname, ".mips16.fn.%s", fnname);
9512 stubname = (char *) alloca (strlen (fnname) + 20);
9513 sprintf (stubname, "__fn_stub_%s", fnname);
9514 stubid = get_identifier (stubname);
9515 stubdecl = build_decl (FUNCTION_DECL, stubid,
9516 build_function_type (void_type_node, NULL_TREE));
9517 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
9518 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
9520 fprintf (file, "\t# Stub function for %s (", current_function_name ());
9522 for (f = (unsigned int) current_function_args_info.fp_code; f != 0; f >>= 2)
9524 fprintf (file, "%s%s",
9525 need_comma ? ", " : "",
9526 (f & 3) == 1 ? "float" : "double");
9529 fprintf (file, ")\n");
9531 fprintf (file, "\t.set\tnomips16\n");
9532 switch_to_section (function_section (stubdecl));
9533 ASM_OUTPUT_ALIGN (file, floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT));
9535 /* ??? If FUNCTION_NAME_ALREADY_DECLARED is defined, then we are
9536 within a .ent, and we cannot emit another .ent. */
9537 if (!FUNCTION_NAME_ALREADY_DECLARED)
9539 fputs ("\t.ent\t", file);
9540 assemble_name (file, stubname);
9544 assemble_name (file, stubname);
9545 fputs (":\n", file);
9547 /* We don't want the assembler to insert any nops here. */
9548 fprintf (file, "\t.set\tnoreorder\n");
9550 mips16_fp_args (file, current_function_args_info.fp_code, 1);
9552 fprintf (asm_out_file, "\t.set\tnoat\n");
9553 fprintf (asm_out_file, "\tla\t%s,", reg_names[GP_REG_FIRST + 1]);
9554 assemble_name (file, fnname);
9555 fprintf (file, "\n");
9556 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
9557 fprintf (asm_out_file, "\t.set\tat\n");
9559 /* Unfortunately, we can't fill the jump delay slot. We can't fill
9560 with one of the mfc1 instructions, because the result is not
9561 available for one instruction, so if the very first instruction
9562 in the function refers to the register, it will see the wrong
9564 fprintf (file, "\tnop\n");
9566 fprintf (file, "\t.set\treorder\n");
9568 if (!FUNCTION_NAME_ALREADY_DECLARED)
9570 fputs ("\t.end\t", file);
9571 assemble_name (file, stubname);
9575 switch_to_section (function_section (current_function_decl));
9578 /* We keep a list of functions for which we have already built stubs
9579 in build_mips16_call_stub. */
9583 struct mips16_stub *next;
9588 static struct mips16_stub *mips16_stubs;
9590 /* Emit code to return a double value from a mips16 stub. GPREG is the
9591 first GP reg to use, FPREG is the first FP reg to use. */
9594 mips16_fpret_double (int gpreg, int fpreg)
9597 fprintf (asm_out_file, "\tdmfc1\t%s,%s\n",
9598 reg_names[gpreg], reg_names[fpreg]);
9599 else if (TARGET_FLOAT64)
9601 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9602 reg_names[gpreg + WORDS_BIG_ENDIAN],
9604 fprintf (asm_out_file, "\tmfhc1\t%s,%s\n",
9605 reg_names[gpreg + !WORDS_BIG_ENDIAN],
9610 if (TARGET_BIG_ENDIAN)
9612 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9613 reg_names[gpreg + 0],
9614 reg_names[fpreg + 1]);
9615 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9616 reg_names[gpreg + 1],
9617 reg_names[fpreg + 0]);
9621 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9622 reg_names[gpreg + 0],
9623 reg_names[fpreg + 0]);
9624 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9625 reg_names[gpreg + 1],
9626 reg_names[fpreg + 1]);
9631 /* Build a call stub for a mips16 call. A stub is needed if we are
9632 passing any floating point values which should go into the floating
9633 point registers. If we are, and the call turns out to be to a
9634 32-bit function, the stub will be used to move the values into the
9635 floating point registers before calling the 32-bit function. The
9636 linker will magically adjust the function call to either the 16-bit
9637 function or the 32-bit stub, depending upon where the function call
9638 is actually defined.
9640 Similarly, we need a stub if the return value might come back in a
9641 floating point register.
9643 RETVAL is the location of the return value, or null if this is
9644 a call rather than a call_value. FN is the address of the
9645 function and ARG_SIZE is the size of the arguments. FP_CODE
9646 is the code built by function_arg. This function returns a nonzero
9647 value if it builds the call instruction itself. */
9650 build_mips16_call_stub (rtx retval, rtx fn, rtx arg_size, int fp_code)
9654 char *secname, *stubname;
9655 struct mips16_stub *l;
9656 tree stubid, stubdecl;
9661 /* We don't need to do anything if we aren't in mips16 mode, or if
9662 we were invoked with the -msoft-float option. */
9663 if (!TARGET_MIPS16 || TARGET_SOFT_FLOAT_ABI)
9666 /* Figure out whether the value might come back in a floating point
9669 fpret = mips_return_mode_in_fpr_p (GET_MODE (retval));
9671 /* We don't need to do anything if there were no floating point
9672 arguments and the value will not be returned in a floating point
9674 if (fp_code == 0 && ! fpret)
9677 /* We don't need to do anything if this is a call to a special
9678 mips16 support function. */
9679 if (GET_CODE (fn) == SYMBOL_REF
9680 && strncmp (XSTR (fn, 0), "__mips16_", 9) == 0)
9683 /* This code will only work for o32 and o64 abis. The other ABI's
9684 require more sophisticated support. */
9685 gcc_assert (TARGET_OLDABI);
9687 /* If we're calling via a function pointer, then we must always call
9688 via a stub. There are magic stubs provided in libgcc.a for each
9689 of the required cases. Each of them expects the function address
9690 to arrive in register $2. */
9692 if (GET_CODE (fn) != SYMBOL_REF)
9698 /* ??? If this code is modified to support other ABI's, we need
9699 to handle PARALLEL return values here. */
9702 sprintf (buf, "__mips16_call_stub_%s_%d",
9703 mips16_call_stub_mode_suffix (GET_MODE (retval)),
9706 sprintf (buf, "__mips16_call_stub_%d",
9709 id = get_identifier (buf);
9710 stub_fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
9712 mips_emit_move (gen_rtx_REG (Pmode, 2), fn);
9714 if (retval == NULL_RTX)
9715 insn = gen_call_internal (stub_fn, arg_size);
9717 insn = gen_call_value_internal (retval, stub_fn, arg_size);
9718 insn = emit_call_insn (insn);
9720 /* Put the register usage information on the CALL. */
9721 CALL_INSN_FUNCTION_USAGE (insn) =
9722 gen_rtx_EXPR_LIST (VOIDmode,
9723 gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 2)),
9724 CALL_INSN_FUNCTION_USAGE (insn));
9726 /* If we are handling a floating point return value, we need to
9727 save $18 in the function prologue. Putting a note on the
9728 call will mean that df_regs_ever_live_p ($18) will be true if the
9729 call is not eliminated, and we can check that in the prologue
9732 CALL_INSN_FUNCTION_USAGE (insn) =
9733 gen_rtx_EXPR_LIST (VOIDmode,
9734 gen_rtx_USE (VOIDmode,
9735 gen_rtx_REG (word_mode, 18)),
9736 CALL_INSN_FUNCTION_USAGE (insn));
9738 /* Return 1 to tell the caller that we've generated the call
9743 /* We know the function we are going to call. If we have already
9744 built a stub, we don't need to do anything further. */
9746 fnname = XSTR (fn, 0);
9747 for (l = mips16_stubs; l != NULL; l = l->next)
9748 if (strcmp (l->name, fnname) == 0)
9753 /* Build a special purpose stub. When the linker sees a
9754 function call in mips16 code, it will check where the target
9755 is defined. If the target is a 32-bit call, the linker will
9756 search for the section defined here. It can tell which
9757 symbol this section is associated with by looking at the
9758 relocation information (the name is unreliable, since this
9759 might be a static function). If such a section is found, the
9760 linker will redirect the call to the start of the magic
9763 If the function does not return a floating point value, the
9764 special stub section is named
9767 If the function does return a floating point value, the stub
9769 .mips16.call.fp.FNNAME
9772 secname = (char *) alloca (strlen (fnname) + 40);
9773 sprintf (secname, ".mips16.call.%s%s",
9776 stubname = (char *) alloca (strlen (fnname) + 20);
9777 sprintf (stubname, "__call_stub_%s%s",
9780 stubid = get_identifier (stubname);
9781 stubdecl = build_decl (FUNCTION_DECL, stubid,
9782 build_function_type (void_type_node, NULL_TREE));
9783 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
9784 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
9786 fprintf (asm_out_file, "\t# Stub function to call %s%s (",
9788 ? (GET_MODE (retval) == SFmode ? "float " : "double ")
9792 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
9794 fprintf (asm_out_file, "%s%s",
9795 need_comma ? ", " : "",
9796 (f & 3) == 1 ? "float" : "double");
9799 fprintf (asm_out_file, ")\n");
9801 fprintf (asm_out_file, "\t.set\tnomips16\n");
9802 assemble_start_function (stubdecl, stubname);
9804 if (!FUNCTION_NAME_ALREADY_DECLARED)
9806 fputs ("\t.ent\t", asm_out_file);
9807 assemble_name (asm_out_file, stubname);
9808 fputs ("\n", asm_out_file);
9810 assemble_name (asm_out_file, stubname);
9811 fputs (":\n", asm_out_file);
9814 /* We build the stub code by hand. That's the only way we can
9815 do it, since we can't generate 32-bit code during a 16-bit
9818 /* We don't want the assembler to insert any nops here. */
9819 fprintf (asm_out_file, "\t.set\tnoreorder\n");
9821 mips16_fp_args (asm_out_file, fp_code, 0);
9825 fprintf (asm_out_file, "\t.set\tnoat\n");
9826 fprintf (asm_out_file, "\tla\t%s,%s\n", reg_names[GP_REG_FIRST + 1],
9828 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
9829 fprintf (asm_out_file, "\t.set\tat\n");
9830 /* Unfortunately, we can't fill the jump delay slot. We
9831 can't fill with one of the mtc1 instructions, because the
9832 result is not available for one instruction, so if the
9833 very first instruction in the function refers to the
9834 register, it will see the wrong value. */
9835 fprintf (asm_out_file, "\tnop\n");
9839 fprintf (asm_out_file, "\tmove\t%s,%s\n",
9840 reg_names[GP_REG_FIRST + 18], reg_names[GP_REG_FIRST + 31]);
9841 fprintf (asm_out_file, "\tjal\t%s\n", fnname);
9842 /* As above, we can't fill the delay slot. */
9843 fprintf (asm_out_file, "\tnop\n");
9844 if (GET_MODE (retval) == SFmode)
9845 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9846 reg_names[GP_REG_FIRST + 2], reg_names[FP_REG_FIRST + 0]);
9847 else if (GET_MODE (retval) == SCmode)
9849 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9850 reg_names[GP_REG_FIRST + 2],
9851 reg_names[FP_REG_FIRST + 0]);
9852 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9853 reg_names[GP_REG_FIRST + 3],
9854 reg_names[FP_REG_FIRST + MAX_FPRS_PER_FMT]);
9856 else if (GET_MODE (retval) == DFmode
9857 || GET_MODE (retval) == V2SFmode)
9859 mips16_fpret_double (GP_REG_FIRST + 2, FP_REG_FIRST + 0);
9861 else if (GET_MODE (retval) == DCmode)
9863 mips16_fpret_double (GP_REG_FIRST + 2,
9865 mips16_fpret_double (GP_REG_FIRST + 4,
9866 FP_REG_FIRST + MAX_FPRS_PER_FMT);
9870 if (TARGET_BIG_ENDIAN)
9872 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9873 reg_names[GP_REG_FIRST + 2],
9874 reg_names[FP_REG_FIRST + 1]);
9875 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9876 reg_names[GP_REG_FIRST + 3],
9877 reg_names[FP_REG_FIRST + 0]);
9881 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9882 reg_names[GP_REG_FIRST + 2],
9883 reg_names[FP_REG_FIRST + 0]);
9884 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9885 reg_names[GP_REG_FIRST + 3],
9886 reg_names[FP_REG_FIRST + 1]);
9889 fprintf (asm_out_file, "\tj\t%s\n", reg_names[GP_REG_FIRST + 18]);
9890 /* As above, we can't fill the delay slot. */
9891 fprintf (asm_out_file, "\tnop\n");
9894 fprintf (asm_out_file, "\t.set\treorder\n");
9896 #ifdef ASM_DECLARE_FUNCTION_SIZE
9897 ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
9900 if (!FUNCTION_NAME_ALREADY_DECLARED)
9902 fputs ("\t.end\t", asm_out_file);
9903 assemble_name (asm_out_file, stubname);
9904 fputs ("\n", asm_out_file);
9907 /* Record this stub. */
9908 l = (struct mips16_stub *) xmalloc (sizeof *l);
9909 l->name = xstrdup (fnname);
9911 l->next = mips16_stubs;
9915 /* If we expect a floating point return value, but we've built a
9916 stub which does not expect one, then we're in trouble. We can't
9917 use the existing stub, because it won't handle the floating point
9918 value. We can't build a new stub, because the linker won't know
9919 which stub to use for the various calls in this object file.
9920 Fortunately, this case is illegal, since it means that a function
9921 was declared in two different ways in a single compilation. */
9922 if (fpret && ! l->fpret)
9923 error ("cannot handle inconsistent calls to %qs", fnname);
9925 if (retval == NULL_RTX)
9926 insn = gen_call_internal_direct (fn, arg_size);
9928 insn = gen_call_value_internal_direct (retval, fn, arg_size);
9929 insn = emit_call_insn (insn);
9931 /* If we are calling a stub which handles a floating point return
9932 value, we need to arrange to save $18 in the prologue. We do
9933 this by marking the function call as using the register. The
9934 prologue will later see that it is used, and emit code to save
9937 CALL_INSN_FUNCTION_USAGE (insn) =
9938 gen_rtx_EXPR_LIST (VOIDmode,
9939 gen_rtx_USE (VOIDmode, gen_rtx_REG (word_mode, 18)),
9940 CALL_INSN_FUNCTION_USAGE (insn));
9942 /* Return 1 to tell the caller that we've generated the call
9947 /* An entry in the mips16 constant pool. VALUE is the pool constant,
9948 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
9950 struct mips16_constant {
9951 struct mips16_constant *next;
9954 enum machine_mode mode;
9957 /* Information about an incomplete mips16 constant pool. FIRST is the
9958 first constant, HIGHEST_ADDRESS is the highest address that the first
9959 byte of the pool can have, and INSN_ADDRESS is the current instruction
9962 struct mips16_constant_pool {
9963 struct mips16_constant *first;
9964 int highest_address;
9968 /* Add constant VALUE to POOL and return its label. MODE is the
9969 value's mode (used for CONST_INTs, etc.). */
9972 add_constant (struct mips16_constant_pool *pool,
9973 rtx value, enum machine_mode mode)
9975 struct mips16_constant **p, *c;
9976 bool first_of_size_p;
9978 /* See whether the constant is already in the pool. If so, return the
9979 existing label, otherwise leave P pointing to the place where the
9980 constant should be added.
9982 Keep the pool sorted in increasing order of mode size so that we can
9983 reduce the number of alignments needed. */
9984 first_of_size_p = true;
9985 for (p = &pool->first; *p != 0; p = &(*p)->next)
9987 if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
9989 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
9991 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
9992 first_of_size_p = false;
9995 /* In the worst case, the constant needed by the earliest instruction
9996 will end up at the end of the pool. The entire pool must then be
9997 accessible from that instruction.
9999 When adding the first constant, set the pool's highest address to
10000 the address of the first out-of-range byte. Adjust this address
10001 downwards each time a new constant is added. */
10002 if (pool->first == 0)
10003 /* For pc-relative lw, addiu and daddiu instructions, the base PC value
10004 is the address of the instruction with the lowest two bits clear.
10005 The base PC value for ld has the lowest three bits clear. Assume
10006 the worst case here. */
10007 pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
10008 pool->highest_address -= GET_MODE_SIZE (mode);
10009 if (first_of_size_p)
10010 /* Take into account the worst possible padding due to alignment. */
10011 pool->highest_address -= GET_MODE_SIZE (mode) - 1;
10013 /* Create a new entry. */
10014 c = (struct mips16_constant *) xmalloc (sizeof *c);
10017 c->label = gen_label_rtx ();
10024 /* Output constant VALUE after instruction INSN and return the last
10025 instruction emitted. MODE is the mode of the constant. */
10028 dump_constants_1 (enum machine_mode mode, rtx value, rtx insn)
10030 switch (GET_MODE_CLASS (mode))
10034 rtx size = GEN_INT (GET_MODE_SIZE (mode));
10035 return emit_insn_after (gen_consttable_int (value, size), insn);
10039 return emit_insn_after (gen_consttable_float (value), insn);
10041 case MODE_VECTOR_FLOAT:
10042 case MODE_VECTOR_INT:
10045 for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
10046 insn = dump_constants_1 (GET_MODE_INNER (mode),
10047 CONST_VECTOR_ELT (value, i), insn);
10052 gcc_unreachable ();
10057 /* Dump out the constants in CONSTANTS after INSN. */
10060 dump_constants (struct mips16_constant *constants, rtx insn)
10062 struct mips16_constant *c, *next;
10066 for (c = constants; c != NULL; c = next)
10068 /* If necessary, increase the alignment of PC. */
10069 if (align < GET_MODE_SIZE (c->mode))
10071 int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
10072 insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
10074 align = GET_MODE_SIZE (c->mode);
10076 insn = emit_label_after (c->label, insn);
10077 insn = dump_constants_1 (c->mode, c->value, insn);
10083 emit_barrier_after (insn);
10086 /* Return the length of instruction INSN. */
10089 mips16_insn_length (rtx insn)
10093 rtx body = PATTERN (insn);
10094 if (GET_CODE (body) == ADDR_VEC)
10095 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
10096 if (GET_CODE (body) == ADDR_DIFF_VEC)
10097 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
10099 return get_attr_length (insn);
10102 /* Rewrite *X so that constant pool references refer to the constant's
10103 label instead. DATA points to the constant pool structure. */
10106 mips16_rewrite_pool_refs (rtx *x, void *data)
10108 struct mips16_constant_pool *pool = data;
10109 rtx base, offset, label;
10113 else if (!TARGET_MIPS16_TEXT_LOADS)
10116 split_const (*x, &base, &offset);
10117 if (GET_CODE (base) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (base))
10119 label = add_constant (pool, get_pool_constant (base),
10120 get_pool_mode (base));
10121 base = gen_rtx_LABEL_REF (Pmode, label);
10122 *x = mips_unspec_address_offset (base, offset, SYMBOL_PC_RELATIVE);
10125 return GET_CODE (*x) == CONST ? -1 : 0;
10128 /* Build MIPS16 constant pools. */
10131 mips16_lay_out_constants (void)
10133 struct mips16_constant_pool pool;
10136 if (!TARGET_MIPS16_PCREL_LOADS)
10140 memset (&pool, 0, sizeof (pool));
10141 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10143 /* Rewrite constant pool references in INSN. */
10145 for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &pool);
10147 pool.insn_address += mips16_insn_length (insn);
10149 if (pool.first != NULL)
10151 /* If there are no natural barriers between the first user of
10152 the pool and the highest acceptable address, we'll need to
10153 create a new instruction to jump around the constant pool.
10154 In the worst case, this instruction will be 4 bytes long.
10156 If it's too late to do this transformation after INSN,
10157 do it immediately before INSN. */
10158 if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
10162 label = gen_label_rtx ();
10164 jump = emit_jump_insn_before (gen_jump (label), insn);
10165 JUMP_LABEL (jump) = label;
10166 LABEL_NUSES (label) = 1;
10167 barrier = emit_barrier_after (jump);
10169 emit_label_after (label, barrier);
10170 pool.insn_address += 4;
10173 /* See whether the constant pool is now out of range of the first
10174 user. If so, output the constants after the previous barrier.
10175 Note that any instructions between BARRIER and INSN (inclusive)
10176 will use negative offsets to refer to the pool. */
10177 if (pool.insn_address > pool.highest_address)
10179 dump_constants (pool.first, barrier);
10183 else if (BARRIER_P (insn))
10187 dump_constants (pool.first, get_last_insn ());
10190 /* A temporary variable used by for_each_rtx callbacks, etc. */
10191 static rtx mips_sim_insn;
10193 /* A structure representing the state of the processor pipeline.
10194 Used by the mips_sim_* family of functions. */
10196 /* The maximum number of instructions that can be issued in a cycle.
10197 (Caches mips_issue_rate.) */
10198 unsigned int issue_rate;
10200 /* The current simulation time. */
10203 /* How many more instructions can be issued in the current cycle. */
10204 unsigned int insns_left;
10206 /* LAST_SET[X].INSN is the last instruction to set register X.
10207 LAST_SET[X].TIME is the time at which that instruction was issued.
10208 INSN is null if no instruction has yet set register X. */
10212 } last_set[FIRST_PSEUDO_REGISTER];
10214 /* The pipeline's current DFA state. */
10218 /* Reset STATE to the initial simulation state. */
10221 mips_sim_reset (struct mips_sim *state)
10224 state->insns_left = state->issue_rate;
10225 memset (&state->last_set, 0, sizeof (state->last_set));
10226 state_reset (state->dfa_state);
10229 /* Initialize STATE before its first use. DFA_STATE points to an
10230 allocated but uninitialized DFA state. */
10233 mips_sim_init (struct mips_sim *state, state_t dfa_state)
10235 state->issue_rate = mips_issue_rate ();
10236 state->dfa_state = dfa_state;
10237 mips_sim_reset (state);
10240 /* Advance STATE by one clock cycle. */
10243 mips_sim_next_cycle (struct mips_sim *state)
10246 state->insns_left = state->issue_rate;
10247 state_transition (state->dfa_state, 0);
10250 /* Advance simulation state STATE until instruction INSN can read
10254 mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
10258 for (i = 0; i < HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)); i++)
10259 if (state->last_set[REGNO (reg) + i].insn != 0)
10263 t = state->last_set[REGNO (reg) + i].time;
10264 t += insn_latency (state->last_set[REGNO (reg) + i].insn, insn);
10265 while (state->time < t)
10266 mips_sim_next_cycle (state);
10270 /* A for_each_rtx callback. If *X is a register, advance simulation state
10271 DATA until mips_sim_insn can read the register's value. */
10274 mips_sim_wait_regs_2 (rtx *x, void *data)
10277 mips_sim_wait_reg (data, mips_sim_insn, *x);
10281 /* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X. */
10284 mips_sim_wait_regs_1 (rtx *x, void *data)
10286 for_each_rtx (x, mips_sim_wait_regs_2, data);
10289 /* Advance simulation state STATE until all of INSN's register
10290 dependencies are satisfied. */
10293 mips_sim_wait_regs (struct mips_sim *state, rtx insn)
10295 mips_sim_insn = insn;
10296 note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
10299 /* Advance simulation state STATE until the units required by
10300 instruction INSN are available. */
10303 mips_sim_wait_units (struct mips_sim *state, rtx insn)
10307 tmp_state = alloca (state_size ());
10308 while (state->insns_left == 0
10309 || (memcpy (tmp_state, state->dfa_state, state_size ()),
10310 state_transition (tmp_state, insn) >= 0))
10311 mips_sim_next_cycle (state);
10314 /* Advance simulation state STATE until INSN is ready to issue. */
10317 mips_sim_wait_insn (struct mips_sim *state, rtx insn)
10319 mips_sim_wait_regs (state, insn);
10320 mips_sim_wait_units (state, insn);
10323 /* mips_sim_insn has just set X. Update the LAST_SET array
10324 in simulation state DATA. */
10327 mips_sim_record_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
10329 struct mips_sim *state;
10334 for (i = 0; i < HARD_REGNO_NREGS (REGNO (x), GET_MODE (x)); i++)
10336 state->last_set[REGNO (x) + i].insn = mips_sim_insn;
10337 state->last_set[REGNO (x) + i].time = state->time;
10341 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
10342 can issue immediately (i.e., that mips_sim_wait_insn has already
10346 mips_sim_issue_insn (struct mips_sim *state, rtx insn)
10348 state_transition (state->dfa_state, insn);
10349 state->insns_left--;
10351 mips_sim_insn = insn;
10352 note_stores (PATTERN (insn), mips_sim_record_set, state);
10355 /* Simulate issuing a NOP in state STATE. */
10358 mips_sim_issue_nop (struct mips_sim *state)
10360 if (state->insns_left == 0)
10361 mips_sim_next_cycle (state);
10362 state->insns_left--;
10365 /* Update simulation state STATE so that it's ready to accept the instruction
10366 after INSN. INSN should be part of the main rtl chain, not a member of a
10370 mips_sim_finish_insn (struct mips_sim *state, rtx insn)
10372 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
10374 mips_sim_issue_nop (state);
10376 switch (GET_CODE (SEQ_BEGIN (insn)))
10380 /* We can't predict the processor state after a call or label. */
10381 mips_sim_reset (state);
10385 /* The delay slots of branch likely instructions are only executed
10386 when the branch is taken. Therefore, if the caller has simulated
10387 the delay slot instruction, STATE does not really reflect the state
10388 of the pipeline for the instruction after the delay slot. Also,
10389 branch likely instructions tend to incur a penalty when not taken,
10390 so there will probably be an extra delay between the branch and
10391 the instruction after the delay slot. */
10392 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
10393 mips_sim_reset (state);
10401 /* The VR4130 pipeline issues aligned pairs of instructions together,
10402 but it stalls the second instruction if it depends on the first.
10403 In order to cut down the amount of logic required, this dependence
10404 check is not based on a full instruction decode. Instead, any non-SPECIAL
10405 instruction is assumed to modify the register specified by bits 20-16
10406 (which is usually the "rt" field).
10408 In beq, beql, bne and bnel instructions, the rt field is actually an
10409 input, so we can end up with a false dependence between the branch
10410 and its delay slot. If this situation occurs in instruction INSN,
10411 try to avoid it by swapping rs and rt. */
10414 vr4130_avoid_branch_rt_conflict (rtx insn)
10418 first = SEQ_BEGIN (insn);
10419 second = SEQ_END (insn);
10421 && NONJUMP_INSN_P (second)
10422 && GET_CODE (PATTERN (first)) == SET
10423 && GET_CODE (SET_DEST (PATTERN (first))) == PC
10424 && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
10426 /* Check for the right kind of condition. */
10427 rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
10428 if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
10429 && REG_P (XEXP (cond, 0))
10430 && REG_P (XEXP (cond, 1))
10431 && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
10432 && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
10434 /* SECOND mentions the rt register but not the rs register. */
10435 rtx tmp = XEXP (cond, 0);
10436 XEXP (cond, 0) = XEXP (cond, 1);
10437 XEXP (cond, 1) = tmp;
10442 /* Implement -mvr4130-align. Go through each basic block and simulate the
10443 processor pipeline. If we find that a pair of instructions could execute
10444 in parallel, and the first of those instruction is not 8-byte aligned,
10445 insert a nop to make it aligned. */
10448 vr4130_align_insns (void)
10450 struct mips_sim state;
10451 rtx insn, subinsn, last, last2, next;
10456 /* LAST is the last instruction before INSN to have a nonzero length.
10457 LAST2 is the last such instruction before LAST. */
10461 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
10464 mips_sim_init (&state, alloca (state_size ()));
10465 for (insn = get_insns (); insn != 0; insn = next)
10467 unsigned int length;
10469 next = NEXT_INSN (insn);
10471 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
10472 This isn't really related to the alignment pass, but we do it on
10473 the fly to avoid a separate instruction walk. */
10474 vr4130_avoid_branch_rt_conflict (insn);
10476 if (USEFUL_INSN_P (insn))
10477 FOR_EACH_SUBINSN (subinsn, insn)
10479 mips_sim_wait_insn (&state, subinsn);
10481 /* If we want this instruction to issue in parallel with the
10482 previous one, make sure that the previous instruction is
10483 aligned. There are several reasons why this isn't worthwhile
10484 when the second instruction is a call:
10486 - Calls are less likely to be performance critical,
10487 - There's a good chance that the delay slot can execute
10488 in parallel with the call.
10489 - The return address would then be unaligned.
10491 In general, if we're going to insert a nop between instructions
10492 X and Y, it's better to insert it immediately after X. That
10493 way, if the nop makes Y aligned, it will also align any labels
10494 between X and Y. */
10495 if (state.insns_left != state.issue_rate
10496 && !CALL_P (subinsn))
10498 if (subinsn == SEQ_BEGIN (insn) && aligned_p)
10500 /* SUBINSN is the first instruction in INSN and INSN is
10501 aligned. We want to align the previous instruction
10502 instead, so insert a nop between LAST2 and LAST.
10504 Note that LAST could be either a single instruction
10505 or a branch with a delay slot. In the latter case,
10506 LAST, like INSN, is already aligned, but the delay
10507 slot must have some extra delay that stops it from
10508 issuing at the same time as the branch. We therefore
10509 insert a nop before the branch in order to align its
10511 emit_insn_after (gen_nop (), last2);
10514 else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
10516 /* SUBINSN is the delay slot of INSN, but INSN is
10517 currently unaligned. Insert a nop between
10518 LAST and INSN to align it. */
10519 emit_insn_after (gen_nop (), last);
10523 mips_sim_issue_insn (&state, subinsn);
10525 mips_sim_finish_insn (&state, insn);
10527 /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
10528 length = get_attr_length (insn);
10531 /* If the instruction is an asm statement or multi-instruction
10532 mips.md patern, the length is only an estimate. Insert an
10533 8 byte alignment after it so that the following instructions
10534 can be handled correctly. */
10535 if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
10536 && (recog_memoized (insn) < 0 || length >= 8))
10538 next = emit_insn_after (gen_align (GEN_INT (3)), insn);
10539 next = NEXT_INSN (next);
10540 mips_sim_next_cycle (&state);
10543 else if (length & 4)
10544 aligned_p = !aligned_p;
10549 /* See whether INSN is an aligned label. */
10550 if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
10556 /* Subroutine of mips_reorg. If there is a hazard between INSN
10557 and a previous instruction, avoid it by inserting nops after
10560 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
10561 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
10562 before using the value of that register. *HILO_DELAY counts the
10563 number of instructions since the last hilo hazard (that is,
10564 the number of instructions since the last mflo or mfhi).
10566 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
10567 for the next instruction.
10569 LO_REG is an rtx for the LO register, used in dependence checking. */
10572 mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
10573 rtx *delayed_reg, rtx lo_reg)
10578 if (!INSN_P (insn))
10581 pattern = PATTERN (insn);
10583 /* Do not put the whole function in .set noreorder if it contains
10584 an asm statement. We don't know whether there will be hazards
10585 between the asm statement and the gcc-generated code. */
10586 if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
10587 cfun->machine->all_noreorder_p = false;
10589 /* Ignore zero-length instructions (barriers and the like). */
10590 ninsns = get_attr_length (insn) / 4;
10594 /* Work out how many nops are needed. Note that we only care about
10595 registers that are explicitly mentioned in the instruction's pattern.
10596 It doesn't matter that calls use the argument registers or that they
10597 clobber hi and lo. */
10598 if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
10599 nops = 2 - *hilo_delay;
10600 else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
10605 /* Insert the nops between this instruction and the previous one.
10606 Each new nop takes us further from the last hilo hazard. */
10607 *hilo_delay += nops;
10609 emit_insn_after (gen_hazard_nop (), after);
10611 /* Set up the state for the next instruction. */
10612 *hilo_delay += ninsns;
10614 if (INSN_CODE (insn) >= 0)
10615 switch (get_attr_hazard (insn))
10625 set = single_set (insn);
10626 gcc_assert (set != 0);
10627 *delayed_reg = SET_DEST (set);
10633 /* Go through the instruction stream and insert nops where necessary.
10634 See if the whole function can then be put into .set noreorder &
10638 mips_avoid_hazards (void)
10640 rtx insn, last_insn, lo_reg, delayed_reg;
10643 /* Force all instructions to be split into their final form. */
10644 split_all_insns_noflow ();
10646 /* Recalculate instruction lengths without taking nops into account. */
10647 cfun->machine->ignore_hazard_length_p = true;
10648 shorten_branches (get_insns ());
10650 cfun->machine->all_noreorder_p = true;
10652 /* Profiled functions can't be all noreorder because the profiler
10653 support uses assembler macros. */
10654 if (current_function_profile)
10655 cfun->machine->all_noreorder_p = false;
10657 /* Code compiled with -mfix-vr4120 can't be all noreorder because
10658 we rely on the assembler to work around some errata. */
10659 if (TARGET_FIX_VR4120)
10660 cfun->machine->all_noreorder_p = false;
10662 /* The same is true for -mfix-vr4130 if we might generate mflo or
10663 mfhi instructions. Note that we avoid using mflo and mfhi if
10664 the VR4130 macc and dmacc instructions are available instead;
10665 see the *mfhilo_{si,di}_macc patterns. */
10666 if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
10667 cfun->machine->all_noreorder_p = false;
10672 lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
10674 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
10677 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
10678 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
10679 mips_avoid_hazard (last_insn, XVECEXP (PATTERN (insn), 0, i),
10680 &hilo_delay, &delayed_reg, lo_reg);
10682 mips_avoid_hazard (last_insn, insn, &hilo_delay,
10683 &delayed_reg, lo_reg);
10690 /* Implement TARGET_MACHINE_DEPENDENT_REORG. */
10695 mips16_lay_out_constants ();
10696 if (TARGET_EXPLICIT_RELOCS)
10698 if (mips_flag_delayed_branch)
10699 dbr_schedule (get_insns ());
10700 mips_avoid_hazards ();
10701 if (TUNE_MIPS4130 && TARGET_VR4130_ALIGN)
10702 vr4130_align_insns ();
10706 /* This function does three things:
10708 - Register the special divsi3 and modsi3 functions if -mfix-vr4120.
10709 - Register the mips16 hardware floating point stubs.
10710 - Register the gofast functions if selected using --enable-gofast. */
10712 #include "config/gofast.h"
10715 mips_init_libfuncs (void)
10717 if (TARGET_FIX_VR4120)
10719 set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
10720 set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
10723 if (TARGET_MIPS16 && TARGET_HARD_FLOAT_ABI)
10725 set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
10726 set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
10727 set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
10728 set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");
10730 set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
10731 set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
10732 set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
10733 set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
10734 set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
10735 set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
10736 set_optab_libfunc (unord_optab, SFmode, "__mips16_unordsf2");
10738 set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
10739 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
10740 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__mips16_floatunsisf");
10742 if (TARGET_DOUBLE_FLOAT)
10744 set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
10745 set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
10746 set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
10747 set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");
10749 set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
10750 set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
10751 set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
10752 set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
10753 set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
10754 set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
10755 set_optab_libfunc (unord_optab, DFmode, "__mips16_unorddf2");
10757 set_conv_libfunc (sext_optab, DFmode, SFmode, "__mips16_extendsfdf2");
10758 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__mips16_truncdfsf2");
10760 set_conv_libfunc (sfix_optab, SImode, DFmode, "__mips16_fix_truncdfsi");
10761 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__mips16_floatsidf");
10762 set_conv_libfunc (ufloat_optab, DFmode, SImode, "__mips16_floatunsidf");
10766 gofast_maybe_init_libfuncs ();
10769 /* Return a number assessing the cost of moving a register in class
10770 FROM to class TO. The classes are expressed using the enumeration
10771 values such as `GENERAL_REGS'. A value of 2 is the default; other
10772 values are interpreted relative to that.
10774 It is not required that the cost always equal 2 when FROM is the
10775 same as TO; on some machines it is expensive to move between
10776 registers if they are not general registers.
10778 If reload sees an insn consisting of a single `set' between two
10779 hard registers, and if `REGISTER_MOVE_COST' applied to their
10780 classes returns a value of 2, reload does not check to ensure that
10781 the constraints of the insn are met. Setting a cost of other than
10782 2 will allow reload to verify that the constraints are met. You
10783 should do this if the `movM' pattern's constraints do not allow
10786 ??? We make the cost of moving from HI/LO into general
10787 registers the same as for one of moving general registers to
10788 HI/LO for TARGET_MIPS16 in order to prevent allocating a
10789 pseudo to HI/LO. This might hurt optimizations though, it
10790 isn't clear if it is wise. And it might not work in all cases. We
10791 could solve the DImode LO reg problem by using a multiply, just
10792 like reload_{in,out}si. We could solve the SImode/HImode HI reg
10793 problem by using divide instructions. divu puts the remainder in
10794 the HI reg, so doing a divide by -1 will move the value in the HI
10795 reg for all values except -1. We could handle that case by using a
10796 signed divide, e.g. -1 / 2 (or maybe 1 / -2?). We'd have to emit
10797 a compare/branch to test the input value to see which instruction
10798 we need to use. This gets pretty messy, but it is feasible. */
10801 mips_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
10802 enum reg_class to, enum reg_class from)
10804 if (from == M16_REGS && reg_class_subset_p (to, GENERAL_REGS))
10806 else if (from == M16_NA_REGS && reg_class_subset_p (to, GENERAL_REGS))
10808 else if (reg_class_subset_p (from, GENERAL_REGS))
10810 if (to == M16_REGS)
10812 else if (to == M16_NA_REGS)
10814 else if (reg_class_subset_p (to, GENERAL_REGS))
10821 else if (to == FP_REGS)
10823 else if (reg_class_subset_p (to, ACC_REGS))
10830 else if (reg_class_subset_p (to, ALL_COP_REGS))
10835 else if (from == FP_REGS)
10837 if (reg_class_subset_p (to, GENERAL_REGS))
10839 else if (to == FP_REGS)
10841 else if (to == ST_REGS)
10844 else if (reg_class_subset_p (from, ACC_REGS))
10846 if (reg_class_subset_p (to, GENERAL_REGS))
10854 else if (from == ST_REGS && reg_class_subset_p (to, GENERAL_REGS))
10856 else if (reg_class_subset_p (from, ALL_COP_REGS))
10862 ??? What cases are these? Shouldn't we return 2 here? */
10867 /* Return the length of INSN. LENGTH is the initial length computed by
10868 attributes in the machine-description file. */
10871 mips_adjust_insn_length (rtx insn, int length)
10873 /* A unconditional jump has an unfilled delay slot if it is not part
10874 of a sequence. A conditional jump normally has a delay slot, but
10875 does not on MIPS16. */
10876 if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
10879 /* See how many nops might be needed to avoid hardware hazards. */
10880 if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
10881 switch (get_attr_hazard (insn))
10895 /* All MIPS16 instructions are a measly two bytes. */
10903 /* Return an asm sequence to start a noat block and load the address
10904 of a label into $1. */
10907 mips_output_load_label (void)
10909 if (TARGET_EXPLICIT_RELOCS)
10913 return "%[lw\t%@,%%got_page(%0)(%+)\n\taddiu\t%@,%@,%%got_ofst(%0)";
10916 return "%[ld\t%@,%%got_page(%0)(%+)\n\tdaddiu\t%@,%@,%%got_ofst(%0)";
10919 if (ISA_HAS_LOAD_DELAY)
10920 return "%[lw\t%@,%%got(%0)(%+)%#\n\taddiu\t%@,%@,%%lo(%0)";
10921 return "%[lw\t%@,%%got(%0)(%+)\n\taddiu\t%@,%@,%%lo(%0)";
10925 if (Pmode == DImode)
10926 return "%[dla\t%@,%0";
10928 return "%[la\t%@,%0";
10932 /* Return the assembly code for INSN, which has the operands given by
10933 OPERANDS, and which branches to OPERANDS[1] if some condition is true.
10934 BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[1]
10935 is in range of a direct branch. BRANCH_IF_FALSE is an inverted
10936 version of BRANCH_IF_TRUE. */
10939 mips_output_conditional_branch (rtx insn, rtx *operands,
10940 const char *branch_if_true,
10941 const char *branch_if_false)
10943 unsigned int length;
10944 rtx taken, not_taken;
10946 length = get_attr_length (insn);
10949 /* Just a simple conditional branch. */
10950 mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
10951 return branch_if_true;
10954 /* Generate a reversed branch around a direct jump. This fallback does
10955 not use branch-likely instructions. */
10956 mips_branch_likely = false;
10957 not_taken = gen_label_rtx ();
10958 taken = operands[1];
10960 /* Generate the reversed branch to NOT_TAKEN. */
10961 operands[1] = not_taken;
10962 output_asm_insn (branch_if_false, operands);
10964 /* If INSN has a delay slot, we must provide delay slots for both the
10965 branch to NOT_TAKEN and the conditional jump. We must also ensure
10966 that INSN's delay slot is executed in the appropriate cases. */
10967 if (final_sequence)
10969 /* This first delay slot will always be executed, so use INSN's
10970 delay slot if is not annulled. */
10971 if (!INSN_ANNULLED_BRANCH_P (insn))
10973 final_scan_insn (XVECEXP (final_sequence, 0, 1),
10974 asm_out_file, optimize, 1, NULL);
10975 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
10978 output_asm_insn ("nop", 0);
10979 fprintf (asm_out_file, "\n");
10982 /* Output the unconditional branch to TAKEN. */
10984 output_asm_insn ("j\t%0%/", &taken);
10987 output_asm_insn (mips_output_load_label (), &taken);
10988 output_asm_insn ("jr\t%@%]%/", 0);
10991 /* Now deal with its delay slot; see above. */
10992 if (final_sequence)
10994 /* This delay slot will only be executed if the branch is taken.
10995 Use INSN's delay slot if is annulled. */
10996 if (INSN_ANNULLED_BRANCH_P (insn))
10998 final_scan_insn (XVECEXP (final_sequence, 0, 1),
10999 asm_out_file, optimize, 1, NULL);
11000 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
11003 output_asm_insn ("nop", 0);
11004 fprintf (asm_out_file, "\n");
11007 /* Output NOT_TAKEN. */
11008 (*targetm.asm_out.internal_label) (asm_out_file, "L",
11009 CODE_LABEL_NUMBER (not_taken));
11013 /* Return the assembly code for INSN, which branches to OPERANDS[1]
11014 if some ordered condition is true. The condition is given by
11015 OPERANDS[0] if !INVERTED_P, otherwise it is the inverse of
11016 OPERANDS[0]. OPERANDS[2] is the comparison's first operand;
11017 its second is always zero. */
11020 mips_output_order_conditional_branch (rtx insn, rtx *operands, bool inverted_p)
11022 const char *branch[2];
11024 /* Make BRANCH[1] branch to OPERANDS[1] when the condition is true.
11025 Make BRANCH[0] branch on the inverse condition. */
11026 switch (GET_CODE (operands[0]))
11028 /* These cases are equivalent to comparisons against zero. */
11030 inverted_p = !inverted_p;
11031 /* Fall through. */
11033 branch[!inverted_p] = MIPS_BRANCH ("bne", "%2,%.,%1");
11034 branch[inverted_p] = MIPS_BRANCH ("beq", "%2,%.,%1");
11037 /* These cases are always true or always false. */
11039 inverted_p = !inverted_p;
11040 /* Fall through. */
11042 branch[!inverted_p] = MIPS_BRANCH ("beq", "%.,%.,%1");
11043 branch[inverted_p] = MIPS_BRANCH ("bne", "%.,%.,%1");
11047 branch[!inverted_p] = MIPS_BRANCH ("b%C0z", "%2,%1");
11048 branch[inverted_p] = MIPS_BRANCH ("b%N0z", "%2,%1");
11051 return mips_output_conditional_branch (insn, operands, branch[1], branch[0]);
11054 /* Used to output div or ddiv instruction DIVISION, which has the operands
11055 given by OPERANDS. Add in a divide-by-zero check if needed.
11057 When working around R4000 and R4400 errata, we need to make sure that
11058 the division is not immediately followed by a shift[1][2]. We also
11059 need to stop the division from being put into a branch delay slot[3].
11060 The easiest way to avoid both problems is to add a nop after the
11061 division. When a divide-by-zero check is needed, this nop can be
11062 used to fill the branch delay slot.
11064 [1] If a double-word or a variable shift executes immediately
11065 after starting an integer division, the shift may give an
11066 incorrect result. See quotations of errata #16 and #28 from
11067 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
11068 in mips.md for details.
11070 [2] A similar bug to [1] exists for all revisions of the
11071 R4000 and the R4400 when run in an MC configuration.
11072 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
11074 "19. In this following sequence:
11076 ddiv (or ddivu or div or divu)
11077 dsll32 (or dsrl32, dsra32)
11079 if an MPT stall occurs, while the divide is slipping the cpu
11080 pipeline, then the following double shift would end up with an
11083 Workaround: The compiler needs to avoid generating any
11084 sequence with divide followed by extended double shift."
11086 This erratum is also present in "MIPS R4400MC Errata, Processor
11087 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
11088 & 3.0" as errata #10 and #4, respectively.
11090 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
11091 (also valid for MIPS R4000MC processors):
11093 "52. R4000SC: This bug does not apply for the R4000PC.
11095 There are two flavors of this bug:
11097 1) If the instruction just after divide takes an RF exception
11098 (tlb-refill, tlb-invalid) and gets an instruction cache
11099 miss (both primary and secondary) and the line which is
11100 currently in secondary cache at this index had the first
11101 data word, where the bits 5..2 are set, then R4000 would
11102 get a wrong result for the div.
11107 ------------------- # end-of page. -tlb-refill
11112 ------------------- # end-of page. -tlb-invalid
11115 2) If the divide is in the taken branch delay slot, where the
11116 target takes RF exception and gets an I-cache miss for the
11117 exception vector or where I-cache miss occurs for the
11118 target address, under the above mentioned scenarios, the
11119 div would get wrong results.
11122 j r2 # to next page mapped or unmapped
11123 div r8,r9 # this bug would be there as long
11124 # as there is an ICache miss and
11125 nop # the "data pattern" is present
11128 beq r0, r0, NextPage # to Next page
11132 This bug is present for div, divu, ddiv, and ddivu
11135 Workaround: For item 1), OS could make sure that the next page
11136 after the divide instruction is also mapped. For item 2), the
11137 compiler could make sure that the divide instruction is not in
11138 the branch delay slot."
11140 These processors have PRId values of 0x00004220 and 0x00004300 for
11141 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
11144 mips_output_division (const char *division, rtx *operands)
11149 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
11151 output_asm_insn (s, operands);
11154 if (TARGET_CHECK_ZERO_DIV)
11158 output_asm_insn (s, operands);
11159 s = "bnez\t%2,1f\n\tbreak\t7\n1:";
11161 else if (GENERATE_DIVIDE_TRAPS)
11163 output_asm_insn (s, operands);
11164 s = "teq\t%2,%.,7";
11168 output_asm_insn ("%(bne\t%2,%.,1f", operands);
11169 output_asm_insn (s, operands);
11170 s = "break\t7%)\n1:";
11176 /* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
11177 with a final "000" replaced by "k". Ignore case.
11179 Note: this function is shared between GCC and GAS. */
11182 mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
11184 while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
11185 given++, canonical++;
11187 return ((*given == 0 && *canonical == 0)
11188 || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
11192 /* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
11193 CPU name. We've traditionally allowed a lot of variation here.
11195 Note: this function is shared between GCC and GAS. */
11198 mips_matching_cpu_name_p (const char *canonical, const char *given)
11200 /* First see if the name matches exactly, or with a final "000"
11201 turned into "k". */
11202 if (mips_strict_matching_cpu_name_p (canonical, given))
11205 /* If not, try comparing based on numerical designation alone.
11206 See if GIVEN is an unadorned number, or 'r' followed by a number. */
11207 if (TOLOWER (*given) == 'r')
11209 if (!ISDIGIT (*given))
11212 /* Skip over some well-known prefixes in the canonical name,
11213 hoping to find a number there too. */
11214 if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
11216 else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
11218 else if (TOLOWER (canonical[0]) == 'r')
11221 return mips_strict_matching_cpu_name_p (canonical, given);
11225 /* Return the mips_cpu_info entry for the processor or ISA given
11226 by CPU_STRING. Return null if the string isn't recognized.
11228 A similar function exists in GAS. */
11230 static const struct mips_cpu_info *
11231 mips_parse_cpu (const char *cpu_string)
11233 const struct mips_cpu_info *p;
11236 /* In the past, we allowed upper-case CPU names, but it doesn't
11237 work well with the multilib machinery. */
11238 for (s = cpu_string; *s != 0; s++)
11241 warning (0, "the cpu name must be lower case");
11245 /* 'from-abi' selects the most compatible architecture for the given
11246 ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs. For the
11247 EABIs, we have to decide whether we're using the 32-bit or 64-bit
11248 version. Look first at the -mgp options, if given, otherwise base
11249 the choice on MASK_64BIT in TARGET_DEFAULT. */
11250 if (strcasecmp (cpu_string, "from-abi") == 0)
11251 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
11252 : ABI_NEEDS_64BIT_REGS ? 3
11253 : (TARGET_64BIT ? 3 : 1));
11255 /* 'default' has traditionally been a no-op. Probably not very useful. */
11256 if (strcasecmp (cpu_string, "default") == 0)
11259 for (p = mips_cpu_info_table; p->name != 0; p++)
11260 if (mips_matching_cpu_name_p (p->name, cpu_string))
11267 /* Return the processor associated with the given ISA level, or null
11268 if the ISA isn't valid. */
11270 static const struct mips_cpu_info *
11271 mips_cpu_info_from_isa (int isa)
11273 const struct mips_cpu_info *p;
11275 for (p = mips_cpu_info_table; p->name != 0; p++)
11282 /* Implement HARD_REGNO_NREGS. The size of FP registers is controlled
11283 by UNITS_PER_FPREG. The size of FP status registers is always 4, because
11284 they only hold condition code modes, and CCmode is always considered to
11285 be 4 bytes wide. All other registers are word sized. */
11288 mips_hard_regno_nregs (int regno, enum machine_mode mode)
11290 if (ST_REG_P (regno))
11291 return ((GET_MODE_SIZE (mode) + 3) / 4);
11292 else if (! FP_REG_P (regno))
11293 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
11295 return ((GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG);
11298 /* Implement TARGET_RETURN_IN_MEMORY. Under the old (i.e., 32 and O64 ABIs)
11299 all BLKmode objects are returned in memory. Under the new (N32 and
11300 64-bit MIPS ABIs) small structures are returned in a register.
11301 Objects with varying size must still be returned in memory, of
11305 mips_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
11308 return (TYPE_MODE (type) == BLKmode);
11310 return ((int_size_in_bytes (type) > (2 * UNITS_PER_WORD))
11311 || (int_size_in_bytes (type) == -1));
11315 mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
11317 return !TARGET_OLDABI;
11320 /* Return true if INSN is a multiply-add or multiply-subtract
11321 instruction and PREV assigns to the accumulator operand. */
11324 mips_linked_madd_p (rtx prev, rtx insn)
11328 x = single_set (insn);
11334 if (GET_CODE (x) == PLUS
11335 && GET_CODE (XEXP (x, 0)) == MULT
11336 && reg_set_p (XEXP (x, 1), prev))
11339 if (GET_CODE (x) == MINUS
11340 && GET_CODE (XEXP (x, 1)) == MULT
11341 && reg_set_p (XEXP (x, 0), prev))
11347 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
11348 that may clobber hi or lo. */
11350 static rtx mips_macc_chains_last_hilo;
11352 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
11353 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
11356 mips_macc_chains_record (rtx insn)
11358 if (get_attr_may_clobber_hilo (insn))
11359 mips_macc_chains_last_hilo = insn;
11362 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
11363 has NREADY elements, looking for a multiply-add or multiply-subtract
11364 instruction that is cumulative with mips_macc_chains_last_hilo.
11365 If there is one, promote it ahead of anything else that might
11366 clobber hi or lo. */
11369 mips_macc_chains_reorder (rtx *ready, int nready)
11373 if (mips_macc_chains_last_hilo != 0)
11374 for (i = nready - 1; i >= 0; i--)
11375 if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
11377 for (j = nready - 1; j > i; j--)
11378 if (recog_memoized (ready[j]) >= 0
11379 && get_attr_may_clobber_hilo (ready[j]))
11381 mips_promote_ready (ready, i, j);
11388 /* The last instruction to be scheduled. */
11390 static rtx vr4130_last_insn;
11392 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
11393 points to an rtx that is initially an instruction. Nullify the rtx
11394 if the instruction uses the value of register X. */
11397 vr4130_true_reg_dependence_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
11399 rtx *insn_ptr = data;
11402 && reg_referenced_p (x, PATTERN (*insn_ptr)))
11406 /* Return true if there is true register dependence between vr4130_last_insn
11410 vr4130_true_reg_dependence_p (rtx insn)
11412 note_stores (PATTERN (vr4130_last_insn),
11413 vr4130_true_reg_dependence_p_1, &insn);
11417 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
11418 the ready queue and that INSN2 is the instruction after it, return
11419 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
11420 in which INSN1 and INSN2 can probably issue in parallel, but for
11421 which (INSN2, INSN1) should be less sensitive to instruction
11422 alignment than (INSN1, INSN2). See 4130.md for more details. */
11425 vr4130_swap_insns_p (rtx insn1, rtx insn2)
11427 sd_iterator_def sd_it;
11430 /* Check for the following case:
11432 1) there is some other instruction X with an anti dependence on INSN1;
11433 2) X has a higher priority than INSN2; and
11434 3) X is an arithmetic instruction (and thus has no unit restrictions).
11436 If INSN1 is the last instruction blocking X, it would better to
11437 choose (INSN1, X) over (INSN2, INSN1). */
11438 FOR_EACH_DEP (insn1, SD_LIST_FORW, sd_it, dep)
11439 if (DEP_TYPE (dep) == REG_DEP_ANTI
11440 && INSN_PRIORITY (DEP_CON (dep)) > INSN_PRIORITY (insn2)
11441 && recog_memoized (DEP_CON (dep)) >= 0
11442 && get_attr_vr4130_class (DEP_CON (dep)) == VR4130_CLASS_ALU)
11445 if (vr4130_last_insn != 0
11446 && recog_memoized (insn1) >= 0
11447 && recog_memoized (insn2) >= 0)
11449 /* See whether INSN1 and INSN2 use different execution units,
11450 or if they are both ALU-type instructions. If so, they can
11451 probably execute in parallel. */
11452 enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
11453 enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
11454 if (class1 != class2 || class1 == VR4130_CLASS_ALU)
11456 /* If only one of the instructions has a dependence on
11457 vr4130_last_insn, prefer to schedule the other one first. */
11458 bool dep1 = vr4130_true_reg_dependence_p (insn1);
11459 bool dep2 = vr4130_true_reg_dependence_p (insn2);
11463 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
11464 is not an ALU-type instruction and if INSN1 uses the same
11465 execution unit. (Note that if this condition holds, we already
11466 know that INSN2 uses a different execution unit.) */
11467 if (class1 != VR4130_CLASS_ALU
11468 && recog_memoized (vr4130_last_insn) >= 0
11469 && class1 == get_attr_vr4130_class (vr4130_last_insn))
11476 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
11477 queue with at least two instructions. Swap the first two if
11478 vr4130_swap_insns_p says that it could be worthwhile. */
11481 vr4130_reorder (rtx *ready, int nready)
11483 if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
11484 mips_promote_ready (ready, nready - 2, nready - 1);
11487 /* Remove the instruction at index LOWER from ready queue READY and
11488 reinsert it in front of the instruction at index HIGHER. LOWER must
11492 mips_promote_ready (rtx *ready, int lower, int higher)
11497 new_head = ready[lower];
11498 for (i = lower; i < higher; i++)
11499 ready[i] = ready[i + 1];
11500 ready[i] = new_head;
11503 /* If the priority of the instruction at POS2 in the ready queue READY
11504 is within LIMIT units of that of the instruction at POS1, swap the
11505 instructions if POS2 is not already less than POS1. */
11508 mips_maybe_swap_ready (rtx *ready, int pos1, int pos2, int limit)
11511 && INSN_PRIORITY (ready[pos1]) + limit >= INSN_PRIORITY (ready[pos2]))
11514 temp = ready[pos1];
11515 ready[pos1] = ready[pos2];
11516 ready[pos2] = temp;
11520 /* Record whether last 74k AGEN instruction was a load or store. */
11522 static enum attr_type mips_last_74k_agen_insn = TYPE_UNKNOWN;
11524 /* Initialize mips_last_74k_agen_insn from INSN. A null argument
11525 resets to TYPE_UNKNOWN state. */
11528 mips_74k_agen_init (rtx insn)
11530 if (!insn || !NONJUMP_INSN_P (insn))
11531 mips_last_74k_agen_insn = TYPE_UNKNOWN;
11532 else if (USEFUL_INSN_P (insn))
11534 enum attr_type type = get_attr_type (insn);
11535 if (type == TYPE_LOAD || type == TYPE_STORE)
11536 mips_last_74k_agen_insn = type;
11540 /* A TUNE_74K helper function. The 74K AGEN pipeline likes multiple
11541 loads to be grouped together, and multiple stores to be grouped
11542 together. Swap things around in the ready queue to make this happen. */
11545 mips_74k_agen_reorder (rtx *ready, int nready)
11548 int store_pos, load_pos;
11553 for (i = nready - 1; i >= 0; i--)
11555 rtx insn = ready[i];
11556 if (USEFUL_INSN_P (insn))
11557 switch (get_attr_type (insn))
11560 if (store_pos == -1)
11565 if (load_pos == -1)
11574 if (load_pos == -1 || store_pos == -1)
11577 switch (mips_last_74k_agen_insn)
11580 /* Prefer to schedule loads since they have a higher latency. */
11582 /* Swap loads to the front of the queue. */
11583 mips_maybe_swap_ready (ready, load_pos, store_pos, 4);
11586 /* Swap stores to the front of the queue. */
11587 mips_maybe_swap_ready (ready, store_pos, load_pos, 4);
11594 /* Implement TARGET_SCHED_INIT. */
11597 mips_sched_init (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
11598 int max_ready ATTRIBUTE_UNUSED)
11600 mips_macc_chains_last_hilo = 0;
11601 vr4130_last_insn = 0;
11602 mips_74k_agen_init (NULL_RTX);
11605 /* Implement TARGET_SCHED_REORDER and TARG_SCHED_REORDER2. */
11608 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
11609 rtx *ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
11611 if (!reload_completed
11612 && TUNE_MACC_CHAINS
11614 mips_macc_chains_reorder (ready, *nreadyp);
11615 if (reload_completed
11617 && !TARGET_VR4130_ALIGN
11619 vr4130_reorder (ready, *nreadyp);
11621 mips_74k_agen_reorder (ready, *nreadyp);
11622 return mips_issue_rate ();
11625 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
11628 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
11629 rtx insn, int more)
11632 mips_74k_agen_init (insn);
11633 switch (GET_CODE (PATTERN (insn)))
11637 /* Don't count USEs and CLOBBERs against the issue rate. */
11642 if (!reload_completed && TUNE_MACC_CHAINS)
11643 mips_macc_chains_record (insn);
11644 vr4130_last_insn = insn;
11650 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
11651 dependencies have no cost, except on the 20Kc where output-dependence
11652 is treated like input-dependence. */
11655 mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
11656 rtx dep ATTRIBUTE_UNUSED, int cost)
11658 if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
11661 if (REG_NOTE_KIND (link) != 0)
11666 /* Return the number of instructions that can be issued per cycle. */
11669 mips_issue_rate (void)
11673 case PROCESSOR_74KC:
11674 case PROCESSOR_74KF2_1:
11675 case PROCESSOR_74KF1_1:
11676 case PROCESSOR_74KF3_2:
11677 /* The 74k is not strictly quad-issue cpu, but can be seen as one
11678 by the scheduler. It can issue 1 ALU, 1 AGEN and 2 FPU insns,
11679 but in reality only a maximum of 3 insns can be issued as the
11680 floating point load/stores also require a slot in the AGEN pipe. */
11683 case PROCESSOR_20KC:
11684 case PROCESSOR_R4130:
11685 case PROCESSOR_R5400:
11686 case PROCESSOR_R5500:
11687 case PROCESSOR_R7000:
11688 case PROCESSOR_R9000:
11691 case PROCESSOR_SB1:
11692 case PROCESSOR_SB1A:
11693 /* This is actually 4, but we get better performance if we claim 3.
11694 This is partly because of unwanted speculative code motion with the
11695 larger number, and partly because in most common cases we can't
11696 reach the theoretical max of 4. */
11704 /* Implements TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
11705 be as wide as the scheduling freedom in the DFA. */
11708 mips_multipass_dfa_lookahead (void)
11710 /* Can schedule up to 4 of the 6 function units in any one cycle. */
11717 /* Implements a store data bypass check. We need this because the cprestore
11718 pattern is type store, but defined using an UNSPEC. This UNSPEC causes the
11719 default routine to abort. We just return false for that case. */
11720 /* ??? Should try to give a better result here than assuming false. */
11723 mips_store_data_bypass_p (rtx out_insn, rtx in_insn)
11725 if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE)
11728 return ! store_data_bypass_p (out_insn, in_insn);
11731 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
11732 return the first operand of the associated "pref" or "prefx" insn. */
11735 mips_prefetch_cookie (rtx write, rtx locality)
11737 /* store_streamed / load_streamed. */
11738 if (INTVAL (locality) <= 0)
11739 return GEN_INT (INTVAL (write) + 4);
11741 /* store / load. */
11742 if (INTVAL (locality) <= 2)
11745 /* store_retained / load_retained. */
11746 return GEN_INT (INTVAL (write) + 6);
11749 /* MIPS builtin function support. */
11751 struct builtin_description
11753 /* The code of the main .md file instruction. See mips_builtin_type
11754 for more information. */
11755 enum insn_code icode;
11757 /* The floating-point comparison code to use with ICODE, if any. */
11758 enum mips_fp_condition cond;
11760 /* The name of the builtin function. */
11763 /* Specifies how the function should be expanded. */
11764 enum mips_builtin_type builtin_type;
11766 /* The function's prototype. */
11767 enum mips_function_type function_type;
11769 /* The target flags required for this function. */
11773 /* Define a MIPS_BUILTIN_DIRECT function for instruction CODE_FOR_mips_<INSN>.
11774 FUNCTION_TYPE and TARGET_FLAGS are builtin_description fields. */
11775 #define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
11776 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
11777 MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, TARGET_FLAGS }
11779 /* Define __builtin_mips_<INSN>_<COND>_{s,d}, both of which require
11781 #define CMP_SCALAR_BUILTINS(INSN, COND, TARGET_FLAGS) \
11782 { CODE_FOR_mips_ ## INSN ## _cond_s, MIPS_FP_COND_ ## COND, \
11783 "__builtin_mips_" #INSN "_" #COND "_s", \
11784 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, TARGET_FLAGS }, \
11785 { CODE_FOR_mips_ ## INSN ## _cond_d, MIPS_FP_COND_ ## COND, \
11786 "__builtin_mips_" #INSN "_" #COND "_d", \
11787 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, TARGET_FLAGS }
11789 /* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
11790 The lower and upper forms require TARGET_FLAGS while the any and all
11791 forms require MASK_MIPS3D. */
11792 #define CMP_PS_BUILTINS(INSN, COND, TARGET_FLAGS) \
11793 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11794 "__builtin_mips_any_" #INSN "_" #COND "_ps", \
11795 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
11796 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11797 "__builtin_mips_all_" #INSN "_" #COND "_ps", \
11798 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
11799 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11800 "__builtin_mips_lower_" #INSN "_" #COND "_ps", \
11801 MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }, \
11802 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11803 "__builtin_mips_upper_" #INSN "_" #COND "_ps", \
11804 MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }
11806 /* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s. The functions
11807 require MASK_MIPS3D. */
11808 #define CMP_4S_BUILTINS(INSN, COND) \
11809 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
11810 "__builtin_mips_any_" #INSN "_" #COND "_4s", \
11811 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11813 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
11814 "__builtin_mips_all_" #INSN "_" #COND "_4s", \
11815 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11818 /* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps. The comparison
11819 instruction requires TARGET_FLAGS. */
11820 #define MOVTF_BUILTINS(INSN, COND, TARGET_FLAGS) \
11821 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11822 "__builtin_mips_movt_" #INSN "_" #COND "_ps", \
11823 MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11825 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11826 "__builtin_mips_movf_" #INSN "_" #COND "_ps", \
11827 MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11830 /* Define all the builtins related to c.cond.fmt condition COND. */
11831 #define CMP_BUILTINS(COND) \
11832 MOVTF_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
11833 MOVTF_BUILTINS (cabs, COND, MASK_MIPS3D), \
11834 CMP_SCALAR_BUILTINS (cabs, COND, MASK_MIPS3D), \
11835 CMP_PS_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
11836 CMP_PS_BUILTINS (cabs, COND, MASK_MIPS3D), \
11837 CMP_4S_BUILTINS (c, COND), \
11838 CMP_4S_BUILTINS (cabs, COND)
11840 static const struct builtin_description mips_bdesc[] =
11842 DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11843 DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11844 DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11845 DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11846 DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, MASK_PAIRED_SINGLE_FLOAT),
11847 DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11848 DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11849 DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11851 DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
11852 MASK_PAIRED_SINGLE_FLOAT),
11853 DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11854 DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11855 DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11856 DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11858 DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
11859 DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
11860 DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11861 DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
11862 DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
11863 DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11865 DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
11866 DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
11867 DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11868 DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
11869 DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
11870 DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11872 MIPS_FP_CONDITIONS (CMP_BUILTINS)
11875 /* Builtin functions for the SB-1 processor. */
11877 #define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
11879 static const struct builtin_description sb1_bdesc[] =
11881 DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT)
11884 /* Builtin functions for DSP ASE. */
11886 #define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
11887 #define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
11888 #define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
11889 #define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
11890 #define CODE_FOR_mips_mul_ph CODE_FOR_mulv2hi3
11892 /* Define a MIPS_BUILTIN_DIRECT_NO_TARGET function for instruction
11893 CODE_FOR_mips_<INSN>. FUNCTION_TYPE and TARGET_FLAGS are
11894 builtin_description fields. */
11895 #define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
11896 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
11897 MIPS_BUILTIN_DIRECT_NO_TARGET, FUNCTION_TYPE, TARGET_FLAGS }
11899 /* Define __builtin_mips_bposge<VALUE>. <VALUE> is 32 for the MIPS32 DSP
11900 branch instruction. TARGET_FLAGS is a builtin_description field. */
11901 #define BPOSGE_BUILTIN(VALUE, TARGET_FLAGS) \
11902 { CODE_FOR_mips_bposge, 0, "__builtin_mips_bposge" #VALUE, \
11903 MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, TARGET_FLAGS }
11905 static const struct builtin_description dsp_bdesc[] =
11907 DIRECT_BUILTIN (addq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11908 DIRECT_BUILTIN (addq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11909 DIRECT_BUILTIN (addq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11910 DIRECT_BUILTIN (addu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11911 DIRECT_BUILTIN (addu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11912 DIRECT_BUILTIN (subq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11913 DIRECT_BUILTIN (subq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11914 DIRECT_BUILTIN (subq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11915 DIRECT_BUILTIN (subu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11916 DIRECT_BUILTIN (subu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11917 DIRECT_BUILTIN (addsc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11918 DIRECT_BUILTIN (addwc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11919 DIRECT_BUILTIN (modsub, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11920 DIRECT_BUILTIN (raddu_w_qb, MIPS_SI_FTYPE_V4QI, MASK_DSP),
11921 DIRECT_BUILTIN (absq_s_ph, MIPS_V2HI_FTYPE_V2HI, MASK_DSP),
11922 DIRECT_BUILTIN (absq_s_w, MIPS_SI_FTYPE_SI, MASK_DSP),
11923 DIRECT_BUILTIN (precrq_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
11924 DIRECT_BUILTIN (precrq_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
11925 DIRECT_BUILTIN (precrq_rs_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
11926 DIRECT_BUILTIN (precrqu_s_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
11927 DIRECT_BUILTIN (preceq_w_phl, MIPS_SI_FTYPE_V2HI, MASK_DSP),
11928 DIRECT_BUILTIN (preceq_w_phr, MIPS_SI_FTYPE_V2HI, MASK_DSP),
11929 DIRECT_BUILTIN (precequ_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11930 DIRECT_BUILTIN (precequ_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11931 DIRECT_BUILTIN (precequ_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11932 DIRECT_BUILTIN (precequ_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11933 DIRECT_BUILTIN (preceu_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11934 DIRECT_BUILTIN (preceu_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11935 DIRECT_BUILTIN (preceu_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11936 DIRECT_BUILTIN (preceu_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11937 DIRECT_BUILTIN (shll_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
11938 DIRECT_BUILTIN (shll_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11939 DIRECT_BUILTIN (shll_s_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11940 DIRECT_BUILTIN (shll_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11941 DIRECT_BUILTIN (shrl_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
11942 DIRECT_BUILTIN (shra_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11943 DIRECT_BUILTIN (shra_r_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11944 DIRECT_BUILTIN (shra_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11945 DIRECT_BUILTIN (muleu_s_ph_qbl, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
11946 DIRECT_BUILTIN (muleu_s_ph_qbr, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
11947 DIRECT_BUILTIN (mulq_rs_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11948 DIRECT_BUILTIN (muleq_s_w_phl, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
11949 DIRECT_BUILTIN (muleq_s_w_phr, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
11950 DIRECT_BUILTIN (bitrev, MIPS_SI_FTYPE_SI, MASK_DSP),
11951 DIRECT_BUILTIN (insv, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11952 DIRECT_BUILTIN (repl_qb, MIPS_V4QI_FTYPE_SI, MASK_DSP),
11953 DIRECT_BUILTIN (repl_ph, MIPS_V2HI_FTYPE_SI, MASK_DSP),
11954 DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
11955 DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
11956 DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
11957 DIRECT_BUILTIN (cmpgu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
11958 DIRECT_BUILTIN (cmpgu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
11959 DIRECT_BUILTIN (cmpgu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
11960 DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
11961 DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
11962 DIRECT_NO_TARGET_BUILTIN (cmp_le_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
11963 DIRECT_BUILTIN (pick_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11964 DIRECT_BUILTIN (pick_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11965 DIRECT_BUILTIN (packrl_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11966 DIRECT_NO_TARGET_BUILTIN (wrdsp, MIPS_VOID_FTYPE_SI_SI, MASK_DSP),
11967 DIRECT_BUILTIN (rddsp, MIPS_SI_FTYPE_SI, MASK_DSP),
11968 DIRECT_BUILTIN (lbux, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
11969 DIRECT_BUILTIN (lhx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
11970 DIRECT_BUILTIN (lwx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
11971 BPOSGE_BUILTIN (32, MASK_DSP),
11973 /* The following are for the MIPS DSP ASE REV 2. */
11974 DIRECT_BUILTIN (absq_s_qb, MIPS_V4QI_FTYPE_V4QI, MASK_DSPR2),
11975 DIRECT_BUILTIN (addu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11976 DIRECT_BUILTIN (addu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11977 DIRECT_BUILTIN (adduh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11978 DIRECT_BUILTIN (adduh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11979 DIRECT_BUILTIN (append, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
11980 DIRECT_BUILTIN (balign, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
11981 DIRECT_BUILTIN (cmpgdu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11982 DIRECT_BUILTIN (cmpgdu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11983 DIRECT_BUILTIN (cmpgdu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11984 DIRECT_BUILTIN (mul_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11985 DIRECT_BUILTIN (mul_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11986 DIRECT_BUILTIN (mulq_rs_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11987 DIRECT_BUILTIN (mulq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11988 DIRECT_BUILTIN (mulq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11989 DIRECT_BUILTIN (precr_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11990 DIRECT_BUILTIN (precr_sra_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
11991 DIRECT_BUILTIN (precr_sra_r_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
11992 DIRECT_BUILTIN (prepend, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
11993 DIRECT_BUILTIN (shra_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
11994 DIRECT_BUILTIN (shra_r_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
11995 DIRECT_BUILTIN (shrl_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSPR2),
11996 DIRECT_BUILTIN (subu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11997 DIRECT_BUILTIN (subu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11998 DIRECT_BUILTIN (subuh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11999 DIRECT_BUILTIN (subuh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
12000 DIRECT_BUILTIN (addqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12001 DIRECT_BUILTIN (addqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12002 DIRECT_BUILTIN (addqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
12003 DIRECT_BUILTIN (addqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
12004 DIRECT_BUILTIN (subqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12005 DIRECT_BUILTIN (subqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12006 DIRECT_BUILTIN (subqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
12007 DIRECT_BUILTIN (subqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2)
12010 static const struct builtin_description dsp_32only_bdesc[] =
12012 DIRECT_BUILTIN (dpau_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
12013 DIRECT_BUILTIN (dpau_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
12014 DIRECT_BUILTIN (dpsu_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
12015 DIRECT_BUILTIN (dpsu_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
12016 DIRECT_BUILTIN (dpaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
12017 DIRECT_BUILTIN (dpsq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
12018 DIRECT_BUILTIN (mulsaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
12019 DIRECT_BUILTIN (dpaq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
12020 DIRECT_BUILTIN (dpsq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
12021 DIRECT_BUILTIN (maq_s_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
12022 DIRECT_BUILTIN (maq_s_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
12023 DIRECT_BUILTIN (maq_sa_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
12024 DIRECT_BUILTIN (maq_sa_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
12025 DIRECT_BUILTIN (extr_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
12026 DIRECT_BUILTIN (extr_r_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
12027 DIRECT_BUILTIN (extr_rs_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
12028 DIRECT_BUILTIN (extr_s_h, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
12029 DIRECT_BUILTIN (extp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
12030 DIRECT_BUILTIN (extpdp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
12031 DIRECT_BUILTIN (shilo, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
12032 DIRECT_BUILTIN (mthlip, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
12034 /* The following are for the MIPS DSP ASE REV 2. */
12035 DIRECT_BUILTIN (dpa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12036 DIRECT_BUILTIN (dps_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12037 DIRECT_BUILTIN (madd, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
12038 DIRECT_BUILTIN (maddu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
12039 DIRECT_BUILTIN (msub, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
12040 DIRECT_BUILTIN (msubu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
12041 DIRECT_BUILTIN (mulsa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12042 DIRECT_BUILTIN (mult, MIPS_DI_FTYPE_SI_SI, MASK_DSPR2),
12043 DIRECT_BUILTIN (multu, MIPS_DI_FTYPE_USI_USI, MASK_DSPR2),
12044 DIRECT_BUILTIN (dpax_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12045 DIRECT_BUILTIN (dpsx_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12046 DIRECT_BUILTIN (dpaqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12047 DIRECT_BUILTIN (dpaqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12048 DIRECT_BUILTIN (dpsqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12049 DIRECT_BUILTIN (dpsqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2)
12052 /* This helps provide a mapping from builtin function codes to bdesc
12057 /* The builtin function table that this entry describes. */
12058 const struct builtin_description *bdesc;
12060 /* The number of entries in the builtin function table. */
12063 /* The target processor that supports these builtin functions.
12064 PROCESSOR_MAX means we enable them for all processors. */
12065 enum processor_type proc;
12067 /* If the target has these flags, this builtin function table
12068 will not be supported. */
12069 int unsupported_target_flags;
12072 static const struct bdesc_map bdesc_arrays[] =
12074 { mips_bdesc, ARRAY_SIZE (mips_bdesc), PROCESSOR_MAX, 0 },
12075 { sb1_bdesc, ARRAY_SIZE (sb1_bdesc), PROCESSOR_SB1, 0 },
12076 { dsp_bdesc, ARRAY_SIZE (dsp_bdesc), PROCESSOR_MAX, 0 },
12077 { dsp_32only_bdesc, ARRAY_SIZE (dsp_32only_bdesc), PROCESSOR_MAX,
12081 /* Take the argument ARGNUM of the arglist of EXP and convert it into a form
12082 suitable for input operand OP of instruction ICODE. Return the value. */
12085 mips_prepare_builtin_arg (enum insn_code icode,
12086 unsigned int op, tree exp, unsigned int argnum)
12089 enum machine_mode mode;
12091 value = expand_normal (CALL_EXPR_ARG (exp, argnum));
12092 mode = insn_data[icode].operand[op].mode;
12093 if (!insn_data[icode].operand[op].predicate (value, mode))
12095 value = copy_to_mode_reg (mode, value);
12096 /* Check the predicate again. */
12097 if (!insn_data[icode].operand[op].predicate (value, mode))
12099 error ("invalid argument to builtin function");
12107 /* Return an rtx suitable for output operand OP of instruction ICODE.
12108 If TARGET is non-null, try to use it where possible. */
12111 mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
12113 enum machine_mode mode;
12115 mode = insn_data[icode].operand[op].mode;
12116 if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
12117 target = gen_reg_rtx (mode);
12122 /* Expand builtin functions. This is called from TARGET_EXPAND_BUILTIN. */
12125 mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
12126 enum machine_mode mode ATTRIBUTE_UNUSED,
12127 int ignore ATTRIBUTE_UNUSED)
12129 enum insn_code icode;
12130 enum mips_builtin_type type;
12132 unsigned int fcode;
12133 const struct builtin_description *bdesc;
12134 const struct bdesc_map *m;
12136 fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
12137 fcode = DECL_FUNCTION_CODE (fndecl);
12141 error ("built-in function %qs not supported for MIPS16",
12142 IDENTIFIER_POINTER (DECL_NAME (fndecl)));
12147 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
12149 if (fcode < m->size)
12152 icode = bdesc[fcode].icode;
12153 type = bdesc[fcode].builtin_type;
12163 case MIPS_BUILTIN_DIRECT:
12164 return mips_expand_builtin_direct (icode, target, exp, true);
12166 case MIPS_BUILTIN_DIRECT_NO_TARGET:
12167 return mips_expand_builtin_direct (icode, target, exp, false);
12169 case MIPS_BUILTIN_MOVT:
12170 case MIPS_BUILTIN_MOVF:
12171 return mips_expand_builtin_movtf (type, icode, bdesc[fcode].cond,
12174 case MIPS_BUILTIN_CMP_ANY:
12175 case MIPS_BUILTIN_CMP_ALL:
12176 case MIPS_BUILTIN_CMP_UPPER:
12177 case MIPS_BUILTIN_CMP_LOWER:
12178 case MIPS_BUILTIN_CMP_SINGLE:
12179 return mips_expand_builtin_compare (type, icode, bdesc[fcode].cond,
12182 case MIPS_BUILTIN_BPOSGE32:
12183 return mips_expand_builtin_bposge (type, target);
12190 /* Init builtin functions. This is called from TARGET_INIT_BUILTIN. */
12193 mips_init_builtins (void)
12195 const struct builtin_description *d;
12196 const struct bdesc_map *m;
12197 tree types[(int) MIPS_MAX_FTYPE_MAX];
12198 tree V2SF_type_node;
12199 tree V2HI_type_node;
12200 tree V4QI_type_node;
12201 unsigned int offset;
12203 /* We have only builtins for -mpaired-single, -mips3d and -mdsp. */
12204 if (!TARGET_PAIRED_SINGLE_FLOAT && !TARGET_DSP)
12207 if (TARGET_PAIRED_SINGLE_FLOAT)
12209 V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
12211 types[MIPS_V2SF_FTYPE_V2SF]
12212 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
12214 types[MIPS_V2SF_FTYPE_V2SF_V2SF]
12215 = build_function_type_list (V2SF_type_node,
12216 V2SF_type_node, V2SF_type_node, NULL_TREE);
12218 types[MIPS_V2SF_FTYPE_V2SF_V2SF_INT]
12219 = build_function_type_list (V2SF_type_node,
12220 V2SF_type_node, V2SF_type_node,
12221 integer_type_node, NULL_TREE);
12223 types[MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF]
12224 = build_function_type_list (V2SF_type_node,
12225 V2SF_type_node, V2SF_type_node,
12226 V2SF_type_node, V2SF_type_node, NULL_TREE);
12228 types[MIPS_V2SF_FTYPE_SF_SF]
12229 = build_function_type_list (V2SF_type_node,
12230 float_type_node, float_type_node, NULL_TREE);
12232 types[MIPS_INT_FTYPE_V2SF_V2SF]
12233 = build_function_type_list (integer_type_node,
12234 V2SF_type_node, V2SF_type_node, NULL_TREE);
12236 types[MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF]
12237 = build_function_type_list (integer_type_node,
12238 V2SF_type_node, V2SF_type_node,
12239 V2SF_type_node, V2SF_type_node, NULL_TREE);
12241 types[MIPS_INT_FTYPE_SF_SF]
12242 = build_function_type_list (integer_type_node,
12243 float_type_node, float_type_node, NULL_TREE);
12245 types[MIPS_INT_FTYPE_DF_DF]
12246 = build_function_type_list (integer_type_node,
12247 double_type_node, double_type_node, NULL_TREE);
12249 types[MIPS_SF_FTYPE_V2SF]
12250 = build_function_type_list (float_type_node, V2SF_type_node, NULL_TREE);
12252 types[MIPS_SF_FTYPE_SF]
12253 = build_function_type_list (float_type_node,
12254 float_type_node, NULL_TREE);
12256 types[MIPS_SF_FTYPE_SF_SF]
12257 = build_function_type_list (float_type_node,
12258 float_type_node, float_type_node, NULL_TREE);
12260 types[MIPS_DF_FTYPE_DF]
12261 = build_function_type_list (double_type_node,
12262 double_type_node, NULL_TREE);
12264 types[MIPS_DF_FTYPE_DF_DF]
12265 = build_function_type_list (double_type_node,
12266 double_type_node, double_type_node, NULL_TREE);
12271 V2HI_type_node = build_vector_type_for_mode (intHI_type_node, V2HImode);
12272 V4QI_type_node = build_vector_type_for_mode (intQI_type_node, V4QImode);
12274 types[MIPS_V2HI_FTYPE_V2HI_V2HI]
12275 = build_function_type_list (V2HI_type_node,
12276 V2HI_type_node, V2HI_type_node,
12279 types[MIPS_SI_FTYPE_SI_SI]
12280 = build_function_type_list (intSI_type_node,
12281 intSI_type_node, intSI_type_node,
12284 types[MIPS_V4QI_FTYPE_V4QI_V4QI]
12285 = build_function_type_list (V4QI_type_node,
12286 V4QI_type_node, V4QI_type_node,
12289 types[MIPS_SI_FTYPE_V4QI]
12290 = build_function_type_list (intSI_type_node,
12294 types[MIPS_V2HI_FTYPE_V2HI]
12295 = build_function_type_list (V2HI_type_node,
12299 types[MIPS_SI_FTYPE_SI]
12300 = build_function_type_list (intSI_type_node,
12304 types[MIPS_V4QI_FTYPE_V2HI_V2HI]
12305 = build_function_type_list (V4QI_type_node,
12306 V2HI_type_node, V2HI_type_node,
12309 types[MIPS_V2HI_FTYPE_SI_SI]
12310 = build_function_type_list (V2HI_type_node,
12311 intSI_type_node, intSI_type_node,
12314 types[MIPS_SI_FTYPE_V2HI]
12315 = build_function_type_list (intSI_type_node,
12319 types[MIPS_V2HI_FTYPE_V4QI]
12320 = build_function_type_list (V2HI_type_node,
12324 types[MIPS_V4QI_FTYPE_V4QI_SI]
12325 = build_function_type_list (V4QI_type_node,
12326 V4QI_type_node, intSI_type_node,
12329 types[MIPS_V2HI_FTYPE_V2HI_SI]
12330 = build_function_type_list (V2HI_type_node,
12331 V2HI_type_node, intSI_type_node,
12334 types[MIPS_V2HI_FTYPE_V4QI_V2HI]
12335 = build_function_type_list (V2HI_type_node,
12336 V4QI_type_node, V2HI_type_node,
12339 types[MIPS_SI_FTYPE_V2HI_V2HI]
12340 = build_function_type_list (intSI_type_node,
12341 V2HI_type_node, V2HI_type_node,
12344 types[MIPS_DI_FTYPE_DI_V4QI_V4QI]
12345 = build_function_type_list (intDI_type_node,
12346 intDI_type_node, V4QI_type_node, V4QI_type_node,
12349 types[MIPS_DI_FTYPE_DI_V2HI_V2HI]
12350 = build_function_type_list (intDI_type_node,
12351 intDI_type_node, V2HI_type_node, V2HI_type_node,
12354 types[MIPS_DI_FTYPE_DI_SI_SI]
12355 = build_function_type_list (intDI_type_node,
12356 intDI_type_node, intSI_type_node, intSI_type_node,
12359 types[MIPS_V4QI_FTYPE_SI]
12360 = build_function_type_list (V4QI_type_node,
12364 types[MIPS_V2HI_FTYPE_SI]
12365 = build_function_type_list (V2HI_type_node,
12369 types[MIPS_VOID_FTYPE_V4QI_V4QI]
12370 = build_function_type_list (void_type_node,
12371 V4QI_type_node, V4QI_type_node,
12374 types[MIPS_SI_FTYPE_V4QI_V4QI]
12375 = build_function_type_list (intSI_type_node,
12376 V4QI_type_node, V4QI_type_node,
12379 types[MIPS_VOID_FTYPE_V2HI_V2HI]
12380 = build_function_type_list (void_type_node,
12381 V2HI_type_node, V2HI_type_node,
12384 types[MIPS_SI_FTYPE_DI_SI]
12385 = build_function_type_list (intSI_type_node,
12386 intDI_type_node, intSI_type_node,
12389 types[MIPS_DI_FTYPE_DI_SI]
12390 = build_function_type_list (intDI_type_node,
12391 intDI_type_node, intSI_type_node,
12394 types[MIPS_VOID_FTYPE_SI_SI]
12395 = build_function_type_list (void_type_node,
12396 intSI_type_node, intSI_type_node,
12399 types[MIPS_SI_FTYPE_PTR_SI]
12400 = build_function_type_list (intSI_type_node,
12401 ptr_type_node, intSI_type_node,
12404 types[MIPS_SI_FTYPE_VOID]
12405 = build_function_type (intSI_type_node, void_list_node);
12409 types[MIPS_V4QI_FTYPE_V4QI]
12410 = build_function_type_list (V4QI_type_node,
12414 types[MIPS_SI_FTYPE_SI_SI_SI]
12415 = build_function_type_list (intSI_type_node,
12416 intSI_type_node, intSI_type_node,
12417 intSI_type_node, NULL_TREE);
12419 types[MIPS_DI_FTYPE_DI_USI_USI]
12420 = build_function_type_list (intDI_type_node,
12422 unsigned_intSI_type_node,
12423 unsigned_intSI_type_node, NULL_TREE);
12425 types[MIPS_DI_FTYPE_SI_SI]
12426 = build_function_type_list (intDI_type_node,
12427 intSI_type_node, intSI_type_node,
12430 types[MIPS_DI_FTYPE_USI_USI]
12431 = build_function_type_list (intDI_type_node,
12432 unsigned_intSI_type_node,
12433 unsigned_intSI_type_node, NULL_TREE);
12435 types[MIPS_V2HI_FTYPE_SI_SI_SI]
12436 = build_function_type_list (V2HI_type_node,
12437 intSI_type_node, intSI_type_node,
12438 intSI_type_node, NULL_TREE);
12443 /* Iterate through all of the bdesc arrays, initializing all of the
12444 builtin functions. */
12447 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
12449 if ((m->proc == PROCESSOR_MAX || (m->proc == mips_arch))
12450 && (m->unsupported_target_flags & target_flags) == 0)
12451 for (d = m->bdesc; d < &m->bdesc[m->size]; d++)
12452 if ((d->target_flags & target_flags) == d->target_flags)
12453 add_builtin_function (d->name, types[d->function_type],
12454 d - m->bdesc + offset,
12455 BUILT_IN_MD, NULL, NULL);
12460 /* Expand a MIPS_BUILTIN_DIRECT function. ICODE is the code of the
12461 .md pattern and CALL is the function expr with arguments. TARGET,
12462 if nonnull, suggests a good place to put the result.
12463 HAS_TARGET indicates the function must return something. */
12466 mips_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
12469 rtx ops[MAX_RECOG_OPERANDS];
12475 /* We save target to ops[0]. */
12476 ops[0] = mips_prepare_builtin_target (icode, 0, target);
12480 /* We need to test if the arglist is not zero. Some instructions have extra
12481 clobber registers. */
12482 for (; i < insn_data[icode].n_operands && i <= call_expr_nargs (exp); i++, j++)
12483 ops[i] = mips_prepare_builtin_arg (icode, i, exp, j);
12488 emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
12492 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
12496 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
12500 gcc_unreachable ();
12505 /* Expand a __builtin_mips_movt_*_ps() or __builtin_mips_movf_*_ps()
12506 function (TYPE says which). EXP is the tree for the function
12507 function, ICODE is the instruction that should be used to compare
12508 the first two arguments, and COND is the condition it should test.
12509 TARGET, if nonnull, suggests a good place to put the result. */
12512 mips_expand_builtin_movtf (enum mips_builtin_type type,
12513 enum insn_code icode, enum mips_fp_condition cond,
12514 rtx target, tree exp)
12516 rtx cmp_result, op0, op1;
12518 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
12519 op0 = mips_prepare_builtin_arg (icode, 1, exp, 0);
12520 op1 = mips_prepare_builtin_arg (icode, 2, exp, 1);
12521 emit_insn (GEN_FCN (icode) (cmp_result, op0, op1, GEN_INT (cond)));
12523 icode = CODE_FOR_mips_cond_move_tf_ps;
12524 target = mips_prepare_builtin_target (icode, 0, target);
12525 if (type == MIPS_BUILTIN_MOVT)
12527 op1 = mips_prepare_builtin_arg (icode, 2, exp, 2);
12528 op0 = mips_prepare_builtin_arg (icode, 1, exp, 3);
12532 op0 = mips_prepare_builtin_arg (icode, 1, exp, 2);
12533 op1 = mips_prepare_builtin_arg (icode, 2, exp, 3);
12535 emit_insn (gen_mips_cond_move_tf_ps (target, op0, op1, cmp_result));
12539 /* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
12540 into TARGET otherwise. Return TARGET. */
12543 mips_builtin_branch_and_move (rtx condition, rtx target,
12544 rtx value_if_true, rtx value_if_false)
12546 rtx true_label, done_label;
12548 true_label = gen_label_rtx ();
12549 done_label = gen_label_rtx ();
12551 /* First assume that CONDITION is false. */
12552 mips_emit_move (target, value_if_false);
12554 /* Branch to TRUE_LABEL if CONDITION is true and DONE_LABEL otherwise. */
12555 emit_jump_insn (gen_condjump (condition, true_label));
12556 emit_jump_insn (gen_jump (done_label));
12559 /* Fix TARGET if CONDITION is true. */
12560 emit_label (true_label);
12561 mips_emit_move (target, value_if_true);
12563 emit_label (done_label);
12567 /* Expand a comparison builtin of type BUILTIN_TYPE. ICODE is the code
12568 of the comparison instruction and COND is the condition it should test.
12569 EXP is the function call and arguments and TARGET, if nonnull,
12570 suggests a good place to put the boolean result. */
12573 mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
12574 enum insn_code icode, enum mips_fp_condition cond,
12575 rtx target, tree exp)
12577 rtx offset, condition, cmp_result, ops[MAX_RECOG_OPERANDS];
12581 if (target == 0 || GET_MODE (target) != SImode)
12582 target = gen_reg_rtx (SImode);
12584 /* Prepare the operands to the comparison. */
12585 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
12586 for (i = 1; i < insn_data[icode].n_operands - 1; i++, j++)
12587 ops[i] = mips_prepare_builtin_arg (icode, i, exp, j);
12589 switch (insn_data[icode].n_operands)
12592 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2], GEN_INT (cond)));
12596 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2],
12597 ops[3], ops[4], GEN_INT (cond)));
12601 gcc_unreachable ();
12604 /* If the comparison sets more than one register, we define the result
12605 to be 0 if all registers are false and -1 if all registers are true.
12606 The value of the complete result is indeterminate otherwise. */
12607 switch (builtin_type)
12609 case MIPS_BUILTIN_CMP_ALL:
12610 condition = gen_rtx_NE (VOIDmode, cmp_result, constm1_rtx);
12611 return mips_builtin_branch_and_move (condition, target,
12612 const0_rtx, const1_rtx);
12614 case MIPS_BUILTIN_CMP_UPPER:
12615 case MIPS_BUILTIN_CMP_LOWER:
12616 offset = GEN_INT (builtin_type == MIPS_BUILTIN_CMP_UPPER);
12617 condition = gen_single_cc (cmp_result, offset);
12618 return mips_builtin_branch_and_move (condition, target,
12619 const1_rtx, const0_rtx);
12622 condition = gen_rtx_NE (VOIDmode, cmp_result, const0_rtx);
12623 return mips_builtin_branch_and_move (condition, target,
12624 const1_rtx, const0_rtx);
12628 /* Expand a bposge builtin of type BUILTIN_TYPE. TARGET, if nonnull,
12629 suggests a good place to put the boolean result. */
12632 mips_expand_builtin_bposge (enum mips_builtin_type builtin_type, rtx target)
12634 rtx condition, cmp_result;
12637 if (target == 0 || GET_MODE (target) != SImode)
12638 target = gen_reg_rtx (SImode);
12640 cmp_result = gen_rtx_REG (CCDSPmode, CCDSP_PO_REGNUM);
12642 if (builtin_type == MIPS_BUILTIN_BPOSGE32)
12647 condition = gen_rtx_GE (VOIDmode, cmp_result, GEN_INT (cmp_value));
12648 return mips_builtin_branch_and_move (condition, target,
12649 const1_rtx, const0_rtx);
12652 /* Return true if we should force MIPS16 mode for the function named by
12653 the SYMBOL_REF SYMBOL, which belongs to DECL and has type TYPE.
12654 FIRST is true if this is the first time handling this decl. */
12657 mips_use_mips16_mode_p (rtx symbol, tree decl, int first, tree type)
12661 /* Explicit function attributes take precedence. */
12662 if (mips_mips16_type_p (type))
12664 if (mips_nomips16_type_p (type))
12667 /* A nested function should inherit the MIPS16 setting from its parent. */
12668 parent = decl_function_context (decl);
12670 return SYMBOL_REF_MIPS16_FUNC_P (XEXP (DECL_RTL (parent), 0));
12672 /* Handle -mflip-mips16. */
12673 if (TARGET_FLIP_MIPS16
12674 && !DECL_BUILT_IN (decl)
12675 && !DECL_ARTIFICIAL (decl))
12678 /* Use the setting we picked first time around. */
12679 return SYMBOL_REF_MIPS16_FUNC_P (symbol);
12681 mips16_flipper = !mips16_flipper;
12682 if (mips16_flipper)
12683 return !mips_base_mips16;
12686 return mips_base_mips16;
12689 /* Set SYMBOL_REF_FLAGS for the SYMBOL_REF inside RTL, which belongs to DECL.
12690 FIRST is true if this is the first time handling this decl. */
12693 mips_encode_section_info (tree decl, rtx rtl, int first)
12695 default_encode_section_info (decl, rtl, first);
12697 if (TREE_CODE (decl) == FUNCTION_DECL)
12699 rtx symbol = XEXP (rtl, 0);
12700 tree type = TREE_TYPE (decl);
12702 if ((TARGET_LONG_CALLS && !mips_near_type_p (type))
12703 || mips_far_type_p (type))
12704 SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
12706 if (mips_use_mips16_mode_p (symbol, decl, first, type))
12708 if (flag_pic || TARGET_ABICALLS)
12709 sorry ("MIPS16 PIC");
12711 SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_MIPS16_FUNC;
12716 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. Some code models use the incoming
12717 value of PIC_FUNCTION_ADDR_REGNUM to set up the global pointer. */
12720 mips_extra_live_on_entry (bitmap regs)
12722 if (TARGET_USE_GOT && !TARGET_ABSOLUTE_ABICALLS)
12723 bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
12726 /* SImode values are represented as sign-extended to DImode. */
12729 mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
12731 if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
12732 return SIGN_EXTEND;
12737 /* MIPS implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
12740 mips_output_dwarf_dtprel (FILE *file, int size, rtx x)
12745 fputs ("\t.dtprelword\t", file);
12749 fputs ("\t.dtpreldword\t", file);
12753 gcc_unreachable ();
12755 output_addr_const (file, x);
12756 fputs ("+0x8000", file);
12759 #include "gt-mips.h"