1 /* Subroutines used for MIPS code generation.
2 Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
5 Contributed by A. Lichnewsky, lich@inria.inria.fr.
6 Changes by Michael Meissner, meissner@osf.org.
7 64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
8 Brendan Eich, brendan@microunity.com.
10 This file is part of GCC.
12 GCC is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 3, or (at your option)
17 GCC is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with GCC; see the file COPYING3. If not see
24 <http://www.gnu.org/licenses/>. */
28 #include "coretypes.h"
33 #include "hard-reg-set.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "insn-attr.h"
53 #include "target-def.h"
54 #include "integrate.h"
55 #include "langhooks.h"
56 #include "cfglayout.h"
57 #include "sched-int.h"
58 #include "tree-gimple.h"
60 #include "diagnostic.h"
62 /* True if X is an unspec wrapper around a SYMBOL_REF or LABEL_REF. */
63 #define UNSPEC_ADDRESS_P(X) \
64 (GET_CODE (X) == UNSPEC \
65 && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
66 && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
68 /* Extract the symbol or label from UNSPEC wrapper X. */
69 #define UNSPEC_ADDRESS(X) \
72 /* Extract the symbol type from UNSPEC wrapper X. */
73 #define UNSPEC_ADDRESS_TYPE(X) \
74 ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
76 /* The maximum distance between the top of the stack frame and the
77 value $sp has when we save and restore registers.
79 The value for normal-mode code must be a SMALL_OPERAND and must
80 preserve the maximum stack alignment. We therefore use a value
81 of 0x7ff0 in this case.
83 MIPS16e SAVE and RESTORE instructions can adjust the stack pointer by
84 up to 0x7f8 bytes and can usually save or restore all the registers
85 that we need to save or restore. (Note that we can only use these
86 instructions for o32, for which the stack alignment is 8 bytes.)
88 We use a maximum gap of 0x100 or 0x400 for MIPS16 code when SAVE and
89 RESTORE are not available. We can then use unextended instructions
90 to save and restore registers, and to allocate and deallocate the top
92 #define MIPS_MAX_FIRST_STACK_STEP \
93 (!TARGET_MIPS16 ? 0x7ff0 \
94 : GENERATE_MIPS16E_SAVE_RESTORE ? 0x7f8 \
95 : TARGET_64BIT ? 0x100 : 0x400)
97 /* True if INSN is a mips.md pattern or asm statement. */
98 #define USEFUL_INSN_P(INSN) \
100 && GET_CODE (PATTERN (INSN)) != USE \
101 && GET_CODE (PATTERN (INSN)) != CLOBBER \
102 && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
103 && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
105 /* If INSN is a delayed branch sequence, return the first instruction
106 in the sequence, otherwise return INSN itself. */
107 #define SEQ_BEGIN(INSN) \
108 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
109 ? XVECEXP (PATTERN (INSN), 0, 0) \
112 /* Likewise for the last instruction in a delayed branch sequence. */
113 #define SEQ_END(INSN) \
114 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
115 ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1) \
118 /* Execute the following loop body with SUBINSN set to each instruction
119 between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive. */
120 #define FOR_EACH_SUBINSN(SUBINSN, INSN) \
121 for ((SUBINSN) = SEQ_BEGIN (INSN); \
122 (SUBINSN) != NEXT_INSN (SEQ_END (INSN)); \
123 (SUBINSN) = NEXT_INSN (SUBINSN))
125 /* True if bit BIT is set in VALUE. */
126 #define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0)
128 /* Classifies an address.
131 A natural register + offset address. The register satisfies
132 mips_valid_base_register_p and the offset is a const_arith_operand.
135 A LO_SUM rtx. The first operand is a valid base register and
136 the second operand is a symbolic address.
139 A signed 16-bit constant address.
142 A constant symbolic address (equivalent to CONSTANT_SYMBOLIC). */
143 enum mips_address_type {
150 /* Classifies the prototype of a builtin function. */
151 enum mips_function_type
153 MIPS_V2SF_FTYPE_V2SF,
154 MIPS_V2SF_FTYPE_V2SF_V2SF,
155 MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
156 MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,
157 MIPS_V2SF_FTYPE_SF_SF,
158 MIPS_INT_FTYPE_V2SF_V2SF,
159 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF,
160 MIPS_INT_FTYPE_SF_SF,
161 MIPS_INT_FTYPE_DF_DF,
168 /* For MIPS DSP ASE */
170 MIPS_DI_FTYPE_DI_SI_SI,
171 MIPS_DI_FTYPE_DI_V2HI_V2HI,
172 MIPS_DI_FTYPE_DI_V4QI_V4QI,
174 MIPS_SI_FTYPE_PTR_SI,
178 MIPS_SI_FTYPE_V2HI_V2HI,
180 MIPS_SI_FTYPE_V4QI_V4QI,
183 MIPS_V2HI_FTYPE_SI_SI,
184 MIPS_V2HI_FTYPE_V2HI,
185 MIPS_V2HI_FTYPE_V2HI_SI,
186 MIPS_V2HI_FTYPE_V2HI_V2HI,
187 MIPS_V2HI_FTYPE_V4QI,
188 MIPS_V2HI_FTYPE_V4QI_V2HI,
190 MIPS_V4QI_FTYPE_V2HI_V2HI,
191 MIPS_V4QI_FTYPE_V4QI_SI,
192 MIPS_V4QI_FTYPE_V4QI_V4QI,
193 MIPS_VOID_FTYPE_SI_SI,
194 MIPS_VOID_FTYPE_V2HI_V2HI,
195 MIPS_VOID_FTYPE_V4QI_V4QI,
197 /* For MIPS DSP REV 2 ASE. */
198 MIPS_V4QI_FTYPE_V4QI,
199 MIPS_SI_FTYPE_SI_SI_SI,
200 MIPS_DI_FTYPE_DI_USI_USI,
202 MIPS_DI_FTYPE_USI_USI,
203 MIPS_V2HI_FTYPE_SI_SI_SI,
209 /* Specifies how a builtin function should be converted into rtl. */
210 enum mips_builtin_type
212 /* The builtin corresponds directly to an .md pattern. The return
213 value is mapped to operand 0 and the arguments are mapped to
214 operands 1 and above. */
217 /* The builtin corresponds directly to an .md pattern. There is no return
218 value and the arguments are mapped to operands 0 and above. */
219 MIPS_BUILTIN_DIRECT_NO_TARGET,
221 /* The builtin corresponds to a comparison instruction followed by
222 a mips_cond_move_tf_ps pattern. The first two arguments are the
223 values to compare and the second two arguments are the vector
224 operands for the movt.ps or movf.ps instruction (in assembly order). */
228 /* The builtin corresponds to a V2SF comparison instruction. Operand 0
229 of this instruction is the result of the comparison, which has mode
230 CCV2 or CCV4. The function arguments are mapped to operands 1 and
231 above. The function's return value is an SImode boolean that is
232 true under the following conditions:
234 MIPS_BUILTIN_CMP_ANY: one of the registers is true
235 MIPS_BUILTIN_CMP_ALL: all of the registers are true
236 MIPS_BUILTIN_CMP_LOWER: the first register is true
237 MIPS_BUILTIN_CMP_UPPER: the second register is true. */
238 MIPS_BUILTIN_CMP_ANY,
239 MIPS_BUILTIN_CMP_ALL,
240 MIPS_BUILTIN_CMP_UPPER,
241 MIPS_BUILTIN_CMP_LOWER,
243 /* As above, but the instruction only sets a single $fcc register. */
244 MIPS_BUILTIN_CMP_SINGLE,
246 /* For generating bposge32 branch instructions in MIPS32 DSP ASE. */
247 MIPS_BUILTIN_BPOSGE32
250 /* Invokes MACRO (COND) for each c.cond.fmt condition. */
251 #define MIPS_FP_CONDITIONS(MACRO) \
269 /* Enumerates the codes above as MIPS_FP_COND_<X>. */
270 #define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
271 enum mips_fp_condition {
272 MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
275 /* Index X provides the string representation of MIPS_FP_COND_<X>. */
276 #define STRINGIFY(X) #X
277 static const char *const mips_fp_conditions[] = {
278 MIPS_FP_CONDITIONS (STRINGIFY)
281 /* A function to save or store a register. The first argument is the
282 register and the second is the stack slot. */
283 typedef void (*mips_save_restore_fn) (rtx, rtx);
285 struct mips16_constant;
286 struct mips_arg_info;
287 struct mips_address_info;
288 struct mips_integer_op;
291 static bool mips_valid_base_register_p (rtx, enum machine_mode, int);
292 static bool mips_classify_address (struct mips_address_info *, rtx,
293 enum machine_mode, int);
294 static bool mips_cannot_force_const_mem (rtx);
295 static bool mips_use_blocks_for_constant_p (enum machine_mode, const_rtx);
296 static int mips_symbol_insns (enum mips_symbol_type, enum machine_mode);
297 static bool mips16_unextended_reference_p (enum machine_mode mode, rtx, rtx);
298 static rtx mips_force_temporary (rtx, rtx);
299 static rtx mips_unspec_offset_high (rtx, rtx, rtx, enum mips_symbol_type);
300 static rtx mips_add_offset (rtx, rtx, HOST_WIDE_INT);
301 static unsigned int mips_build_shift (struct mips_integer_op *, HOST_WIDE_INT);
302 static unsigned int mips_build_lower (struct mips_integer_op *,
303 unsigned HOST_WIDE_INT);
304 static unsigned int mips_build_integer (struct mips_integer_op *,
305 unsigned HOST_WIDE_INT);
306 static void mips_legitimize_const_move (enum machine_mode, rtx, rtx);
307 static int m16_check_op (rtx, int, int, int);
308 static bool mips_rtx_costs (rtx, int, int, int *);
309 static int mips_address_cost (rtx);
310 static void mips_emit_compare (enum rtx_code *, rtx *, rtx *, bool);
311 static void mips_load_call_address (rtx, rtx, int);
312 static bool mips_function_ok_for_sibcall (tree, tree);
313 static void mips_block_move_straight (rtx, rtx, HOST_WIDE_INT);
314 static void mips_adjust_block_mem (rtx, HOST_WIDE_INT, rtx *, rtx *);
315 static void mips_block_move_loop (rtx, rtx, HOST_WIDE_INT);
316 static void mips_arg_info (const CUMULATIVE_ARGS *, enum machine_mode,
317 tree, int, struct mips_arg_info *);
318 static bool mips_get_unaligned_mem (rtx *, unsigned int, int, rtx *, rtx *);
319 static void mips_set_architecture (const struct mips_cpu_info *);
320 static void mips_set_tune (const struct mips_cpu_info *);
321 static bool mips_handle_option (size_t, const char *, int);
322 static struct machine_function *mips_init_machine_status (void);
323 static void print_operand_reloc (FILE *, rtx, enum mips_symbol_context,
325 static void mips_file_start (void);
326 static int mips_small_data_pattern_1 (rtx *, void *);
327 static int mips_rewrite_small_data_1 (rtx *, void *);
328 static bool mips_function_has_gp_insn (void);
329 static unsigned int mips_global_pointer (void);
330 static bool mips_save_reg_p (unsigned int);
331 static void mips_save_restore_reg (enum machine_mode, int, HOST_WIDE_INT,
332 mips_save_restore_fn);
333 static void mips_for_each_saved_reg (HOST_WIDE_INT, mips_save_restore_fn);
334 static void mips_output_cplocal (void);
335 static void mips_emit_loadgp (void);
336 static void mips_output_function_prologue (FILE *, HOST_WIDE_INT);
337 static void mips_set_frame_expr (rtx);
338 static rtx mips_frame_set (rtx, rtx);
339 static void mips_save_reg (rtx, rtx);
340 static void mips_output_function_epilogue (FILE *, HOST_WIDE_INT);
341 static void mips_restore_reg (rtx, rtx);
342 static void mips_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
343 HOST_WIDE_INT, tree);
344 static section *mips_select_rtx_section (enum machine_mode, rtx,
345 unsigned HOST_WIDE_INT);
346 static section *mips_function_rodata_section (tree);
347 static bool mips_in_small_data_p (const_tree);
348 static bool mips_use_anchors_for_symbol_p (const_rtx);
349 static int mips_fpr_return_fields (const_tree, tree *);
350 static bool mips_return_in_msb (const_tree);
351 static rtx mips_return_fpr_pair (enum machine_mode mode,
352 enum machine_mode mode1, HOST_WIDE_INT,
353 enum machine_mode mode2, HOST_WIDE_INT);
354 static rtx mips16_gp_pseudo_reg (void);
355 static void mips16_fp_args (FILE *, int, int);
356 static void build_mips16_function_stub (FILE *);
357 static rtx dump_constants_1 (enum machine_mode, rtx, rtx);
358 static void dump_constants (struct mips16_constant *, rtx);
359 static int mips16_insn_length (rtx);
360 static int mips16_rewrite_pool_refs (rtx *, void *);
361 static void mips16_lay_out_constants (void);
362 static void mips_sim_reset (struct mips_sim *);
363 static void mips_sim_init (struct mips_sim *, state_t);
364 static void mips_sim_next_cycle (struct mips_sim *);
365 static void mips_sim_wait_reg (struct mips_sim *, rtx, rtx);
366 static int mips_sim_wait_regs_2 (rtx *, void *);
367 static void mips_sim_wait_regs_1 (rtx *, void *);
368 static void mips_sim_wait_regs (struct mips_sim *, rtx);
369 static void mips_sim_wait_units (struct mips_sim *, rtx);
370 static void mips_sim_wait_insn (struct mips_sim *, rtx);
371 static void mips_sim_record_set (rtx, const_rtx, void *);
372 static void mips_sim_issue_insn (struct mips_sim *, rtx);
373 static void mips_sim_issue_nop (struct mips_sim *);
374 static void mips_sim_finish_insn (struct mips_sim *, rtx);
375 static void vr4130_avoid_branch_rt_conflict (rtx);
376 static void vr4130_align_insns (void);
377 static void mips_avoid_hazard (rtx, rtx, int *, rtx *, rtx);
378 static void mips_avoid_hazards (void);
379 static void mips_reorg (void);
380 static bool mips_strict_matching_cpu_name_p (const char *, const char *);
381 static bool mips_matching_cpu_name_p (const char *, const char *);
382 static const struct mips_cpu_info *mips_parse_cpu (const char *);
383 static const struct mips_cpu_info *mips_cpu_info_from_isa (int);
384 static bool mips_return_in_memory (const_tree, const_tree);
385 static bool mips_strict_argument_naming (CUMULATIVE_ARGS *);
386 static void mips_macc_chains_record (rtx);
387 static void mips_macc_chains_reorder (rtx *, int);
388 static void vr4130_true_reg_dependence_p_1 (rtx, const_rtx, void *);
389 static bool vr4130_true_reg_dependence_p (rtx);
390 static bool vr4130_swap_insns_p (rtx, rtx);
391 static void vr4130_reorder (rtx *, int);
392 static void mips_promote_ready (rtx *, int, int);
393 static void mips_sched_init (FILE *, int, int);
394 static int mips_sched_reorder (FILE *, int, rtx *, int *, int);
395 static int mips_variable_issue (FILE *, int, rtx, int);
396 static int mips_adjust_cost (rtx, rtx, rtx, int);
397 static int mips_issue_rate (void);
398 static int mips_multipass_dfa_lookahead (void);
399 static void mips_init_libfuncs (void);
400 static void mips_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
402 static tree mips_build_builtin_va_list (void);
403 static tree mips_gimplify_va_arg_expr (tree, tree, tree *, tree *);
404 static bool mips_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode mode,
406 static bool mips_callee_copies (CUMULATIVE_ARGS *, enum machine_mode mode,
408 static int mips_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode mode,
410 static bool mips_valid_pointer_mode (enum machine_mode);
411 static bool mips_scalar_mode_supported_p (enum machine_mode);
412 static bool mips_vector_mode_supported_p (enum machine_mode);
413 static rtx mips_prepare_builtin_arg (enum insn_code, unsigned int, tree, unsigned int);
414 static rtx mips_prepare_builtin_target (enum insn_code, unsigned int, rtx);
415 static rtx mips_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
416 static void mips_init_builtins (void);
417 static rtx mips_expand_builtin_direct (enum insn_code, rtx, tree, bool);
418 static rtx mips_expand_builtin_movtf (enum mips_builtin_type,
419 enum insn_code, enum mips_fp_condition,
421 static rtx mips_expand_builtin_compare (enum mips_builtin_type,
422 enum insn_code, enum mips_fp_condition,
424 static rtx mips_expand_builtin_bposge (enum mips_builtin_type, rtx);
425 static void mips_encode_section_info (tree, rtx, int);
426 static void mips_extra_live_on_entry (bitmap);
427 static int mips_comp_type_attributes (const_tree, const_tree);
428 static void mips_set_mips16_mode (int);
429 static void mips_insert_attributes (tree, tree *);
430 static tree mips_merge_decl_attributes (tree, tree);
431 static void mips_set_current_function (tree);
432 static int mips_mode_rep_extended (enum machine_mode, enum machine_mode);
433 static bool mips_offset_within_alignment_p (rtx, HOST_WIDE_INT);
434 static void mips_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
436 /* Structure to be filled in by compute_frame_size with register
437 save masks, and offsets for the current function. */
439 struct mips_frame_info GTY(())
441 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
442 HOST_WIDE_INT var_size; /* # bytes that variables take up */
443 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
444 HOST_WIDE_INT cprestore_size; /* # bytes that the .cprestore slot takes up */
445 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
446 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
447 unsigned int mask; /* mask of saved gp registers */
448 unsigned int fmask; /* mask of saved fp registers */
449 HOST_WIDE_INT gp_save_offset; /* offset from vfp to store gp registers */
450 HOST_WIDE_INT fp_save_offset; /* offset from vfp to store fp registers */
451 HOST_WIDE_INT gp_sp_offset; /* offset from new sp to store gp registers */
452 HOST_WIDE_INT fp_sp_offset; /* offset from new sp to store fp registers */
453 bool initialized; /* true if frame size already calculated */
454 int num_gp; /* number of gp registers saved */
455 int num_fp; /* number of fp registers saved */
458 struct machine_function GTY(()) {
459 /* Pseudo-reg holding the value of $28 in a mips16 function which
460 refers to GP relative global variables. */
461 rtx mips16_gp_pseudo_rtx;
463 /* The number of extra stack bytes taken up by register varargs.
464 This area is allocated by the callee at the very top of the frame. */
467 /* Current frame information, calculated by compute_frame_size. */
468 struct mips_frame_info frame;
470 /* The register to use as the global pointer within this function. */
471 unsigned int global_pointer;
473 /* True if mips_adjust_insn_length should ignore an instruction's
475 bool ignore_hazard_length_p;
477 /* True if the whole function is suitable for .set noreorder and
479 bool all_noreorder_p;
481 /* True if the function is known to have an instruction that needs $gp. */
484 /* True if we have emitted an instruction to initialize
485 mips16_gp_pseudo_rtx. */
486 bool initialized_mips16_gp_pseudo_p;
489 /* Information about a single argument. */
492 /* True if the argument is passed in a floating-point register, or
493 would have been if we hadn't run out of registers. */
496 /* The number of words passed in registers, rounded up. */
497 unsigned int reg_words;
499 /* For EABI, the offset of the first register from GP_ARG_FIRST or
500 FP_ARG_FIRST. For other ABIs, the offset of the first register from
501 the start of the ABI's argument structure (see the CUMULATIVE_ARGS
502 comment for details).
504 The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
506 unsigned int reg_offset;
508 /* The number of words that must be passed on the stack, rounded up. */
509 unsigned int stack_words;
511 /* The offset from the start of the stack overflow area of the argument's
512 first stack word. Only meaningful when STACK_WORDS is nonzero. */
513 unsigned int stack_offset;
517 /* Information about an address described by mips_address_type.
523 REG is the base register and OFFSET is the constant offset.
526 REG is the register that contains the high part of the address,
527 OFFSET is the symbolic address being referenced and SYMBOL_TYPE
528 is the type of OFFSET's symbol.
531 SYMBOL_TYPE is the type of symbol being referenced. */
533 struct mips_address_info
535 enum mips_address_type type;
538 enum mips_symbol_type symbol_type;
542 /* One stage in a constant building sequence. These sequences have
546 A = A CODE[1] VALUE[1]
547 A = A CODE[2] VALUE[2]
550 where A is an accumulator, each CODE[i] is a binary rtl operation
551 and each VALUE[i] is a constant integer. */
552 struct mips_integer_op {
554 unsigned HOST_WIDE_INT value;
558 /* The largest number of operations needed to load an integer constant.
559 The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
560 When the lowest bit is clear, we can try, but reject a sequence with
561 an extra SLL at the end. */
562 #define MIPS_MAX_INTEGER_OPS 7
564 /* Information about a MIPS16e SAVE or RESTORE instruction. */
565 struct mips16e_save_restore_info {
566 /* The number of argument registers saved by a SAVE instruction.
567 0 for RESTORE instructions. */
570 /* Bit X is set if the instruction saves or restores GPR X. */
573 /* The total number of bytes to allocate. */
577 /* Global variables for machine-dependent things. */
579 /* Threshold for data being put into the small data/bss area, instead
580 of the normal data area. */
581 int mips_section_threshold = -1;
583 /* Count the number of .file directives, so that .loc is up to date. */
584 int num_source_filenames = 0;
586 /* Count the number of sdb related labels are generated (to find block
587 start and end boundaries). */
588 int sdb_label_count = 0;
590 /* Next label # for each statement for Silicon Graphics IRIS systems. */
593 /* Name of the file containing the current function. */
594 const char *current_function_file = "";
596 /* Number of nested .set noreorder, noat, nomacro, and volatile requests. */
602 /* The next branch instruction is a branch likely, not branch normal. */
603 int mips_branch_likely;
605 /* The operands passed to the last cmpMM expander. */
608 /* The target cpu for code generation. */
609 enum processor_type mips_arch;
610 const struct mips_cpu_info *mips_arch_info;
612 /* The target cpu for optimization and scheduling. */
613 enum processor_type mips_tune;
614 const struct mips_cpu_info *mips_tune_info;
616 /* Which instruction set architecture to use. */
619 /* Which ABI to use. */
620 int mips_abi = MIPS_ABI_DEFAULT;
622 /* Cost information to use. */
623 const struct mips_rtx_cost_data *mips_cost;
625 /* Remember the ambient target flags, excluding mips16. */
626 static int mips_base_target_flags;
627 /* The mips16 command-line target flags only. */
628 static bool mips_base_mips16;
629 /* Similar copies of option settings. */
630 static int mips_base_schedule_insns; /* flag_schedule_insns */
631 static int mips_base_reorder_blocks_and_partition; /* flag_reorder... */
632 static int mips_base_move_loop_invariants; /* flag_move_loop_invariants */
633 static int mips_base_align_loops; /* align_loops */
634 static int mips_base_align_jumps; /* align_jumps */
635 static int mips_base_align_functions; /* align_functions */
636 static GTY(()) int mips16_flipper;
638 /* The -mtext-loads setting. */
639 enum mips_code_readable_setting mips_code_readable = CODE_READABLE_YES;
641 /* The architecture selected by -mipsN. */
642 static const struct mips_cpu_info *mips_isa_info;
644 /* If TRUE, we split addresses into their high and low parts in the RTL. */
645 int mips_split_addresses;
647 /* Mode used for saving/restoring general purpose registers. */
648 static enum machine_mode gpr_mode;
650 /* Array giving truth value on whether or not a given hard register
651 can support a given mode. */
652 char mips_hard_regno_mode_ok[(int)MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
654 /* List of all MIPS punctuation characters used by print_operand. */
655 char mips_print_operand_punct[256];
657 /* Map GCC register number to debugger register number. */
658 int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
659 int mips_dwarf_regno[FIRST_PSEUDO_REGISTER];
661 /* A copy of the original flag_delayed_branch: see override_options. */
662 static int mips_flag_delayed_branch;
664 static GTY (()) int mips_output_filename_first_time = 1;
666 /* mips_split_p[X] is true if symbols of type X can be split by
667 mips_split_symbol(). */
668 bool mips_split_p[NUM_SYMBOL_TYPES];
670 /* mips_lo_relocs[X] is the relocation to use when a symbol of type X
671 appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
672 if they are matched by a special .md file pattern. */
673 static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];
675 /* Likewise for HIGHs. */
676 static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
678 /* Map hard register number to register class */
679 const enum reg_class mips_regno_to_class[] =
681 LEA_REGS, LEA_REGS, M16_NA_REGS, V1_REG,
682 M16_REGS, M16_REGS, M16_REGS, M16_REGS,
683 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
684 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
685 M16_NA_REGS, M16_NA_REGS, LEA_REGS, LEA_REGS,
686 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
687 T_REG, PIC_FN_ADDR_REG, LEA_REGS, LEA_REGS,
688 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
689 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
690 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
691 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
692 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
693 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
694 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
695 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
696 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
697 MD0_REG, MD1_REG, NO_REGS, ST_REGS,
698 ST_REGS, ST_REGS, ST_REGS, ST_REGS,
699 ST_REGS, ST_REGS, ST_REGS, NO_REGS,
700 NO_REGS, ALL_REGS, ALL_REGS, NO_REGS,
701 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
702 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
703 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
704 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
705 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
706 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
707 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
708 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
709 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
710 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
711 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
712 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
713 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
714 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
715 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
716 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
717 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
718 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
719 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
720 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
721 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
722 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
723 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
724 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
725 DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS,
726 DSP_ACC_REGS, DSP_ACC_REGS, ALL_REGS, ALL_REGS,
727 ALL_REGS, ALL_REGS, ALL_REGS, ALL_REGS
730 /* Table of machine dependent attributes. */
731 const struct attribute_spec mips_attribute_table[] =
733 { "long_call", 0, 0, false, true, true, NULL },
734 { "far", 0, 0, false, true, true, NULL },
735 { "near", 0, 0, false, true, true, NULL },
736 /* Switch MIPS16 ASE on and off per-function. We would really like
737 to make these type attributes, but GCC doesn't provide the hooks
738 we need to support the right conversion rules. As declaration
739 attributes, they affect code generation but don't carry other
741 { "mips16", 0, 0, true, false, false, NULL },
742 { "nomips16", 0, 0, true, false, false, NULL },
743 { NULL, 0, 0, false, false, false, NULL }
746 /* A table describing all the processors gcc knows about. Names are
747 matched in the order listed. The first mention of an ISA level is
748 taken as the canonical name for that ISA.
750 To ease comparison, please keep this table in the same order
751 as gas's mips_cpu_info_table[]. Please also make sure that
752 MIPS_ISA_LEVEL_SPEC and MIPS_ARCH_FLOAT_SPEC handle all -march
753 options correctly. */
754 const struct mips_cpu_info mips_cpu_info_table[] = {
755 /* Entries for generic ISAs */
756 { "mips1", PROCESSOR_R3000, 1 },
757 { "mips2", PROCESSOR_R6000, 2 },
758 { "mips3", PROCESSOR_R4000, 3 },
759 { "mips4", PROCESSOR_R8000, 4 },
760 { "mips32", PROCESSOR_4KC, 32 },
761 { "mips32r2", PROCESSOR_M4K, 33 },
762 { "mips64", PROCESSOR_5KC, 64 },
765 { "r3000", PROCESSOR_R3000, 1 },
766 { "r2000", PROCESSOR_R3000, 1 }, /* = r3000 */
767 { "r3900", PROCESSOR_R3900, 1 },
770 { "r6000", PROCESSOR_R6000, 2 },
773 { "r4000", PROCESSOR_R4000, 3 },
774 { "vr4100", PROCESSOR_R4100, 3 },
775 { "vr4111", PROCESSOR_R4111, 3 },
776 { "vr4120", PROCESSOR_R4120, 3 },
777 { "vr4130", PROCESSOR_R4130, 3 },
778 { "vr4300", PROCESSOR_R4300, 3 },
779 { "r4400", PROCESSOR_R4000, 3 }, /* = r4000 */
780 { "r4600", PROCESSOR_R4600, 3 },
781 { "orion", PROCESSOR_R4600, 3 }, /* = r4600 */
782 { "r4650", PROCESSOR_R4650, 3 },
785 { "r8000", PROCESSOR_R8000, 4 },
786 { "vr5000", PROCESSOR_R5000, 4 },
787 { "vr5400", PROCESSOR_R5400, 4 },
788 { "vr5500", PROCESSOR_R5500, 4 },
789 { "rm7000", PROCESSOR_R7000, 4 },
790 { "rm9000", PROCESSOR_R9000, 4 },
793 { "4kc", PROCESSOR_4KC, 32 },
794 { "4km", PROCESSOR_4KC, 32 }, /* = 4kc */
795 { "4kp", PROCESSOR_4KP, 32 },
796 { "4ksc", PROCESSOR_4KC, 32 },
798 /* MIPS32 Release 2 */
799 { "m4k", PROCESSOR_M4K, 33 },
800 { "4kec", PROCESSOR_4KC, 33 },
801 { "4kem", PROCESSOR_4KC, 33 },
802 { "4kep", PROCESSOR_4KP, 33 },
803 { "4ksd", PROCESSOR_4KC, 33 },
805 { "24kc", PROCESSOR_24KC, 33 },
806 { "24kf2_1", PROCESSOR_24KF2_1, 33 },
807 { "24kf", PROCESSOR_24KF2_1, 33 },
808 { "24kf1_1", PROCESSOR_24KF1_1, 33 },
809 { "24kfx", PROCESSOR_24KF1_1, 33 },
810 { "24kx", PROCESSOR_24KF1_1, 33 },
812 { "24kec", PROCESSOR_24KC, 33 }, /* 24K with DSP */
813 { "24kef2_1", PROCESSOR_24KF2_1, 33 },
814 { "24kef", PROCESSOR_24KF2_1, 33 },
815 { "24kef1_1", PROCESSOR_24KF1_1, 33 },
816 { "24kefx", PROCESSOR_24KF1_1, 33 },
817 { "24kex", PROCESSOR_24KF1_1, 33 },
819 { "34kc", PROCESSOR_24KC, 33 }, /* 34K with MT/DSP */
820 { "34kf2_1", PROCESSOR_24KF2_1, 33 },
821 { "34kf", PROCESSOR_24KF2_1, 33 },
822 { "34kf1_1", PROCESSOR_24KF1_1, 33 },
823 { "34kfx", PROCESSOR_24KF1_1, 33 },
824 { "34kx", PROCESSOR_24KF1_1, 33 },
826 { "74kc", PROCESSOR_74KC, 33 }, /* 74K with DSPr2 */
827 { "74kf2_1", PROCESSOR_74KF2_1, 33 },
828 { "74kf", PROCESSOR_74KF2_1, 33 },
829 { "74kf1_1", PROCESSOR_74KF1_1, 33 },
830 { "74kfx", PROCESSOR_74KF1_1, 33 },
831 { "74kx", PROCESSOR_74KF1_1, 33 },
832 { "74kf3_2", PROCESSOR_74KF3_2, 33 },
835 { "5kc", PROCESSOR_5KC, 64 },
836 { "5kf", PROCESSOR_5KF, 64 },
837 { "20kc", PROCESSOR_20KC, 64 },
838 { "sb1", PROCESSOR_SB1, 64 },
839 { "sb1a", PROCESSOR_SB1A, 64 },
840 { "sr71000", PROCESSOR_SR71000, 64 },
846 /* Default costs. If these are used for a processor we should look
847 up the actual costs. */
848 #define DEFAULT_COSTS COSTS_N_INSNS (6), /* fp_add */ \
849 COSTS_N_INSNS (7), /* fp_mult_sf */ \
850 COSTS_N_INSNS (8), /* fp_mult_df */ \
851 COSTS_N_INSNS (23), /* fp_div_sf */ \
852 COSTS_N_INSNS (36), /* fp_div_df */ \
853 COSTS_N_INSNS (10), /* int_mult_si */ \
854 COSTS_N_INSNS (10), /* int_mult_di */ \
855 COSTS_N_INSNS (69), /* int_div_si */ \
856 COSTS_N_INSNS (69), /* int_div_di */ \
857 2, /* branch_cost */ \
858 4 /* memory_latency */
860 /* Need to replace these with the costs of calling the appropriate
862 #define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */ \
863 COSTS_N_INSNS (256), /* fp_mult_sf */ \
864 COSTS_N_INSNS (256), /* fp_mult_df */ \
865 COSTS_N_INSNS (256), /* fp_div_sf */ \
866 COSTS_N_INSNS (256) /* fp_div_df */
868 static struct mips_rtx_cost_data const mips_rtx_cost_optimize_size =
870 COSTS_N_INSNS (1), /* fp_add */
871 COSTS_N_INSNS (1), /* fp_mult_sf */
872 COSTS_N_INSNS (1), /* fp_mult_df */
873 COSTS_N_INSNS (1), /* fp_div_sf */
874 COSTS_N_INSNS (1), /* fp_div_df */
875 COSTS_N_INSNS (1), /* int_mult_si */
876 COSTS_N_INSNS (1), /* int_mult_di */
877 COSTS_N_INSNS (1), /* int_div_si */
878 COSTS_N_INSNS (1), /* int_div_di */
880 4 /* memory_latency */
883 static struct mips_rtx_cost_data const mips_rtx_cost_data[PROCESSOR_MAX] =
886 COSTS_N_INSNS (2), /* fp_add */
887 COSTS_N_INSNS (4), /* fp_mult_sf */
888 COSTS_N_INSNS (5), /* fp_mult_df */
889 COSTS_N_INSNS (12), /* fp_div_sf */
890 COSTS_N_INSNS (19), /* fp_div_df */
891 COSTS_N_INSNS (12), /* int_mult_si */
892 COSTS_N_INSNS (12), /* int_mult_di */
893 COSTS_N_INSNS (35), /* int_div_si */
894 COSTS_N_INSNS (35), /* int_div_di */
896 4 /* memory_latency */
901 COSTS_N_INSNS (6), /* int_mult_si */
902 COSTS_N_INSNS (6), /* int_mult_di */
903 COSTS_N_INSNS (36), /* int_div_si */
904 COSTS_N_INSNS (36), /* int_div_di */
906 4 /* memory_latency */
910 COSTS_N_INSNS (36), /* int_mult_si */
911 COSTS_N_INSNS (36), /* int_mult_di */
912 COSTS_N_INSNS (37), /* int_div_si */
913 COSTS_N_INSNS (37), /* int_div_di */
915 4 /* memory_latency */
919 COSTS_N_INSNS (4), /* int_mult_si */
920 COSTS_N_INSNS (11), /* int_mult_di */
921 COSTS_N_INSNS (36), /* int_div_si */
922 COSTS_N_INSNS (68), /* int_div_di */
924 4 /* memory_latency */
927 COSTS_N_INSNS (4), /* fp_add */
928 COSTS_N_INSNS (4), /* fp_mult_sf */
929 COSTS_N_INSNS (5), /* fp_mult_df */
930 COSTS_N_INSNS (17), /* fp_div_sf */
931 COSTS_N_INSNS (32), /* fp_div_df */
932 COSTS_N_INSNS (4), /* int_mult_si */
933 COSTS_N_INSNS (11), /* int_mult_di */
934 COSTS_N_INSNS (36), /* int_div_si */
935 COSTS_N_INSNS (68), /* int_div_di */
937 4 /* memory_latency */
940 COSTS_N_INSNS (4), /* fp_add */
941 COSTS_N_INSNS (4), /* fp_mult_sf */
942 COSTS_N_INSNS (5), /* fp_mult_df */
943 COSTS_N_INSNS (17), /* fp_div_sf */
944 COSTS_N_INSNS (32), /* fp_div_df */
945 COSTS_N_INSNS (4), /* int_mult_si */
946 COSTS_N_INSNS (7), /* int_mult_di */
947 COSTS_N_INSNS (42), /* int_div_si */
948 COSTS_N_INSNS (72), /* int_div_di */
950 4 /* memory_latency */
954 COSTS_N_INSNS (5), /* int_mult_si */
955 COSTS_N_INSNS (5), /* int_mult_di */
956 COSTS_N_INSNS (41), /* int_div_si */
957 COSTS_N_INSNS (41), /* int_div_di */
959 4 /* memory_latency */
962 COSTS_N_INSNS (8), /* fp_add */
963 COSTS_N_INSNS (8), /* fp_mult_sf */
964 COSTS_N_INSNS (10), /* fp_mult_df */
965 COSTS_N_INSNS (34), /* fp_div_sf */
966 COSTS_N_INSNS (64), /* fp_div_df */
967 COSTS_N_INSNS (5), /* int_mult_si */
968 COSTS_N_INSNS (5), /* int_mult_di */
969 COSTS_N_INSNS (41), /* int_div_si */
970 COSTS_N_INSNS (41), /* int_div_di */
972 4 /* memory_latency */
975 COSTS_N_INSNS (4), /* fp_add */
976 COSTS_N_INSNS (4), /* fp_mult_sf */
977 COSTS_N_INSNS (5), /* fp_mult_df */
978 COSTS_N_INSNS (17), /* fp_div_sf */
979 COSTS_N_INSNS (32), /* fp_div_df */
980 COSTS_N_INSNS (5), /* int_mult_si */
981 COSTS_N_INSNS (5), /* int_mult_di */
982 COSTS_N_INSNS (41), /* int_div_si */
983 COSTS_N_INSNS (41), /* int_div_di */
985 4 /* memory_latency */
989 COSTS_N_INSNS (5), /* int_mult_si */
990 COSTS_N_INSNS (5), /* int_mult_di */
991 COSTS_N_INSNS (41), /* int_div_si */
992 COSTS_N_INSNS (41), /* int_div_di */
994 4 /* memory_latency */
997 COSTS_N_INSNS (8), /* fp_add */
998 COSTS_N_INSNS (8), /* fp_mult_sf */
999 COSTS_N_INSNS (10), /* fp_mult_df */
1000 COSTS_N_INSNS (34), /* fp_div_sf */
1001 COSTS_N_INSNS (64), /* fp_div_df */
1002 COSTS_N_INSNS (5), /* int_mult_si */
1003 COSTS_N_INSNS (5), /* int_mult_di */
1004 COSTS_N_INSNS (41), /* int_div_si */
1005 COSTS_N_INSNS (41), /* int_div_di */
1006 1, /* branch_cost */
1007 4 /* memory_latency */
1010 COSTS_N_INSNS (4), /* fp_add */
1011 COSTS_N_INSNS (4), /* fp_mult_sf */
1012 COSTS_N_INSNS (5), /* fp_mult_df */
1013 COSTS_N_INSNS (17), /* fp_div_sf */
1014 COSTS_N_INSNS (32), /* fp_div_df */
1015 COSTS_N_INSNS (5), /* int_mult_si */
1016 COSTS_N_INSNS (5), /* int_mult_di */
1017 COSTS_N_INSNS (41), /* int_div_si */
1018 COSTS_N_INSNS (41), /* int_div_di */
1019 1, /* branch_cost */
1020 4 /* memory_latency */
1023 COSTS_N_INSNS (6), /* fp_add */
1024 COSTS_N_INSNS (6), /* fp_mult_sf */
1025 COSTS_N_INSNS (7), /* fp_mult_df */
1026 COSTS_N_INSNS (25), /* fp_div_sf */
1027 COSTS_N_INSNS (48), /* fp_div_df */
1028 COSTS_N_INSNS (5), /* int_mult_si */
1029 COSTS_N_INSNS (5), /* int_mult_di */
1030 COSTS_N_INSNS (41), /* int_div_si */
1031 COSTS_N_INSNS (41), /* int_div_di */
1032 1, /* branch_cost */
1033 4 /* memory_latency */
1039 COSTS_N_INSNS (2), /* fp_add */
1040 COSTS_N_INSNS (4), /* fp_mult_sf */
1041 COSTS_N_INSNS (5), /* fp_mult_df */
1042 COSTS_N_INSNS (12), /* fp_div_sf */
1043 COSTS_N_INSNS (19), /* fp_div_df */
1044 COSTS_N_INSNS (2), /* int_mult_si */
1045 COSTS_N_INSNS (2), /* int_mult_di */
1046 COSTS_N_INSNS (35), /* int_div_si */
1047 COSTS_N_INSNS (35), /* int_div_di */
1048 1, /* branch_cost */
1049 4 /* memory_latency */
1052 COSTS_N_INSNS (3), /* fp_add */
1053 COSTS_N_INSNS (5), /* fp_mult_sf */
1054 COSTS_N_INSNS (6), /* fp_mult_df */
1055 COSTS_N_INSNS (15), /* fp_div_sf */
1056 COSTS_N_INSNS (16), /* fp_div_df */
1057 COSTS_N_INSNS (17), /* int_mult_si */
1058 COSTS_N_INSNS (17), /* int_mult_di */
1059 COSTS_N_INSNS (38), /* int_div_si */
1060 COSTS_N_INSNS (38), /* int_div_di */
1061 2, /* branch_cost */
1062 6 /* memory_latency */
1065 COSTS_N_INSNS (6), /* fp_add */
1066 COSTS_N_INSNS (7), /* fp_mult_sf */
1067 COSTS_N_INSNS (8), /* fp_mult_df */
1068 COSTS_N_INSNS (23), /* fp_div_sf */
1069 COSTS_N_INSNS (36), /* fp_div_df */
1070 COSTS_N_INSNS (10), /* int_mult_si */
1071 COSTS_N_INSNS (10), /* int_mult_di */
1072 COSTS_N_INSNS (69), /* int_div_si */
1073 COSTS_N_INSNS (69), /* int_div_di */
1074 2, /* branch_cost */
1075 6 /* memory_latency */
1087 /* The only costs that appear to be updated here are
1088 integer multiplication. */
1090 COSTS_N_INSNS (4), /* int_mult_si */
1091 COSTS_N_INSNS (6), /* int_mult_di */
1092 COSTS_N_INSNS (69), /* int_div_si */
1093 COSTS_N_INSNS (69), /* int_div_di */
1094 1, /* branch_cost */
1095 4 /* memory_latency */
1107 COSTS_N_INSNS (6), /* fp_add */
1108 COSTS_N_INSNS (4), /* fp_mult_sf */
1109 COSTS_N_INSNS (5), /* fp_mult_df */
1110 COSTS_N_INSNS (23), /* fp_div_sf */
1111 COSTS_N_INSNS (36), /* fp_div_df */
1112 COSTS_N_INSNS (5), /* int_mult_si */
1113 COSTS_N_INSNS (5), /* int_mult_di */
1114 COSTS_N_INSNS (36), /* int_div_si */
1115 COSTS_N_INSNS (36), /* int_div_di */
1116 1, /* branch_cost */
1117 4 /* memory_latency */
1120 COSTS_N_INSNS (6), /* fp_add */
1121 COSTS_N_INSNS (5), /* fp_mult_sf */
1122 COSTS_N_INSNS (6), /* fp_mult_df */
1123 COSTS_N_INSNS (30), /* fp_div_sf */
1124 COSTS_N_INSNS (59), /* fp_div_df */
1125 COSTS_N_INSNS (3), /* int_mult_si */
1126 COSTS_N_INSNS (4), /* int_mult_di */
1127 COSTS_N_INSNS (42), /* int_div_si */
1128 COSTS_N_INSNS (74), /* int_div_di */
1129 1, /* branch_cost */
1130 4 /* memory_latency */
1133 COSTS_N_INSNS (6), /* fp_add */
1134 COSTS_N_INSNS (5), /* fp_mult_sf */
1135 COSTS_N_INSNS (6), /* fp_mult_df */
1136 COSTS_N_INSNS (30), /* fp_div_sf */
1137 COSTS_N_INSNS (59), /* fp_div_df */
1138 COSTS_N_INSNS (5), /* int_mult_si */
1139 COSTS_N_INSNS (9), /* int_mult_di */
1140 COSTS_N_INSNS (42), /* int_div_si */
1141 COSTS_N_INSNS (74), /* int_div_di */
1142 1, /* branch_cost */
1143 4 /* memory_latency */
1146 /* The only costs that are changed here are
1147 integer multiplication. */
1148 COSTS_N_INSNS (6), /* fp_add */
1149 COSTS_N_INSNS (7), /* fp_mult_sf */
1150 COSTS_N_INSNS (8), /* fp_mult_df */
1151 COSTS_N_INSNS (23), /* fp_div_sf */
1152 COSTS_N_INSNS (36), /* fp_div_df */
1153 COSTS_N_INSNS (5), /* int_mult_si */
1154 COSTS_N_INSNS (9), /* int_mult_di */
1155 COSTS_N_INSNS (69), /* int_div_si */
1156 COSTS_N_INSNS (69), /* int_div_di */
1157 1, /* branch_cost */
1158 4 /* memory_latency */
1164 /* The only costs that are changed here are
1165 integer multiplication. */
1166 COSTS_N_INSNS (6), /* fp_add */
1167 COSTS_N_INSNS (7), /* fp_mult_sf */
1168 COSTS_N_INSNS (8), /* fp_mult_df */
1169 COSTS_N_INSNS (23), /* fp_div_sf */
1170 COSTS_N_INSNS (36), /* fp_div_df */
1171 COSTS_N_INSNS (3), /* int_mult_si */
1172 COSTS_N_INSNS (8), /* int_mult_di */
1173 COSTS_N_INSNS (69), /* int_div_si */
1174 COSTS_N_INSNS (69), /* int_div_di */
1175 1, /* branch_cost */
1176 4 /* memory_latency */
1179 /* These costs are the same as the SB-1A below. */
1180 COSTS_N_INSNS (4), /* fp_add */
1181 COSTS_N_INSNS (4), /* fp_mult_sf */
1182 COSTS_N_INSNS (4), /* fp_mult_df */
1183 COSTS_N_INSNS (24), /* fp_div_sf */
1184 COSTS_N_INSNS (32), /* fp_div_df */
1185 COSTS_N_INSNS (3), /* int_mult_si */
1186 COSTS_N_INSNS (4), /* int_mult_di */
1187 COSTS_N_INSNS (36), /* int_div_si */
1188 COSTS_N_INSNS (68), /* int_div_di */
1189 1, /* branch_cost */
1190 4 /* memory_latency */
1193 /* These costs are the same as the SB-1 above. */
1194 COSTS_N_INSNS (4), /* fp_add */
1195 COSTS_N_INSNS (4), /* fp_mult_sf */
1196 COSTS_N_INSNS (4), /* fp_mult_df */
1197 COSTS_N_INSNS (24), /* fp_div_sf */
1198 COSTS_N_INSNS (32), /* fp_div_df */
1199 COSTS_N_INSNS (3), /* int_mult_si */
1200 COSTS_N_INSNS (4), /* int_mult_di */
1201 COSTS_N_INSNS (36), /* int_div_si */
1202 COSTS_N_INSNS (68), /* int_div_di */
1203 1, /* branch_cost */
1204 4 /* memory_latency */
1211 /* If a MIPS16e SAVE or RESTORE instruction saves or restores register
1212 mips16e_s2_s8_regs[X], it must also save the registers in indexes
1213 X + 1 onwards. Likewise mips16e_a0_a3_regs. */
1214 static const unsigned char mips16e_s2_s8_regs[] = {
1215 30, 23, 22, 21, 20, 19, 18
1217 static const unsigned char mips16e_a0_a3_regs[] = {
1221 /* A list of the registers that can be saved by the MIPS16e SAVE instruction,
1222 ordered from the uppermost in memory to the lowest in memory. */
1223 static const unsigned char mips16e_save_restore_regs[] = {
1224 31, 30, 23, 22, 21, 20, 19, 18, 17, 16, 7, 6, 5, 4
1227 /* Initialize the GCC target structure. */
1228 #undef TARGET_ASM_ALIGNED_HI_OP
1229 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
1230 #undef TARGET_ASM_ALIGNED_SI_OP
1231 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
1232 #undef TARGET_ASM_ALIGNED_DI_OP
1233 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
1235 #undef TARGET_ASM_FUNCTION_PROLOGUE
1236 #define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
1237 #undef TARGET_ASM_FUNCTION_EPILOGUE
1238 #define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
1239 #undef TARGET_ASM_SELECT_RTX_SECTION
1240 #define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
1241 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
1242 #define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
1244 #undef TARGET_SCHED_INIT
1245 #define TARGET_SCHED_INIT mips_sched_init
1246 #undef TARGET_SCHED_REORDER
1247 #define TARGET_SCHED_REORDER mips_sched_reorder
1248 #undef TARGET_SCHED_REORDER2
1249 #define TARGET_SCHED_REORDER2 mips_sched_reorder
1250 #undef TARGET_SCHED_VARIABLE_ISSUE
1251 #define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
1252 #undef TARGET_SCHED_ADJUST_COST
1253 #define TARGET_SCHED_ADJUST_COST mips_adjust_cost
1254 #undef TARGET_SCHED_ISSUE_RATE
1255 #define TARGET_SCHED_ISSUE_RATE mips_issue_rate
1256 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1257 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
1258 mips_multipass_dfa_lookahead
1260 #undef TARGET_DEFAULT_TARGET_FLAGS
1261 #define TARGET_DEFAULT_TARGET_FLAGS \
1263 | TARGET_CPU_DEFAULT \
1264 | TARGET_ENDIAN_DEFAULT \
1265 | TARGET_FP_EXCEPTIONS_DEFAULT \
1266 | MASK_CHECK_ZERO_DIV \
1268 #undef TARGET_HANDLE_OPTION
1269 #define TARGET_HANDLE_OPTION mips_handle_option
1271 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1272 #define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
1274 #undef TARGET_INSERT_ATTRIBUTES
1275 #define TARGET_INSERT_ATTRIBUTES mips_insert_attributes
1276 #undef TARGET_MERGE_DECL_ATTRIBUTES
1277 #define TARGET_MERGE_DECL_ATTRIBUTES mips_merge_decl_attributes
1278 #undef TARGET_SET_CURRENT_FUNCTION
1279 #define TARGET_SET_CURRENT_FUNCTION mips_set_current_function
1281 #undef TARGET_VALID_POINTER_MODE
1282 #define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
1283 #undef TARGET_RTX_COSTS
1284 #define TARGET_RTX_COSTS mips_rtx_costs
1285 #undef TARGET_ADDRESS_COST
1286 #define TARGET_ADDRESS_COST mips_address_cost
1288 #undef TARGET_IN_SMALL_DATA_P
1289 #define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
1291 #undef TARGET_MACHINE_DEPENDENT_REORG
1292 #define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
1294 #undef TARGET_ASM_FILE_START
1295 #define TARGET_ASM_FILE_START mips_file_start
1296 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
1297 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
1299 #undef TARGET_INIT_LIBFUNCS
1300 #define TARGET_INIT_LIBFUNCS mips_init_libfuncs
1302 #undef TARGET_BUILD_BUILTIN_VA_LIST
1303 #define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
1304 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1305 #define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
1307 #undef TARGET_PROMOTE_FUNCTION_ARGS
1308 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
1309 #undef TARGET_PROMOTE_FUNCTION_RETURN
1310 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
1311 #undef TARGET_PROMOTE_PROTOTYPES
1312 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
1314 #undef TARGET_RETURN_IN_MEMORY
1315 #define TARGET_RETURN_IN_MEMORY mips_return_in_memory
1316 #undef TARGET_RETURN_IN_MSB
1317 #define TARGET_RETURN_IN_MSB mips_return_in_msb
1319 #undef TARGET_ASM_OUTPUT_MI_THUNK
1320 #define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
1321 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1322 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1324 #undef TARGET_SETUP_INCOMING_VARARGS
1325 #define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
1326 #undef TARGET_STRICT_ARGUMENT_NAMING
1327 #define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
1328 #undef TARGET_MUST_PASS_IN_STACK
1329 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
1330 #undef TARGET_PASS_BY_REFERENCE
1331 #define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
1332 #undef TARGET_CALLEE_COPIES
1333 #define TARGET_CALLEE_COPIES mips_callee_copies
1334 #undef TARGET_ARG_PARTIAL_BYTES
1335 #define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
1337 #undef TARGET_MODE_REP_EXTENDED
1338 #define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended
1340 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1341 #define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
1343 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1344 #define TARGET_SCALAR_MODE_SUPPORTED_P mips_scalar_mode_supported_p
1346 #undef TARGET_INIT_BUILTINS
1347 #define TARGET_INIT_BUILTINS mips_init_builtins
1348 #undef TARGET_EXPAND_BUILTIN
1349 #define TARGET_EXPAND_BUILTIN mips_expand_builtin
1351 #undef TARGET_HAVE_TLS
1352 #define TARGET_HAVE_TLS HAVE_AS_TLS
1354 #undef TARGET_CANNOT_FORCE_CONST_MEM
1355 #define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem
1357 #undef TARGET_ENCODE_SECTION_INFO
1358 #define TARGET_ENCODE_SECTION_INFO mips_encode_section_info
1360 #undef TARGET_ATTRIBUTE_TABLE
1361 #define TARGET_ATTRIBUTE_TABLE mips_attribute_table
1362 /* All our function attributes are related to how out-of-line copies should
1363 be compiled or called. They don't in themselves prevent inlining. */
1364 #undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
1365 #define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true
1367 #undef TARGET_EXTRA_LIVE_ON_ENTRY
1368 #define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry
1370 #undef TARGET_MIN_ANCHOR_OFFSET
1371 #define TARGET_MIN_ANCHOR_OFFSET -32768
1372 #undef TARGET_MAX_ANCHOR_OFFSET
1373 #define TARGET_MAX_ANCHOR_OFFSET 32767
1374 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1375 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
1376 #undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
1377 #define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p
1379 #undef TARGET_COMP_TYPE_ATTRIBUTES
1380 #define TARGET_COMP_TYPE_ATTRIBUTES mips_comp_type_attributes
1382 #ifdef HAVE_AS_DTPRELWORD
1383 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1384 #define TARGET_ASM_OUTPUT_DWARF_DTPREL mips_output_dwarf_dtprel
1387 struct gcc_target targetm = TARGET_INITIALIZER;
1390 /* Predicates to test for presence of "near" and "far"/"long_call"
1391 attributes on the given TYPE. */
1394 mips_near_type_p (const_tree type)
1396 return lookup_attribute ("near", TYPE_ATTRIBUTES (type)) != NULL;
1400 mips_far_type_p (const_tree type)
1402 return (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type)) != NULL
1403 || lookup_attribute ("far", TYPE_ATTRIBUTES (type)) != NULL);
1406 /* Similar predicates for "mips16"/"nomips16" attributes. */
1409 mips_mips16_decl_p (const_tree decl)
1411 return lookup_attribute ("mips16", DECL_ATTRIBUTES (decl)) != NULL;
1415 mips_nomips16_decl_p (const_tree decl)
1417 return lookup_attribute ("nomips16", DECL_ATTRIBUTES (decl)) != NULL;
1420 /* Return 0 if the attributes for two types are incompatible, 1 if they
1421 are compatible, and 2 if they are nearly compatible (which causes a
1422 warning to be generated). */
1425 mips_comp_type_attributes (const_tree type1, const_tree type2)
1427 /* Check for mismatch of non-default calling convention. */
1428 if (TREE_CODE (type1) != FUNCTION_TYPE)
1431 /* Disallow mixed near/far attributes. */
1432 if (mips_far_type_p (type1) && mips_near_type_p (type2))
1434 if (mips_near_type_p (type1) && mips_far_type_p (type2))
1440 /* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
1441 and *OFFSET_PTR. Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise. */
1444 mips_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr)
1446 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
1448 *base_ptr = XEXP (x, 0);
1449 *offset_ptr = INTVAL (XEXP (x, 1));
1458 /* Return true if SYMBOL_REF X is associated with a global symbol
1459 (in the STB_GLOBAL sense). */
1462 mips_global_symbol_p (const_rtx x)
1464 const_tree const decl = SYMBOL_REF_DECL (x);
1467 return !SYMBOL_REF_LOCAL_P (x);
1469 /* Weakref symbols are not TREE_PUBLIC, but their targets are global
1470 or weak symbols. Relocations in the object file will be against
1471 the target symbol, so it's that symbol's binding that matters here. */
1472 return DECL_P (decl) && (TREE_PUBLIC (decl) || DECL_WEAK (decl));
1475 /* Return true if SYMBOL_REF X binds locally. */
1478 mips_symbol_binds_local_p (const_rtx x)
1480 return (SYMBOL_REF_DECL (x)
1481 ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
1482 : SYMBOL_REF_LOCAL_P (x));
1485 /* Return true if rtx constants of mode MODE should be put into a small
1489 mips_rtx_constant_in_small_data_p (enum machine_mode mode)
1491 return (!TARGET_EMBEDDED_DATA
1492 && TARGET_LOCAL_SDATA
1493 && GET_MODE_SIZE (mode) <= mips_section_threshold);
1496 /* Return the method that should be used to access SYMBOL_REF or
1497 LABEL_REF X in context CONTEXT. */
1499 static enum mips_symbol_type
1500 mips_classify_symbol (const_rtx x, enum mips_symbol_context context)
1503 return SYMBOL_GOT_DISP;
1505 if (GET_CODE (x) == LABEL_REF)
1507 /* LABEL_REFs are used for jump tables as well as text labels.
1508 Only return SYMBOL_PC_RELATIVE if we know the label is in
1509 the text section. */
1510 if (TARGET_MIPS16_SHORT_JUMP_TABLES)
1511 return SYMBOL_PC_RELATIVE;
1512 if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
1513 return SYMBOL_GOT_PAGE_OFST;
1514 return SYMBOL_ABSOLUTE;
1517 gcc_assert (GET_CODE (x) == SYMBOL_REF);
1519 if (SYMBOL_REF_TLS_MODEL (x))
1522 if (CONSTANT_POOL_ADDRESS_P (x))
1524 if (TARGET_MIPS16_TEXT_LOADS)
1525 return SYMBOL_PC_RELATIVE;
1527 if (TARGET_MIPS16_PCREL_LOADS && context == SYMBOL_CONTEXT_MEM)
1528 return SYMBOL_PC_RELATIVE;
1530 if (mips_rtx_constant_in_small_data_p (get_pool_mode (x)))
1531 return SYMBOL_GP_RELATIVE;
1534 /* Do not use small-data accesses for weak symbols; they may end up
1537 && SYMBOL_REF_SMALL_P (x)
1538 && !SYMBOL_REF_WEAK (x))
1539 return SYMBOL_GP_RELATIVE;
1541 /* Don't use GOT accesses for locally-binding symbols when -mno-shared
1544 && !(TARGET_ABSOLUTE_ABICALLS && mips_symbol_binds_local_p (x)))
1546 /* There are three cases to consider:
1548 - o32 PIC (either with or without explicit relocs)
1549 - n32/n64 PIC without explicit relocs
1550 - n32/n64 PIC with explicit relocs
1552 In the first case, both local and global accesses will use an
1553 R_MIPS_GOT16 relocation. We must correctly predict which of
1554 the two semantics (local or global) the assembler and linker
1555 will apply. The choice depends on the symbol's binding rather
1556 than its visibility.
1558 In the second case, the assembler will not use R_MIPS_GOT16
1559 relocations, but it chooses between local and global accesses
1560 in the same way as for o32 PIC.
1562 In the third case we have more freedom since both forms of
1563 access will work for any kind of symbol. However, there seems
1564 little point in doing things differently. */
1565 if (mips_global_symbol_p (x))
1566 return SYMBOL_GOT_DISP;
1568 return SYMBOL_GOT_PAGE_OFST;
1571 if (TARGET_MIPS16_PCREL_LOADS && context != SYMBOL_CONTEXT_CALL)
1572 return SYMBOL_FORCE_TO_MEM;
1573 return SYMBOL_ABSOLUTE;
1576 /* Classify symbolic expression X, given that it appears in context
1579 static enum mips_symbol_type
1580 mips_classify_symbolic_expression (rtx x, enum mips_symbol_context context)
1584 split_const (x, &x, &offset);
1585 if (UNSPEC_ADDRESS_P (x))
1586 return UNSPEC_ADDRESS_TYPE (x);
1588 return mips_classify_symbol (x, context);
1591 /* Return true if OFFSET is within the range [0, ALIGN), where ALIGN
1592 is the alignment (in bytes) of SYMBOL_REF X. */
1595 mips_offset_within_alignment_p (rtx x, HOST_WIDE_INT offset)
1597 /* If for some reason we can't get the alignment for the
1598 symbol, initializing this to one means we will only accept
1600 HOST_WIDE_INT align = 1;
1603 /* Get the alignment of the symbol we're referring to. */
1604 t = SYMBOL_REF_DECL (x);
1606 align = DECL_ALIGN_UNIT (t);
1608 return offset >= 0 && offset < align;
1611 /* Return true if X is a symbolic constant that can be used in context
1612 CONTEXT. If it is, store the type of the symbol in *SYMBOL_TYPE. */
1615 mips_symbolic_constant_p (rtx x, enum mips_symbol_context context,
1616 enum mips_symbol_type *symbol_type)
1620 split_const (x, &x, &offset);
1621 if (UNSPEC_ADDRESS_P (x))
1623 *symbol_type = UNSPEC_ADDRESS_TYPE (x);
1624 x = UNSPEC_ADDRESS (x);
1626 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1628 *symbol_type = mips_classify_symbol (x, context);
1629 if (*symbol_type == SYMBOL_TLS)
1635 if (offset == const0_rtx)
1638 /* Check whether a nonzero offset is valid for the underlying
1640 switch (*symbol_type)
1642 case SYMBOL_ABSOLUTE:
1643 case SYMBOL_FORCE_TO_MEM:
1644 case SYMBOL_32_HIGH:
1645 case SYMBOL_64_HIGH:
1648 /* If the target has 64-bit pointers and the object file only
1649 supports 32-bit symbols, the values of those symbols will be
1650 sign-extended. In this case we can't allow an arbitrary offset
1651 in case the 32-bit value X + OFFSET has a different sign from X. */
1652 if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
1653 return offset_within_block_p (x, INTVAL (offset));
1655 /* In other cases the relocations can handle any offset. */
1658 case SYMBOL_PC_RELATIVE:
1659 /* Allow constant pool references to be converted to LABEL+CONSTANT.
1660 In this case, we no longer have access to the underlying constant,
1661 but the original symbol-based access was known to be valid. */
1662 if (GET_CODE (x) == LABEL_REF)
1667 case SYMBOL_GP_RELATIVE:
1668 /* Make sure that the offset refers to something within the
1669 same object block. This should guarantee that the final
1670 PC- or GP-relative offset is within the 16-bit limit. */
1671 return offset_within_block_p (x, INTVAL (offset));
1673 case SYMBOL_GOT_PAGE_OFST:
1674 case SYMBOL_GOTOFF_PAGE:
1675 /* If the symbol is global, the GOT entry will contain the symbol's
1676 address, and we will apply a 16-bit offset after loading it.
1677 If the symbol is local, the linker should provide enough local
1678 GOT entries for a 16-bit offset, but larger offsets may lead
1680 return SMALL_INT (offset);
1684 /* There is no carry between the HI and LO REL relocations, so the
1685 offset is only valid if we know it won't lead to such a carry. */
1686 return mips_offset_within_alignment_p (x, INTVAL (offset));
1688 case SYMBOL_GOT_DISP:
1689 case SYMBOL_GOTOFF_DISP:
1690 case SYMBOL_GOTOFF_CALL:
1691 case SYMBOL_GOTOFF_LOADGP:
1694 case SYMBOL_GOTTPREL:
1703 /* This function is used to implement REG_MODE_OK_FOR_BASE_P. */
1706 mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode, int strict)
1708 if (!HARD_REGISTER_NUM_P (regno))
1712 regno = reg_renumber[regno];
1715 /* These fake registers will be eliminated to either the stack or
1716 hard frame pointer, both of which are usually valid base registers.
1717 Reload deals with the cases where the eliminated form isn't valid. */
1718 if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
1721 /* In mips16 mode, the stack pointer can only address word and doubleword
1722 values, nothing smaller. There are two problems here:
1724 (a) Instantiating virtual registers can introduce new uses of the
1725 stack pointer. If these virtual registers are valid addresses,
1726 the stack pointer should be too.
1728 (b) Most uses of the stack pointer are not made explicit until
1729 FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
1730 We don't know until that stage whether we'll be eliminating to the
1731 stack pointer (which needs the restriction) or the hard frame
1732 pointer (which doesn't).
1734 All in all, it seems more consistent to only enforce this restriction
1735 during and after reload. */
1736 if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
1737 return !strict || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1739 return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
1743 /* Return true if X is a valid base register for the given mode.
1744 Allow only hard registers if STRICT. */
1747 mips_valid_base_register_p (rtx x, enum machine_mode mode, int strict)
1749 if (!strict && GET_CODE (x) == SUBREG)
1753 && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict));
1757 /* Return true if X is a valid address for machine mode MODE. If it is,
1758 fill in INFO appropriately. STRICT is true if we should only accept
1759 hard base registers. */
1762 mips_classify_address (struct mips_address_info *info, rtx x,
1763 enum machine_mode mode, int strict)
1765 switch (GET_CODE (x))
1769 info->type = ADDRESS_REG;
1771 info->offset = const0_rtx;
1772 return mips_valid_base_register_p (info->reg, mode, strict);
1775 info->type = ADDRESS_REG;
1776 info->reg = XEXP (x, 0);
1777 info->offset = XEXP (x, 1);
1778 return (mips_valid_base_register_p (info->reg, mode, strict)
1779 && const_arith_operand (info->offset, VOIDmode));
1782 info->type = ADDRESS_LO_SUM;
1783 info->reg = XEXP (x, 0);
1784 info->offset = XEXP (x, 1);
1785 /* We have to trust the creator of the LO_SUM to do something vaguely
1786 sane. Target-independent code that creates a LO_SUM should also
1787 create and verify the matching HIGH. Target-independent code that
1788 adds an offset to a LO_SUM must prove that the offset will not
1789 induce a carry. Failure to do either of these things would be
1790 a bug, and we are not required to check for it here. The MIPS
1791 backend itself should only create LO_SUMs for valid symbolic
1792 constants, with the high part being either a HIGH or a copy
1795 = mips_classify_symbolic_expression (info->offset, SYMBOL_CONTEXT_MEM);
1796 return (mips_valid_base_register_p (info->reg, mode, strict)
1797 && mips_symbol_insns (info->symbol_type, mode) > 0
1798 && mips_lo_relocs[info->symbol_type] != 0);
1801 /* Small-integer addresses don't occur very often, but they
1802 are legitimate if $0 is a valid base register. */
1803 info->type = ADDRESS_CONST_INT;
1804 return !TARGET_MIPS16 && SMALL_INT (x);
1809 info->type = ADDRESS_SYMBOLIC;
1810 return (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_MEM,
1812 && mips_symbol_insns (info->symbol_type, mode) > 0
1813 && !mips_split_p[info->symbol_type]);
1820 /* Return true if X is a thread-local symbol. */
1823 mips_tls_operand_p (rtx x)
1825 return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
1828 /* Return true if X can not be forced into a constant pool. */
1831 mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
1833 return mips_tls_operand_p (*x);
1836 /* Return true if X can not be forced into a constant pool. */
1839 mips_cannot_force_const_mem (rtx x)
1845 /* As an optimization, reject constants that mips_legitimize_move
1848 Suppose we have a multi-instruction sequence that loads constant C
1849 into register R. If R does not get allocated a hard register, and
1850 R is used in an operand that allows both registers and memory
1851 references, reload will consider forcing C into memory and using
1852 one of the instruction's memory alternatives. Returning false
1853 here will force it to use an input reload instead. */
1854 if (GET_CODE (x) == CONST_INT)
1857 split_const (x, &base, &offset);
1858 if (symbolic_operand (base, VOIDmode) && SMALL_INT (offset))
1862 if (TARGET_HAVE_TLS && for_each_rtx (&x, &mips_tls_symbol_ref_1, 0))
1868 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. We can't use blocks for
1869 constants when we're using a per-function constant pool. */
1872 mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
1873 const_rtx x ATTRIBUTE_UNUSED)
1875 return !TARGET_MIPS16_PCREL_LOADS;
1878 /* Like mips_symbol_insns, but treat extended MIPS16 instructions as a
1879 single instruction. We rely on the fact that, in the worst case,
1880 all instructions involved in a MIPS16 address calculation are usually
1884 mips_symbol_insns_1 (enum mips_symbol_type type, enum machine_mode mode)
1888 case SYMBOL_ABSOLUTE:
1889 /* When using 64-bit symbols, we need 5 preparatory instructions,
1892 lui $at,%highest(symbol)
1893 daddiu $at,$at,%higher(symbol)
1895 daddiu $at,$at,%hi(symbol)
1898 The final address is then $at + %lo(symbol). With 32-bit
1899 symbols we just need a preparatory lui for normal mode and
1900 a preparatory "li; sll" for MIPS16. */
1901 return ABI_HAS_64BIT_SYMBOLS ? 6 : TARGET_MIPS16 ? 3 : 2;
1903 case SYMBOL_GP_RELATIVE:
1904 /* Treat GP-relative accesses as taking a single instruction on
1905 MIPS16 too; the copy of $gp can often be shared. */
1908 case SYMBOL_PC_RELATIVE:
1909 /* PC-relative constants can be only be used with addiupc,
1911 if (mode == MAX_MACHINE_MODE
1912 || GET_MODE_SIZE (mode) == 4
1913 || GET_MODE_SIZE (mode) == 8)
1916 /* The constant must be loaded using addiupc first. */
1919 case SYMBOL_FORCE_TO_MEM:
1920 /* LEAs will be converted into constant-pool references by
1922 if (mode == MAX_MACHINE_MODE)
1925 /* The constant must be loaded from the constant pool. */
1928 case SYMBOL_GOT_DISP:
1929 /* The constant will have to be loaded from the GOT before it
1930 is used in an address. */
1931 if (mode != MAX_MACHINE_MODE)
1936 case SYMBOL_GOT_PAGE_OFST:
1937 /* Unless -funit-at-a-time is in effect, we can't be sure whether
1938 the local/global classification is accurate. See override_options
1941 The worst cases are:
1943 (1) For local symbols when generating o32 or o64 code. The assembler
1949 ...and the final address will be $at + %lo(symbol).
1951 (2) For global symbols when -mxgot. The assembler will use:
1953 lui $at,%got_hi(symbol)
1956 ...and the final address will be $at + %got_lo(symbol). */
1959 case SYMBOL_GOTOFF_PAGE:
1960 case SYMBOL_GOTOFF_DISP:
1961 case SYMBOL_GOTOFF_CALL:
1962 case SYMBOL_GOTOFF_LOADGP:
1963 case SYMBOL_32_HIGH:
1964 case SYMBOL_64_HIGH:
1970 case SYMBOL_GOTTPREL:
1973 /* A 16-bit constant formed by a single relocation, or a 32-bit
1974 constant formed from a high 16-bit relocation and a low 16-bit
1975 relocation. Use mips_split_p to determine which. */
1976 return !mips_split_p[type] ? 1 : TARGET_MIPS16 ? 3 : 2;
1979 /* We don't treat a bare TLS symbol as a constant. */
1985 /* If MODE is MAX_MACHINE_MODE, return the number of instructions needed
1986 to load symbols of type TYPE into a register. Return 0 if the given
1987 type of symbol cannot be used as an immediate operand.
1989 Otherwise, return the number of instructions needed to load or store
1990 values of mode MODE to or from addresses of type TYPE. Return 0 if
1991 the given type of symbol is not valid in addresses.
1993 In both cases, treat extended MIPS16 instructions as two instructions. */
1996 mips_symbol_insns (enum mips_symbol_type type, enum machine_mode mode)
1998 return mips_symbol_insns_1 (type, mode) * (TARGET_MIPS16 ? 2 : 1);
2001 /* Return true if X is a legitimate $sp-based address for mode MDOE. */
2004 mips_stack_address_p (rtx x, enum machine_mode mode)
2006 struct mips_address_info addr;
2008 return (mips_classify_address (&addr, x, mode, false)
2009 && addr.type == ADDRESS_REG
2010 && addr.reg == stack_pointer_rtx);
2013 /* Return true if a value at OFFSET bytes from BASE can be accessed
2014 using an unextended mips16 instruction. MODE is the mode of the
2017 Usually the offset in an unextended instruction is a 5-bit field.
2018 The offset is unsigned and shifted left once for HIs, twice
2019 for SIs, and so on. An exception is SImode accesses off the
2020 stack pointer, which have an 8-bit immediate field. */
2023 mips16_unextended_reference_p (enum machine_mode mode, rtx base, rtx offset)
2026 && GET_CODE (offset) == CONST_INT
2027 && INTVAL (offset) >= 0
2028 && (INTVAL (offset) & (GET_MODE_SIZE (mode) - 1)) == 0)
2030 if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
2031 return INTVAL (offset) < 256 * GET_MODE_SIZE (mode);
2032 return INTVAL (offset) < 32 * GET_MODE_SIZE (mode);
2038 /* Return the number of instructions needed to load or store a value
2039 of mode MODE at X. Return 0 if X isn't valid for MODE. Assume that
2040 multiword moves may need to be split into word moves if MIGHT_SPLIT_P,
2041 otherwise assume that a single load or store is enough.
2043 For mips16 code, count extended instructions as two instructions. */
2046 mips_address_insns (rtx x, enum machine_mode mode, bool might_split_p)
2048 struct mips_address_info addr;
2051 /* BLKmode is used for single unaligned loads and stores and should
2052 not count as a multiword mode. (GET_MODE_SIZE (BLKmode) is pretty
2053 meaningless, so we have to single it out as a special case one way
2055 if (mode != BLKmode && might_split_p)
2056 factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2060 if (mips_classify_address (&addr, x, mode, false))
2065 && !mips16_unextended_reference_p (mode, addr.reg, addr.offset))
2069 case ADDRESS_LO_SUM:
2070 return (TARGET_MIPS16 ? factor * 2 : factor);
2072 case ADDRESS_CONST_INT:
2075 case ADDRESS_SYMBOLIC:
2076 return factor * mips_symbol_insns (addr.symbol_type, mode);
2082 /* Likewise for constant X. */
2085 mips_const_insns (rtx x)
2087 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2088 enum mips_symbol_type symbol_type;
2091 switch (GET_CODE (x))
2094 if (!mips_symbolic_constant_p (XEXP (x, 0), SYMBOL_CONTEXT_LEA,
2096 || !mips_split_p[symbol_type])
2099 /* This is simply an lui for normal mode. It is an extended
2100 "li" followed by an extended "sll" for MIPS16. */
2101 return TARGET_MIPS16 ? 4 : 1;
2105 /* Unsigned 8-bit constants can be loaded using an unextended
2106 LI instruction. Unsigned 16-bit constants can be loaded
2107 using an extended LI. Negative constants must be loaded
2108 using LI and then negated. */
2109 return (INTVAL (x) >= 0 && INTVAL (x) < 256 ? 1
2110 : SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
2111 : INTVAL (x) > -256 && INTVAL (x) < 0 ? 2
2112 : SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
2115 return mips_build_integer (codes, INTVAL (x));
2119 return (!TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0);
2125 /* See if we can refer to X directly. */
2126 if (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_LEA, &symbol_type))
2127 return mips_symbol_insns (symbol_type, MAX_MACHINE_MODE);
2129 /* Otherwise try splitting the constant into a base and offset.
2130 16-bit offsets can be added using an extra addiu. Larger offsets
2131 must be calculated separately and then added to the base. */
2132 split_const (x, &x, &offset);
2135 int n = mips_const_insns (x);
2138 if (SMALL_INT (offset))
2141 return n + 1 + mips_build_integer (codes, INTVAL (offset));
2148 return mips_symbol_insns (mips_classify_symbol (x, SYMBOL_CONTEXT_LEA),
2157 /* Return the number of instructions needed to implement INSN,
2158 given that it loads from or stores to MEM. Count extended
2159 mips16 instructions as two instructions. */
2162 mips_load_store_insns (rtx mem, rtx insn)
2164 enum machine_mode mode;
2168 gcc_assert (MEM_P (mem));
2169 mode = GET_MODE (mem);
2171 /* Try to prove that INSN does not need to be split. */
2172 might_split_p = true;
2173 if (GET_MODE_BITSIZE (mode) == 64)
2175 set = single_set (insn);
2176 if (set && !mips_split_64bit_move_p (SET_DEST (set), SET_SRC (set)))
2177 might_split_p = false;
2180 return mips_address_insns (XEXP (mem, 0), mode, might_split_p);
2184 /* Return the number of instructions needed for an integer division. */
2187 mips_idiv_insns (void)
2192 if (TARGET_CHECK_ZERO_DIV)
2194 if (GENERATE_DIVIDE_TRAPS)
2200 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
2205 /* This function is used to implement GO_IF_LEGITIMATE_ADDRESS. It
2206 returns a nonzero value if X is a legitimate address for a memory
2207 operand of the indicated MODE. STRICT is nonzero if this function
2208 is called during reload. */
2211 mips_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
2213 struct mips_address_info addr;
2215 return mips_classify_address (&addr, x, mode, strict);
2218 /* Emit a move from SRC to DEST. Assume that the move expanders can
2219 handle all moves if !can_create_pseudo_p (). The distinction is
2220 important because, unlike emit_move_insn, the move expanders know
2221 how to force Pmode objects into the constant pool even when the
2222 constant pool address is not itself legitimate. */
2225 mips_emit_move (rtx dest, rtx src)
2227 return (can_create_pseudo_p ()
2228 ? emit_move_insn (dest, src)
2229 : emit_move_insn_1 (dest, src));
2232 /* Copy VALUE to a register and return that register. If new psuedos
2233 are allowed, copy it into a new register, otherwise use DEST. */
2236 mips_force_temporary (rtx dest, rtx value)
2238 if (can_create_pseudo_p ())
2239 return force_reg (Pmode, value);
2242 mips_emit_move (copy_rtx (dest), value);
2248 /* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
2249 it appears in a MEM of that mode. Return true if ADDR is a legitimate
2250 constant in that context and can be split into a high part and a LO_SUM.
2251 If so, and if LO_SUM_OUT is nonnull, emit the high part and return
2252 the LO_SUM in *LO_SUM_OUT. Leave *LO_SUM_OUT unchanged otherwise.
2254 TEMP is as for mips_force_temporary and is used to load the high
2255 part into a register. */
2258 mips_split_symbol (rtx temp, rtx addr, enum machine_mode mode, rtx *lo_sum_out)
2260 enum mips_symbol_context context;
2261 enum mips_symbol_type symbol_type;
2264 context = (mode == MAX_MACHINE_MODE
2265 ? SYMBOL_CONTEXT_LEA
2266 : SYMBOL_CONTEXT_MEM);
2267 if (!mips_symbolic_constant_p (addr, context, &symbol_type)
2268 || mips_symbol_insns (symbol_type, mode) == 0
2269 || !mips_split_p[symbol_type])
2274 if (symbol_type == SYMBOL_GP_RELATIVE)
2276 if (!can_create_pseudo_p ())
2278 emit_insn (gen_load_const_gp (copy_rtx (temp)));
2282 high = mips16_gp_pseudo_reg ();
2286 high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
2287 high = mips_force_temporary (temp, high);
2289 *lo_sum_out = gen_rtx_LO_SUM (Pmode, high, addr);
2295 /* Wrap symbol or label BASE in an unspec address of type SYMBOL_TYPE
2296 and add CONST_INT OFFSET to the result. */
2299 mips_unspec_address_offset (rtx base, rtx offset,
2300 enum mips_symbol_type symbol_type)
2302 base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
2303 UNSPEC_ADDRESS_FIRST + symbol_type);
2304 if (offset != const0_rtx)
2305 base = gen_rtx_PLUS (Pmode, base, offset);
2306 return gen_rtx_CONST (Pmode, base);
2309 /* Return an UNSPEC address with underlying address ADDRESS and symbol
2310 type SYMBOL_TYPE. */
2313 mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
2317 split_const (address, &base, &offset);
2318 return mips_unspec_address_offset (base, offset, symbol_type);
2322 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
2323 high part to BASE and return the result. Just return BASE otherwise.
2324 TEMP is available as a temporary register if needed.
2326 The returned expression can be used as the first operand to a LO_SUM. */
2329 mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
2330 enum mips_symbol_type symbol_type)
2332 if (mips_split_p[symbol_type])
2334 addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
2335 addr = mips_force_temporary (temp, addr);
2336 return mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
2342 /* Return a legitimate address for REG + OFFSET. TEMP is as for
2343 mips_force_temporary; it is only needed when OFFSET is not a
2347 mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
2349 if (!SMALL_OPERAND (offset))
2354 /* Load the full offset into a register so that we can use
2355 an unextended instruction for the address itself. */
2356 high = GEN_INT (offset);
2361 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH. */
2362 high = GEN_INT (CONST_HIGH_PART (offset));
2363 offset = CONST_LOW_PART (offset);
2365 high = mips_force_temporary (temp, high);
2366 reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
2368 return plus_constant (reg, offset);
2371 /* Emit a call to __tls_get_addr. SYM is the TLS symbol we are
2372 referencing, and TYPE is the symbol type to use (either global
2373 dynamic or local dynamic). V0 is an RTX for the return value
2374 location. The entire insn sequence is returned. */
2376 static GTY(()) rtx mips_tls_symbol;
2379 mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
2381 rtx insn, loc, tga, a0;
2383 a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
2385 if (!mips_tls_symbol)
2386 mips_tls_symbol = init_one_libfunc ("__tls_get_addr");
2388 loc = mips_unspec_address (sym, type);
2392 emit_insn (gen_rtx_SET (Pmode, a0,
2393 gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc)));
2394 tga = gen_rtx_MEM (Pmode, mips_tls_symbol);
2395 insn = emit_call_insn (gen_call_value (v0, tga, const0_rtx, const0_rtx));
2396 CONST_OR_PURE_CALL_P (insn) = 1;
2397 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), v0);
2398 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
2399 insn = get_insns ();
2406 /* Generate the code to access LOC, a thread local SYMBOL_REF. The
2407 return value will be a valid address and move_operand (either a REG
2411 mips_legitimize_tls_address (rtx loc)
2413 rtx dest, insn, v0, v1, tmp1, tmp2, eqv;
2414 enum tls_model model;
2418 sorry ("MIPS16 TLS");
2419 return gen_reg_rtx (Pmode);
2422 v0 = gen_rtx_REG (Pmode, GP_RETURN);
2423 v1 = gen_rtx_REG (Pmode, GP_RETURN + 1);
2425 model = SYMBOL_REF_TLS_MODEL (loc);
2426 /* Only TARGET_ABICALLS code can have more than one module; other
2427 code must be be static and should not use a GOT. All TLS models
2428 reduce to local exec in this situation. */
2429 if (!TARGET_ABICALLS)
2430 model = TLS_MODEL_LOCAL_EXEC;
2434 case TLS_MODEL_GLOBAL_DYNAMIC:
2435 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSGD, v0);
2436 dest = gen_reg_rtx (Pmode);
2437 emit_libcall_block (insn, dest, v0, loc);
2440 case TLS_MODEL_LOCAL_DYNAMIC:
2441 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSLDM, v0);
2442 tmp1 = gen_reg_rtx (Pmode);
2444 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2445 share the LDM result with other LD model accesses. */
2446 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2448 emit_libcall_block (insn, tmp1, v0, eqv);
2450 tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_DTPREL);
2451 dest = gen_rtx_LO_SUM (Pmode, tmp2,
2452 mips_unspec_address (loc, SYMBOL_DTPREL));
2455 case TLS_MODEL_INITIAL_EXEC:
2456 tmp1 = gen_reg_rtx (Pmode);
2457 tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL);
2458 if (Pmode == DImode)
2460 emit_insn (gen_tls_get_tp_di (v1));
2461 emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2));
2465 emit_insn (gen_tls_get_tp_si (v1));
2466 emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2));
2468 dest = gen_reg_rtx (Pmode);
2469 emit_insn (gen_add3_insn (dest, tmp1, v1));
2472 case TLS_MODEL_LOCAL_EXEC:
2473 if (Pmode == DImode)
2474 emit_insn (gen_tls_get_tp_di (v1));
2476 emit_insn (gen_tls_get_tp_si (v1));
2478 tmp1 = mips_unspec_offset_high (NULL, v1, loc, SYMBOL_TPREL);
2479 dest = gen_rtx_LO_SUM (Pmode, tmp1,
2480 mips_unspec_address (loc, SYMBOL_TPREL));
2490 /* This function is used to implement LEGITIMIZE_ADDRESS. If *XLOC can
2491 be legitimized in a way that the generic machinery might not expect,
2492 put the new address in *XLOC and return true. MODE is the mode of
2493 the memory being accessed. */
2496 mips_legitimize_address (rtx *xloc, enum machine_mode mode)
2498 if (mips_tls_operand_p (*xloc))
2500 *xloc = mips_legitimize_tls_address (*xloc);
2504 /* See if the address can split into a high part and a LO_SUM. */
2505 if (mips_split_symbol (NULL, *xloc, mode, xloc))
2508 if (GET_CODE (*xloc) == PLUS && GET_CODE (XEXP (*xloc, 1)) == CONST_INT)
2510 /* Handle REG + CONSTANT using mips_add_offset. */
2513 reg = XEXP (*xloc, 0);
2514 if (!mips_valid_base_register_p (reg, mode, 0))
2515 reg = copy_to_mode_reg (Pmode, reg);
2516 *xloc = mips_add_offset (0, reg, INTVAL (XEXP (*xloc, 1)));
2524 /* Subroutine of mips_build_integer (with the same interface).
2525 Assume that the final action in the sequence should be a left shift. */
2528 mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
2530 unsigned int i, shift;
2532 /* Shift VALUE right until its lowest bit is set. Shift arithmetically
2533 since signed numbers are easier to load than unsigned ones. */
2535 while ((value & 1) == 0)
2536 value /= 2, shift++;
2538 i = mips_build_integer (codes, value);
2539 codes[i].code = ASHIFT;
2540 codes[i].value = shift;
2545 /* As for mips_build_shift, but assume that the final action will be
2546 an IOR or PLUS operation. */
2549 mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
2551 unsigned HOST_WIDE_INT high;
2554 high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
2555 if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
2557 /* The constant is too complex to load with a simple lui/ori pair
2558 so our goal is to clear as many trailing zeros as possible.
2559 In this case, we know bit 16 is set and that the low 16 bits
2560 form a negative number. If we subtract that number from VALUE,
2561 we will clear at least the lowest 17 bits, maybe more. */
2562 i = mips_build_integer (codes, CONST_HIGH_PART (value));
2563 codes[i].code = PLUS;
2564 codes[i].value = CONST_LOW_PART (value);
2568 i = mips_build_integer (codes, high);
2569 codes[i].code = IOR;
2570 codes[i].value = value & 0xffff;
2576 /* Fill CODES with a sequence of rtl operations to load VALUE.
2577 Return the number of operations needed. */
2580 mips_build_integer (struct mips_integer_op *codes,
2581 unsigned HOST_WIDE_INT value)
2583 if (SMALL_OPERAND (value)
2584 || SMALL_OPERAND_UNSIGNED (value)
2585 || LUI_OPERAND (value))
2587 /* The value can be loaded with a single instruction. */
2588 codes[0].code = UNKNOWN;
2589 codes[0].value = value;
2592 else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
2594 /* Either the constant is a simple LUI/ORI combination or its
2595 lowest bit is set. We don't want to shift in this case. */
2596 return mips_build_lower (codes, value);
2598 else if ((value & 0xffff) == 0)
2600 /* The constant will need at least three actions. The lowest
2601 16 bits are clear, so the final action will be a shift. */
2602 return mips_build_shift (codes, value);
2606 /* The final action could be a shift, add or inclusive OR.
2607 Rather than use a complex condition to select the best
2608 approach, try both mips_build_shift and mips_build_lower
2609 and pick the one that gives the shortest sequence.
2610 Note that this case is only used once per constant. */
2611 struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
2612 unsigned int cost, alt_cost;
2614 cost = mips_build_shift (codes, value);
2615 alt_cost = mips_build_lower (alt_codes, value);
2616 if (alt_cost < cost)
2618 memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
2626 /* Load VALUE into DEST, using TEMP as a temporary register if need be. */
2629 mips_move_integer (rtx dest, rtx temp, unsigned HOST_WIDE_INT value)
2631 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2632 enum machine_mode mode;
2633 unsigned int i, cost;
2636 mode = GET_MODE (dest);
2637 cost = mips_build_integer (codes, value);
2639 /* Apply each binary operation to X. Invariant: X is a legitimate
2640 source operand for a SET pattern. */
2641 x = GEN_INT (codes[0].value);
2642 for (i = 1; i < cost; i++)
2644 if (!can_create_pseudo_p ())
2646 emit_insn (gen_rtx_SET (VOIDmode, temp, x));
2650 x = force_reg (mode, x);
2651 x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
2654 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
2658 /* Subroutine of mips_legitimize_move. Move constant SRC into register
2659 DEST given that SRC satisfies immediate_operand but doesn't satisfy
2663 mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
2667 /* Split moves of big integers into smaller pieces. */
2668 if (splittable_const_int_operand (src, mode))
2670 mips_move_integer (dest, dest, INTVAL (src));
2674 /* Split moves of symbolic constants into high/low pairs. */
2675 if (mips_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
2677 emit_insn (gen_rtx_SET (VOIDmode, dest, src));
2681 if (mips_tls_operand_p (src))
2683 mips_emit_move (dest, mips_legitimize_tls_address (src));
2687 /* If we have (const (plus symbol offset)), and that expression cannot
2688 be forced into memory, load the symbol first and add in the offset.
2689 In non-MIPS16 mode, prefer to do this even if the constant _can_ be
2690 forced into memory, as it usually produces better code. */
2691 split_const (src, &base, &offset);
2692 if (offset != const0_rtx
2693 && (targetm.cannot_force_const_mem (src)
2694 || (!TARGET_MIPS16 && can_create_pseudo_p ())))
2696 base = mips_force_temporary (dest, base);
2697 mips_emit_move (dest, mips_add_offset (0, base, INTVAL (offset)));
2701 src = force_const_mem (mode, src);
2703 /* When using explicit relocs, constant pool references are sometimes
2704 not legitimate addresses. */
2705 mips_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0));
2706 mips_emit_move (dest, src);
2710 /* If (set DEST SRC) is not a valid instruction, emit an equivalent
2711 sequence that is valid. */
2714 mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
2716 if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
2718 mips_emit_move (dest, force_reg (mode, src));
2722 /* Check for individual, fully-reloaded mflo and mfhi instructions. */
2723 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
2724 && REG_P (src) && MD_REG_P (REGNO (src))
2725 && REG_P (dest) && GP_REG_P (REGNO (dest)))
2727 int other_regno = REGNO (src) == HI_REGNUM ? LO_REGNUM : HI_REGNUM;
2728 if (GET_MODE_SIZE (mode) <= 4)
2729 emit_insn (gen_mfhilo_si (gen_rtx_REG (SImode, REGNO (dest)),
2730 gen_rtx_REG (SImode, REGNO (src)),
2731 gen_rtx_REG (SImode, other_regno)));
2733 emit_insn (gen_mfhilo_di (gen_rtx_REG (DImode, REGNO (dest)),
2734 gen_rtx_REG (DImode, REGNO (src)),
2735 gen_rtx_REG (DImode, other_regno)));
2739 /* We need to deal with constants that would be legitimate
2740 immediate_operands but not legitimate move_operands. */
2741 if (CONSTANT_P (src) && !move_operand (src, mode))
2743 mips_legitimize_const_move (mode, dest, src);
2744 set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
2750 /* We need a lot of little routines to check constant values on the
2751 mips16. These are used to figure out how long the instruction will
2752 be. It would be much better to do this using constraints, but
2753 there aren't nearly enough letters available. */
2756 m16_check_op (rtx op, int low, int high, int mask)
2758 return (GET_CODE (op) == CONST_INT
2759 && INTVAL (op) >= low
2760 && INTVAL (op) <= high
2761 && (INTVAL (op) & mask) == 0);
2765 m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2767 return m16_check_op (op, 0x1, 0x8, 0);
2771 m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2773 return m16_check_op (op, - 0x8, 0x7, 0);
2777 m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2779 return m16_check_op (op, - 0x7, 0x8, 0);
2783 m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2785 return m16_check_op (op, - 0x10, 0xf, 0);
2789 m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2791 return m16_check_op (op, - 0xf, 0x10, 0);
2795 m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2797 return m16_check_op (op, (- 0x10) << 2, 0xf << 2, 3);
2801 m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2803 return m16_check_op (op, (- 0xf) << 2, 0x10 << 2, 3);
2807 m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2809 return m16_check_op (op, - 0x80, 0x7f, 0);
2813 m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2815 return m16_check_op (op, - 0x7f, 0x80, 0);
2819 m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2821 return m16_check_op (op, 0x0, 0xff, 0);
2825 m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2827 return m16_check_op (op, - 0xff, 0x0, 0);
2831 m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2833 return m16_check_op (op, - 0x1, 0xfe, 0);
2837 m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2839 return m16_check_op (op, 0x0, 0xff << 2, 3);
2843 m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2845 return m16_check_op (op, (- 0xff) << 2, 0x0, 3);
2849 m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2851 return m16_check_op (op, (- 0x80) << 3, 0x7f << 3, 7);
2855 m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2857 return m16_check_op (op, (- 0x7f) << 3, 0x80 << 3, 7);
2860 /* Return true if ADDR matches the pattern for the lwxs load scaled indexed
2861 address instruction. */
2864 mips_lwxs_address_p (rtx addr)
2867 && GET_CODE (addr) == PLUS
2868 && REG_P (XEXP (addr, 1)))
2870 rtx offset = XEXP (addr, 0);
2871 if (GET_CODE (offset) == MULT
2872 && REG_P (XEXP (offset, 0))
2873 && GET_CODE (XEXP (offset, 1)) == CONST_INT
2874 && INTVAL (XEXP (offset, 1)) == 4)
2880 /* The cost of loading values from the constant pool. It should be
2881 larger than the cost of any constant we want to synthesize inline. */
2883 #define CONSTANT_POOL_COST COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 8)
2885 /* Return the cost of X when used as an operand to the MIPS16 instruction
2886 that implements CODE. Return -1 if there is no such instruction, or if
2887 X is not a valid immediate operand for it. */
2890 mips16_constant_cost (int code, HOST_WIDE_INT x)
2897 /* Shifts by between 1 and 8 bits (inclusive) are unextended,
2898 other shifts are extended. The shift patterns truncate the shift
2899 count to the right size, so there are no out-of-range values. */
2900 if (IN_RANGE (x, 1, 8))
2902 return COSTS_N_INSNS (1);
2905 if (IN_RANGE (x, -128, 127))
2907 if (SMALL_OPERAND (x))
2908 return COSTS_N_INSNS (1);
2912 /* Like LE, but reject the always-true case. */
2916 /* We add 1 to the immediate and use SLT. */
2919 /* We can use CMPI for an xor with an unsigned 16-bit X. */
2922 if (IN_RANGE (x, 0, 255))
2924 if (SMALL_OPERAND_UNSIGNED (x))
2925 return COSTS_N_INSNS (1);
2930 /* Equality comparisons with 0 are cheap. */
2940 /* Return true if there is a non-MIPS16 instruction that implements CODE
2941 and if that instruction accepts X as an immediate operand. */
2944 mips_immediate_operand_p (int code, HOST_WIDE_INT x)
2951 /* All shift counts are truncated to a valid constant. */
2956 /* Likewise rotates, if the target supports rotates at all. */
2962 /* These instructions take 16-bit unsigned immediates. */
2963 return SMALL_OPERAND_UNSIGNED (x);
2968 /* These instructions take 16-bit signed immediates. */
2969 return SMALL_OPERAND (x);
2975 /* The "immediate" forms of these instructions are really
2976 implemented as comparisons with register 0. */
2981 /* Likewise, meaning that the only valid immediate operand is 1. */
2985 /* We add 1 to the immediate and use SLT. */
2986 return SMALL_OPERAND (x + 1);
2989 /* Likewise SLTU, but reject the always-true case. */
2990 return SMALL_OPERAND (x + 1) && x + 1 != 0;
2994 /* The bit position and size are immediate operands. */
2995 return ISA_HAS_EXT_INS;
2998 /* By default assume that $0 can be used for 0. */
3003 /* Return the cost of binary operation X, given that the instruction
3004 sequence for a word-sized or smaller operation has cost SINGLE_COST
3005 and that the sequence of a double-word operation has cost DOUBLE_COST. */
3008 mips_binary_cost (rtx x, int single_cost, int double_cost)
3012 if (GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD * 2)
3017 + rtx_cost (XEXP (x, 0), 0)
3018 + rtx_cost (XEXP (x, 1), GET_CODE (x)));
3021 /* Return the cost of floating-point multiplications of mode MODE. */
3024 mips_fp_mult_cost (enum machine_mode mode)
3026 return mode == DFmode ? mips_cost->fp_mult_df : mips_cost->fp_mult_sf;
3029 /* Return the cost of floating-point divisions of mode MODE. */
3032 mips_fp_div_cost (enum machine_mode mode)
3034 return mode == DFmode ? mips_cost->fp_div_df : mips_cost->fp_div_sf;
3037 /* Return the cost of sign-extending OP to mode MODE, not including the
3038 cost of OP itself. */
3041 mips_sign_extend_cost (enum machine_mode mode, rtx op)
3044 /* Extended loads are as cheap as unextended ones. */
3047 if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
3048 /* A sign extension from SImode to DImode in 64-bit mode is free. */
3051 if (ISA_HAS_SEB_SEH || GENERATE_MIPS16E)
3052 /* We can use SEB or SEH. */
3053 return COSTS_N_INSNS (1);
3055 /* We need to use a shift left and a shift right. */
3056 return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
3059 /* Return the cost of zero-extending OP to mode MODE, not including the
3060 cost of OP itself. */
3063 mips_zero_extend_cost (enum machine_mode mode, rtx op)
3066 /* Extended loads are as cheap as unextended ones. */
3069 if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
3070 /* We need a shift left by 32 bits and a shift right by 32 bits. */
3071 return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
3073 if (GENERATE_MIPS16E)
3074 /* We can use ZEB or ZEH. */
3075 return COSTS_N_INSNS (1);
3078 /* We need to load 0xff or 0xffff into a register and use AND. */
3079 return COSTS_N_INSNS (GET_MODE (op) == QImode ? 2 : 3);
3081 /* We can use ANDI. */
3082 return COSTS_N_INSNS (1);
3085 /* Implement TARGET_RTX_COSTS. */
3088 mips_rtx_costs (rtx x, int code, int outer_code, int *total)
3090 enum machine_mode mode = GET_MODE (x);
3091 bool float_mode_p = FLOAT_MODE_P (mode);
3095 /* The cost of a COMPARE is hard to define for MIPS. COMPAREs don't
3096 appear in the instruction stream, and the cost of a comparison is
3097 really the cost of the branch or scc condition. At the time of
3098 writing, gcc only uses an explicit outer COMPARE code when optabs
3099 is testing whether a constant is expensive enough to force into a
3100 register. We want optabs to pass such constants through the MIPS
3101 expanders instead, so make all constants very cheap here. */
3102 if (outer_code == COMPARE)
3104 gcc_assert (CONSTANT_P (x));
3112 /* Treat *clear_upper32-style ANDs as having zero cost in the
3113 second operand. The cost is entirely in the first operand.
3115 ??? This is needed because we would otherwise try to CSE
3116 the constant operand. Although that's the right thing for
3117 instructions that continue to be a register operation throughout
3118 compilation, it is disastrous for instructions that could
3119 later be converted into a memory operation. */
3121 && outer_code == AND
3122 && UINTVAL (x) == 0xffffffff)
3130 cost = mips16_constant_cost (outer_code, INTVAL (x));
3139 /* When not optimizing for size, we care more about the cost
3140 of hot code, and hot code is often in a loop. If a constant
3141 operand needs to be forced into a register, we will often be
3142 able to hoist the constant load out of the loop, so the load
3143 should not contribute to the cost. */
3145 || mips_immediate_operand_p (outer_code, INTVAL (x)))
3157 if (force_to_mem_operand (x, VOIDmode))
3159 *total = COSTS_N_INSNS (1);
3162 cost = mips_const_insns (x);
3165 /* If the constant is likely to be stored in a GPR, SETs of
3166 single-insn constants are as cheap as register sets; we
3167 never want to CSE them.
3169 Don't reduce the cost of storing a floating-point zero in
3170 FPRs. If we have a zero in an FPR for other reasons, we
3171 can get better cfg-cleanup and delayed-branch results by
3172 using it consistently, rather than using $0 sometimes and
3173 an FPR at other times. Also, moves between floating-point
3174 registers are sometimes cheaper than (D)MTC1 $0. */
3176 && outer_code == SET
3177 && !(float_mode_p && TARGET_HARD_FLOAT))
3179 /* When non-MIPS16 code loads a constant N>1 times, we rarely
3180 want to CSE the constant itself. It is usually better to
3181 have N copies of the last operation in the sequence and one
3182 shared copy of the other operations. (Note that this is
3183 not true for MIPS16 code, where the final operation in the
3184 sequence is often an extended instruction.)
3186 Also, if we have a CONST_INT, we don't know whether it is
3187 for a word or doubleword operation, so we cannot rely on
3188 the result of mips_build_integer. */
3189 else if (!TARGET_MIPS16
3190 && (outer_code == SET || mode == VOIDmode))
3192 *total = COSTS_N_INSNS (cost);
3195 /* The value will need to be fetched from the constant pool. */
3196 *total = CONSTANT_POOL_COST;
3200 /* If the address is legitimate, return the number of
3201 instructions it needs. */
3203 cost = mips_address_insns (addr, mode, true);
3206 *total = COSTS_N_INSNS (cost + 1);
3209 /* Check for a scaled indexed address. */
3210 if (mips_lwxs_address_p (addr))
3212 *total = COSTS_N_INSNS (2);
3215 /* Otherwise use the default handling. */
3219 *total = COSTS_N_INSNS (6);
3223 *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1);
3227 /* Check for a *clear_upper32 pattern and treat it like a zero
3228 extension. See the pattern's comment for details. */
3231 && CONST_INT_P (XEXP (x, 1))
3232 && UINTVAL (XEXP (x, 1)) == 0xffffffff)
3234 *total = (mips_zero_extend_cost (mode, XEXP (x, 0))
3235 + rtx_cost (XEXP (x, 0), 0));
3242 /* Double-word operations use two single-word operations. */
3243 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (2));
3251 if (CONSTANT_P (XEXP (x, 1)))
3252 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4));
3254 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (12));
3259 *total = mips_cost->fp_add;
3261 *total = COSTS_N_INSNS (4);
3265 /* Low-part immediates need an extended MIPS16 instruction. */
3266 *total = (COSTS_N_INSNS (TARGET_MIPS16 ? 2 : 1)
3267 + rtx_cost (XEXP (x, 0), 0));
3282 /* Branch comparisons have VOIDmode, so use the first operand's
3284 mode = GET_MODE (XEXP (x, 0));
3285 if (FLOAT_MODE_P (mode))
3287 *total = mips_cost->fp_add;
3290 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4));
3295 && ISA_HAS_NMADD_NMSUB
3296 && TARGET_FUSED_MADD
3297 && !HONOR_NANS (mode)
3298 && !HONOR_SIGNED_ZEROS (mode))
3300 /* See if we can use NMADD or NMSUB. See mips.md for the
3301 associated patterns. */
3302 rtx op0 = XEXP (x, 0);
3303 rtx op1 = XEXP (x, 1);
3304 if (GET_CODE (op0) == MULT && GET_CODE (XEXP (op0, 0)) == NEG)
3306 *total = (mips_fp_mult_cost (mode)
3307 + rtx_cost (XEXP (XEXP (op0, 0), 0), 0)
3308 + rtx_cost (XEXP (op0, 1), 0)
3309 + rtx_cost (op1, 0));
3312 if (GET_CODE (op1) == MULT)
3314 *total = (mips_fp_mult_cost (mode)
3316 + rtx_cost (XEXP (op1, 0), 0)
3317 + rtx_cost (XEXP (op1, 1), 0));
3327 && TARGET_FUSED_MADD
3328 && GET_CODE (XEXP (x, 0)) == MULT)
3331 *total = mips_cost->fp_add;
3335 /* Double-word operations require three single-word operations and
3336 an SLTU. The MIPS16 version then needs to move the result of
3337 the SLTU from $24 to a MIPS16 register. */
3338 *total = mips_binary_cost (x, COSTS_N_INSNS (1),
3339 COSTS_N_INSNS (TARGET_MIPS16 ? 5 : 4));
3344 && ISA_HAS_NMADD_NMSUB
3345 && TARGET_FUSED_MADD
3346 && !HONOR_NANS (mode)
3347 && HONOR_SIGNED_ZEROS (mode))
3349 /* See if we can use NMADD or NMSUB. See mips.md for the
3350 associated patterns. */
3351 rtx op = XEXP (x, 0);
3352 if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
3353 && GET_CODE (XEXP (op, 0)) == MULT)
3355 *total = (mips_fp_mult_cost (mode)
3356 + rtx_cost (XEXP (XEXP (op, 0), 0), 0)
3357 + rtx_cost (XEXP (XEXP (op, 0), 1), 0)
3358 + rtx_cost (XEXP (op, 1), 0));
3364 *total = mips_cost->fp_add;
3366 *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 4 : 1);
3371 *total = mips_fp_mult_cost (mode);
3372 else if (mode == DImode && !TARGET_64BIT)
3373 /* Synthesized from 2 mulsi3s, 1 mulsidi3 and two additions,
3374 where the mulsidi3 always includes an MFHI and an MFLO. */
3375 *total = (optimize_size
3376 ? COSTS_N_INSNS (ISA_HAS_MUL3 ? 7 : 9)
3377 : mips_cost->int_mult_si * 3 + 6);
3378 else if (optimize_size)
3379 *total = (ISA_HAS_MUL3 ? 1 : 2);
3380 else if (mode == DImode)
3381 *total = mips_cost->int_mult_di;
3383 *total = mips_cost->int_mult_si;
3387 /* Check for a reciprocal. */
3388 if (float_mode_p && XEXP (x, 0) == CONST1_RTX (mode))
3391 && flag_unsafe_math_optimizations
3392 && (outer_code == SQRT || GET_CODE (XEXP (x, 1)) == SQRT))
3394 /* An rsqrt<mode>a or rsqrt<mode>b pattern. Count the
3395 division as being free. */
3396 *total = rtx_cost (XEXP (x, 1), 0);
3401 *total = mips_fp_div_cost (mode) + rtx_cost (XEXP (x, 1), 0);
3411 *total = mips_fp_div_cost (mode);
3420 /* It is our responsibility to make division by a power of 2
3421 as cheap as 2 register additions if we want the division
3422 expanders to be used for such operations; see the setting
3423 of sdiv_pow2_cheap in optabs.c. Using (D)DIV for MIPS16
3424 should always produce shorter code than using
3425 expand_sdiv2_pow2. */
3427 && CONST_INT_P (XEXP (x, 1))
3428 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
3430 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), 0);
3433 *total = COSTS_N_INSNS (mips_idiv_insns ());
3435 else if (mode == DImode)
3436 *total = mips_cost->int_div_di;
3438 *total = mips_cost->int_div_si;
3442 *total = mips_sign_extend_cost (mode, XEXP (x, 0));
3446 *total = mips_zero_extend_cost (mode, XEXP (x, 0));
3450 case UNSIGNED_FLOAT:
3453 case FLOAT_TRUNCATE:
3454 *total = mips_cost->fp_add;
3462 /* Provide the costs of an addressing mode that contains ADDR.
3463 If ADDR is not a valid address, its cost is irrelevant. */
3466 mips_address_cost (rtx addr)
3468 return mips_address_insns (addr, SImode, false);
3471 /* Return one word of double-word value OP, taking into account the fixed
3472 endianness of certain registers. HIGH_P is true to select the high part,
3473 false to select the low part. */
3476 mips_subword (rtx op, int high_p)
3479 enum machine_mode mode;
3481 mode = GET_MODE (op);
3482 if (mode == VOIDmode)
3485 if (TARGET_BIG_ENDIAN ? !high_p : high_p)
3486 byte = UNITS_PER_WORD;
3490 if (FP_REG_RTX_P (op))
3491 return gen_rtx_REG (word_mode, high_p ? REGNO (op) + 1 : REGNO (op));
3494 return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
3496 return simplify_gen_subreg (word_mode, op, mode, byte);
3500 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
3503 mips_split_64bit_move_p (rtx dest, rtx src)
3508 /* FP->FP moves can be done in a single instruction. */
3509 if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
3512 /* Check for floating-point loads and stores. They can be done using
3513 ldc1 and sdc1 on MIPS II and above. */
3516 if (FP_REG_RTX_P (dest) && MEM_P (src))
3518 if (FP_REG_RTX_P (src) && MEM_P (dest))
3525 /* Split a 64-bit move from SRC to DEST assuming that
3526 mips_split_64bit_move_p holds.
3528 Moves into and out of FPRs cause some difficulty here. Such moves
3529 will always be DFmode, since paired FPRs are not allowed to store
3530 DImode values. The most natural representation would be two separate
3531 32-bit moves, such as:
3533 (set (reg:SI $f0) (mem:SI ...))
3534 (set (reg:SI $f1) (mem:SI ...))
3536 However, the second insn is invalid because odd-numbered FPRs are
3537 not allowed to store independent values. Use the patterns load_df_low,
3538 load_df_high and store_df_high instead. */
3541 mips_split_64bit_move (rtx dest, rtx src)
3543 if (FP_REG_RTX_P (dest))
3545 /* Loading an FPR from memory or from GPRs. */
3548 dest = gen_lowpart (DFmode, dest);
3549 emit_insn (gen_load_df_low (dest, mips_subword (src, 0)));
3550 emit_insn (gen_mthc1 (dest, mips_subword (src, 1),
3555 emit_insn (gen_load_df_low (copy_rtx (dest),
3556 mips_subword (src, 0)));
3557 emit_insn (gen_load_df_high (dest, mips_subword (src, 1),
3561 else if (FP_REG_RTX_P (src))
3563 /* Storing an FPR into memory or GPRs. */
3566 src = gen_lowpart (DFmode, src);
3567 mips_emit_move (mips_subword (dest, 0), mips_subword (src, 0));
3568 emit_insn (gen_mfhc1 (mips_subword (dest, 1), src));
3572 mips_emit_move (mips_subword (dest, 0), mips_subword (src, 0));
3573 emit_insn (gen_store_df_high (mips_subword (dest, 1), src));
3578 /* The operation can be split into two normal moves. Decide in
3579 which order to do them. */
3582 low_dest = mips_subword (dest, 0);
3583 if (REG_P (low_dest)
3584 && reg_overlap_mentioned_p (low_dest, src))
3586 mips_emit_move (mips_subword (dest, 1), mips_subword (src, 1));
3587 mips_emit_move (low_dest, mips_subword (src, 0));
3591 mips_emit_move (low_dest, mips_subword (src, 0));
3592 mips_emit_move (mips_subword (dest, 1), mips_subword (src, 1));
3597 /* Return the appropriate instructions to move SRC into DEST. Assume
3598 that SRC is operand 1 and DEST is operand 0. */
3601 mips_output_move (rtx dest, rtx src)
3603 enum rtx_code dest_code, src_code;
3604 enum mips_symbol_type symbol_type;
3607 dest_code = GET_CODE (dest);
3608 src_code = GET_CODE (src);
3609 dbl_p = (GET_MODE_SIZE (GET_MODE (dest)) == 8);
3611 if (dbl_p && mips_split_64bit_move_p (dest, src))
3614 if ((src_code == REG && GP_REG_P (REGNO (src)))
3615 || (!TARGET_MIPS16 && src == CONST0_RTX (GET_MODE (dest))))
3617 if (dest_code == REG)
3619 if (GP_REG_P (REGNO (dest)))
3620 return "move\t%0,%z1";
3622 if (MD_REG_P (REGNO (dest)))
3625 if (DSP_ACC_REG_P (REGNO (dest)))
3627 static char retval[] = "mt__\t%z1,%q0";
3628 retval[2] = reg_names[REGNO (dest)][4];
3629 retval[3] = reg_names[REGNO (dest)][5];
3633 if (FP_REG_P (REGNO (dest)))
3634 return (dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0");
3636 if (ALL_COP_REG_P (REGNO (dest)))
3638 static char retval[] = "dmtc_\t%z1,%0";
3640 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3641 return (dbl_p ? retval : retval + 1);
3644 if (dest_code == MEM)
3645 return (dbl_p ? "sd\t%z1,%0" : "sw\t%z1,%0");
3647 if (dest_code == REG && GP_REG_P (REGNO (dest)))
3649 if (src_code == REG)
3651 if (DSP_ACC_REG_P (REGNO (src)))
3653 static char retval[] = "mf__\t%0,%q1";
3654 retval[2] = reg_names[REGNO (src)][4];
3655 retval[3] = reg_names[REGNO (src)][5];
3659 if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
3660 return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
3662 if (FP_REG_P (REGNO (src)))
3663 return (dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1");
3665 if (ALL_COP_REG_P (REGNO (src)))
3667 static char retval[] = "dmfc_\t%0,%1";
3669 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3670 return (dbl_p ? retval : retval + 1);
3674 if (src_code == MEM)
3675 return (dbl_p ? "ld\t%0,%1" : "lw\t%0,%1");
3677 if (src_code == CONST_INT)
3679 /* Don't use the X format, because that will give out of
3680 range numbers for 64-bit hosts and 32-bit targets. */
3682 return "li\t%0,%1\t\t\t# %X1";
3684 if (INTVAL (src) >= 0 && INTVAL (src) <= 0xffff)
3687 if (INTVAL (src) < 0 && INTVAL (src) >= -0xffff)
3691 if (src_code == HIGH)
3692 return TARGET_MIPS16 ? "#" : "lui\t%0,%h1";
3694 if (CONST_GP_P (src))
3695 return "move\t%0,%1";
3697 if (mips_symbolic_constant_p (src, SYMBOL_CONTEXT_LEA, &symbol_type)
3698 && mips_lo_relocs[symbol_type] != 0)
3700 /* A signed 16-bit constant formed by applying a relocation
3701 operator to a symbolic address. */
3702 gcc_assert (!mips_split_p[symbol_type]);
3703 return "li\t%0,%R1";
3706 if (symbolic_operand (src, VOIDmode))
3708 gcc_assert (TARGET_MIPS16
3709 ? TARGET_MIPS16_TEXT_LOADS
3710 : !TARGET_EXPLICIT_RELOCS);
3711 return (dbl_p ? "dla\t%0,%1" : "la\t%0,%1");
3714 if (src_code == REG && FP_REG_P (REGNO (src)))
3716 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3718 if (GET_MODE (dest) == V2SFmode)
3719 return "mov.ps\t%0,%1";
3721 return (dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1");
3724 if (dest_code == MEM)
3725 return (dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0");
3727 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3729 if (src_code == MEM)
3730 return (dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1");
3732 if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
3734 static char retval[] = "l_c_\t%0,%1";
3736 retval[1] = (dbl_p ? 'd' : 'w');
3737 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3740 if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
3742 static char retval[] = "s_c_\t%1,%0";
3744 retval[1] = (dbl_p ? 'd' : 'w');
3745 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3751 /* Restore $gp from its save slot. Valid only when using o32 or
3755 mips_restore_gp (void)
3759 gcc_assert (TARGET_ABICALLS && TARGET_OLDABI);
3761 address = mips_add_offset (pic_offset_table_rtx,
3762 frame_pointer_needed
3763 ? hard_frame_pointer_rtx
3764 : stack_pointer_rtx,
3765 current_function_outgoing_args_size);
3766 slot = gen_rtx_MEM (Pmode, address);
3768 mips_emit_move (pic_offset_table_rtx, slot);
3769 if (!TARGET_EXPLICIT_RELOCS)
3770 emit_insn (gen_blockage ());
3773 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
3776 mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
3778 emit_insn (gen_rtx_SET (VOIDmode, target,
3779 gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
3782 /* Return true if CMP1 is a suitable second operand for relational
3783 operator CODE. See also the *sCC patterns in mips.md. */
3786 mips_relational_operand_ok_p (enum rtx_code code, rtx cmp1)
3792 return reg_or_0_operand (cmp1, VOIDmode);
3796 return !TARGET_MIPS16 && cmp1 == const1_rtx;
3800 return arith_operand (cmp1, VOIDmode);
3803 return sle_operand (cmp1, VOIDmode);
3806 return sleu_operand (cmp1, VOIDmode);
3813 /* Canonicalize LE or LEU comparisons into LT comparisons when
3814 possible to avoid extra instructions or inverting the
3818 mips_canonicalize_comparison (enum rtx_code *code, rtx *cmp1,
3819 enum machine_mode mode)
3821 HOST_WIDE_INT original, plus_one;
3823 if (GET_CODE (*cmp1) != CONST_INT)
3826 original = INTVAL (*cmp1);
3827 plus_one = trunc_int_for_mode ((unsigned HOST_WIDE_INT) original + 1, mode);
3832 if (original < plus_one)
3835 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3844 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3857 /* Compare CMP0 and CMP1 using relational operator CODE and store the
3858 result in TARGET. CMP0 and TARGET are register_operands that have
3859 the same integer mode. If INVERT_PTR is nonnull, it's OK to set
3860 TARGET to the inverse of the result and flip *INVERT_PTR instead. */
3863 mips_emit_int_relational (enum rtx_code code, bool *invert_ptr,
3864 rtx target, rtx cmp0, rtx cmp1)
3866 /* First see if there is a MIPS instruction that can do this operation
3867 with CMP1 in its current form. If not, try to canonicalize the
3868 comparison to LT. If that fails, try doing the same for the
3869 inverse operation. If that also fails, force CMP1 into a register
3871 if (mips_relational_operand_ok_p (code, cmp1))
3872 mips_emit_binary (code, target, cmp0, cmp1);
3873 else if (mips_canonicalize_comparison (&code, &cmp1, GET_MODE (target)))
3874 mips_emit_binary (code, target, cmp0, cmp1);
3877 enum rtx_code inv_code = reverse_condition (code);
3878 if (!mips_relational_operand_ok_p (inv_code, cmp1))
3880 cmp1 = force_reg (GET_MODE (cmp0), cmp1);
3881 mips_emit_int_relational (code, invert_ptr, target, cmp0, cmp1);
3883 else if (invert_ptr == 0)
3885 rtx inv_target = gen_reg_rtx (GET_MODE (target));
3886 mips_emit_binary (inv_code, inv_target, cmp0, cmp1);
3887 mips_emit_binary (XOR, target, inv_target, const1_rtx);
3891 *invert_ptr = !*invert_ptr;
3892 mips_emit_binary (inv_code, target, cmp0, cmp1);
3897 /* Return a register that is zero iff CMP0 and CMP1 are equal.
3898 The register will have the same mode as CMP0. */
3901 mips_zero_if_equal (rtx cmp0, rtx cmp1)
3903 if (cmp1 == const0_rtx)
3906 if (uns_arith_operand (cmp1, VOIDmode))
3907 return expand_binop (GET_MODE (cmp0), xor_optab,
3908 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3910 return expand_binop (GET_MODE (cmp0), sub_optab,
3911 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3914 /* Convert *CODE into a code that can be used in a floating-point
3915 scc instruction (c.<cond>.<fmt>). Return true if the values of
3916 the condition code registers will be inverted, with 0 indicating
3917 that the condition holds. */
3920 mips_reverse_fp_cond_p (enum rtx_code *code)
3927 *code = reverse_condition_maybe_unordered (*code);
3935 /* Convert a comparison into something that can be used in a branch or
3936 conditional move. cmp_operands[0] and cmp_operands[1] are the values
3937 being compared and *CODE is the code used to compare them.
3939 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
3940 If NEED_EQ_NE_P, then only EQ/NE comparisons against zero are possible,
3941 otherwise any standard branch condition can be used. The standard branch
3944 - EQ/NE between two registers.
3945 - any comparison between a register and zero. */
3948 mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
3950 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) == MODE_INT)
3952 if (!need_eq_ne_p && cmp_operands[1] == const0_rtx)
3954 *op0 = cmp_operands[0];
3955 *op1 = cmp_operands[1];
3957 else if (*code == EQ || *code == NE)
3961 *op0 = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3966 *op0 = cmp_operands[0];
3967 *op1 = force_reg (GET_MODE (*op0), cmp_operands[1]);
3972 /* The comparison needs a separate scc instruction. Store the
3973 result of the scc in *OP0 and compare it against zero. */
3974 bool invert = false;
3975 *op0 = gen_reg_rtx (GET_MODE (cmp_operands[0]));
3977 mips_emit_int_relational (*code, &invert, *op0,
3978 cmp_operands[0], cmp_operands[1]);
3979 *code = (invert ? EQ : NE);
3982 else if (ALL_FIXED_POINT_MODE_P (GET_MODE (cmp_operands[0])))
3984 *op0 = gen_rtx_REG (CCDSPmode, CCDSP_CC_REGNUM);
3985 mips_emit_binary (*code, *op0, cmp_operands[0], cmp_operands[1]);
3991 enum rtx_code cmp_code;
3993 /* Floating-point tests use a separate c.cond.fmt comparison to
3994 set a condition code register. The branch or conditional move
3995 will then compare that register against zero.
3997 Set CMP_CODE to the code of the comparison instruction and
3998 *CODE to the code that the branch or move should use. */
4000 *code = mips_reverse_fp_cond_p (&cmp_code) ? EQ : NE;
4002 ? gen_reg_rtx (CCmode)
4003 : gen_rtx_REG (CCmode, FPSW_REGNUM));
4005 mips_emit_binary (cmp_code, *op0, cmp_operands[0], cmp_operands[1]);
4009 /* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE.
4010 Store the result in TARGET and return true if successful.
4012 On 64-bit targets, TARGET may be wider than cmp_operands[0]. */
4015 mips_emit_scc (enum rtx_code code, rtx target)
4017 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) != MODE_INT)
4020 target = gen_lowpart (GET_MODE (cmp_operands[0]), target);
4021 if (code == EQ || code == NE)
4023 rtx zie = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
4024 mips_emit_binary (code, target, zie, const0_rtx);
4027 mips_emit_int_relational (code, 0, target,
4028 cmp_operands[0], cmp_operands[1]);
4032 /* Emit the common code for doing conditional branches.
4033 operand[0] is the label to jump to.
4034 The comparison operands are saved away by cmp{si,di,sf,df}. */
4037 gen_conditional_branch (rtx *operands, enum rtx_code code)
4039 rtx op0, op1, condition;
4041 mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
4042 condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
4043 emit_jump_insn (gen_condjump (condition, operands[0]));
4048 (set temp (COND:CCV2 CMP_OP0 CMP_OP1))
4049 (set DEST (unspec [TRUE_SRC FALSE_SRC temp] UNSPEC_MOVE_TF_PS)) */
4052 mips_expand_vcondv2sf (rtx dest, rtx true_src, rtx false_src,
4053 enum rtx_code cond, rtx cmp_op0, rtx cmp_op1)
4058 reversed_p = mips_reverse_fp_cond_p (&cond);
4059 cmp_result = gen_reg_rtx (CCV2mode);
4060 emit_insn (gen_scc_ps (cmp_result,
4061 gen_rtx_fmt_ee (cond, VOIDmode, cmp_op0, cmp_op1)));
4063 emit_insn (gen_mips_cond_move_tf_ps (dest, false_src, true_src,
4066 emit_insn (gen_mips_cond_move_tf_ps (dest, true_src, false_src,
4070 /* Emit the common code for conditional moves. OPERANDS is the array
4071 of operands passed to the conditional move define_expand. */
4074 gen_conditional_move (rtx *operands)
4079 code = GET_CODE (operands[1]);
4080 mips_emit_compare (&code, &op0, &op1, true);
4081 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
4082 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
4083 gen_rtx_fmt_ee (code,
4086 operands[2], operands[3])));
4089 /* Emit a conditional trap. OPERANDS is the array of operands passed to
4090 the conditional_trap expander. */
4093 mips_gen_conditional_trap (rtx *operands)
4096 enum rtx_code cmp_code = GET_CODE (operands[0]);
4097 enum machine_mode mode = GET_MODE (cmp_operands[0]);
4099 /* MIPS conditional trap machine instructions don't have GT or LE
4100 flavors, so we must invert the comparison and convert to LT and
4101 GE, respectively. */
4104 case GT: cmp_code = LT; break;
4105 case LE: cmp_code = GE; break;
4106 case GTU: cmp_code = LTU; break;
4107 case LEU: cmp_code = GEU; break;
4110 if (cmp_code == GET_CODE (operands[0]))
4112 op0 = cmp_operands[0];
4113 op1 = cmp_operands[1];
4117 op0 = cmp_operands[1];
4118 op1 = cmp_operands[0];
4120 op0 = force_reg (mode, op0);
4121 if (!arith_operand (op1, mode))
4122 op1 = force_reg (mode, op1);
4124 emit_insn (gen_rtx_TRAP_IF (VOIDmode,
4125 gen_rtx_fmt_ee (cmp_code, mode, op0, op1),
4129 /* Return true if function DECL is a MIPS16 function. Return the ambient
4130 setting if DECL is null. */
4133 mips_use_mips16_mode_p (tree decl)
4137 /* Nested functions must use the same frame pointer as their
4138 parent and must therefore use the same ISA mode. */
4139 tree parent = decl_function_context (decl);
4142 if (mips_mips16_decl_p (decl))
4144 if (mips_nomips16_decl_p (decl))
4147 return mips_base_mips16;
4150 /* Return true if calls to X can use R_MIPS_CALL* relocations. */
4153 mips_ok_for_lazy_binding_p (rtx x)
4155 return (TARGET_USE_GOT
4156 && GET_CODE (x) == SYMBOL_REF
4157 && !mips_symbol_binds_local_p (x));
4160 /* Load function address ADDR into register DEST. SIBCALL_P is true
4161 if the address is needed for a sibling call. */
4164 mips_load_call_address (rtx dest, rtx addr, int sibcall_p)
4166 /* If we're generating PIC, and this call is to a global function,
4167 try to allow its address to be resolved lazily. This isn't
4168 possible if TARGET_CALL_SAVED_GP since the value of $gp on entry
4169 to the stub would be our caller's gp, not ours. */
4170 if (TARGET_EXPLICIT_RELOCS
4171 && !(sibcall_p && TARGET_CALL_SAVED_GP)
4172 && mips_ok_for_lazy_binding_p (addr))
4174 rtx high, lo_sum_symbol;
4176 high = mips_unspec_offset_high (dest, pic_offset_table_rtx,
4177 addr, SYMBOL_GOTOFF_CALL);
4178 lo_sum_symbol = mips_unspec_address (addr, SYMBOL_GOTOFF_CALL);
4179 if (Pmode == SImode)
4180 emit_insn (gen_load_callsi (dest, high, lo_sum_symbol));
4182 emit_insn (gen_load_calldi (dest, high, lo_sum_symbol));
4185 mips_emit_move (dest, addr);
4189 /* Expand a call or call_value instruction. RESULT is where the
4190 result will go (null for calls), ADDR is the address of the
4191 function, ARGS_SIZE is the size of the arguments and AUX is
4192 the value passed to us by mips_function_arg. SIBCALL_P is true
4193 if we are expanding a sibling call, false if we're expanding
4197 mips_expand_call (rtx result, rtx addr, rtx args_size, rtx aux, int sibcall_p)
4199 rtx orig_addr, pattern, insn;
4202 if (!call_insn_operand (addr, VOIDmode))
4204 addr = gen_reg_rtx (Pmode);
4205 mips_load_call_address (addr, orig_addr, sibcall_p);
4209 && TARGET_HARD_FLOAT_ABI
4210 && build_mips16_call_stub (result, addr, args_size,
4211 aux == 0 ? 0 : (int) GET_MODE (aux)))
4215 pattern = (sibcall_p
4216 ? gen_sibcall_internal (addr, args_size)
4217 : gen_call_internal (addr, args_size));
4218 else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
4222 reg1 = XEXP (XVECEXP (result, 0, 0), 0);
4223 reg2 = XEXP (XVECEXP (result, 0, 1), 0);
4226 ? gen_sibcall_value_multiple_internal (reg1, addr, args_size, reg2)
4227 : gen_call_value_multiple_internal (reg1, addr, args_size, reg2));
4230 pattern = (sibcall_p
4231 ? gen_sibcall_value_internal (result, addr, args_size)
4232 : gen_call_value_internal (result, addr, args_size));
4234 insn = emit_call_insn (pattern);
4236 /* Lazy-binding stubs require $gp to be valid on entry. */
4237 if (mips_ok_for_lazy_binding_p (orig_addr))
4238 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
4242 /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */
4245 mips_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
4247 if (!TARGET_SIBCALLS)
4250 /* We can't do a sibcall if the called function is a MIPS16 function
4251 because there is no direct "jx" instruction equivalent to "jalx" to
4252 switch the ISA mode. */
4253 if (mips_use_mips16_mode_p (decl))
4256 /* ...and when -minterlink-mips16 is in effect, assume that external
4257 functions could be MIPS16 ones unless an attribute explicitly
4258 tells us otherwise. We only care about cases where the sibling
4259 and normal calls would both be direct. */
4260 if (TARGET_INTERLINK_MIPS16
4262 && DECL_EXTERNAL (decl)
4263 && !mips_nomips16_decl_p (decl)
4264 && const_call_insn_operand (XEXP (DECL_RTL (decl), 0), VOIDmode))
4271 /* Emit code to move general operand SRC into condition-code
4272 register DEST. SCRATCH is a scratch TFmode float register.
4279 where FP1 and FP2 are single-precision float registers
4280 taken from SCRATCH. */
4283 mips_emit_fcc_reload (rtx dest, rtx src, rtx scratch)
4287 /* Change the source to SFmode. */
4289 src = adjust_address (src, SFmode, 0);
4290 else if (REG_P (src) || GET_CODE (src) == SUBREG)
4291 src = gen_rtx_REG (SFmode, true_regnum (src));
4293 fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
4294 fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + MAX_FPRS_PER_FMT);
4296 mips_emit_move (copy_rtx (fp1), src);
4297 mips_emit_move (copy_rtx (fp2), CONST0_RTX (SFmode));
4298 emit_insn (gen_slt_sf (dest, fp2, fp1));
4301 /* Emit code to change the current function's return address to
4302 ADDRESS. SCRATCH is available as a scratch register, if needed.
4303 ADDRESS and SCRATCH are both word-mode GPRs. */
4306 mips_set_return_address (rtx address, rtx scratch)
4310 compute_frame_size (get_frame_size ());
4311 gcc_assert ((cfun->machine->frame.mask >> 31) & 1);
4312 slot_address = mips_add_offset (scratch, stack_pointer_rtx,
4313 cfun->machine->frame.gp_sp_offset);
4315 mips_emit_move (gen_rtx_MEM (GET_MODE (address), slot_address), address);
4318 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
4319 Assume that the areas do not overlap. */
4322 mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
4324 HOST_WIDE_INT offset, delta;
4325 unsigned HOST_WIDE_INT bits;
4327 enum machine_mode mode;
4330 /* Work out how many bits to move at a time. If both operands have
4331 half-word alignment, it is usually better to move in half words.
4332 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
4333 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
4334 Otherwise move word-sized chunks. */
4335 if (MEM_ALIGN (src) == BITS_PER_WORD / 2
4336 && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
4337 bits = BITS_PER_WORD / 2;
4339 bits = BITS_PER_WORD;
4341 mode = mode_for_size (bits, MODE_INT, 0);
4342 delta = bits / BITS_PER_UNIT;
4344 /* Allocate a buffer for the temporary registers. */
4345 regs = alloca (sizeof (rtx) * length / delta);
4347 /* Load as many BITS-sized chunks as possible. Use a normal load if
4348 the source has enough alignment, otherwise use left/right pairs. */
4349 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
4351 regs[i] = gen_reg_rtx (mode);
4352 if (MEM_ALIGN (src) >= bits)
4353 mips_emit_move (regs[i], adjust_address (src, mode, offset));
4356 rtx part = adjust_address (src, BLKmode, offset);
4357 if (!mips_expand_unaligned_load (regs[i], part, bits, 0))
4362 /* Copy the chunks to the destination. */
4363 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
4364 if (MEM_ALIGN (dest) >= bits)
4365 mips_emit_move (adjust_address (dest, mode, offset), regs[i]);
4368 rtx part = adjust_address (dest, BLKmode, offset);
4369 if (!mips_expand_unaligned_store (part, regs[i], bits, 0))
4373 /* Mop up any left-over bytes. */
4374 if (offset < length)
4376 src = adjust_address (src, BLKmode, offset);
4377 dest = adjust_address (dest, BLKmode, offset);
4378 move_by_pieces (dest, src, length - offset,
4379 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
4383 #define MAX_MOVE_REGS 4
4384 #define MAX_MOVE_BYTES (MAX_MOVE_REGS * UNITS_PER_WORD)
4387 /* Helper function for doing a loop-based block operation on memory
4388 reference MEM. Each iteration of the loop will operate on LENGTH
4391 Create a new base register for use within the loop and point it to
4392 the start of MEM. Create a new memory reference that uses this
4393 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
4396 mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
4397 rtx *loop_reg, rtx *loop_mem)
4399 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
4401 /* Although the new mem does not refer to a known location,
4402 it does keep up to LENGTH bytes of alignment. */
4403 *loop_mem = change_address (mem, BLKmode, *loop_reg);
4404 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
4408 /* Move LENGTH bytes from SRC to DEST using a loop that moves MAX_MOVE_BYTES
4409 per iteration. LENGTH must be at least MAX_MOVE_BYTES. Assume that the
4410 memory regions do not overlap. */
4413 mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length)
4415 rtx label, src_reg, dest_reg, final_src;
4416 HOST_WIDE_INT leftover;
4418 leftover = length % MAX_MOVE_BYTES;
4421 /* Create registers and memory references for use within the loop. */
4422 mips_adjust_block_mem (src, MAX_MOVE_BYTES, &src_reg, &src);
4423 mips_adjust_block_mem (dest, MAX_MOVE_BYTES, &dest_reg, &dest);
4425 /* Calculate the value that SRC_REG should have after the last iteration
4427 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
4430 /* Emit the start of the loop. */
4431 label = gen_label_rtx ();
4434 /* Emit the loop body. */
4435 mips_block_move_straight (dest, src, MAX_MOVE_BYTES);
4437 /* Move on to the next block. */
4438 mips_emit_move (src_reg, plus_constant (src_reg, MAX_MOVE_BYTES));
4439 mips_emit_move (dest_reg, plus_constant (dest_reg, MAX_MOVE_BYTES));
4441 /* Emit the loop condition. */
4442 if (Pmode == DImode)
4443 emit_insn (gen_cmpdi (src_reg, final_src));
4445 emit_insn (gen_cmpsi (src_reg, final_src));
4446 emit_jump_insn (gen_bne (label));
4448 /* Mop up any left-over bytes. */
4450 mips_block_move_straight (dest, src, leftover);
4454 /* Expand a loop of synci insns for the address range [BEGIN, END). */
4457 mips_expand_synci_loop (rtx begin, rtx end)
4459 rtx inc, label, cmp, cmp_result;
4461 /* Load INC with the cache line size (rdhwr INC,$1). */
4462 inc = gen_reg_rtx (SImode);
4463 emit_insn (gen_rdhwr (inc, const1_rtx));
4465 /* Loop back to here. */
4466 label = gen_label_rtx ();
4469 emit_insn (gen_synci (begin));
4471 cmp = gen_reg_rtx (Pmode);
4472 mips_emit_binary (GTU, cmp, begin, end);
4474 mips_emit_binary (PLUS, begin, begin, inc);
4476 cmp_result = gen_rtx_EQ (VOIDmode, cmp, const0_rtx);
4477 emit_jump_insn (gen_condjump (cmp_result, label));
4480 /* Expand a movmemsi instruction. */
4483 mips_expand_block_move (rtx dest, rtx src, rtx length)
4485 if (GET_CODE (length) == CONST_INT)
4487 if (INTVAL (length) <= 2 * MAX_MOVE_BYTES)
4489 mips_block_move_straight (dest, src, INTVAL (length));
4494 mips_block_move_loop (dest, src, INTVAL (length));
4501 /* Argument support functions. */
4503 /* Initialize CUMULATIVE_ARGS for a function. */
4506 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
4507 rtx libname ATTRIBUTE_UNUSED)
4509 static CUMULATIVE_ARGS zero_cum;
4510 tree param, next_param;
4513 cum->prototype = (fntype && TYPE_ARG_TYPES (fntype));
4515 /* Determine if this function has variable arguments. This is
4516 indicated by the last argument being 'void_type_mode' if there
4517 are no variable arguments. The standard MIPS calling sequence
4518 passes all arguments in the general purpose registers in this case. */
4520 for (param = fntype ? TYPE_ARG_TYPES (fntype) : 0;
4521 param != 0; param = next_param)
4523 next_param = TREE_CHAIN (param);
4524 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
4525 cum->gp_reg_found = 1;
4530 /* Fill INFO with information about a single argument. CUM is the
4531 cumulative state for earlier arguments. MODE is the mode of this
4532 argument and TYPE is its type (if known). NAMED is true if this
4533 is a named (fixed) argument rather than a variable one. */
4536 mips_arg_info (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4537 tree type, int named, struct mips_arg_info *info)
4539 bool doubleword_aligned_p;
4540 unsigned int num_bytes, num_words, max_regs;
4542 /* Work out the size of the argument. */
4543 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
4544 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4546 /* Decide whether it should go in a floating-point register, assuming
4547 one is free. Later code checks for availability.
4549 The checks against UNITS_PER_FPVALUE handle the soft-float and
4550 single-float cases. */
4554 /* The EABI conventions have traditionally been defined in terms
4555 of TYPE_MODE, regardless of the actual type. */
4556 info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
4557 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4558 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4563 /* Only leading floating-point scalars are passed in
4564 floating-point registers. We also handle vector floats the same
4565 say, which is OK because they are not covered by the standard ABI. */
4566 info->fpr_p = (!cum->gp_reg_found
4567 && cum->arg_number < 2
4568 && (type == 0 || SCALAR_FLOAT_TYPE_P (type)
4569 || VECTOR_FLOAT_TYPE_P (type))
4570 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4571 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4572 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4577 /* Scalar and complex floating-point types are passed in
4578 floating-point registers. */
4579 info->fpr_p = (named
4580 && (type == 0 || FLOAT_TYPE_P (type))
4581 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4582 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4583 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4584 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
4586 /* ??? According to the ABI documentation, the real and imaginary
4587 parts of complex floats should be passed in individual registers.
4588 The real and imaginary parts of stack arguments are supposed
4589 to be contiguous and there should be an extra word of padding
4592 This has two problems. First, it makes it impossible to use a
4593 single "void *" va_list type, since register and stack arguments
4594 are passed differently. (At the time of writing, MIPSpro cannot
4595 handle complex float varargs correctly.) Second, it's unclear
4596 what should happen when there is only one register free.
4598 For now, we assume that named complex floats should go into FPRs
4599 if there are two FPRs free, otherwise they should be passed in the
4600 same way as a struct containing two floats. */
4602 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4603 && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
4605 if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
4606 info->fpr_p = false;
4616 /* See whether the argument has doubleword alignment. */
4617 doubleword_aligned_p = FUNCTION_ARG_BOUNDARY (mode, type) > BITS_PER_WORD;
4619 /* Set REG_OFFSET to the register count we're interested in.
4620 The EABI allocates the floating-point registers separately,
4621 but the other ABIs allocate them like integer registers. */
4622 info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
4626 /* Advance to an even register if the argument is doubleword-aligned. */
4627 if (doubleword_aligned_p)
4628 info->reg_offset += info->reg_offset & 1;
4630 /* Work out the offset of a stack argument. */
4631 info->stack_offset = cum->stack_words;
4632 if (doubleword_aligned_p)
4633 info->stack_offset += info->stack_offset & 1;
4635 max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
4637 /* Partition the argument between registers and stack. */
4638 info->reg_words = MIN (num_words, max_regs);
4639 info->stack_words = num_words - info->reg_words;
4643 /* INFO describes an argument that is passed in a single-register value.
4644 Return the register it uses, assuming that FPRs are available if
4648 mips_arg_regno (const struct mips_arg_info *info, bool hard_float_p)
4650 if (!info->fpr_p || !hard_float_p)
4651 return GP_ARG_FIRST + info->reg_offset;
4652 else if (mips_abi == ABI_32 && TARGET_DOUBLE_FLOAT && info->reg_offset > 0)
4653 /* In o32, the second argument is always passed in $f14
4654 for TARGET_DOUBLE_FLOAT, regardless of whether the
4655 first argument was a word or doubleword. */
4656 return FP_ARG_FIRST + 2;
4658 return FP_ARG_FIRST + info->reg_offset;
4661 /* Implement FUNCTION_ARG_ADVANCE. */
4664 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4665 tree type, int named)
4667 struct mips_arg_info info;
4669 mips_arg_info (cum, mode, type, named, &info);
4672 cum->gp_reg_found = true;
4674 /* See the comment above the cumulative args structure in mips.h
4675 for an explanation of what this code does. It assumes the O32
4676 ABI, which passes at most 2 arguments in float registers. */
4677 if (cum->arg_number < 2 && info.fpr_p)
4678 cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);
4680 if (mips_abi != ABI_EABI || !info.fpr_p)
4681 cum->num_gprs = info.reg_offset + info.reg_words;
4682 else if (info.reg_words > 0)
4683 cum->num_fprs += MAX_FPRS_PER_FMT;
4685 if (info.stack_words > 0)
4686 cum->stack_words = info.stack_offset + info.stack_words;
4691 /* Implement FUNCTION_ARG. */
4694 function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4695 tree type, int named)
4697 struct mips_arg_info info;
4699 /* We will be called with a mode of VOIDmode after the last argument
4700 has been seen. Whatever we return will be passed to the call
4701 insn. If we need a mips16 fp_code, return a REG with the code
4702 stored as the mode. */
4703 if (mode == VOIDmode)
4705 if (TARGET_MIPS16 && cum->fp_code != 0)
4706 return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
4712 mips_arg_info (cum, mode, type, named, &info);
4714 /* Return straight away if the whole argument is passed on the stack. */
4715 if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
4719 && TREE_CODE (type) == RECORD_TYPE
4721 && TYPE_SIZE_UNIT (type)
4722 && host_integerp (TYPE_SIZE_UNIT (type), 1)
4725 /* The Irix 6 n32/n64 ABIs say that if any 64-bit chunk of the
4726 structure contains a double in its entirety, then that 64-bit
4727 chunk is passed in a floating point register. */
4730 /* First check to see if there is any such field. */
4731 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4732 if (TREE_CODE (field) == FIELD_DECL
4733 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
4734 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
4735 && host_integerp (bit_position (field), 0)
4736 && int_bit_position (field) % BITS_PER_WORD == 0)
4741 /* Now handle the special case by returning a PARALLEL
4742 indicating where each 64-bit chunk goes. INFO.REG_WORDS
4743 chunks are passed in registers. */
4745 HOST_WIDE_INT bitpos;
4748 /* assign_parms checks the mode of ENTRY_PARM, so we must
4749 use the actual mode here. */
4750 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
4753 field = TYPE_FIELDS (type);
4754 for (i = 0; i < info.reg_words; i++)
4758 for (; field; field = TREE_CHAIN (field))
4759 if (TREE_CODE (field) == FIELD_DECL
4760 && int_bit_position (field) >= bitpos)
4764 && int_bit_position (field) == bitpos
4765 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
4766 && !TARGET_SOFT_FLOAT
4767 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
4768 reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
4770 reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
4773 = gen_rtx_EXPR_LIST (VOIDmode, reg,
4774 GEN_INT (bitpos / BITS_PER_UNIT));
4776 bitpos += BITS_PER_WORD;
4782 /* Handle the n32/n64 conventions for passing complex floating-point
4783 arguments in FPR pairs. The real part goes in the lower register
4784 and the imaginary part goes in the upper register. */
4787 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4790 enum machine_mode inner;
4793 inner = GET_MODE_INNER (mode);
4794 reg = FP_ARG_FIRST + info.reg_offset;
4795 if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
4797 /* Real part in registers, imaginary part on stack. */
4798 gcc_assert (info.stack_words == info.reg_words);
4799 return gen_rtx_REG (inner, reg);
4803 gcc_assert (info.stack_words == 0);
4804 real = gen_rtx_EXPR_LIST (VOIDmode,
4805 gen_rtx_REG (inner, reg),
4807 imag = gen_rtx_EXPR_LIST (VOIDmode,
4809 reg + info.reg_words / 2),
4810 GEN_INT (GET_MODE_SIZE (inner)));
4811 return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
4815 return gen_rtx_REG (mode, mips_arg_regno (&info, TARGET_HARD_FLOAT));
4819 /* Implement TARGET_ARG_PARTIAL_BYTES. */
4822 mips_arg_partial_bytes (CUMULATIVE_ARGS *cum,
4823 enum machine_mode mode, tree type, bool named)
4825 struct mips_arg_info info;
4827 mips_arg_info (cum, mode, type, named, &info);
4828 return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
4832 /* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
4833 PARM_BOUNDARY bits of alignment, but will be given anything up
4834 to STACK_BOUNDARY bits if the type requires it. */
4837 function_arg_boundary (enum machine_mode mode, tree type)
4839 unsigned int alignment;
4841 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
4842 if (alignment < PARM_BOUNDARY)
4843 alignment = PARM_BOUNDARY;
4844 if (alignment > STACK_BOUNDARY)
4845 alignment = STACK_BOUNDARY;
4849 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
4850 upward rather than downward. In other words, return true if the
4851 first byte of the stack slot has useful data, false if the last
4855 mips_pad_arg_upward (enum machine_mode mode, const_tree type)
4857 /* On little-endian targets, the first byte of every stack argument
4858 is passed in the first byte of the stack slot. */
4859 if (!BYTES_BIG_ENDIAN)
4862 /* Otherwise, integral types are padded downward: the last byte of a
4863 stack argument is passed in the last byte of the stack slot. */
4865 ? (INTEGRAL_TYPE_P (type)
4866 || POINTER_TYPE_P (type)
4867 || FIXED_POINT_TYPE_P (type))
4868 : (GET_MODE_CLASS (mode) == MODE_INT
4869 || ALL_SCALAR_FIXED_POINT_MODE_P (mode)))
4872 /* Big-endian o64 pads floating-point arguments downward. */
4873 if (mips_abi == ABI_O64)
4874 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4877 /* Other types are padded upward for o32, o64, n32 and n64. */
4878 if (mips_abi != ABI_EABI)
4881 /* Arguments smaller than a stack slot are padded downward. */
4882 if (mode != BLKmode)
4883 return (GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY);
4885 return (int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT));
4889 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
4890 if the least significant byte of the register has useful data. Return
4891 the opposite if the most significant byte does. */
4894 mips_pad_reg_upward (enum machine_mode mode, tree type)
4896 /* No shifting is required for floating-point arguments. */
4897 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4898 return !BYTES_BIG_ENDIAN;
4900 /* Otherwise, apply the same padding to register arguments as we do
4901 to stack arguments. */
4902 return mips_pad_arg_upward (mode, type);
4906 mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4907 tree type, int *pretend_size ATTRIBUTE_UNUSED,
4910 CUMULATIVE_ARGS local_cum;
4911 int gp_saved, fp_saved;
4913 /* The caller has advanced CUM up to, but not beyond, the last named
4914 argument. Advance a local copy of CUM past the last "real" named
4915 argument, to find out how many registers are left over. */
4918 FUNCTION_ARG_ADVANCE (local_cum, mode, type, 1);
4920 /* Found out how many registers we need to save. */
4921 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
4922 fp_saved = (EABI_FLOAT_VARARGS_P
4923 ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
4932 ptr = plus_constant (virtual_incoming_args_rtx,
4933 REG_PARM_STACK_SPACE (cfun->decl)
4934 - gp_saved * UNITS_PER_WORD);
4935 mem = gen_rtx_MEM (BLKmode, ptr);
4936 set_mem_alias_set (mem, get_varargs_alias_set ());
4938 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
4943 /* We can't use move_block_from_reg, because it will use
4945 enum machine_mode mode;
4948 /* Set OFF to the offset from virtual_incoming_args_rtx of
4949 the first float register. The FP save area lies below
4950 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
4951 off = -gp_saved * UNITS_PER_WORD;
4952 off &= ~(UNITS_PER_FPVALUE - 1);
4953 off -= fp_saved * UNITS_PER_FPREG;
4955 mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
4957 for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS;
4958 i += MAX_FPRS_PER_FMT)
4962 ptr = plus_constant (virtual_incoming_args_rtx, off);
4963 mem = gen_rtx_MEM (mode, ptr);
4964 set_mem_alias_set (mem, get_varargs_alias_set ());
4965 mips_emit_move (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
4966 off += UNITS_PER_HWFPVALUE;
4970 if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
4971 cfun->machine->varargs_size = (gp_saved * UNITS_PER_WORD
4972 + fp_saved * UNITS_PER_FPREG);
4975 /* Create the va_list data type.
4976 We keep 3 pointers, and two offsets.
4977 Two pointers are to the overflow area, which starts at the CFA.
4978 One of these is constant, for addressing into the GPR save area below it.
4979 The other is advanced up the stack through the overflow region.
4980 The third pointer is to the GPR save area. Since the FPR save area
4981 is just below it, we can address FPR slots off this pointer.
4982 We also keep two one-byte offsets, which are to be subtracted from the
4983 constant pointers to yield addresses in the GPR and FPR save areas.
4984 These are downcounted as float or non-float arguments are used,
4985 and when they get to zero, the argument must be obtained from the
4987 If !EABI_FLOAT_VARARGS_P, then no FPR save area exists, and a single
4988 pointer is enough. It's started at the GPR save area, and is
4990 Note that the GPR save area is not constant size, due to optimization
4991 in the prologue. Hence, we can't use a design with two pointers
4992 and two offsets, although we could have designed this with two pointers
4993 and three offsets. */
4996 mips_build_builtin_va_list (void)
4998 if (EABI_FLOAT_VARARGS_P)
5000 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
5003 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5005 f_ovfl = build_decl (FIELD_DECL, get_identifier ("__overflow_argptr"),
5007 f_gtop = build_decl (FIELD_DECL, get_identifier ("__gpr_top"),
5009 f_ftop = build_decl (FIELD_DECL, get_identifier ("__fpr_top"),
5011 f_goff = build_decl (FIELD_DECL, get_identifier ("__gpr_offset"),
5012 unsigned_char_type_node);
5013 f_foff = build_decl (FIELD_DECL, get_identifier ("__fpr_offset"),
5014 unsigned_char_type_node);
5015 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
5016 warn on every user file. */
5017 index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
5018 array = build_array_type (unsigned_char_type_node,
5019 build_index_type (index));
5020 f_res = build_decl (FIELD_DECL, get_identifier ("__reserved"), array);
5022 DECL_FIELD_CONTEXT (f_ovfl) = record;
5023 DECL_FIELD_CONTEXT (f_gtop) = record;
5024 DECL_FIELD_CONTEXT (f_ftop) = record;
5025 DECL_FIELD_CONTEXT (f_goff) = record;
5026 DECL_FIELD_CONTEXT (f_foff) = record;
5027 DECL_FIELD_CONTEXT (f_res) = record;
5029 TYPE_FIELDS (record) = f_ovfl;
5030 TREE_CHAIN (f_ovfl) = f_gtop;
5031 TREE_CHAIN (f_gtop) = f_ftop;
5032 TREE_CHAIN (f_ftop) = f_goff;
5033 TREE_CHAIN (f_goff) = f_foff;
5034 TREE_CHAIN (f_foff) = f_res;
5036 layout_type (record);
5039 else if (TARGET_IRIX && TARGET_IRIX6)
5040 /* On IRIX 6, this type is 'char *'. */
5041 return build_pointer_type (char_type_node);
5043 /* Otherwise, we use 'void *'. */
5044 return ptr_type_node;
5047 /* Implement va_start. */
5050 mips_va_start (tree valist, rtx nextarg)
5052 if (EABI_FLOAT_VARARGS_P)
5054 const CUMULATIVE_ARGS *cum;
5055 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
5056 tree ovfl, gtop, ftop, goff, foff;
5058 int gpr_save_area_size;
5059 int fpr_save_area_size;
5062 cum = ¤t_function_args_info;
5064 = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
5066 = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
5068 f_ovfl = TYPE_FIELDS (va_list_type_node);
5069 f_gtop = TREE_CHAIN (f_ovfl);
5070 f_ftop = TREE_CHAIN (f_gtop);
5071 f_goff = TREE_CHAIN (f_ftop);
5072 f_foff = TREE_CHAIN (f_goff);
5074 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
5076 gtop = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
5078 ftop = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
5080 goff = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
5082 foff = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
5085 /* Emit code to initialize OVFL, which points to the next varargs
5086 stack argument. CUM->STACK_WORDS gives the number of stack
5087 words used by named arguments. */
5088 t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
5089 if (cum->stack_words > 0)
5090 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), t,
5091 size_int (cum->stack_words * UNITS_PER_WORD));
5092 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
5093 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5095 /* Emit code to initialize GTOP, the top of the GPR save area. */
5096 t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
5097 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (gtop), gtop, t);
5098 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5100 /* Emit code to initialize FTOP, the top of the FPR save area.
5101 This address is gpr_save_area_bytes below GTOP, rounded
5102 down to the next fp-aligned boundary. */
5103 t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
5104 fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
5105 fpr_offset &= ~(UNITS_PER_FPVALUE - 1);
5107 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ftop), t,
5108 size_int (-fpr_offset));
5109 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ftop), ftop, t);
5110 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5112 /* Emit code to initialize GOFF, the offset from GTOP of the
5113 next GPR argument. */
5114 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (goff), goff,
5115 build_int_cst (NULL_TREE, gpr_save_area_size));
5116 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5118 /* Likewise emit code to initialize FOFF, the offset from FTOP
5119 of the next FPR argument. */
5120 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (foff), foff,
5121 build_int_cst (NULL_TREE, fpr_save_area_size));
5122 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5126 nextarg = plus_constant (nextarg, -cfun->machine->varargs_size);
5127 std_expand_builtin_va_start (valist, nextarg);
5131 /* Implement va_arg. */
5134 mips_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
5136 HOST_WIDE_INT size, rsize;
5140 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
5143 type = build_pointer_type (type);
5145 size = int_size_in_bytes (type);
5146 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
5148 if (mips_abi != ABI_EABI || !EABI_FLOAT_VARARGS_P)
5149 addr = std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
5152 /* Not a simple merged stack. */
5154 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
5155 tree ovfl, top, off, align;
5156 HOST_WIDE_INT osize;
5159 f_ovfl = TYPE_FIELDS (va_list_type_node);
5160 f_gtop = TREE_CHAIN (f_ovfl);
5161 f_ftop = TREE_CHAIN (f_gtop);
5162 f_goff = TREE_CHAIN (f_ftop);
5163 f_foff = TREE_CHAIN (f_goff);
5165 /* We maintain separate pointers and offsets for floating-point
5166 and integer arguments, but we need similar code in both cases.
5169 TOP be the top of the register save area;
5170 OFF be the offset from TOP of the next register;
5171 ADDR_RTX be the address of the argument;
5172 RSIZE be the number of bytes used to store the argument
5173 when it's in the register save area;
5174 OSIZE be the number of bytes used to store it when it's
5175 in the stack overflow area; and
5176 PADDING be (BYTES_BIG_ENDIAN ? OSIZE - RSIZE : 0)
5178 The code we want is:
5180 1: off &= -rsize; // round down
5183 4: addr_rtx = top - off;
5188 9: ovfl += ((intptr_t) ovfl + osize - 1) & -osize;
5189 10: addr_rtx = ovfl + PADDING;
5193 [1] and [9] can sometimes be optimized away. */
5195 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
5198 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
5199 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
5201 top = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
5203 off = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
5206 /* When floating-point registers are saved to the stack,
5207 each one will take up UNITS_PER_HWFPVALUE bytes, regardless
5208 of the float's precision. */
5209 rsize = UNITS_PER_HWFPVALUE;
5211 /* Overflow arguments are padded to UNITS_PER_WORD bytes
5212 (= PARM_BOUNDARY bits). This can be different from RSIZE
5215 (1) On 32-bit targets when TYPE is a structure such as:
5217 struct s { float f; };
5219 Such structures are passed in paired FPRs, so RSIZE
5220 will be 8 bytes. However, the structure only takes
5221 up 4 bytes of memory, so OSIZE will only be 4.
5223 (2) In combinations such as -mgp64 -msingle-float
5224 -fshort-double. Doubles passed in registers
5225 will then take up 4 (UNITS_PER_HWFPVALUE) bytes,
5226 but those passed on the stack take up
5227 UNITS_PER_WORD bytes. */
5228 osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
5232 top = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
5234 off = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
5236 if (rsize > UNITS_PER_WORD)
5238 /* [1] Emit code for: off &= -rsize. */
5239 t = build2 (BIT_AND_EXPR, TREE_TYPE (off), off,
5240 build_int_cst (NULL_TREE, -rsize));
5241 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (off), off, t);
5242 gimplify_and_add (t, pre_p);
5247 /* [2] Emit code to branch if off == 0. */
5248 t = build2 (NE_EXPR, boolean_type_node, off,
5249 build_int_cst (TREE_TYPE (off), 0));
5250 addr = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
5252 /* [5] Emit code for: off -= rsize. We do this as a form of
5253 post-increment not available to C. Also widen for the
5254 coming pointer arithmetic. */
5255 t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
5256 t = build2 (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
5257 t = fold_convert (sizetype, t);
5258 t = fold_build1 (NEGATE_EXPR, sizetype, t);
5260 /* [4] Emit code for: addr_rtx = top - off. On big endian machines,
5261 the argument has RSIZE - SIZE bytes of leading padding. */
5262 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (top), top, t);
5263 if (BYTES_BIG_ENDIAN && rsize > size)
5265 u = size_int (rsize - size);
5266 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
5268 COND_EXPR_THEN (addr) = t;
5270 if (osize > UNITS_PER_WORD)
5272 /* [9] Emit: ovfl += ((intptr_t) ovfl + osize - 1) & -osize. */
5273 u = size_int (osize - 1);
5274 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), ovfl, u);
5275 t = fold_convert (sizetype, t);
5276 u = size_int (-osize);
5277 t = build2 (BIT_AND_EXPR, sizetype, t, u);
5278 t = fold_convert (TREE_TYPE (ovfl), t);
5279 align = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
5284 /* [10, 11]. Emit code to store ovfl in addr_rtx, then
5285 post-increment ovfl by osize. On big-endian machines,
5286 the argument has OSIZE - SIZE bytes of leading padding. */
5287 u = fold_convert (TREE_TYPE (ovfl),
5288 build_int_cst (NULL_TREE, osize));
5289 t = build2 (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
5290 if (BYTES_BIG_ENDIAN && osize > size)
5292 u = size_int (osize - size);
5293 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
5296 /* String [9] and [10,11] together. */
5298 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), align, t);
5299 COND_EXPR_ELSE (addr) = t;
5301 addr = fold_convert (build_pointer_type (type), addr);
5302 addr = build_va_arg_indirect_ref (addr);
5306 addr = build_va_arg_indirect_ref (addr);
5311 /* Return true if it is possible to use left/right accesses for a
5312 bitfield of WIDTH bits starting BITPOS bits into *OP. When
5313 returning true, update *OP, *LEFT and *RIGHT as follows:
5315 *OP is a BLKmode reference to the whole field.
5317 *LEFT is a QImode reference to the first byte if big endian or
5318 the last byte if little endian. This address can be used in the
5319 left-side instructions (lwl, swl, ldl, sdl).
5321 *RIGHT is a QImode reference to the opposite end of the field and
5322 can be used in the patterning right-side instruction. */
5325 mips_get_unaligned_mem (rtx *op, unsigned int width, int bitpos,
5326 rtx *left, rtx *right)
5330 /* Check that the operand really is a MEM. Not all the extv and
5331 extzv predicates are checked. */
5335 /* Check that the size is valid. */
5336 if (width != 32 && (!TARGET_64BIT || width != 64))
5339 /* We can only access byte-aligned values. Since we are always passed
5340 a reference to the first byte of the field, it is not necessary to
5341 do anything with BITPOS after this check. */
5342 if (bitpos % BITS_PER_UNIT != 0)
5345 /* Reject aligned bitfields: we want to use a normal load or store
5346 instead of a left/right pair. */
5347 if (MEM_ALIGN (*op) >= width)
5350 /* Adjust *OP to refer to the whole field. This also has the effect
5351 of legitimizing *OP's address for BLKmode, possibly simplifying it. */
5352 *op = adjust_address (*op, BLKmode, 0);
5353 set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
5355 /* Get references to both ends of the field. We deliberately don't
5356 use the original QImode *OP for FIRST since the new BLKmode one
5357 might have a simpler address. */
5358 first = adjust_address (*op, QImode, 0);
5359 last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
5361 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
5362 be the upper word and RIGHT the lower word. */
5363 if (TARGET_BIG_ENDIAN)
5364 *left = first, *right = last;
5366 *left = last, *right = first;
5372 /* Try to emit the equivalent of (set DEST (zero_extract SRC WIDTH BITPOS)).
5373 Return true on success. We only handle cases where zero_extract is
5374 equivalent to sign_extract. */
5377 mips_expand_unaligned_load (rtx dest, rtx src, unsigned int width, int bitpos)
5379 rtx left, right, temp;
5381 /* If TARGET_64BIT, the destination of a 32-bit load will be a
5382 paradoxical word_mode subreg. This is the only case in which
5383 we allow the destination to be larger than the source. */
5384 if (GET_CODE (dest) == SUBREG
5385 && GET_MODE (dest) == DImode
5386 && SUBREG_BYTE (dest) == 0
5387 && GET_MODE (SUBREG_REG (dest)) == SImode)
5388 dest = SUBREG_REG (dest);
5390 /* After the above adjustment, the destination must be the same
5391 width as the source. */
5392 if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
5395 if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
5398 temp = gen_reg_rtx (GET_MODE (dest));
5399 if (GET_MODE (dest) == DImode)
5401 emit_insn (gen_mov_ldl (temp, src, left));
5402 emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
5406 emit_insn (gen_mov_lwl (temp, src, left));
5407 emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
5413 /* Try to expand (set (zero_extract DEST WIDTH BITPOS) SRC). Return
5417 mips_expand_unaligned_store (rtx dest, rtx src, unsigned int width, int bitpos)
5420 enum machine_mode mode;
5422 if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
5425 mode = mode_for_size (width, MODE_INT, 0);
5426 src = gen_lowpart (mode, src);
5430 emit_insn (gen_mov_sdl (dest, src, left));
5431 emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
5435 emit_insn (gen_mov_swl (dest, src, left));
5436 emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
5441 /* Return true if X is a MEM with the same size as MODE. */
5444 mips_mem_fits_mode_p (enum machine_mode mode, rtx x)
5451 size = MEM_SIZE (x);
5452 return size && INTVAL (size) == GET_MODE_SIZE (mode);
5455 /* Return true if (zero_extract OP SIZE POSITION) can be used as the
5456 source of an "ext" instruction or the destination of an "ins"
5457 instruction. OP must be a register operand and the following
5458 conditions must hold:
5460 0 <= POSITION < GET_MODE_BITSIZE (GET_MODE (op))
5461 0 < SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
5462 0 < POSITION + SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
5464 Also reject lengths equal to a word as they are better handled
5465 by the move patterns. */
5468 mips_use_ins_ext_p (rtx op, rtx size, rtx position)
5470 HOST_WIDE_INT len, pos;
5472 if (!ISA_HAS_EXT_INS
5473 || !register_operand (op, VOIDmode)
5474 || GET_MODE_BITSIZE (GET_MODE (op)) > BITS_PER_WORD)
5477 len = INTVAL (size);
5478 pos = INTVAL (position);
5480 if (len <= 0 || len >= GET_MODE_BITSIZE (GET_MODE (op))
5481 || pos < 0 || pos + len > GET_MODE_BITSIZE (GET_MODE (op)))
5487 /* Set up globals to generate code for the ISA or processor
5488 described by INFO. */
5491 mips_set_architecture (const struct mips_cpu_info *info)
5495 mips_arch_info = info;
5496 mips_arch = info->cpu;
5497 mips_isa = info->isa;
5502 /* Likewise for tuning. */
5505 mips_set_tune (const struct mips_cpu_info *info)
5509 mips_tune_info = info;
5510 mips_tune = info->cpu;
5514 /* Initialize mips_split_addresses from the associated command-line
5517 mips_split_addresses is a half-way house between explicit
5518 relocations and the traditional assembler macros. It can
5519 split absolute 32-bit symbolic constants into a high/lo_sum
5520 pair but uses macros for other sorts of access.
5522 Like explicit relocation support for REL targets, it relies
5523 on GNU extensions in the assembler and the linker.
5525 Although this code should work for -O0, it has traditionally
5526 been treated as an optimization. */
5529 mips_init_split_addresses (void)
5531 if (!TARGET_MIPS16 && TARGET_SPLIT_ADDRESSES
5532 && optimize && !flag_pic
5533 && !ABI_HAS_64BIT_SYMBOLS)
5534 mips_split_addresses = 1;
5536 mips_split_addresses = 0;
5539 /* (Re-)Initialize information about relocs. */
5542 mips_init_relocs (void)
5544 memset (mips_split_p, '\0', sizeof (mips_split_p));
5545 memset (mips_hi_relocs, '\0', sizeof (mips_hi_relocs));
5546 memset (mips_lo_relocs, '\0', sizeof (mips_lo_relocs));
5548 if (ABI_HAS_64BIT_SYMBOLS)
5550 if (TARGET_EXPLICIT_RELOCS)
5552 mips_split_p[SYMBOL_64_HIGH] = true;
5553 mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
5554 mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
5556 mips_split_p[SYMBOL_64_MID] = true;
5557 mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
5558 mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
5560 mips_split_p[SYMBOL_64_LOW] = true;
5561 mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
5562 mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
5564 mips_split_p[SYMBOL_ABSOLUTE] = true;
5565 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
5570 if (TARGET_EXPLICIT_RELOCS || mips_split_addresses || TARGET_MIPS16)
5572 mips_split_p[SYMBOL_ABSOLUTE] = true;
5573 mips_hi_relocs[SYMBOL_ABSOLUTE] = "%hi(";
5574 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
5576 mips_lo_relocs[SYMBOL_32_HIGH] = "%hi(";
5582 /* The high part is provided by a pseudo copy of $gp. */
5583 mips_split_p[SYMBOL_GP_RELATIVE] = true;
5584 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gprel(";
5587 if (TARGET_EXPLICIT_RELOCS)
5589 /* Small data constants are kept whole until after reload,
5590 then lowered by mips_rewrite_small_data. */
5591 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gp_rel(";
5593 mips_split_p[SYMBOL_GOT_PAGE_OFST] = true;
5596 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
5597 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%got_ofst(";
5601 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
5602 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%lo(";
5607 /* The HIGH and LO_SUM are matched by special .md patterns. */
5608 mips_split_p[SYMBOL_GOT_DISP] = true;
5610 mips_split_p[SYMBOL_GOTOFF_DISP] = true;
5611 mips_hi_relocs[SYMBOL_GOTOFF_DISP] = "%got_hi(";
5612 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_lo(";
5614 mips_split_p[SYMBOL_GOTOFF_CALL] = true;
5615 mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
5616 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
5621 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_disp(";
5623 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got(";
5624 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
5630 mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
5631 mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
5632 mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
5635 /* Thread-local relocation operators. */
5636 mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
5637 mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";
5638 mips_split_p[SYMBOL_DTPREL] = 1;
5639 mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
5640 mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";
5641 mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";
5642 mips_split_p[SYMBOL_TPREL] = 1;
5643 mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
5644 mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
5646 mips_lo_relocs[SYMBOL_HALF] = "%half(";
5649 static GTY(()) int was_mips16_p = -1;
5651 /* Set up the target-dependent global state so that it matches the
5652 current function's ISA mode. */
5655 mips_set_mips16_mode (int mips16_p)
5657 if (mips16_p == was_mips16_p)
5660 /* Restore base settings of various flags. */
5661 target_flags = mips_base_target_flags;
5662 align_loops = mips_base_align_loops;
5663 align_jumps = mips_base_align_jumps;
5664 align_functions = mips_base_align_functions;
5665 flag_schedule_insns = mips_base_schedule_insns;
5666 flag_reorder_blocks_and_partition = mips_base_reorder_blocks_and_partition;
5667 flag_move_loop_invariants = mips_base_move_loop_invariants;
5668 flag_delayed_branch = mips_flag_delayed_branch;
5672 /* Select mips16 instruction set. */
5673 target_flags |= MASK_MIPS16;
5675 /* Don't run the scheduler before reload, since it tends to
5676 increase register pressure. */
5677 flag_schedule_insns = 0;
5679 /* Don't do hot/cold partitioning. The constant layout code expects
5680 the whole function to be in a single section. */
5681 flag_reorder_blocks_and_partition = 0;
5683 /* Don't move loop invariants, because it tends to increase
5684 register pressure. It also introduces an extra move in cases
5685 where the constant is the first operand in a two-operand binary
5686 instruction, or when it forms a register argument to a functon
5688 flag_move_loop_invariants = 0;
5690 /* Silently disable -mexplicit-relocs since it doesn't apply
5691 to mips16 code. Even so, it would overly pedantic to warn
5692 about "-mips16 -mexplicit-relocs", especially given that
5693 we use a %gprel() operator. */
5694 target_flags &= ~MASK_EXPLICIT_RELOCS;
5696 /* Experiments suggest we get the best overall results from using
5697 the range of an unextended lw or sw. Code that makes heavy use
5698 of byte or short accesses can do better with ranges of 0...31
5699 and 0...63 respectively, but most code is sensitive to the range
5700 of lw and sw instead. */
5701 targetm.min_anchor_offset = 0;
5702 targetm.max_anchor_offset = 127;
5704 if (flag_pic || TARGET_ABICALLS)
5705 sorry ("MIPS16 PIC");
5709 /* Reset to select base non-mips16 ISA. */
5710 target_flags &= ~MASK_MIPS16;
5712 /* When using explicit relocs, we call dbr_schedule from within
5714 if (TARGET_EXPLICIT_RELOCS)
5715 flag_delayed_branch = 0;
5717 /* Provide default values for align_* for 64-bit targets. */
5720 if (align_loops == 0)
5722 if (align_jumps == 0)
5724 if (align_functions == 0)
5725 align_functions = 8;
5728 targetm.min_anchor_offset = TARGET_MIN_ANCHOR_OFFSET;
5729 targetm.max_anchor_offset = TARGET_MAX_ANCHOR_OFFSET;
5732 /* (Re)initialize mips target internals for new ISA. */
5733 mips_init_split_addresses ();
5734 mips_init_relocs ();
5736 if (was_mips16_p >= 0)
5737 /* Reinitialize target-dependent state. */
5740 was_mips16_p = TARGET_MIPS16;
5743 /* Use a hash table to keep track of implicit mips16/nomips16 attributes
5744 for -mflip_mips16. It maps decl names onto a boolean mode setting. */
5746 struct mflip_mips16_entry GTY (()) {
5750 static GTY ((param_is (struct mflip_mips16_entry))) htab_t mflip_mips16_htab;
5752 /* Hash table callbacks for mflip_mips16_htab. */
5755 mflip_mips16_htab_hash (const void *entry)
5757 return htab_hash_string (((const struct mflip_mips16_entry *) entry)->name);
5761 mflip_mips16_htab_eq (const void *entry, const void *name)
5763 return strcmp (((const struct mflip_mips16_entry *) entry)->name,
5764 (const char *) name) == 0;
5767 /* DECL is a function that needs a default "mips16" or "nomips16" attribute
5768 for -mflip-mips16. Return true if it should use "mips16" and false if
5769 it should use "nomips16". */
5772 mflip_mips16_use_mips16_p (tree decl)
5774 struct mflip_mips16_entry *entry;
5779 /* Use the opposite of the command-line setting for anonymous decls. */
5780 if (!DECL_NAME (decl))
5781 return !mips_base_mips16;
5783 if (!mflip_mips16_htab)
5784 mflip_mips16_htab = htab_create_ggc (37, mflip_mips16_htab_hash,
5785 mflip_mips16_htab_eq, NULL);
5787 name = IDENTIFIER_POINTER (DECL_NAME (decl));
5788 hash = htab_hash_string (name);
5789 slot = htab_find_slot_with_hash (mflip_mips16_htab, name, hash, INSERT);
5790 entry = (struct mflip_mips16_entry *) *slot;
5793 mips16_flipper = !mips16_flipper;
5794 entry = GGC_NEW (struct mflip_mips16_entry);
5796 entry->mips16_p = mips16_flipper ? !mips_base_mips16 : mips_base_mips16;
5799 return entry->mips16_p;
5802 /* Implement TARGET_INSERT_ATTRIBUTES. */
5805 mips_insert_attributes (tree decl, tree *attributes)
5808 bool mips16_p, nomips16_p;
5810 /* Check for "mips16" and "nomips16" attributes. */
5811 mips16_p = lookup_attribute ("mips16", *attributes) != NULL;
5812 nomips16_p = lookup_attribute ("nomips16", *attributes) != NULL;
5813 if (TREE_CODE (decl) != FUNCTION_DECL)
5816 error ("%qs attribute only applies to functions", "mips16");
5818 error ("%qs attribute only applies to functions", "nomips16");
5822 mips16_p |= mips_mips16_decl_p (decl);
5823 nomips16_p |= mips_nomips16_decl_p (decl);
5824 if (mips16_p || nomips16_p)
5826 /* DECL cannot be simultaneously mips16 and nomips16. */
5827 if (mips16_p && nomips16_p)
5828 error ("%qs cannot have both %<mips16%> and "
5829 "%<nomips16%> attributes",
5830 IDENTIFIER_POINTER (DECL_NAME (decl)));
5832 else if (TARGET_FLIP_MIPS16 && !DECL_ARTIFICIAL (decl))
5834 /* Implement -mflip-mips16. If DECL has neither a "nomips16" nor a
5835 "mips16" attribute, arbitrarily pick one. We must pick the same
5836 setting for duplicate declarations of a function. */
5837 name = mflip_mips16_use_mips16_p (decl) ? "mips16" : "nomips16";
5838 *attributes = tree_cons (get_identifier (name), NULL, *attributes);
5843 /* Implement TARGET_MERGE_DECL_ATTRIBUTES. */
5846 mips_merge_decl_attributes (tree olddecl, tree newdecl)
5848 /* The decls' "mips16" and "nomips16" attributes must match exactly. */
5849 if (mips_mips16_decl_p (olddecl) != mips_mips16_decl_p (newdecl))
5850 error ("%qs redeclared with conflicting %qs attributes",
5851 IDENTIFIER_POINTER (DECL_NAME (newdecl)), "mips16");
5852 if (mips_nomips16_decl_p (olddecl) != mips_nomips16_decl_p (newdecl))
5853 error ("%qs redeclared with conflicting %qs attributes",
5854 IDENTIFIER_POINTER (DECL_NAME (newdecl)), "nomips16");
5856 return merge_attributes (DECL_ATTRIBUTES (olddecl),
5857 DECL_ATTRIBUTES (newdecl));
5860 /* Implement TARGET_SET_CURRENT_FUNCTION. Decide whether the current
5861 function should use the MIPS16 ISA and switch modes accordingly. */
5864 mips_set_current_function (tree fndecl)
5866 mips_set_mips16_mode (mips_use_mips16_mode_p (fndecl));
5869 /* Implement TARGET_HANDLE_OPTION. */
5872 mips_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
5877 if (strcmp (arg, "32") == 0)
5879 else if (strcmp (arg, "o64") == 0)
5881 else if (strcmp (arg, "n32") == 0)
5883 else if (strcmp (arg, "64") == 0)
5885 else if (strcmp (arg, "eabi") == 0)
5886 mips_abi = ABI_EABI;
5893 return mips_parse_cpu (arg) != 0;
5896 mips_isa_info = mips_parse_cpu (ACONCAT (("mips", arg, NULL)));
5897 return mips_isa_info != 0;
5899 case OPT_mno_flush_func:
5900 mips_cache_flush_func = NULL;
5903 case OPT_mcode_readable_:
5904 if (strcmp (arg, "yes") == 0)
5905 mips_code_readable = CODE_READABLE_YES;
5906 else if (strcmp (arg, "pcrel") == 0)
5907 mips_code_readable = CODE_READABLE_PCREL;
5908 else if (strcmp (arg, "no") == 0)
5909 mips_code_readable = CODE_READABLE_NO;
5919 /* Set up the threshold for data to go into the small data area, instead
5920 of the normal data area, and detect any conflicts in the switches. */
5923 override_options (void)
5925 int i, start, regno;
5926 enum machine_mode mode;
5928 #ifdef SUBTARGET_OVERRIDE_OPTIONS
5929 SUBTARGET_OVERRIDE_OPTIONS;
5932 mips_section_threshold = g_switch_set ? g_switch_value : MIPS_DEFAULT_GVALUE;
5934 /* The following code determines the architecture and register size.
5935 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
5936 The GAS and GCC code should be kept in sync as much as possible. */
5938 if (mips_arch_string != 0)
5939 mips_set_architecture (mips_parse_cpu (mips_arch_string));
5941 if (mips_isa_info != 0)
5943 if (mips_arch_info == 0)
5944 mips_set_architecture (mips_isa_info);
5945 else if (mips_arch_info->isa != mips_isa_info->isa)
5946 error ("-%s conflicts with the other architecture options, "
5947 "which specify a %s processor",
5948 mips_isa_info->name,
5949 mips_cpu_info_from_isa (mips_arch_info->isa)->name);
5952 if (mips_arch_info == 0)
5954 #ifdef MIPS_CPU_STRING_DEFAULT
5955 mips_set_architecture (mips_parse_cpu (MIPS_CPU_STRING_DEFAULT));
5957 mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
5961 if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
5962 error ("-march=%s is not compatible with the selected ABI",
5963 mips_arch_info->name);
5965 /* Optimize for mips_arch, unless -mtune selects a different processor. */
5966 if (mips_tune_string != 0)
5967 mips_set_tune (mips_parse_cpu (mips_tune_string));
5969 if (mips_tune_info == 0)
5970 mips_set_tune (mips_arch_info);
5972 /* Set cost structure for the processor. */
5974 mips_cost = &mips_rtx_cost_optimize_size;
5976 mips_cost = &mips_rtx_cost_data[mips_tune];
5978 /* If the user hasn't specified a branch cost, use the processor's
5980 if (mips_branch_cost == 0)
5981 mips_branch_cost = mips_cost->branch_cost;
5983 if ((target_flags_explicit & MASK_64BIT) != 0)
5985 /* The user specified the size of the integer registers. Make sure
5986 it agrees with the ABI and ISA. */
5987 if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
5988 error ("-mgp64 used with a 32-bit processor");
5989 else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
5990 error ("-mgp32 used with a 64-bit ABI");
5991 else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
5992 error ("-mgp64 used with a 32-bit ABI");
5996 /* Infer the integer register size from the ABI and processor.
5997 Restrict ourselves to 32-bit registers if that's all the
5998 processor has, or if the ABI cannot handle 64-bit registers. */
5999 if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
6000 target_flags &= ~MASK_64BIT;
6002 target_flags |= MASK_64BIT;
6005 if ((target_flags_explicit & MASK_FLOAT64) != 0)
6007 /* Really, -mfp32 and -mfp64 are ornamental options. There's
6008 only one right answer here. */
6009 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
6010 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
6011 else if (!TARGET_64BIT && TARGET_FLOAT64
6012 && !(ISA_HAS_MXHC1 && mips_abi == ABI_32))
6013 error ("-mgp32 and -mfp64 can only be combined if the target"
6014 " supports the mfhc1 and mthc1 instructions");
6015 else if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
6016 error ("unsupported combination: %s", "-mfp64 -msingle-float");
6020 /* -msingle-float selects 32-bit float registers. Otherwise the
6021 float registers should be the same size as the integer ones. */
6022 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
6023 target_flags |= MASK_FLOAT64;
6025 target_flags &= ~MASK_FLOAT64;
6028 /* End of code shared with GAS. */
6030 if ((target_flags_explicit & MASK_LONG64) == 0)
6032 if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
6033 target_flags |= MASK_LONG64;
6035 target_flags &= ~MASK_LONG64;
6039 flag_pcc_struct_return = 0;
6041 if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
6043 /* If neither -mbranch-likely nor -mno-branch-likely was given
6044 on the command line, set MASK_BRANCHLIKELY based on the target
6047 By default, we enable use of Branch Likely instructions on
6048 all architectures which support them with the following
6049 exceptions: when creating MIPS32 or MIPS64 code, and when
6050 tuning for architectures where their use tends to hurt
6053 The MIPS32 and MIPS64 architecture specifications say "Software
6054 is strongly encouraged to avoid use of Branch Likely
6055 instructions, as they will be removed from a future revision
6056 of the [MIPS32 and MIPS64] architecture." Therefore, we do not
6057 issue those instructions unless instructed to do so by
6059 if (ISA_HAS_BRANCHLIKELY
6060 && !(ISA_MIPS32 || ISA_MIPS32R2 || ISA_MIPS64)
6061 && !(TUNE_MIPS5500 || TUNE_SB1))
6062 target_flags |= MASK_BRANCHLIKELY;
6064 target_flags &= ~MASK_BRANCHLIKELY;
6066 if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
6067 warning (0, "generation of Branch Likely instructions enabled, but not supported by architecture");
6069 /* The effect of -mabicalls isn't defined for the EABI. */
6070 if (mips_abi == ABI_EABI && TARGET_ABICALLS)
6072 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
6073 target_flags &= ~MASK_ABICALLS;
6076 /* MIPS16 cannot generate PIC yet. */
6077 if (TARGET_MIPS16 && (flag_pic || TARGET_ABICALLS))
6079 sorry ("MIPS16 PIC");
6080 target_flags &= ~MASK_ABICALLS;
6081 flag_pic = flag_pie = flag_shlib = 0;
6084 if (TARGET_ABICALLS)
6085 /* We need to set flag_pic for executables as well as DSOs
6086 because we may reference symbols that are not defined in
6087 the final executable. (MIPS does not use things like
6088 copy relocs, for example.)
6090 Also, there is a body of code that uses __PIC__ to distinguish
6091 between -mabicalls and -mno-abicalls code. */
6094 /* -mvr4130-align is a "speed over size" optimization: it usually produces
6095 faster code, but at the expense of more nops. Enable it at -O3 and
6097 if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
6098 target_flags |= MASK_VR4130_ALIGN;
6100 /* Prefer a call to memcpy over inline code when optimizing for size,
6101 though see MOVE_RATIO in mips.h. */
6102 if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0)
6103 target_flags |= MASK_MEMCPY;
6105 /* If we have a nonzero small-data limit, check that the -mgpopt
6106 setting is consistent with the other target flags. */
6107 if (mips_section_threshold > 0)
6111 if (!TARGET_MIPS16 && !TARGET_EXPLICIT_RELOCS)
6112 error ("%<-mno-gpopt%> needs %<-mexplicit-relocs%>");
6114 TARGET_LOCAL_SDATA = false;
6115 TARGET_EXTERN_SDATA = false;
6119 if (TARGET_VXWORKS_RTP)
6120 warning (0, "cannot use small-data accesses for %qs", "-mrtp");
6122 if (TARGET_ABICALLS)
6123 warning (0, "cannot use small-data accesses for %qs",
6128 #ifdef MIPS_TFMODE_FORMAT
6129 REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
6132 /* Make sure that the user didn't turn off paired single support when
6133 MIPS-3D support is requested. */
6134 if (TARGET_MIPS3D && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
6135 && !TARGET_PAIRED_SINGLE_FLOAT)
6136 error ("-mips3d requires -mpaired-single");
6138 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
6140 target_flags |= MASK_PAIRED_SINGLE_FLOAT;
6142 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
6143 and TARGET_HARD_FLOAT_ABI are both true. */
6144 if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT_ABI))
6145 error ("-mips3d/-mpaired-single must be used with -mfp64 -mhard-float");
6147 /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
6149 if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_MIPS64)
6150 error ("-mips3d/-mpaired-single must be used with -mips64");
6152 /* If TARGET_DSPR2, enable MASK_DSP. */
6154 target_flags |= MASK_DSP;
6156 mips_print_operand_punct['?'] = 1;
6157 mips_print_operand_punct['#'] = 1;
6158 mips_print_operand_punct['/'] = 1;
6159 mips_print_operand_punct['&'] = 1;
6160 mips_print_operand_punct['!'] = 1;
6161 mips_print_operand_punct['*'] = 1;
6162 mips_print_operand_punct['@'] = 1;
6163 mips_print_operand_punct['.'] = 1;
6164 mips_print_operand_punct['('] = 1;
6165 mips_print_operand_punct[')'] = 1;
6166 mips_print_operand_punct['['] = 1;
6167 mips_print_operand_punct[']'] = 1;
6168 mips_print_operand_punct['<'] = 1;
6169 mips_print_operand_punct['>'] = 1;
6170 mips_print_operand_punct['{'] = 1;
6171 mips_print_operand_punct['}'] = 1;
6172 mips_print_operand_punct['^'] = 1;
6173 mips_print_operand_punct['$'] = 1;
6174 mips_print_operand_punct['+'] = 1;
6175 mips_print_operand_punct['~'] = 1;
6176 mips_print_operand_punct['|'] = 1;
6177 mips_print_operand_punct['-'] = 1;
6179 /* Set up array to map GCC register number to debug register number.
6180 Ignore the special purpose register numbers. */
6182 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
6184 mips_dbx_regno[i] = INVALID_REGNUM;
6185 if (GP_REG_P (i) || FP_REG_P (i) || ALL_COP_REG_P (i))
6186 mips_dwarf_regno[i] = i;
6188 mips_dwarf_regno[i] = INVALID_REGNUM;
6191 start = GP_DBX_FIRST - GP_REG_FIRST;
6192 for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
6193 mips_dbx_regno[i] = i + start;
6195 start = FP_DBX_FIRST - FP_REG_FIRST;
6196 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
6197 mips_dbx_regno[i] = i + start;
6199 /* HI and LO debug registers use big-endian ordering. */
6200 mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
6201 mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
6202 mips_dwarf_regno[HI_REGNUM] = MD_REG_FIRST + 0;
6203 mips_dwarf_regno[LO_REGNUM] = MD_REG_FIRST + 1;
6204 for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
6206 mips_dwarf_regno[i + TARGET_LITTLE_ENDIAN] = i;
6207 mips_dwarf_regno[i + TARGET_BIG_ENDIAN] = i + 1;
6210 /* Set up array giving whether a given register can hold a given mode. */
6212 for (mode = VOIDmode;
6213 mode != MAX_MACHINE_MODE;
6214 mode = (enum machine_mode) ((int)mode + 1))
6216 register int size = GET_MODE_SIZE (mode);
6217 register enum mode_class class = GET_MODE_CLASS (mode);
6219 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
6223 if (mode == CCV2mode)
6226 && (regno - ST_REG_FIRST) % 2 == 0);
6228 else if (mode == CCV4mode)
6231 && (regno - ST_REG_FIRST) % 4 == 0);
6233 else if (mode == CCmode)
6236 temp = (regno == FPSW_REGNUM);
6238 temp = (ST_REG_P (regno) || GP_REG_P (regno)
6239 || FP_REG_P (regno));
6242 else if (GP_REG_P (regno))
6243 temp = ((regno & 1) == 0 || size <= UNITS_PER_WORD);
6245 else if (FP_REG_P (regno))
6246 temp = ((((regno % MAX_FPRS_PER_FMT) == 0)
6247 || (MIN_FPRS_PER_FMT == 1
6248 && size <= UNITS_PER_FPREG))
6249 && (((class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT
6250 || class == MODE_VECTOR_FLOAT)
6251 && size <= UNITS_PER_FPVALUE)
6252 /* Allow integer modes that fit into a single
6253 register. We need to put integers into FPRs
6254 when using instructions like cvt and trunc.
6255 We can't allow sizes smaller than a word,
6256 the FPU has no appropriate load/store
6257 instructions for those. */
6258 || (class == MODE_INT
6259 && size >= MIN_UNITS_PER_WORD
6260 && size <= UNITS_PER_FPREG)
6261 /* Allow TFmode for CCmode reloads. */
6262 || (ISA_HAS_8CC && mode == TFmode)));
6264 else if (ACC_REG_P (regno))
6265 temp = ((INTEGRAL_MODE_P (mode) || ALL_FIXED_POINT_MODE_P (mode))
6266 && size <= UNITS_PER_WORD * 2
6267 && (size <= UNITS_PER_WORD
6268 || regno == MD_REG_FIRST
6269 || (DSP_ACC_REG_P (regno)
6270 && ((regno - DSP_ACC_REG_FIRST) & 1) == 0)));
6272 else if (ALL_COP_REG_P (regno))
6273 temp = (class == MODE_INT && size <= UNITS_PER_WORD);
6277 mips_hard_regno_mode_ok[(int)mode][regno] = temp;
6281 /* Save GPR registers in word_mode sized hunks. word_mode hasn't been
6282 initialized yet, so we can't use that here. */
6283 gpr_mode = TARGET_64BIT ? DImode : SImode;
6285 /* Function to allocate machine-dependent function status. */
6286 init_machine_status = &mips_init_machine_status;
6288 /* Default to working around R4000 errata only if the processor
6289 was selected explicitly. */
6290 if ((target_flags_explicit & MASK_FIX_R4000) == 0
6291 && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
6292 target_flags |= MASK_FIX_R4000;
6294 /* Default to working around R4400 errata only if the processor
6295 was selected explicitly. */
6296 if ((target_flags_explicit & MASK_FIX_R4400) == 0
6297 && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
6298 target_flags |= MASK_FIX_R4400;
6300 /* Save base state of options. */
6301 mips_base_mips16 = TARGET_MIPS16;
6302 mips_base_target_flags = target_flags;
6303 mips_base_schedule_insns = flag_schedule_insns;
6304 mips_base_reorder_blocks_and_partition = flag_reorder_blocks_and_partition;
6305 mips_base_move_loop_invariants = flag_move_loop_invariants;
6306 mips_base_align_loops = align_loops;
6307 mips_base_align_jumps = align_jumps;
6308 mips_base_align_functions = align_functions;
6309 mips_flag_delayed_branch = flag_delayed_branch;
6311 /* Now select the mips16 or 32-bit instruction set, as requested. */
6312 mips_set_mips16_mode (mips_base_mips16);
6315 /* Swap the register information for registers I and I + 1, which
6316 currently have the wrong endianness. Note that the registers'
6317 fixedness and call-clobberedness might have been set on the
6321 mips_swap_registers (unsigned int i)
6326 #define SWAP_INT(X, Y) (tmpi = (X), (X) = (Y), (Y) = tmpi)
6327 #define SWAP_STRING(X, Y) (tmps = (X), (X) = (Y), (Y) = tmps)
6329 SWAP_INT (fixed_regs[i], fixed_regs[i + 1]);
6330 SWAP_INT (call_used_regs[i], call_used_regs[i + 1]);
6331 SWAP_INT (call_really_used_regs[i], call_really_used_regs[i + 1]);
6332 SWAP_STRING (reg_names[i], reg_names[i + 1]);
6338 /* Implement CONDITIONAL_REGISTER_USAGE. */
6341 mips_conditional_register_usage (void)
6347 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
6348 fixed_regs[regno] = call_used_regs[regno] = 1;
6350 if (!TARGET_HARD_FLOAT)
6354 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
6355 fixed_regs[regno] = call_used_regs[regno] = 1;
6356 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
6357 fixed_regs[regno] = call_used_regs[regno] = 1;
6359 else if (! ISA_HAS_8CC)
6363 /* We only have a single condition code register. We
6364 implement this by hiding all the condition code registers,
6365 and generating RTL that refers directly to ST_REG_FIRST. */
6366 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
6367 fixed_regs[regno] = call_used_regs[regno] = 1;
6369 /* In mips16 mode, we permit the $t temporary registers to be used
6370 for reload. We prohibit the unused $s registers, since they
6371 are caller saved, and saving them via a mips16 register would
6372 probably waste more time than just reloading the value. */
6375 fixed_regs[18] = call_used_regs[18] = 1;
6376 fixed_regs[19] = call_used_regs[19] = 1;
6377 fixed_regs[20] = call_used_regs[20] = 1;
6378 fixed_regs[21] = call_used_regs[21] = 1;
6379 fixed_regs[22] = call_used_regs[22] = 1;
6380 fixed_regs[23] = call_used_regs[23] = 1;
6381 fixed_regs[26] = call_used_regs[26] = 1;
6382 fixed_regs[27] = call_used_regs[27] = 1;
6383 fixed_regs[30] = call_used_regs[30] = 1;
6385 /* fp20-23 are now caller saved. */
6386 if (mips_abi == ABI_64)
6389 for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
6390 call_really_used_regs[regno] = call_used_regs[regno] = 1;
6392 /* Odd registers from fp21 to fp31 are now caller saved. */
6393 if (mips_abi == ABI_N32)
6396 for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
6397 call_really_used_regs[regno] = call_used_regs[regno] = 1;
6399 /* Make sure that double-register accumulator values are correctly
6400 ordered for the current endianness. */
6401 if (TARGET_LITTLE_ENDIAN)
6404 mips_swap_registers (MD_REG_FIRST);
6405 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno += 2)
6406 mips_swap_registers (regno);
6410 /* Allocate a chunk of memory for per-function machine-dependent data. */
6411 static struct machine_function *
6412 mips_init_machine_status (void)
6414 return ((struct machine_function *)
6415 ggc_alloc_cleared (sizeof (struct machine_function)));
6418 /* On the mips16, we want to allocate $24 (T_REG) before other
6419 registers for instructions for which it is possible. This helps
6420 avoid shuffling registers around in order to set up for an xor,
6421 encouraging the compiler to use a cmp instead. */
6424 mips_order_regs_for_local_alloc (void)
6428 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
6429 reg_alloc_order[i] = i;
6433 /* It really doesn't matter where we put register 0, since it is
6434 a fixed register anyhow. */
6435 reg_alloc_order[0] = 24;
6436 reg_alloc_order[24] = 0;
6441 /* The MIPS debug format wants all automatic variables and arguments
6442 to be in terms of the virtual frame pointer (stack pointer before
6443 any adjustment in the function), while the MIPS 3.0 linker wants
6444 the frame pointer to be the stack pointer after the initial
6445 adjustment. So, we do the adjustment here. The arg pointer (which
6446 is eliminated) points to the virtual frame pointer, while the frame
6447 pointer (which may be eliminated) points to the stack pointer after
6448 the initial adjustments. */
6451 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
6453 rtx offset2 = const0_rtx;
6454 rtx reg = eliminate_constant_term (addr, &offset2);
6457 offset = INTVAL (offset2);
6459 if (reg == stack_pointer_rtx || reg == frame_pointer_rtx
6460 || reg == hard_frame_pointer_rtx)
6462 HOST_WIDE_INT frame_size = (!cfun->machine->frame.initialized)
6463 ? compute_frame_size (get_frame_size ())
6464 : cfun->machine->frame.total_size;
6466 /* MIPS16 frame is smaller */
6467 if (frame_pointer_needed && TARGET_MIPS16)
6468 frame_size -= cfun->machine->frame.args_size;
6470 offset = offset - frame_size;
6473 /* sdbout_parms does not want this to crash for unrecognized cases. */
6475 else if (reg != arg_pointer_rtx)
6476 fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
6483 /* If OP is an UNSPEC address, return the address to which it refers,
6484 otherwise return OP itself. */
6487 mips_strip_unspec_address (rtx op)
6491 split_const (op, &base, &offset);
6492 if (UNSPEC_ADDRESS_P (base))
6493 op = plus_constant (UNSPEC_ADDRESS (base), INTVAL (offset));
6497 /* Implement the PRINT_OPERAND macro. The MIPS-specific operand codes are:
6499 'X' OP is CONST_INT, prints 32 bits in hexadecimal format = "0x%08x",
6500 'x' OP is CONST_INT, prints 16 bits in hexadecimal format = "0x%04x",
6501 'h' OP is HIGH, prints %hi(X),
6502 'd' output integer constant in decimal,
6503 'z' if the operand is 0, use $0 instead of normal operand.
6504 'D' print second part of double-word register or memory operand.
6505 'L' print low-order register of double-word register operand.
6506 'M' print high-order register of double-word register operand.
6507 'C' print part of opcode for a branch condition.
6508 'F' print part of opcode for a floating-point branch condition.
6509 'N' print part of opcode for a branch condition, inverted.
6510 'W' print part of opcode for a floating-point branch condition, inverted.
6511 'T' print 'f' for (eq:CC ...), 't' for (ne:CC ...),
6512 'z' for (eq:?I ...), 'n' for (ne:?I ...).
6513 't' like 'T', but with the EQ/NE cases reversed
6514 'Y' for a CONST_INT X, print mips_fp_conditions[X]
6515 'Z' print the operand and a comma for ISA_HAS_8CC, otherwise print nothing
6516 'R' print the reloc associated with LO_SUM
6517 'q' print DSP accumulator registers
6519 The punctuation characters are:
6521 '(' Turn on .set noreorder
6522 ')' Turn on .set reorder
6523 '[' Turn on .set noat
6525 '<' Turn on .set nomacro
6526 '>' Turn on .set macro
6527 '{' Turn on .set volatile (not GAS)
6528 '}' Turn on .set novolatile (not GAS)
6529 '&' Turn on .set noreorder if filling delay slots
6530 '*' Turn on both .set noreorder and .set nomacro if filling delay slots
6531 '!' Turn on .set nomacro if filling delay slots
6532 '#' Print nop if in a .set noreorder section.
6533 '/' Like '#', but does nothing within a delayed branch sequence
6534 '?' Print 'l' if we are to use a branch likely instead of normal branch.
6535 '@' Print the name of the assembler temporary register (at or $1).
6536 '.' Print the name of the register with a hard-wired zero (zero or $0).
6537 '^' Print the name of the pic call-through register (t9 or $25).
6538 '$' Print the name of the stack pointer register (sp or $29).
6539 '+' Print the name of the gp register (usually gp or $28).
6540 '~' Output a branch alignment to LABEL_ALIGN(NULL).
6541 '|' Print .set push; .set mips2 if !ISA_HAS_LL_SC.
6542 '-' Print .set pop under the same conditions for '|'. */
6545 print_operand (FILE *file, rtx op, int letter)
6547 register enum rtx_code code;
6549 if (PRINT_OPERAND_PUNCT_VALID_P (letter))
6554 if (mips_branch_likely)
6559 fputs (reg_names [GP_REG_FIRST + 1], file);
6563 fputs (reg_names [PIC_FUNCTION_ADDR_REGNUM], file);
6567 fputs (reg_names [GP_REG_FIRST + 0], file);
6571 fputs (reg_names[STACK_POINTER_REGNUM], file);
6575 fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
6579 if (final_sequence != 0 && set_noreorder++ == 0)
6580 fputs (".set\tnoreorder\n\t", file);
6584 if (final_sequence != 0)
6586 if (set_noreorder++ == 0)
6587 fputs (".set\tnoreorder\n\t", file);
6589 if (set_nomacro++ == 0)
6590 fputs (".set\tnomacro\n\t", file);
6595 if (final_sequence != 0 && set_nomacro++ == 0)
6596 fputs ("\n\t.set\tnomacro", file);
6600 if (set_noreorder != 0)
6601 fputs ("\n\tnop", file);
6605 /* Print an extra newline so that the delayed insn is separated
6606 from the following ones. This looks neater and is consistent
6607 with non-nop delayed sequences. */
6608 if (set_noreorder != 0 && final_sequence == 0)
6609 fputs ("\n\tnop\n", file);
6613 if (set_noreorder++ == 0)
6614 fputs (".set\tnoreorder\n\t", file);
6618 if (set_noreorder == 0)
6619 error ("internal error: %%) found without a %%( in assembler pattern");
6621 else if (--set_noreorder == 0)
6622 fputs ("\n\t.set\treorder", file);
6627 if (set_noat++ == 0)
6628 fputs (".set\tnoat\n\t", file);
6633 error ("internal error: %%] found without a %%[ in assembler pattern");
6634 else if (--set_noat == 0)
6635 fputs ("\n\t.set\tat", file);
6640 if (set_nomacro++ == 0)
6641 fputs (".set\tnomacro\n\t", file);
6645 if (set_nomacro == 0)
6646 error ("internal error: %%> found without a %%< in assembler pattern");
6647 else if (--set_nomacro == 0)
6648 fputs ("\n\t.set\tmacro", file);
6653 if (set_volatile++ == 0)
6654 fputs ("#.set\tvolatile\n\t", file);
6658 if (set_volatile == 0)
6659 error ("internal error: %%} found without a %%{ in assembler pattern");
6660 else if (--set_volatile == 0)
6661 fputs ("\n\t#.set\tnovolatile", file);
6667 if (align_labels_log > 0)
6668 ASM_OUTPUT_ALIGN (file, align_labels_log);
6674 fputs (".set\tpush\n\t.set\tmips2\n\t", file);
6679 fputs ("\n\t.set\tpop", file);
6683 error ("PRINT_OPERAND: unknown punctuation '%c'", letter);
6692 error ("PRINT_OPERAND null pointer");
6696 code = GET_CODE (op);
6701 case EQ: fputs ("eq", file); break;
6702 case NE: fputs ("ne", file); break;
6703 case GT: fputs ("gt", file); break;
6704 case GE: fputs ("ge", file); break;
6705 case LT: fputs ("lt", file); break;
6706 case LE: fputs ("le", file); break;
6707 case GTU: fputs ("gtu", file); break;
6708 case GEU: fputs ("geu", file); break;
6709 case LTU: fputs ("ltu", file); break;
6710 case LEU: fputs ("leu", file); break;
6712 fatal_insn ("PRINT_OPERAND, invalid insn for %%C", op);
6715 else if (letter == 'N')
6718 case EQ: fputs ("ne", file); break;
6719 case NE: fputs ("eq", file); break;
6720 case GT: fputs ("le", file); break;
6721 case GE: fputs ("lt", file); break;
6722 case LT: fputs ("ge", file); break;
6723 case LE: fputs ("gt", file); break;
6724 case GTU: fputs ("leu", file); break;
6725 case GEU: fputs ("ltu", file); break;
6726 case LTU: fputs ("geu", file); break;
6727 case LEU: fputs ("gtu", file); break;
6729 fatal_insn ("PRINT_OPERAND, invalid insn for %%N", op);
6732 else if (letter == 'F')
6735 case EQ: fputs ("c1f", file); break;
6736 case NE: fputs ("c1t", file); break;
6738 fatal_insn ("PRINT_OPERAND, invalid insn for %%F", op);
6741 else if (letter == 'W')
6744 case EQ: fputs ("c1t", file); break;
6745 case NE: fputs ("c1f", file); break;
6747 fatal_insn ("PRINT_OPERAND, invalid insn for %%W", op);
6750 else if (letter == 'h')
6752 if (GET_CODE (op) == HIGH)
6755 print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_hi_relocs);
6758 else if (letter == 'R')
6759 print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_lo_relocs);
6761 else if (letter == 'Y')
6763 if (GET_CODE (op) == CONST_INT
6764 && ((unsigned HOST_WIDE_INT) INTVAL (op)
6765 < ARRAY_SIZE (mips_fp_conditions)))
6766 fputs (mips_fp_conditions[INTVAL (op)], file);
6768 output_operand_lossage ("invalid %%Y value");
6771 else if (letter == 'Z')
6775 print_operand (file, op, 0);
6780 else if (letter == 'q')
6785 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
6787 regnum = REGNO (op);
6788 if (MD_REG_P (regnum))
6789 fprintf (file, "$ac0");
6790 else if (DSP_ACC_REG_P (regnum))
6791 fprintf (file, "$ac%c", reg_names[regnum][3]);
6793 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
6796 else if (code == REG || code == SUBREG)
6798 register int regnum;
6801 regnum = REGNO (op);
6803 regnum = true_regnum (op);
6805 if ((letter == 'M' && ! WORDS_BIG_ENDIAN)
6806 || (letter == 'L' && WORDS_BIG_ENDIAN)
6810 fprintf (file, "%s", reg_names[regnum]);
6813 else if (code == MEM)
6816 output_address (plus_constant (XEXP (op, 0), 4));
6818 output_address (XEXP (op, 0));
6821 else if (letter == 'x' && GET_CODE (op) == CONST_INT)
6822 fprintf (file, HOST_WIDE_INT_PRINT_HEX, 0xffff & INTVAL(op));
6824 else if (letter == 'X' && GET_CODE(op) == CONST_INT)
6825 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
6827 else if (letter == 'd' && GET_CODE(op) == CONST_INT)
6828 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (INTVAL(op)));
6830 else if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
6831 fputs (reg_names[GP_REG_FIRST], file);
6833 else if (letter == 'd' || letter == 'x' || letter == 'X')
6834 output_operand_lossage ("invalid use of %%d, %%x, or %%X");
6836 else if (letter == 'T' || letter == 't')
6838 int truth = (code == NE) == (letter == 'T');
6839 fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
6842 else if (CONST_GP_P (op))
6843 fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
6846 output_addr_const (file, mips_strip_unspec_address (op));
6850 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM
6851 in context CONTEXT. RELOCS is the array of relocations to use. */
6854 print_operand_reloc (FILE *file, rtx op, enum mips_symbol_context context,
6855 const char **relocs)
6857 enum mips_symbol_type symbol_type;
6860 symbol_type = mips_classify_symbolic_expression (op, context);
6861 if (relocs[symbol_type] == 0)
6862 fatal_insn ("PRINT_OPERAND, invalid operand for relocation", op);
6864 fputs (relocs[symbol_type], file);
6865 output_addr_const (file, mips_strip_unspec_address (op));
6866 for (p = relocs[symbol_type]; *p != 0; p++)
6871 /* Output address operand X to FILE. */
6874 print_operand_address (FILE *file, rtx x)
6876 struct mips_address_info addr;
6878 if (mips_classify_address (&addr, x, word_mode, true))
6882 print_operand (file, addr.offset, 0);
6883 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
6886 case ADDRESS_LO_SUM:
6887 print_operand_reloc (file, addr.offset, SYMBOL_CONTEXT_MEM,
6889 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
6892 case ADDRESS_CONST_INT:
6893 output_addr_const (file, x);
6894 fprintf (file, "(%s)", reg_names[0]);
6897 case ADDRESS_SYMBOLIC:
6898 output_addr_const (file, mips_strip_unspec_address (x));
6904 /* When using assembler macros, keep track of all of small-data externs
6905 so that mips_file_end can emit the appropriate declarations for them.
6907 In most cases it would be safe (though pointless) to emit .externs
6908 for other symbols too. One exception is when an object is within
6909 the -G limit but declared by the user to be in a section other
6910 than .sbss or .sdata. */
6913 mips_output_external (FILE *file, tree decl, const char *name)
6915 default_elf_asm_output_external (file, decl, name);
6917 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
6918 set in order to avoid putting out names that are never really
6920 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
6922 if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
6924 fputs ("\t.extern\t", file);
6925 assemble_name (file, name);
6926 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC "\n",
6927 int_size_in_bytes (TREE_TYPE (decl)));
6929 else if (TARGET_IRIX
6930 && mips_abi == ABI_32
6931 && TREE_CODE (decl) == FUNCTION_DECL)
6933 /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
6934 `.global name .text' directive for every used but
6935 undefined function. If we don't, the linker may perform
6936 an optimization (skipping over the insns that set $gp)
6937 when it is unsafe. */
6938 fputs ("\t.globl ", file);
6939 assemble_name (file, name);
6940 fputs (" .text\n", file);
6945 /* Emit a new filename to a stream. If we are smuggling stabs, try to
6946 put out a MIPS ECOFF file and a stab. */
6949 mips_output_filename (FILE *stream, const char *name)
6952 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
6954 if (write_symbols == DWARF2_DEBUG)
6956 else if (mips_output_filename_first_time)
6958 mips_output_filename_first_time = 0;
6959 num_source_filenames += 1;
6960 current_function_file = name;
6961 fprintf (stream, "\t.file\t%d ", num_source_filenames);
6962 output_quoted_string (stream, name);
6963 putc ('\n', stream);
6966 /* If we are emitting stabs, let dbxout.c handle this (except for
6967 the mips_output_filename_first_time case). */
6968 else if (write_symbols == DBX_DEBUG)
6971 else if (name != current_function_file
6972 && strcmp (name, current_function_file) != 0)
6974 num_source_filenames += 1;
6975 current_function_file = name;
6976 fprintf (stream, "\t.file\t%d ", num_source_filenames);
6977 output_quoted_string (stream, name);
6978 putc ('\n', stream);
6982 /* Output an ASCII string, in a space-saving way. PREFIX is the string
6983 that should be written before the opening quote, such as "\t.ascii\t"
6984 for real string data or "\t# " for a comment. */
6987 mips_output_ascii (FILE *stream, const char *string_param, size_t len,
6992 register const unsigned char *string =
6993 (const unsigned char *)string_param;
6995 fprintf (stream, "%s\"", prefix);
6996 for (i = 0; i < len; i++)
6998 register int c = string[i];
7002 if (c == '\\' || c == '\"')
7004 putc ('\\', stream);
7012 fprintf (stream, "\\%03o", c);
7016 if (cur_pos > 72 && i+1 < len)
7019 fprintf (stream, "\"\n%s\"", prefix);
7022 fprintf (stream, "\"\n");
7025 /* Implement TARGET_ASM_FILE_START. */
7028 mips_file_start (void)
7030 default_file_start ();
7034 /* Generate a special section to describe the ABI switches used to
7035 produce the resultant binary. This used to be done by the assembler
7036 setting bits in the ELF header's flags field, but we have run out of
7037 bits. GDB needs this information in order to be able to correctly
7038 debug these binaries. See the function mips_gdbarch_init() in
7039 gdb/mips-tdep.c. This is unnecessary for the IRIX 5/6 ABIs and
7040 causes unnecessary IRIX 6 ld warnings. */
7041 const char * abi_string = NULL;
7045 case ABI_32: abi_string = "abi32"; break;
7046 case ABI_N32: abi_string = "abiN32"; break;
7047 case ABI_64: abi_string = "abi64"; break;
7048 case ABI_O64: abi_string = "abiO64"; break;
7049 case ABI_EABI: abi_string = TARGET_64BIT ? "eabi64" : "eabi32"; break;
7053 /* Note - we use fprintf directly rather than calling switch_to_section
7054 because in this way we can avoid creating an allocated section. We
7055 do not want this section to take up any space in the running
7057 fprintf (asm_out_file, "\t.section .mdebug.%s\n\t.previous\n",
7060 /* There is no ELF header flag to distinguish long32 forms of the
7061 EABI from long64 forms. Emit a special section to help tools
7062 such as GDB. Do the same for o64, which is sometimes used with
7064 if (mips_abi == ABI_EABI || mips_abi == ABI_O64)
7065 fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n"
7066 "\t.previous\n", TARGET_LONG64 ? 64 : 32);
7068 #ifdef HAVE_AS_GNU_ATTRIBUTE
7069 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
7070 TARGET_HARD_FLOAT_ABI ? (TARGET_DOUBLE_FLOAT ? 1 : 2) : 3);
7074 /* Generate the pseudo ops that System V.4 wants. */
7075 if (TARGET_ABICALLS)
7076 fprintf (asm_out_file, "\t.abicalls\n");
7078 if (flag_verbose_asm)
7079 fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
7081 mips_section_threshold, mips_arch_info->name, mips_isa);
7084 #ifdef BSS_SECTION_ASM_OP
7085 /* Implement ASM_OUTPUT_ALIGNED_BSS. This differs from the default only
7086 in the use of sbss. */
7089 mips_output_aligned_bss (FILE *stream, tree decl, const char *name,
7090 unsigned HOST_WIDE_INT size, int align)
7092 extern tree last_assemble_variable_decl;
7094 if (mips_in_small_data_p (decl))
7095 switch_to_section (get_named_section (NULL, ".sbss", 0));
7097 switch_to_section (bss_section);
7098 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
7099 last_assemble_variable_decl = decl;
7100 ASM_DECLARE_OBJECT_NAME (stream, name, decl);
7101 ASM_OUTPUT_SKIP (stream, size != 0 ? size : 1);
7105 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
7106 elfos.h version, but we also need to handle -muninit-const-in-rodata. */
7109 mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
7110 unsigned HOST_WIDE_INT size,
7113 /* If the target wants uninitialized const declarations in
7114 .rdata then don't put them in .comm. */
7115 if (TARGET_EMBEDDED_DATA && TARGET_UNINIT_CONST_IN_RODATA
7116 && TREE_CODE (decl) == VAR_DECL && TREE_READONLY (decl)
7117 && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
7119 if (TREE_PUBLIC (decl) && DECL_NAME (decl))
7120 targetm.asm_out.globalize_label (stream, name);
7122 switch_to_section (readonly_data_section);
7123 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
7124 mips_declare_object (stream, name, "",
7125 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
7129 mips_declare_common_object (stream, name, "\n\t.comm\t",
7133 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
7134 NAME is the name of the object and ALIGN is the required alignment
7135 in bytes. TAKES_ALIGNMENT_P is true if the directive takes a third
7136 alignment argument. */
7139 mips_declare_common_object (FILE *stream, const char *name,
7140 const char *init_string,
7141 unsigned HOST_WIDE_INT size,
7142 unsigned int align, bool takes_alignment_p)
7144 if (!takes_alignment_p)
7146 size += (align / BITS_PER_UNIT) - 1;
7147 size -= size % (align / BITS_PER_UNIT);
7148 mips_declare_object (stream, name, init_string,
7149 "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
7152 mips_declare_object (stream, name, init_string,
7153 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
7154 size, align / BITS_PER_UNIT);
7157 /* Emit either a label, .comm, or .lcomm directive. When using assembler
7158 macros, mark the symbol as written so that mips_file_end won't emit an
7159 .extern for it. STREAM is the output file, NAME is the name of the
7160 symbol, INIT_STRING is the string that should be written before the
7161 symbol and FINAL_STRING is the string that should be written after it.
7162 FINAL_STRING is a printf() format that consumes the remaining arguments. */
7165 mips_declare_object (FILE *stream, const char *name, const char *init_string,
7166 const char *final_string, ...)
7170 fputs (init_string, stream);
7171 assemble_name (stream, name);
7172 va_start (ap, final_string);
7173 vfprintf (stream, final_string, ap);
7176 if (!TARGET_EXPLICIT_RELOCS)
7178 tree name_tree = get_identifier (name);
7179 TREE_ASM_WRITTEN (name_tree) = 1;
7183 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
7184 extern int size_directive_output;
7186 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
7187 definitions except that it uses mips_declare_object() to emit the label. */
7190 mips_declare_object_name (FILE *stream, const char *name,
7191 tree decl ATTRIBUTE_UNUSED)
7193 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
7194 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
7197 size_directive_output = 0;
7198 if (!flag_inhibit_size_directive && DECL_SIZE (decl))
7202 size_directive_output = 1;
7203 size = int_size_in_bytes (TREE_TYPE (decl));
7204 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
7207 mips_declare_object (stream, name, "", ":\n");
7210 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */
7213 mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
7217 name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
7218 if (!flag_inhibit_size_directive
7219 && DECL_SIZE (decl) != 0
7220 && !at_end && top_level
7221 && DECL_INITIAL (decl) == error_mark_node
7222 && !size_directive_output)
7226 size_directive_output = 1;
7227 size = int_size_in_bytes (TREE_TYPE (decl));
7228 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
7233 /* Return true if X in context CONTEXT is a small data address that can
7234 be rewritten as a LO_SUM. */
7237 mips_rewrite_small_data_p (rtx x, enum mips_symbol_context context)
7239 enum mips_symbol_type symbol_type;
7241 return (TARGET_EXPLICIT_RELOCS
7242 && mips_symbolic_constant_p (x, context, &symbol_type)
7243 && symbol_type == SYMBOL_GP_RELATIVE);
7247 /* A for_each_rtx callback for mips_small_data_pattern_p. DATA is the
7248 containing MEM, or null if none. */
7251 mips_small_data_pattern_1 (rtx *loc, void *data)
7253 enum mips_symbol_context context;
7255 if (GET_CODE (*loc) == LO_SUM)
7260 if (for_each_rtx (&XEXP (*loc, 0), mips_small_data_pattern_1, *loc))
7265 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
7266 return mips_rewrite_small_data_p (*loc, context);
7269 /* Return true if OP refers to small data symbols directly, not through
7273 mips_small_data_pattern_p (rtx op)
7275 return for_each_rtx (&op, mips_small_data_pattern_1, 0);
7278 /* A for_each_rtx callback, used by mips_rewrite_small_data.
7279 DATA is the containing MEM, or null if none. */
7282 mips_rewrite_small_data_1 (rtx *loc, void *data)
7284 enum mips_symbol_context context;
7288 for_each_rtx (&XEXP (*loc, 0), mips_rewrite_small_data_1, *loc);
7292 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
7293 if (mips_rewrite_small_data_p (*loc, context))
7294 *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
7296 if (GET_CODE (*loc) == LO_SUM)
7302 /* If possible, rewrite OP so that it refers to small data using
7303 explicit relocations. */
7306 mips_rewrite_small_data (rtx op)
7308 op = copy_insn (op);
7309 for_each_rtx (&op, mips_rewrite_small_data_1, 0);
7313 /* Return true if the current function has an insn that implicitly
7317 mips_function_has_gp_insn (void)
7319 /* Don't bother rechecking if we found one last time. */
7320 if (!cfun->machine->has_gp_insn_p)
7324 push_topmost_sequence ();
7325 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7327 && GET_CODE (PATTERN (insn)) != USE
7328 && GET_CODE (PATTERN (insn)) != CLOBBER
7329 && (get_attr_got (insn) != GOT_UNSET
7330 || small_data_pattern (PATTERN (insn), VOIDmode)))
7332 pop_topmost_sequence ();
7334 cfun->machine->has_gp_insn_p = (insn != 0);
7336 return cfun->machine->has_gp_insn_p;
7340 /* Return the register that should be used as the global pointer
7341 within this function. Return 0 if the function doesn't need
7342 a global pointer. */
7345 mips_global_pointer (void)
7349 /* $gp is always available unless we're using a GOT. */
7350 if (!TARGET_USE_GOT)
7351 return GLOBAL_POINTER_REGNUM;
7353 /* We must always provide $gp when it is used implicitly. */
7354 if (!TARGET_EXPLICIT_RELOCS)
7355 return GLOBAL_POINTER_REGNUM;
7357 /* FUNCTION_PROFILER includes a jal macro, so we need to give it
7359 if (current_function_profile)
7360 return GLOBAL_POINTER_REGNUM;
7362 /* If the function has a nonlocal goto, $gp must hold the correct
7363 global pointer for the target function. */
7364 if (current_function_has_nonlocal_goto)
7365 return GLOBAL_POINTER_REGNUM;
7367 /* If the gp is never referenced, there's no need to initialize it.
7368 Note that reload can sometimes introduce constant pool references
7369 into a function that otherwise didn't need them. For example,
7370 suppose we have an instruction like:
7372 (set (reg:DF R1) (float:DF (reg:SI R2)))
7374 If R2 turns out to be constant such as 1, the instruction may have a
7375 REG_EQUAL note saying that R1 == 1.0. Reload then has the option of
7376 using this constant if R2 doesn't get allocated to a register.
7378 In cases like these, reload will have added the constant to the pool
7379 but no instruction will yet refer to it. */
7380 if (!df_regs_ever_live_p (GLOBAL_POINTER_REGNUM)
7381 && !current_function_uses_const_pool
7382 && !mips_function_has_gp_insn ())
7385 /* We need a global pointer, but perhaps we can use a call-clobbered
7386 register instead of $gp. */
7387 if (TARGET_CALL_SAVED_GP && current_function_is_leaf)
7388 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
7389 if (!df_regs_ever_live_p (regno)
7390 && call_really_used_regs[regno]
7391 && !fixed_regs[regno]
7392 && regno != PIC_FUNCTION_ADDR_REGNUM)
7395 return GLOBAL_POINTER_REGNUM;
7399 /* Return true if the function return value MODE will get returned in a
7400 floating-point register. */
7403 mips_return_mode_in_fpr_p (enum machine_mode mode)
7405 return ((GET_MODE_CLASS (mode) == MODE_FLOAT
7406 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
7407 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
7408 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);
7411 /* Return a two-character string representing a function floating-point
7412 return mode, used to name MIPS16 function stubs. */
7415 mips16_call_stub_mode_suffix (enum machine_mode mode)
7419 else if (mode == DFmode)
7421 else if (mode == SCmode)
7423 else if (mode == DCmode)
7425 else if (mode == V2SFmode)
7431 /* Return true if the current function returns its value in a floating-point
7432 register in MIPS16 mode. */
7435 mips16_cfun_returns_in_fpr_p (void)
7437 tree return_type = DECL_RESULT (current_function_decl);
7438 return (TARGET_MIPS16
7439 && TARGET_HARD_FLOAT_ABI
7440 && !aggregate_value_p (return_type, current_function_decl)
7441 && mips_return_mode_in_fpr_p (DECL_MODE (return_type)));
7445 /* Return true if the current function must save REGNO. */
7448 mips_save_reg_p (unsigned int regno)
7450 /* We only need to save $gp if TARGET_CALL_SAVED_GP and only then
7451 if we have not chosen a call-clobbered substitute. */
7452 if (regno == GLOBAL_POINTER_REGNUM)
7453 return TARGET_CALL_SAVED_GP && cfun->machine->global_pointer == regno;
7455 /* Check call-saved registers. */
7456 if ((current_function_saves_all_registers || df_regs_ever_live_p (regno))
7457 && !call_really_used_regs[regno])
7460 /* Save both registers in an FPR pair if either one is used. This is
7461 needed for the case when MIN_FPRS_PER_FMT == 1, which allows the odd
7462 register to be used without the even register. */
7463 if (FP_REG_P (regno)
7464 && MAX_FPRS_PER_FMT == 2
7465 && df_regs_ever_live_p (regno + 1)
7466 && !call_really_used_regs[regno + 1])
7469 /* We need to save the old frame pointer before setting up a new one. */
7470 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
7473 /* Check for registers that must be saved for FUNCTION_PROFILER. */
7474 if (current_function_profile && MIPS_SAVE_REG_FOR_PROFILING_P (regno))
7477 /* We need to save the incoming return address if it is ever clobbered
7478 within the function, if __builtin_eh_return is being used to set a
7479 different return address, or if a stub is being used to return a
7481 if (regno == GP_REG_FIRST + 31
7482 && (df_regs_ever_live_p (regno)
7483 || current_function_calls_eh_return
7484 || mips16_cfun_returns_in_fpr_p ()))
7490 /* Return the index of the lowest X in the range [0, SIZE) for which
7491 bit REGS[X] is set in MASK. Return SIZE if there is no such X. */
7494 mips16e_find_first_register (unsigned int mask, const unsigned char *regs,
7499 for (i = 0; i < size; i++)
7500 if (BITSET_P (mask, regs[i]))
7506 /* *MASK_PTR is a mask of general purpose registers and *GP_REG_SIZE_PTR
7507 is the number of bytes that they occupy. If *MASK_PTR contains REGS[X]
7508 for some X in [0, SIZE), adjust *MASK_PTR and *GP_REG_SIZE_PTR so that
7509 the same is true for all indexes (X, SIZE). */
7512 mips16e_mask_registers (unsigned int *mask_ptr, const unsigned char *regs,
7513 unsigned int size, HOST_WIDE_INT *gp_reg_size_ptr)
7517 i = mips16e_find_first_register (*mask_ptr, regs, size);
7518 for (i++; i < size; i++)
7519 if (!BITSET_P (*mask_ptr, regs[i]))
7521 *gp_reg_size_ptr += GET_MODE_SIZE (gpr_mode);
7522 *mask_ptr |= 1 << regs[i];
7526 /* Return the bytes needed to compute the frame pointer from the current
7527 stack pointer. SIZE is the size (in bytes) of the local variables.
7529 MIPS stack frames look like:
7531 Before call After call
7532 high +-----------------------+ +-----------------------+
7534 | caller's temps. | | caller's temps. |
7536 +-----------------------+ +-----------------------+
7538 | arguments on stack. | | arguments on stack. |
7540 +-----------------------+ +-----------------------+
7541 | 4 words to save | | 4 words to save |
7542 | arguments passed | | arguments passed |
7543 | in registers, even | | in registers, even |
7544 | if not passed. | | if not passed. |
7545 SP->+-----------------------+ VFP->+-----------------------+
7546 (VFP = SP+fp_sp_offset) | |\
7547 | fp register save | | fp_reg_size
7549 SP+gp_sp_offset->+-----------------------+
7551 | | gp register save | | gp_reg_size
7552 gp_reg_rounded | | |/
7553 | +-----------------------+
7554 \| alignment padding |
7555 +-----------------------+
7557 | local variables | | var_size
7559 +-----------------------+
7561 | alloca allocations |
7563 +-----------------------+
7565 cprestore_size | | GP save for V.4 abi |
7567 +-----------------------+
7569 | arguments on stack | |
7571 +-----------------------+ |
7572 | 4 words to save | | args_size
7573 | arguments passed | |
7574 | in registers, even | |
7575 | if not passed. | |
7576 low | (TARGET_OLDABI only) |/
7577 memory SP->+-----------------------+
7582 compute_frame_size (HOST_WIDE_INT size)
7585 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
7586 HOST_WIDE_INT var_size; /* # bytes that variables take up */
7587 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
7588 HOST_WIDE_INT cprestore_size; /* # bytes that the cprestore slot takes up */
7589 HOST_WIDE_INT gp_reg_rounded; /* # bytes needed to store gp after rounding */
7590 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
7591 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
7592 unsigned int mask; /* mask of saved gp registers */
7593 unsigned int fmask; /* mask of saved fp registers */
7595 cfun->machine->global_pointer = mips_global_pointer ();
7601 var_size = MIPS_STACK_ALIGN (size);
7602 args_size = current_function_outgoing_args_size;
7603 cprestore_size = MIPS_STACK_ALIGN (STARTING_FRAME_OFFSET) - args_size;
7605 /* The space set aside by STARTING_FRAME_OFFSET isn't needed in leaf
7606 functions. If the function has local variables, we're committed
7607 to allocating it anyway. Otherwise reclaim it here. */
7608 if (var_size == 0 && current_function_is_leaf)
7609 cprestore_size = args_size = 0;
7611 /* The MIPS 3.0 linker does not like functions that dynamically
7612 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
7613 looks like we are trying to create a second frame pointer to the
7614 function, so allocate some stack space to make it happy. */
7616 if (args_size == 0 && current_function_calls_alloca)
7617 args_size = 4 * UNITS_PER_WORD;
7619 total_size = var_size + args_size + cprestore_size;
7621 /* Calculate space needed for gp registers. */
7622 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
7623 if (mips_save_reg_p (regno))
7625 gp_reg_size += GET_MODE_SIZE (gpr_mode);
7626 mask |= 1 << (regno - GP_REG_FIRST);
7629 /* We need to restore these for the handler. */
7630 if (current_function_calls_eh_return)
7635 regno = EH_RETURN_DATA_REGNO (i);
7636 if (regno == INVALID_REGNUM)
7638 gp_reg_size += GET_MODE_SIZE (gpr_mode);
7639 mask |= 1 << (regno - GP_REG_FIRST);
7643 /* The MIPS16e SAVE and RESTORE instructions have two ranges of registers:
7644 $a3-$a0 and $s2-$s8. If we save one register in the range, we must
7645 save all later registers too. */
7646 if (GENERATE_MIPS16E_SAVE_RESTORE)
7648 mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
7649 ARRAY_SIZE (mips16e_s2_s8_regs), &gp_reg_size);
7650 mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
7651 ARRAY_SIZE (mips16e_a0_a3_regs), &gp_reg_size);
7654 /* This loop must iterate over the same space as its companion in
7655 mips_for_each_saved_reg. */
7656 if (TARGET_HARD_FLOAT)
7657 for (regno = (FP_REG_LAST - MAX_FPRS_PER_FMT + 1);
7658 regno >= FP_REG_FIRST;
7659 regno -= MAX_FPRS_PER_FMT)
7660 if (mips_save_reg_p (regno))
7662 fp_reg_size += MAX_FPRS_PER_FMT * UNITS_PER_FPREG;
7663 fmask |= ((1 << MAX_FPRS_PER_FMT) - 1) << (regno - FP_REG_FIRST);
7666 gp_reg_rounded = MIPS_STACK_ALIGN (gp_reg_size);
7667 total_size += gp_reg_rounded + MIPS_STACK_ALIGN (fp_reg_size);
7669 /* Add in the space required for saving incoming register arguments. */
7670 total_size += current_function_pretend_args_size;
7671 total_size += MIPS_STACK_ALIGN (cfun->machine->varargs_size);
7673 /* Save other computed information. */
7674 cfun->machine->frame.total_size = total_size;
7675 cfun->machine->frame.var_size = var_size;
7676 cfun->machine->frame.args_size = args_size;
7677 cfun->machine->frame.cprestore_size = cprestore_size;
7678 cfun->machine->frame.gp_reg_size = gp_reg_size;
7679 cfun->machine->frame.fp_reg_size = fp_reg_size;
7680 cfun->machine->frame.mask = mask;
7681 cfun->machine->frame.fmask = fmask;
7682 cfun->machine->frame.initialized = reload_completed;
7683 cfun->machine->frame.num_gp = gp_reg_size / UNITS_PER_WORD;
7684 cfun->machine->frame.num_fp = (fp_reg_size
7685 / (MAX_FPRS_PER_FMT * UNITS_PER_FPREG));
7689 HOST_WIDE_INT offset;
7691 if (GENERATE_MIPS16E_SAVE_RESTORE)
7692 /* MIPS16e SAVE and RESTORE instructions require the GP save area
7693 to be aligned at the high end with any padding at the low end.
7694 It is only safe to use this calculation for o32, where we never
7695 have pretend arguments, and where any varargs will be saved in
7696 the caller-allocated area rather than at the top of the frame. */
7697 offset = (total_size - GET_MODE_SIZE (gpr_mode));
7699 offset = (args_size + cprestore_size + var_size
7700 + gp_reg_size - GET_MODE_SIZE (gpr_mode));
7701 cfun->machine->frame.gp_sp_offset = offset;
7702 cfun->machine->frame.gp_save_offset = offset - total_size;
7706 cfun->machine->frame.gp_sp_offset = 0;
7707 cfun->machine->frame.gp_save_offset = 0;
7712 HOST_WIDE_INT offset;
7714 offset = (args_size + cprestore_size + var_size
7715 + gp_reg_rounded + fp_reg_size
7716 - MAX_FPRS_PER_FMT * UNITS_PER_FPREG);
7717 cfun->machine->frame.fp_sp_offset = offset;
7718 cfun->machine->frame.fp_save_offset = offset - total_size;
7722 cfun->machine->frame.fp_sp_offset = 0;
7723 cfun->machine->frame.fp_save_offset = 0;
7726 /* Ok, we're done. */
7730 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame
7731 pointer or argument pointer. TO is either the stack pointer or
7732 hard frame pointer. */
7735 mips_initial_elimination_offset (int from, int to)
7737 HOST_WIDE_INT offset;
7739 compute_frame_size (get_frame_size ());
7741 /* Set OFFSET to the offset from the stack pointer. */
7744 case FRAME_POINTER_REGNUM:
7748 case ARG_POINTER_REGNUM:
7749 offset = (cfun->machine->frame.total_size
7750 - current_function_pretend_args_size);
7757 if (TARGET_MIPS16 && to == HARD_FRAME_POINTER_REGNUM)
7758 offset -= cfun->machine->frame.args_size;
7763 /* Implement RETURN_ADDR_RTX. Note, we do not support moving
7764 back to a previous frame. */
7766 mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
7771 return get_hard_reg_initial_val (Pmode, GP_REG_FIRST + 31);
7774 /* Use FN to save or restore register REGNO. MODE is the register's
7775 mode and OFFSET is the offset of its save slot from the current
7779 mips_save_restore_reg (enum machine_mode mode, int regno,
7780 HOST_WIDE_INT offset, mips_save_restore_fn fn)
7784 mem = gen_frame_mem (mode, plus_constant (stack_pointer_rtx, offset));
7786 fn (gen_rtx_REG (mode, regno), mem);
7790 /* Call FN for each register that is saved by the current function.
7791 SP_OFFSET is the offset of the current stack pointer from the start
7795 mips_for_each_saved_reg (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
7797 enum machine_mode fpr_mode;
7798 HOST_WIDE_INT offset;
7801 /* Save registers starting from high to low. The debuggers prefer at least
7802 the return register be stored at func+4, and also it allows us not to
7803 need a nop in the epilogue if at least one register is reloaded in
7804 addition to return address. */
7805 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
7806 for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
7807 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
7809 mips_save_restore_reg (gpr_mode, regno, offset, fn);
7810 offset -= GET_MODE_SIZE (gpr_mode);
7813 /* This loop must iterate over the same space as its companion in
7814 compute_frame_size. */
7815 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
7816 fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
7817 for (regno = (FP_REG_LAST - MAX_FPRS_PER_FMT + 1);
7818 regno >= FP_REG_FIRST;
7819 regno -= MAX_FPRS_PER_FMT)
7820 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
7822 mips_save_restore_reg (fpr_mode, regno, offset, fn);
7823 offset -= GET_MODE_SIZE (fpr_mode);
7827 /* If we're generating n32 or n64 abicalls, and the current function
7828 does not use $28 as its global pointer, emit a cplocal directive.
7829 Use pic_offset_table_rtx as the argument to the directive. */
7832 mips_output_cplocal (void)
7834 if (!TARGET_EXPLICIT_RELOCS
7835 && cfun->machine->global_pointer > 0
7836 && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
7837 output_asm_insn (".cplocal %+", 0);
7840 /* Return the style of GP load sequence that is being used for the
7841 current function. */
7843 enum mips_loadgp_style
7844 mips_current_loadgp_style (void)
7846 if (!TARGET_USE_GOT || cfun->machine->global_pointer == 0)
7852 if (TARGET_ABSOLUTE_ABICALLS)
7853 return LOADGP_ABSOLUTE;
7855 return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI;
7858 /* The __gnu_local_gp symbol. */
7860 static GTY(()) rtx mips_gnu_local_gp;
7862 /* If we're generating n32 or n64 abicalls, emit instructions
7863 to set up the global pointer. */
7866 mips_emit_loadgp (void)
7868 rtx addr, offset, incoming_address, base, index;
7870 switch (mips_current_loadgp_style ())
7872 case LOADGP_ABSOLUTE:
7873 if (mips_gnu_local_gp == NULL)
7875 mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
7876 SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
7878 emit_insn (gen_loadgp_absolute (mips_gnu_local_gp));
7882 addr = XEXP (DECL_RTL (current_function_decl), 0);
7883 offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
7884 incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
7885 emit_insn (gen_loadgp_newabi (offset, incoming_address));
7886 if (!TARGET_EXPLICIT_RELOCS)
7887 emit_insn (gen_loadgp_blockage ());
7891 base = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_BASE));
7892 index = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_INDEX));
7893 emit_insn (gen_loadgp_rtp (base, index));
7894 if (!TARGET_EXPLICIT_RELOCS)
7895 emit_insn (gen_loadgp_blockage ());
7903 /* Set up the stack and frame (if desired) for the function. */
7906 mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
7909 HOST_WIDE_INT tsize = cfun->machine->frame.total_size;
7911 #ifdef SDB_DEBUGGING_INFO
7912 if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
7913 SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));
7916 /* In mips16 mode, we may need to generate a 32 bit to handle
7917 floating point arguments. The linker will arrange for any 32-bit
7918 functions to call this stub, which will then jump to the 16-bit
7921 && TARGET_HARD_FLOAT_ABI
7922 && current_function_args_info.fp_code != 0)
7923 build_mips16_function_stub (file);
7925 /* Select the mips16 mode for this function. */
7927 fprintf (file, "\t.set\tmips16\n");
7929 fprintf (file, "\t.set\tnomips16\n");
7931 if (!FUNCTION_NAME_ALREADY_DECLARED)
7933 /* Get the function name the same way that toplev.c does before calling
7934 assemble_start_function. This is needed so that the name used here
7935 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
7936 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
7938 if (!flag_inhibit_size_directive)
7940 fputs ("\t.ent\t", file);
7941 assemble_name (file, fnname);
7945 assemble_name (file, fnname);
7946 fputs (":\n", file);
7949 /* Stop mips_file_end from treating this function as external. */
7950 if (TARGET_IRIX && mips_abi == ABI_32)
7951 TREE_ASM_WRITTEN (DECL_NAME (cfun->decl)) = 1;
7953 if (!flag_inhibit_size_directive)
7955 /* .frame FRAMEREG, FRAMESIZE, RETREG */
7957 "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
7958 "# vars= " HOST_WIDE_INT_PRINT_DEC ", regs= %d/%d"
7959 ", args= " HOST_WIDE_INT_PRINT_DEC
7960 ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
7961 (reg_names[(frame_pointer_needed)
7962 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM]),
7963 ((frame_pointer_needed && TARGET_MIPS16)
7964 ? tsize - cfun->machine->frame.args_size
7966 reg_names[GP_REG_FIRST + 31],
7967 cfun->machine->frame.var_size,
7968 cfun->machine->frame.num_gp,
7969 cfun->machine->frame.num_fp,
7970 cfun->machine->frame.args_size,
7971 cfun->machine->frame.cprestore_size);
7973 /* .mask MASK, GPOFFSET; .fmask FPOFFSET */
7974 fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
7975 cfun->machine->frame.mask,
7976 cfun->machine->frame.gp_save_offset);
7977 fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
7978 cfun->machine->frame.fmask,
7979 cfun->machine->frame.fp_save_offset);
7982 OLD_SP == *FRAMEREG + FRAMESIZE => can find old_sp from nominated FP reg.
7983 HIGHEST_GP_SAVED == *FRAMEREG + FRAMESIZE + GPOFFSET => can find saved regs. */
7986 if (mips_current_loadgp_style () == LOADGP_OLDABI)
7988 /* Handle the initialization of $gp for SVR4 PIC. */
7989 if (!cfun->machine->all_noreorder_p)
7990 output_asm_insn ("%(.cpload\t%^%)", 0);
7992 output_asm_insn ("%(.cpload\t%^\n\t%<", 0);
7994 else if (cfun->machine->all_noreorder_p)
7995 output_asm_insn ("%(%<", 0);
7997 /* Tell the assembler which register we're using as the global
7998 pointer. This is needed for thunks, since they can use either
7999 explicit relocs or assembler macros. */
8000 mips_output_cplocal ();
8003 /* Make the last instruction frame related and note that it performs
8004 the operation described by FRAME_PATTERN. */
8007 mips_set_frame_expr (rtx frame_pattern)
8011 insn = get_last_insn ();
8012 RTX_FRAME_RELATED_P (insn) = 1;
8013 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
8019 /* Return a frame-related rtx that stores REG at MEM.
8020 REG must be a single register. */
8023 mips_frame_set (rtx mem, rtx reg)
8027 /* If we're saving the return address register and the dwarf return
8028 address column differs from the hard register number, adjust the
8029 note reg to refer to the former. */
8030 if (REGNO (reg) == GP_REG_FIRST + 31
8031 && DWARF_FRAME_RETURN_COLUMN != GP_REG_FIRST + 31)
8032 reg = gen_rtx_REG (GET_MODE (reg), DWARF_FRAME_RETURN_COLUMN);
8034 set = gen_rtx_SET (VOIDmode, mem, reg);
8035 RTX_FRAME_RELATED_P (set) = 1;
8041 /* Save register REG to MEM. Make the instruction frame-related. */
8044 mips_save_reg (rtx reg, rtx mem)
8046 if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
8050 if (mips_split_64bit_move_p (mem, reg))
8051 mips_split_64bit_move (mem, reg);
8053 mips_emit_move (mem, reg);
8055 x1 = mips_frame_set (mips_subword (mem, 0), mips_subword (reg, 0));
8056 x2 = mips_frame_set (mips_subword (mem, 1), mips_subword (reg, 1));
8057 mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
8062 && REGNO (reg) != GP_REG_FIRST + 31
8063 && !M16_REG_P (REGNO (reg)))
8065 /* Save a non-mips16 register by moving it through a temporary.
8066 We don't need to do this for $31 since there's a special
8067 instruction for it. */
8068 mips_emit_move (MIPS_PROLOGUE_TEMP (GET_MODE (reg)), reg);
8069 mips_emit_move (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
8072 mips_emit_move (mem, reg);
8074 mips_set_frame_expr (mips_frame_set (mem, reg));
8078 /* Return a move between register REGNO and memory location SP + OFFSET.
8079 Make the move a load if RESTORE_P, otherwise make it a frame-related
8083 mips16e_save_restore_reg (bool restore_p, HOST_WIDE_INT offset,
8088 mem = gen_frame_mem (SImode, plus_constant (stack_pointer_rtx, offset));
8089 reg = gen_rtx_REG (SImode, regno);
8091 ? gen_rtx_SET (VOIDmode, reg, mem)
8092 : mips_frame_set (mem, reg));
8095 /* Return RTL for a MIPS16e SAVE or RESTORE instruction; RESTORE_P says which.
8096 The instruction must:
8098 - Allocate or deallocate SIZE bytes in total; SIZE is known
8101 - Save or restore as many registers in *MASK_PTR as possible.
8102 The instruction saves the first registers at the top of the
8103 allocated area, with the other registers below it.
8105 - Save NARGS argument registers above the allocated area.
8107 (NARGS is always zero if RESTORE_P.)
8109 The SAVE and RESTORE instructions cannot save and restore all general
8110 registers, so there may be some registers left over for the caller to
8111 handle. Destructively modify *MASK_PTR so that it contains the registers
8112 that still need to be saved or restored. The caller can save these
8113 registers in the memory immediately below *OFFSET_PTR, which is a
8114 byte offset from the bottom of the allocated stack area. */
8117 mips16e_build_save_restore (bool restore_p, unsigned int *mask_ptr,
8118 HOST_WIDE_INT *offset_ptr, unsigned int nargs,
8122 HOST_WIDE_INT offset, top_offset;
8123 unsigned int i, regno;
8126 gcc_assert (cfun->machine->frame.fp_reg_size == 0);
8128 /* Calculate the number of elements in the PARALLEL. We need one element
8129 for the stack adjustment, one for each argument register save, and one
8130 for each additional register move. */
8132 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
8133 if (BITSET_P (*mask_ptr, mips16e_save_restore_regs[i]))
8136 /* Create the final PARALLEL. */
8137 pattern = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (n));
8140 /* Add the stack pointer adjustment. */
8141 set = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8142 plus_constant (stack_pointer_rtx,
8143 restore_p ? size : -size));
8144 RTX_FRAME_RELATED_P (set) = 1;
8145 XVECEXP (pattern, 0, n++) = set;
8147 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
8148 top_offset = restore_p ? size : 0;
8150 /* Save the arguments. */
8151 for (i = 0; i < nargs; i++)
8153 offset = top_offset + i * GET_MODE_SIZE (gpr_mode);
8154 set = mips16e_save_restore_reg (restore_p, offset, GP_ARG_FIRST + i);
8155 XVECEXP (pattern, 0, n++) = set;
8158 /* Then fill in the other register moves. */
8159 offset = top_offset;
8160 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
8162 regno = mips16e_save_restore_regs[i];
8163 if (BITSET_P (*mask_ptr, regno))
8165 offset -= UNITS_PER_WORD;
8166 set = mips16e_save_restore_reg (restore_p, offset, regno);
8167 XVECEXP (pattern, 0, n++) = set;
8168 *mask_ptr &= ~(1 << regno);
8172 /* Tell the caller what offset it should use for the remaining registers. */
8173 *offset_ptr = size + (offset - top_offset) + size;
8175 gcc_assert (n == XVECLEN (pattern, 0));
8180 /* PATTERN is a PARALLEL whose first element adds ADJUST to the stack
8181 pointer. Return true if PATTERN matches the kind of instruction
8182 generated by mips16e_build_save_restore. If INFO is nonnull,
8183 initialize it when returning true. */
8186 mips16e_save_restore_pattern_p (rtx pattern, HOST_WIDE_INT adjust,
8187 struct mips16e_save_restore_info *info)
8189 unsigned int i, nargs, mask;
8190 HOST_WIDE_INT top_offset, save_offset, offset, extra;
8191 rtx set, reg, mem, base;
8194 if (!GENERATE_MIPS16E_SAVE_RESTORE)
8197 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
8198 top_offset = adjust > 0 ? adjust : 0;
8200 /* Interpret all other members of the PARALLEL. */
8201 save_offset = top_offset - GET_MODE_SIZE (gpr_mode);
8205 for (n = 1; n < XVECLEN (pattern, 0); n++)
8207 /* Check that we have a SET. */
8208 set = XVECEXP (pattern, 0, n);
8209 if (GET_CODE (set) != SET)
8212 /* Check that the SET is a load (if restoring) or a store
8214 mem = adjust > 0 ? SET_SRC (set) : SET_DEST (set);
8218 /* Check that the address is the sum of the stack pointer and a
8219 possibly-zero constant offset. */
8220 mips_split_plus (XEXP (mem, 0), &base, &offset);
8221 if (base != stack_pointer_rtx)
8224 /* Check that SET's other operand is a register. */
8225 reg = adjust > 0 ? SET_DEST (set) : SET_SRC (set);
8229 /* Check for argument saves. */
8230 if (offset == top_offset + nargs * GET_MODE_SIZE (gpr_mode)
8231 && REGNO (reg) == GP_ARG_FIRST + nargs)
8233 else if (offset == save_offset)
8235 while (mips16e_save_restore_regs[i++] != REGNO (reg))
8236 if (i == ARRAY_SIZE (mips16e_save_restore_regs))
8239 mask |= 1 << REGNO (reg);
8240 save_offset -= GET_MODE_SIZE (gpr_mode);
8246 /* Check that the restrictions on register ranges are met. */
8248 mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
8249 ARRAY_SIZE (mips16e_s2_s8_regs), &extra);
8250 mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
8251 ARRAY_SIZE (mips16e_a0_a3_regs), &extra);
8255 /* Make sure that the topmost argument register is not saved twice.
8256 The checks above ensure that the same is then true for the other
8257 argument registers. */
8258 if (nargs > 0 && BITSET_P (mask, GP_ARG_FIRST + nargs - 1))
8261 /* Pass back information, if requested. */
8264 info->nargs = nargs;
8266 info->size = (adjust > 0 ? adjust : -adjust);
8272 /* Add a MIPS16e SAVE or RESTORE register-range argument to string S
8273 for the register range [MIN_REG, MAX_REG]. Return a pointer to
8274 the null terminator. */
8277 mips16e_add_register_range (char *s, unsigned int min_reg,
8278 unsigned int max_reg)
8280 if (min_reg != max_reg)
8281 s += sprintf (s, ",%s-%s", reg_names[min_reg], reg_names[max_reg]);
8283 s += sprintf (s, ",%s", reg_names[min_reg]);
8287 /* Return the assembly instruction for a MIPS16e SAVE or RESTORE instruction.
8288 PATTERN and ADJUST are as for mips16e_save_restore_pattern_p. */
8291 mips16e_output_save_restore (rtx pattern, HOST_WIDE_INT adjust)
8293 static char buffer[300];
8295 struct mips16e_save_restore_info info;
8296 unsigned int i, end;
8299 /* Parse the pattern. */
8300 if (!mips16e_save_restore_pattern_p (pattern, adjust, &info))
8303 /* Add the mnemonic. */
8304 s = strcpy (buffer, adjust > 0 ? "restore\t" : "save\t");
8307 /* Save the arguments. */
8309 s += sprintf (s, "%s-%s,", reg_names[GP_ARG_FIRST],
8310 reg_names[GP_ARG_FIRST + info.nargs - 1]);
8311 else if (info.nargs == 1)
8312 s += sprintf (s, "%s,", reg_names[GP_ARG_FIRST]);
8314 /* Emit the amount of stack space to allocate or deallocate. */
8315 s += sprintf (s, "%d", (int) info.size);
8317 /* Save or restore $16. */
8318 if (BITSET_P (info.mask, 16))
8319 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 16]);
8321 /* Save or restore $17. */
8322 if (BITSET_P (info.mask, 17))
8323 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 17]);
8325 /* Save or restore registers in the range $s2...$s8, which
8326 mips16e_s2_s8_regs lists in decreasing order. Note that this
8327 is a software register range; the hardware registers are not
8328 numbered consecutively. */
8329 end = ARRAY_SIZE (mips16e_s2_s8_regs);
8330 i = mips16e_find_first_register (info.mask, mips16e_s2_s8_regs, end);
8332 s = mips16e_add_register_range (s, mips16e_s2_s8_regs[end - 1],
8333 mips16e_s2_s8_regs[i]);
8335 /* Save or restore registers in the range $a0...$a3. */
8336 end = ARRAY_SIZE (mips16e_a0_a3_regs);
8337 i = mips16e_find_first_register (info.mask, mips16e_a0_a3_regs, end);
8339 s = mips16e_add_register_range (s, mips16e_a0_a3_regs[i],
8340 mips16e_a0_a3_regs[end - 1]);
8342 /* Save or restore $31. */
8343 if (BITSET_P (info.mask, 31))
8344 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 31]);
8349 /* Return a simplified form of X using the register values in REG_VALUES.
8350 REG_VALUES[R] is the last value assigned to hard register R, or null
8351 if R has not been modified.
8353 This function is rather limited, but is good enough for our purposes. */
8356 mips16e_collect_propagate_value (rtx x, rtx *reg_values)
8360 x = avoid_constant_pool_reference (x);
8364 x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
8365 return simplify_gen_unary (GET_CODE (x), GET_MODE (x),
8366 x0, GET_MODE (XEXP (x, 0)));
8369 if (ARITHMETIC_P (x))
8371 x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
8372 x1 = mips16e_collect_propagate_value (XEXP (x, 1), reg_values);
8373 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), x0, x1);
8377 && reg_values[REGNO (x)]
8378 && !rtx_unstable_p (reg_values[REGNO (x)]))
8379 return reg_values[REGNO (x)];
8384 /* Return true if (set DEST SRC) stores an argument register into its
8385 caller-allocated save slot, storing the number of that argument
8386 register in *REGNO_PTR if so. REG_VALUES is as for
8387 mips16e_collect_propagate_value. */
8390 mips16e_collect_argument_save_p (rtx dest, rtx src, rtx *reg_values,
8391 unsigned int *regno_ptr)
8393 unsigned int argno, regno;
8394 HOST_WIDE_INT offset, required_offset;
8397 /* Check that this is a word-mode store. */
8398 if (!MEM_P (dest) || !REG_P (src) || GET_MODE (dest) != word_mode)
8401 /* Check that the register being saved is an unmodified argument
8403 regno = REGNO (src);
8404 if (regno < GP_ARG_FIRST || regno > GP_ARG_LAST || reg_values[regno])
8406 argno = regno - GP_ARG_FIRST;
8408 /* Check whether the address is an appropriate stack pointer or
8409 frame pointer access. The frame pointer is offset from the
8410 stack pointer by the size of the outgoing arguments. */
8411 addr = mips16e_collect_propagate_value (XEXP (dest, 0), reg_values);
8412 mips_split_plus (addr, &base, &offset);
8413 required_offset = cfun->machine->frame.total_size + argno * UNITS_PER_WORD;
8414 if (base == hard_frame_pointer_rtx)
8415 required_offset -= cfun->machine->frame.args_size;
8416 else if (base != stack_pointer_rtx)
8418 if (offset != required_offset)
8425 /* A subroutine of mips_expand_prologue, called only when generating
8426 MIPS16e SAVE instructions. Search the start of the function for any
8427 instructions that save argument registers into their caller-allocated
8428 save slots. Delete such instructions and return a value N such that
8429 saving [GP_ARG_FIRST, GP_ARG_FIRST + N) would make all the deleted
8430 instructions redundant. */
8433 mips16e_collect_argument_saves (void)
8435 rtx reg_values[FIRST_PSEUDO_REGISTER];
8436 rtx insn, next, set, dest, src;
8437 unsigned int nargs, regno;
8439 push_topmost_sequence ();
8441 memset (reg_values, 0, sizeof (reg_values));
8442 for (insn = get_insns (); insn; insn = next)
8444 next = NEXT_INSN (insn);
8451 set = PATTERN (insn);
8452 if (GET_CODE (set) != SET)
8455 dest = SET_DEST (set);
8456 src = SET_SRC (set);
8457 if (mips16e_collect_argument_save_p (dest, src, reg_values, ®no))
8459 if (!BITSET_P (cfun->machine->frame.mask, regno))
8462 nargs = MAX (nargs, (regno - GP_ARG_FIRST) + 1);
8465 else if (REG_P (dest) && GET_MODE (dest) == word_mode)
8466 reg_values[REGNO (dest)]
8467 = mips16e_collect_propagate_value (src, reg_values);
8471 pop_topmost_sequence ();
8476 /* Expand the prologue into a bunch of separate insns. */
8479 mips_expand_prologue (void)
8485 if (cfun->machine->global_pointer > 0)
8486 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
8488 size = compute_frame_size (get_frame_size ());
8490 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
8491 bytes beforehand; this is enough to cover the register save area
8492 without going out of range. */
8493 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
8495 HOST_WIDE_INT step1;
8497 step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
8499 if (GENERATE_MIPS16E_SAVE_RESTORE)
8501 HOST_WIDE_INT offset;
8502 unsigned int mask, regno;
8504 /* Try to merge argument stores into the save instruction. */
8505 nargs = mips16e_collect_argument_saves ();
8507 /* Build the save instruction. */
8508 mask = cfun->machine->frame.mask;
8509 insn = mips16e_build_save_restore (false, &mask, &offset,
8511 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
8514 /* Check if we need to save other registers. */
8515 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
8516 if (BITSET_P (mask, regno - GP_REG_FIRST))
8518 offset -= GET_MODE_SIZE (gpr_mode);
8519 mips_save_restore_reg (gpr_mode, regno, offset, mips_save_reg);
8524 insn = gen_add3_insn (stack_pointer_rtx,
8527 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
8529 mips_for_each_saved_reg (size, mips_save_reg);
8533 /* Allocate the rest of the frame. */
8536 if (SMALL_OPERAND (-size))
8537 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
8539 GEN_INT (-size)))) = 1;
8542 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
8545 /* There are no instructions to add or subtract registers
8546 from the stack pointer, so use the frame pointer as a
8547 temporary. We should always be using a frame pointer
8548 in this case anyway. */
8549 gcc_assert (frame_pointer_needed);
8550 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
8551 emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
8552 hard_frame_pointer_rtx,
8553 MIPS_PROLOGUE_TEMP (Pmode)));
8554 mips_emit_move (stack_pointer_rtx, hard_frame_pointer_rtx);
8557 emit_insn (gen_sub3_insn (stack_pointer_rtx,
8559 MIPS_PROLOGUE_TEMP (Pmode)));
8561 /* Describe the combined effect of the previous instructions. */
8563 (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8564 plus_constant (stack_pointer_rtx, -size)));
8568 /* Set up the frame pointer, if we're using one. In mips16 code,
8569 we point the frame pointer ahead of the outgoing argument area.
8570 This should allow more variables & incoming arguments to be
8571 accessed with unextended instructions. */
8572 if (frame_pointer_needed)
8574 if (TARGET_MIPS16 && cfun->machine->frame.args_size != 0)
8576 rtx offset = GEN_INT (cfun->machine->frame.args_size);
8577 if (SMALL_OPERAND (cfun->machine->frame.args_size))
8579 (emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
8584 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), offset);
8585 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
8586 emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
8587 hard_frame_pointer_rtx,
8588 MIPS_PROLOGUE_TEMP (Pmode)));
8590 (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
8591 plus_constant (stack_pointer_rtx,
8592 cfun->machine->frame.args_size)));
8596 RTX_FRAME_RELATED_P (mips_emit_move (hard_frame_pointer_rtx,
8597 stack_pointer_rtx)) = 1;
8600 mips_emit_loadgp ();
8602 /* If generating o32/o64 abicalls, save $gp on the stack. */
8603 if (TARGET_ABICALLS && TARGET_OLDABI && !current_function_is_leaf)
8604 emit_insn (gen_cprestore (GEN_INT (current_function_outgoing_args_size)));
8606 /* If we are profiling, make sure no instructions are scheduled before
8607 the call to mcount. */
8609 if (current_function_profile)
8610 emit_insn (gen_blockage ());
8613 /* Do any necessary cleanup after a function to restore stack, frame,
8616 #define RA_MASK BITMASK_HIGH /* 1 << 31 */
8619 mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
8620 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
8622 /* Reinstate the normal $gp. */
8623 SET_REGNO (pic_offset_table_rtx, GLOBAL_POINTER_REGNUM);
8624 mips_output_cplocal ();
8626 if (cfun->machine->all_noreorder_p)
8628 /* Avoid using %>%) since it adds excess whitespace. */
8629 output_asm_insn (".set\tmacro", 0);
8630 output_asm_insn (".set\treorder", 0);
8631 set_noreorder = set_nomacro = 0;
8634 if (!FUNCTION_NAME_ALREADY_DECLARED && !flag_inhibit_size_directive)
8638 /* Get the function name the same way that toplev.c does before calling
8639 assemble_start_function. This is needed so that the name used here
8640 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
8641 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
8642 fputs ("\t.end\t", file);
8643 assemble_name (file, fnname);
8648 /* Emit instructions to restore register REG from slot MEM. */
8651 mips_restore_reg (rtx reg, rtx mem)
8653 /* There's no mips16 instruction to load $31 directly. Load into
8654 $7 instead and adjust the return insn appropriately. */
8655 if (TARGET_MIPS16 && REGNO (reg) == GP_REG_FIRST + 31)
8656 reg = gen_rtx_REG (GET_MODE (reg), 7);
8658 if (TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
8660 /* Can't restore directly; move through a temporary. */
8661 mips_emit_move (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
8662 mips_emit_move (reg, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
8665 mips_emit_move (reg, mem);
8669 /* Expand the epilogue into a bunch of separate insns. SIBCALL_P is true
8670 if this epilogue precedes a sibling call, false if it is for a normal
8671 "epilogue" pattern. */
8674 mips_expand_epilogue (int sibcall_p)
8676 HOST_WIDE_INT step1, step2;
8679 if (!sibcall_p && mips_can_use_return_insn ())
8681 emit_jump_insn (gen_return ());
8685 /* In mips16 mode, if the return value should go into a floating-point
8686 register, we need to call a helper routine to copy it over. */
8687 if (mips16_cfun_returns_in_fpr_p ())
8696 enum machine_mode return_mode;
8698 return_type = DECL_RESULT (current_function_decl);
8699 return_mode = DECL_MODE (return_type);
8701 name = ACONCAT (("__mips16_ret_",
8702 mips16_call_stub_mode_suffix (return_mode),
8704 id = get_identifier (name);
8705 func = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
8706 retval = gen_rtx_REG (return_mode, GP_RETURN);
8707 call = gen_call_value_internal (retval, func, const0_rtx);
8708 insn = emit_call_insn (call);
8709 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), retval);
8712 /* Split the frame into two. STEP1 is the amount of stack we should
8713 deallocate before restoring the registers. STEP2 is the amount we
8714 should deallocate afterwards.
8716 Start off by assuming that no registers need to be restored. */
8717 step1 = cfun->machine->frame.total_size;
8720 /* Work out which register holds the frame address. Account for the
8721 frame pointer offset used by mips16 code. */
8722 if (!frame_pointer_needed)
8723 base = stack_pointer_rtx;
8726 base = hard_frame_pointer_rtx;
8728 step1 -= cfun->machine->frame.args_size;
8731 /* If we need to restore registers, deallocate as much stack as
8732 possible in the second step without going out of range. */
8733 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
8735 step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
8739 /* Set TARGET to BASE + STEP1. */
8745 /* Get an rtx for STEP1 that we can add to BASE. */
8746 adjust = GEN_INT (step1);
8747 if (!SMALL_OPERAND (step1))
8749 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), adjust);
8750 adjust = MIPS_EPILOGUE_TEMP (Pmode);
8753 /* Normal mode code can copy the result straight into $sp. */
8755 target = stack_pointer_rtx;
8757 emit_insn (gen_add3_insn (target, base, adjust));
8760 /* Copy TARGET into the stack pointer. */
8761 if (target != stack_pointer_rtx)
8762 mips_emit_move (stack_pointer_rtx, target);
8764 /* If we're using addressing macros, $gp is implicitly used by all
8765 SYMBOL_REFs. We must emit a blockage insn before restoring $gp
8767 if (TARGET_CALL_SAVED_GP && !TARGET_EXPLICIT_RELOCS)
8768 emit_insn (gen_blockage ());
8770 if (GENERATE_MIPS16E_SAVE_RESTORE && cfun->machine->frame.mask != 0)
8772 unsigned int regno, mask;
8773 HOST_WIDE_INT offset;
8776 /* Generate the restore instruction. */
8777 mask = cfun->machine->frame.mask;
8778 restore = mips16e_build_save_restore (true, &mask, &offset, 0, step2);
8780 /* Restore any other registers manually. */
8781 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
8782 if (BITSET_P (mask, regno - GP_REG_FIRST))
8784 offset -= GET_MODE_SIZE (gpr_mode);
8785 mips_save_restore_reg (gpr_mode, regno, offset, mips_restore_reg);
8788 /* Restore the remaining registers and deallocate the final bit
8790 emit_insn (restore);
8794 /* Restore the registers. */
8795 mips_for_each_saved_reg (cfun->machine->frame.total_size - step2,
8798 /* Deallocate the final bit of the frame. */
8800 emit_insn (gen_add3_insn (stack_pointer_rtx,
8805 /* Add in the __builtin_eh_return stack adjustment. We need to
8806 use a temporary in mips16 code. */
8807 if (current_function_calls_eh_return)
8811 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
8812 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
8813 MIPS_EPILOGUE_TEMP (Pmode),
8814 EH_RETURN_STACKADJ_RTX));
8815 mips_emit_move (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
8818 emit_insn (gen_add3_insn (stack_pointer_rtx,
8820 EH_RETURN_STACKADJ_RTX));
8825 /* When generating MIPS16 code, the normal mips_for_each_saved_reg
8826 path will restore the return address into $7 rather than $31. */
8828 && !GENERATE_MIPS16E_SAVE_RESTORE
8829 && (cfun->machine->frame.mask & RA_MASK) != 0)
8830 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
8831 GP_REG_FIRST + 7)));
8833 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
8834 GP_REG_FIRST + 31)));
8838 /* Return nonzero if this function is known to have a null epilogue.
8839 This allows the optimizer to omit jumps to jumps if no stack
8843 mips_can_use_return_insn (void)
8845 if (! reload_completed)
8848 if (df_regs_ever_live_p (31) || current_function_profile)
8851 /* In mips16 mode, a function that returns a floating point value
8852 needs to arrange to copy the return value into the floating point
8854 if (mips16_cfun_returns_in_fpr_p ())
8857 if (cfun->machine->frame.initialized)
8858 return cfun->machine->frame.total_size == 0;
8860 return compute_frame_size (get_frame_size ()) == 0;
8863 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
8864 in order to avoid duplicating too much logic from elsewhere. */
8867 mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8868 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8871 rtx this, temp1, temp2, insn, fnaddr;
8874 /* Pretend to be a post-reload pass while generating rtl. */
8875 reload_completed = 1;
8877 /* Mark the end of the (empty) prologue. */
8878 emit_note (NOTE_INSN_PROLOGUE_END);
8880 /* Determine if we can use a sibcall to call FUNCTION directly. */
8881 fnaddr = XEXP (DECL_RTL (function), 0);
8882 use_sibcall_p = (mips_function_ok_for_sibcall (function, NULL)
8883 && const_call_insn_operand (fnaddr, Pmode));
8885 /* Determine if we need to load FNADDR from the GOT. */
8887 switch (mips_classify_symbol (fnaddr, SYMBOL_CONTEXT_LEA))
8889 case SYMBOL_GOT_PAGE_OFST:
8890 case SYMBOL_GOT_DISP:
8891 /* Pick a global pointer. Use a call-clobbered register if
8892 TARGET_CALL_SAVED_GP. */
8893 cfun->machine->global_pointer =
8894 TARGET_CALL_SAVED_GP ? 15 : GLOBAL_POINTER_REGNUM;
8895 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
8897 /* Set up the global pointer for n32 or n64 abicalls. */
8898 mips_emit_loadgp ();
8905 /* We need two temporary registers in some cases. */
8906 temp1 = gen_rtx_REG (Pmode, 2);
8907 temp2 = gen_rtx_REG (Pmode, 3);
8909 /* Find out which register contains the "this" pointer. */
8910 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8911 this = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
8913 this = gen_rtx_REG (Pmode, GP_ARG_FIRST);
8915 /* Add DELTA to THIS. */
8918 rtx offset = GEN_INT (delta);
8919 if (!SMALL_OPERAND (delta))
8921 mips_emit_move (temp1, offset);
8924 emit_insn (gen_add3_insn (this, this, offset));
8927 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
8928 if (vcall_offset != 0)
8932 /* Set TEMP1 to *THIS. */
8933 mips_emit_move (temp1, gen_rtx_MEM (Pmode, this));
8935 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
8936 addr = mips_add_offset (temp2, temp1, vcall_offset);
8938 /* Load the offset and add it to THIS. */
8939 mips_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
8940 emit_insn (gen_add3_insn (this, this, temp1));
8943 /* Jump to the target function. Use a sibcall if direct jumps are
8944 allowed, otherwise load the address into a register first. */
8947 insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
8948 SIBLING_CALL_P (insn) = 1;
8952 /* This is messy. gas treats "la $25,foo" as part of a call
8953 sequence and may allow a global "foo" to be lazily bound.
8954 The general move patterns therefore reject this combination.
8956 In this context, lazy binding would actually be OK
8957 for TARGET_CALL_CLOBBERED_GP, but it's still wrong for
8958 TARGET_CALL_SAVED_GP; see mips_load_call_address.
8959 We must therefore load the address via a temporary
8960 register if mips_dangerous_for_la25_p.
8962 If we jump to the temporary register rather than $25, the assembler
8963 can use the move insn to fill the jump's delay slot. */
8964 if (TARGET_USE_PIC_FN_ADDR_REG
8965 && !mips_dangerous_for_la25_p (fnaddr))
8966 temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
8967 mips_load_call_address (temp1, fnaddr, true);
8969 if (TARGET_USE_PIC_FN_ADDR_REG
8970 && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
8971 mips_emit_move (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
8972 emit_jump_insn (gen_indirect_jump (temp1));
8975 /* Run just enough of rest_of_compilation. This sequence was
8976 "borrowed" from alpha.c. */
8977 insn = get_insns ();
8978 insn_locators_alloc ();
8979 split_all_insns_noflow ();
8980 mips16_lay_out_constants ();
8981 shorten_branches (insn);
8982 final_start_function (insn, file, 1);
8983 final (insn, file, 1);
8984 final_end_function ();
8986 /* Clean up the vars set above. Note that final_end_function resets
8987 the global pointer for us. */
8988 reload_completed = 0;
8991 /* Implement TARGET_SELECT_RTX_SECTION. */
8994 mips_select_rtx_section (enum machine_mode mode, rtx x,
8995 unsigned HOST_WIDE_INT align)
8997 /* ??? Consider using mergeable small data sections. */
8998 if (mips_rtx_constant_in_small_data_p (mode))
8999 return get_named_section (NULL, ".sdata", 0);
9001 return default_elf_select_rtx_section (mode, x, align);
9004 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
9006 The complication here is that, with the combination TARGET_ABICALLS
9007 && !TARGET_GPWORD, jump tables will use absolute addresses, and should
9008 therefore not be included in the read-only part of a DSO. Handle such
9009 cases by selecting a normal data section instead of a read-only one.
9010 The logic apes that in default_function_rodata_section. */
9013 mips_function_rodata_section (tree decl)
9015 if (!TARGET_ABICALLS || TARGET_GPWORD)
9016 return default_function_rodata_section (decl);
9018 if (decl && DECL_SECTION_NAME (decl))
9020 const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
9021 if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
9023 char *rname = ASTRDUP (name);
9025 return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
9027 else if (flag_function_sections && flag_data_sections
9028 && strncmp (name, ".text.", 6) == 0)
9030 char *rname = ASTRDUP (name);
9031 memcpy (rname + 1, "data", 4);
9032 return get_section (rname, SECTION_WRITE, decl);
9035 return data_section;
9038 /* Implement TARGET_IN_SMALL_DATA_P. This function controls whether
9039 locally-defined objects go in a small data section. It also controls
9040 the setting of the SYMBOL_REF_SMALL_P flag, which in turn helps
9041 mips_classify_symbol decide when to use %gp_rel(...)($gp) accesses. */
9044 mips_in_small_data_p (const_tree decl)
9048 if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
9051 /* We don't yet generate small-data references for -mabicalls or
9052 VxWorks RTP code. See the related -G handling in override_options. */
9053 if (TARGET_ABICALLS || TARGET_VXWORKS_RTP)
9056 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
9060 /* Reject anything that isn't in a known small-data section. */
9061 name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
9062 if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
9065 /* If a symbol is defined externally, the assembler will use the
9066 usual -G rules when deciding how to implement macros. */
9067 if (mips_lo_relocs[SYMBOL_GP_RELATIVE] || !DECL_EXTERNAL (decl))
9070 else if (TARGET_EMBEDDED_DATA)
9072 /* Don't put constants into the small data section: we want them
9073 to be in ROM rather than RAM. */
9074 if (TREE_CODE (decl) != VAR_DECL)
9077 if (TREE_READONLY (decl)
9078 && !TREE_SIDE_EFFECTS (decl)
9079 && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
9083 /* Enforce -mlocal-sdata. */
9084 if (!TARGET_LOCAL_SDATA && !TREE_PUBLIC (decl))
9087 /* Enforce -mextern-sdata. */
9088 if (!TARGET_EXTERN_SDATA && DECL_P (decl))
9090 if (DECL_EXTERNAL (decl))
9092 if (DECL_COMMON (decl) && DECL_INITIAL (decl) == NULL)
9096 size = int_size_in_bytes (TREE_TYPE (decl));
9097 return (size > 0 && size <= mips_section_threshold);
9100 /* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
9101 anchors for small data: the GP register acts as an anchor in that
9102 case. We also don't want to use them for PC-relative accesses,
9103 where the PC acts as an anchor. */
9106 mips_use_anchors_for_symbol_p (const_rtx symbol)
9108 switch (mips_classify_symbol (symbol, SYMBOL_CONTEXT_MEM))
9110 case SYMBOL_PC_RELATIVE:
9111 case SYMBOL_GP_RELATIVE:
9115 return default_use_anchors_for_symbol_p (symbol);
9119 /* See whether VALTYPE is a record whose fields should be returned in
9120 floating-point registers. If so, return the number of fields and
9121 list them in FIELDS (which should have two elements). Return 0
9124 For n32 & n64, a structure with one or two fields is returned in
9125 floating-point registers as long as every field has a floating-point
9129 mips_fpr_return_fields (const_tree valtype, tree *fields)
9137 if (TREE_CODE (valtype) != RECORD_TYPE)
9141 for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
9143 if (TREE_CODE (field) != FIELD_DECL)
9146 if (TREE_CODE (TREE_TYPE (field)) != REAL_TYPE)
9152 fields[i++] = field;
9158 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
9159 a value in the most significant part of $2/$3 if:
9161 - the target is big-endian;
9163 - the value has a structure or union type (we generalize this to
9164 cover aggregates from other languages too); and
9166 - the structure is not returned in floating-point registers. */
9169 mips_return_in_msb (const_tree valtype)
9173 return (TARGET_NEWABI
9174 && TARGET_BIG_ENDIAN
9175 && AGGREGATE_TYPE_P (valtype)
9176 && mips_fpr_return_fields (valtype, fields) == 0);
9180 /* Return a composite value in a pair of floating-point registers.
9181 MODE1 and OFFSET1 are the mode and byte offset for the first value,
9182 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
9185 For n32 & n64, $f0 always holds the first value and $f2 the second.
9186 Otherwise the values are packed together as closely as possible. */
9189 mips_return_fpr_pair (enum machine_mode mode,
9190 enum machine_mode mode1, HOST_WIDE_INT offset1,
9191 enum machine_mode mode2, HOST_WIDE_INT offset2)
9195 inc = (TARGET_NEWABI ? 2 : MAX_FPRS_PER_FMT);
9196 return gen_rtx_PARALLEL
9199 gen_rtx_EXPR_LIST (VOIDmode,
9200 gen_rtx_REG (mode1, FP_RETURN),
9202 gen_rtx_EXPR_LIST (VOIDmode,
9203 gen_rtx_REG (mode2, FP_RETURN + inc),
9204 GEN_INT (offset2))));
9209 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
9210 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
9211 VALTYPE is null and MODE is the mode of the return value. */
9214 mips_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
9215 enum machine_mode mode)
9222 mode = TYPE_MODE (valtype);
9223 unsignedp = TYPE_UNSIGNED (valtype);
9225 /* Since we define TARGET_PROMOTE_FUNCTION_RETURN that returns
9226 true, we must promote the mode just as PROMOTE_MODE does. */
9227 mode = promote_mode (valtype, mode, &unsignedp, 1);
9229 /* Handle structures whose fields are returned in $f0/$f2. */
9230 switch (mips_fpr_return_fields (valtype, fields))
9233 return gen_rtx_REG (mode, FP_RETURN);
9236 return mips_return_fpr_pair (mode,
9237 TYPE_MODE (TREE_TYPE (fields[0])),
9238 int_byte_position (fields[0]),
9239 TYPE_MODE (TREE_TYPE (fields[1])),
9240 int_byte_position (fields[1]));
9243 /* If a value is passed in the most significant part of a register, see
9244 whether we have to round the mode up to a whole number of words. */
9245 if (mips_return_in_msb (valtype))
9247 HOST_WIDE_INT size = int_size_in_bytes (valtype);
9248 if (size % UNITS_PER_WORD != 0)
9250 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
9251 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
9255 /* For EABI, the class of return register depends entirely on MODE.
9256 For example, "struct { some_type x; }" and "union { some_type x; }"
9257 are returned in the same way as a bare "some_type" would be.
9258 Other ABIs only use FPRs for scalar, complex or vector types. */
9259 if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
9260 return gen_rtx_REG (mode, GP_RETURN);
9265 /* Handle long doubles for n32 & n64. */
9267 return mips_return_fpr_pair (mode,
9269 DImode, GET_MODE_SIZE (mode) / 2);
9271 if (mips_return_mode_in_fpr_p (mode))
9273 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
9274 return mips_return_fpr_pair (mode,
9275 GET_MODE_INNER (mode), 0,
9276 GET_MODE_INNER (mode),
9277 GET_MODE_SIZE (mode) / 2);
9279 return gen_rtx_REG (mode, FP_RETURN);
9283 return gen_rtx_REG (mode, GP_RETURN);
9286 /* Return nonzero when an argument must be passed by reference. */
9289 mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
9290 enum machine_mode mode, const_tree type,
9291 bool named ATTRIBUTE_UNUSED)
9293 if (mips_abi == ABI_EABI)
9297 /* ??? How should SCmode be handled? */
9298 if (mode == DImode || mode == DFmode
9299 || mode == DQmode || mode == UDQmode
9300 || mode == DAmode || mode == UDAmode)
9303 size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
9304 return size == -1 || size > UNITS_PER_WORD;
9308 /* If we have a variable-sized parameter, we have no choice. */
9309 return targetm.calls.must_pass_in_stack (mode, type);
9314 mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
9315 enum machine_mode mode ATTRIBUTE_UNUSED,
9316 const_tree type ATTRIBUTE_UNUSED, bool named)
9318 return mips_abi == ABI_EABI && named;
9321 /* Return true if registers of class CLASS cannot change from mode FROM
9325 mips_cannot_change_mode_class (enum machine_mode from,
9326 enum machine_mode to, enum reg_class class)
9328 if (MIN (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) <= UNITS_PER_WORD
9329 && MAX (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) > UNITS_PER_WORD)
9331 if (TARGET_BIG_ENDIAN)
9333 /* When a multi-word value is stored in paired floating-point
9334 registers, the first register always holds the low word.
9335 We therefore can't allow FPRs to change between single-word
9336 and multi-word modes. */
9337 if (MAX_FPRS_PER_FMT > 1 && reg_classes_intersect_p (FP_REGS, class))
9342 /* gcc assumes that each word of a multiword register can be accessed
9343 individually using SUBREGs. This is not true for floating-point
9344 registers if they are bigger than a word. */
9345 if (UNITS_PER_FPREG > UNITS_PER_WORD
9346 && GET_MODE_SIZE (from) > UNITS_PER_WORD
9347 && GET_MODE_SIZE (to) < UNITS_PER_FPREG
9348 && reg_classes_intersect_p (FP_REGS, class))
9351 /* Loading a 32-bit value into a 64-bit floating-point register
9352 will not sign-extend the value, despite what LOAD_EXTEND_OP says.
9353 We can't allow 64-bit float registers to change from SImode to
9358 && GET_MODE_SIZE (to) >= UNITS_PER_WORD
9359 && reg_classes_intersect_p (FP_REGS, class))
9365 /* Return true if X should not be moved directly into register $25.
9366 We need this because many versions of GAS will treat "la $25,foo" as
9367 part of a call sequence and so allow a global "foo" to be lazily bound. */
9370 mips_dangerous_for_la25_p (rtx x)
9372 return (!TARGET_EXPLICIT_RELOCS
9374 && GET_CODE (x) == SYMBOL_REF
9375 && mips_global_symbol_p (x));
9378 /* Implement PREFERRED_RELOAD_CLASS. */
9381 mips_preferred_reload_class (rtx x, enum reg_class class)
9383 if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, class))
9386 if (TARGET_HARD_FLOAT
9387 && FLOAT_MODE_P (GET_MODE (x))
9388 && reg_class_subset_p (FP_REGS, class))
9391 if (reg_class_subset_p (GR_REGS, class))
9394 if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, class))
9400 /* This function returns the register class required for a secondary
9401 register when copying between one of the registers in CLASS, and X,
9402 using MODE. If IN_P is nonzero, the copy is going from X to the
9403 register, otherwise the register is the source. A return value of
9404 NO_REGS means that no secondary register is required. */
9407 mips_secondary_reload_class (enum reg_class class,
9408 enum machine_mode mode, rtx x, int in_p)
9410 enum reg_class gr_regs = TARGET_MIPS16 ? M16_REGS : GR_REGS;
9414 if (REG_P (x)|| GET_CODE (x) == SUBREG)
9415 regno = true_regnum (x);
9417 gp_reg_p = TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
9419 if (mips_dangerous_for_la25_p (x))
9422 if (TEST_HARD_REG_BIT (reg_class_contents[(int) class], 25))
9426 /* Copying from HI or LO to anywhere other than a general register
9427 requires a general register.
9428 This rule applies to both the original HI/LO pair and the new
9429 DSP accumulators. */
9430 if (reg_class_subset_p (class, ACC_REGS))
9432 if (TARGET_MIPS16 && in_p)
9434 /* We can't really copy to HI or LO at all in mips16 mode. */
9437 return gp_reg_p ? NO_REGS : gr_regs;
9439 if (ACC_REG_P (regno))
9441 if (TARGET_MIPS16 && ! in_p)
9443 /* We can't really copy to HI or LO at all in mips16 mode. */
9446 return class == gr_regs ? NO_REGS : gr_regs;
9449 /* We can only copy a value to a condition code register from a
9450 floating point register, and even then we require a scratch
9451 floating point register. We can only copy a value out of a
9452 condition code register into a general register. */
9453 if (class == ST_REGS)
9457 return gp_reg_p ? NO_REGS : gr_regs;
9459 if (ST_REG_P (regno))
9463 return class == gr_regs ? NO_REGS : gr_regs;
9466 if (class == FP_REGS)
9470 /* In this case we can use lwc1, swc1, ldc1 or sdc1. */
9473 else if (CONSTANT_P (x) && GET_MODE_CLASS (mode) == MODE_FLOAT)
9475 /* We can use the l.s and l.d macros to load floating-point
9476 constants. ??? For l.s, we could probably get better
9477 code by returning GR_REGS here. */
9480 else if (gp_reg_p || x == CONST0_RTX (mode))
9482 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
9485 else if (FP_REG_P (regno))
9487 /* In this case we can use mov.s or mov.d. */
9492 /* Otherwise, we need to reload through an integer register. */
9497 /* In mips16 mode, going between memory and anything but M16_REGS
9498 requires an M16_REG. */
9501 if (class != M16_REGS && class != M16_NA_REGS)
9509 if (class == M16_REGS || class == M16_NA_REGS)
9518 /* Implement CLASS_MAX_NREGS.
9520 - UNITS_PER_FPREG controls the number of registers needed by FP_REGS.
9522 - ST_REGS are always hold CCmode values, and CCmode values are
9523 considered to be 4 bytes wide.
9525 All other register classes are covered by UNITS_PER_WORD. Note that
9526 this is true even for unions of integer and float registers when the
9527 latter are smaller than the former. The only supported combination
9528 in which case this occurs is -mgp64 -msingle-float, which has 64-bit
9529 words but 32-bit float registers. A word-based calculation is correct
9530 in that case since -msingle-float disallows multi-FPR values. */
9533 mips_class_max_nregs (enum reg_class class ATTRIBUTE_UNUSED,
9534 enum machine_mode mode)
9536 if (class == ST_REGS)
9537 return (GET_MODE_SIZE (mode) + 3) / 4;
9538 else if (class == FP_REGS)
9539 return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
9541 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
9545 mips_valid_pointer_mode (enum machine_mode mode)
9547 return (mode == SImode || (TARGET_64BIT && mode == DImode));
9550 /* Target hook for vector_mode_supported_p. */
9553 mips_vector_mode_supported_p (enum machine_mode mode)
9558 return TARGET_PAIRED_SINGLE_FLOAT;
9575 /* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
9578 mips_scalar_mode_supported_p (enum machine_mode mode)
9580 if (ALL_FIXED_POINT_MODE_P (mode)
9581 && GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD)
9584 return default_scalar_mode_supported_p (mode);
9587 /* If we can access small data directly (using gp-relative relocation
9588 operators) return the small data pointer, otherwise return null.
9590 For each mips16 function which refers to GP relative symbols, we
9591 use a pseudo register, initialized at the start of the function, to
9592 hold the $gp value. */
9595 mips16_gp_pseudo_reg (void)
9597 if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
9598 cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
9600 /* Don't initialize the pseudo register if we are being called from
9601 the tree optimizers' cost-calculation routines. */
9602 if (!cfun->machine->initialized_mips16_gp_pseudo_p
9603 && (current_ir_type () != IR_GIMPLE || currently_expanding_to_rtl))
9607 /* We want to initialize this to a value which gcc will believe
9609 insn = gen_load_const_gp (cfun->machine->mips16_gp_pseudo_rtx);
9611 push_topmost_sequence ();
9612 /* We need to emit the initialization after the FUNCTION_BEG
9613 note, so that it will be integrated. */
9614 for (scan = get_insns (); scan != NULL_RTX; scan = NEXT_INSN (scan))
9616 && NOTE_KIND (scan) == NOTE_INSN_FUNCTION_BEG)
9618 if (scan == NULL_RTX)
9619 scan = get_insns ();
9620 insn = emit_insn_after (insn, scan);
9621 pop_topmost_sequence ();
9623 cfun->machine->initialized_mips16_gp_pseudo_p = true;
9626 return cfun->machine->mips16_gp_pseudo_rtx;
9629 /* Write out code to move floating point arguments in or out of
9630 general registers. Output the instructions to FILE. FP_CODE is
9631 the code describing which arguments are present (see the comment at
9632 the definition of CUMULATIVE_ARGS in mips.h). FROM_FP_P is nonzero if
9633 we are copying from the floating point registers. */
9636 mips16_fp_args (FILE *file, int fp_code, int from_fp_p)
9641 CUMULATIVE_ARGS cum;
9643 /* This code only works for the original 32-bit ABI and the O64 ABI. */
9644 gcc_assert (TARGET_OLDABI);
9651 init_cumulative_args (&cum, NULL, NULL);
9653 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
9655 enum machine_mode mode;
9656 struct mips_arg_info info;
9660 else if ((f & 3) == 2)
9665 mips_arg_info (&cum, mode, NULL, true, &info);
9666 gparg = mips_arg_regno (&info, false);
9667 fparg = mips_arg_regno (&info, true);
9670 fprintf (file, "\t%s\t%s,%s\n", s,
9671 reg_names[gparg], reg_names[fparg]);
9672 else if (TARGET_64BIT)
9673 fprintf (file, "\td%s\t%s,%s\n", s,
9674 reg_names[gparg], reg_names[fparg]);
9675 else if (ISA_HAS_MXHC1)
9676 /* -mips32r2 -mfp64 */
9677 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n",
9679 reg_names[gparg + (WORDS_BIG_ENDIAN ? 1 : 0)],
9681 from_fp_p ? "mfhc1" : "mthc1",
9682 reg_names[gparg + (WORDS_BIG_ENDIAN ? 0 : 1)],
9684 else if (TARGET_BIG_ENDIAN)
9685 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
9686 reg_names[gparg], reg_names[fparg + 1], s,
9687 reg_names[gparg + 1], reg_names[fparg]);
9689 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
9690 reg_names[gparg], reg_names[fparg], s,
9691 reg_names[gparg + 1], reg_names[fparg + 1]);
9693 function_arg_advance (&cum, mode, NULL, true);
9697 /* Build a mips16 function stub. This is used for functions which
9698 take arguments in the floating point registers. It is 32-bit code
9699 that moves the floating point args into the general registers, and
9700 then jumps to the 16-bit code. */
9703 build_mips16_function_stub (FILE *file)
9706 char *secname, *stubname;
9707 tree stubid, stubdecl;
9711 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
9712 fnname = targetm.strip_name_encoding (fnname);
9713 secname = (char *) alloca (strlen (fnname) + 20);
9714 sprintf (secname, ".mips16.fn.%s", fnname);
9715 stubname = (char *) alloca (strlen (fnname) + 20);
9716 sprintf (stubname, "__fn_stub_%s", fnname);
9717 stubid = get_identifier (stubname);
9718 stubdecl = build_decl (FUNCTION_DECL, stubid,
9719 build_function_type (void_type_node, NULL_TREE));
9720 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
9721 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
9723 fprintf (file, "\t# Stub function for %s (", current_function_name ());
9725 for (f = (unsigned int) current_function_args_info.fp_code; f != 0; f >>= 2)
9727 fprintf (file, "%s%s",
9728 need_comma ? ", " : "",
9729 (f & 3) == 1 ? "float" : "double");
9732 fprintf (file, ")\n");
9734 fprintf (file, "\t.set\tnomips16\n");
9735 switch_to_section (function_section (stubdecl));
9736 ASM_OUTPUT_ALIGN (file, floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT));
9738 /* ??? If FUNCTION_NAME_ALREADY_DECLARED is defined, then we are
9739 within a .ent, and we cannot emit another .ent. */
9740 if (!FUNCTION_NAME_ALREADY_DECLARED)
9742 fputs ("\t.ent\t", file);
9743 assemble_name (file, stubname);
9747 assemble_name (file, stubname);
9748 fputs (":\n", file);
9750 /* We don't want the assembler to insert any nops here. */
9751 fprintf (file, "\t.set\tnoreorder\n");
9753 mips16_fp_args (file, current_function_args_info.fp_code, 1);
9755 fprintf (asm_out_file, "\t.set\tnoat\n");
9756 fprintf (asm_out_file, "\tla\t%s,", reg_names[GP_REG_FIRST + 1]);
9757 assemble_name (file, fnname);
9758 fprintf (file, "\n");
9759 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
9760 fprintf (asm_out_file, "\t.set\tat\n");
9762 /* Unfortunately, we can't fill the jump delay slot. We can't fill
9763 with one of the mfc1 instructions, because the result is not
9764 available for one instruction, so if the very first instruction
9765 in the function refers to the register, it will see the wrong
9767 fprintf (file, "\tnop\n");
9769 fprintf (file, "\t.set\treorder\n");
9771 if (!FUNCTION_NAME_ALREADY_DECLARED)
9773 fputs ("\t.end\t", file);
9774 assemble_name (file, stubname);
9778 switch_to_section (function_section (current_function_decl));
9781 /* We keep a list of functions for which we have already built stubs
9782 in build_mips16_call_stub. */
9786 struct mips16_stub *next;
9791 static struct mips16_stub *mips16_stubs;
9793 /* Emit code to return a double value from a mips16 stub. GPREG is the
9794 first GP reg to use, FPREG is the first FP reg to use. */
9797 mips16_fpret_double (int gpreg, int fpreg)
9800 fprintf (asm_out_file, "\tdmfc1\t%s,%s\n",
9801 reg_names[gpreg], reg_names[fpreg]);
9802 else if (TARGET_FLOAT64)
9804 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9805 reg_names[gpreg + WORDS_BIG_ENDIAN],
9807 fprintf (asm_out_file, "\tmfhc1\t%s,%s\n",
9808 reg_names[gpreg + !WORDS_BIG_ENDIAN],
9813 if (TARGET_BIG_ENDIAN)
9815 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9816 reg_names[gpreg + 0],
9817 reg_names[fpreg + 1]);
9818 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9819 reg_names[gpreg + 1],
9820 reg_names[fpreg + 0]);
9824 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9825 reg_names[gpreg + 0],
9826 reg_names[fpreg + 0]);
9827 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9828 reg_names[gpreg + 1],
9829 reg_names[fpreg + 1]);
9834 /* Build a call stub for a mips16 call. A stub is needed if we are
9835 passing any floating point values which should go into the floating
9836 point registers. If we are, and the call turns out to be to a
9837 32-bit function, the stub will be used to move the values into the
9838 floating point registers before calling the 32-bit function. The
9839 linker will magically adjust the function call to either the 16-bit
9840 function or the 32-bit stub, depending upon where the function call
9841 is actually defined.
9843 Similarly, we need a stub if the return value might come back in a
9844 floating point register.
9846 RETVAL is the location of the return value, or null if this is
9847 a call rather than a call_value. FN is the address of the
9848 function and ARG_SIZE is the size of the arguments. FP_CODE
9849 is the code built by function_arg. This function returns a nonzero
9850 value if it builds the call instruction itself. */
9853 build_mips16_call_stub (rtx retval, rtx fn, rtx arg_size, int fp_code)
9857 char *secname, *stubname;
9858 struct mips16_stub *l;
9859 tree stubid, stubdecl;
9864 /* We don't need to do anything if we aren't in mips16 mode, or if
9865 we were invoked with the -msoft-float option. */
9866 if (!TARGET_MIPS16 || TARGET_SOFT_FLOAT_ABI)
9869 /* Figure out whether the value might come back in a floating point
9872 fpret = mips_return_mode_in_fpr_p (GET_MODE (retval));
9874 /* We don't need to do anything if there were no floating point
9875 arguments and the value will not be returned in a floating point
9877 if (fp_code == 0 && ! fpret)
9880 /* We don't need to do anything if this is a call to a special
9881 mips16 support function. */
9882 if (GET_CODE (fn) == SYMBOL_REF
9883 && strncmp (XSTR (fn, 0), "__mips16_", 9) == 0)
9886 /* This code will only work for o32 and o64 abis. The other ABI's
9887 require more sophisticated support. */
9888 gcc_assert (TARGET_OLDABI);
9890 /* If we're calling via a function pointer, then we must always call
9891 via a stub. There are magic stubs provided in libgcc.a for each
9892 of the required cases. Each of them expects the function address
9893 to arrive in register $2. */
9895 if (GET_CODE (fn) != SYMBOL_REF)
9901 /* ??? If this code is modified to support other ABI's, we need
9902 to handle PARALLEL return values here. */
9905 sprintf (buf, "__mips16_call_stub_%s_%d",
9906 mips16_call_stub_mode_suffix (GET_MODE (retval)),
9909 sprintf (buf, "__mips16_call_stub_%d",
9912 id = get_identifier (buf);
9913 stub_fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
9915 mips_emit_move (gen_rtx_REG (Pmode, 2), fn);
9917 if (retval == NULL_RTX)
9918 insn = gen_call_internal (stub_fn, arg_size);
9920 insn = gen_call_value_internal (retval, stub_fn, arg_size);
9921 insn = emit_call_insn (insn);
9923 /* Put the register usage information on the CALL. */
9924 CALL_INSN_FUNCTION_USAGE (insn) =
9925 gen_rtx_EXPR_LIST (VOIDmode,
9926 gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 2)),
9927 CALL_INSN_FUNCTION_USAGE (insn));
9929 /* If we are handling a floating point return value, we need to
9930 save $18 in the function prologue. Putting a note on the
9931 call will mean that df_regs_ever_live_p ($18) will be true if the
9932 call is not eliminated, and we can check that in the prologue
9935 CALL_INSN_FUNCTION_USAGE (insn) =
9936 gen_rtx_EXPR_LIST (VOIDmode,
9937 gen_rtx_USE (VOIDmode,
9938 gen_rtx_REG (word_mode, 18)),
9939 CALL_INSN_FUNCTION_USAGE (insn));
9941 /* Return 1 to tell the caller that we've generated the call
9946 /* We know the function we are going to call. If we have already
9947 built a stub, we don't need to do anything further. */
9949 fnname = targetm.strip_name_encoding (XSTR (fn, 0));
9950 for (l = mips16_stubs; l != NULL; l = l->next)
9951 if (strcmp (l->name, fnname) == 0)
9956 /* Build a special purpose stub. When the linker sees a
9957 function call in mips16 code, it will check where the target
9958 is defined. If the target is a 32-bit call, the linker will
9959 search for the section defined here. It can tell which
9960 symbol this section is associated with by looking at the
9961 relocation information (the name is unreliable, since this
9962 might be a static function). If such a section is found, the
9963 linker will redirect the call to the start of the magic
9966 If the function does not return a floating point value, the
9967 special stub section is named
9970 If the function does return a floating point value, the stub
9972 .mips16.call.fp.FNNAME
9975 secname = (char *) alloca (strlen (fnname) + 40);
9976 sprintf (secname, ".mips16.call.%s%s",
9979 stubname = (char *) alloca (strlen (fnname) + 20);
9980 sprintf (stubname, "__call_stub_%s%s",
9983 stubid = get_identifier (stubname);
9984 stubdecl = build_decl (FUNCTION_DECL, stubid,
9985 build_function_type (void_type_node, NULL_TREE));
9986 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
9987 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
9989 fprintf (asm_out_file, "\t# Stub function to call %s%s (",
9991 ? (GET_MODE (retval) == SFmode ? "float " : "double ")
9995 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
9997 fprintf (asm_out_file, "%s%s",
9998 need_comma ? ", " : "",
9999 (f & 3) == 1 ? "float" : "double");
10002 fprintf (asm_out_file, ")\n");
10004 fprintf (asm_out_file, "\t.set\tnomips16\n");
10005 assemble_start_function (stubdecl, stubname);
10007 if (!FUNCTION_NAME_ALREADY_DECLARED)
10009 fputs ("\t.ent\t", asm_out_file);
10010 assemble_name (asm_out_file, stubname);
10011 fputs ("\n", asm_out_file);
10013 assemble_name (asm_out_file, stubname);
10014 fputs (":\n", asm_out_file);
10017 /* We build the stub code by hand. That's the only way we can
10018 do it, since we can't generate 32-bit code during a 16-bit
10021 /* We don't want the assembler to insert any nops here. */
10022 fprintf (asm_out_file, "\t.set\tnoreorder\n");
10024 mips16_fp_args (asm_out_file, fp_code, 0);
10028 fprintf (asm_out_file, "\t.set\tnoat\n");
10029 fprintf (asm_out_file, "\tla\t%s,%s\n", reg_names[GP_REG_FIRST + 1],
10031 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
10032 fprintf (asm_out_file, "\t.set\tat\n");
10033 /* Unfortunately, we can't fill the jump delay slot. We
10034 can't fill with one of the mtc1 instructions, because the
10035 result is not available for one instruction, so if the
10036 very first instruction in the function refers to the
10037 register, it will see the wrong value. */
10038 fprintf (asm_out_file, "\tnop\n");
10042 fprintf (asm_out_file, "\tmove\t%s,%s\n",
10043 reg_names[GP_REG_FIRST + 18], reg_names[GP_REG_FIRST + 31]);
10044 fprintf (asm_out_file, "\tjal\t%s\n", fnname);
10045 /* As above, we can't fill the delay slot. */
10046 fprintf (asm_out_file, "\tnop\n");
10047 switch (GET_MODE (retval))
10050 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
10051 reg_names[GP_REG_FIRST + 3],
10052 reg_names[FP_REG_FIRST + MAX_FPRS_PER_FMT]);
10055 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
10056 reg_names[GP_REG_FIRST + 2],
10057 reg_names[FP_REG_FIRST + 0]);
10058 if (GET_MODE (retval) == SCmode && TARGET_64BIT)
10060 /* On 64-bit targets, complex floats are returned in
10061 a single GPR, such that "sd" on a suitably-aligned
10062 target would store the value correctly. */
10063 fprintf (asm_out_file, "\tdsll\t%s,%s,32\n",
10064 reg_names[GP_REG_FIRST + 2 + TARGET_LITTLE_ENDIAN],
10065 reg_names[GP_REG_FIRST + 2 + TARGET_LITTLE_ENDIAN]);
10066 fprintf (asm_out_file, "\tor\t%s,%s,%s\n",
10067 reg_names[GP_REG_FIRST + 2],
10068 reg_names[GP_REG_FIRST + 2],
10069 reg_names[GP_REG_FIRST + 3]);
10074 mips16_fpret_double (GP_REG_FIRST + 2 + (8 / UNITS_PER_WORD),
10075 FP_REG_FIRST + MAX_FPRS_PER_FMT);
10079 mips16_fpret_double (GP_REG_FIRST + 2, FP_REG_FIRST + 0);
10083 gcc_unreachable ();
10085 fprintf (asm_out_file, "\tj\t%s\n", reg_names[GP_REG_FIRST + 18]);
10086 /* As above, we can't fill the delay slot. */
10087 fprintf (asm_out_file, "\tnop\n");
10090 fprintf (asm_out_file, "\t.set\treorder\n");
10092 #ifdef ASM_DECLARE_FUNCTION_SIZE
10093 ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
10096 if (!FUNCTION_NAME_ALREADY_DECLARED)
10098 fputs ("\t.end\t", asm_out_file);
10099 assemble_name (asm_out_file, stubname);
10100 fputs ("\n", asm_out_file);
10103 /* Record this stub. */
10104 l = (struct mips16_stub *) xmalloc (sizeof *l);
10105 l->name = xstrdup (fnname);
10107 l->next = mips16_stubs;
10111 /* If we expect a floating point return value, but we've built a
10112 stub which does not expect one, then we're in trouble. We can't
10113 use the existing stub, because it won't handle the floating point
10114 value. We can't build a new stub, because the linker won't know
10115 which stub to use for the various calls in this object file.
10116 Fortunately, this case is illegal, since it means that a function
10117 was declared in two different ways in a single compilation. */
10118 if (fpret && ! l->fpret)
10119 error ("cannot handle inconsistent calls to %qs", fnname);
10121 if (retval == NULL_RTX)
10122 insn = gen_call_internal_direct (fn, arg_size);
10124 insn = gen_call_value_internal_direct (retval, fn, arg_size);
10125 insn = emit_call_insn (insn);
10127 /* If we are calling a stub which handles a floating point return
10128 value, we need to arrange to save $18 in the prologue. We do
10129 this by marking the function call as using the register. The
10130 prologue will later see that it is used, and emit code to save
10133 CALL_INSN_FUNCTION_USAGE (insn) =
10134 gen_rtx_EXPR_LIST (VOIDmode,
10135 gen_rtx_USE (VOIDmode, gen_rtx_REG (word_mode, 18)),
10136 CALL_INSN_FUNCTION_USAGE (insn));
10138 /* Return 1 to tell the caller that we've generated the call
10143 /* An entry in the mips16 constant pool. VALUE is the pool constant,
10144 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
10146 struct mips16_constant {
10147 struct mips16_constant *next;
10150 enum machine_mode mode;
10153 /* Information about an incomplete mips16 constant pool. FIRST is the
10154 first constant, HIGHEST_ADDRESS is the highest address that the first
10155 byte of the pool can have, and INSN_ADDRESS is the current instruction
10158 struct mips16_constant_pool {
10159 struct mips16_constant *first;
10160 int highest_address;
10164 /* Add constant VALUE to POOL and return its label. MODE is the
10165 value's mode (used for CONST_INTs, etc.). */
10168 add_constant (struct mips16_constant_pool *pool,
10169 rtx value, enum machine_mode mode)
10171 struct mips16_constant **p, *c;
10172 bool first_of_size_p;
10174 /* See whether the constant is already in the pool. If so, return the
10175 existing label, otherwise leave P pointing to the place where the
10176 constant should be added.
10178 Keep the pool sorted in increasing order of mode size so that we can
10179 reduce the number of alignments needed. */
10180 first_of_size_p = true;
10181 for (p = &pool->first; *p != 0; p = &(*p)->next)
10183 if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
10184 return (*p)->label;
10185 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
10187 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
10188 first_of_size_p = false;
10191 /* In the worst case, the constant needed by the earliest instruction
10192 will end up at the end of the pool. The entire pool must then be
10193 accessible from that instruction.
10195 When adding the first constant, set the pool's highest address to
10196 the address of the first out-of-range byte. Adjust this address
10197 downwards each time a new constant is added. */
10198 if (pool->first == 0)
10199 /* For pc-relative lw, addiu and daddiu instructions, the base PC value
10200 is the address of the instruction with the lowest two bits clear.
10201 The base PC value for ld has the lowest three bits clear. Assume
10202 the worst case here. */
10203 pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
10204 pool->highest_address -= GET_MODE_SIZE (mode);
10205 if (first_of_size_p)
10206 /* Take into account the worst possible padding due to alignment. */
10207 pool->highest_address -= GET_MODE_SIZE (mode) - 1;
10209 /* Create a new entry. */
10210 c = (struct mips16_constant *) xmalloc (sizeof *c);
10213 c->label = gen_label_rtx ();
10220 /* Output constant VALUE after instruction INSN and return the last
10221 instruction emitted. MODE is the mode of the constant. */
10224 dump_constants_1 (enum machine_mode mode, rtx value, rtx insn)
10226 if (SCALAR_INT_MODE_P (mode)
10227 || ALL_SCALAR_FRACT_MODE_P (mode)
10228 || ALL_SCALAR_ACCUM_MODE_P (mode))
10230 rtx size = GEN_INT (GET_MODE_SIZE (mode));
10231 return emit_insn_after (gen_consttable_int (value, size), insn);
10234 if (SCALAR_FLOAT_MODE_P (mode))
10235 return emit_insn_after (gen_consttable_float (value), insn);
10237 if (VECTOR_MODE_P (mode))
10241 for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
10242 insn = dump_constants_1 (GET_MODE_INNER (mode),
10243 CONST_VECTOR_ELT (value, i), insn);
10247 gcc_unreachable ();
10251 /* Dump out the constants in CONSTANTS after INSN. */
10254 dump_constants (struct mips16_constant *constants, rtx insn)
10256 struct mips16_constant *c, *next;
10260 for (c = constants; c != NULL; c = next)
10262 /* If necessary, increase the alignment of PC. */
10263 if (align < GET_MODE_SIZE (c->mode))
10265 int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
10266 insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
10268 align = GET_MODE_SIZE (c->mode);
10270 insn = emit_label_after (c->label, insn);
10271 insn = dump_constants_1 (c->mode, c->value, insn);
10277 emit_barrier_after (insn);
10280 /* Return the length of instruction INSN. */
10283 mips16_insn_length (rtx insn)
10287 rtx body = PATTERN (insn);
10288 if (GET_CODE (body) == ADDR_VEC)
10289 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
10290 if (GET_CODE (body) == ADDR_DIFF_VEC)
10291 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
10293 return get_attr_length (insn);
10296 /* If *X is a symbolic constant that refers to the constant pool, add
10297 the constant to POOL and rewrite *X to use the constant's label. */
10300 mips16_rewrite_pool_constant (struct mips16_constant_pool *pool, rtx *x)
10302 rtx base, offset, label;
10304 split_const (*x, &base, &offset);
10305 if (GET_CODE (base) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (base))
10307 label = add_constant (pool, get_pool_constant (base),
10308 get_pool_mode (base));
10309 base = gen_rtx_LABEL_REF (Pmode, label);
10310 *x = mips_unspec_address_offset (base, offset, SYMBOL_PC_RELATIVE);
10314 /* This structure is used to communicate with mips16_rewrite_pool_refs.
10315 INSN is the instruction we're rewriting and POOL points to the current
10317 struct mips16_rewrite_pool_refs_info {
10319 struct mips16_constant_pool *pool;
10322 /* Rewrite *X so that constant pool references refer to the constant's
10323 label instead. DATA points to a mips16_rewrite_pool_refs_info
10327 mips16_rewrite_pool_refs (rtx *x, void *data)
10329 struct mips16_rewrite_pool_refs_info *info = data;
10331 if (force_to_mem_operand (*x, Pmode))
10333 rtx mem = force_const_mem (GET_MODE (*x), *x);
10334 validate_change (info->insn, x, mem, false);
10339 mips16_rewrite_pool_constant (info->pool, &XEXP (*x, 0));
10343 if (TARGET_MIPS16_TEXT_LOADS)
10344 mips16_rewrite_pool_constant (info->pool, x);
10346 return GET_CODE (*x) == CONST ? -1 : 0;
10349 /* Build MIPS16 constant pools. */
10352 mips16_lay_out_constants (void)
10354 struct mips16_constant_pool pool;
10355 struct mips16_rewrite_pool_refs_info info;
10358 if (!TARGET_MIPS16_PCREL_LOADS)
10362 memset (&pool, 0, sizeof (pool));
10363 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10365 /* Rewrite constant pool references in INSN. */
10370 for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &info);
10373 pool.insn_address += mips16_insn_length (insn);
10375 if (pool.first != NULL)
10377 /* If there are no natural barriers between the first user of
10378 the pool and the highest acceptable address, we'll need to
10379 create a new instruction to jump around the constant pool.
10380 In the worst case, this instruction will be 4 bytes long.
10382 If it's too late to do this transformation after INSN,
10383 do it immediately before INSN. */
10384 if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
10388 label = gen_label_rtx ();
10390 jump = emit_jump_insn_before (gen_jump (label), insn);
10391 JUMP_LABEL (jump) = label;
10392 LABEL_NUSES (label) = 1;
10393 barrier = emit_barrier_after (jump);
10395 emit_label_after (label, barrier);
10396 pool.insn_address += 4;
10399 /* See whether the constant pool is now out of range of the first
10400 user. If so, output the constants after the previous barrier.
10401 Note that any instructions between BARRIER and INSN (inclusive)
10402 will use negative offsets to refer to the pool. */
10403 if (pool.insn_address > pool.highest_address)
10405 dump_constants (pool.first, barrier);
10409 else if (BARRIER_P (insn))
10413 dump_constants (pool.first, get_last_insn ());
10416 /* A temporary variable used by for_each_rtx callbacks, etc. */
10417 static rtx mips_sim_insn;
10419 /* A structure representing the state of the processor pipeline.
10420 Used by the mips_sim_* family of functions. */
10422 /* The maximum number of instructions that can be issued in a cycle.
10423 (Caches mips_issue_rate.) */
10424 unsigned int issue_rate;
10426 /* The current simulation time. */
10429 /* How many more instructions can be issued in the current cycle. */
10430 unsigned int insns_left;
10432 /* LAST_SET[X].INSN is the last instruction to set register X.
10433 LAST_SET[X].TIME is the time at which that instruction was issued.
10434 INSN is null if no instruction has yet set register X. */
10438 } last_set[FIRST_PSEUDO_REGISTER];
10440 /* The pipeline's current DFA state. */
10444 /* Reset STATE to the initial simulation state. */
10447 mips_sim_reset (struct mips_sim *state)
10450 state->insns_left = state->issue_rate;
10451 memset (&state->last_set, 0, sizeof (state->last_set));
10452 state_reset (state->dfa_state);
10455 /* Initialize STATE before its first use. DFA_STATE points to an
10456 allocated but uninitialized DFA state. */
10459 mips_sim_init (struct mips_sim *state, state_t dfa_state)
10461 state->issue_rate = mips_issue_rate ();
10462 state->dfa_state = dfa_state;
10463 mips_sim_reset (state);
10466 /* Advance STATE by one clock cycle. */
10469 mips_sim_next_cycle (struct mips_sim *state)
10472 state->insns_left = state->issue_rate;
10473 state_transition (state->dfa_state, 0);
10476 /* Advance simulation state STATE until instruction INSN can read
10480 mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
10484 for (i = 0; i < HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)); i++)
10485 if (state->last_set[REGNO (reg) + i].insn != 0)
10489 t = state->last_set[REGNO (reg) + i].time;
10490 t += insn_latency (state->last_set[REGNO (reg) + i].insn, insn);
10491 while (state->time < t)
10492 mips_sim_next_cycle (state);
10496 /* A for_each_rtx callback. If *X is a register, advance simulation state
10497 DATA until mips_sim_insn can read the register's value. */
10500 mips_sim_wait_regs_2 (rtx *x, void *data)
10503 mips_sim_wait_reg (data, mips_sim_insn, *x);
10507 /* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X. */
10510 mips_sim_wait_regs_1 (rtx *x, void *data)
10512 for_each_rtx (x, mips_sim_wait_regs_2, data);
10515 /* Advance simulation state STATE until all of INSN's register
10516 dependencies are satisfied. */
10519 mips_sim_wait_regs (struct mips_sim *state, rtx insn)
10521 mips_sim_insn = insn;
10522 note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
10525 /* Advance simulation state STATE until the units required by
10526 instruction INSN are available. */
10529 mips_sim_wait_units (struct mips_sim *state, rtx insn)
10533 tmp_state = alloca (state_size ());
10534 while (state->insns_left == 0
10535 || (memcpy (tmp_state, state->dfa_state, state_size ()),
10536 state_transition (tmp_state, insn) >= 0))
10537 mips_sim_next_cycle (state);
10540 /* Advance simulation state STATE until INSN is ready to issue. */
10543 mips_sim_wait_insn (struct mips_sim *state, rtx insn)
10545 mips_sim_wait_regs (state, insn);
10546 mips_sim_wait_units (state, insn);
10549 /* mips_sim_insn has just set X. Update the LAST_SET array
10550 in simulation state DATA. */
10553 mips_sim_record_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
10555 struct mips_sim *state;
10560 for (i = 0; i < HARD_REGNO_NREGS (REGNO (x), GET_MODE (x)); i++)
10562 state->last_set[REGNO (x) + i].insn = mips_sim_insn;
10563 state->last_set[REGNO (x) + i].time = state->time;
10567 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
10568 can issue immediately (i.e., that mips_sim_wait_insn has already
10572 mips_sim_issue_insn (struct mips_sim *state, rtx insn)
10574 state_transition (state->dfa_state, insn);
10575 state->insns_left--;
10577 mips_sim_insn = insn;
10578 note_stores (PATTERN (insn), mips_sim_record_set, state);
10581 /* Simulate issuing a NOP in state STATE. */
10584 mips_sim_issue_nop (struct mips_sim *state)
10586 if (state->insns_left == 0)
10587 mips_sim_next_cycle (state);
10588 state->insns_left--;
10591 /* Update simulation state STATE so that it's ready to accept the instruction
10592 after INSN. INSN should be part of the main rtl chain, not a member of a
10596 mips_sim_finish_insn (struct mips_sim *state, rtx insn)
10598 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
10600 mips_sim_issue_nop (state);
10602 switch (GET_CODE (SEQ_BEGIN (insn)))
10606 /* We can't predict the processor state after a call or label. */
10607 mips_sim_reset (state);
10611 /* The delay slots of branch likely instructions are only executed
10612 when the branch is taken. Therefore, if the caller has simulated
10613 the delay slot instruction, STATE does not really reflect the state
10614 of the pipeline for the instruction after the delay slot. Also,
10615 branch likely instructions tend to incur a penalty when not taken,
10616 so there will probably be an extra delay between the branch and
10617 the instruction after the delay slot. */
10618 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
10619 mips_sim_reset (state);
10627 /* The VR4130 pipeline issues aligned pairs of instructions together,
10628 but it stalls the second instruction if it depends on the first.
10629 In order to cut down the amount of logic required, this dependence
10630 check is not based on a full instruction decode. Instead, any non-SPECIAL
10631 instruction is assumed to modify the register specified by bits 20-16
10632 (which is usually the "rt" field).
10634 In beq, beql, bne and bnel instructions, the rt field is actually an
10635 input, so we can end up with a false dependence between the branch
10636 and its delay slot. If this situation occurs in instruction INSN,
10637 try to avoid it by swapping rs and rt. */
10640 vr4130_avoid_branch_rt_conflict (rtx insn)
10644 first = SEQ_BEGIN (insn);
10645 second = SEQ_END (insn);
10647 && NONJUMP_INSN_P (second)
10648 && GET_CODE (PATTERN (first)) == SET
10649 && GET_CODE (SET_DEST (PATTERN (first))) == PC
10650 && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
10652 /* Check for the right kind of condition. */
10653 rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
10654 if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
10655 && REG_P (XEXP (cond, 0))
10656 && REG_P (XEXP (cond, 1))
10657 && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
10658 && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
10660 /* SECOND mentions the rt register but not the rs register. */
10661 rtx tmp = XEXP (cond, 0);
10662 XEXP (cond, 0) = XEXP (cond, 1);
10663 XEXP (cond, 1) = tmp;
10668 /* Implement -mvr4130-align. Go through each basic block and simulate the
10669 processor pipeline. If we find that a pair of instructions could execute
10670 in parallel, and the first of those instruction is not 8-byte aligned,
10671 insert a nop to make it aligned. */
10674 vr4130_align_insns (void)
10676 struct mips_sim state;
10677 rtx insn, subinsn, last, last2, next;
10682 /* LAST is the last instruction before INSN to have a nonzero length.
10683 LAST2 is the last such instruction before LAST. */
10687 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
10690 mips_sim_init (&state, alloca (state_size ()));
10691 for (insn = get_insns (); insn != 0; insn = next)
10693 unsigned int length;
10695 next = NEXT_INSN (insn);
10697 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
10698 This isn't really related to the alignment pass, but we do it on
10699 the fly to avoid a separate instruction walk. */
10700 vr4130_avoid_branch_rt_conflict (insn);
10702 if (USEFUL_INSN_P (insn))
10703 FOR_EACH_SUBINSN (subinsn, insn)
10705 mips_sim_wait_insn (&state, subinsn);
10707 /* If we want this instruction to issue in parallel with the
10708 previous one, make sure that the previous instruction is
10709 aligned. There are several reasons why this isn't worthwhile
10710 when the second instruction is a call:
10712 - Calls are less likely to be performance critical,
10713 - There's a good chance that the delay slot can execute
10714 in parallel with the call.
10715 - The return address would then be unaligned.
10717 In general, if we're going to insert a nop between instructions
10718 X and Y, it's better to insert it immediately after X. That
10719 way, if the nop makes Y aligned, it will also align any labels
10720 between X and Y. */
10721 if (state.insns_left != state.issue_rate
10722 && !CALL_P (subinsn))
10724 if (subinsn == SEQ_BEGIN (insn) && aligned_p)
10726 /* SUBINSN is the first instruction in INSN and INSN is
10727 aligned. We want to align the previous instruction
10728 instead, so insert a nop between LAST2 and LAST.
10730 Note that LAST could be either a single instruction
10731 or a branch with a delay slot. In the latter case,
10732 LAST, like INSN, is already aligned, but the delay
10733 slot must have some extra delay that stops it from
10734 issuing at the same time as the branch. We therefore
10735 insert a nop before the branch in order to align its
10737 emit_insn_after (gen_nop (), last2);
10740 else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
10742 /* SUBINSN is the delay slot of INSN, but INSN is
10743 currently unaligned. Insert a nop between
10744 LAST and INSN to align it. */
10745 emit_insn_after (gen_nop (), last);
10749 mips_sim_issue_insn (&state, subinsn);
10751 mips_sim_finish_insn (&state, insn);
10753 /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
10754 length = get_attr_length (insn);
10757 /* If the instruction is an asm statement or multi-instruction
10758 mips.md patern, the length is only an estimate. Insert an
10759 8 byte alignment after it so that the following instructions
10760 can be handled correctly. */
10761 if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
10762 && (recog_memoized (insn) < 0 || length >= 8))
10764 next = emit_insn_after (gen_align (GEN_INT (3)), insn);
10765 next = NEXT_INSN (next);
10766 mips_sim_next_cycle (&state);
10769 else if (length & 4)
10770 aligned_p = !aligned_p;
10775 /* See whether INSN is an aligned label. */
10776 if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
10782 /* Subroutine of mips_reorg. If there is a hazard between INSN
10783 and a previous instruction, avoid it by inserting nops after
10786 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
10787 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
10788 before using the value of that register. *HILO_DELAY counts the
10789 number of instructions since the last hilo hazard (that is,
10790 the number of instructions since the last mflo or mfhi).
10792 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
10793 for the next instruction.
10795 LO_REG is an rtx for the LO register, used in dependence checking. */
10798 mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
10799 rtx *delayed_reg, rtx lo_reg)
10804 if (!INSN_P (insn))
10807 pattern = PATTERN (insn);
10809 /* Do not put the whole function in .set noreorder if it contains
10810 an asm statement. We don't know whether there will be hazards
10811 between the asm statement and the gcc-generated code. */
10812 if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
10813 cfun->machine->all_noreorder_p = false;
10815 /* Ignore zero-length instructions (barriers and the like). */
10816 ninsns = get_attr_length (insn) / 4;
10820 /* Work out how many nops are needed. Note that we only care about
10821 registers that are explicitly mentioned in the instruction's pattern.
10822 It doesn't matter that calls use the argument registers or that they
10823 clobber hi and lo. */
10824 if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
10825 nops = 2 - *hilo_delay;
10826 else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
10831 /* Insert the nops between this instruction and the previous one.
10832 Each new nop takes us further from the last hilo hazard. */
10833 *hilo_delay += nops;
10835 emit_insn_after (gen_hazard_nop (), after);
10837 /* Set up the state for the next instruction. */
10838 *hilo_delay += ninsns;
10840 if (INSN_CODE (insn) >= 0)
10841 switch (get_attr_hazard (insn))
10851 set = single_set (insn);
10852 gcc_assert (set != 0);
10853 *delayed_reg = SET_DEST (set);
10859 /* Go through the instruction stream and insert nops where necessary.
10860 See if the whole function can then be put into .set noreorder &
10864 mips_avoid_hazards (void)
10866 rtx insn, last_insn, lo_reg, delayed_reg;
10869 /* Force all instructions to be split into their final form. */
10870 split_all_insns_noflow ();
10872 /* Recalculate instruction lengths without taking nops into account. */
10873 cfun->machine->ignore_hazard_length_p = true;
10874 shorten_branches (get_insns ());
10876 cfun->machine->all_noreorder_p = true;
10878 /* Profiled functions can't be all noreorder because the profiler
10879 support uses assembler macros. */
10880 if (current_function_profile)
10881 cfun->machine->all_noreorder_p = false;
10883 /* Code compiled with -mfix-vr4120 can't be all noreorder because
10884 we rely on the assembler to work around some errata. */
10885 if (TARGET_FIX_VR4120)
10886 cfun->machine->all_noreorder_p = false;
10888 /* The same is true for -mfix-vr4130 if we might generate mflo or
10889 mfhi instructions. Note that we avoid using mflo and mfhi if
10890 the VR4130 macc and dmacc instructions are available instead;
10891 see the *mfhilo_{si,di}_macc patterns. */
10892 if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
10893 cfun->machine->all_noreorder_p = false;
10898 lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
10900 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
10903 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
10904 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
10905 mips_avoid_hazard (last_insn, XVECEXP (PATTERN (insn), 0, i),
10906 &hilo_delay, &delayed_reg, lo_reg);
10908 mips_avoid_hazard (last_insn, insn, &hilo_delay,
10909 &delayed_reg, lo_reg);
10916 /* Implement TARGET_MACHINE_DEPENDENT_REORG. */
10921 mips16_lay_out_constants ();
10922 if (TARGET_EXPLICIT_RELOCS)
10924 if (mips_flag_delayed_branch)
10925 dbr_schedule (get_insns ());
10926 mips_avoid_hazards ();
10927 if (TUNE_MIPS4130 && TARGET_VR4130_ALIGN)
10928 vr4130_align_insns ();
10932 /* This function does three things:
10934 - Register the special divsi3 and modsi3 functions if -mfix-vr4120.
10935 - Register the mips16 hardware floating point stubs.
10936 - Register the gofast functions if selected using --enable-gofast. */
10938 #include "config/gofast.h"
10941 mips_init_libfuncs (void)
10943 if (TARGET_FIX_VR4120)
10945 set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
10946 set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
10949 if (TARGET_MIPS16 && TARGET_HARD_FLOAT_ABI)
10951 set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
10952 set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
10953 set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
10954 set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");
10956 set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
10957 set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
10958 set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
10959 set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
10960 set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
10961 set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
10962 set_optab_libfunc (unord_optab, SFmode, "__mips16_unordsf2");
10964 set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
10965 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
10966 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__mips16_floatunsisf");
10968 if (TARGET_DOUBLE_FLOAT)
10970 set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
10971 set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
10972 set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
10973 set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");
10975 set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
10976 set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
10977 set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
10978 set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
10979 set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
10980 set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
10981 set_optab_libfunc (unord_optab, DFmode, "__mips16_unorddf2");
10983 set_conv_libfunc (sext_optab, DFmode, SFmode, "__mips16_extendsfdf2");
10984 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__mips16_truncdfsf2");
10986 set_conv_libfunc (sfix_optab, SImode, DFmode, "__mips16_fix_truncdfsi");
10987 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__mips16_floatsidf");
10988 set_conv_libfunc (ufloat_optab, DFmode, SImode, "__mips16_floatunsidf");
10992 gofast_maybe_init_libfuncs ();
10995 /* Return a number assessing the cost of moving a register in class
10996 FROM to class TO. The classes are expressed using the enumeration
10997 values such as `GENERAL_REGS'. A value of 2 is the default; other
10998 values are interpreted relative to that.
11000 It is not required that the cost always equal 2 when FROM is the
11001 same as TO; on some machines it is expensive to move between
11002 registers if they are not general registers.
11004 If reload sees an insn consisting of a single `set' between two
11005 hard registers, and if `REGISTER_MOVE_COST' applied to their
11006 classes returns a value of 2, reload does not check to ensure that
11007 the constraints of the insn are met. Setting a cost of other than
11008 2 will allow reload to verify that the constraints are met. You
11009 should do this if the `movM' pattern's constraints do not allow
11012 ??? We make the cost of moving from HI/LO into general
11013 registers the same as for one of moving general registers to
11014 HI/LO for TARGET_MIPS16 in order to prevent allocating a
11015 pseudo to HI/LO. This might hurt optimizations though, it
11016 isn't clear if it is wise. And it might not work in all cases. We
11017 could solve the DImode LO reg problem by using a multiply, just
11018 like reload_{in,out}si. We could solve the SImode/HImode HI reg
11019 problem by using divide instructions. divu puts the remainder in
11020 the HI reg, so doing a divide by -1 will move the value in the HI
11021 reg for all values except -1. We could handle that case by using a
11022 signed divide, e.g. -1 / 2 (or maybe 1 / -2?). We'd have to emit
11023 a compare/branch to test the input value to see which instruction
11024 we need to use. This gets pretty messy, but it is feasible. */
11027 mips_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
11028 enum reg_class to, enum reg_class from)
11030 if (from == M16_REGS && reg_class_subset_p (to, GENERAL_REGS))
11032 else if (from == M16_NA_REGS && reg_class_subset_p (to, GENERAL_REGS))
11034 else if (reg_class_subset_p (from, GENERAL_REGS))
11036 if (to == M16_REGS)
11038 else if (to == M16_NA_REGS)
11040 else if (reg_class_subset_p (to, GENERAL_REGS))
11047 else if (to == FP_REGS)
11049 else if (reg_class_subset_p (to, ACC_REGS))
11056 else if (reg_class_subset_p (to, ALL_COP_REGS))
11061 else if (from == FP_REGS)
11063 if (reg_class_subset_p (to, GENERAL_REGS))
11065 else if (to == FP_REGS)
11067 else if (to == ST_REGS)
11070 else if (reg_class_subset_p (from, ACC_REGS))
11072 if (reg_class_subset_p (to, GENERAL_REGS))
11080 else if (from == ST_REGS && reg_class_subset_p (to, GENERAL_REGS))
11082 else if (reg_class_subset_p (from, ALL_COP_REGS))
11088 ??? What cases are these? Shouldn't we return 2 here? */
11093 /* Return the length of INSN. LENGTH is the initial length computed by
11094 attributes in the machine-description file. */
11097 mips_adjust_insn_length (rtx insn, int length)
11099 /* A unconditional jump has an unfilled delay slot if it is not part
11100 of a sequence. A conditional jump normally has a delay slot, but
11101 does not on MIPS16. */
11102 if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
11105 /* See how many nops might be needed to avoid hardware hazards. */
11106 if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
11107 switch (get_attr_hazard (insn))
11121 /* All MIPS16 instructions are a measly two bytes. */
11129 /* Return an asm sequence to start a noat block and load the address
11130 of a label into $1. */
11133 mips_output_load_label (void)
11135 if (TARGET_EXPLICIT_RELOCS)
11139 return "%[lw\t%@,%%got_page(%0)(%+)\n\taddiu\t%@,%@,%%got_ofst(%0)";
11142 return "%[ld\t%@,%%got_page(%0)(%+)\n\tdaddiu\t%@,%@,%%got_ofst(%0)";
11145 if (ISA_HAS_LOAD_DELAY)
11146 return "%[lw\t%@,%%got(%0)(%+)%#\n\taddiu\t%@,%@,%%lo(%0)";
11147 return "%[lw\t%@,%%got(%0)(%+)\n\taddiu\t%@,%@,%%lo(%0)";
11151 if (Pmode == DImode)
11152 return "%[dla\t%@,%0";
11154 return "%[la\t%@,%0";
11158 /* Return the assembly code for INSN, which has the operands given by
11159 OPERANDS, and which branches to OPERANDS[1] if some condition is true.
11160 BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[1]
11161 is in range of a direct branch. BRANCH_IF_FALSE is an inverted
11162 version of BRANCH_IF_TRUE. */
11165 mips_output_conditional_branch (rtx insn, rtx *operands,
11166 const char *branch_if_true,
11167 const char *branch_if_false)
11169 unsigned int length;
11170 rtx taken, not_taken;
11172 length = get_attr_length (insn);
11175 /* Just a simple conditional branch. */
11176 mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
11177 return branch_if_true;
11180 /* Generate a reversed branch around a direct jump. This fallback does
11181 not use branch-likely instructions. */
11182 mips_branch_likely = false;
11183 not_taken = gen_label_rtx ();
11184 taken = operands[1];
11186 /* Generate the reversed branch to NOT_TAKEN. */
11187 operands[1] = not_taken;
11188 output_asm_insn (branch_if_false, operands);
11190 /* If INSN has a delay slot, we must provide delay slots for both the
11191 branch to NOT_TAKEN and the conditional jump. We must also ensure
11192 that INSN's delay slot is executed in the appropriate cases. */
11193 if (final_sequence)
11195 /* This first delay slot will always be executed, so use INSN's
11196 delay slot if is not annulled. */
11197 if (!INSN_ANNULLED_BRANCH_P (insn))
11199 final_scan_insn (XVECEXP (final_sequence, 0, 1),
11200 asm_out_file, optimize, 1, NULL);
11201 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
11204 output_asm_insn ("nop", 0);
11205 fprintf (asm_out_file, "\n");
11208 /* Output the unconditional branch to TAKEN. */
11210 output_asm_insn ("j\t%0%/", &taken);
11213 output_asm_insn (mips_output_load_label (), &taken);
11214 output_asm_insn ("jr\t%@%]%/", 0);
11217 /* Now deal with its delay slot; see above. */
11218 if (final_sequence)
11220 /* This delay slot will only be executed if the branch is taken.
11221 Use INSN's delay slot if is annulled. */
11222 if (INSN_ANNULLED_BRANCH_P (insn))
11224 final_scan_insn (XVECEXP (final_sequence, 0, 1),
11225 asm_out_file, optimize, 1, NULL);
11226 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
11229 output_asm_insn ("nop", 0);
11230 fprintf (asm_out_file, "\n");
11233 /* Output NOT_TAKEN. */
11234 (*targetm.asm_out.internal_label) (asm_out_file, "L",
11235 CODE_LABEL_NUMBER (not_taken));
11239 /* Return the assembly code for INSN, which branches to OPERANDS[1]
11240 if some ordered condition is true. The condition is given by
11241 OPERANDS[0] if !INVERTED_P, otherwise it is the inverse of
11242 OPERANDS[0]. OPERANDS[2] is the comparison's first operand;
11243 its second is always zero. */
11246 mips_output_order_conditional_branch (rtx insn, rtx *operands, bool inverted_p)
11248 const char *branch[2];
11250 /* Make BRANCH[1] branch to OPERANDS[1] when the condition is true.
11251 Make BRANCH[0] branch on the inverse condition. */
11252 switch (GET_CODE (operands[0]))
11254 /* These cases are equivalent to comparisons against zero. */
11256 inverted_p = !inverted_p;
11257 /* Fall through. */
11259 branch[!inverted_p] = MIPS_BRANCH ("bne", "%2,%.,%1");
11260 branch[inverted_p] = MIPS_BRANCH ("beq", "%2,%.,%1");
11263 /* These cases are always true or always false. */
11265 inverted_p = !inverted_p;
11266 /* Fall through. */
11268 branch[!inverted_p] = MIPS_BRANCH ("beq", "%.,%.,%1");
11269 branch[inverted_p] = MIPS_BRANCH ("bne", "%.,%.,%1");
11273 branch[!inverted_p] = MIPS_BRANCH ("b%C0z", "%2,%1");
11274 branch[inverted_p] = MIPS_BRANCH ("b%N0z", "%2,%1");
11277 return mips_output_conditional_branch (insn, operands, branch[1], branch[0]);
11280 /* Used to output div or ddiv instruction DIVISION, which has the operands
11281 given by OPERANDS. Add in a divide-by-zero check if needed.
11283 When working around R4000 and R4400 errata, we need to make sure that
11284 the division is not immediately followed by a shift[1][2]. We also
11285 need to stop the division from being put into a branch delay slot[3].
11286 The easiest way to avoid both problems is to add a nop after the
11287 division. When a divide-by-zero check is needed, this nop can be
11288 used to fill the branch delay slot.
11290 [1] If a double-word or a variable shift executes immediately
11291 after starting an integer division, the shift may give an
11292 incorrect result. See quotations of errata #16 and #28 from
11293 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
11294 in mips.md for details.
11296 [2] A similar bug to [1] exists for all revisions of the
11297 R4000 and the R4400 when run in an MC configuration.
11298 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
11300 "19. In this following sequence:
11302 ddiv (or ddivu or div or divu)
11303 dsll32 (or dsrl32, dsra32)
11305 if an MPT stall occurs, while the divide is slipping the cpu
11306 pipeline, then the following double shift would end up with an
11309 Workaround: The compiler needs to avoid generating any
11310 sequence with divide followed by extended double shift."
11312 This erratum is also present in "MIPS R4400MC Errata, Processor
11313 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
11314 & 3.0" as errata #10 and #4, respectively.
11316 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
11317 (also valid for MIPS R4000MC processors):
11319 "52. R4000SC: This bug does not apply for the R4000PC.
11321 There are two flavors of this bug:
11323 1) If the instruction just after divide takes an RF exception
11324 (tlb-refill, tlb-invalid) and gets an instruction cache
11325 miss (both primary and secondary) and the line which is
11326 currently in secondary cache at this index had the first
11327 data word, where the bits 5..2 are set, then R4000 would
11328 get a wrong result for the div.
11333 ------------------- # end-of page. -tlb-refill
11338 ------------------- # end-of page. -tlb-invalid
11341 2) If the divide is in the taken branch delay slot, where the
11342 target takes RF exception and gets an I-cache miss for the
11343 exception vector or where I-cache miss occurs for the
11344 target address, under the above mentioned scenarios, the
11345 div would get wrong results.
11348 j r2 # to next page mapped or unmapped
11349 div r8,r9 # this bug would be there as long
11350 # as there is an ICache miss and
11351 nop # the "data pattern" is present
11354 beq r0, r0, NextPage # to Next page
11358 This bug is present for div, divu, ddiv, and ddivu
11361 Workaround: For item 1), OS could make sure that the next page
11362 after the divide instruction is also mapped. For item 2), the
11363 compiler could make sure that the divide instruction is not in
11364 the branch delay slot."
11366 These processors have PRId values of 0x00004220 and 0x00004300 for
11367 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
11370 mips_output_division (const char *division, rtx *operands)
11375 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
11377 output_asm_insn (s, operands);
11380 if (TARGET_CHECK_ZERO_DIV)
11384 output_asm_insn (s, operands);
11385 s = "bnez\t%2,1f\n\tbreak\t7\n1:";
11387 else if (GENERATE_DIVIDE_TRAPS)
11389 output_asm_insn (s, operands);
11390 s = "teq\t%2,%.,7";
11394 output_asm_insn ("%(bne\t%2,%.,1f", operands);
11395 output_asm_insn (s, operands);
11396 s = "break\t7%)\n1:";
11402 /* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
11403 with a final "000" replaced by "k". Ignore case.
11405 Note: this function is shared between GCC and GAS. */
11408 mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
11410 while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
11411 given++, canonical++;
11413 return ((*given == 0 && *canonical == 0)
11414 || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
11418 /* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
11419 CPU name. We've traditionally allowed a lot of variation here.
11421 Note: this function is shared between GCC and GAS. */
11424 mips_matching_cpu_name_p (const char *canonical, const char *given)
11426 /* First see if the name matches exactly, or with a final "000"
11427 turned into "k". */
11428 if (mips_strict_matching_cpu_name_p (canonical, given))
11431 /* If not, try comparing based on numerical designation alone.
11432 See if GIVEN is an unadorned number, or 'r' followed by a number. */
11433 if (TOLOWER (*given) == 'r')
11435 if (!ISDIGIT (*given))
11438 /* Skip over some well-known prefixes in the canonical name,
11439 hoping to find a number there too. */
11440 if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
11442 else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
11444 else if (TOLOWER (canonical[0]) == 'r')
11447 return mips_strict_matching_cpu_name_p (canonical, given);
11451 /* Return the mips_cpu_info entry for the processor or ISA given
11452 by CPU_STRING. Return null if the string isn't recognized.
11454 A similar function exists in GAS. */
11456 static const struct mips_cpu_info *
11457 mips_parse_cpu (const char *cpu_string)
11459 const struct mips_cpu_info *p;
11462 /* In the past, we allowed upper-case CPU names, but it doesn't
11463 work well with the multilib machinery. */
11464 for (s = cpu_string; *s != 0; s++)
11467 warning (0, "the cpu name must be lower case");
11471 /* 'from-abi' selects the most compatible architecture for the given
11472 ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs. For the
11473 EABIs, we have to decide whether we're using the 32-bit or 64-bit
11474 version. Look first at the -mgp options, if given, otherwise base
11475 the choice on MASK_64BIT in TARGET_DEFAULT. */
11476 if (strcasecmp (cpu_string, "from-abi") == 0)
11477 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
11478 : ABI_NEEDS_64BIT_REGS ? 3
11479 : (TARGET_64BIT ? 3 : 1));
11481 /* 'default' has traditionally been a no-op. Probably not very useful. */
11482 if (strcasecmp (cpu_string, "default") == 0)
11485 for (p = mips_cpu_info_table; p->name != 0; p++)
11486 if (mips_matching_cpu_name_p (p->name, cpu_string))
11493 /* Return the processor associated with the given ISA level, or null
11494 if the ISA isn't valid. */
11496 static const struct mips_cpu_info *
11497 mips_cpu_info_from_isa (int isa)
11499 const struct mips_cpu_info *p;
11501 for (p = mips_cpu_info_table; p->name != 0; p++)
11508 /* Implement HARD_REGNO_NREGS. The size of FP registers is controlled
11509 by UNITS_PER_FPREG. The size of FP status registers is always 4, because
11510 they only hold condition code modes, and CCmode is always considered to
11511 be 4 bytes wide. All other registers are word sized. */
11514 mips_hard_regno_nregs (int regno, enum machine_mode mode)
11516 if (ST_REG_P (regno))
11517 return ((GET_MODE_SIZE (mode) + 3) / 4);
11518 else if (! FP_REG_P (regno))
11519 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
11521 return ((GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG);
11524 /* Implement TARGET_RETURN_IN_MEMORY. Under the old (i.e., 32 and O64 ABIs)
11525 all BLKmode objects are returned in memory. Under the new (N32 and
11526 64-bit MIPS ABIs) small structures are returned in a register.
11527 Objects with varying size must still be returned in memory, of
11531 mips_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
11534 return (TYPE_MODE (type) == BLKmode);
11536 return ((int_size_in_bytes (type) > (2 * UNITS_PER_WORD))
11537 || (int_size_in_bytes (type) == -1));
11541 mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
11543 return !TARGET_OLDABI;
11546 /* Return true if INSN is a multiply-add or multiply-subtract
11547 instruction and PREV assigns to the accumulator operand. */
11550 mips_linked_madd_p (rtx prev, rtx insn)
11554 x = single_set (insn);
11560 if (GET_CODE (x) == PLUS
11561 && GET_CODE (XEXP (x, 0)) == MULT
11562 && reg_set_p (XEXP (x, 1), prev))
11565 if (GET_CODE (x) == MINUS
11566 && GET_CODE (XEXP (x, 1)) == MULT
11567 && reg_set_p (XEXP (x, 0), prev))
11573 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
11574 that may clobber hi or lo. */
11576 static rtx mips_macc_chains_last_hilo;
11578 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
11579 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
11582 mips_macc_chains_record (rtx insn)
11584 if (get_attr_may_clobber_hilo (insn))
11585 mips_macc_chains_last_hilo = insn;
11588 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
11589 has NREADY elements, looking for a multiply-add or multiply-subtract
11590 instruction that is cumulative with mips_macc_chains_last_hilo.
11591 If there is one, promote it ahead of anything else that might
11592 clobber hi or lo. */
11595 mips_macc_chains_reorder (rtx *ready, int nready)
11599 if (mips_macc_chains_last_hilo != 0)
11600 for (i = nready - 1; i >= 0; i--)
11601 if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
11603 for (j = nready - 1; j > i; j--)
11604 if (recog_memoized (ready[j]) >= 0
11605 && get_attr_may_clobber_hilo (ready[j]))
11607 mips_promote_ready (ready, i, j);
11614 /* The last instruction to be scheduled. */
11616 static rtx vr4130_last_insn;
11618 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
11619 points to an rtx that is initially an instruction. Nullify the rtx
11620 if the instruction uses the value of register X. */
11623 vr4130_true_reg_dependence_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
11625 rtx *insn_ptr = data;
11628 && reg_referenced_p (x, PATTERN (*insn_ptr)))
11632 /* Return true if there is true register dependence between vr4130_last_insn
11636 vr4130_true_reg_dependence_p (rtx insn)
11638 note_stores (PATTERN (vr4130_last_insn),
11639 vr4130_true_reg_dependence_p_1, &insn);
11643 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
11644 the ready queue and that INSN2 is the instruction after it, return
11645 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
11646 in which INSN1 and INSN2 can probably issue in parallel, but for
11647 which (INSN2, INSN1) should be less sensitive to instruction
11648 alignment than (INSN1, INSN2). See 4130.md for more details. */
11651 vr4130_swap_insns_p (rtx insn1, rtx insn2)
11653 sd_iterator_def sd_it;
11656 /* Check for the following case:
11658 1) there is some other instruction X with an anti dependence on INSN1;
11659 2) X has a higher priority than INSN2; and
11660 3) X is an arithmetic instruction (and thus has no unit restrictions).
11662 If INSN1 is the last instruction blocking X, it would better to
11663 choose (INSN1, X) over (INSN2, INSN1). */
11664 FOR_EACH_DEP (insn1, SD_LIST_FORW, sd_it, dep)
11665 if (DEP_TYPE (dep) == REG_DEP_ANTI
11666 && INSN_PRIORITY (DEP_CON (dep)) > INSN_PRIORITY (insn2)
11667 && recog_memoized (DEP_CON (dep)) >= 0
11668 && get_attr_vr4130_class (DEP_CON (dep)) == VR4130_CLASS_ALU)
11671 if (vr4130_last_insn != 0
11672 && recog_memoized (insn1) >= 0
11673 && recog_memoized (insn2) >= 0)
11675 /* See whether INSN1 and INSN2 use different execution units,
11676 or if they are both ALU-type instructions. If so, they can
11677 probably execute in parallel. */
11678 enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
11679 enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
11680 if (class1 != class2 || class1 == VR4130_CLASS_ALU)
11682 /* If only one of the instructions has a dependence on
11683 vr4130_last_insn, prefer to schedule the other one first. */
11684 bool dep1 = vr4130_true_reg_dependence_p (insn1);
11685 bool dep2 = vr4130_true_reg_dependence_p (insn2);
11689 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
11690 is not an ALU-type instruction and if INSN1 uses the same
11691 execution unit. (Note that if this condition holds, we already
11692 know that INSN2 uses a different execution unit.) */
11693 if (class1 != VR4130_CLASS_ALU
11694 && recog_memoized (vr4130_last_insn) >= 0
11695 && class1 == get_attr_vr4130_class (vr4130_last_insn))
11702 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
11703 queue with at least two instructions. Swap the first two if
11704 vr4130_swap_insns_p says that it could be worthwhile. */
11707 vr4130_reorder (rtx *ready, int nready)
11709 if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
11710 mips_promote_ready (ready, nready - 2, nready - 1);
11713 /* Remove the instruction at index LOWER from ready queue READY and
11714 reinsert it in front of the instruction at index HIGHER. LOWER must
11718 mips_promote_ready (rtx *ready, int lower, int higher)
11723 new_head = ready[lower];
11724 for (i = lower; i < higher; i++)
11725 ready[i] = ready[i + 1];
11726 ready[i] = new_head;
11729 /* If the priority of the instruction at POS2 in the ready queue READY
11730 is within LIMIT units of that of the instruction at POS1, swap the
11731 instructions if POS2 is not already less than POS1. */
11734 mips_maybe_swap_ready (rtx *ready, int pos1, int pos2, int limit)
11737 && INSN_PRIORITY (ready[pos1]) + limit >= INSN_PRIORITY (ready[pos2]))
11740 temp = ready[pos1];
11741 ready[pos1] = ready[pos2];
11742 ready[pos2] = temp;
11746 /* Record whether last 74k AGEN instruction was a load or store. */
11748 static enum attr_type mips_last_74k_agen_insn = TYPE_UNKNOWN;
11750 /* Initialize mips_last_74k_agen_insn from INSN. A null argument
11751 resets to TYPE_UNKNOWN state. */
11754 mips_74k_agen_init (rtx insn)
11756 if (!insn || !NONJUMP_INSN_P (insn))
11757 mips_last_74k_agen_insn = TYPE_UNKNOWN;
11758 else if (USEFUL_INSN_P (insn))
11760 enum attr_type type = get_attr_type (insn);
11761 if (type == TYPE_LOAD || type == TYPE_STORE)
11762 mips_last_74k_agen_insn = type;
11766 /* A TUNE_74K helper function. The 74K AGEN pipeline likes multiple
11767 loads to be grouped together, and multiple stores to be grouped
11768 together. Swap things around in the ready queue to make this happen. */
11771 mips_74k_agen_reorder (rtx *ready, int nready)
11774 int store_pos, load_pos;
11779 for (i = nready - 1; i >= 0; i--)
11781 rtx insn = ready[i];
11782 if (USEFUL_INSN_P (insn))
11783 switch (get_attr_type (insn))
11786 if (store_pos == -1)
11791 if (load_pos == -1)
11800 if (load_pos == -1 || store_pos == -1)
11803 switch (mips_last_74k_agen_insn)
11806 /* Prefer to schedule loads since they have a higher latency. */
11808 /* Swap loads to the front of the queue. */
11809 mips_maybe_swap_ready (ready, load_pos, store_pos, 4);
11812 /* Swap stores to the front of the queue. */
11813 mips_maybe_swap_ready (ready, store_pos, load_pos, 4);
11820 /* Implement TARGET_SCHED_INIT. */
11823 mips_sched_init (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
11824 int max_ready ATTRIBUTE_UNUSED)
11826 mips_macc_chains_last_hilo = 0;
11827 vr4130_last_insn = 0;
11828 mips_74k_agen_init (NULL_RTX);
11831 /* Implement TARGET_SCHED_REORDER and TARG_SCHED_REORDER2. */
11834 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
11835 rtx *ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
11837 if (!reload_completed
11838 && TUNE_MACC_CHAINS
11840 mips_macc_chains_reorder (ready, *nreadyp);
11841 if (reload_completed
11843 && !TARGET_VR4130_ALIGN
11845 vr4130_reorder (ready, *nreadyp);
11847 mips_74k_agen_reorder (ready, *nreadyp);
11848 return mips_issue_rate ();
11851 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
11854 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
11855 rtx insn, int more)
11858 mips_74k_agen_init (insn);
11859 switch (GET_CODE (PATTERN (insn)))
11863 /* Don't count USEs and CLOBBERs against the issue rate. */
11868 if (!reload_completed && TUNE_MACC_CHAINS)
11869 mips_macc_chains_record (insn);
11870 vr4130_last_insn = insn;
11876 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
11877 dependencies have no cost, except on the 20Kc where output-dependence
11878 is treated like input-dependence. */
11881 mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
11882 rtx dep ATTRIBUTE_UNUSED, int cost)
11884 if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
11887 if (REG_NOTE_KIND (link) != 0)
11892 /* Return the number of instructions that can be issued per cycle. */
11895 mips_issue_rate (void)
11899 case PROCESSOR_74KC:
11900 case PROCESSOR_74KF2_1:
11901 case PROCESSOR_74KF1_1:
11902 case PROCESSOR_74KF3_2:
11903 /* The 74k is not strictly quad-issue cpu, but can be seen as one
11904 by the scheduler. It can issue 1 ALU, 1 AGEN and 2 FPU insns,
11905 but in reality only a maximum of 3 insns can be issued as the
11906 floating point load/stores also require a slot in the AGEN pipe. */
11909 case PROCESSOR_20KC:
11910 case PROCESSOR_R4130:
11911 case PROCESSOR_R5400:
11912 case PROCESSOR_R5500:
11913 case PROCESSOR_R7000:
11914 case PROCESSOR_R9000:
11917 case PROCESSOR_SB1:
11918 case PROCESSOR_SB1A:
11919 /* This is actually 4, but we get better performance if we claim 3.
11920 This is partly because of unwanted speculative code motion with the
11921 larger number, and partly because in most common cases we can't
11922 reach the theoretical max of 4. */
11930 /* Implements TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
11931 be as wide as the scheduling freedom in the DFA. */
11934 mips_multipass_dfa_lookahead (void)
11936 /* Can schedule up to 4 of the 6 function units in any one cycle. */
11943 /* Implements a store data bypass check. We need this because the cprestore
11944 pattern is type store, but defined using an UNSPEC. This UNSPEC causes the
11945 default routine to abort. We just return false for that case. */
11946 /* ??? Should try to give a better result here than assuming false. */
11949 mips_store_data_bypass_p (rtx out_insn, rtx in_insn)
11951 if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE)
11954 return ! store_data_bypass_p (out_insn, in_insn);
11957 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
11958 return the first operand of the associated "pref" or "prefx" insn. */
11961 mips_prefetch_cookie (rtx write, rtx locality)
11963 /* store_streamed / load_streamed. */
11964 if (INTVAL (locality) <= 0)
11965 return GEN_INT (INTVAL (write) + 4);
11967 /* store / load. */
11968 if (INTVAL (locality) <= 2)
11971 /* store_retained / load_retained. */
11972 return GEN_INT (INTVAL (write) + 6);
11975 /* MIPS builtin function support. */
11977 struct builtin_description
11979 /* The code of the main .md file instruction. See mips_builtin_type
11980 for more information. */
11981 enum insn_code icode;
11983 /* The floating-point comparison code to use with ICODE, if any. */
11984 enum mips_fp_condition cond;
11986 /* The name of the builtin function. */
11989 /* Specifies how the function should be expanded. */
11990 enum mips_builtin_type builtin_type;
11992 /* The function's prototype. */
11993 enum mips_function_type function_type;
11995 /* The target flags required for this function. */
11999 /* Define a MIPS_BUILTIN_DIRECT function for instruction CODE_FOR_mips_<INSN>.
12000 FUNCTION_TYPE and TARGET_FLAGS are builtin_description fields. */
12001 #define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
12002 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
12003 MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, TARGET_FLAGS }
12005 /* Define __builtin_mips_<INSN>_<COND>_{s,d}, both of which require
12007 #define CMP_SCALAR_BUILTINS(INSN, COND, TARGET_FLAGS) \
12008 { CODE_FOR_mips_ ## INSN ## _cond_s, MIPS_FP_COND_ ## COND, \
12009 "__builtin_mips_" #INSN "_" #COND "_s", \
12010 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, TARGET_FLAGS }, \
12011 { CODE_FOR_mips_ ## INSN ## _cond_d, MIPS_FP_COND_ ## COND, \
12012 "__builtin_mips_" #INSN "_" #COND "_d", \
12013 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, TARGET_FLAGS }
12015 /* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
12016 The lower and upper forms require TARGET_FLAGS while the any and all
12017 forms require MASK_MIPS3D. */
12018 #define CMP_PS_BUILTINS(INSN, COND, TARGET_FLAGS) \
12019 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
12020 "__builtin_mips_any_" #INSN "_" #COND "_ps", \
12021 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
12022 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
12023 "__builtin_mips_all_" #INSN "_" #COND "_ps", \
12024 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
12025 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
12026 "__builtin_mips_lower_" #INSN "_" #COND "_ps", \
12027 MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }, \
12028 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
12029 "__builtin_mips_upper_" #INSN "_" #COND "_ps", \
12030 MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }
12032 /* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s. The functions
12033 require MASK_MIPS3D. */
12034 #define CMP_4S_BUILTINS(INSN, COND) \
12035 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
12036 "__builtin_mips_any_" #INSN "_" #COND "_4s", \
12037 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
12039 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
12040 "__builtin_mips_all_" #INSN "_" #COND "_4s", \
12041 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
12044 /* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps. The comparison
12045 instruction requires TARGET_FLAGS. */
12046 #define MOVTF_BUILTINS(INSN, COND, TARGET_FLAGS) \
12047 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
12048 "__builtin_mips_movt_" #INSN "_" #COND "_ps", \
12049 MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
12051 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
12052 "__builtin_mips_movf_" #INSN "_" #COND "_ps", \
12053 MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
12056 /* Define all the builtins related to c.cond.fmt condition COND. */
12057 #define CMP_BUILTINS(COND) \
12058 MOVTF_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
12059 MOVTF_BUILTINS (cabs, COND, MASK_MIPS3D), \
12060 CMP_SCALAR_BUILTINS (cabs, COND, MASK_MIPS3D), \
12061 CMP_PS_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
12062 CMP_PS_BUILTINS (cabs, COND, MASK_MIPS3D), \
12063 CMP_4S_BUILTINS (c, COND), \
12064 CMP_4S_BUILTINS (cabs, COND)
12066 static const struct builtin_description mips_bdesc[] =
12068 DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
12069 DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
12070 DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
12071 DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
12072 DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, MASK_PAIRED_SINGLE_FLOAT),
12073 DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
12074 DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
12075 DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
12077 DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
12078 MASK_PAIRED_SINGLE_FLOAT),
12079 DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
12080 DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
12081 DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
12082 DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
12084 DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
12085 DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
12086 DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
12087 DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
12088 DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
12089 DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
12091 DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
12092 DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
12093 DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
12094 DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
12095 DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
12096 DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
12098 MIPS_FP_CONDITIONS (CMP_BUILTINS)
12101 /* Builtin functions for the SB-1 processor. */
12103 #define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
12105 static const struct builtin_description sb1_bdesc[] =
12107 DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT)
12110 /* Builtin functions for DSP ASE. */
12112 #define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
12113 #define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
12114 #define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
12115 #define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
12116 #define CODE_FOR_mips_mul_ph CODE_FOR_mulv2hi3
12118 /* Define a MIPS_BUILTIN_DIRECT_NO_TARGET function for instruction
12119 CODE_FOR_mips_<INSN>. FUNCTION_TYPE and TARGET_FLAGS are
12120 builtin_description fields. */
12121 #define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
12122 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
12123 MIPS_BUILTIN_DIRECT_NO_TARGET, FUNCTION_TYPE, TARGET_FLAGS }
12125 /* Define __builtin_mips_bposge<VALUE>. <VALUE> is 32 for the MIPS32 DSP
12126 branch instruction. TARGET_FLAGS is a builtin_description field. */
12127 #define BPOSGE_BUILTIN(VALUE, TARGET_FLAGS) \
12128 { CODE_FOR_mips_bposge, 0, "__builtin_mips_bposge" #VALUE, \
12129 MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, TARGET_FLAGS }
12131 static const struct builtin_description dsp_bdesc[] =
12133 DIRECT_BUILTIN (addq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
12134 DIRECT_BUILTIN (addq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
12135 DIRECT_BUILTIN (addq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
12136 DIRECT_BUILTIN (addu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
12137 DIRECT_BUILTIN (addu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
12138 DIRECT_BUILTIN (subq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
12139 DIRECT_BUILTIN (subq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
12140 DIRECT_BUILTIN (subq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
12141 DIRECT_BUILTIN (subu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
12142 DIRECT_BUILTIN (subu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
12143 DIRECT_BUILTIN (addsc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
12144 DIRECT_BUILTIN (addwc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
12145 DIRECT_BUILTIN (modsub, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
12146 DIRECT_BUILTIN (raddu_w_qb, MIPS_SI_FTYPE_V4QI, MASK_DSP),
12147 DIRECT_BUILTIN (absq_s_ph, MIPS_V2HI_FTYPE_V2HI, MASK_DSP),
12148 DIRECT_BUILTIN (absq_s_w, MIPS_SI_FTYPE_SI, MASK_DSP),
12149 DIRECT_BUILTIN (precrq_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
12150 DIRECT_BUILTIN (precrq_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
12151 DIRECT_BUILTIN (precrq_rs_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
12152 DIRECT_BUILTIN (precrqu_s_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
12153 DIRECT_BUILTIN (preceq_w_phl, MIPS_SI_FTYPE_V2HI, MASK_DSP),
12154 DIRECT_BUILTIN (preceq_w_phr, MIPS_SI_FTYPE_V2HI, MASK_DSP),
12155 DIRECT_BUILTIN (precequ_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
12156 DIRECT_BUILTIN (precequ_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
12157 DIRECT_BUILTIN (precequ_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
12158 DIRECT_BUILTIN (precequ_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
12159 DIRECT_BUILTIN (preceu_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
12160 DIRECT_BUILTIN (preceu_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
12161 DIRECT_BUILTIN (preceu_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
12162 DIRECT_BUILTIN (preceu_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
12163 DIRECT_BUILTIN (shll_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
12164 DIRECT_BUILTIN (shll_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
12165 DIRECT_BUILTIN (shll_s_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
12166 DIRECT_BUILTIN (shll_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
12167 DIRECT_BUILTIN (shrl_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
12168 DIRECT_BUILTIN (shra_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
12169 DIRECT_BUILTIN (shra_r_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
12170 DIRECT_BUILTIN (shra_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
12171 DIRECT_BUILTIN (muleu_s_ph_qbl, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
12172 DIRECT_BUILTIN (muleu_s_ph_qbr, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
12173 DIRECT_BUILTIN (mulq_rs_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
12174 DIRECT_BUILTIN (muleq_s_w_phl, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
12175 DIRECT_BUILTIN (muleq_s_w_phr, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
12176 DIRECT_BUILTIN (bitrev, MIPS_SI_FTYPE_SI, MASK_DSP),
12177 DIRECT_BUILTIN (insv, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
12178 DIRECT_BUILTIN (repl_qb, MIPS_V4QI_FTYPE_SI, MASK_DSP),
12179 DIRECT_BUILTIN (repl_ph, MIPS_V2HI_FTYPE_SI, MASK_DSP),
12180 DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
12181 DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
12182 DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
12183 DIRECT_BUILTIN (cmpgu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
12184 DIRECT_BUILTIN (cmpgu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
12185 DIRECT_BUILTIN (cmpgu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
12186 DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
12187 DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
12188 DIRECT_NO_TARGET_BUILTIN (cmp_le_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
12189 DIRECT_BUILTIN (pick_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
12190 DIRECT_BUILTIN (pick_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
12191 DIRECT_BUILTIN (packrl_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
12192 DIRECT_NO_TARGET_BUILTIN (wrdsp, MIPS_VOID_FTYPE_SI_SI, MASK_DSP),
12193 DIRECT_BUILTIN (rddsp, MIPS_SI_FTYPE_SI, MASK_DSP),
12194 DIRECT_BUILTIN (lbux, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
12195 DIRECT_BUILTIN (lhx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
12196 DIRECT_BUILTIN (lwx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
12197 BPOSGE_BUILTIN (32, MASK_DSP),
12199 /* The following are for the MIPS DSP ASE REV 2. */
12200 DIRECT_BUILTIN (absq_s_qb, MIPS_V4QI_FTYPE_V4QI, MASK_DSPR2),
12201 DIRECT_BUILTIN (addu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12202 DIRECT_BUILTIN (addu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12203 DIRECT_BUILTIN (adduh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
12204 DIRECT_BUILTIN (adduh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
12205 DIRECT_BUILTIN (append, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
12206 DIRECT_BUILTIN (balign, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
12207 DIRECT_BUILTIN (cmpgdu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
12208 DIRECT_BUILTIN (cmpgdu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
12209 DIRECT_BUILTIN (cmpgdu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
12210 DIRECT_BUILTIN (mul_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12211 DIRECT_BUILTIN (mul_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12212 DIRECT_BUILTIN (mulq_rs_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
12213 DIRECT_BUILTIN (mulq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12214 DIRECT_BUILTIN (mulq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
12215 DIRECT_BUILTIN (precr_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12216 DIRECT_BUILTIN (precr_sra_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
12217 DIRECT_BUILTIN (precr_sra_r_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
12218 DIRECT_BUILTIN (prepend, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
12219 DIRECT_BUILTIN (shra_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
12220 DIRECT_BUILTIN (shra_r_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
12221 DIRECT_BUILTIN (shrl_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSPR2),
12222 DIRECT_BUILTIN (subu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12223 DIRECT_BUILTIN (subu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12224 DIRECT_BUILTIN (subuh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
12225 DIRECT_BUILTIN (subuh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
12226 DIRECT_BUILTIN (addqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12227 DIRECT_BUILTIN (addqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12228 DIRECT_BUILTIN (addqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
12229 DIRECT_BUILTIN (addqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
12230 DIRECT_BUILTIN (subqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12231 DIRECT_BUILTIN (subqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
12232 DIRECT_BUILTIN (subqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
12233 DIRECT_BUILTIN (subqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2)
12236 static const struct builtin_description dsp_32only_bdesc[] =
12238 DIRECT_BUILTIN (dpau_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
12239 DIRECT_BUILTIN (dpau_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
12240 DIRECT_BUILTIN (dpsu_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
12241 DIRECT_BUILTIN (dpsu_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
12242 DIRECT_BUILTIN (dpaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
12243 DIRECT_BUILTIN (dpsq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
12244 DIRECT_BUILTIN (mulsaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
12245 DIRECT_BUILTIN (dpaq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
12246 DIRECT_BUILTIN (dpsq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
12247 DIRECT_BUILTIN (maq_s_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
12248 DIRECT_BUILTIN (maq_s_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
12249 DIRECT_BUILTIN (maq_sa_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
12250 DIRECT_BUILTIN (maq_sa_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
12251 DIRECT_BUILTIN (extr_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
12252 DIRECT_BUILTIN (extr_r_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
12253 DIRECT_BUILTIN (extr_rs_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
12254 DIRECT_BUILTIN (extr_s_h, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
12255 DIRECT_BUILTIN (extp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
12256 DIRECT_BUILTIN (extpdp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
12257 DIRECT_BUILTIN (shilo, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
12258 DIRECT_BUILTIN (mthlip, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
12260 /* The following are for the MIPS DSP ASE REV 2. */
12261 DIRECT_BUILTIN (dpa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12262 DIRECT_BUILTIN (dps_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12263 DIRECT_BUILTIN (madd, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
12264 DIRECT_BUILTIN (maddu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
12265 DIRECT_BUILTIN (msub, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
12266 DIRECT_BUILTIN (msubu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
12267 DIRECT_BUILTIN (mulsa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12268 DIRECT_BUILTIN (mult, MIPS_DI_FTYPE_SI_SI, MASK_DSPR2),
12269 DIRECT_BUILTIN (multu, MIPS_DI_FTYPE_USI_USI, MASK_DSPR2),
12270 DIRECT_BUILTIN (dpax_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12271 DIRECT_BUILTIN (dpsx_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12272 DIRECT_BUILTIN (dpaqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12273 DIRECT_BUILTIN (dpaqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12274 DIRECT_BUILTIN (dpsqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
12275 DIRECT_BUILTIN (dpsqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2)
12278 /* This helps provide a mapping from builtin function codes to bdesc
12283 /* The builtin function table that this entry describes. */
12284 const struct builtin_description *bdesc;
12286 /* The number of entries in the builtin function table. */
12289 /* The target processor that supports these builtin functions.
12290 PROCESSOR_MAX means we enable them for all processors. */
12291 enum processor_type proc;
12293 /* If the target has these flags, this builtin function table
12294 will not be supported. */
12295 int unsupported_target_flags;
12298 static const struct bdesc_map bdesc_arrays[] =
12300 { mips_bdesc, ARRAY_SIZE (mips_bdesc), PROCESSOR_MAX, 0 },
12301 { sb1_bdesc, ARRAY_SIZE (sb1_bdesc), PROCESSOR_SB1, 0 },
12302 { dsp_bdesc, ARRAY_SIZE (dsp_bdesc), PROCESSOR_MAX, 0 },
12303 { dsp_32only_bdesc, ARRAY_SIZE (dsp_32only_bdesc), PROCESSOR_MAX,
12307 /* Take the argument ARGNUM of the arglist of EXP and convert it into a form
12308 suitable for input operand OP of instruction ICODE. Return the value. */
12311 mips_prepare_builtin_arg (enum insn_code icode,
12312 unsigned int op, tree exp, unsigned int argnum)
12315 enum machine_mode mode;
12317 value = expand_normal (CALL_EXPR_ARG (exp, argnum));
12318 mode = insn_data[icode].operand[op].mode;
12319 if (!insn_data[icode].operand[op].predicate (value, mode))
12321 value = copy_to_mode_reg (mode, value);
12322 /* Check the predicate again. */
12323 if (!insn_data[icode].operand[op].predicate (value, mode))
12325 error ("invalid argument to builtin function");
12333 /* Return an rtx suitable for output operand OP of instruction ICODE.
12334 If TARGET is non-null, try to use it where possible. */
12337 mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
12339 enum machine_mode mode;
12341 mode = insn_data[icode].operand[op].mode;
12342 if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
12343 target = gen_reg_rtx (mode);
12348 /* Expand builtin functions. This is called from TARGET_EXPAND_BUILTIN. */
12351 mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
12352 enum machine_mode mode ATTRIBUTE_UNUSED,
12353 int ignore ATTRIBUTE_UNUSED)
12355 enum insn_code icode;
12356 enum mips_builtin_type type;
12358 unsigned int fcode;
12359 const struct builtin_description *bdesc;
12360 const struct bdesc_map *m;
12362 fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
12363 fcode = DECL_FUNCTION_CODE (fndecl);
12367 error ("built-in function %qs not supported for MIPS16",
12368 IDENTIFIER_POINTER (DECL_NAME (fndecl)));
12373 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
12375 if (fcode < m->size)
12378 icode = bdesc[fcode].icode;
12379 type = bdesc[fcode].builtin_type;
12389 case MIPS_BUILTIN_DIRECT:
12390 return mips_expand_builtin_direct (icode, target, exp, true);
12392 case MIPS_BUILTIN_DIRECT_NO_TARGET:
12393 return mips_expand_builtin_direct (icode, target, exp, false);
12395 case MIPS_BUILTIN_MOVT:
12396 case MIPS_BUILTIN_MOVF:
12397 return mips_expand_builtin_movtf (type, icode, bdesc[fcode].cond,
12400 case MIPS_BUILTIN_CMP_ANY:
12401 case MIPS_BUILTIN_CMP_ALL:
12402 case MIPS_BUILTIN_CMP_UPPER:
12403 case MIPS_BUILTIN_CMP_LOWER:
12404 case MIPS_BUILTIN_CMP_SINGLE:
12405 return mips_expand_builtin_compare (type, icode, bdesc[fcode].cond,
12408 case MIPS_BUILTIN_BPOSGE32:
12409 return mips_expand_builtin_bposge (type, target);
12416 /* Init builtin functions. This is called from TARGET_INIT_BUILTIN. */
12419 mips_init_builtins (void)
12421 const struct builtin_description *d;
12422 const struct bdesc_map *m;
12423 tree types[(int) MIPS_MAX_FTYPE_MAX];
12424 tree V2SF_type_node;
12425 tree V2HI_type_node;
12426 tree V4QI_type_node;
12427 unsigned int offset;
12429 /* We have only builtins for -mpaired-single, -mips3d and -mdsp. */
12430 if (!TARGET_PAIRED_SINGLE_FLOAT && !TARGET_DSP)
12433 if (TARGET_PAIRED_SINGLE_FLOAT)
12435 V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
12437 types[MIPS_V2SF_FTYPE_V2SF]
12438 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
12440 types[MIPS_V2SF_FTYPE_V2SF_V2SF]
12441 = build_function_type_list (V2SF_type_node,
12442 V2SF_type_node, V2SF_type_node, NULL_TREE);
12444 types[MIPS_V2SF_FTYPE_V2SF_V2SF_INT]
12445 = build_function_type_list (V2SF_type_node,
12446 V2SF_type_node, V2SF_type_node,
12447 integer_type_node, NULL_TREE);
12449 types[MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF]
12450 = build_function_type_list (V2SF_type_node,
12451 V2SF_type_node, V2SF_type_node,
12452 V2SF_type_node, V2SF_type_node, NULL_TREE);
12454 types[MIPS_V2SF_FTYPE_SF_SF]
12455 = build_function_type_list (V2SF_type_node,
12456 float_type_node, float_type_node, NULL_TREE);
12458 types[MIPS_INT_FTYPE_V2SF_V2SF]
12459 = build_function_type_list (integer_type_node,
12460 V2SF_type_node, V2SF_type_node, NULL_TREE);
12462 types[MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF]
12463 = build_function_type_list (integer_type_node,
12464 V2SF_type_node, V2SF_type_node,
12465 V2SF_type_node, V2SF_type_node, NULL_TREE);
12467 types[MIPS_INT_FTYPE_SF_SF]
12468 = build_function_type_list (integer_type_node,
12469 float_type_node, float_type_node, NULL_TREE);
12471 types[MIPS_INT_FTYPE_DF_DF]
12472 = build_function_type_list (integer_type_node,
12473 double_type_node, double_type_node, NULL_TREE);
12475 types[MIPS_SF_FTYPE_V2SF]
12476 = build_function_type_list (float_type_node, V2SF_type_node, NULL_TREE);
12478 types[MIPS_SF_FTYPE_SF]
12479 = build_function_type_list (float_type_node,
12480 float_type_node, NULL_TREE);
12482 types[MIPS_SF_FTYPE_SF_SF]
12483 = build_function_type_list (float_type_node,
12484 float_type_node, float_type_node, NULL_TREE);
12486 types[MIPS_DF_FTYPE_DF]
12487 = build_function_type_list (double_type_node,
12488 double_type_node, NULL_TREE);
12490 types[MIPS_DF_FTYPE_DF_DF]
12491 = build_function_type_list (double_type_node,
12492 double_type_node, double_type_node, NULL_TREE);
12497 V2HI_type_node = build_vector_type_for_mode (intHI_type_node, V2HImode);
12498 V4QI_type_node = build_vector_type_for_mode (intQI_type_node, V4QImode);
12500 types[MIPS_V2HI_FTYPE_V2HI_V2HI]
12501 = build_function_type_list (V2HI_type_node,
12502 V2HI_type_node, V2HI_type_node,
12505 types[MIPS_SI_FTYPE_SI_SI]
12506 = build_function_type_list (intSI_type_node,
12507 intSI_type_node, intSI_type_node,
12510 types[MIPS_V4QI_FTYPE_V4QI_V4QI]
12511 = build_function_type_list (V4QI_type_node,
12512 V4QI_type_node, V4QI_type_node,
12515 types[MIPS_SI_FTYPE_V4QI]
12516 = build_function_type_list (intSI_type_node,
12520 types[MIPS_V2HI_FTYPE_V2HI]
12521 = build_function_type_list (V2HI_type_node,
12525 types[MIPS_SI_FTYPE_SI]
12526 = build_function_type_list (intSI_type_node,
12530 types[MIPS_V4QI_FTYPE_V2HI_V2HI]
12531 = build_function_type_list (V4QI_type_node,
12532 V2HI_type_node, V2HI_type_node,
12535 types[MIPS_V2HI_FTYPE_SI_SI]
12536 = build_function_type_list (V2HI_type_node,
12537 intSI_type_node, intSI_type_node,
12540 types[MIPS_SI_FTYPE_V2HI]
12541 = build_function_type_list (intSI_type_node,
12545 types[MIPS_V2HI_FTYPE_V4QI]
12546 = build_function_type_list (V2HI_type_node,
12550 types[MIPS_V4QI_FTYPE_V4QI_SI]
12551 = build_function_type_list (V4QI_type_node,
12552 V4QI_type_node, intSI_type_node,
12555 types[MIPS_V2HI_FTYPE_V2HI_SI]
12556 = build_function_type_list (V2HI_type_node,
12557 V2HI_type_node, intSI_type_node,
12560 types[MIPS_V2HI_FTYPE_V4QI_V2HI]
12561 = build_function_type_list (V2HI_type_node,
12562 V4QI_type_node, V2HI_type_node,
12565 types[MIPS_SI_FTYPE_V2HI_V2HI]
12566 = build_function_type_list (intSI_type_node,
12567 V2HI_type_node, V2HI_type_node,
12570 types[MIPS_DI_FTYPE_DI_V4QI_V4QI]
12571 = build_function_type_list (intDI_type_node,
12572 intDI_type_node, V4QI_type_node, V4QI_type_node,
12575 types[MIPS_DI_FTYPE_DI_V2HI_V2HI]
12576 = build_function_type_list (intDI_type_node,
12577 intDI_type_node, V2HI_type_node, V2HI_type_node,
12580 types[MIPS_DI_FTYPE_DI_SI_SI]
12581 = build_function_type_list (intDI_type_node,
12582 intDI_type_node, intSI_type_node, intSI_type_node,
12585 types[MIPS_V4QI_FTYPE_SI]
12586 = build_function_type_list (V4QI_type_node,
12590 types[MIPS_V2HI_FTYPE_SI]
12591 = build_function_type_list (V2HI_type_node,
12595 types[MIPS_VOID_FTYPE_V4QI_V4QI]
12596 = build_function_type_list (void_type_node,
12597 V4QI_type_node, V4QI_type_node,
12600 types[MIPS_SI_FTYPE_V4QI_V4QI]
12601 = build_function_type_list (intSI_type_node,
12602 V4QI_type_node, V4QI_type_node,
12605 types[MIPS_VOID_FTYPE_V2HI_V2HI]
12606 = build_function_type_list (void_type_node,
12607 V2HI_type_node, V2HI_type_node,
12610 types[MIPS_SI_FTYPE_DI_SI]
12611 = build_function_type_list (intSI_type_node,
12612 intDI_type_node, intSI_type_node,
12615 types[MIPS_DI_FTYPE_DI_SI]
12616 = build_function_type_list (intDI_type_node,
12617 intDI_type_node, intSI_type_node,
12620 types[MIPS_VOID_FTYPE_SI_SI]
12621 = build_function_type_list (void_type_node,
12622 intSI_type_node, intSI_type_node,
12625 types[MIPS_SI_FTYPE_PTR_SI]
12626 = build_function_type_list (intSI_type_node,
12627 ptr_type_node, intSI_type_node,
12630 types[MIPS_SI_FTYPE_VOID]
12631 = build_function_type (intSI_type_node, void_list_node);
12635 types[MIPS_V4QI_FTYPE_V4QI]
12636 = build_function_type_list (V4QI_type_node,
12640 types[MIPS_SI_FTYPE_SI_SI_SI]
12641 = build_function_type_list (intSI_type_node,
12642 intSI_type_node, intSI_type_node,
12643 intSI_type_node, NULL_TREE);
12645 types[MIPS_DI_FTYPE_DI_USI_USI]
12646 = build_function_type_list (intDI_type_node,
12648 unsigned_intSI_type_node,
12649 unsigned_intSI_type_node, NULL_TREE);
12651 types[MIPS_DI_FTYPE_SI_SI]
12652 = build_function_type_list (intDI_type_node,
12653 intSI_type_node, intSI_type_node,
12656 types[MIPS_DI_FTYPE_USI_USI]
12657 = build_function_type_list (intDI_type_node,
12658 unsigned_intSI_type_node,
12659 unsigned_intSI_type_node, NULL_TREE);
12661 types[MIPS_V2HI_FTYPE_SI_SI_SI]
12662 = build_function_type_list (V2HI_type_node,
12663 intSI_type_node, intSI_type_node,
12664 intSI_type_node, NULL_TREE);
12669 /* Iterate through all of the bdesc arrays, initializing all of the
12670 builtin functions. */
12673 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
12675 if ((m->proc == PROCESSOR_MAX || (m->proc == mips_arch))
12676 && (m->unsupported_target_flags & target_flags) == 0)
12677 for (d = m->bdesc; d < &m->bdesc[m->size]; d++)
12678 if ((d->target_flags & target_flags) == d->target_flags)
12679 add_builtin_function (d->name, types[d->function_type],
12680 d - m->bdesc + offset,
12681 BUILT_IN_MD, NULL, NULL);
12686 /* Expand a MIPS_BUILTIN_DIRECT function. ICODE is the code of the
12687 .md pattern and CALL is the function expr with arguments. TARGET,
12688 if nonnull, suggests a good place to put the result.
12689 HAS_TARGET indicates the function must return something. */
12692 mips_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
12695 rtx ops[MAX_RECOG_OPERANDS];
12701 /* We save target to ops[0]. */
12702 ops[0] = mips_prepare_builtin_target (icode, 0, target);
12706 /* We need to test if the arglist is not zero. Some instructions have extra
12707 clobber registers. */
12708 for (; i < insn_data[icode].n_operands && i <= call_expr_nargs (exp); i++, j++)
12709 ops[i] = mips_prepare_builtin_arg (icode, i, exp, j);
12714 emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
12718 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
12722 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
12726 gcc_unreachable ();
12731 /* Expand a __builtin_mips_movt_*_ps() or __builtin_mips_movf_*_ps()
12732 function (TYPE says which). EXP is the tree for the function
12733 function, ICODE is the instruction that should be used to compare
12734 the first two arguments, and COND is the condition it should test.
12735 TARGET, if nonnull, suggests a good place to put the result. */
12738 mips_expand_builtin_movtf (enum mips_builtin_type type,
12739 enum insn_code icode, enum mips_fp_condition cond,
12740 rtx target, tree exp)
12742 rtx cmp_result, op0, op1;
12744 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
12745 op0 = mips_prepare_builtin_arg (icode, 1, exp, 0);
12746 op1 = mips_prepare_builtin_arg (icode, 2, exp, 1);
12747 emit_insn (GEN_FCN (icode) (cmp_result, op0, op1, GEN_INT (cond)));
12749 icode = CODE_FOR_mips_cond_move_tf_ps;
12750 target = mips_prepare_builtin_target (icode, 0, target);
12751 if (type == MIPS_BUILTIN_MOVT)
12753 op1 = mips_prepare_builtin_arg (icode, 2, exp, 2);
12754 op0 = mips_prepare_builtin_arg (icode, 1, exp, 3);
12758 op0 = mips_prepare_builtin_arg (icode, 1, exp, 2);
12759 op1 = mips_prepare_builtin_arg (icode, 2, exp, 3);
12761 emit_insn (gen_mips_cond_move_tf_ps (target, op0, op1, cmp_result));
12765 /* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
12766 into TARGET otherwise. Return TARGET. */
12769 mips_builtin_branch_and_move (rtx condition, rtx target,
12770 rtx value_if_true, rtx value_if_false)
12772 rtx true_label, done_label;
12774 true_label = gen_label_rtx ();
12775 done_label = gen_label_rtx ();
12777 /* First assume that CONDITION is false. */
12778 mips_emit_move (target, value_if_false);
12780 /* Branch to TRUE_LABEL if CONDITION is true and DONE_LABEL otherwise. */
12781 emit_jump_insn (gen_condjump (condition, true_label));
12782 emit_jump_insn (gen_jump (done_label));
12785 /* Fix TARGET if CONDITION is true. */
12786 emit_label (true_label);
12787 mips_emit_move (target, value_if_true);
12789 emit_label (done_label);
12793 /* Expand a comparison builtin of type BUILTIN_TYPE. ICODE is the code
12794 of the comparison instruction and COND is the condition it should test.
12795 EXP is the function call and arguments and TARGET, if nonnull,
12796 suggests a good place to put the boolean result. */
12799 mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
12800 enum insn_code icode, enum mips_fp_condition cond,
12801 rtx target, tree exp)
12803 rtx offset, condition, cmp_result, ops[MAX_RECOG_OPERANDS];
12807 if (target == 0 || GET_MODE (target) != SImode)
12808 target = gen_reg_rtx (SImode);
12810 /* Prepare the operands to the comparison. */
12811 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
12812 for (i = 1; i < insn_data[icode].n_operands - 1; i++, j++)
12813 ops[i] = mips_prepare_builtin_arg (icode, i, exp, j);
12815 switch (insn_data[icode].n_operands)
12818 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2], GEN_INT (cond)));
12822 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2],
12823 ops[3], ops[4], GEN_INT (cond)));
12827 gcc_unreachable ();
12830 /* If the comparison sets more than one register, we define the result
12831 to be 0 if all registers are false and -1 if all registers are true.
12832 The value of the complete result is indeterminate otherwise. */
12833 switch (builtin_type)
12835 case MIPS_BUILTIN_CMP_ALL:
12836 condition = gen_rtx_NE (VOIDmode, cmp_result, constm1_rtx);
12837 return mips_builtin_branch_and_move (condition, target,
12838 const0_rtx, const1_rtx);
12840 case MIPS_BUILTIN_CMP_UPPER:
12841 case MIPS_BUILTIN_CMP_LOWER:
12842 offset = GEN_INT (builtin_type == MIPS_BUILTIN_CMP_UPPER);
12843 condition = gen_single_cc (cmp_result, offset);
12844 return mips_builtin_branch_and_move (condition, target,
12845 const1_rtx, const0_rtx);
12848 condition = gen_rtx_NE (VOIDmode, cmp_result, const0_rtx);
12849 return mips_builtin_branch_and_move (condition, target,
12850 const1_rtx, const0_rtx);
12854 /* Expand a bposge builtin of type BUILTIN_TYPE. TARGET, if nonnull,
12855 suggests a good place to put the boolean result. */
12858 mips_expand_builtin_bposge (enum mips_builtin_type builtin_type, rtx target)
12860 rtx condition, cmp_result;
12863 if (target == 0 || GET_MODE (target) != SImode)
12864 target = gen_reg_rtx (SImode);
12866 cmp_result = gen_rtx_REG (CCDSPmode, CCDSP_PO_REGNUM);
12868 if (builtin_type == MIPS_BUILTIN_BPOSGE32)
12873 condition = gen_rtx_GE (VOIDmode, cmp_result, GEN_INT (cmp_value));
12874 return mips_builtin_branch_and_move (condition, target,
12875 const1_rtx, const0_rtx);
12878 /* Set SYMBOL_REF_FLAGS for the SYMBOL_REF inside RTL, which belongs to DECL.
12879 FIRST is true if this is the first time handling this decl. */
12882 mips_encode_section_info (tree decl, rtx rtl, int first)
12884 default_encode_section_info (decl, rtl, first);
12886 if (TREE_CODE (decl) == FUNCTION_DECL)
12888 rtx symbol = XEXP (rtl, 0);
12889 tree type = TREE_TYPE (decl);
12891 if ((TARGET_LONG_CALLS && !mips_near_type_p (type))
12892 || mips_far_type_p (type))
12893 SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
12897 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. Some code models use the incoming
12898 value of PIC_FUNCTION_ADDR_REGNUM to set up the global pointer. */
12901 mips_extra_live_on_entry (bitmap regs)
12903 if (TARGET_USE_GOT && !TARGET_ABSOLUTE_ABICALLS)
12904 bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
12907 /* SImode values are represented as sign-extended to DImode. */
12910 mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
12912 if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
12913 return SIGN_EXTEND;
12918 /* MIPS implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
12921 mips_output_dwarf_dtprel (FILE *file, int size, rtx x)
12926 fputs ("\t.dtprelword\t", file);
12930 fputs ("\t.dtpreldword\t", file);
12934 gcc_unreachable ();
12936 output_addr_const (file, x);
12937 fputs ("+0x8000", file);
12940 #include "gt-mips.h"