1 /* Subroutines used for MIPS code generation.
2 Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
5 Contributed by A. Lichnewsky, lich@inria.inria.fr.
6 Changes by Michael Meissner, meissner@osf.org.
7 64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
8 Brendan Eich, brendan@microunity.com.
10 This file is part of GCC.
12 GCC is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 3, or (at your option)
17 GCC is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with GCC; see the file COPYING3. If not see
24 <http://www.gnu.org/licenses/>. */
28 #include "coretypes.h"
33 #include "hard-reg-set.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "insn-attr.h"
53 #include "target-def.h"
54 #include "integrate.h"
55 #include "langhooks.h"
56 #include "cfglayout.h"
57 #include "sched-int.h"
58 #include "tree-gimple.h"
60 #include "diagnostic.h"
/* True if X is an unspec wrapper around a SYMBOL_REF or LABEL_REF.
   Such wrappers reserve the UNSPEC numbers in the half-open range
   [UNSPEC_ADDRESS_FIRST, UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES),
   one number per mips_symbol_type.  */
#define UNSPEC_ADDRESS_P(X)					\
  (GET_CODE (X) == UNSPEC					\
   && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST			\
   && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
68 /* Extract the symbol or label from UNSPEC wrapper X. */
69 #define UNSPEC_ADDRESS(X) \
/* Extract the symbol type from UNSPEC wrapper X.  The wrapper encodes
   the mips_symbol_type as an offset from UNSPEC_ADDRESS_FIRST in its
   UNSPEC number, so subtracting the base recovers the enum value.  */
#define UNSPEC_ADDRESS_TYPE(X) \
  ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
76 /* The maximum distance between the top of the stack frame and the
77 value $sp has when we save and restore registers.
79 The value for normal-mode code must be a SMALL_OPERAND and must
80 preserve the maximum stack alignment. We therefore use a value
81 of 0x7ff0 in this case.
83 MIPS16e SAVE and RESTORE instructions can adjust the stack pointer by
84 up to 0x7f8 bytes and can usually save or restore all the registers
85 that we need to save or restore. (Note that we can only use these
86 instructions for o32, for which the stack alignment is 8 bytes.)
88 We use a maximum gap of 0x100 or 0x400 for MIPS16 code when SAVE and
89 RESTORE are not available. We can then use unextended instructions
90 to save and restore registers, and to allocate and deallocate the top
#define MIPS_MAX_FIRST_STACK_STEP					\
  (!TARGET_MIPS16 ? 0x7ff0		/* SMALL_OPERAND, 16-byte aligned */ \
   : GENERATE_MIPS16E_SAVE_RESTORE ? 0x7f8 /* MIPS16e SAVE/RESTORE limit */ \
   : TARGET_64BIT ? 0x100 : 0x400)	/* unextended MIPS16 offset range */
97 /* True if INSN is a mips.md pattern or asm statement. */
98 #define USEFUL_INSN_P(INSN) \
100 && GET_CODE (PATTERN (INSN)) != USE \
101 && GET_CODE (PATTERN (INSN)) != CLOBBER \
102 && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
103 && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
105 /* If INSN is a delayed branch sequence, return the first instruction
106 in the sequence, otherwise return INSN itself. */
107 #define SEQ_BEGIN(INSN) \
108 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
109 ? XVECEXP (PATTERN (INSN), 0, 0) \
112 /* Likewise for the last instruction in a delayed branch sequence. */
113 #define SEQ_END(INSN) \
114 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
115 ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1) \
/* Execute the following loop body with SUBINSN set to each instruction
   between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive.

   NOTE(review): SEQ_END (INSN) is re-evaluated on every iteration, so
   the loop body must not modify INSN or the instruction stream around
   it.  */
#define FOR_EACH_SUBINSN(SUBINSN, INSN)					\
  for ((SUBINSN) = SEQ_BEGIN (INSN);					\
       (SUBINSN) != NEXT_INSN (SEQ_END (INSN));				\
       (SUBINSN) = NEXT_INSN (SUBINSN))
/* True if bit BIT is set in VALUE.  Use an unsigned constant for the
   shift: the register save masks tested with this macro cover bits
   0-31, and left-shifting a signed 1 by 31 shifts into the sign bit,
   which is undefined behavior in C.  The truth value of the test is
   unchanged for all in-range bits.  */
#define BITSET_P(VALUE, BIT) (((VALUE) & (1U << (BIT))) != 0)
128 /* Classifies an address.
131 A natural register + offset address. The register satisfies
132 mips_valid_base_register_p and the offset is a const_arith_operand.
135 A LO_SUM rtx. The first operand is a valid base register and
136 the second operand is a symbolic address.
139 A signed 16-bit constant address.
142 A constant symbolic address (equivalent to CONSTANT_SYMBOLIC). */
143 enum mips_address_type {
150 /* Classifies the prototype of a builtin function. */
151 enum mips_function_type
153 MIPS_V2SF_FTYPE_V2SF,
154 MIPS_V2SF_FTYPE_V2SF_V2SF,
155 MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
156 MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,
157 MIPS_V2SF_FTYPE_SF_SF,
158 MIPS_INT_FTYPE_V2SF_V2SF,
159 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF,
160 MIPS_INT_FTYPE_SF_SF,
161 MIPS_INT_FTYPE_DF_DF,
/* For MIPS DSP ASE.  */
170 MIPS_DI_FTYPE_DI_SI_SI,
171 MIPS_DI_FTYPE_DI_V2HI_V2HI,
172 MIPS_DI_FTYPE_DI_V4QI_V4QI,
174 MIPS_SI_FTYPE_PTR_SI,
178 MIPS_SI_FTYPE_V2HI_V2HI,
180 MIPS_SI_FTYPE_V4QI_V4QI,
183 MIPS_V2HI_FTYPE_SI_SI,
184 MIPS_V2HI_FTYPE_V2HI,
185 MIPS_V2HI_FTYPE_V2HI_SI,
186 MIPS_V2HI_FTYPE_V2HI_V2HI,
187 MIPS_V2HI_FTYPE_V4QI,
188 MIPS_V2HI_FTYPE_V4QI_V2HI,
190 MIPS_V4QI_FTYPE_V2HI_V2HI,
191 MIPS_V4QI_FTYPE_V4QI_SI,
192 MIPS_V4QI_FTYPE_V4QI_V4QI,
193 MIPS_VOID_FTYPE_SI_SI,
194 MIPS_VOID_FTYPE_V2HI_V2HI,
195 MIPS_VOID_FTYPE_V4QI_V4QI,
197 /* For MIPS DSP REV 2 ASE. */
198 MIPS_V4QI_FTYPE_V4QI,
199 MIPS_SI_FTYPE_SI_SI_SI,
200 MIPS_DI_FTYPE_DI_USI_USI,
202 MIPS_DI_FTYPE_USI_USI,
203 MIPS_V2HI_FTYPE_SI_SI_SI,
209 /* Specifies how a builtin function should be converted into rtl. */
210 enum mips_builtin_type
212 /* The builtin corresponds directly to an .md pattern. The return
213 value is mapped to operand 0 and the arguments are mapped to
214 operands 1 and above. */
217 /* The builtin corresponds directly to an .md pattern. There is no return
218 value and the arguments are mapped to operands 0 and above. */
219 MIPS_BUILTIN_DIRECT_NO_TARGET,
221 /* The builtin corresponds to a comparison instruction followed by
222 a mips_cond_move_tf_ps pattern. The first two arguments are the
223 values to compare and the second two arguments are the vector
224 operands for the movt.ps or movf.ps instruction (in assembly order). */
228 /* The builtin corresponds to a V2SF comparison instruction. Operand 0
229 of this instruction is the result of the comparison, which has mode
230 CCV2 or CCV4. The function arguments are mapped to operands 1 and
231 above. The function's return value is an SImode boolean that is
232 true under the following conditions:
234 MIPS_BUILTIN_CMP_ANY: one of the registers is true
235 MIPS_BUILTIN_CMP_ALL: all of the registers are true
236 MIPS_BUILTIN_CMP_LOWER: the first register is true
237 MIPS_BUILTIN_CMP_UPPER: the second register is true. */
238 MIPS_BUILTIN_CMP_ANY,
239 MIPS_BUILTIN_CMP_ALL,
240 MIPS_BUILTIN_CMP_UPPER,
241 MIPS_BUILTIN_CMP_LOWER,
243 /* As above, but the instruction only sets a single $fcc register. */
244 MIPS_BUILTIN_CMP_SINGLE,
246 /* For generating bposge32 branch instructions in MIPS32 DSP ASE. */
247 MIPS_BUILTIN_BPOSGE32
250 /* Invokes MACRO (COND) for each c.cond.fmt condition. */
251 #define MIPS_FP_CONDITIONS(MACRO) \
269 /* Enumerates the codes above as MIPS_FP_COND_<X>. */
270 #define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
271 enum mips_fp_condition {
272 MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
275 /* Index X provides the string representation of MIPS_FP_COND_<X>. */
276 #define STRINGIFY(X) #X
277 static const char *const mips_fp_conditions[] = {
278 MIPS_FP_CONDITIONS (STRINGIFY)
281 /* A function to save or store a register. The first argument is the
282 register and the second is the stack slot. */
283 typedef void (*mips_save_restore_fn) (rtx, rtx);
285 struct mips16_constant;
286 struct mips_arg_info;
287 struct mips_address_info;
288 struct mips_integer_op;
291 static bool mips_valid_base_register_p (rtx, enum machine_mode, int);
292 static bool mips_classify_address (struct mips_address_info *, rtx,
293 enum machine_mode, int);
294 static bool mips_cannot_force_const_mem (rtx);
295 static bool mips_use_blocks_for_constant_p (enum machine_mode, const_rtx);
296 static int mips_symbol_insns (enum mips_symbol_type, enum machine_mode);
297 static bool mips16_unextended_reference_p (enum machine_mode mode, rtx, rtx);
298 static rtx mips_force_temporary (rtx, rtx);
299 static rtx mips_unspec_offset_high (rtx, rtx, rtx, enum mips_symbol_type);
300 static rtx mips_add_offset (rtx, rtx, HOST_WIDE_INT);
301 static unsigned int mips_build_shift (struct mips_integer_op *, HOST_WIDE_INT);
302 static unsigned int mips_build_lower (struct mips_integer_op *,
303 unsigned HOST_WIDE_INT);
304 static unsigned int mips_build_integer (struct mips_integer_op *,
305 unsigned HOST_WIDE_INT);
306 static void mips_legitimize_const_move (enum machine_mode, rtx, rtx);
307 static int m16_check_op (rtx, int, int, int);
308 static bool mips_rtx_costs (rtx, int, int, int *);
309 static int mips_address_cost (rtx);
310 static void mips_emit_compare (enum rtx_code *, rtx *, rtx *, bool);
311 static void mips_load_call_address (rtx, rtx, int);
312 static bool mips_function_ok_for_sibcall (tree, tree);
313 static void mips_block_move_straight (rtx, rtx, HOST_WIDE_INT);
314 static void mips_adjust_block_mem (rtx, HOST_WIDE_INT, rtx *, rtx *);
315 static void mips_block_move_loop (rtx, rtx, HOST_WIDE_INT);
316 static void mips_arg_info (const CUMULATIVE_ARGS *, enum machine_mode,
317 tree, int, struct mips_arg_info *);
318 static bool mips_get_unaligned_mem (rtx *, unsigned int, int, rtx *, rtx *);
319 static void mips_set_architecture (const struct mips_cpu_info *);
320 static void mips_set_tune (const struct mips_cpu_info *);
321 static bool mips_handle_option (size_t, const char *, int);
322 static struct machine_function *mips_init_machine_status (void);
323 static void print_operand_reloc (FILE *, rtx, enum mips_symbol_context,
325 static void mips_file_start (void);
326 static int mips_small_data_pattern_1 (rtx *, void *);
327 static int mips_rewrite_small_data_1 (rtx *, void *);
328 static bool mips_function_has_gp_insn (void);
329 static unsigned int mips_global_pointer (void);
330 static bool mips_save_reg_p (unsigned int);
331 static void mips_save_restore_reg (enum machine_mode, int, HOST_WIDE_INT,
332 mips_save_restore_fn);
333 static void mips_for_each_saved_reg (HOST_WIDE_INT, mips_save_restore_fn);
334 static void mips_output_cplocal (void);
335 static void mips_emit_loadgp (void);
336 static void mips_output_function_prologue (FILE *, HOST_WIDE_INT);
337 static void mips_set_frame_expr (rtx);
338 static rtx mips_frame_set (rtx, rtx);
339 static void mips_save_reg (rtx, rtx);
340 static void mips_output_function_epilogue (FILE *, HOST_WIDE_INT);
341 static void mips_restore_reg (rtx, rtx);
342 static void mips_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
343 HOST_WIDE_INT, tree);
344 static section *mips_select_rtx_section (enum machine_mode, rtx,
345 unsigned HOST_WIDE_INT);
346 static section *mips_function_rodata_section (tree);
347 static bool mips_in_small_data_p (const_tree);
348 static bool mips_use_anchors_for_symbol_p (const_rtx);
349 static int mips_fpr_return_fields (const_tree, tree *);
350 static bool mips_return_in_msb (const_tree);
351 static rtx mips_return_fpr_pair (enum machine_mode mode,
352 enum machine_mode mode1, HOST_WIDE_INT,
353 enum machine_mode mode2, HOST_WIDE_INT);
354 static rtx mips16_gp_pseudo_reg (void);
355 static void mips16_fp_args (FILE *, int, int);
356 static void build_mips16_function_stub (FILE *);
357 static rtx dump_constants_1 (enum machine_mode, rtx, rtx);
358 static void dump_constants (struct mips16_constant *, rtx);
359 static int mips16_insn_length (rtx);
360 static int mips16_rewrite_pool_refs (rtx *, void *);
361 static void mips16_lay_out_constants (void);
362 static void mips_sim_reset (struct mips_sim *);
363 static void mips_sim_init (struct mips_sim *, state_t);
364 static void mips_sim_next_cycle (struct mips_sim *);
365 static void mips_sim_wait_reg (struct mips_sim *, rtx, rtx);
366 static int mips_sim_wait_regs_2 (rtx *, void *);
367 static void mips_sim_wait_regs_1 (rtx *, void *);
368 static void mips_sim_wait_regs (struct mips_sim *, rtx);
369 static void mips_sim_wait_units (struct mips_sim *, rtx);
370 static void mips_sim_wait_insn (struct mips_sim *, rtx);
371 static void mips_sim_record_set (rtx, const_rtx, void *);
372 static void mips_sim_issue_insn (struct mips_sim *, rtx);
373 static void mips_sim_issue_nop (struct mips_sim *);
374 static void mips_sim_finish_insn (struct mips_sim *, rtx);
375 static void vr4130_avoid_branch_rt_conflict (rtx);
376 static void vr4130_align_insns (void);
377 static void mips_avoid_hazard (rtx, rtx, int *, rtx *, rtx);
378 static void mips_avoid_hazards (void);
379 static void mips_reorg (void);
380 static bool mips_strict_matching_cpu_name_p (const char *, const char *);
381 static bool mips_matching_cpu_name_p (const char *, const char *);
382 static const struct mips_cpu_info *mips_parse_cpu (const char *);
383 static const struct mips_cpu_info *mips_cpu_info_from_isa (int);
384 static bool mips_return_in_memory (const_tree, const_tree);
385 static bool mips_strict_argument_naming (CUMULATIVE_ARGS *);
386 static void mips_macc_chains_record (rtx);
387 static void mips_macc_chains_reorder (rtx *, int);
388 static void vr4130_true_reg_dependence_p_1 (rtx, const_rtx, void *);
389 static bool vr4130_true_reg_dependence_p (rtx);
390 static bool vr4130_swap_insns_p (rtx, rtx);
391 static void vr4130_reorder (rtx *, int);
392 static void mips_promote_ready (rtx *, int, int);
393 static void mips_sched_init (FILE *, int, int);
394 static int mips_sched_reorder (FILE *, int, rtx *, int *, int);
395 static int mips_variable_issue (FILE *, int, rtx, int);
396 static int mips_adjust_cost (rtx, rtx, rtx, int);
397 static int mips_issue_rate (void);
398 static int mips_multipass_dfa_lookahead (void);
399 static void mips_init_libfuncs (void);
400 static void mips_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
402 static tree mips_build_builtin_va_list (void);
403 static tree mips_gimplify_va_arg_expr (tree, tree, tree *, tree *);
404 static bool mips_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode mode,
406 static bool mips_callee_copies (CUMULATIVE_ARGS *, enum machine_mode mode,
408 static int mips_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode mode,
410 static bool mips_valid_pointer_mode (enum machine_mode);
411 static bool mips_vector_mode_supported_p (enum machine_mode);
412 static rtx mips_prepare_builtin_arg (enum insn_code, unsigned int, tree, unsigned int);
413 static rtx mips_prepare_builtin_target (enum insn_code, unsigned int, rtx);
414 static rtx mips_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
415 static void mips_init_builtins (void);
416 static rtx mips_expand_builtin_direct (enum insn_code, rtx, tree, bool);
417 static rtx mips_expand_builtin_movtf (enum mips_builtin_type,
418 enum insn_code, enum mips_fp_condition,
420 static rtx mips_expand_builtin_compare (enum mips_builtin_type,
421 enum insn_code, enum mips_fp_condition,
423 static rtx mips_expand_builtin_bposge (enum mips_builtin_type, rtx);
424 static void mips_encode_section_info (tree, rtx, int);
425 static void mips_extra_live_on_entry (bitmap);
426 static int mips_comp_type_attributes (const_tree, const_tree);
427 static void mips_set_mips16_mode (int);
428 static void mips_set_current_function (tree);
429 static int mips_mode_rep_extended (enum machine_mode, enum machine_mode);
430 static bool mips_offset_within_alignment_p (rtx, HOST_WIDE_INT);
431 static void mips_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
433 /* Structure to be filled in by compute_frame_size with register
434 save masks, and offsets for the current function. */
436 struct mips_frame_info GTY(())
438 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
439 HOST_WIDE_INT var_size; /* # bytes that variables take up */
440 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
441 HOST_WIDE_INT cprestore_size; /* # bytes that the .cprestore slot takes up */
442 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
443 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
444 unsigned int mask; /* mask of saved gp registers */
445 unsigned int fmask; /* mask of saved fp registers */
446 HOST_WIDE_INT gp_save_offset; /* offset from vfp to store gp registers */
447 HOST_WIDE_INT fp_save_offset; /* offset from vfp to store fp registers */
448 HOST_WIDE_INT gp_sp_offset; /* offset from new sp to store gp registers */
449 HOST_WIDE_INT fp_sp_offset; /* offset from new sp to store fp registers */
450 bool initialized; /* true if frame size already calculated */
451 int num_gp; /* number of gp registers saved */
452 int num_fp; /* number of fp registers saved */
455 struct machine_function GTY(()) {
456 /* Pseudo-reg holding the value of $28 in a mips16 function which
457 refers to GP relative global variables. */
458 rtx mips16_gp_pseudo_rtx;
460 /* The number of extra stack bytes taken up by register varargs.
461 This area is allocated by the callee at the very top of the frame. */
464 /* Current frame information, calculated by compute_frame_size. */
465 struct mips_frame_info frame;
467 /* The register to use as the global pointer within this function. */
468 unsigned int global_pointer;
470 /* True if mips_adjust_insn_length should ignore an instruction's
472 bool ignore_hazard_length_p;
474 /* True if the whole function is suitable for .set noreorder and
476 bool all_noreorder_p;
478 /* True if the function is known to have an instruction that needs $gp. */
481 /* True if we have emitted an instruction to initialize
482 mips16_gp_pseudo_rtx. */
483 bool initialized_mips16_gp_pseudo_p;
486 /* Information about a single argument. */
489 /* True if the argument is passed in a floating-point register, or
490 would have been if we hadn't run out of registers. */
493 /* The number of words passed in registers, rounded up. */
494 unsigned int reg_words;
496 /* For EABI, the offset of the first register from GP_ARG_FIRST or
497 FP_ARG_FIRST. For other ABIs, the offset of the first register from
498 the start of the ABI's argument structure (see the CUMULATIVE_ARGS
499 comment for details).
501 The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
503 unsigned int reg_offset;
505 /* The number of words that must be passed on the stack, rounded up. */
506 unsigned int stack_words;
508 /* The offset from the start of the stack overflow area of the argument's
509 first stack word. Only meaningful when STACK_WORDS is nonzero. */
510 unsigned int stack_offset;
514 /* Information about an address described by mips_address_type.
520 REG is the base register and OFFSET is the constant offset.
523 REG is the register that contains the high part of the address,
524 OFFSET is the symbolic address being referenced and SYMBOL_TYPE
525 is the type of OFFSET's symbol.
528 SYMBOL_TYPE is the type of symbol being referenced. */
530 struct mips_address_info
532 enum mips_address_type type;
535 enum mips_symbol_type symbol_type;
539 /* One stage in a constant building sequence. These sequences have
543 A = A CODE[1] VALUE[1]
544 A = A CODE[2] VALUE[2]
547 where A is an accumulator, each CODE[i] is a binary rtl operation
548 and each VALUE[i] is a constant integer. */
549 struct mips_integer_op {
551 unsigned HOST_WIDE_INT value;
555 /* The largest number of operations needed to load an integer constant.
556 The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
557 When the lowest bit is clear, we can try, but reject a sequence with
558 an extra SLL at the end. */
559 #define MIPS_MAX_INTEGER_OPS 7
561 /* Information about a MIPS16e SAVE or RESTORE instruction. */
562 struct mips16e_save_restore_info {
563 /* The number of argument registers saved by a SAVE instruction.
564 0 for RESTORE instructions. */
567 /* Bit X is set if the instruction saves or restores GPR X. */
570 /* The total number of bytes to allocate. */
574 /* Global variables for machine-dependent things. */
576 /* Threshold for data being put into the small data/bss area, instead
577 of the normal data area. */
578 int mips_section_threshold = -1;
580 /* Count the number of .file directives, so that .loc is up to date. */
581 int num_source_filenames = 0;
583 /* Count the number of sdb related labels are generated (to find block
584 start and end boundaries). */
585 int sdb_label_count = 0;
587 /* Next label # for each statement for Silicon Graphics IRIS systems. */
590 /* Name of the file containing the current function. */
591 const char *current_function_file = "";
593 /* Number of nested .set noreorder, noat, nomacro, and volatile requests. */
599 /* The next branch instruction is a branch likely, not branch normal. */
600 int mips_branch_likely;
602 /* The operands passed to the last cmpMM expander. */
605 /* The target cpu for code generation. */
606 enum processor_type mips_arch;
607 const struct mips_cpu_info *mips_arch_info;
609 /* The target cpu for optimization and scheduling. */
610 enum processor_type mips_tune;
611 const struct mips_cpu_info *mips_tune_info;
613 /* Which instruction set architecture to use. */
616 /* Which ABI to use. */
617 int mips_abi = MIPS_ABI_DEFAULT;
619 /* Cost information to use. */
620 const struct mips_rtx_cost_data *mips_cost;
622 /* Remember the ambient target flags, excluding mips16. */
623 static int mips_base_target_flags;
624 /* The mips16 command-line target flags only. */
625 static bool mips_base_mips16;
626 /* Similar copies of option settings. */
627 static int mips_base_schedule_insns; /* flag_schedule_insns */
628 static int mips_base_reorder_blocks_and_partition; /* flag_reorder... */
629 static int mips_base_move_loop_invariants; /* flag_move_loop_invariants */
630 static int mips_base_align_loops; /* align_loops */
631 static int mips_base_align_jumps; /* align_jumps */
632 static int mips_base_align_functions; /* align_functions */
633 static GTY(()) int mips16_flipper;
635 /* The -mtext-loads setting. */
636 enum mips_code_readable_setting mips_code_readable = CODE_READABLE_YES;
638 /* The architecture selected by -mipsN. */
639 static const struct mips_cpu_info *mips_isa_info;
641 /* If TRUE, we split addresses into their high and low parts in the RTL. */
642 int mips_split_addresses;
644 /* Mode used for saving/restoring general purpose registers. */
645 static enum machine_mode gpr_mode;
647 /* Array giving truth value on whether or not a given hard register
648 can support a given mode. */
649 char mips_hard_regno_mode_ok[(int)MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
651 /* List of all MIPS punctuation characters used by print_operand. */
652 char mips_print_operand_punct[256];
654 /* Map GCC register number to debugger register number. */
655 int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
656 int mips_dwarf_regno[FIRST_PSEUDO_REGISTER];
658 /* A copy of the original flag_delayed_branch: see override_options. */
659 static int mips_flag_delayed_branch;
661 static GTY (()) int mips_output_filename_first_time = 1;
663 /* mips_split_p[X] is true if symbols of type X can be split by
664 mips_split_symbol(). */
665 bool mips_split_p[NUM_SYMBOL_TYPES];
667 /* mips_lo_relocs[X] is the relocation to use when a symbol of type X
668 appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
669 if they are matched by a special .md file pattern. */
670 static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];
672 /* Likewise for HIGHs. */
673 static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
675 /* Map hard register number to register class */
676 const enum reg_class mips_regno_to_class[] =
678 LEA_REGS, LEA_REGS, M16_NA_REGS, V1_REG,
679 M16_REGS, M16_REGS, M16_REGS, M16_REGS,
680 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
681 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
682 M16_NA_REGS, M16_NA_REGS, LEA_REGS, LEA_REGS,
683 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
684 T_REG, PIC_FN_ADDR_REG, LEA_REGS, LEA_REGS,
685 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
686 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
687 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
688 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
689 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
690 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
691 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
692 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
693 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
694 MD0_REG, MD1_REG, NO_REGS, ST_REGS,
695 ST_REGS, ST_REGS, ST_REGS, ST_REGS,
696 ST_REGS, ST_REGS, ST_REGS, NO_REGS,
697 NO_REGS, ALL_REGS, ALL_REGS, NO_REGS,
698 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
699 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
700 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
701 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
702 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
703 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
704 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
705 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
706 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
707 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
708 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
709 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
710 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
711 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
712 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
713 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
714 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
715 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
716 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
717 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
718 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
719 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
720 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
721 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
722 DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS,
723 DSP_ACC_REGS, DSP_ACC_REGS, ALL_REGS, ALL_REGS,
724 ALL_REGS, ALL_REGS, ALL_REGS, ALL_REGS
727 /* Table of machine dependent attributes. */
728 const struct attribute_spec mips_attribute_table[] =
730 { "long_call", 0, 0, false, true, true, NULL },
731 { "far", 0, 0, false, true, true, NULL },
732 { "near", 0, 0, false, true, true, NULL },
733 /* Switch MIPS16 ASE on and off per-function. */
734 { "mips16", 0, 0, false, true, true, NULL },
735 { "nomips16", 0, 0, false, true, true, NULL },
736 { NULL, 0, 0, false, false, false, NULL }
739 /* A table describing all the processors gcc knows about. Names are
740 matched in the order listed. The first mention of an ISA level is
741 taken as the canonical name for that ISA.
743 To ease comparison, please keep this table in the same order as
744 gas's mips_cpu_info_table[]. Please also make sure that
745 MIPS_ISA_LEVEL_SPEC handles all -march options correctly. */
746 const struct mips_cpu_info mips_cpu_info_table[] = {
747 /* Entries for generic ISAs */
748 { "mips1", PROCESSOR_R3000, 1 },
749 { "mips2", PROCESSOR_R6000, 2 },
750 { "mips3", PROCESSOR_R4000, 3 },
751 { "mips4", PROCESSOR_R8000, 4 },
752 { "mips32", PROCESSOR_4KC, 32 },
753 { "mips32r2", PROCESSOR_M4K, 33 },
754 { "mips64", PROCESSOR_5KC, 64 },
757 { "r3000", PROCESSOR_R3000, 1 },
758 { "r2000", PROCESSOR_R3000, 1 }, /* = r3000 */
759 { "r3900", PROCESSOR_R3900, 1 },
762 { "r6000", PROCESSOR_R6000, 2 },
765 { "r4000", PROCESSOR_R4000, 3 },
766 { "vr4100", PROCESSOR_R4100, 3 },
767 { "vr4111", PROCESSOR_R4111, 3 },
768 { "vr4120", PROCESSOR_R4120, 3 },
769 { "vr4130", PROCESSOR_R4130, 3 },
770 { "vr4300", PROCESSOR_R4300, 3 },
771 { "r4400", PROCESSOR_R4000, 3 }, /* = r4000 */
772 { "r4600", PROCESSOR_R4600, 3 },
773 { "orion", PROCESSOR_R4600, 3 }, /* = r4600 */
774 { "r4650", PROCESSOR_R4650, 3 },
777 { "r8000", PROCESSOR_R8000, 4 },
778 { "vr5000", PROCESSOR_R5000, 4 },
779 { "vr5400", PROCESSOR_R5400, 4 },
780 { "vr5500", PROCESSOR_R5500, 4 },
781 { "rm7000", PROCESSOR_R7000, 4 },
782 { "rm9000", PROCESSOR_R9000, 4 },
785 { "4kc", PROCESSOR_4KC, 32 },
786 { "4km", PROCESSOR_4KC, 32 }, /* = 4kc */
787 { "4kp", PROCESSOR_4KP, 32 },
788 { "4ksc", PROCESSOR_4KC, 32 },
790 /* MIPS32 Release 2 */
791 { "m4k", PROCESSOR_M4K, 33 },
792 { "4kec", PROCESSOR_4KC, 33 },
793 { "4kem", PROCESSOR_4KC, 33 },
794 { "4kep", PROCESSOR_4KP, 33 },
795 { "4ksd", PROCESSOR_4KC, 33 },
797 { "24kc", PROCESSOR_24KC, 33 },
798 { "24kf2_1", PROCESSOR_24KF2_1, 33 },
799 { "24kf", PROCESSOR_24KF2_1, 33 },
800 { "24kf1_1", PROCESSOR_24KF1_1, 33 },
801 { "24kfx", PROCESSOR_24KF1_1, 33 },
802 { "24kx", PROCESSOR_24KF1_1, 33 },
804 { "24kec", PROCESSOR_24KC, 33 }, /* 24K with DSP */
805 { "24kef2_1", PROCESSOR_24KF2_1, 33 },
806 { "24kef", PROCESSOR_24KF2_1, 33 },
807 { "24kef1_1", PROCESSOR_24KF1_1, 33 },
808 { "24kefx", PROCESSOR_24KF1_1, 33 },
809 { "24kex", PROCESSOR_24KF1_1, 33 },
811 { "34kc", PROCESSOR_24KC, 33 }, /* 34K with MT/DSP */
812 { "34kf2_1", PROCESSOR_24KF2_1, 33 },
813 { "34kf", PROCESSOR_24KF2_1, 33 },
814 { "34kf1_1", PROCESSOR_24KF1_1, 33 },
815 { "34kfx", PROCESSOR_24KF1_1, 33 },
816 { "34kx", PROCESSOR_24KF1_1, 33 },
818 { "74kc", PROCESSOR_74KC, 33 }, /* 74K with DSPr2 */
819 { "74kf2_1", PROCESSOR_74KF2_1, 33 },
820 { "74kf", PROCESSOR_74KF2_1, 33 },
821 { "74kf1_1", PROCESSOR_74KF1_1, 33 },
822 { "74kfx", PROCESSOR_74KF1_1, 33 },
823 { "74kx", PROCESSOR_74KF1_1, 33 },
824 { "74kf3_2", PROCESSOR_74KF3_2, 33 },
827 { "5kc", PROCESSOR_5KC, 64 },
828 { "5kf", PROCESSOR_5KF, 64 },
829 { "20kc", PROCESSOR_20KC, 64 },
830 { "sb1", PROCESSOR_SB1, 64 },
831 { "sb1a", PROCESSOR_SB1A, 64 },
832 { "sr71000", PROCESSOR_SR71000, 64 },
838 /* Default costs. If these are used for a processor we should look
839 up the actual costs. */
840 #define DEFAULT_COSTS COSTS_N_INSNS (6), /* fp_add */ \
841 COSTS_N_INSNS (7), /* fp_mult_sf */ \
842 COSTS_N_INSNS (8), /* fp_mult_df */ \
843 COSTS_N_INSNS (23), /* fp_div_sf */ \
844 COSTS_N_INSNS (36), /* fp_div_df */ \
845 COSTS_N_INSNS (10), /* int_mult_si */ \
846 COSTS_N_INSNS (10), /* int_mult_di */ \
847 COSTS_N_INSNS (69), /* int_div_si */ \
848 COSTS_N_INSNS (69), /* int_div_di */ \
849 2, /* branch_cost */ \
850 4 /* memory_latency */
852 /* Need to replace these with the costs of calling the appropriate
854 #define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */ \
855 COSTS_N_INSNS (256), /* fp_mult_sf */ \
856 COSTS_N_INSNS (256), /* fp_mult_df */ \
857 COSTS_N_INSNS (256), /* fp_div_sf */ \
858 COSTS_N_INSNS (256) /* fp_div_df */
860 static struct mips_rtx_cost_data const mips_rtx_cost_optimize_size =
862 COSTS_N_INSNS (1), /* fp_add */
863 COSTS_N_INSNS (1), /* fp_mult_sf */
864 COSTS_N_INSNS (1), /* fp_mult_df */
865 COSTS_N_INSNS (1), /* fp_div_sf */
866 COSTS_N_INSNS (1), /* fp_div_df */
867 COSTS_N_INSNS (1), /* int_mult_si */
868 COSTS_N_INSNS (1), /* int_mult_di */
869 COSTS_N_INSNS (1), /* int_div_si */
870 COSTS_N_INSNS (1), /* int_div_di */
872 4 /* memory_latency */
875 static struct mips_rtx_cost_data const mips_rtx_cost_data[PROCESSOR_MAX] =
878 COSTS_N_INSNS (2), /* fp_add */
879 COSTS_N_INSNS (4), /* fp_mult_sf */
880 COSTS_N_INSNS (5), /* fp_mult_df */
881 COSTS_N_INSNS (12), /* fp_div_sf */
882 COSTS_N_INSNS (19), /* fp_div_df */
883 COSTS_N_INSNS (12), /* int_mult_si */
884 COSTS_N_INSNS (12), /* int_mult_di */
885 COSTS_N_INSNS (35), /* int_div_si */
886 COSTS_N_INSNS (35), /* int_div_di */
888 4 /* memory_latency */
893 COSTS_N_INSNS (6), /* int_mult_si */
894 COSTS_N_INSNS (6), /* int_mult_di */
895 COSTS_N_INSNS (36), /* int_div_si */
896 COSTS_N_INSNS (36), /* int_div_di */
898 4 /* memory_latency */
902 COSTS_N_INSNS (36), /* int_mult_si */
903 COSTS_N_INSNS (36), /* int_mult_di */
904 COSTS_N_INSNS (37), /* int_div_si */
905 COSTS_N_INSNS (37), /* int_div_di */
907 4 /* memory_latency */
911 COSTS_N_INSNS (4), /* int_mult_si */
912 COSTS_N_INSNS (11), /* int_mult_di */
913 COSTS_N_INSNS (36), /* int_div_si */
914 COSTS_N_INSNS (68), /* int_div_di */
916 4 /* memory_latency */
919 COSTS_N_INSNS (4), /* fp_add */
920 COSTS_N_INSNS (4), /* fp_mult_sf */
921 COSTS_N_INSNS (5), /* fp_mult_df */
922 COSTS_N_INSNS (17), /* fp_div_sf */
923 COSTS_N_INSNS (32), /* fp_div_df */
924 COSTS_N_INSNS (4), /* int_mult_si */
925 COSTS_N_INSNS (11), /* int_mult_di */
926 COSTS_N_INSNS (36), /* int_div_si */
927 COSTS_N_INSNS (68), /* int_div_di */
929 4 /* memory_latency */
932 COSTS_N_INSNS (4), /* fp_add */
933 COSTS_N_INSNS (4), /* fp_mult_sf */
934 COSTS_N_INSNS (5), /* fp_mult_df */
935 COSTS_N_INSNS (17), /* fp_div_sf */
936 COSTS_N_INSNS (32), /* fp_div_df */
937 COSTS_N_INSNS (4), /* int_mult_si */
938 COSTS_N_INSNS (7), /* int_mult_di */
939 COSTS_N_INSNS (42), /* int_div_si */
940 COSTS_N_INSNS (72), /* int_div_di */
942 4 /* memory_latency */
946 COSTS_N_INSNS (5), /* int_mult_si */
947 COSTS_N_INSNS (5), /* int_mult_di */
948 COSTS_N_INSNS (41), /* int_div_si */
949 COSTS_N_INSNS (41), /* int_div_di */
951 4 /* memory_latency */
954 COSTS_N_INSNS (8), /* fp_add */
955 COSTS_N_INSNS (8), /* fp_mult_sf */
956 COSTS_N_INSNS (10), /* fp_mult_df */
957 COSTS_N_INSNS (34), /* fp_div_sf */
958 COSTS_N_INSNS (64), /* fp_div_df */
959 COSTS_N_INSNS (5), /* int_mult_si */
960 COSTS_N_INSNS (5), /* int_mult_di */
961 COSTS_N_INSNS (41), /* int_div_si */
962 COSTS_N_INSNS (41), /* int_div_di */
964 4 /* memory_latency */
967 COSTS_N_INSNS (4), /* fp_add */
968 COSTS_N_INSNS (4), /* fp_mult_sf */
969 COSTS_N_INSNS (5), /* fp_mult_df */
970 COSTS_N_INSNS (17), /* fp_div_sf */
971 COSTS_N_INSNS (32), /* fp_div_df */
972 COSTS_N_INSNS (5), /* int_mult_si */
973 COSTS_N_INSNS (5), /* int_mult_di */
974 COSTS_N_INSNS (41), /* int_div_si */
975 COSTS_N_INSNS (41), /* int_div_di */
977 4 /* memory_latency */
981 COSTS_N_INSNS (5), /* int_mult_si */
982 COSTS_N_INSNS (5), /* int_mult_di */
983 COSTS_N_INSNS (41), /* int_div_si */
984 COSTS_N_INSNS (41), /* int_div_di */
986 4 /* memory_latency */
989 COSTS_N_INSNS (8), /* fp_add */
990 COSTS_N_INSNS (8), /* fp_mult_sf */
991 COSTS_N_INSNS (10), /* fp_mult_df */
992 COSTS_N_INSNS (34), /* fp_div_sf */
993 COSTS_N_INSNS (64), /* fp_div_df */
994 COSTS_N_INSNS (5), /* int_mult_si */
995 COSTS_N_INSNS (5), /* int_mult_di */
996 COSTS_N_INSNS (41), /* int_div_si */
997 COSTS_N_INSNS (41), /* int_div_di */
999 4 /* memory_latency */
1002 COSTS_N_INSNS (4), /* fp_add */
1003 COSTS_N_INSNS (4), /* fp_mult_sf */
1004 COSTS_N_INSNS (5), /* fp_mult_df */
1005 COSTS_N_INSNS (17), /* fp_div_sf */
1006 COSTS_N_INSNS (32), /* fp_div_df */
1007 COSTS_N_INSNS (5), /* int_mult_si */
1008 COSTS_N_INSNS (5), /* int_mult_di */
1009 COSTS_N_INSNS (41), /* int_div_si */
1010 COSTS_N_INSNS (41), /* int_div_di */
1011 1, /* branch_cost */
1012 4 /* memory_latency */
1015 COSTS_N_INSNS (6), /* fp_add */
1016 COSTS_N_INSNS (6), /* fp_mult_sf */
1017 COSTS_N_INSNS (7), /* fp_mult_df */
1018 COSTS_N_INSNS (25), /* fp_div_sf */
1019 COSTS_N_INSNS (48), /* fp_div_df */
1020 COSTS_N_INSNS (5), /* int_mult_si */
1021 COSTS_N_INSNS (5), /* int_mult_di */
1022 COSTS_N_INSNS (41), /* int_div_si */
1023 COSTS_N_INSNS (41), /* int_div_di */
1024 1, /* branch_cost */
1025 4 /* memory_latency */
1031 COSTS_N_INSNS (2), /* fp_add */
1032 COSTS_N_INSNS (4), /* fp_mult_sf */
1033 COSTS_N_INSNS (5), /* fp_mult_df */
1034 COSTS_N_INSNS (12), /* fp_div_sf */
1035 COSTS_N_INSNS (19), /* fp_div_df */
1036 COSTS_N_INSNS (2), /* int_mult_si */
1037 COSTS_N_INSNS (2), /* int_mult_di */
1038 COSTS_N_INSNS (35), /* int_div_si */
1039 COSTS_N_INSNS (35), /* int_div_di */
1040 1, /* branch_cost */
1041 4 /* memory_latency */
1044 COSTS_N_INSNS (3), /* fp_add */
1045 COSTS_N_INSNS (5), /* fp_mult_sf */
1046 COSTS_N_INSNS (6), /* fp_mult_df */
1047 COSTS_N_INSNS (15), /* fp_div_sf */
1048 COSTS_N_INSNS (16), /* fp_div_df */
1049 COSTS_N_INSNS (17), /* int_mult_si */
1050 COSTS_N_INSNS (17), /* int_mult_di */
1051 COSTS_N_INSNS (38), /* int_div_si */
1052 COSTS_N_INSNS (38), /* int_div_di */
1053 2, /* branch_cost */
1054 6 /* memory_latency */
1057 COSTS_N_INSNS (6), /* fp_add */
1058 COSTS_N_INSNS (7), /* fp_mult_sf */
1059 COSTS_N_INSNS (8), /* fp_mult_df */
1060 COSTS_N_INSNS (23), /* fp_div_sf */
1061 COSTS_N_INSNS (36), /* fp_div_df */
1062 COSTS_N_INSNS (10), /* int_mult_si */
1063 COSTS_N_INSNS (10), /* int_mult_di */
1064 COSTS_N_INSNS (69), /* int_div_si */
1065 COSTS_N_INSNS (69), /* int_div_di */
1066 2, /* branch_cost */
1067 6 /* memory_latency */
1079 /* The only costs that appear to be updated here are
1080 integer multiplication. */
1082 COSTS_N_INSNS (4), /* int_mult_si */
1083 COSTS_N_INSNS (6), /* int_mult_di */
1084 COSTS_N_INSNS (69), /* int_div_si */
1085 COSTS_N_INSNS (69), /* int_div_di */
1086 1, /* branch_cost */
1087 4 /* memory_latency */
1099 COSTS_N_INSNS (6), /* fp_add */
1100 COSTS_N_INSNS (4), /* fp_mult_sf */
1101 COSTS_N_INSNS (5), /* fp_mult_df */
1102 COSTS_N_INSNS (23), /* fp_div_sf */
1103 COSTS_N_INSNS (36), /* fp_div_df */
1104 COSTS_N_INSNS (5), /* int_mult_si */
1105 COSTS_N_INSNS (5), /* int_mult_di */
1106 COSTS_N_INSNS (36), /* int_div_si */
1107 COSTS_N_INSNS (36), /* int_div_di */
1108 1, /* branch_cost */
1109 4 /* memory_latency */
1112 COSTS_N_INSNS (6), /* fp_add */
1113 COSTS_N_INSNS (5), /* fp_mult_sf */
1114 COSTS_N_INSNS (6), /* fp_mult_df */
1115 COSTS_N_INSNS (30), /* fp_div_sf */
1116 COSTS_N_INSNS (59), /* fp_div_df */
1117 COSTS_N_INSNS (3), /* int_mult_si */
1118 COSTS_N_INSNS (4), /* int_mult_di */
1119 COSTS_N_INSNS (42), /* int_div_si */
1120 COSTS_N_INSNS (74), /* int_div_di */
1121 1, /* branch_cost */
1122 4 /* memory_latency */
1125 COSTS_N_INSNS (6), /* fp_add */
1126 COSTS_N_INSNS (5), /* fp_mult_sf */
1127 COSTS_N_INSNS (6), /* fp_mult_df */
1128 COSTS_N_INSNS (30), /* fp_div_sf */
1129 COSTS_N_INSNS (59), /* fp_div_df */
1130 COSTS_N_INSNS (5), /* int_mult_si */
1131 COSTS_N_INSNS (9), /* int_mult_di */
1132 COSTS_N_INSNS (42), /* int_div_si */
1133 COSTS_N_INSNS (74), /* int_div_di */
1134 1, /* branch_cost */
1135 4 /* memory_latency */
1138 /* The only costs that are changed here are
1139 integer multiplication. */
1140 COSTS_N_INSNS (6), /* fp_add */
1141 COSTS_N_INSNS (7), /* fp_mult_sf */
1142 COSTS_N_INSNS (8), /* fp_mult_df */
1143 COSTS_N_INSNS (23), /* fp_div_sf */
1144 COSTS_N_INSNS (36), /* fp_div_df */
1145 COSTS_N_INSNS (5), /* int_mult_si */
1146 COSTS_N_INSNS (9), /* int_mult_di */
1147 COSTS_N_INSNS (69), /* int_div_si */
1148 COSTS_N_INSNS (69), /* int_div_di */
1149 1, /* branch_cost */
1150 4 /* memory_latency */
1156 /* The only costs that are changed here are
1157 integer multiplication. */
1158 COSTS_N_INSNS (6), /* fp_add */
1159 COSTS_N_INSNS (7), /* fp_mult_sf */
1160 COSTS_N_INSNS (8), /* fp_mult_df */
1161 COSTS_N_INSNS (23), /* fp_div_sf */
1162 COSTS_N_INSNS (36), /* fp_div_df */
1163 COSTS_N_INSNS (3), /* int_mult_si */
1164 COSTS_N_INSNS (8), /* int_mult_di */
1165 COSTS_N_INSNS (69), /* int_div_si */
1166 COSTS_N_INSNS (69), /* int_div_di */
1167 1, /* branch_cost */
1168 4 /* memory_latency */
1171 /* These costs are the same as the SB-1A below. */
1172 COSTS_N_INSNS (4), /* fp_add */
1173 COSTS_N_INSNS (4), /* fp_mult_sf */
1174 COSTS_N_INSNS (4), /* fp_mult_df */
1175 COSTS_N_INSNS (24), /* fp_div_sf */
1176 COSTS_N_INSNS (32), /* fp_div_df */
1177 COSTS_N_INSNS (3), /* int_mult_si */
1178 COSTS_N_INSNS (4), /* int_mult_di */
1179 COSTS_N_INSNS (36), /* int_div_si */
1180 COSTS_N_INSNS (68), /* int_div_di */
1181 1, /* branch_cost */
1182 4 /* memory_latency */
1185 /* These costs are the same as the SB-1 above. */
1186 COSTS_N_INSNS (4), /* fp_add */
1187 COSTS_N_INSNS (4), /* fp_mult_sf */
1188 COSTS_N_INSNS (4), /* fp_mult_df */
1189 COSTS_N_INSNS (24), /* fp_div_sf */
1190 COSTS_N_INSNS (32), /* fp_div_df */
1191 COSTS_N_INSNS (3), /* int_mult_si */
1192 COSTS_N_INSNS (4), /* int_mult_di */
1193 COSTS_N_INSNS (36), /* int_div_si */
1194 COSTS_N_INSNS (68), /* int_div_di */
1195 1, /* branch_cost */
1196 4 /* memory_latency */
1203 /* If a MIPS16e SAVE or RESTORE instruction saves or restores register
1204 mips16e_s2_s8_regs[X], it must also save the registers in indexes
1205 X + 1 onwards. Likewise mips16e_a0_a3_regs. */
/* Callee-saved registers $s8 down to $s2 ($30, $23..$18), highest
   register number first.  */
1206 static const unsigned char mips16e_s2_s8_regs[] = {
1207 30, 23, 22, 21, 20, 19, 18
/* NOTE(review): the closing "};" of the array above and the entire
   initializer of mips16e_a0_a3_regs were lost in this extraction; the
   latter is presumably { 7, 6, 5, 4 } ($a3..$a0) -- confirm against
   upstream.  */
1209 static const unsigned char mips16e_a0_a3_regs[] = {
1213 /* A list of the registers that can be saved by the MIPS16e SAVE instruction,
1214 ordered from the uppermost in memory to the lowest in memory. */
1215 static const unsigned char mips16e_save_restore_regs[] = {
1216 31, 30, 23, 22, 21, 20, 19, 18, 17, 16, 7, 6, 5, 4
/* NOTE(review): the closing "};" of this array was lost in this
   extraction.  */
1219 /* Nonzero if -march should decide the default value of
1220 MASK_SOFT_FLOAT_ABI. */
1221 #ifndef MIPS_MARCH_CONTROLS_SOFT_FLOAT
1222 #define MIPS_MARCH_CONTROLS_SOFT_FLOAT 0
/* NOTE(review): the matching #endif for the #ifndef above was lost in
   this extraction.  */
1225 /* Initialize the GCC target structure. */
1226 #undef TARGET_ASM_ALIGNED_HI_OP
1227 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
1228 #undef TARGET_ASM_ALIGNED_SI_OP
1229 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
1230 #undef TARGET_ASM_ALIGNED_DI_OP
1231 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
1233 #undef TARGET_ASM_FUNCTION_PROLOGUE
1234 #define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
1235 #undef TARGET_ASM_FUNCTION_EPILOGUE
1236 #define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
1237 #undef TARGET_ASM_SELECT_RTX_SECTION
1238 #define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
1239 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
1240 #define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
1242 #undef TARGET_SCHED_INIT
1243 #define TARGET_SCHED_INIT mips_sched_init
1244 #undef TARGET_SCHED_REORDER
1245 #define TARGET_SCHED_REORDER mips_sched_reorder
1246 #undef TARGET_SCHED_REORDER2
1247 #define TARGET_SCHED_REORDER2 mips_sched_reorder
1248 #undef TARGET_SCHED_VARIABLE_ISSUE
1249 #define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
1250 #undef TARGET_SCHED_ADJUST_COST
1251 #define TARGET_SCHED_ADJUST_COST mips_adjust_cost
1252 #undef TARGET_SCHED_ISSUE_RATE
1253 #define TARGET_SCHED_ISSUE_RATE mips_issue_rate
1254 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1255 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
1256 mips_multipass_dfa_lookahead
1258 #undef TARGET_DEFAULT_TARGET_FLAGS
1259 #define TARGET_DEFAULT_TARGET_FLAGS \
1261 | TARGET_CPU_DEFAULT \
1262 | TARGET_ENDIAN_DEFAULT \
1263 | TARGET_FP_EXCEPTIONS_DEFAULT \
1264 | MASK_CHECK_ZERO_DIV \
1266 #undef TARGET_HANDLE_OPTION
1267 #define TARGET_HANDLE_OPTION mips_handle_option
1269 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1270 #define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
1272 #undef TARGET_SET_CURRENT_FUNCTION
1273 #define TARGET_SET_CURRENT_FUNCTION mips_set_current_function
1275 #undef TARGET_VALID_POINTER_MODE
1276 #define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
1277 #undef TARGET_RTX_COSTS
1278 #define TARGET_RTX_COSTS mips_rtx_costs
1279 #undef TARGET_ADDRESS_COST
1280 #define TARGET_ADDRESS_COST mips_address_cost
1282 #undef TARGET_IN_SMALL_DATA_P
1283 #define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
1285 #undef TARGET_MACHINE_DEPENDENT_REORG
1286 #define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
1288 #undef TARGET_ASM_FILE_START
1289 #define TARGET_ASM_FILE_START mips_file_start
1290 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
1291 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
1293 #undef TARGET_INIT_LIBFUNCS
1294 #define TARGET_INIT_LIBFUNCS mips_init_libfuncs
1296 #undef TARGET_BUILD_BUILTIN_VA_LIST
1297 #define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
1298 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1299 #define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
1301 #undef TARGET_PROMOTE_FUNCTION_ARGS
1302 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
1303 #undef TARGET_PROMOTE_FUNCTION_RETURN
1304 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
1305 #undef TARGET_PROMOTE_PROTOTYPES
1306 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
1308 #undef TARGET_RETURN_IN_MEMORY
1309 #define TARGET_RETURN_IN_MEMORY mips_return_in_memory
1310 #undef TARGET_RETURN_IN_MSB
1311 #define TARGET_RETURN_IN_MSB mips_return_in_msb
1313 #undef TARGET_ASM_OUTPUT_MI_THUNK
1314 #define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
1315 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1316 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1318 #undef TARGET_SETUP_INCOMING_VARARGS
1319 #define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
1320 #undef TARGET_STRICT_ARGUMENT_NAMING
1321 #define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
1322 #undef TARGET_MUST_PASS_IN_STACK
1323 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
1324 #undef TARGET_PASS_BY_REFERENCE
1325 #define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
1326 #undef TARGET_CALLEE_COPIES
1327 #define TARGET_CALLEE_COPIES mips_callee_copies
1328 #undef TARGET_ARG_PARTIAL_BYTES
1329 #define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
1331 #undef TARGET_MODE_REP_EXTENDED
1332 #define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended
1334 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1335 #define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
1337 #undef TARGET_INIT_BUILTINS
1338 #define TARGET_INIT_BUILTINS mips_init_builtins
1339 #undef TARGET_EXPAND_BUILTIN
1340 #define TARGET_EXPAND_BUILTIN mips_expand_builtin
1342 #undef TARGET_HAVE_TLS
1343 #define TARGET_HAVE_TLS HAVE_AS_TLS
1345 #undef TARGET_CANNOT_FORCE_CONST_MEM
1346 #define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem
1348 #undef TARGET_ENCODE_SECTION_INFO
1349 #define TARGET_ENCODE_SECTION_INFO mips_encode_section_info
1351 #undef TARGET_ATTRIBUTE_TABLE
1352 #define TARGET_ATTRIBUTE_TABLE mips_attribute_table
1354 #undef TARGET_EXTRA_LIVE_ON_ENTRY
1355 #define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry
1357 #undef TARGET_MIN_ANCHOR_OFFSET
1358 #define TARGET_MIN_ANCHOR_OFFSET -32768
1359 #undef TARGET_MAX_ANCHOR_OFFSET
1360 #define TARGET_MAX_ANCHOR_OFFSET 32767
1361 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1362 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
1363 #undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
1364 #define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p
1366 #undef TARGET_COMP_TYPE_ATTRIBUTES
1367 #define TARGET_COMP_TYPE_ATTRIBUTES mips_comp_type_attributes
1369 #ifdef HAVE_AS_DTPRELWORD
1370 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1371 #define TARGET_ASM_OUTPUT_DWARF_DTPREL mips_output_dwarf_dtprel
/* The global target hook vector, filled in from the TARGET_* macro
   definitions above via TARGET_INITIALIZER.  */
1374 struct gcc_target targetm = TARGET_INITIALIZER;
1377 /* Predicates to test for presence of "near" and "far"/"long_call"
1378 attributes on the given TYPE. */
/* Return true if TYPE carries the "near" attribute.  NOTE(review): the
   storage-class/return-type line and function braces appear to be
   missing from this extraction.  */
1381 mips_near_type_p (const_tree type)
1383 return lookup_attribute ("near", TYPE_ATTRIBUTES (type)) != NULL;
/* Return true if TYPE carries either the "long_call" or the "far"
   attribute.  */
1387 mips_far_type_p (const_tree type)
1389 return (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type)) != NULL
1390 || lookup_attribute ("far", TYPE_ATTRIBUTES (type)) != NULL);
1393 /* Similar predicates for "mips16"/"nomips16" attributes. */
/* Return true if TYPE carries the "mips16" attribute.  */
1396 mips_mips16_type_p (const_tree type)
1398 return lookup_attribute ("mips16", TYPE_ATTRIBUTES (type)) != NULL;
/* Return true if TYPE carries the "nomips16" attribute.  */
1402 mips_nomips16_type_p (const_tree type)
1404 return lookup_attribute ("nomips16", TYPE_ATTRIBUTES (type)) != NULL;
1407 /* Return 0 if the attributes for two types are incompatible, 1 if they
1408 are compatible, and 2 if they are nearly compatible (which causes a
1409 warning to be generated). */
/* NOTE(review): the return statements for the individual checks below
   (presumably "return 1;" for the early exit, "return 0;" for each
   mismatch, and a final "return 1;") appear to have been lost in this
   extraction -- confirm against upstream.  */
1412 mips_comp_type_attributes (const_tree type1, const_tree type2)
1414 /* Check for mismatch of non-default calling convention. */
1415 if (TREE_CODE (type1) != FUNCTION_TYPE)
1418 /* Disallow mixed near/far attributes. */
1419 if (mips_far_type_p (type1) && mips_near_type_p (type2))
1421 if (mips_near_type_p (type1) && mips_far_type_p (type2))
1424 /* Mips16/nomips16 attributes must match exactly. */
1425 if (mips_nomips16_type_p (type1) != mips_nomips16_type_p (type2)
1426 || mips_mips16_type_p (type1) != mips_mips16_type_p (type2))
1432 /* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
1433 and *OFFSET_PTR. Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise. */
/* NOTE(review): the "else" branch that stores X and 0 (described by the
   comment above) appears to have been lost in this extraction.  */
1436 mips_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr)
1438 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
1440 *base_ptr = XEXP (x, 0);
1441 *offset_ptr = INTVAL (XEXP (x, 1));
1450 /* Return true if SYMBOL_REF X is associated with a global symbol
1451 (in the STB_GLOBAL sense). */
1454 mips_global_symbol_p (const_rtx x)
1456 const_tree const decl = SYMBOL_REF_DECL (x);
/* NOTE(review): the guard (presumably "if (!decl)") that selects this
   flag-based fallback appears to have been lost in this extraction.  */
1459 return !SYMBOL_REF_LOCAL_P (x);
1461 /* Weakref symbols are not TREE_PUBLIC, but their targets are global
1462 or weak symbols. Relocations in the object file will be against
1463 the target symbol, so it's that symbol's binding that matters here. */
1464 return DECL_P (decl) && (TREE_PUBLIC (decl) || DECL_WEAK (decl));
1467 /* Return true if SYMBOL_REF X binds locally. */
/* Use the decl-based binds_local_p hook when a decl is attached;
   otherwise fall back on the SYMBOL_REF_LOCAL_P flag.  */
1470 mips_symbol_binds_local_p (const_rtx x)
1472 return (SYMBOL_REF_DECL (x)
1473 ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
1474 : SYMBOL_REF_LOCAL_P (x));
1477 /* Return true if rtx constants of mode MODE should be put into a small
   data section.  */
1481 mips_rtx_constant_in_small_data_p (enum machine_mode mode)
1483 return (!TARGET_EMBEDDED_DATA
1484 && TARGET_LOCAL_SDATA
1485 && GET_MODE_SIZE (mode) <= mips_section_threshold);
1488 /* Return the method that should be used to access SYMBOL_REF or
1489 LABEL_REF X in context CONTEXT. */
/* NOTE(review): several guard conditions and braces in this function
   were lost in this extraction -- e.g. the condition guarding the first
   "return SYMBOL_GOT_DISP;", the TLS return after the
   SYMBOL_REF_TLS_MODEL test, and the conditions whose continuations
   appear at lines tagged "1529" and "1536" below.  Confirm against
   upstream before editing.  */
1491 static enum mips_symbol_type
1492 mips_classify_symbol (const_rtx x, enum mips_symbol_context context)
1495 return SYMBOL_GOT_DISP;
1497 if (GET_CODE (x) == LABEL_REF)
1499 /* LABEL_REFs are used for jump tables as well as text labels.
1500 Only return SYMBOL_PC_RELATIVE if we know the label is in
1501 the text section. */
1502 if (TARGET_MIPS16_SHORT_JUMP_TABLES)
1503 return SYMBOL_PC_RELATIVE;
1504 if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
1505 return SYMBOL_GOT_PAGE_OFST;
1506 return SYMBOL_ABSOLUTE;
1509 gcc_assert (GET_CODE (x) == SYMBOL_REF);
1511 if (SYMBOL_REF_TLS_MODEL (x))
1514 if (CONSTANT_POOL_ADDRESS_P (x))
1516 if (TARGET_MIPS16_TEXT_LOADS)
1517 return SYMBOL_PC_RELATIVE;
1519 if (TARGET_MIPS16_PCREL_LOADS && context == SYMBOL_CONTEXT_MEM)
1520 return SYMBOL_PC_RELATIVE;
1522 if (mips_rtx_constant_in_small_data_p (get_pool_mode (x)))
1523 return SYMBOL_GP_RELATIVE;
1526 /* Do not use small-data accesses for weak symbols; they may end up
   being resolved to a different object entirely.  (NOTE(review): the
   original wording of the rest of this comment was lost in this
   extraction.)  */
1529 && SYMBOL_REF_SMALL_P (x)
1530 && !SYMBOL_REF_WEAK (x))
1531 return SYMBOL_GP_RELATIVE;
1533 /* Don't use GOT accesses for locally-binding symbols when -mno-shared
   is in effect.  (NOTE(review): the remainder of this comment and the
   first line of the enclosing "if" condition were lost in this
   extraction.)  */
1536 && !(TARGET_ABSOLUTE_ABICALLS && mips_symbol_binds_local_p (x)))
1538 /* There are three cases to consider:
1540 - o32 PIC (either with or without explicit relocs)
1541 - n32/n64 PIC without explicit relocs
1542 - n32/n64 PIC with explicit relocs
1544 In the first case, both local and global accesses will use an
1545 R_MIPS_GOT16 relocation. We must correctly predict which of
1546 the two semantics (local or global) the assembler and linker
1547 will apply. The choice depends on the symbol's binding rather
1548 than its visibility.
1550 In the second case, the assembler will not use R_MIPS_GOT16
1551 relocations, but it chooses between local and global accesses
1552 in the same way as for o32 PIC.
1554 In the third case we have more freedom since both forms of
1555 access will work for any kind of symbol. However, there seems
1556 little point in doing things differently. */
1557 if (mips_global_symbol_p (x))
1558 return SYMBOL_GOT_DISP;
1560 return SYMBOL_GOT_PAGE_OFST;
1563 if (TARGET_MIPS16_PCREL_LOADS && context != SYMBOL_CONTEXT_CALL)
1564 return SYMBOL_FORCE_TO_MEM;
1565 return SYMBOL_ABSOLUTE;
1568 /* Classify symbolic expression X, given that it appears in context
   CONTEXT.  */
/* NOTE(review): the declaration of the local "offset" passed to
   split_const appears to have been lost in this extraction.  */
1571 static enum mips_symbol_type
1572 mips_classify_symbolic_expression (rtx x, enum mips_symbol_context context)
1576 split_const (x, &x, &offset);
1577 if (UNSPEC_ADDRESS_P (x))
1578 return UNSPEC_ADDRESS_TYPE (x);
1580 return mips_classify_symbol (x, context);
1583 /* Return true if OFFSET is within the range [0, ALIGN), where ALIGN
1584 is the alignment (in bytes) of SYMBOL_REF X. */
1587 mips_offset_within_alignment_p (rtx x, HOST_WIDE_INT offset)
1589 /* If for some reason we can't get the alignment for the
1590 symbol, initializing this to one means we will only accept
   a zero OFFSET (see the range check below).  */
1592 HOST_WIDE_INT align = 1;
/* NOTE(review): the declaration of "t" and the guard (presumably
   "if (t)") around the DECL_ALIGN_UNIT read appear to have been lost in
   this extraction.  */
1595 /* Get the alignment of the symbol we're referring to. */
1596 t = SYMBOL_REF_DECL (x);
1598 align = DECL_ALIGN_UNIT (t);
1600 return offset >= 0 && offset < align;
1603 /* Return true if X is a symbolic constant that can be used in context
1604 CONTEXT. If it is, store the type of the symbol in *SYMBOL_TYPE. */
/* NOTE(review): several lines of this function were lost in this
   extraction: the declaration of "offset", braces, some "return
   true"/"return false" statements, some case labels, and the trailing
   default handling.  Confirm against upstream before editing.  */
1607 mips_symbolic_constant_p (rtx x, enum mips_symbol_context context,
1608 enum mips_symbol_type *symbol_type)
1612 split_const (x, &x, &offset);
1613 if (UNSPEC_ADDRESS_P (x))
1615 *symbol_type = UNSPEC_ADDRESS_TYPE (x);
1616 x = UNSPEC_ADDRESS (x);
1618 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1620 *symbol_type = mips_classify_symbol (x, context);
1621 if (*symbol_type == SYMBOL_TLS)
1627 if (offset == const0_rtx)
1630 /* Check whether a nonzero offset is valid for the underlying
   relocations.  */
1632 switch (*symbol_type)
1634 case SYMBOL_ABSOLUTE:
1635 case SYMBOL_FORCE_TO_MEM:
1636 case SYMBOL_32_HIGH:
1637 case SYMBOL_64_HIGH:
1640 /* If the target has 64-bit pointers and the object file only
1641 supports 32-bit symbols, the values of those symbols will be
1642 sign-extended. In this case we can't allow an arbitrary offset
1643 in case the 32-bit value X + OFFSET has a different sign from X. */
1644 if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
1645 return offset_within_block_p (x, INTVAL (offset));
1647 /* In other cases the relocations can handle any offset. */
1650 case SYMBOL_PC_RELATIVE:
1651 /* Allow constant pool references to be converted to LABEL+CONSTANT.
1652 In this case, we no longer have access to the underlying constant,
1653 but the original symbol-based access was known to be valid. */
1654 if (GET_CODE (x) == LABEL_REF)
1659 case SYMBOL_GP_RELATIVE:
1660 /* Make sure that the offset refers to something within the
1661 same object block. This should guarantee that the final
1662 PC- or GP-relative offset is within the 16-bit limit. */
1663 return offset_within_block_p (x, INTVAL (offset));
1665 case SYMBOL_GOT_PAGE_OFST:
1666 case SYMBOL_GOTOFF_PAGE:
1667 /* If the symbol is global, the GOT entry will contain the symbol's
1668 address, and we will apply a 16-bit offset after loading it.
1669 If the symbol is local, the linker should provide enough local
1670 GOT entries for a 16-bit offset, but larger offsets may lead
   to out-of-range GOT entries.  (NOTE(review): the original closing
   line of this comment was lost in this extraction.)  */
1672 return SMALL_INT (offset);
1676 /* There is no carry between the HI and LO REL relocations, so the
1677 offset is only valid if we know it won't lead to such a carry. */
1678 return mips_offset_within_alignment_p (x, INTVAL (offset));
1680 case SYMBOL_GOT_DISP:
1681 case SYMBOL_GOTOFF_DISP:
1682 case SYMBOL_GOTOFF_CALL:
1683 case SYMBOL_GOTOFF_LOADGP:
1686 case SYMBOL_GOTTPREL:
1695 /* This function is used to implement REG_MODE_OK_FOR_BASE_P. */
/* NOTE(review): some early-return statements (after the
   HARD_REGISTER_NUM_P check and the fake-register check) appear to have
   been lost in this extraction.  */
1698 mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode, int strict)
1700 if (!HARD_REGISTER_NUM_P (regno))
1704 regno = reg_renumber[regno];
1707 /* These fake registers will be eliminated to either the stack or
1708 hard frame pointer, both of which are usually valid base registers.
1709 Reload deals with the cases where the eliminated form isn't valid. */
1710 if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
1713 /* In mips16 mode, the stack pointer can only address word and doubleword
1714 values, nothing smaller. There are two problems here:
1716 (a) Instantiating virtual registers can introduce new uses of the
1717 stack pointer. If these virtual registers are valid addresses,
1718 the stack pointer should be too.
1720 (b) Most uses of the stack pointer are not made explicit until
1721 FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
1722 We don't know until that stage whether we'll be eliminating to the
1723 stack pointer (which needs the restriction) or the hard frame
1724 pointer (which doesn't).
1726 All in all, it seems more consistent to only enforce this restriction
1727 during and after reload. */
1728 if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
1729 return !strict || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1731 return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
1735 /* Return true if X is a valid base register for the given mode.
1736 Allow only hard registers if STRICT. */
/* NOTE(review): the statement stripping the SUBREG (presumably
   "x = SUBREG_REG (x);") and the first line of the return expression
   appear to have been lost in this extraction.  */
1739 mips_valid_base_register_p (rtx x, enum machine_mode mode, int strict)
1741 if (!strict && GET_CODE (x) == SUBREG)
1745 && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict));
1749 /* Return true if X is a valid address for machine mode MODE. If it is,
1750 fill in INFO appropriately. STRICT is true if we should only accept
1751 hard base registers. */
/* NOTE(review): the case labels of this switch (apparently REG, PLUS,
   LO_SUM, CONST_INT and the symbolic cases), the default return, and
   the left-hand side of the "info->symbol_type" assignment appear to
   have been lost in this extraction.  Confirm against upstream before
   editing.  */
1754 mips_classify_address (struct mips_address_info *info, rtx x,
1755 enum machine_mode mode, int strict)
1757 switch (GET_CODE (x))
1761 info->type = ADDRESS_REG;
1763 info->offset = const0_rtx;
1764 return mips_valid_base_register_p (info->reg, mode, strict);
1767 info->type = ADDRESS_REG;
1768 info->reg = XEXP (x, 0);
1769 info->offset = XEXP (x, 1);
1770 return (mips_valid_base_register_p (info->reg, mode, strict)
1771 && const_arith_operand (info->offset, VOIDmode));
1774 info->type = ADDRESS_LO_SUM;
1775 info->reg = XEXP (x, 0);
1776 info->offset = XEXP (x, 1);
1777 /* We have to trust the creator of the LO_SUM to do something vaguely
1778 sane. Target-independent code that creates a LO_SUM should also
1779 create and verify the matching HIGH. Target-independent code that
1780 adds an offset to a LO_SUM must prove that the offset will not
1781 induce a carry. Failure to do either of these things would be
1782 a bug, and we are not required to check for it here. The MIPS
1783 backend itself should only create LO_SUMs for valid symbolic
1784 constants, with the high part being either a HIGH or a copy
   of it.  */
1787 = mips_classify_symbolic_expression (info->offset, SYMBOL_CONTEXT_MEM);
1788 return (mips_valid_base_register_p (info->reg, mode, strict)
1789 && mips_symbol_insns (info->symbol_type, mode) > 0
1790 && mips_lo_relocs[info->symbol_type] != 0);
1793 /* Small-integer addresses don't occur very often, but they
1794 are legitimate if $0 is a valid base register. */
1795 info->type = ADDRESS_CONST_INT;
1796 return !TARGET_MIPS16 && SMALL_INT (x);
1801 info->type = ADDRESS_SYMBOLIC;
1802 return (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_MEM,
1804 && mips_symbol_insns (info->symbol_type, mode) > 0
1805 && !mips_split_p[info->symbol_type]);
1812 /* Return true if X is a thread-local symbol. */
/* I.e. a SYMBOL_REF with a nonzero TLS model.  */
1815 mips_tls_operand_p (rtx x)
1817 return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
1820 /* Return true if X can not be forced into a constant pool. */
/* This is a for_each_rtx callback used by mips_cannot_force_const_mem;
   it reports whether *X is a thread-local SYMBOL_REF, which is what
   makes the containing constant unpoolable.  */
1823 mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
1825 return mips_tls_operand_p (*x);
1828 /* Return true if X can not be forced into a constant pool. */
/* NOTE(review): the declarations of "base" and "offset", the function
   braces, and the return statements appear to have been lost in this
   extraction -- confirm against upstream.  */
1831 mips_cannot_force_const_mem (rtx x)
1837 /* As an optimization, reject constants that mips_legitimize_move
1840 Suppose we have a multi-instruction sequence that loads constant C
1841 into register R. If R does not get allocated a hard register, and
1842 R is used in an operand that allows both registers and memory
1843 references, reload will consider forcing C into memory and using
1844 one of the instruction's memory alternatives. Returning false
1845 here will force it to use an input reload instead. */
1846 if (GET_CODE (x) == CONST_INT)
1849 split_const (x, &base, &offset);
1850 if (symbolic_operand (base, VOIDmode) && SMALL_INT (offset))
1854 if (TARGET_HAVE_TLS && for_each_rtx (&x, &mips_tls_symbol_ref_1, 0))
1860 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. We can't use blocks for
1861 constants when we're using a per-function constant pool. */
/* Both parameters are unused: the decision depends only on the target
   flag.  */
1864 mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
1865 const_rtx x ATTRIBUTE_UNUSED)
1867 return !TARGET_MIPS16_PCREL_LOADS;
1870 /* Like mips_symbol_insns, but treat extended MIPS16 instructions as a
1871 single instruction. We rely on the fact that, in the worst case,
1872 all instructions involved in a MIPS16 address calculation are usually
   extended instructions.  */
/* NOTE(review): this function is heavily truncated in this extraction:
   the "switch (type)" line, several return statements, some case
   labels, and the final default handling are missing.  Confirm against
   upstream before editing.  */
1876 mips_symbol_insns_1 (enum mips_symbol_type type, enum machine_mode mode)
1880 case SYMBOL_ABSOLUTE:
1881 /* When using 64-bit symbols, we need 5 preparatory instructions,
1884 lui $at,%highest(symbol)
1885 daddiu $at,$at,%higher(symbol)
1887 daddiu $at,$at,%hi(symbol)
1890 The final address is then $at + %lo(symbol). With 32-bit
1891 symbols we just need a preparatory lui for normal mode and
1892 a preparatory "li; sll" for MIPS16. */
1893 return ABI_HAS_64BIT_SYMBOLS ? 6 : TARGET_MIPS16 ? 3 : 2;
1895 case SYMBOL_GP_RELATIVE:
1896 /* Treat GP-relative accesses as taking a single instruction on
1897 MIPS16 too; the copy of $gp can often be shared. */
1900 case SYMBOL_PC_RELATIVE:
1901 /* PC-relative constants can be only be used with addiupc,
   and related pc-relative instructions.  (NOTE(review): the original
   closing line of this comment was lost in this extraction.)  */
1903 if (mode == MAX_MACHINE_MODE
1904 || GET_MODE_SIZE (mode) == 4
1905 || GET_MODE_SIZE (mode) == 8)
1908 /* The constant must be loaded using addiupc first. */
1911 case SYMBOL_FORCE_TO_MEM:
1912 /* The constant must be loaded from the constant pool. */
1915 case SYMBOL_GOT_DISP:
1916 /* The constant will have to be loaded from the GOT before it
1917 is used in an address. */
1918 if (mode != MAX_MACHINE_MODE)
1923 case SYMBOL_GOT_PAGE_OFST:
1924 /* Unless -funit-at-a-time is in effect, we can't be sure whether
1925 the local/global classification is accurate. See override_options
1928 The worst cases are:
1930 (1) For local symbols when generating o32 or o64 code. The assembler
1936 ...and the final address will be $at + %lo(symbol).
1938 (2) For global symbols when -mxgot. The assembler will use:
1940 lui $at,%got_hi(symbol)
1943 ...and the final address will be $at + %got_lo(symbol). */
1946 case SYMBOL_GOTOFF_PAGE:
1947 case SYMBOL_GOTOFF_DISP:
1948 case SYMBOL_GOTOFF_CALL:
1949 case SYMBOL_GOTOFF_LOADGP:
1950 case SYMBOL_32_HIGH:
1951 case SYMBOL_64_HIGH:
1957 case SYMBOL_GOTTPREL:
1960 /* A 16-bit constant formed by a single relocation, or a 32-bit
1961 constant formed from a high 16-bit relocation and a low 16-bit
1962 relocation. Use mips_split_p to determine which. */
1963 return !mips_split_p[type] ? 1 : TARGET_MIPS16 ? 3 : 2;
1966 /* We don't treat a bare TLS symbol as a constant. */
1972 /* If MODE is MAX_MACHINE_MODE, return the number of instructions needed
1973 to load symbols of type TYPE into a register. Return 0 if the given
1974 type of symbol cannot be used as an immediate operand.
1976 Otherwise, return the number of instructions needed to load or store
1977 values of mode MODE to or from addresses of type TYPE. Return 0 if
1978 the given type of symbol is not valid in addresses.
1980 In both cases, treat extended MIPS16 instructions as two instructions. */
/* Doubling the mips_symbol_insns_1 count for MIPS16 accounts for the
   extended (two-halfword) instruction encodings.  */
1983 mips_symbol_insns (enum mips_symbol_type type, enum machine_mode mode)
1985 return mips_symbol_insns_1 (type, mode) * (TARGET_MIPS16 ? 2 : 1);
1988 /* Return true if X is a legitimate $sp-based address for mode MODE. */
1991 mips_stack_address_p (rtx x, enum machine_mode mode)
1993 struct mips_address_info addr;
1995 return (mips_classify_address (&addr, x, mode, false)
1996 && addr.type == ADDRESS_REG
1997 && addr.reg == stack_pointer_rtx);
2000 /* Return true if a value at OFFSET bytes from BASE can be accessed
2001 using an unextended mips16 instruction. MODE is the mode of the
   value.
2004 Usually the offset in an unextended instruction is a 5-bit field.
2005 The offset is unsigned and shifted left once for HIs, twice
2006 for SIs, and so on. An exception is SImode accesses off the
2007 stack pointer, which have an 8-bit immediate field. */
/* NOTE(review): the first line of the condition below (presumably
   "if (TARGET_MIPS16") and the final "return false;" appear to have
   been lost in this extraction.  */
2010 mips16_unextended_reference_p (enum machine_mode mode, rtx base, rtx offset)
2013 && GET_CODE (offset) == CONST_INT
2014 && INTVAL (offset) >= 0
2015 && (INTVAL (offset) & (GET_MODE_SIZE (mode) - 1)) == 0)
2017 if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
2018 return INTVAL (offset) < 256 * GET_MODE_SIZE (mode);
2019 return INTVAL (offset) < 32 * GET_MODE_SIZE (mode);
2025 /* Return the number of instructions needed to load or store a value
2026 of mode MODE at X. Return 0 if X isn't valid for MODE. Assume that
2027 multiword moves may need to be split into word moves if MIGHT_SPLIT_P,
2028 otherwise assume that a single load or store is enough.
2030 For mips16 code, count extended instructions as two instructions. */
2033 mips_address_insns (rtx x, enum machine_mode mode, bool might_split_p)
2035 struct mips_address_info addr;
/* FACTOR is the number of word accesses a multiword move splits into.  */
2038 /* BLKmode is used for single unaligned loads and stores and should
2039 not count as a multiword mode. (GET_MODE_SIZE (BLKmode) is pretty
2040 meaningless, so we have to single it out as a special case one way
2042 if (mode != BLKmode && might_split_p)
2043 factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2047 if (mips_classify_address (&addr, x, mode, false))
/* A MIPS16 reference that needs an extended instruction costs double.  */
2052 && !mips16_unextended_reference_p (mode, addr.reg, addr.offset))
2056 case ADDRESS_LO_SUM:
2057 return (TARGET_MIPS16 ? factor * 2 : factor);
2059 case ADDRESS_CONST_INT:
2062 case ADDRESS_SYMBOLIC:
2063 return factor * mips_symbol_insns (addr.symbol_type, mode);
2069 /* Likewise for constant X. */
/* I.e. return the number of instructions needed to load constant X
   into a register, or 0 if X cannot be used as an immediate.  */
2072 mips_const_insns (rtx x)
2074 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2075 enum mips_symbol_type symbol_type;
2078 switch (GET_CODE (x))
2081 if (!mips_symbolic_constant_p (XEXP (x, 0), SYMBOL_CONTEXT_LEA,
2083 || !mips_split_p[symbol_type])
2086 /* This is simply an lui for normal mode. It is an extended
2087 "li" followed by an extended "sll" for MIPS16. */
2088 return TARGET_MIPS16 ? 4 : 1;
2092 /* Unsigned 8-bit constants can be loaded using an unextended
2093 LI instruction. Unsigned 16-bit constants can be loaded
2094 using an extended LI. Negative constants must be loaded
2095 using LI and then negated. */
2096 return (INTVAL (x) >= 0 && INTVAL (x) < 256 ? 1
2097 : SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
2098 : INTVAL (x) > -256 && INTVAL (x) < 0 ? 2
2099 : SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
/* Non-MIPS16: let mips_build_integer compute the sequence length.  */
2102 return mips_build_integer (codes, INTVAL (x));
2106 return (!TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0);
2112 /* See if we can refer to X directly. */
2113 if (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_LEA, &symbol_type))
2114 return mips_symbol_insns (symbol_type, MAX_MACHINE_MODE);
2116 /* Otherwise try splitting the constant into a base and offset.
2117 16-bit offsets can be added using an extra addiu. Larger offsets
2118 must be calculated separately and then added to the base. */
2119 split_const (x, &x, &offset);
2122 int n = mips_const_insns (x);
2125 if (SMALL_INT (offset))
2128 return n + 1 + mips_build_integer (codes, INTVAL (offset));
2135 return mips_symbol_insns (mips_classify_symbol (x, SYMBOL_CONTEXT_LEA),
2144 /* Return the number of instructions needed to implement INSN,
2145 given that it loads from or stores to MEM. Count extended
2146 mips16 instructions as two instructions. */
2149 mips_load_store_insns (rtx mem, rtx insn)
2151 enum machine_mode mode;
2155 gcc_assert (MEM_P (mem));
2156 mode = GET_MODE (mem);
2158 /* Try to prove that INSN does not need to be split. */
/* Pessimistically assume a split; a 64-bit single_set that
   mips_split_64bit_move_p rejects is known to stay whole.  */
2159 might_split_p = true;
2160 if (GET_MODE_BITSIZE (mode) == 64)
2162 set = single_set (insn);
2163 if (set && !mips_split_64bit_move_p (SET_DEST (set), SET_SRC (set)))
2164 might_split_p = false;
2167 return mips_address_insns (XEXP (mem, 0), mode, might_split_p);
2171 /* Return the number of instructions needed for an integer division. */
2174 mips_idiv_insns (void)
/* NOTE(review): the cost contributions for zero-divide checking and
   the R4000/R4400 errata workaround are in sampled-out lines here;
   presumably each branch adds to the base divide cost -- verify against
   the full source.  */
2179 if (TARGET_CHECK_ZERO_DIV)
2181 if (GENERATE_DIVIDE_TRAPS)
2187 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
2192 /* This function is used to implement GO_IF_LEGITIMATE_ADDRESS. It
2193 returns a nonzero value if X is a legitimate address for a memory
2194 operand of the indicated MODE. STRICT is nonzero if this function
2195 is called during reload. */
2198 mips_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
2200 struct mips_address_info addr;
/* ADDR is only needed as scratch output; the classification result
   alone decides legitimacy.  */
2202 return mips_classify_address (&addr, x, mode, strict);
2205 /* Emit a move from SRC to DEST. Assume that the move expanders can
2206 handle all moves if !can_create_pseudo_p (). The distinction is
2207 important because, unlike emit_move_insn, the move expanders know
2208 how to force Pmode objects into the constant pool even when the
2209 constant pool address is not itself legitimate. */
2212 mips_emit_move (rtx dest, rtx src)
/* emit_move_insn_1 invokes the expanders directly, bypassing the
   legitimization that emit_move_insn would attempt.  */
2214 return (can_create_pseudo_p ()
2215 ? emit_move_insn (dest, src)
2216 : emit_move_insn_1 (dest, src));
2219 /* Copy VALUE to a register and return that register. If new pseudos
2220 are allowed, copy it into a new register, otherwise use DEST. */
2223 mips_force_temporary (rtx dest, rtx value)
2225 if (can_create_pseudo_p ())
2226 return force_reg (Pmode, value);
/* Reload in progress: reuse DEST itself as the temporary.  */
2229 mips_emit_move (copy_rtx (dest), value);
2235 /* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
2236 it appears in a MEM of that mode. Return true if ADDR is a legitimate
2237 constant in that context and can be split into a high part and a LO_SUM.
2238 If so, and if LO_SUM_OUT is nonnull, emit the high part and return
2239 the LO_SUM in *LO_SUM_OUT. Leave *LO_SUM_OUT unchanged otherwise.
2241 TEMP is as for mips_force_temporary and is used to load the high
2242 part into a register. */
2245 mips_split_symbol (rtx temp, rtx addr, enum machine_mode mode, rtx *lo_sum_out)
2247 enum mips_symbol_context context;
2248 enum mips_symbol_type symbol_type;
/* Classify ADDR in the appropriate context; refuse symbols that are
   invalid here or that have no high/lo_sum split.  */
2251 context = (mode == MAX_MACHINE_MODE
2252 ? SYMBOL_CONTEXT_LEA
2253 : SYMBOL_CONTEXT_MEM);
2254 if (!mips_symbolic_constant_p (addr, context, &symbol_type)
2255 || mips_symbol_insns (symbol_type, mode) == 0
2256 || !mips_split_p[symbol_type])
/* GP-relative symbols use the global pointer as the high part,
   loaded via a dedicated pattern (or a cached pseudo for MIPS16).  */
2261 if (symbol_type == SYMBOL_GP_RELATIVE)
2263 if (!can_create_pseudo_p ())
2265 emit_insn (gen_load_const_gp (copy_rtx (temp)));
2269 high = mips16_gp_pseudo_reg ();
2273 high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
2274 high = mips_force_temporary (temp, high);
2276 *lo_sum_out = gen_rtx_LO_SUM (Pmode, high, addr);
2282 /* Wrap symbol or label BASE in an unspec address of type SYMBOL_TYPE
2283 and add CONST_INT OFFSET to the result. */
2286 mips_unspec_address_offset (rtx base, rtx offset,
2287 enum mips_symbol_type symbol_type)
/* The UNSPEC number encodes the symbol type (see UNSPEC_ADDRESS_TYPE).  */
2289 base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
2290 UNSPEC_ADDRESS_FIRST + symbol_type);
2291 if (offset != const0_rtx)
2292 base = gen_rtx_PLUS (Pmode, base, offset);
2293 return gen_rtx_CONST (Pmode, base);
2296 /* Return an UNSPEC address with underlying address ADDRESS and symbol
2297 type SYMBOL_TYPE. */
2300 mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
/* Separate any constant offset first so the UNSPEC wraps only the
   bare symbol or label.  */
2304 split_const (address, &base, &offset);
2305 return mips_unspec_address_offset (base, offset, symbol_type);
2309 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
2310 high part to BASE and return the result. Just return BASE otherwise.
2311 TEMP is available as a temporary register if needed.
2313 The returned expression can be used as the first operand to a LO_SUM. */
2316 mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
2317 enum mips_symbol_type symbol_type)
/* Only split symbol types have a separate %hi component; for the rest
   BASE is already a valid LO_SUM operand.  */
2319 if (mips_split_p[symbol_type])
2321 addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
2322 addr = mips_force_temporary (temp, addr);
2323 return mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
2329 /* Return a legitimate address for REG + OFFSET. TEMP is as for
2330 mips_force_temporary; it is only needed when OFFSET is not a
2334 mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
2336 if (!SMALL_OPERAND (offset))
2341 /* Load the full offset into a register so that we can use
2342 an unextended instruction for the address itself. */
2343 high = GEN_INT (offset);
2348 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH. */
2349 high = GEN_INT (CONST_HIGH_PART (offset));
2350 offset = CONST_LOW_PART (offset);
/* Add the out-of-range part into a fresh base register, leaving only
   an in-range constant for the final address.  */
2352 high = mips_force_temporary (temp, high);
2353 reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
2355 return plus_constant (reg, offset);
2358 /* Emit a call to __tls_get_addr. SYM is the TLS symbol we are
2359 referencing, and TYPE is the symbol type to use (either global
2360 dynamic or local dynamic). V0 is an RTX for the return value
2361 location. The entire insn sequence is returned. */
/* Cached SYMBOL_REF for __tls_get_addr, created on first use.  */
2363 static GTY(()) rtx mips_tls_symbol;
2366 mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
2368 rtx insn, loc, tga, a0;
2370 a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
2372 if (!mips_tls_symbol)
2373 mips_tls_symbol = init_one_libfunc ("__tls_get_addr");
2375 loc = mips_unspec_address (sym, type);
/* Pass the GOT entry for SYM as the single argument in $a0.  */
2379 emit_insn (gen_rtx_SET (Pmode, a0,
2380 gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc)));
2381 tga = gen_rtx_MEM (Pmode, mips_tls_symbol);
2382 insn = emit_call_insn (gen_call_value (v0, tga, const0_rtx, const0_rtx));
/* Mark the call const/pure so the optimizers may CSE it, and record
   the registers it uses.  */
2383 CONST_OR_PURE_CALL_P (insn) = 1;
2384 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), v0);
2385 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
2386 insn = get_insns ();
2393 /* Generate the code to access LOC, a thread local SYMBOL_REF. The
2394 return value will be a valid address and move_operand (either a REG
2398 mips_legitimize_tls_address (rtx loc)
2400 rtx dest, insn, v0, v1, tmp1, tmp2, eqv;
2401 enum tls_model model;
/* TLS is not implemented for MIPS16.  */
2405 sorry ("MIPS16 TLS");
2406 return gen_reg_rtx (Pmode);
2409 v0 = gen_rtx_REG (Pmode, GP_RETURN);
2410 v1 = gen_rtx_REG (Pmode, GP_RETURN + 1);
2412 model = SYMBOL_REF_TLS_MODEL (loc);
2413 /* Only TARGET_ABICALLS code can have more than one module; other
2414 code must be static and should not use a GOT. All TLS models
2415 reduce to local exec in this situation. */
2416 if (!TARGET_ABICALLS)
2417 model = TLS_MODEL_LOCAL_EXEC;
2421 case TLS_MODEL_GLOBAL_DYNAMIC:
2422 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSGD, v0);
2423 dest = gen_reg_rtx (Pmode);
2424 emit_libcall_block (insn, dest, v0, loc);
2427 case TLS_MODEL_LOCAL_DYNAMIC:
2428 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSLDM, v0);
2429 tmp1 = gen_reg_rtx (Pmode);
2431 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2432 share the LDM result with other LD model accesses. */
2433 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2435 emit_libcall_block (insn, tmp1, v0, eqv);
/* Add the module base to the %dtprel offset of LOC.  */
2437 tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_DTPREL);
2438 dest = gen_rtx_LO_SUM (Pmode, tmp2,
2439 mips_unspec_address (loc, SYMBOL_DTPREL));
2442 case TLS_MODEL_INITIAL_EXEC:
/* Load the thread pointer into V1 and the %gottprel offset into TMP1.  */
2443 tmp1 = gen_reg_rtx (Pmode);
2444 tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL);
2445 if (Pmode == DImode)
2447 emit_insn (gen_tls_get_tp_di (v1));
2448 emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2));
2452 emit_insn (gen_tls_get_tp_si (v1));
2453 emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2));
2455 dest = gen_reg_rtx (Pmode);
2456 emit_insn (gen_add3_insn (dest, tmp1, v1));
2459 case TLS_MODEL_LOCAL_EXEC:
/* The address is simply the thread pointer plus a %tprel offset.  */
2460 if (Pmode == DImode)
2461 emit_insn (gen_tls_get_tp_di (v1));
2463 emit_insn (gen_tls_get_tp_si (v1));
2465 tmp1 = mips_unspec_offset_high (NULL, v1, loc, SYMBOL_TPREL);
2466 dest = gen_rtx_LO_SUM (Pmode, tmp1,
2467 mips_unspec_address (loc, SYMBOL_TPREL));
2477 /* This function is used to implement LEGITIMIZE_ADDRESS. If *XLOC can
2478 be legitimized in a way that the generic machinery might not expect,
2479 put the new address in *XLOC and return true. MODE is the mode of
2480 the memory being accessed. */
2483 mips_legitimize_address (rtx *xloc, enum machine_mode mode)
/* TLS symbols need a model-specific access sequence.  */
2485 if (mips_tls_operand_p (*xloc))
2487 *xloc = mips_legitimize_tls_address (*xloc);
2491 /* See if the address can split into a high part and a LO_SUM. */
2492 if (mips_split_symbol (NULL, *xloc, mode, xloc))
2495 if (GET_CODE (*xloc) == PLUS && GET_CODE (XEXP (*xloc, 1)) == CONST_INT)
2497 /* Handle REG + CONSTANT using mips_add_offset. */
2500 reg = XEXP (*xloc, 0);
2501 if (!mips_valid_base_register_p (reg, mode, 0))
2502 reg = copy_to_mode_reg (Pmode, reg);
2503 *xloc = mips_add_offset (0, reg, INTVAL (XEXP (*xloc, 1)));
2511 /* Subroutine of mips_build_integer (with the same interface).
2512 Assume that the final action in the sequence should be a left shift. */
2515 mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
2517 unsigned int i, shift;
2519 /* Shift VALUE right until its lowest bit is set. Shift arithmetically
2520 since signed numbers are easier to load than unsigned ones. */
2522 while ((value & 1) == 0)
2523 value /= 2, shift++;
/* Build the reduced value, then append the shift that restores it.  */
2525 i = mips_build_integer (codes, value);
2526 codes[i].code = ASHIFT;
2527 codes[i].value = shift;
2532 /* As for mips_build_shift, but assume that the final action will be
2533 an IOR or PLUS operation. */
2536 mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
2538 unsigned HOST_WIDE_INT high;
/* HIGH is VALUE with its low 16 bits cleared.  */
2541 high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
2542 if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
2544 /* The constant is too complex to load with a simple lui/ori pair
2545 so our goal is to clear as many trailing zeros as possible.
2546 In this case, we know bit 16 is set and that the low 16 bits
2547 form a negative number. If we subtract that number from VALUE,
2548 we will clear at least the lowest 17 bits, maybe more. */
2549 i = mips_build_integer (codes, CONST_HIGH_PART (value));
2550 codes[i].code = PLUS;
2551 codes[i].value = CONST_LOW_PART (value);
/* Otherwise build the high part and OR in the low 16 bits.  */
2555 i = mips_build_integer (codes, high);
2556 codes[i].code = IOR;
2557 codes[i].value = value & 0xffff;
2563 /* Fill CODES with a sequence of rtl operations to load VALUE.
2564 Return the number of operations needed. */
2567 mips_build_integer (struct mips_integer_op *codes,
2568 unsigned HOST_WIDE_INT value)
/* Single-instruction constants: addiu, ori or lui respectively.  */
2570 if (SMALL_OPERAND (value)
2571 || SMALL_OPERAND_UNSIGNED (value)
2572 || LUI_OPERAND (value))
2574 /* The value can be loaded with a single instruction. */
2575 codes[0].code = UNKNOWN;
2576 codes[0].value = value;
2579 else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
2581 /* Either the constant is a simple LUI/ORI combination or its
2582 lowest bit is set. We don't want to shift in this case. */
2583 return mips_build_lower (codes, value);
2585 else if ((value & 0xffff) == 0)
2587 /* The constant will need at least three actions. The lowest
2588 16 bits are clear, so the final action will be a shift. */
2589 return mips_build_shift (codes, value);
2593 /* The final action could be a shift, add or inclusive OR.
2594 Rather than use a complex condition to select the best
2595 approach, try both mips_build_shift and mips_build_lower
2596 and pick the one that gives the shortest sequence.
2597 Note that this case is only used once per constant. */
2598 struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
2599 unsigned int cost, alt_cost;
2601 cost = mips_build_shift (codes, value);
2602 alt_cost = mips_build_lower (alt_codes, value);
2603 if (alt_cost < cost)
/* The lower-based sequence won; copy it into CODES.  */
2605 memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
2613 /* Load VALUE into DEST, using TEMP as a temporary register if need be. */
2616 mips_move_integer (rtx dest, rtx temp, unsigned HOST_WIDE_INT value)
2618 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2619 enum machine_mode mode;
2620 unsigned int i, cost;
2623 mode = GET_MODE (dest);
2624 cost = mips_build_integer (codes, value);
2626 /* Apply each binary operation to X. Invariant: X is a legitimate
2627 source operand for a SET pattern. */
2628 x = GEN_INT (codes[0].value);
2629 for (i = 1; i < cost; i++)
/* Without pseudos, stage the partial result through TEMP.  */
2631 if (!can_create_pseudo_p ())
2633 emit_insn (gen_rtx_SET (VOIDmode, temp, x));
2637 x = force_reg (mode, x);
2638 x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
/* Emit the final assignment to DEST.  */
2641 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
2645 /* Subroutine of mips_legitimize_move. Move constant SRC into register
2646 DEST given that SRC satisfies immediate_operand but doesn't satisfy
2650 mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
2654 /* Split moves of big integers into smaller pieces. */
2655 if (splittable_const_int_operand (src, mode))
2657 mips_move_integer (dest, dest, INTVAL (src));
2661 /* Split moves of symbolic constants into high/low pairs. */
2662 if (mips_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
2664 emit_insn (gen_rtx_SET (VOIDmode, dest, src));
/* TLS symbols need their own access sequence.  */
2668 if (mips_tls_operand_p (src))
2670 mips_emit_move (dest, mips_legitimize_tls_address (src));
2674 /* If we have (const (plus symbol offset)), and that expression cannot
2675 be forced into memory, load the symbol first and add in the offset.
2676 In non-MIPS16 mode, prefer to do this even if the constant _can_ be
2677 forced into memory, as it usually produces better code. */
2678 split_const (src, &base, &offset);
2679 if (offset != const0_rtx
2680 && (targetm.cannot_force_const_mem (src)
2681 || (!TARGET_MIPS16 && can_create_pseudo_p ())))
2683 base = mips_force_temporary (dest, base);
2684 mips_emit_move (dest, mips_add_offset (0, base, INTVAL (offset)));
/* Last resort: load the constant from the constant pool.  */
2688 src = force_const_mem (mode, src);
2690 /* When using explicit relocs, constant pool references are sometimes
2691 not legitimate addresses. */
2692 mips_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0));
2693 mips_emit_move (dest, src);
2697 /* If (set DEST SRC) is not a valid instruction, emit an equivalent
2698 sequence that is valid. */
2701 mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
/* At least one operand must be a register (or $0-equivalent source).  */
2703 if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
2705 mips_emit_move (dest, force_reg (mode, src));
2709 /* Check for individual, fully-reloaded mflo and mfhi instructions. */
2710 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
2711 && REG_P (src) && MD_REG_P (REGNO (src))
2712 && REG_P (dest) && GP_REG_P (REGNO (dest)))
/* The mfhilo patterns take both HI and LO as operands, so work out
   which one is the "other" half.  */
2714 int other_regno = REGNO (src) == HI_REGNUM ? LO_REGNUM : HI_REGNUM;
2715 if (GET_MODE_SIZE (mode) <= 4)
2716 emit_insn (gen_mfhilo_si (gen_rtx_REG (SImode, REGNO (dest)),
2717 gen_rtx_REG (SImode, REGNO (src)),
2718 gen_rtx_REG (SImode, other_regno)));
2720 emit_insn (gen_mfhilo_di (gen_rtx_REG (DImode, REGNO (dest)),
2721 gen_rtx_REG (DImode, REGNO (src)),
2722 gen_rtx_REG (DImode, other_regno)));
2726 /* We need to deal with constants that would be legitimate
2727 immediate_operands but not legitimate move_operands. */
2728 if (CONSTANT_P (src) && !move_operand (src, mode))
2730 mips_legitimize_const_move (mode, dest, src);
/* Record the original constant so later passes still see its value.  */
2731 set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
2737 /* We need a lot of little routines to check constant values on the
2738 mips16. These are used to figure out how long the instruction will
2739 be. It would be much better to do this using constraints, but
2740 there aren't nearly enough letters available. */
/* Return true if OP is a CONST_INT in [LOW, HIGH] with the MASK bits
   clear (MASK encodes a required alignment).  */
2743 m16_check_op (rtx op, int low, int high, int mask)
2745 return (GET_CODE (op) == CONST_INT
2746 && INTVAL (op) >= low
2747 && INTVAL (op) <= high
2748 && (INTVAL (op) & mask) == 0);
/* Predicates for MIPS16 immediate fields.  Naming appears to follow
   [n](u|s)imm<BITS>_<SCALE>: unsigned/signed (n = negated range),
   BITS-wide field, values scaled by SCALE bytes -- the ranges and
   alignment masks below match that reading.  */
2752 m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2754 return m16_check_op (op, 0x1, 0x8, 0);
2758 m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2760 return m16_check_op (op, - 0x8, 0x7, 0);
2764 m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2766 return m16_check_op (op, - 0x7, 0x8, 0);
2770 m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2772 return m16_check_op (op, - 0x10, 0xf, 0);
2776 m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2778 return m16_check_op (op, - 0xf, 0x10, 0);
2782 m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2784 return m16_check_op (op, (- 0x10) << 2, 0xf << 2, 3);
2788 m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2790 return m16_check_op (op, (- 0xf) << 2, 0x10 << 2, 3);
2794 m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2796 return m16_check_op (op, - 0x80, 0x7f, 0);
2800 m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2802 return m16_check_op (op, - 0x7f, 0x80, 0);
2806 m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2808 return m16_check_op (op, 0x0, 0xff, 0);
2812 m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2814 return m16_check_op (op, - 0xff, 0x0, 0);
2818 m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2820 return m16_check_op (op, - 0x1, 0xfe, 0);
2824 m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2826 return m16_check_op (op, 0x0, 0xff << 2, 3);
2830 m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2832 return m16_check_op (op, (- 0xff) << 2, 0x0, 3);
2836 m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2838 return m16_check_op (op, (- 0x80) << 3, 0x7f << 3, 7);
2842 m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2844 return m16_check_op (op, (- 0x7f) << 3, 0x80 << 3, 7);
2847 /* Return true if ADDR matches the pattern for the lwxs load scaled indexed
2848 address instruction. */
2851 mips_lwxs_address_p (rtx addr)
/* The address must have the shape (plus (mult reg 4) reg).  */
2854 && GET_CODE (addr) == PLUS
2855 && REG_P (XEXP (addr, 1)))
2857 rtx offset = XEXP (addr, 0);
2858 if (GET_CODE (offset) == MULT
2859 && REG_P (XEXP (offset, 0))
2860 && GET_CODE (XEXP (offset, 1)) == CONST_INT
2861 && INTVAL (XEXP (offset, 1)) == 4)
/* Implement TARGET_RTX_COSTS: estimate the cost of expression X when
   it appears inside OUTER_CODE, writing the result to *TOTAL.
   NOTE(review): several case labels and early returns in this function
   fall in sampled-out lines; the surviving fragments are grouped by
   the comments below.  */
2868 mips_rtx_costs (rtx x, int code, int outer_code, int *total)
2870 enum machine_mode mode = GET_MODE (x);
2871 bool float_mode_p = FLOAT_MODE_P (mode);
/* Constant operands (MIPS16 heuristics first).  */
2878 /* A number between 1 and 8 inclusive is efficient for a shift.
2879 Otherwise, we will need an extended instruction. */
2880 if ((outer_code) == ASHIFT || (outer_code) == ASHIFTRT
2881 || (outer_code) == LSHIFTRT)
2883 if (INTVAL (x) >= 1 && INTVAL (x) <= 8)
2886 *total = COSTS_N_INSNS (1);
2890 /* We can use cmpi for an xor with an unsigned 16-bit value. */
2891 if ((outer_code) == XOR
2892 && INTVAL (x) >= 0 && INTVAL (x) < 0x10000)
2898 /* We may be able to use slt or sltu for a comparison with a
2899 signed 16-bit value. (The boundary conditions aren't quite
2900 right, but this is just a heuristic anyhow.) */
2901 if (((outer_code) == LT || (outer_code) == LE
2902 || (outer_code) == GE || (outer_code) == GT
2903 || (outer_code) == LTU || (outer_code) == LEU
2904 || (outer_code) == GEU || (outer_code) == GTU)
2905 && INTVAL (x) >= -0x8000 && INTVAL (x) < 0x8000)
2911 /* Equality comparisons with 0 are cheap. */
2912 if (((outer_code) == EQ || (outer_code) == NE)
2919 /* Constants in the range 0...255 can be loaded with an unextended
2920 instruction. They are therefore as cheap as a register move.
2922 Given the choice between "li R1,0...255" and "move R1,R2"
2923 (where R2 is a known constant), it is usually better to use "li",
2924 since we do not want to unnecessarily extend the lifetime
2926 if (outer_code == SET
2928 && INTVAL (x) < 256)
2936 /* These can be used anywhere. */
2941 /* Otherwise fall through to the handling below because
2942 we'll need to construct the constant. */
2948 if (LEGITIMATE_CONSTANT_P (x))
2950 *total = COSTS_N_INSNS (1);
2955 /* The value will need to be fetched from the constant pool. */
2956 *total = CONSTANT_POOL_COST;
/* Memory references.  */
2962 /* If the address is legitimate, return the number of
2963 instructions it needs. */
2964 rtx addr = XEXP (x, 0);
2965 int n = mips_address_insns (addr, GET_MODE (x), true);
2968 *total = COSTS_N_INSNS (n + 1);
2971 /* Check for scaled indexed address. */
2972 if (mips_lwxs_address_p (addr))
2974 *total = COSTS_N_INSNS (2);
2977 /* Otherwise use the default handling. */
2982 *total = COSTS_N_INSNS (6);
/* Logical and shift operations: doubled cost for 64-bit values on
   32-bit targets.  */
2986 *total = COSTS_N_INSNS ((mode == DImode && !TARGET_64BIT) ? 2 : 1);
2992 if (mode == DImode && !TARGET_64BIT)
2994 *total = COSTS_N_INSNS (2);
3002 if (mode == DImode && !TARGET_64BIT)
3004 *total = COSTS_N_INSNS ((GET_CODE (XEXP (x, 1)) == CONST_INT)
3012 *total = COSTS_N_INSNS (1);
3014 *total = COSTS_N_INSNS (4);
3018 *total = COSTS_N_INSNS (1);
/* Addition/subtraction: use the tuned FP cost where applicable.  */
3025 *total = mips_cost->fp_add;
3029 else if (mode == DImode && !TARGET_64BIT)
3031 *total = COSTS_N_INSNS (4);
3037 if (mode == DImode && !TARGET_64BIT)
3039 *total = COSTS_N_INSNS (4);
/* Multiplication and division: costs come from the CPU tuning table.  */
3046 *total = mips_cost->fp_mult_sf;
3048 else if (mode == DFmode)
3049 *total = mips_cost->fp_mult_df;
3051 else if (mode == SImode)
3052 *total = mips_cost->int_mult_si;
3055 *total = mips_cost->int_mult_di;
3064 *total = mips_cost->fp_div_sf;
3066 *total = mips_cost->fp_div_df;
3075 *total = mips_cost->int_div_di;
3077 *total = mips_cost->int_div_si;
3082 /* A sign extend from SImode to DImode in 64-bit mode is often
3083 zero instructions, because the result can often be used
3084 directly by another instruction; we'll call it one. */
3085 if (TARGET_64BIT && mode == DImode
3086 && GET_MODE (XEXP (x, 0)) == SImode)
3087 *total = COSTS_N_INSNS (1);
3089 *total = COSTS_N_INSNS (2);
3093 if (TARGET_64BIT && mode == DImode
3094 && GET_MODE (XEXP (x, 0)) == SImode)
3095 *total = COSTS_N_INSNS (2);
3097 *total = COSTS_N_INSNS (1);
/* Int<->float conversions cost the same as an FP addition.  */
3101 case UNSIGNED_FLOAT:
3104 case FLOAT_TRUNCATE:
3106 *total = mips_cost->fp_add;
3114 /* Provide the costs of an addressing mode that contains ADDR.
3115 If ADDR is not a valid address, its cost is irrelevant. */
3118 mips_address_cost (rtx addr)
/* Use the single-access (non-split) instruction count as the cost.  */
3120 return mips_address_insns (addr, SImode, false);
3123 /* Return one word of double-word value OP, taking into account the fixed
3124 endianness of certain registers. HIGH_P is true to select the high part,
3125 false to select the low part. */
3128 mips_subword (rtx op, int high_p)
3131 enum machine_mode mode;
3133 mode = GET_MODE (op);
3134 if (mode == VOIDmode)
/* BYTE is the offset of the requested word for the target endianness.  */
3137 if (TARGET_BIG_ENDIAN ? !high_p : high_p)
3138 byte = UNITS_PER_WORD;
/* FPR pairs always keep the high word in the odd register, regardless
   of memory endianness.  */
3142 if (FP_REG_RTX_P (op))
3143 return gen_rtx_REG (word_mode, high_p ? REGNO (op) + 1 : REGNO (op));
3146 return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
3148 return simplify_gen_subreg (word_mode, op, mode, byte);
3152 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
3155 mips_split_64bit_move_p (rtx dest, rtx src)
3160 /* FP->FP moves can be done in a single instruction. */
3161 if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
3164 /* Check for floating-point loads and stores. They can be done using
3165 ldc1 and sdc1 on MIPS II and above. */
3168 if (FP_REG_RTX_P (dest) && MEM_P (src))
3170 if (FP_REG_RTX_P (src) && MEM_P (dest))
3177 /* Split a 64-bit move from SRC to DEST assuming that
3178 mips_split_64bit_move_p holds.
3180 Moves into and out of FPRs cause some difficulty here. Such moves
3181 will always be DFmode, since paired FPRs are not allowed to store
3182 DImode values. The most natural representation would be two separate
3183 32-bit moves, such as:
3185 (set (reg:SI $f0) (mem:SI ...))
3186 (set (reg:SI $f1) (mem:SI ...))
3188 However, the second insn is invalid because odd-numbered FPRs are
3189 not allowed to store independent values. Use the patterns load_df_low,
3190 load_df_high and store_df_high instead. */
3193 mips_split_64bit_move (rtx dest, rtx src)
3195 if (FP_REG_RTX_P (dest))
3197 /* Loading an FPR from memory or from GPRs. */
/* mthc1 path: low word first, then the high half via mthc1.  */
3200 dest = gen_lowpart (DFmode, dest);
3201 emit_insn (gen_load_df_low (dest, mips_subword (src, 0)));
3202 emit_insn (gen_mthc1 (dest, mips_subword (src, 1),
/* Fallback path using the load_df_low/load_df_high pair.  */
3207 emit_insn (gen_load_df_low (copy_rtx (dest),
3208 mips_subword (src, 0)));
3209 emit_insn (gen_load_df_high (dest, mips_subword (src, 1),
3213 else if (FP_REG_RTX_P (src))
3215 /* Storing an FPR into memory or GPRs. */
3218 src = gen_lowpart (DFmode, src);
3219 mips_emit_move (mips_subword (dest, 0), mips_subword (src, 0));
3220 emit_insn (gen_mfhc1 (mips_subword (dest, 1), src));
3224 mips_emit_move (mips_subword (dest, 0), mips_subword (src, 0));
3225 emit_insn (gen_store_df_high (mips_subword (dest, 1), src));
3230 /* The operation can be split into two normal moves. Decide in
3231 which order to do them. */
3234 low_dest = mips_subword (dest, 0);
/* If the low destination overlaps SRC, move the high word first so
   the first move does not clobber input still needed by the second.  */
3235 if (REG_P (low_dest)
3236 && reg_overlap_mentioned_p (low_dest, src))
3238 mips_emit_move (mips_subword (dest, 1), mips_subword (src, 1));
3239 mips_emit_move (low_dest, mips_subword (src, 0));
3243 mips_emit_move (low_dest, mips_subword (src, 0));
3244 mips_emit_move (mips_subword (dest, 1), mips_subword (src, 1));
3249 /* Return the appropriate instructions to move SRC into DEST. Assume
3250 that SRC is operand 1 and DEST is operand 0. */
/* Returns an assembler template string; "#" requests a split.  */
3253 mips_output_move (rtx dest, rtx src)
3255 enum rtx_code dest_code, src_code;
3256 enum mips_symbol_type symbol_type;
/* DBL_P: the move is 8 bytes wide (doubleword / double float).  */
3259 dest_code = GET_CODE (dest);
3260 src_code = GET_CODE (src);
3261 dbl_p = (GET_MODE_SIZE (GET_MODE (dest)) == 8);
3263 if (dbl_p && mips_split_64bit_move_p (dest, src))
/* Moves whose source is a GPR or constant zero.  */
3266 if ((src_code == REG && GP_REG_P (REGNO (src)))
3267 || (!TARGET_MIPS16 && src == CONST0_RTX (GET_MODE (dest))))
3269 if (dest_code == REG)
3271 if (GP_REG_P (REGNO (dest)))
3272 return "move\t%0,%z1";
3274 if (MD_REG_P (REGNO (dest)))
3277 if (DSP_ACC_REG_P (REGNO (dest)))
/* Patch the accumulator name into the mt<acc> template.  */
3279 static char retval[] = "mt__\t%z1,%q0";
3280 retval[2] = reg_names[REGNO (dest)][4];
3281 retval[3] = reg_names[REGNO (dest)][5];
3285 if (FP_REG_P (REGNO (dest)))
3286 return (dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0");
3288 if (ALL_COP_REG_P (REGNO (dest)))
3290 static char retval[] = "dmtc_\t%z1,%0";
/* Patch in the coprocessor number; skip the 'd' for 32-bit.  */
3292 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3293 return (dbl_p ? retval : retval + 1);
3296 if (dest_code == MEM)
3297 return (dbl_p ? "sd\t%z1,%0" : "sw\t%z1,%0");
/* Moves whose destination is a GPR.  */
3299 if (dest_code == REG && GP_REG_P (REGNO (dest)))
3301 if (src_code == REG)
3303 if (DSP_ACC_REG_P (REGNO (src)))
3305 static char retval[] = "mf__\t%0,%q1";
3306 retval[2] = reg_names[REGNO (src)][4];
3307 retval[3] = reg_names[REGNO (src)][5];
3311 if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
3312 return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
3314 if (FP_REG_P (REGNO (src)))
3315 return (dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1");
3317 if (ALL_COP_REG_P (REGNO (src)))
3319 static char retval[] = "dmfc_\t%0,%1";
3321 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3322 return (dbl_p ? retval : retval + 1);
3326 if (src_code == MEM)
3327 return (dbl_p ? "ld\t%0,%1" : "lw\t%0,%1");
3329 if (src_code == CONST_INT)
3331 /* Don't use the X format, because that will give out of
3332 range numbers for 64-bit hosts and 32-bit targets. */
3334 return "li\t%0,%1\t\t\t# %X1";
3336 if (INTVAL (src) >= 0 && INTVAL (src) <= 0xffff)
3339 if (INTVAL (src) < 0 && INTVAL (src) >= -0xffff)
3343 if (src_code == HIGH)
3344 return TARGET_MIPS16 ? "#" : "lui\t%0,%h1";
3346 if (CONST_GP_P (src))
3347 return "move\t%0,%1";
3349 if (mips_symbolic_constant_p (src, SYMBOL_CONTEXT_LEA, &symbol_type)
3350 && mips_lo_relocs[symbol_type] != 0)
3352 /* A signed 16-bit constant formed by applying a relocation
3353 operator to a symbolic address. */
3354 gcc_assert (!mips_split_p[symbol_type]);
3355 return "li\t%0,%R1";
3358 if (symbolic_operand (src, VOIDmode))
3360 gcc_assert (TARGET_MIPS16
3361 ? TARGET_MIPS16_TEXT_LOADS
3362 : !TARGET_EXPLICIT_RELOCS);
3363 return (dbl_p ? "dla\t%0,%1" : "la\t%0,%1");
/* Moves whose source is an FPR.  */
3366 if (src_code == REG && FP_REG_P (REGNO (src)))
3368 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3370 if (GET_MODE (dest) == V2SFmode)
3371 return "mov.ps\t%0,%1";
3373 return (dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1");
3376 if (dest_code == MEM)
3377 return (dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0");
3379 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3381 if (src_code == MEM)
3382 return (dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1");
/* Coprocessor loads and stores (l[dw]c<n>/s[dw]c<n>).  */
3384 if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
3386 static char retval[] = "l_c_\t%0,%1";
3388 retval[1] = (dbl_p ? 'd' : 'w');
3389 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3392 if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
3394 static char retval[] = "s_c_\t%1,%0";
3396 retval[1] = (dbl_p ? 'd' : 'w');
3397 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3403 /* Restore $gp from its save slot. Valid only when using o32 or
3407 mips_restore_gp (void)
/* Reload $gp from its save slot in the current frame.  The slot lies
   at CURRENT_FUNCTION_OUTGOING_ARGS_SIZE above whichever of the frame
   or stack pointer is in use.  Only valid for old-ABI abicalls code,
   as the assertion below enforces.  */
3411 gcc_assert (TARGET_ABICALLS && TARGET_OLDABI);
3413 address = mips_add_offset (pic_offset_table_rtx,
3414 frame_pointer_needed
3415 ? hard_frame_pointer_rtx
3416 : stack_pointer_rtx,
3417 current_function_outgoing_args_size);
3418 slot = gen_rtx_MEM (Pmode, address);
3420 mips_emit_move (pic_offset_table_rtx, slot);
/* Without explicit relocs, stop the scheduler from moving uses of $gp
   above this reload.  */
3421 if (!TARGET_EXPLICIT_RELOCS)
3422 emit_insn (gen_blockage ());
3425 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
3428 mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
/* The result mode is taken from TARGET; OP0/OP1 must already be valid
   operands for CODE in that mode.  */
3430 emit_insn (gen_rtx_SET (VOIDmode, target,
3431 gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
3434 /* Return true if CMP1 is a suitable second operand for relational
3435 operator CODE. See also the *sCC patterns in mips.md. */
3438 mips_relational_operand_ok_p (enum rtx_code code, rtx cmp1)
/* Each return below is one arm of a switch on CODE; the case labels
   are not visible in this excerpt.  Presumably they map, in order, to
   EQ/NE, a MIPS16-restricted case, LT/LTU, LE and LEU — confirm
   against the full source.  */
3444 return reg_or_0_operand (cmp1, VOIDmode);
3448 return !TARGET_MIPS16 && cmp1 == const1_rtx;
3452 return arith_operand (cmp1, VOIDmode);
3455 return sle_operand (cmp1, VOIDmode);
3458 return sleu_operand (cmp1, VOIDmode);
3465 /* Canonicalize LE or LEU comparisons into LT comparisons when
3466 possible to avoid extra instructions or inverting the
3470 mips_canonicalize_comparison (enum rtx_code *code, rtx *cmp1,
3471 enum machine_mode mode)
3473 HOST_WIDE_INT original, plus_one;
/* Only constant second operands can be canonicalized this way.  */
3475 if (GET_CODE (*cmp1) != CONST_INT)
3478 original = INTVAL (*cmp1);
/* "x <= C" becomes "x < C+1"; truncate C+1 to MODE so overflow is
   detected by the comparisons below.  */
3479 plus_one = trunc_int_for_mode ((unsigned HOST_WIDE_INT) original + 1, mode);
3484 if (original < plus_one)
3487 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3496 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3509 /* Compare CMP0 and CMP1 using relational operator CODE and store the
3510 result in TARGET. CMP0 and TARGET are register_operands that have
3511 the same integer mode. If INVERT_PTR is nonnull, it's OK to set
3512 TARGET to the inverse of the result and flip *INVERT_PTR instead. */
3515 mips_emit_int_relational (enum rtx_code code, bool *invert_ptr,
3516 rtx target, rtx cmp0, rtx cmp1)
3518 /* First see if there is a MIPS instruction that can do this operation
3519 with CMP1 in its current form. If not, try to canonicalize the
3520 comparison to LT. If that fails, try doing the same for the
3521 inverse operation. If that also fails, force CMP1 into a register
3523 if (mips_relational_operand_ok_p (code, cmp1))
3524 mips_emit_binary (code, target, cmp0, cmp1);
3525 else if (mips_canonicalize_comparison (&code, &cmp1, GET_MODE (target)))
3526 mips_emit_binary (code, target, cmp0, cmp1);
3529 enum rtx_code inv_code = reverse_condition (code);
/* Neither CODE nor its inverse can use CMP1 directly: force CMP1 into
   a register and retry with register operands.  */
3530 if (!mips_relational_operand_ok_p (inv_code, cmp1))
3532 cmp1 = force_reg (GET_MODE (cmp0), cmp1);
3533 mips_emit_int_relational (code, invert_ptr, target, cmp0, cmp1);
/* The inverse comparison works but the caller cannot accept an
   inverted result: compute it into a fresh register and XOR with 1.  */
3535 else if (invert_ptr == 0)
3537 rtx inv_target = gen_reg_rtx (GET_MODE (target));
3538 mips_emit_binary (inv_code, inv_target, cmp0, cmp1);
3539 mips_emit_binary (XOR, target, inv_target, const1_rtx);
/* Otherwise tell the caller the result is inverted.  */
3543 *invert_ptr = !*invert_ptr;
3544 mips_emit_binary (inv_code, target, cmp0, cmp1);
3549 /* Return a register that is zero iff CMP0 and CMP1 are equal.
3550 The register will have the same mode as CMP0. */
3553 mips_zero_if_equal (rtx cmp0, rtx cmp1)
3555 if (cmp1 == const0_rtx)
/* XOR handles any operand XORI can encode (unsigned 16-bit immediate);
   otherwise fall back to subtraction.  */
3558 if (uns_arith_operand (cmp1, VOIDmode))
3559 return expand_binop (GET_MODE (cmp0), xor_optab,
3560 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3562 return expand_binop (GET_MODE (cmp0), sub_optab,
3563 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3566 /* Convert *CODE into a code that can be used in a floating-point
3567 scc instruction (c.<cond>.<fmt>). Return true if the values of
3568 the condition code registers will be inverted, with 0 indicating
3569 that the condition holds. */
3572 mips_reverse_fp_cond_p (enum rtx_code *code)
/* For codes with no direct c.<cond>.<fmt> form, emit the reversed
   (maybe-unordered) comparison instead; the caller then tests the CC
   register for 0 rather than 1.  Case labels are omitted in this
   excerpt.  */
3579 *code = reverse_condition_maybe_unordered (*code);
3587 /* Convert a comparison into something that can be used in a branch or
3588 conditional move. cmp_operands[0] and cmp_operands[1] are the values
3589 being compared and *CODE is the code used to compare them.
3591 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
3592 If NEED_EQ_NE_P, then only EQ/NE comparisons against zero are possible,
3593 otherwise any standard branch condition can be used. The standard branch
3596 - EQ/NE between two registers.
3597 - any comparison between a register and zero. */
3600 mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
3602 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) == MODE_INT)
/* Comparisons against zero are directly supported by branches.  */
3604 if (!need_eq_ne_p && cmp_operands[1] == const0_rtx)
3606 *op0 = cmp_operands[0];
3607 *op1 = cmp_operands[1];
/* EQ/NE against a nonzero operand: reduce to a zero test of
   (cmp_operands[0] ^/- cmp_operands[1]).  */
3609 else if (*code == EQ || *code == NE)
3613 *op0 = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3618 *op0 = cmp_operands[0];
3619 *op1 = force_reg (GET_MODE (*op0), cmp_operands[1]);
3624 /* The comparison needs a separate scc instruction. Store the
3625 result of the scc in *OP0 and compare it against zero. */
3626 bool invert = false;
3627 *op0 = gen_reg_rtx (GET_MODE (cmp_operands[0]));
3629 mips_emit_int_relational (*code, &invert, *op0,
3630 cmp_operands[0], cmp_operands[1]);
3631 *code = (invert ? EQ : NE);
3636 enum rtx_code cmp_code;
3638 /* Floating-point tests use a separate c.cond.fmt comparison to
3639 set a condition code register. The branch or conditional move
3640 will then compare that register against zero.
3642 Set CMP_CODE to the code of the comparison instruction and
3643 *CODE to the code that the branch or move should use. */
3645 *code = mips_reverse_fp_cond_p (&cmp_code) ? EQ : NE;
3647 ? gen_reg_rtx (CCmode)
3648 : gen_rtx_REG (CCmode, FPSW_REGNUM));
3650 mips_emit_binary (cmp_code, *op0, cmp_operands[0], cmp_operands[1]);
3654 /* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE.
3655 Store the result in TARGET and return true if successful.
3657 On 64-bit targets, TARGET may be wider than cmp_operands[0]. */
3660 mips_emit_scc (enum rtx_code code, rtx target)
/* Only integer comparisons are handled here; FP scc goes through the
   condition-code path instead.  */
3662 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) != MODE_INT)
3665 target = gen_lowpart (GET_MODE (cmp_operands[0]), target);
3666 if (code == EQ || code == NE)
3668 rtx zie = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3669 mips_emit_binary (code, target, zie, const0_rtx);
/* Passing a null invert_ptr forces a non-inverted result in TARGET.  */
3672 mips_emit_int_relational (code, 0, target,
3673 cmp_operands[0], cmp_operands[1]);
3677 /* Emit the common code for doing conditional branches.
3678 operand[0] is the label to jump to.
3679 The comparison operands are saved away by cmp{si,di,sf,df}. */
3682 gen_conditional_branch (rtx *operands, enum rtx_code code)
3684 rtx op0, op1, condition;
/* MIPS16 branches only support EQ/NE against zero, hence the
   TARGET_MIPS16 value for NEED_EQ_NE_P.  */
3686 mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
3687 condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
3688 emit_jump_insn (gen_condjump (condition, operands[0]));
3693 (set temp (COND:CCV2 CMP_OP0 CMP_OP1))
3694 (set DEST (unspec [TRUE_SRC FALSE_SRC temp] UNSPEC_MOVE_TF_PS)) */
3697 mips_expand_vcondv2sf (rtx dest, rtx true_src, rtx false_src,
3698 enum rtx_code cond, rtx cmp_op0, rtx cmp_op1)
/* Paired-single vector conditional move: compare both halves with
   c.<cond>.ps, then select per-half with movt.ps/movf.ps.  If the
   condition had to be reversed to fit c.<cond>.<fmt>, swap the move
   sources instead of the condition.  */
3703 reversed_p = mips_reverse_fp_cond_p (&cond);
3704 cmp_result = gen_reg_rtx (CCV2mode);
3705 emit_insn (gen_scc_ps (cmp_result,
3706 gen_rtx_fmt_ee (cond, VOIDmode, cmp_op0, cmp_op1)));
3708 emit_insn (gen_mips_cond_move_tf_ps (dest, false_src, true_src,
3711 emit_insn (gen_mips_cond_move_tf_ps (dest, true_src, false_src,
3715 /* Emit the common code for conditional moves. OPERANDS is the array
3716 of operands passed to the conditional move define_expand. */
3719 gen_conditional_move (rtx *operands)
3724 code = GET_CODE (operands[1]);
/* Conditional moves only test EQ/NE against zero, so NEED_EQ_NE_P
   is always true here.  */
3725 mips_emit_compare (&code, &op0, &op1, true);
3726 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
3727 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3728 gen_rtx_fmt_ee (code,
3731 operands[2], operands[3])));
3734 /* Emit a conditional trap. OPERANDS is the array of operands passed to
3735 the conditional_trap expander. */
3738 mips_gen_conditional_trap (rtx *operands)
3741 enum rtx_code cmp_code = GET_CODE (operands[0]);
3742 enum machine_mode mode = GET_MODE (cmp_operands[0]);
3744 /* MIPS conditional trap machine instructions don't have GT or LE
3745 flavors, so we must invert the comparison and convert to LT and
3746 GE, respectively. */
3749 case GT: cmp_code = LT; break;
3750 case LE: cmp_code = GE; break;
3751 case GTU: cmp_code = LTU; break;
3752 case LEU: cmp_code = GEU; break;
/* If CMP_CODE was unchanged, keep the operands in order; otherwise
   the conversion above also requires swapping the operands.  */
3755 if (cmp_code == GET_CODE (operands[0]))
3757 op0 = cmp_operands[0];
3758 op1 = cmp_operands[1];
3762 op0 = cmp_operands[1];
3763 op1 = cmp_operands[0];
/* Trap instructions take a register and a register-or-immediate.  */
3765 op0 = force_reg (mode, op0);
3766 if (!arith_operand (op1, mode))
3767 op1 = force_reg (mode, op1);
3769 emit_insn (gen_rtx_TRAP_IF (VOIDmode,
3770 gen_rtx_fmt_ee (cmp_code, mode, op0, op1),
3774 /* Return true if calls to X can use R_MIPS_CALL* relocations. */
3777 mips_ok_for_lazy_binding_p (rtx x)
/* Lazy binding needs a GOT and a symbol that may be preempted at
   link/load time (i.e. does not bind locally).  */
3779 return (TARGET_USE_GOT
3780 && GET_CODE (x) == SYMBOL_REF
3781 && !mips_symbol_binds_local_p (x));
3784 /* Load function address ADDR into register DEST. SIBCALL_P is true
3785 if the address is needed for a sibling call. */
3788 mips_load_call_address (rtx dest, rtx addr, int sibcall_p)
3790 /* If we're generating PIC, and this call is to a global function,
3791 try to allow its address to be resolved lazily. This isn't
3792 possible if TARGET_CALL_SAVED_GP since the value of $gp on entry
3793 to the stub would be our caller's gp, not ours. */
3794 if (TARGET_EXPLICIT_RELOCS
3795 && !(sibcall_p && TARGET_CALL_SAVED_GP)
3796 && mips_ok_for_lazy_binding_p (addr))
3798 rtx high, lo_sum_symbol;
/* Split the GOT access into a %call_hi/%call_lo pair so the linker
   can use R_MIPS_CALL* relocations.  */
3800 high = mips_unspec_offset_high (dest, pic_offset_table_rtx,
3801 addr, SYMBOL_GOTOFF_CALL);
3802 lo_sum_symbol = mips_unspec_address (addr, SYMBOL_GOTOFF_CALL);
3803 if (Pmode == SImode)
3804 emit_insn (gen_load_callsi (dest, high, lo_sum_symbol));
3806 emit_insn (gen_load_calldi (dest, high, lo_sum_symbol));
/* Otherwise a plain move of the address is enough.  */
3809 mips_emit_move (dest, addr);
3813 /* Expand a call or call_value instruction. RESULT is where the
3814 result will go (null for calls), ADDR is the address of the
3815 function, ARGS_SIZE is the size of the arguments and AUX is
3816 the value passed to us by mips_function_arg. SIBCALL_P is true
3817 if we are expanding a sibling call, false if we're expanding
3821 mips_expand_call (rtx result, rtx addr, rtx args_size, rtx aux, int sibcall_p)
3823 rtx orig_addr, pattern, insn;
/* Force ADDR into a register if it is not a valid call operand,
   keeping the original for the lazy-binding check at the end.  */
3826 if (!call_insn_operand (addr, VOIDmode))
3828 addr = gen_reg_rtx (Pmode);
3829 mips_load_call_address (addr, orig_addr, sibcall_p);
/* MIPS16 calls with a hard-float ABI may need a helper stub; if one
   was emitted, it has already taken care of the call.  */
3833 && TARGET_HARD_FLOAT_ABI
3834 && build_mips16_call_stub (result, addr, args_size,
3835 aux == 0 ? 0 : (int) GET_MODE (aux)))
3839 pattern = (sibcall_p
3840 ? gen_sibcall_internal (addr, args_size)
3841 : gen_call_internal (addr, args_size));
/* A two-element PARALLEL result means the value is returned in two
   separate registers.  */
3842 else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
3846 reg1 = XEXP (XVECEXP (result, 0, 0), 0);
3847 reg2 = XEXP (XVECEXP (result, 0, 1), 0);
3850 ? gen_sibcall_value_multiple_internal (reg1, addr, args_size, reg2)
3851 : gen_call_value_multiple_internal (reg1, addr, args_size, reg2));
3854 pattern = (sibcall_p
3855 ? gen_sibcall_value_internal (result, addr, args_size)
3856 : gen_call_value_internal (result, addr, args_size));
3858 insn = emit_call_insn (pattern);
3860 /* Lazy-binding stubs require $gp to be valid on entry. */
3861 if (mips_ok_for_lazy_binding_p (orig_addr))
3862 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
3866 /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */
3869 mips_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
3871 if (!TARGET_SIBCALLS)
3874 /* We can't do a sibcall if the called function is a MIPS16 function
3875 because there is no direct "jx" instruction equivalent to "jalx" to
3876 switch the ISA mode. */
3877 if (decl && SYMBOL_REF_MIPS16_FUNC_P (XEXP (DECL_RTL (decl), 0)))
3884 /* Emit code to move general operand SRC into condition-code
3885 register DEST. SCRATCH is a scratch TFmode float register.
3892 where FP1 and FP2 are single-precision float registers
3893 taken from SCRATCH. */
3896 mips_emit_fcc_reload (rtx dest, rtx src, rtx scratch)
3900 /* Change the source to SFmode. */
3902 src = adjust_address (src, SFmode, 0);
3903 else if (REG_P (src) || GET_CODE (src) == SUBREG)
3904 src = gen_rtx_REG (SFmode, true_regnum (src));
/* Use the two single-precision halves of the TFmode scratch.  */
3906 fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
3907 fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + MAX_FPRS_PER_FMT);
/* DEST := (0.0 < SRC), i.e. nonzero SRC sets the CC register.
   NOTE(review): assumes SRC is a canonical 0/1 value — confirm.  */
3909 mips_emit_move (copy_rtx (fp1), src);
3910 mips_emit_move (copy_rtx (fp2), CONST0_RTX (SFmode));
3911 emit_insn (gen_slt_sf (dest, fp2, fp1));
3914 /* Emit code to change the current function's return address to
3915 ADDRESS. SCRATCH is available as a scratch register, if needed.
3916 ADDRESS and SCRATCH are both word-mode GPRs. */
3919 mips_set_return_address (rtx address, rtx scratch)
/* Make sure the frame layout is known, check that $31 ($ra) is saved
   (bit 31 of the save mask), then store ADDRESS into its save slot.  */
3923 compute_frame_size (get_frame_size ());
3924 gcc_assert ((cfun->machine->frame.mask >> 31) & 1);
3925 slot_address = mips_add_offset (scratch, stack_pointer_rtx,
3926 cfun->machine->frame.gp_sp_offset);
3928 mips_emit_move (gen_rtx_MEM (GET_MODE (address), slot_address), address);
3931 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
3932 Assume that the areas do not overlap. */
3935 mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
3937 HOST_WIDE_INT offset, delta;
3938 unsigned HOST_WIDE_INT bits;
3940 enum machine_mode mode;
3943 /* Work out how many bits to move at a time. If both operands have
3944 half-word alignment, it is usually better to move in half words.
3945 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
3946 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
3947 Otherwise move word-sized chunks. */
3948 if (MEM_ALIGN (src) == BITS_PER_WORD / 2
3949 && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
3950 bits = BITS_PER_WORD / 2;
3952 bits = BITS_PER_WORD;
3954 mode = mode_for_size (bits, MODE_INT, 0);
3955 delta = bits / BITS_PER_UNIT;
3957 /* Allocate a buffer for the temporary registers. */
3958 regs = alloca (sizeof (rtx) * length / delta);
3960 /* Load as many BITS-sized chunks as possible. Use a normal load if
3961 the source has enough alignment, otherwise use left/right pairs. */
3962 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
3964 regs[i] = gen_reg_rtx (mode);
3965 if (MEM_ALIGN (src) >= bits)
3966 mips_emit_move (regs[i], adjust_address (src, mode, offset));
3969 rtx part = adjust_address (src, BLKmode, offset);
3970 if (!mips_expand_unaligned_load (regs[i], part, bits, 0))
3975 /* Copy the chunks to the destination. */
3976 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
3977 if (MEM_ALIGN (dest) >= bits)
3978 mips_emit_move (adjust_address (dest, mode, offset), regs[i]);
3981 rtx part = adjust_address (dest, BLKmode, offset);
3982 if (!mips_expand_unaligned_store (part, regs[i], bits, 0))
3986 /* Mop up any left-over bytes. */
3987 if (offset < length)
3989 src = adjust_address (src, BLKmode, offset);
3990 dest = adjust_address (dest, BLKmode, offset);
3991 move_by_pieces (dest, src, length - offset,
3992 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
3996 #define MAX_MOVE_REGS 4
3997 #define MAX_MOVE_BYTES (MAX_MOVE_REGS * UNITS_PER_WORD)
4000 /* Helper function for doing a loop-based block operation on memory
4001 reference MEM. Each iteration of the loop will operate on LENGTH
4004 Create a new base register for use within the loop and point it to
4005 the start of MEM. Create a new memory reference that uses this
4006 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
4009 mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
4010 rtx *loop_reg, rtx *loop_mem)
4012 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
4014 /* Although the new mem does not refer to a known location,
4015 it does keep up to LENGTH bytes of alignment. */
4016 *loop_mem = change_address (mem, BLKmode, *loop_reg);
4017 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
4021 /* Move LENGTH bytes from SRC to DEST using a loop that moves MAX_MOVE_BYTES
4022 per iteration. LENGTH must be at least MAX_MOVE_BYTES. Assume that the
4023 memory regions do not overlap. */
4026 mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length)
4028 rtx label, src_reg, dest_reg, final_src;
4029 HOST_WIDE_INT leftover;
/* Bytes that don't fill a whole iteration are copied straight-line
   after the loop.  */
4031 leftover = length % MAX_MOVE_BYTES;
4034 /* Create registers and memory references for use within the loop. */
4035 mips_adjust_block_mem (src, MAX_MOVE_BYTES, &src_reg, &src);
4036 mips_adjust_block_mem (dest, MAX_MOVE_BYTES, &dest_reg, &dest);
4038 /* Calculate the value that SRC_REG should have after the last iteration
4040 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
4043 /* Emit the start of the loop. */
4044 label = gen_label_rtx ();
4047 /* Emit the loop body. */
4048 mips_block_move_straight (dest, src, MAX_MOVE_BYTES);
4050 /* Move on to the next block. */
4051 mips_emit_move (src_reg, plus_constant (src_reg, MAX_MOVE_BYTES));
4052 mips_emit_move (dest_reg, plus_constant (dest_reg, MAX_MOVE_BYTES));
4054 /* Emit the loop condition. */
4055 if (Pmode == DImode)
4056 emit_insn (gen_cmpdi (src_reg, final_src));
4058 emit_insn (gen_cmpsi (src_reg, final_src));
4059 emit_jump_insn (gen_bne (label));
4061 /* Mop up any left-over bytes. */
4063 mips_block_move_straight (dest, src, leftover);
4067 /* Expand a loop of synci insns for the address range [BEGIN, END). */
4070 mips_expand_synci_loop (rtx begin, rtx end)
4072 rtx inc, label, cmp, cmp_result;
4074 /* Load INC with the cache line size (rdhwr INC,$1). */
4075 inc = gen_reg_rtx (SImode);
4076 emit_insn (gen_rdhwr (inc, const1_rtx));
4078 /* Loop back to here. */
4079 label = gen_label_rtx ();
4082 emit_insn (gen_synci (begin));
/* Loop while BEGIN <= END: compute the GTU test before advancing
   BEGIN so the final partial line is still synced.  */
4084 cmp = gen_reg_rtx (Pmode);
4085 mips_emit_binary (GTU, cmp, begin, end);
4087 mips_emit_binary (PLUS, begin, begin, inc);
4089 cmp_result = gen_rtx_EQ (VOIDmode, cmp, const0_rtx);
4090 emit_jump_insn (gen_condjump (cmp_result, label));
4093 /* Expand a movmemsi instruction. */
4096 mips_expand_block_move (rtx dest, rtx src, rtx length)
/* Only constant lengths are expanded inline: short copies as straight
   line code, longer ones as a MAX_MOVE_BYTES-per-iteration loop.  */
4098 if (GET_CODE (length) == CONST_INT)
4100 if (INTVAL (length) <= 2 * MAX_MOVE_BYTES)
4102 mips_block_move_straight (dest, src, INTVAL (length));
4107 mips_block_move_loop (dest, src, INTVAL (length));
4114 /* Argument support functions. */
4116 /* Initialize CUMULATIVE_ARGS for a function. */
4119 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
4120 rtx libname ATTRIBUTE_UNUSED)
4122 static CUMULATIVE_ARGS zero_cum;
4123 tree param, next_param;
4126 cum->prototype = (fntype && TYPE_ARG_TYPES (fntype));
4128 /* Determine if this function has variable arguments. This is
4129 indicated by the last argument being 'void_type_mode' if there
4130 are no variable arguments. The standard MIPS calling sequence
4131 passes all arguments in the general purpose registers in this case. */
4133 for (param = fntype ? TYPE_ARG_TYPES (fntype) : 0;
4134 param != 0; param = next_param)
4136 next_param = TREE_CHAIN (param);
/* A parameter list not terminated by void_type_node means varargs;
   force GPR passing by pretending a GPR argument was seen.  */
4137 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
4138 cum->gp_reg_found = 1;
4143 /* Fill INFO with information about a single argument. CUM is the
4144 cumulative state for earlier arguments. MODE is the mode of this
4145 argument and TYPE is its type (if known). NAMED is true if this
4146 is a named (fixed) argument rather than a variable one. */
4149 mips_arg_info (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4150 tree type, int named, struct mips_arg_info *info)
4152 bool doubleword_aligned_p;
4153 unsigned int num_bytes, num_words, max_regs;
4155 /* Work out the size of the argument. */
4156 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
4157 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4159 /* Decide whether it should go in a floating-point register, assuming
4160 one is free. Later code checks for availability.
4162 The checks against UNITS_PER_FPVALUE handle the soft-float and
4163 single-float cases. */
4167 /* The EABI conventions have traditionally been defined in terms
4168 of TYPE_MODE, regardless of the actual type. */
4169 info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
4170 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4171 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4176 /* Only leading floating-point scalars are passed in
4177 floating-point registers. We also handle vector floats the same
4178 way, which is OK because they are not covered by the standard ABI. */
4179 info->fpr_p = (!cum->gp_reg_found
4180 && cum->arg_number < 2
4181 && (type == 0 || SCALAR_FLOAT_TYPE_P (type)
4182 || VECTOR_FLOAT_TYPE_P (type))
4183 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4184 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4185 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4190 /* Scalar and complex floating-point types are passed in
4191 floating-point registers. */
4192 info->fpr_p = (named
4193 && (type == 0 || FLOAT_TYPE_P (type))
4194 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4195 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4196 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4197 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
4199 /* ??? According to the ABI documentation, the real and imaginary
4200 parts of complex floats should be passed in individual registers.
4201 The real and imaginary parts of stack arguments are supposed
4202 to be contiguous and there should be an extra word of padding
4205 This has two problems. First, it makes it impossible to use a
4206 single "void *" va_list type, since register and stack arguments
4207 are passed differently. (At the time of writing, MIPSpro cannot
4208 handle complex float varargs correctly.) Second, it's unclear
4209 what should happen when there is only one register free.
4211 For now, we assume that named complex floats should go into FPRs
4212 if there are two FPRs free, otherwise they should be passed in the
4213 same way as a struct containing two floats. */
4215 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4216 && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
4218 if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
4219 info->fpr_p = false;
4229 /* See whether the argument has doubleword alignment. */
4230 doubleword_aligned_p = FUNCTION_ARG_BOUNDARY (mode, type) > BITS_PER_WORD;
4232 /* Set REG_OFFSET to the register count we're interested in.
4233 The EABI allocates the floating-point registers separately,
4234 but the other ABIs allocate them like integer registers. */
4235 info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
4239 /* Advance to an even register if the argument is doubleword-aligned. */
4240 if (doubleword_aligned_p)
4241 info->reg_offset += info->reg_offset & 1;
4243 /* Work out the offset of a stack argument. */
4244 info->stack_offset = cum->stack_words;
4245 if (doubleword_aligned_p)
4246 info->stack_offset += info->stack_offset & 1;
4248 max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
4250 /* Partition the argument between registers and stack. */
4251 info->reg_words = MIN (num_words, max_regs);
4252 info->stack_words = num_words - info->reg_words;
4256 /* INFO describes an argument that is passed in a single-register value.
4257 Return the register it uses, assuming that FPRs are available if
4261 mips_arg_regno (const struct mips_arg_info *info, bool hard_float_p)
4263 if (!info->fpr_p || !hard_float_p)
4264 return GP_ARG_FIRST + info->reg_offset;
4265 else if (mips_abi == ABI_32 && TARGET_DOUBLE_FLOAT && info->reg_offset > 0)
4266 /* In o32, the second argument is always passed in $f14
4267 for TARGET_DOUBLE_FLOAT, regardless of whether the
4268 first argument was a word or doubleword. */
4269 return FP_ARG_FIRST + 2;
4271 return FP_ARG_FIRST + info->reg_offset;
4274 /* Implement FUNCTION_ARG_ADVANCE. */
4277 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4278 tree type, int named)
4280 struct mips_arg_info info;
4282 mips_arg_info (cum, mode, type, named, &info);
/* Once a non-FP argument has been seen, later FP arguments no longer
   qualify for FPRs (o32/o64 rule; see mips_arg_info).  */
4285 cum->gp_reg_found = true;
4287 /* See the comment above the cumulative args structure in mips.h
4288 for an explanation of what this code does. It assumes the O32
4289 ABI, which passes at most 2 arguments in float registers. */
4290 if (cum->arg_number < 2 && info.fpr_p)
4291 cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);
4293 if (mips_abi != ABI_EABI || !info.fpr_p)
4294 cum->num_gprs = info.reg_offset + info.reg_words;
4295 else if (info.reg_words > 0)
4296 cum->num_fprs += MAX_FPRS_PER_FMT;
4298 if (info.stack_words > 0)
4299 cum->stack_words = info.stack_offset + info.stack_words;
4304 /* Implement FUNCTION_ARG. */
4307 function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4308 tree type, int named)
4310 struct mips_arg_info info;
4312 /* We will be called with a mode of VOIDmode after the last argument
4313 has been seen. Whatever we return will be passed to the call
4314 insn. If we need a mips16 fp_code, return a REG with the code
4315 stored as the mode. */
4316 if (mode == VOIDmode)
4318 if (TARGET_MIPS16 && cum->fp_code != 0)
4319 return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
4325 mips_arg_info (cum, mode, type, named, &info);
4327 /* Return straight away if the whole argument is passed on the stack. */
4328 if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
4332 && TREE_CODE (type) == RECORD_TYPE
4334 && TYPE_SIZE_UNIT (type)
4335 && host_integerp (TYPE_SIZE_UNIT (type), 1)
4338 /* The Irix 6 n32/n64 ABIs say that if any 64-bit chunk of the
4339 structure contains a double in its entirety, then that 64-bit
4340 chunk is passed in a floating point register. */
4343 /* First check to see if there is any such field. */
4344 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4345 if (TREE_CODE (field) == FIELD_DECL
4346 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
4347 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
4348 && host_integerp (bit_position (field), 0)
4349 && int_bit_position (field) % BITS_PER_WORD == 0)
4354 /* Now handle the special case by returning a PARALLEL
4355 indicating where each 64-bit chunk goes. INFO.REG_WORDS
4356 chunks are passed in registers. */
4358 HOST_WIDE_INT bitpos;
4361 /* assign_parms checks the mode of ENTRY_PARM, so we must
4362 use the actual mode here. */
4363 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
4366 field = TYPE_FIELDS (type);
4367 for (i = 0; i < info.reg_words; i++)
/* Find the field (if any) that starts at or after the current
   64-bit chunk boundary.  */
4371 for (; field; field = TREE_CHAIN (field))
4372 if (TREE_CODE (field) == FIELD_DECL
4373 && int_bit_position (field) >= bitpos)
/* A double exactly filling this chunk goes in an FPR; anything
   else goes in a GPR.  */
4377 && int_bit_position (field) == bitpos
4378 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
4379 && !TARGET_SOFT_FLOAT
4380 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
4381 reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
4383 reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
4386 = gen_rtx_EXPR_LIST (VOIDmode, reg,
4387 GEN_INT (bitpos / BITS_PER_UNIT));
4389 bitpos += BITS_PER_WORD;
4395 /* Handle the n32/n64 conventions for passing complex floating-point
4396 arguments in FPR pairs. The real part goes in the lower register
4397 and the imaginary part goes in the upper register. */
4400 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4403 enum machine_mode inner;
4406 inner = GET_MODE_INNER (mode);
4407 reg = FP_ARG_FIRST + info.reg_offset;
4408 if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
4410 /* Real part in registers, imaginary part on stack. */
4411 gcc_assert (info.stack_words == info.reg_words);
4412 return gen_rtx_REG (inner, reg);
4416 gcc_assert (info.stack_words == 0);
4417 real = gen_rtx_EXPR_LIST (VOIDmode,
4418 gen_rtx_REG (inner, reg),
4420 imag = gen_rtx_EXPR_LIST (VOIDmode,
4422 reg + info.reg_words / 2),
4423 GEN_INT (GET_MODE_SIZE (inner)));
4424 return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
/* The common case: a single register.  */
4428 return gen_rtx_REG (mode, mips_arg_regno (&info, TARGET_HARD_FLOAT));
4432 /* Implement TARGET_ARG_PARTIAL_BYTES. */
4435 mips_arg_partial_bytes (CUMULATIVE_ARGS *cum,
4436 enum machine_mode mode, tree type, bool named)
4438 struct mips_arg_info info;
/* If the argument straddles registers and stack, report how many
   bytes go in registers; otherwise zero.  */
4440 mips_arg_info (cum, mode, type, named, &info);
4441 return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
4445 /* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
4446 PARM_BOUNDARY bits of alignment, but will be given anything up
4447 to STACK_BOUNDARY bits if the type requires it. */
4450 function_arg_boundary (enum machine_mode mode, tree type)
4452 unsigned int alignment;
/* Clamp the natural alignment of TYPE (or MODE) to
   [PARM_BOUNDARY, STACK_BOUNDARY].  */
4454 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
4455 if (alignment < PARM_BOUNDARY)
4456 alignment = PARM_BOUNDARY;
4457 if (alignment > STACK_BOUNDARY)
4458 alignment = STACK_BOUNDARY;
4462 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
4463 upward rather than downward. In other words, return true if the
4464 first byte of the stack slot has useful data, false if the last
4468 mips_pad_arg_upward (enum machine_mode mode, const_tree type)
4470 /* On little-endian targets, the first byte of every stack argument
4471 is passed in the first byte of the stack slot. */
4472 if (!BYTES_BIG_ENDIAN)
4475 /* Otherwise, integral types are padded downward: the last byte of a
4476 stack argument is passed in the last byte of the stack slot. */
4478 ? INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type)
4479 : GET_MODE_CLASS (mode) == MODE_INT)
4482 /* Big-endian o64 pads floating-point arguments downward. */
4483 if (mips_abi == ABI_O64)
4484 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4487 /* Other types are padded upward for o32, o64, n32 and n64. */
4488 if (mips_abi != ABI_EABI)
4491 /* Arguments smaller than a stack slot are padded downward. */
4492 if (mode != BLKmode)
4493 return (GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY);
4495 return (int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT));
4499 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
4500 if the least significant byte of the register has useful data. Return
4501 the opposite if the most significant byte does. */
4504 mips_pad_reg_upward (enum machine_mode mode, tree type)
4506 /* No shifting is required for floating-point arguments. */
4507 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4508 return !BYTES_BIG_ENDIAN;
4510 /* Otherwise, apply the same padding to register arguments as we do
4511 to stack arguments. */
4512 return mips_pad_arg_upward (mode, type);
4516 mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4517 tree type, int *pretend_size ATTRIBUTE_UNUSED,
4520 CUMULATIVE_ARGS local_cum;
4521 int gp_saved, fp_saved;
4523 /* The caller has advanced CUM up to, but not beyond, the last named
4524 argument. Advance a local copy of CUM past the last "real" named
4525 argument, to find out how many registers are left over. */
4528 FUNCTION_ARG_ADVANCE (local_cum, mode, type, 1);
4530 /* Find out how many registers we need to save. */
4531 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
4532 fp_saved = (EABI_FLOAT_VARARGS_P
4533 ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
/* Spill the unnamed GPR arguments into the stack area just below the
   incoming arguments.  */
4542 ptr = plus_constant (virtual_incoming_args_rtx,
4543 REG_PARM_STACK_SPACE (cfun->decl)
4544 - gp_saved * UNITS_PER_WORD);
4545 mem = gen_rtx_MEM (BLKmode, ptr);
4546 set_mem_alias_set (mem, get_varargs_alias_set ());
4548 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
4553 /* We can't use move_block_from_reg, because it will use
4555 enum machine_mode mode;
4558 /* Set OFF to the offset from virtual_incoming_args_rtx of
4559 the first float register. The FP save area lies below
4560 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
4561 off = -gp_saved * UNITS_PER_WORD;
4562 off &= ~(UNITS_PER_FPVALUE - 1);
4563 off -= fp_saved * UNITS_PER_FPREG;
4565 mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
4567 for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS;
4568 i += MAX_FPRS_PER_FMT)
4572 ptr = plus_constant (virtual_incoming_args_rtx, off);
4573 mem = gen_rtx_MEM (mode, ptr);
4574 set_mem_alias_set (mem, get_varargs_alias_set ());
4575 mips_emit_move (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
4576 off += UNITS_PER_HWFPVALUE;
/* Record the total save-area size so the prologue can allocate it
   when no register parameter stack space is reserved.  */
4580 if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
4581 cfun->machine->varargs_size = (gp_saved * UNITS_PER_WORD
4582 + fp_saved * UNITS_PER_FPREG);
4585 /* Create the va_list data type.
4586 We keep 3 pointers, and two offsets.
4587 Two pointers are to the overflow area, which starts at the CFA.
4588 One of these is constant, for addressing into the GPR save area below it.
4589 The other is advanced up the stack through the overflow region.
4590 The third pointer is to the GPR save area. Since the FPR save area
4591 is just below it, we can address FPR slots off this pointer.
4592 We also keep two one-byte offsets, which are to be subtracted from the
4593 constant pointers to yield addresses in the GPR and FPR save areas.
4594 These are downcounted as float or non-float arguments are used,
4595 and when they get to zero, the argument must be obtained from the
4597 If !EABI_FLOAT_VARARGS_P, then no FPR save area exists, and a single
4598 pointer is enough. It's started at the GPR save area, and is
4600 Note that the GPR save area is not constant size, due to optimization
4601 in the prologue. Hence, we can't use a design with two pointers
4602 and two offsets, although we could have designed this with two pointers
4603 and three offsets. */
/* Returns the tree type used for va_list; presumably implements
   TARGET_BUILD_BUILTIN_VA_LIST — confirm against the full source.  */
4606 mips_build_builtin_va_list (void)
4608 if (EABI_FLOAT_VARARGS_P)
4610 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
4613 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
4615 f_ovfl = build_decl (FIELD_DECL, get_identifier ("__overflow_argptr"),
4617 f_gtop = build_decl (FIELD_DECL, get_identifier ("__gpr_top"),
4619 f_ftop = build_decl (FIELD_DECL, get_identifier ("__fpr_top"),
4621 f_goff = build_decl (FIELD_DECL, get_identifier ("__gpr_offset"),
4622 unsigned_char_type_node);
4623 f_foff = build_decl (FIELD_DECL, get_identifier ("__fpr_offset"),
4624 unsigned_char_type_node);
4625 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
4626 warn on every user file. */
4627 index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
4628 array = build_array_type (unsigned_char_type_node,
4629 build_index_type (index));
4630 f_res = build_decl (FIELD_DECL, get_identifier ("__reserved"), array);
/* Attach each field to the record and chain them in declaration
   order: ovfl, gtop, ftop, goff, foff, reserved.  va_start/va_arg
   below rely on exactly this field order via TREE_CHAIN walks.  */
4632 DECL_FIELD_CONTEXT (f_ovfl) = record;
4633 DECL_FIELD_CONTEXT (f_gtop) = record;
4634 DECL_FIELD_CONTEXT (f_ftop) = record;
4635 DECL_FIELD_CONTEXT (f_goff) = record;
4636 DECL_FIELD_CONTEXT (f_foff) = record;
4637 DECL_FIELD_CONTEXT (f_res) = record;
4639 TYPE_FIELDS (record) = f_ovfl;
4640 TREE_CHAIN (f_ovfl) = f_gtop;
4641 TREE_CHAIN (f_gtop) = f_ftop;
4642 TREE_CHAIN (f_ftop) = f_goff;
4643 TREE_CHAIN (f_goff) = f_foff;
4644 TREE_CHAIN (f_foff) = f_res;
4646 layout_type (record);
4649 else if (TARGET_IRIX && TARGET_IRIX6)
4650 /* On IRIX 6, this type is 'char *'. */
4651 return build_pointer_type (char_type_node);
4653 /* Otherwise, we use 'void *'. */
4654 return ptr_type_node;
4657 /* Implement va_start. */
4660 mips_va_start (tree valist, rtx nextarg)
4662 if (EABI_FLOAT_VARARGS_P)
4664 const CUMULATIVE_ARGS *cum;
4665 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4666 tree ovfl, gtop, ftop, goff, foff;
4668 int gpr_save_area_size;
4669 int fpr_save_area_size;
/* NOTE(review): "¤t_function_args_info" below is mojibake — the
   byte sequence "&curr" was corrupted into a currency sign.  The
   original source reads "cum = &current_function_args_info;".
   Fix the encoding only; the logic is correct.  */
4672 cum = ¤t_function_args_info;
4674 = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
4676 = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
/* Walk the va_list record's fields in the order they were chained
   by mips_build_builtin_va_list.  */
4678 f_ovfl = TYPE_FIELDS (va_list_type_node);
4679 f_gtop = TREE_CHAIN (f_ovfl);
4680 f_ftop = TREE_CHAIN (f_gtop);
4681 f_goff = TREE_CHAIN (f_ftop);
4682 f_foff = TREE_CHAIN (f_goff);
4684 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
4686 gtop = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4688 ftop = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4690 goff = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4692 foff = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4695 /* Emit code to initialize OVFL, which points to the next varargs
4696 stack argument. CUM->STACK_WORDS gives the number of stack
4697 words used by named arguments. */
4698 t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
4699 if (cum->stack_words > 0)
4700 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), t,
4701 size_int (cum->stack_words * UNITS_PER_WORD));
4702 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
4703 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4705 /* Emit code to initialize GTOP, the top of the GPR save area. */
4706 t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
4707 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (gtop), gtop, t);
4708 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4710 /* Emit code to initialize FTOP, the top of the FPR save area.
4711 This address is gpr_save_area_bytes below GTOP, rounded
4712 down to the next fp-aligned boundary. */
4713 t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
4714 fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
4715 fpr_offset &= ~(UNITS_PER_FPVALUE - 1);
4717 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ftop), t,
4718 size_int (-fpr_offset));
4719 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ftop), ftop, t);
4720 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4722 /* Emit code to initialize GOFF, the offset from GTOP of the
4723 next GPR argument. */
4724 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (goff), goff,
4725 build_int_cst (NULL_TREE, gpr_save_area_size));
4726 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4728 /* Likewise emit code to initialize FOFF, the offset from FTOP
4729 of the next FPR argument. */
4730 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (foff), foff,
4731 build_int_cst (NULL_TREE, fpr_save_area_size));
4732 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
/* Non-EABI-float path (the else arm is elided in this listing):
   fall back to the standard single-pointer va_start, adjusted past
   the varargs save area recorded by mips_setup_incoming_varargs.  */
4736 nextarg = plus_constant (nextarg, -cfun->machine->varargs_size);
4737 std_expand_builtin_va_start (valist, nextarg);
4741 /* Implement va_arg. */
/* Gimplifies a VA_ARG_EXPR of TYPE read from VALIST; presumably the
   TARGET_GIMPLIFY_VA_ARG_EXPR hook — confirm against full source.  */
4744 mips_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
4746 HOST_WIDE_INT size, rsize;
/* Arguments too big to pass by value are passed by reference; in
   that case fetch a pointer and dereference it at the end.  */
4750 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
4753 type = build_pointer_type (type);
4755 size = int_size_in_bytes (type);
4756 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
4758 if (mips_abi != ABI_EABI || !EABI_FLOAT_VARARGS_P)
4759 addr = std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4762 /* Not a simple merged stack. */
4764 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4765 tree ovfl, top, off, align;
4766 HOST_WIDE_INT osize;
/* Field order must match mips_build_builtin_va_list.  */
4769 f_ovfl = TYPE_FIELDS (va_list_type_node);
4770 f_gtop = TREE_CHAIN (f_ovfl);
4771 f_ftop = TREE_CHAIN (f_gtop);
4772 f_goff = TREE_CHAIN (f_ftop);
4773 f_foff = TREE_CHAIN (f_goff);
4775 /* We maintain separate pointers and offsets for floating-point
4776 and integer arguments, but we need similar code in both cases.
4779 TOP be the top of the register save area;
4780 OFF be the offset from TOP of the next register;
4781 ADDR_RTX be the address of the argument;
4782 RSIZE be the number of bytes used to store the argument
4783 when it's in the register save area;
4784 OSIZE be the number of bytes used to store it when it's
4785 in the stack overflow area; and
4786 PADDING be (BYTES_BIG_ENDIAN ? OSIZE - RSIZE : 0)
4788 The code we want is:
4790 1: off &= -rsize; // round down
4793 4: addr_rtx = top - off;
4798 9: ovfl += ((intptr_t) ovfl + osize - 1) & -osize;
4799 10: addr_rtx = ovfl + PADDING;
4803 [1] and [9] can sometimes be optimized away. */
4805 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
/* Choose the FPR save area for small-enough scalar floats, the GPR
   save area for everything else.  */
4808 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
4809 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
4811 top = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4813 off = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4816 /* When floating-point registers are saved to the stack,
4817 each one will take up UNITS_PER_HWFPVALUE bytes, regardless
4818 of the float's precision. */
4819 rsize = UNITS_PER_HWFPVALUE;
4821 /* Overflow arguments are padded to UNITS_PER_WORD bytes
4822 (= PARM_BOUNDARY bits). This can be different from RSIZE
4825 (1) On 32-bit targets when TYPE is a structure such as:
4827 struct s { float f; };
4829 Such structures are passed in paired FPRs, so RSIZE
4830 will be 8 bytes. However, the structure only takes
4831 up 4 bytes of memory, so OSIZE will only be 4.
4833 (2) In combinations such as -mgp64 -msingle-float
4834 -fshort-double. Doubles passed in registers
4835 will then take up 4 (UNITS_PER_HWFPVALUE) bytes,
4836 but those passed on the stack take up
4837 UNITS_PER_WORD bytes. */
4838 osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
4842 top = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4844 off = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4846 if (rsize > UNITS_PER_WORD)
4848 /* [1] Emit code for: off &= -rsize. */
4849 t = build2 (BIT_AND_EXPR, TREE_TYPE (off), off,
4850 build_int_cst (NULL_TREE, -rsize));
4851 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (off), off, t);
4852 gimplify_and_add (t, pre_p);
4857 /* [2] Emit code to branch if off == 0. */
4858 t = build2 (NE_EXPR, boolean_type_node, off,
4859 build_int_cst (TREE_TYPE (off), 0));
4860 addr = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
4862 /* [5] Emit code for: off -= rsize. We do this as a form of
4863 post-increment not available to C. Also widen for the
4864 coming pointer arithmetic. */
4865 t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
4866 t = build2 (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
4867 t = fold_convert (sizetype, t);
4868 t = fold_build1 (NEGATE_EXPR, sizetype, t);
4870 /* [4] Emit code for: addr_rtx = top - off. On big endian machines,
4871 the argument has RSIZE - SIZE bytes of leading padding. */
4872 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (top), top, t);
4873 if (BYTES_BIG_ENDIAN && rsize > size)
4875 u = size_int (rsize - size);
4876 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
4878 COND_EXPR_THEN (addr) = t;
4880 if (osize > UNITS_PER_WORD)
4882 /* [9] Emit: ovfl += ((intptr_t) ovfl + osize - 1) & -osize. */
4883 u = size_int (osize - 1);
4884 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), ovfl, u);
4885 t = fold_convert (sizetype, t);
4886 u = size_int (-osize);
4887 t = build2 (BIT_AND_EXPR, sizetype, t, u);
4888 t = fold_convert (TREE_TYPE (ovfl), t);
4889 align = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
4894 /* [10, 11]. Emit code to store ovfl in addr_rtx, then
4895 post-increment ovfl by osize. On big-endian machines,
4896 the argument has OSIZE - SIZE bytes of leading padding. */
4897 u = fold_convert (TREE_TYPE (ovfl),
4898 build_int_cst (NULL_TREE, osize));
4899 t = build2 (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
4900 if (BYTES_BIG_ENDIAN && osize > size)
4902 u = size_int (osize - size);
4903 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
4906 /* String [9] and [10,11] together. */
4908 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), align, t);
4909 COND_EXPR_ELSE (addr) = t;
4911 addr = fold_convert (build_pointer_type (type), addr);
4912 addr = build_va_arg_indirect_ref (addr);
/* Extra dereference for the pass-by-reference case (the enclosing
   "if (indirect)" is elided in this listing).  */
4916 addr = build_va_arg_indirect_ref (addr);
4921 /* Return true if it is possible to use left/right accesses for a
4922 bitfield of WIDTH bits starting BITPOS bits into *OP. When
4923 returning true, update *OP, *LEFT and *RIGHT as follows:
4925 *OP is a BLKmode reference to the whole field.
4927 *LEFT is a QImode reference to the first byte if big endian or
4928 the last byte if little endian. This address can be used in the
4929 left-side instructions (lwl, swl, ldl, sdl).
4931 *RIGHT is a QImode reference to the opposite end of the field and
4932 can be used in the patterning right-side instruction. */
/* NOTE(review): the "return false" lines for each rejection check
   are elided in this listing.  */
4935 mips_get_unaligned_mem (rtx *op, unsigned int width, int bitpos,
4936 rtx *left, rtx *right)
4940 /* Check that the operand really is a MEM. Not all the extv and
4941 extzv predicates are checked. */
4945 /* Check that the size is valid. */
4946 if (width != 32 && (!TARGET_64BIT || width != 64))
4949 /* We can only access byte-aligned values. Since we are always passed
4950 a reference to the first byte of the field, it is not necessary to
4951 do anything with BITPOS after this check. */
4952 if (bitpos % BITS_PER_UNIT != 0)
4955 /* Reject aligned bitfields: we want to use a normal load or store
4956 instead of a left/right pair. */
4957 if (MEM_ALIGN (*op) >= width)
4960 /* Adjust *OP to refer to the whole field. This also has the effect
4961 of legitimizing *OP's address for BLKmode, possibly simplifying it. */
4962 *op = adjust_address (*op, BLKmode, 0);
4963 set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
4965 /* Get references to both ends of the field. We deliberately don't
4966 use the original QImode *OP for FIRST since the new BLKmode one
4967 might have a simpler address. */
4968 first = adjust_address (*op, QImode, 0);
4969 last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
4971 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
4972 be the upper word and RIGHT the lower word. */
4973 if (TARGET_BIG_ENDIAN)
4974 *left = first, *right = last;
4976 *left = last, *right = first;
4982 /* Try to emit the equivalent of (set DEST (zero_extract SRC WIDTH BITPOS)).
4983 Return true on success. We only handle cases where zero_extract is
4984 equivalent to sign_extract. */
4987 mips_expand_unaligned_load (rtx dest, rtx src, unsigned int width, int bitpos)
4989 rtx left, right, temp;
4991 /* If TARGET_64BIT, the destination of a 32-bit load will be a
4992 paradoxical word_mode subreg. This is the only case in which
4993 we allow the destination to be larger than the source. */
4994 if (GET_CODE (dest) == SUBREG
4995 && GET_MODE (dest) == DImode
4996 && SUBREG_BYTE (dest) == 0
4997 && GET_MODE (SUBREG_REG (dest)) == SImode)
4998 dest = SUBREG_REG (dest);
5000 /* After the above adjustment, the destination must be the same
5001 width as the source. */
5002 if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
5005 if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
/* Emit an ldl/ldr or lwl/lwr pair depending on width; TEMP carries
   the partial value between the two halves of the pair.  */
5008 temp = gen_reg_rtx (GET_MODE (dest));
5009 if (GET_MODE (dest) == DImode)
5011 emit_insn (gen_mov_ldl (temp, src, left));
5012 emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
5016 emit_insn (gen_mov_lwl (temp, src, left));
5017 emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
5023 /* Try to expand (set (zero_extract DEST WIDTH BITPOS) SRC). Return
5027 mips_expand_unaligned_store (rtx dest, rtx src, unsigned int width, int bitpos)
5030 enum machine_mode mode;
5032 if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
5035 mode = mode_for_size (width, MODE_INT, 0);
5036 src = gen_lowpart (mode, src);
/* Emit an sdl/sdr or swl/swr pair depending on width (the mode test
   between the branches is elided in this listing).  */
5040 emit_insn (gen_mov_sdl (dest, src, left));
5041 emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
5045 emit_insn (gen_mov_swl (dest, src, left));
5046 emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
5051 /* Return true if X is a MEM with the same size as MODE. */
5054 mips_mem_fits_mode_p (enum machine_mode mode, rtx x)
/* MEM_SIZE may be null when the size is unknown; the "size &&" guard
   below treats that as "does not fit".  */
5061 size = MEM_SIZE (x);
5062 return size && INTVAL (size) == GET_MODE_SIZE (mode);
5065 /* Return true if (zero_extract OP SIZE POSITION) can be used as the
5066 source of an "ext" instruction or the destination of an "ins"
5067 instruction. OP must be a register operand and the following
5068 conditions must hold:
5070 0 <= POSITION < GET_MODE_BITSIZE (GET_MODE (op))
5071 0 < SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
5072 0 < POSITION + SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
5074 Also reject lengths equal to a word as they are better handled
5075 by the move patterns. */
5078 mips_use_ins_ext_p (rtx op, rtx size, rtx position)
5080 HOST_WIDE_INT len, pos;
/* Reject when the ISA lacks ext/ins, OP is not a register, or the
   mode is wider than a word.  */
5082 if (!ISA_HAS_EXT_INS
5083 || !register_operand (op, VOIDmode)
5084 || GET_MODE_BITSIZE (GET_MODE (op)) > BITS_PER_WORD)
5087 len = INTVAL (size);
5088 pos = INTVAL (position);
/* Range checks per the header comment; note len >= bitsize also
   rejects full-word lengths, as documented above.  */
5090 if (len <= 0 || len >= GET_MODE_BITSIZE (GET_MODE (op))
5091 || pos < 0 || pos + len > GET_MODE_BITSIZE (GET_MODE (op)))
5097 /* Set up globals to generate code for the ISA or processor
5098 described by INFO. */
5101 mips_set_architecture (const struct mips_cpu_info *info)
/* Caches INFO and mirrors its cpu/isa fields into the globals that
   the rest of the back end tests.  */
5105 mips_arch_info = info;
5106 mips_arch = info->cpu;
5107 mips_isa = info->isa;
5112 /* Likewise for tuning: record the processor to optimize for. */
5115 mips_set_tune (const struct mips_cpu_info *info)
5119 mips_tune_info = info;
5120 mips_tune = info->cpu;
5124 /* Initialize mips_split_addresses from the associated command-line
5127 mips_split_addresses is a half-way house between explicit
5128 relocations and the traditional assembler macros. It can
5129 split absolute 32-bit symbolic constants into a high/lo_sum
5130 pair but uses macros for other sorts of access.
5132 Like explicit relocation support for REL targets, it relies
5133 on GNU extensions in the assembler and the linker.
5135 Although this code should work for -O0, it has traditionally
5136 been treated as an optimization. */
5139 mips_init_split_addresses (void)
/* Only split when not MIPS16, splitting is requested, optimizing,
   non-PIC, and symbols fit in 32 bits.  */
5141 if (!TARGET_MIPS16 && TARGET_SPLIT_ADDRESSES
5142 && optimize && !flag_pic
5143 && !ABI_HAS_64BIT_SYMBOLS)
5144 mips_split_addresses = 1;
5146 mips_split_addresses = 0;
5149 /* (Re-)Initialize information about relocs. */
5152 mips_init_relocs (void)
/* Clear all three tables first; entries left zero mean "no split /
   no relocation operator" for that symbol type.  */
5154 memset (mips_split_p, '\0', sizeof (mips_split_p));
5155 memset (mips_hi_relocs, '\0', sizeof (mips_hi_relocs));
5156 memset (mips_lo_relocs, '\0', sizeof (mips_lo_relocs));
5158 if (ABI_HAS_64BIT_SYMBOLS)
5160 if (TARGET_EXPLICIT_RELOCS)
/* 64-bit symbols are built in three pieces; each piece pairs a
   "hi" operator with the next-lower "lo" operator.  */
5162 mips_split_p[SYMBOL_64_HIGH] = true;
5163 mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
5164 mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
5166 mips_split_p[SYMBOL_64_MID] = true;
5167 mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
5168 mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
5170 mips_split_p[SYMBOL_64_LOW] = true;
5171 mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
5172 mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
5174 mips_split_p[SYMBOL_ABSOLUTE] = true;
5175 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
5180 if (TARGET_EXPLICIT_RELOCS || mips_split_addresses || TARGET_MIPS16)
5182 mips_split_p[SYMBOL_ABSOLUTE] = true;
5183 mips_hi_relocs[SYMBOL_ABSOLUTE] = "%hi(";
5184 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
5186 mips_lo_relocs[SYMBOL_32_HIGH] = "%hi(";
5192 /* The high part is provided by a pseudo copy of $gp. */
5193 mips_split_p[SYMBOL_GP_RELATIVE] = true;
5194 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gprel(";
5197 if (TARGET_EXPLICIT_RELOCS)
5199 /* Small data constants are kept whole until after reload,
5200 then lowered by mips_rewrite_small_data. */
5201 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gp_rel(";
5203 mips_split_p[SYMBOL_GOT_PAGE_OFST] = true;
5206 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
5207 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%got_ofst(";
5211 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
5212 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%lo(";
5217 /* The HIGH and LO_SUM are matched by special .md patterns. */
5218 mips_split_p[SYMBOL_GOT_DISP] = true;
5220 mips_split_p[SYMBOL_GOTOFF_DISP] = true;
5221 mips_hi_relocs[SYMBOL_GOTOFF_DISP] = "%got_hi(";
5222 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_lo(";
5224 mips_split_p[SYMBOL_GOTOFF_CALL] = true;
5225 mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
5226 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
5231 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_disp(";
5233 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got(";
5234 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
5240 mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
5241 mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
5242 mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
5245 /* Thread-local relocation operators. */
5246 mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
5247 mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";
5248 mips_split_p[SYMBOL_DTPREL] = 1;
5249 mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
5250 mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";
5251 mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";
5252 mips_split_p[SYMBOL_TPREL] = 1;
5253 mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
5254 mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
5256 mips_lo_relocs[SYMBOL_HALF] = "%half(";
/* Last MIPS16 mode we switched to, or -1 if none yet; lets
   mips_set_mips16_mode short-circuit redundant switches.  */
5259 static GTY(()) int was_mips16_p = -1;
5261 /* Set up the target-dependent global state so that it matches the
5262 current function's ISA mode. */
5265 mips_set_mips16_mode (int mips16_p)
5267 if (mips16_p == was_mips16_p)
5270 /* Restore base settings of various flags. */
5271 target_flags = mips_base_target_flags;
5272 align_loops = mips_base_align_loops;
5273 align_jumps = mips_base_align_jumps;
5274 align_functions = mips_base_align_functions;
5275 flag_schedule_insns = mips_base_schedule_insns;
5276 flag_reorder_blocks_and_partition = mips_base_reorder_blocks_and_partition;
5277 flag_move_loop_invariants = mips_base_move_loop_invariants;
5278 flag_delayed_branch = mips_flag_delayed_branch;
5282 /* Select mips16 instruction set. */
5283 target_flags |= MASK_MIPS16;
5285 /* Don't run the scheduler before reload, since it tends to
5286 increase register pressure. */
5287 flag_schedule_insns = 0;
5289 /* Don't do hot/cold partitioning. The constant layout code expects
5290 the whole function to be in a single section. */
5291 flag_reorder_blocks_and_partition = 0;
5293 /* Don't move loop invariants, because it tends to increase
5294 register pressure. It also introduces an extra move in cases
5295 where the constant is the first operand in a two-operand binary
5296 instruction, or when it forms a register argument to a function
5298 flag_move_loop_invariants = 0;
5300 /* Silently disable -mexplicit-relocs since it doesn't apply
5301 to mips16 code. Even so, it would be overly pedantic to warn
5302 about "-mips16 -mexplicit-relocs", especially given that
5303 we use a %gprel() operator. */
5304 target_flags &= ~MASK_EXPLICIT_RELOCS;
5306 /* Silently disable DSP extensions. */
5307 target_flags &= ~MASK_DSP;
5308 target_flags &= ~MASK_DSPR2;
5312 /* Reset to select base non-mips16 ISA. */
5313 target_flags &= ~MASK_MIPS16;
5315 /* When using explicit relocs, we call dbr_schedule from within
5317 if (TARGET_EXPLICIT_RELOCS)
5318 flag_delayed_branch = 0;
5320 /* Provide default values for align_* for 64-bit targets. */
5323 if (align_loops == 0)
5325 if (align_jumps == 0)
5327 if (align_functions == 0)
5328 align_functions = 8;
5332 /* (Re)initialize mips target internals for new ISA. */
5333 mips_init_split_addresses ();
5334 mips_init_relocs ();
5336 if (was_mips16_p >= 0)
5337 /* Reinitialize target-dependent state. */
5340 was_mips16_p = TARGET_MIPS16;
5343 /* Implement TARGET_SET_CURRENT_FUNCTION. Decide whether the current
5344 function should use the MIPS16 ISA and switch modes accordingly. */
5347 mips_set_current_function (tree fndecl)
5350 if (errorcount || sorrycount)
5351 /* Avoid generating RTL when fndecl is possibly invalid. Best to fall
5352 back on non-MIPS16 mode to avoid any strange secondary errors about
5353 use of unsupported features in MIPS16 mode. */
/* With a valid FNDECL, honor its per-function mips16 attribute as
   recorded on the SYMBOL_REF; otherwise use the global default.  */
5356 mips16p = SYMBOL_REF_MIPS16_FUNC_P (XEXP (DECL_RTL (fndecl), 0));
5358 mips16p = mips_base_mips16;
5359 mips_set_mips16_mode (mips16p);
5362 /* Implement TARGET_HANDLE_OPTION. */
/* NOTE(review): several case labels and assignments (the -mabi=
   settings other than eabi, the switch header, and return
   statements) are elided in this listing; confirm the full switch
   before editing.  */
5365 mips_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
/* -mabi= handling: map the textual ABI name onto mips_abi.  */
5370 if (strcmp (arg, "32") == 0)
5372 else if (strcmp (arg, "o64") == 0)
5374 else if (strcmp (arg, "n32") == 0)
5376 else if (strcmp (arg, "64") == 0)
5378 else if (strcmp (arg, "eabi") == 0)
5379 mips_abi = ABI_EABI;
/* -march=/-mtune=: accept iff the CPU name parses.  */
5386 return mips_parse_cpu (arg) != 0;
/* -mipsN: synthesize the CPU name "mipsN" and parse it.  */
5389 mips_isa_info = mips_parse_cpu (ACONCAT (("mips", arg, NULL)));
5390 return mips_isa_info != 0;
5392 case OPT_mno_flush_func:
5393 mips_cache_flush_func = NULL;
5396 case OPT_mcode_readable_:
5397 if (strcmp (arg, "yes") == 0)
5398 mips_code_readable = CODE_READABLE_YES;
5399 else if (strcmp (arg, "pcrel") == 0)
5400 mips_code_readable = CODE_READABLE_PCREL;
5401 else if (strcmp (arg, "no") == 0)
5402 mips_code_readable = CODE_READABLE_NO;
5412 /* Set up the threshold for data to go into the small data area, instead
5413 of the normal data area, and detect any conflicts in the switches. */
5416 override_options (void)
5418 int i, start, regno;
5419 enum machine_mode mode;
5421 #ifdef SUBTARGET_OVERRIDE_OPTIONS
5422 SUBTARGET_OVERRIDE_OPTIONS;
5425 mips_section_threshold = g_switch_set ? g_switch_value : MIPS_DEFAULT_GVALUE;
5427 /* The following code determines the architecture and register size.
5428 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
5429 The GAS and GCC code should be kept in sync as much as possible. */
5431 if (mips_arch_string != 0)
5432 mips_set_architecture (mips_parse_cpu (mips_arch_string));
5434 if (mips_isa_info != 0)
5436 if (mips_arch_info == 0)
5437 mips_set_architecture (mips_isa_info);
5438 else if (mips_arch_info->isa != mips_isa_info->isa)
5439 error ("-%s conflicts with the other architecture options, "
5440 "which specify a %s processor",
5441 mips_isa_info->name,
5442 mips_cpu_info_from_isa (mips_arch_info->isa)->name);
5445 if (mips_arch_info == 0)
5447 #ifdef MIPS_CPU_STRING_DEFAULT
5448 mips_set_architecture (mips_parse_cpu (MIPS_CPU_STRING_DEFAULT));
5450 mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
5454 if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
5455 error ("-march=%s is not compatible with the selected ABI",
5456 mips_arch_info->name);
5458 /* Optimize for mips_arch, unless -mtune selects a different processor. */
5459 if (mips_tune_string != 0)
5460 mips_set_tune (mips_parse_cpu (mips_tune_string));
5462 if (mips_tune_info == 0)
5463 mips_set_tune (mips_arch_info);
5465 /* Set cost structure for the processor. */
5467 mips_cost = &mips_rtx_cost_optimize_size;
5469 mips_cost = &mips_rtx_cost_data[mips_tune];
5471 /* If the user hasn't specified a branch cost, use the processor's
5473 if (mips_branch_cost == 0)
5474 mips_branch_cost = mips_cost->branch_cost;
5476 if ((target_flags_explicit & MASK_64BIT) != 0)
5478 /* The user specified the size of the integer registers. Make sure
5479 it agrees with the ABI and ISA. */
5480 if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
5481 error ("-mgp64 used with a 32-bit processor");
5482 else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
5483 error ("-mgp32 used with a 64-bit ABI");
5484 else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
5485 error ("-mgp64 used with a 32-bit ABI");
5489 /* Infer the integer register size from the ABI and processor.
5490 Restrict ourselves to 32-bit registers if that's all the
5491 processor has, or if the ABI cannot handle 64-bit registers. */
5492 if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
5493 target_flags &= ~MASK_64BIT;
5495 target_flags |= MASK_64BIT;
5498 if ((target_flags_explicit & MASK_FLOAT64) != 0)
5500 /* Really, -mfp32 and -mfp64 are ornamental options. There's
5501 only one right answer here. */
5502 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
5503 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
5504 else if (!TARGET_64BIT && TARGET_FLOAT64
5505 && !(ISA_HAS_MXHC1 && mips_abi == ABI_32))
5506 error ("-mgp32 and -mfp64 can only be combined if the target"
5507 " supports the mfhc1 and mthc1 instructions");
5508 else if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
5509 error ("unsupported combination: %s", "-mfp64 -msingle-float");
5513 /* -msingle-float selects 32-bit float registers. Otherwise the
5514 float registers should be the same size as the integer ones. */
5515 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
5516 target_flags |= MASK_FLOAT64;
5518 target_flags &= ~MASK_FLOAT64;
5521 /* End of code shared with GAS. */
5523 if ((target_flags_explicit & MASK_LONG64) == 0)
5525 if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
5526 target_flags |= MASK_LONG64;
5528 target_flags &= ~MASK_LONG64;
5531 if (MIPS_MARCH_CONTROLS_SOFT_FLOAT
5532 && (target_flags_explicit & MASK_SOFT_FLOAT_ABI) == 0)
5534 /* For some configurations, it is useful to have -march control
5535 the default setting of MASK_SOFT_FLOAT_ABI. */
5536 switch ((int) mips_arch)
5538 case PROCESSOR_R4100:
5539 case PROCESSOR_R4111:
5540 case PROCESSOR_R4120:
5541 case PROCESSOR_R4130:
5542 target_flags |= MASK_SOFT_FLOAT_ABI;
5546 target_flags &= ~MASK_SOFT_FLOAT_ABI;
5552 flag_pcc_struct_return = 0;
5554 if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
5556 /* If neither -mbranch-likely nor -mno-branch-likely was given
5557 on the command line, set MASK_BRANCHLIKELY based on the target
5560 By default, we enable use of Branch Likely instructions on
5561 all architectures which support them with the following
5562 exceptions: when creating MIPS32 or MIPS64 code, and when
5563 tuning for architectures where their use tends to hurt
5566 The MIPS32 and MIPS64 architecture specifications say "Software
5567 is strongly encouraged to avoid use of Branch Likely
5568 instructions, as they will be removed from a future revision
5569 of the [MIPS32 and MIPS64] architecture." Therefore, we do not
5570 issue those instructions unless instructed to do so by
5572 if (ISA_HAS_BRANCHLIKELY
5573 && !(ISA_MIPS32 || ISA_MIPS32R2 || ISA_MIPS64)
5574 && !(TUNE_MIPS5500 || TUNE_SB1))
5575 target_flags |= MASK_BRANCHLIKELY;
5577 target_flags &= ~MASK_BRANCHLIKELY;
5579 if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
5580 warning (0, "generation of Branch Likely instructions enabled, but not supported by architecture");
5582 /* The effect of -mabicalls isn't defined for the EABI. */
5583 if (mips_abi == ABI_EABI && TARGET_ABICALLS)
5585 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
5586 target_flags &= ~MASK_ABICALLS;
5589 /* MIPS16 cannot generate PIC yet. */
5590 if (TARGET_MIPS16 && (flag_pic || TARGET_ABICALLS))
5592 sorry ("MIPS16 PIC");
5593 target_flags &= ~MASK_ABICALLS;
5594 flag_pic = flag_pie = flag_shlib = 0;
5597 if (TARGET_ABICALLS)
5598 /* We need to set flag_pic for executables as well as DSOs
5599 because we may reference symbols that are not defined in
5600 the final executable. (MIPS does not use things like
5601 copy relocs, for example.)
5603 Also, there is a body of code that uses __PIC__ to distinguish
5604 between -mabicalls and -mno-abicalls code. */
5607 /* -mvr4130-align is a "speed over size" optimization: it usually produces
5608 faster code, but at the expense of more nops. Enable it at -O3 and
5610 if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
5611 target_flags |= MASK_VR4130_ALIGN;
5613 /* Prefer a call to memcpy over inline code when optimizing for size,
5614 though see MOVE_RATIO in mips.h. */
5615 if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0)
5616 target_flags |= MASK_MEMCPY;
5618 /* If we have a nonzero small-data limit, check that the -mgpopt
5619 setting is consistent with the other target flags. */
5620 if (mips_section_threshold > 0)
5624 if (!TARGET_MIPS16 && !TARGET_EXPLICIT_RELOCS)
5625 error ("%<-mno-gpopt%> needs %<-mexplicit-relocs%>");
5627 TARGET_LOCAL_SDATA = false;
5628 TARGET_EXTERN_SDATA = false;
5632 if (TARGET_VXWORKS_RTP)
5633 warning (0, "cannot use small-data accesses for %qs", "-mrtp");
5635 if (TARGET_ABICALLS)
5636 warning (0, "cannot use small-data accesses for %qs",
5641 #ifdef MIPS_TFMODE_FORMAT
5642 REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
5645 /* Make sure that the user didn't turn off paired single support when
5646 MIPS-3D support is requested. */
5647 if (TARGET_MIPS3D && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
5648 && !TARGET_PAIRED_SINGLE_FLOAT)
5649 error ("-mips3d requires -mpaired-single");
5651 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
5653 target_flags |= MASK_PAIRED_SINGLE_FLOAT;
5655 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
5656 and TARGET_HARD_FLOAT are both true. */
5657 if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT))
5658 error ("-mips3d/-mpaired-single must be used with -mfp64 -mhard-float");
5660 /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
5662 if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_MIPS64)
5663 error ("-mips3d/-mpaired-single must be used with -mips64");
5665 /* If TARGET_DSPR2, enable MASK_DSP. */
5667 target_flags |= MASK_DSP;
5669 mips_print_operand_punct['?'] = 1;
5670 mips_print_operand_punct['#'] = 1;
5671 mips_print_operand_punct['/'] = 1;
5672 mips_print_operand_punct['&'] = 1;
5673 mips_print_operand_punct['!'] = 1;
5674 mips_print_operand_punct['*'] = 1;
5675 mips_print_operand_punct['@'] = 1;
5676 mips_print_operand_punct['.'] = 1;
5677 mips_print_operand_punct['('] = 1;
5678 mips_print_operand_punct[')'] = 1;
5679 mips_print_operand_punct['['] = 1;
5680 mips_print_operand_punct[']'] = 1;
5681 mips_print_operand_punct['<'] = 1;
5682 mips_print_operand_punct['>'] = 1;
5683 mips_print_operand_punct['{'] = 1;
5684 mips_print_operand_punct['}'] = 1;
5685 mips_print_operand_punct['^'] = 1;
5686 mips_print_operand_punct['$'] = 1;
5687 mips_print_operand_punct['+'] = 1;
5688 mips_print_operand_punct['~'] = 1;
5690 /* Set up array to map GCC register number to debug register number.
5691 Ignore the special purpose register numbers. */
5693 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5695 mips_dbx_regno[i] = INVALID_REGNUM;
5696 if (GP_REG_P (i) || FP_REG_P (i) || ALL_COP_REG_P (i))
5697 mips_dwarf_regno[i] = i;
5699 mips_dwarf_regno[i] = INVALID_REGNUM;
5702 start = GP_DBX_FIRST - GP_REG_FIRST;
5703 for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
5704 mips_dbx_regno[i] = i + start;
5706 start = FP_DBX_FIRST - FP_REG_FIRST;
5707 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
5708 mips_dbx_regno[i] = i + start;
5710 /* HI and LO debug registers use big-endian ordering. */
5711 mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
5712 mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
5713 mips_dwarf_regno[HI_REGNUM] = MD_REG_FIRST + 0;
5714 mips_dwarf_regno[LO_REGNUM] = MD_REG_FIRST + 1;
5715 for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
5717 mips_dwarf_regno[i + TARGET_LITTLE_ENDIAN] = i;
5718 mips_dwarf_regno[i + TARGET_BIG_ENDIAN] = i + 1;
5721 /* Set up array giving whether a given register can hold a given mode. */
5723 for (mode = VOIDmode;
5724 mode != MAX_MACHINE_MODE;
5725 mode = (enum machine_mode) ((int)mode + 1))
5727 register int size = GET_MODE_SIZE (mode);
5728 register enum mode_class class = GET_MODE_CLASS (mode);
5730 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5734 if (mode == CCV2mode)
5737 && (regno - ST_REG_FIRST) % 2 == 0);
5739 else if (mode == CCV4mode)
5742 && (regno - ST_REG_FIRST) % 4 == 0);
5744 else if (mode == CCmode)
5747 temp = (regno == FPSW_REGNUM);
5749 temp = (ST_REG_P (regno) || GP_REG_P (regno)
5750 || FP_REG_P (regno));
5753 else if (GP_REG_P (regno))
5754 temp = ((regno & 1) == 0 || size <= UNITS_PER_WORD);
5756 else if (FP_REG_P (regno))
5757 temp = ((((regno % MAX_FPRS_PER_FMT) == 0)
5758 || (MIN_FPRS_PER_FMT == 1
5759 && size <= UNITS_PER_FPREG))
5760 && (((class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT
5761 || class == MODE_VECTOR_FLOAT)
5762 && size <= UNITS_PER_FPVALUE)
5763 /* Allow integer modes that fit into a single
5764 register. We need to put integers into FPRs
5765 when using instructions like cvt and trunc.
5766 We can't allow sizes smaller than a word,
5767 the FPU has no appropriate load/store
5768 instructions for those. */
5769 || (class == MODE_INT
5770 && size >= MIN_UNITS_PER_WORD
5771 && size <= UNITS_PER_FPREG)
5772 /* Allow TFmode for CCmode reloads. */
5773 || (ISA_HAS_8CC && mode == TFmode)));
5775 else if (ACC_REG_P (regno))
5776 temp = (INTEGRAL_MODE_P (mode)
5777 && size <= UNITS_PER_WORD * 2
5778 && (size <= UNITS_PER_WORD
5779 || regno == MD_REG_FIRST
5780 || (DSP_ACC_REG_P (regno)
5781 && ((regno - DSP_ACC_REG_FIRST) & 1) == 0)));
5783 else if (ALL_COP_REG_P (regno))
5784 temp = (class == MODE_INT && size <= UNITS_PER_WORD);
5788 mips_hard_regno_mode_ok[(int)mode][regno] = temp;
5792 /* Save GPR registers in word_mode sized hunks. word_mode hasn't been
5793 initialized yet, so we can't use that here. */
5794 gpr_mode = TARGET_64BIT ? DImode : SImode;
5796 /* Function to allocate machine-dependent function status. */
5797 init_machine_status = &mips_init_machine_status;
5799 /* Default to working around R4000 errata only if the processor
5800 was selected explicitly. */
5801 if ((target_flags_explicit & MASK_FIX_R4000) == 0
5802 && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
5803 target_flags |= MASK_FIX_R4000;
5805 /* Default to working around R4400 errata only if the processor
5806 was selected explicitly. */
5807 if ((target_flags_explicit & MASK_FIX_R4400) == 0
5808 && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
5809 target_flags |= MASK_FIX_R4400;
5811 /* Save base state of options. */
5812 mips_base_mips16 = TARGET_MIPS16;
5813 mips_base_target_flags = target_flags;
5814 mips_base_schedule_insns = flag_schedule_insns;
5815 mips_base_reorder_blocks_and_partition = flag_reorder_blocks_and_partition;
5816 mips_base_move_loop_invariants = flag_move_loop_invariants;
5817 mips_base_align_loops = align_loops;
5818 mips_base_align_jumps = align_jumps;
5819 mips_base_align_functions = align_functions;
5820 mips_flag_delayed_branch = flag_delayed_branch;
5822 /* Now select the mips16 or 32-bit instruction set, as requested. */
5823 mips_set_mips16_mode (mips_base_mips16);
5826 /* Swap the register information for registers I and I + 1, which
5827 currently have the wrong endianness. Note that the registers'
5828 fixedness and call-clobberedness might have been set on the
5832 mips_swap_registers (unsigned int i)
/* NOTE(review): excerpt is a lossy listing — the return type, opening brace and
   the declarations of the temporaries (tmpi/tmps) used by the macros below were
   elided; confirm against the full file.  */
5837 #define SWAP_INT(X, Y) (tmpi = (X), (X) = (Y), (Y) = tmpi)
5838 #define SWAP_STRING(X, Y) (tmps = (X), (X) = (Y), (Y) = tmps)
/* Exchange every per-register attribute of registers I and I + 1 so that the
   pair ends up in the correct endian order.  */
5840 SWAP_INT (fixed_regs[i], fixed_regs[i + 1]);
5841 SWAP_INT (call_used_regs[i], call_used_regs[i + 1]);
5842 SWAP_INT (call_really_used_regs[i], call_really_used_regs[i + 1]);
5843 SWAP_STRING (reg_names[i], reg_names[i + 1]);
5849 /* Implement CONDITIONAL_REGISTER_USAGE. */
5852 mips_conditional_register_usage (void)
/* Mark the DSP accumulator registers fixed and call-used.
   NOTE(review): a guarding condition (presumably !TARGET_DSP) appears to have
   been elided from this listing — confirm against the full file.  */
5858 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
5859 fixed_regs[regno] = call_used_regs[regno] = 1;
/* Without hardware floating point, no FP or status registers are usable.  */
5861 if (!TARGET_HARD_FLOAT)
5865 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
5866 fixed_regs[regno] = call_used_regs[regno] = 1;
5867 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
5868 fixed_regs[regno] = call_used_regs[regno] = 1;
5870 else if (! ISA_HAS_8CC)
5874 /* We only have a single condition code register. We
5875 implement this by hiding all the condition code registers,
5876 and generating RTL that refers directly to ST_REG_FIRST. */
5877 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
5878 fixed_regs[regno] = call_used_regs[regno] = 1;
5880 /* In mips16 mode, we permit the $t temporary registers to be used
5881 for reload. We prohibit the unused $s registers, since they
5882 are caller saved, and saving them via a mips16 register would
5883 probably waste more time than just reloading the value. */
/* NOTE(review): the enclosing TARGET_MIPS16 test for this register list was
   elided from the listing.  Registers 18-23 are $s2-$s7, 26-27 are $k0/$k1,
   30 is $fp/$s8.  */
5886 fixed_regs[18] = call_used_regs[18] = 1;
5887 fixed_regs[19] = call_used_regs[19] = 1;
5888 fixed_regs[20] = call_used_regs[20] = 1;
5889 fixed_regs[21] = call_used_regs[21] = 1;
5890 fixed_regs[22] = call_used_regs[22] = 1;
5891 fixed_regs[23] = call_used_regs[23] = 1;
5892 fixed_regs[26] = call_used_regs[26] = 1;
5893 fixed_regs[27] = call_used_regs[27] = 1;
5894 fixed_regs[30] = call_used_regs[30] = 1;
5896 /* fp20-23 are now caller saved. */
5897 if (mips_abi == ABI_64)
5900 for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
5901 call_really_used_regs[regno] = call_used_regs[regno] = 1;
5903 /* Odd registers from fp21 to fp31 are now caller saved. */
5904 if (mips_abi == ABI_N32)
5907 for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
5908 call_really_used_regs[regno] = call_used_regs[regno] = 1;
5910 /* Make sure that double-register accumulator values are correctly
5911 ordered for the current endianness. */
5912 if (TARGET_LITTLE_ENDIAN)
/* Swap HI/LO and each DSP accumulator pair; see mips_swap_registers.  */
5915 mips_swap_registers (MD_REG_FIRST);
5916 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno += 2)
5917 mips_swap_registers (regno);
5921 /* Allocate a chunk of memory for per-function machine-dependent data. */
5922 static struct machine_function *
5923 mips_init_machine_status (void)
/* Allocate a zero-initialized, GC-managed machine_function record; installed
   as init_machine_status by override_options.  */
5925 return ((struct machine_function *)
5926 ggc_alloc_cleared (sizeof (struct machine_function)));
5929 /* On the mips16, we want to allocate $24 (T_REG) before other
5930 registers for instructions for which it is possible. This helps
5931 avoid shuffling registers around in order to set up for an xor,
5932 encouraging the compiler to use a cmp instead. */
5935 mips_order_regs_for_local_alloc (void)
/* Default allocation order is simply register number order.  */
5939 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5940 reg_alloc_order[i] = i;
/* NOTE(review): the TARGET_MIPS16 guard around the swap below was elided from
   this listing — per the function comment, $24 (T_REG) is preferred first only
   in mips16 mode.  */
5944 /* It really doesn't matter where we put register 0, since it is
5945 a fixed register anyhow. */
5946 reg_alloc_order[0] = 24;
5947 reg_alloc_order[24] = 0;
5952 /* The MIPS debug format wants all automatic variables and arguments
5953 to be in terms of the virtual frame pointer (stack pointer before
5954 any adjustment in the function), while the MIPS 3.0 linker wants
5955 the frame pointer to be the stack pointer after the initial
5956 adjustment. So, we do the adjustment here. The arg pointer (which
5957 is eliminated) points to the virtual frame pointer, while the frame
5958 pointer (which may be eliminated) points to the stack pointer after
5959 the initial adjustments. */
5962 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
/* Split ADDR into a base register plus constant; OFFSET2 receives the
   constant term.  */
5964 rtx offset2 = const0_rtx;
5965 rtx reg = eliminate_constant_term (addr, &offset2);
/* NOTE(review): the guard (presumably `if (offset == 0)`) before this
   assignment was elided from the listing.  */
5968 offset = INTVAL (offset2);
5970 if (reg == stack_pointer_rtx || reg == frame_pointer_rtx
5971 || reg == hard_frame_pointer_rtx)
/* Use the cached frame size if the frame layout is final, otherwise
   compute it now.  */
5973 HOST_WIDE_INT frame_size = (!cfun->machine->frame.initialized)
5974 ? compute_frame_size (get_frame_size ())
5975 : cfun->machine->frame.total_size;
5977 /* MIPS16 frame is smaller */
5978 if (frame_pointer_needed && TARGET_MIPS16)
5979 frame_size -= cfun->machine->frame.args_size;
/* Rebase the offset from the virtual frame pointer (pre-adjustment $sp) to
   the post-adjustment stack pointer, as the MIPS 3.0 linker expects.  */
5981 offset = offset - frame_size;
5984 /* sdbout_parms does not want this to crash for unrecognized cases. */
5986 else if (reg != arg_pointer_rtx)
5987 fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
5994 /* If OP is an UNSPEC address, return the address to which it refers,
5995 otherwise return OP itself. */
5998 mips_strip_unspec_address (rtx op)
/* Decompose OP into BASE + constant OFFSET; if BASE is one of our UNSPEC
   address wrappers, rebuild OP as the wrapped symbol plus the offset.
   NOTE(review): declarations of base/offset and the final `return op;` were
   elided from this listing.  */
6002 split_const (op, &base, &offset);
6003 if (UNSPEC_ADDRESS_P (base))
6004 op = plus_constant (UNSPEC_ADDRESS (base), INTVAL (offset));
6008 /* Implement the PRINT_OPERAND macro. The MIPS-specific operand codes are:
6010 'X' OP is CONST_INT, prints 32 bits in hexadecimal format = "0x%08x",
6011 'x' OP is CONST_INT, prints 16 bits in hexadecimal format = "0x%04x",
6012 'h' OP is HIGH, prints %hi(X),
6013 'd' output integer constant in decimal,
6014 'z' if the operand is 0, use $0 instead of normal operand.
6015 'D' print second part of double-word register or memory operand.
6016 'L' print low-order register of double-word register operand.
6017 'M' print high-order register of double-word register operand.
6018 'C' print part of opcode for a branch condition.
6019 'F' print part of opcode for a floating-point branch condition.
6020 'N' print part of opcode for a branch condition, inverted.
6021 'W' print part of opcode for a floating-point branch condition, inverted.
6022 'T' print 'f' for (eq:CC ...), 't' for (ne:CC ...),
6023 'z' for (eq:?I ...), 'n' for (ne:?I ...).
6024 't' like 'T', but with the EQ/NE cases reversed
6025 'Y' for a CONST_INT X, print mips_fp_conditions[X]
6026 'Z' print the operand and a comma for ISA_HAS_8CC, otherwise print nothing
6027 'R' print the reloc associated with LO_SUM
6028 'q' print DSP accumulator registers
6030 The punctuation characters are:
6032 '(' Turn on .set noreorder
6033 ')' Turn on .set reorder
6034 '[' Turn on .set noat
6036 '<' Turn on .set nomacro
6037 '>' Turn on .set macro
6038 '{' Turn on .set volatile (not GAS)
6039 '}' Turn on .set novolatile (not GAS)
6040 '&' Turn on .set noreorder if filling delay slots
6041 '*' Turn on both .set noreorder and .set nomacro if filling delay slots
6042 '!' Turn on .set nomacro if filling delay slots
6043 '#' Print nop if in a .set noreorder section.
6044 '/' Like '#', but does nothing within a delayed branch sequence
6045 '?' Print 'l' if we are to use a branch likely instead of normal branch.
6046 '@' Print the name of the assembler temporary register (at or $1).
6047 '.' Print the name of the register with a hard-wired zero (zero or $0).
6048 '^' Print the name of the pic call-through register (t9 or $25).
6049 '$' Print the name of the stack pointer register (sp or $29).
6050 '+' Print the name of the gp register (usually gp or $28).
6051 '~' Output a branch alignment to LABEL_ALIGN(NULL). */
6054 print_operand (FILE *file, rtx op, int letter)
6056 register enum rtx_code code;
/* Punctuation codes take no operand; LETTER alone selects the action.
   NOTE(review): the enclosing `switch (letter)` and its `case`/`break` lines
   were elided from this listing — only the case bodies survive.  */
6058 if (PRINT_OPERAND_PUNCT_VALID_P (letter))
/* '?': emit the branch-likely suffix 'l' when appropriate.  */
6063 if (mips_branch_likely)
/* '@': assembler temporary ($1/at).  */
6068 fputs (reg_names [GP_REG_FIRST + 1], file);
/* '^': PIC call-through register ($25/t9).  */
6072 fputs (reg_names [PIC_FUNCTION_ADDR_REGNUM], file);
/* '.': hard-wired zero register ($0).  */
6076 fputs (reg_names [GP_REG_FIRST + 0], file);
/* '$': stack pointer.  */
6080 fputs (reg_names[STACK_POINTER_REGNUM], file);
/* '+': global pointer.  */
6084 fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
/* '&': .set noreorder, but only when filling delay slots.  */
6088 if (final_sequence != 0 && set_noreorder++ == 0)
6089 fputs (".set\tnoreorder\n\t", file);
/* '*': both noreorder and nomacro when filling delay slots.  */
6093 if (final_sequence != 0)
6095 if (set_noreorder++ == 0)
6096 fputs (".set\tnoreorder\n\t", file);
6098 if (set_nomacro++ == 0)
6099 fputs (".set\tnomacro\n\t", file);
/* '!': nomacro only, when filling delay slots.  */
6104 if (final_sequence != 0 && set_nomacro++ == 0)
6105 fputs ("\n\t.set\tnomacro", file);
/* '#': nop if inside a .set noreorder region.  */
6109 if (set_noreorder != 0)
6110 fputs ("\n\tnop", file);
/* '/': like '#', but suppressed within a delayed-branch sequence.  */
6114 /* Print an extra newline so that the delayed insn is separated
6115 from the following ones. This looks neater and is consistent
6116 with non-nop delayed sequences. */
6117 if (set_noreorder != 0 && final_sequence == 0)
6118 fputs ("\n\tnop\n", file);
/* '(': open a .set noreorder region (counted, so regions may nest).  */
6122 if (set_noreorder++ == 0)
6123 fputs (".set\tnoreorder\n\t", file);
/* ')': close the matching noreorder region.  */
6127 if (set_noreorder == 0)
6128 error ("internal error: %%) found without a %%( in assembler pattern");
6130 else if (--set_noreorder == 0)
6131 fputs ("\n\t.set\treorder", file);
/* '[' / ']': counted .set noat / .set at region.  */
6136 if (set_noat++ == 0)
6137 fputs (".set\tnoat\n\t", file);
6142 error ("internal error: %%] found without a %%[ in assembler pattern");
6143 else if (--set_noat == 0)
6144 fputs ("\n\t.set\tat", file);
/* '<' / '>': counted .set nomacro / .set macro region.  */
6149 if (set_nomacro++ == 0)
6150 fputs (".set\tnomacro\n\t", file);
6154 if (set_nomacro == 0)
6155 error ("internal error: %%> found without a %%< in assembler pattern");
6156 else if (--set_nomacro == 0)
6157 fputs ("\n\t.set\tmacro", file);
/* '{' / '}': .set volatile region (emitted as a comment; not a GAS directive).  */
6162 if (set_volatile++ == 0)
6163 fputs ("#.set\tvolatile\n\t", file);
6167 if (set_volatile == 0)
6168 error ("internal error: %%} found without a %%{ in assembler pattern");
6169 else if (--set_volatile == 0)
6170 fputs ("\n\t#.set\tnovolatile", file);
/* '~': branch alignment directive, per align_labels_log.  */
6176 if (align_labels_log > 0)
6177 ASM_OUTPUT_ALIGN (file, align_labels_log);
6182 error ("PRINT_OPERAND: unknown punctuation '%c'", letter);
/* Non-punctuation codes require a non-null operand.  */
6191 error ("PRINT_OPERAND null pointer");
6195 code = GET_CODE (op);
/* 'C': branch-condition mnemonic fragment for comparison CODE.
   NOTE(review): the `if (letter == 'C') switch (code)` scaffolding around
   these cases was elided from the listing.  */
6200 case EQ: fputs ("eq", file); break;
6201 case NE: fputs ("ne", file); break;
6202 case GT: fputs ("gt", file); break;
6203 case GE: fputs ("ge", file); break;
6204 case LT: fputs ("lt", file); break;
6205 case LE: fputs ("le", file); break;
6206 case GTU: fputs ("gtu", file); break;
6207 case GEU: fputs ("geu", file); break;
6208 case LTU: fputs ("ltu", file); break;
6209 case LEU: fputs ("leu", file); break;
6211 fatal_insn ("PRINT_OPERAND, invalid insn for %%C", op);
/* 'N': like 'C' but with the condition inverted.  */
6214 else if (letter == 'N')
6217 case EQ: fputs ("ne", file); break;
6218 case NE: fputs ("eq", file); break;
6219 case GT: fputs ("le", file); break;
6220 case GE: fputs ("lt", file); break;
6221 case LT: fputs ("ge", file); break;
6222 case LE: fputs ("gt", file); break;
6223 case GTU: fputs ("leu", file); break;
6224 case GEU: fputs ("ltu", file); break;
6225 case LTU: fputs ("geu", file); break;
6226 case LEU: fputs ("gtu", file); break;
6228 fatal_insn ("PRINT_OPERAND, invalid insn for %%N", op);
/* 'F' / 'W': floating-point branch condition, normal and inverted.  */
6231 else if (letter == 'F')
6234 case EQ: fputs ("c1f", file); break;
6235 case NE: fputs ("c1t", file); break;
6237 fatal_insn ("PRINT_OPERAND, invalid insn for %%F", op);
6240 else if (letter == 'W')
6243 case EQ: fputs ("c1t", file); break;
6244 case NE: fputs ("c1f", file); break;
6246 fatal_insn ("PRINT_OPERAND, invalid insn for %%W", op);
/* 'h': %hi(...) relocation for a HIGH operand.  */
6249 else if (letter == 'h')
6251 if (GET_CODE (op) == HIGH)
6254 print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_hi_relocs);
/* 'R': low-part relocation associated with a LO_SUM.  */
6257 else if (letter == 'R')
6258 print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_lo_relocs);
/* 'Y': FP condition name indexed by a CONST_INT.  */
6260 else if (letter == 'Y')
6262 if (GET_CODE (op) == CONST_INT
6263 && ((unsigned HOST_WIDE_INT) INTVAL (op)
6264 < ARRAY_SIZE (mips_fp_conditions)))
6265 fputs (mips_fp_conditions[INTVAL (op)], file);
6267 output_operand_lossage ("invalid %%Y value");
/* 'Z': print the operand plus a comma only for ISA_HAS_8CC targets.  */
6270 else if (letter == 'Z')
6274 print_operand (file, op, 0);
/* 'q': DSP accumulator name ($ac0..$ac3) for an accumulator register.  */
6279 else if (letter == 'q')
6284 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
6286 regnum = REGNO (op);
6287 if (MD_REG_P (regnum))
6288 fprintf (file, "$ac0");
6289 else if (DSP_ACC_REG_P (regnum))
6290 fprintf (file, "$ac%c", reg_names[regnum][3]);
6292 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
/* Register operands: 'D'/'L'/'M' select a word of a double-word register.  */
6295 else if (code == REG || code == SUBREG)
6297 register int regnum;
6300 regnum = REGNO (op);
6302 regnum = true_regnum (op);
/* Adjust to the other register of the pair depending on endianness.
   NOTE(review): part of this condition (the 'D' case) was elided.  */
6304 if ((letter == 'M' && ! WORDS_BIG_ENDIAN)
6305 || (letter == 'L' && WORDS_BIG_ENDIAN)
6309 fprintf (file, "%s", reg_names[regnum]);
/* Memory operands: 'D' addresses the second word (+4).  */
6312 else if (code == MEM)
6315 output_address (plus_constant (XEXP (op, 0), 4));
6317 output_address (XEXP (op, 0));
/* 'x'/'X'/'d': constant in 16-bit hex, full hex, or decimal.  */
6320 else if (letter == 'x' && GET_CODE (op) == CONST_INT)
6321 fprintf (file, HOST_WIDE_INT_PRINT_HEX, 0xffff & INTVAL(op));
6323 else if (letter == 'X' && GET_CODE(op) == CONST_INT)
6324 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
6326 else if (letter == 'd' && GET_CODE(op) == CONST_INT)
6327 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (INTVAL(op)));
/* 'z': substitute $0 for a zero operand.  */
6329 else if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
6330 fputs (reg_names[GP_REG_FIRST], file);
6332 else if (letter == 'd' || letter == 'x' || letter == 'X')
6333 output_operand_lossage ("invalid use of %%d, %%x, or %%X");
/* 'T'/'t': pick one of "z"/"f"/"n"/"t" from the comparison code and mode.  */
6335 else if (letter == 'T' || letter == 't')
6337 int truth = (code == NE) == (letter == 'T');
6338 fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
6341 else if (CONST_GP_P (op))
6342 fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
/* Default: print the (unspec-stripped) constant/address.  */
6345 output_addr_const (file, mips_strip_unspec_address (op));
6349 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM
6350 in context CONTEXT. RELOCS is the array of relocations to use. */
6353 print_operand_reloc (FILE *file, rtx op, enum mips_symbol_context context,
6354 const char **relocs)
6356 enum mips_symbol_type symbol_type;
/* Classify OP to select the relocation prefix; fail hard if RELOCS has no
   entry for that symbol type.  */
6359 symbol_type = mips_classify_symbolic_expression (op, context);
6360 if (relocs[symbol_type] == 0)
6361 fatal_insn ("PRINT_OPERAND, invalid operand for relocation", op);
6363 fputs (relocs[symbol_type], file);
6364 output_addr_const (file, mips_strip_unspec_address (op));
/* Emit a closing ')' for each '(' in the relocation prefix.
   NOTE(review): the loop body (the fputc of ')') was elided from this
   listing.  */
6365 for (p = relocs[symbol_type]; *p != 0; p++)
6370 /* Output address operand X to FILE. */
6373 print_operand_address (FILE *file, rtx x)
6375 struct mips_address_info addr;
/* Decompose X; each address class has its own assembler syntax.
   NOTE(review): the `switch (addr.type)` line and the ADDRESS_REG case label
   were elided from this listing.  */
6377 if (mips_classify_address (&addr, x, word_mode, true))
/* ADDRESS_REG: "offset(reg)".  */
6381 print_operand (file, addr.offset, 0);
6382 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
6385 case ADDRESS_LO_SUM:
/* Low-part relocation applied to the offset, then "(reg)".  */
6386 print_operand_reloc (file, addr.offset, SYMBOL_CONTEXT_MEM,
6388 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
6391 case ADDRESS_CONST_INT:
/* Constant address relative to $0.  */
6392 output_addr_const (file, x);
6393 fprintf (file, "(%s)", reg_names[0]);
6396 case ADDRESS_SYMBOLIC:
6397 output_addr_const (file, mips_strip_unspec_address (x));
6403 /* When using assembler macros, keep track of all of small-data externs
6404 so that mips_file_end can emit the appropriate declarations for them.
6406 In most cases it would be safe (though pointless) to emit .externs
6407 for other symbols too. One exception is when an object is within
6408 the -G limit but declared by the user to be in a section other
6409 than .sbss or .sdata. */
6412 mips_output_external (FILE *file, tree decl, const char *name)
6414 default_elf_asm_output_external (file, decl, name);
6416 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
6417 set in order to avoid putting out names that are never really
6419 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
/* Small-data externs need a sized .extern so the assembler can use
   gp-relative addressing for them (macro mode only).  */
6421 if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
6423 fputs ("\t.extern\t", file);
6424 assemble_name (file, name);
6425 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC "\n",
6426 int_size_in_bytes (TREE_TYPE (decl)));
6428 else if (TARGET_IRIX
6429 && mips_abi == ABI_32
6430 && TREE_CODE (decl) == FUNCTION_DECL)
6432 /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
6433 `.global name .text' directive for every used but
6434 undefined function. If we don't, the linker may perform
6435 an optimization (skipping over the insns that set $gp)
6436 when it is unsafe. */
6437 fputs ("\t.globl ", file);
6438 assemble_name (file, name);
6439 fputs (" .text\n", file);
6444 /* Emit a new filename to a stream. If we are smuggling stabs, try to
6445 put out a MIPS ECOFF file and a stab. */
6448 mips_output_filename (FILE *stream, const char *name)
6451 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
/* DWARF-2 path: nothing emitted here (handled elsewhere).  */
6453 if (write_symbols == DWARF2_DEBUG)
/* First file name seen: emit ".file N" and remember it.  */
6455 else if (mips_output_filename_first_time)
6457 mips_output_filename_first_time = 0;
6458 num_source_filenames += 1;
6459 current_function_file = name;
6460 fprintf (stream, "\t.file\t%d ", num_source_filenames);
6461 output_quoted_string (stream, name);
6462 putc ('\n', stream);
6465 /* If we are emitting stabs, let dbxout.c handle this (except for
6466 the mips_output_filename_first_time case). */
6467 else if (write_symbols == DBX_DEBUG)
/* Subsequent file-name changes also get a fresh ".file N".  */
6470 else if (name != current_function_file
6471 && strcmp (name, current_function_file) != 0)
6473 num_source_filenames += 1;
6474 current_function_file = name;
6475 fprintf (stream, "\t.file\t%d ", num_source_filenames);
6476 output_quoted_string (stream, name);
6477 putc ('\n', stream);
6481 /* Output an ASCII string, in a space-saving way. PREFIX is the string
6482 that should be written before the opening quote, such as "\t.ascii\t"
6483 for real string data or "\t# " for a comment. */
6486 mips_output_ascii (FILE *stream, const char *string_param, size_t len,
/* NOTE(review): the `const char *prefix` parameter line and the cur_pos
   declaration were elided from this listing.  */
6491 register const unsigned char *string =
6492 (const unsigned char *)string_param;
6494 fprintf (stream, "%s\"", prefix);
6495 for (i = 0; i < len; i++)
6497 register int c = string[i];
/* Printable characters are emitted directly; backslash and quote need
   escaping; everything else is emitted as an octal escape.
   NOTE(review): the printable/unprintable branch lines were elided.  */
6501 if (c == '\\' || c == '\"')
6503 putc ('\\', stream);
6511 fprintf (stream, "\\%03o", c);
/* Break long lines (> 72 columns) to keep the assembler output readable.  */
6515 if (cur_pos > 72 && i+1 < len)
6518 fprintf (stream, "\"\n%s\"", prefix);
6521 fprintf (stream, "\"\n");
6524 /* Implement TARGET_ASM_FILE_START. */
6527 mips_file_start (void)
6529 default_file_start ();
6533 /* Generate a special section to describe the ABI switches used to
6534 produce the resultant binary. This used to be done by the assembler
6535 setting bits in the ELF header's flags field, but we have run out of
6536 bits. GDB needs this information in order to be able to correctly
6537 debug these binaries. See the function mips_gdbarch_init() in
6538 gdb/mips-tdep.c. This is unnecessary for the IRIX 5/6 ABIs and
6539 causes unnecessary IRIX 6 ld warnings. */
6540 const char * abi_string = NULL;
/* NOTE(review): the `switch (mips_abi)` line was elided from this listing.  */
6544 case ABI_32: abi_string = "abi32"; break;
6545 case ABI_N32: abi_string = "abiN32"; break;
6546 case ABI_64: abi_string = "abi64"; break;
6547 case ABI_O64: abi_string = "abiO64"; break;
6548 case ABI_EABI: abi_string = TARGET_64BIT ? "eabi64" : "eabi32"; break;
6552 /* Note - we use fprintf directly rather than calling switch_to_section
6553 because in this way we can avoid creating an allocated section. We
6554 do not want this section to take up any space in the running
6556 fprintf (asm_out_file, "\t.section .mdebug.%s\n", abi_string);
6558 /* There is no ELF header flag to distinguish long32 forms of the
6559 EABI from long64 forms. Emit a special section to help tools
6560 such as GDB. Do the same for o64, which is sometimes used with
6562 if (mips_abi == ABI_EABI || mips_abi == ABI_O64)
6563 fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n",
6564 TARGET_LONG64 ? 64 : 32);
6566 /* Restore the default section. */
6567 fprintf (asm_out_file, "\t.previous\n");
/* Record the FP ABI (single/double/soft) for the linker/tools.  */
6569 #ifdef HAVE_AS_GNU_ATTRIBUTE
6570 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
6571 TARGET_HARD_FLOAT_ABI ? (TARGET_DOUBLE_FLOAT ? 1 : 2) : 3);
6575 /* Generate the pseudo ops that System V.4 wants. */
6576 if (TARGET_ABICALLS)
6577 fprintf (asm_out_file, "\t.abicalls\n");
6579 if (flag_verbose_asm)
6580 fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
6582 mips_section_threshold, mips_arch_info->name, mips_isa);
6585 #ifdef BSS_SECTION_ASM_OP
6586 /* Implement ASM_OUTPUT_ALIGNED_BSS. This differs from the default only
6587 in the use of sbss. */
6590 mips_output_aligned_bss (FILE *stream, tree decl, const char *name,
6591 unsigned HOST_WIDE_INT size, int align)
6593 extern tree last_assemble_variable_decl;
/* Small-data objects go in .sbss so they can be addressed gp-relative;
   everything else uses the normal bss section.  */
6595 if (mips_in_small_data_p (decl))
6596 switch_to_section (get_named_section (NULL, ".sbss", 0));
6598 switch_to_section (bss_section);
6599 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
6600 last_assemble_variable_decl = decl;
6601 ASM_DECLARE_OBJECT_NAME (stream, name, decl);
/* Reserve at least one byte so zero-sized objects get distinct addresses.  */
6602 ASM_OUTPUT_SKIP (stream, size != 0 ? size : 1);
6606 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
6607 elfos.h version, but we also need to handle -muninit-const-in-rodata. */
6610 mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
6611 unsigned HOST_WIDE_INT size,
6614 /* If the target wants uninitialized const declarations in
6615 .rdata then don't put them in .comm. */
6616 if (TARGET_EMBEDDED_DATA && TARGET_UNINIT_CONST_IN_RODATA
6617 && TREE_CODE (decl) == VAR_DECL && TREE_READONLY (decl)
6618 && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
6620 if (TREE_PUBLIC (decl) && DECL_NAME (decl))
6621 targetm.asm_out.globalize_label (stream, name);
/* Emit the object as a .space reservation in the read-only section.  */
6623 switch_to_section (readonly_data_section);
6624 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
6625 mips_declare_object (stream, name, "",
6626 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
/* Otherwise fall back to an ordinary .comm directive.
   NOTE(review): the else branch introduction was elided from this listing.  */
6630 mips_declare_common_object (stream, name, "\n\t.comm\t",
6634 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
6635 NAME is the name of the object and ALIGN is the required alignment
6636 in bytes. TAKES_ALIGNMENT_P is true if the directive takes a third
6637 alignment argument. */
6640 mips_declare_common_object (FILE *stream, const char *name,
6641 const char *init_string,
6642 unsigned HOST_WIDE_INT size,
6643 unsigned int align, bool takes_alignment_p)
6645 if (!takes_alignment_p)
/* Directive has no alignment argument: fold the alignment into the size by
   rounding SIZE up to a multiple of the alignment in bytes.  */
6647 size += (align / BITS_PER_UNIT) - 1;
6648 size -= size % (align / BITS_PER_UNIT);
6649 mips_declare_object (stream, name, init_string,
6650 "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
/* Directive accepts alignment: pass it as the third argument.  */
6653 mips_declare_object (stream, name, init_string,
6654 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
6655 size, align / BITS_PER_UNIT);
6658 /* Emit either a label, .comm, or .lcomm directive. When using assembler
6659 macros, mark the symbol as written so that mips_file_end won't emit an
6660 .extern for it. STREAM is the output file, NAME is the name of the
6661 symbol, INIT_STRING is the string that should be written before the
6662 symbol and FINAL_STRING is the string that should be written after it.
6663 FINAL_STRING is a printf() format that consumes the remaining arguments. */
6666 mips_declare_object (FILE *stream, const char *name, const char *init_string,
6667 const char *final_string, ...)
/* Emit INIT_STRING, the (possibly mangled) symbol name, then FINAL_STRING
   formatted with the trailing varargs.  */
6671 fputs (init_string, stream);
6672 assemble_name (stream, name);
6673 va_start (ap, final_string);
6674 vfprintf (stream, final_string, ap);
/* NOTE(review): the matching va_end call was elided from this listing.  */
/* In macro (non-explicit-reloc) mode, mark the symbol written so that
   mips_file_end does not emit a redundant .extern for it.  */
6677 if (!TARGET_EXPLICIT_RELOCS)
6679 tree name_tree = get_identifier (name);
6680 TREE_ASM_WRITTEN (name_tree) = 1;
6684 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
6685 extern int size_directive_output;
6687 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
6688 definitions except that it uses mips_declare_object() to emit the label. */
6691 mips_declare_object_name (FILE *stream, const char *name,
6692 tree decl ATTRIBUTE_UNUSED)
6694 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
6695 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
/* Emit a .size directive when the size is known and not inhibited; record
   that we did so for mips_finish_declare_object.  */
6698 size_directive_output = 0;
6699 if (!flag_inhibit_size_directive && DECL_SIZE (decl))
6703 size_directive_output = 1;
6704 size = int_size_in_bytes (TREE_TYPE (decl));
6705 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
/* Finally emit the label itself via mips_declare_object so the symbol is
   marked as written.  */
6708 mips_declare_object (stream, name, "", ":\n");
6711 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */
6714 mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
6718 name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
/* Emit a late .size directive for top-level tentative definitions whose size
   became known only after the declaration was first seen.  */
6719 if (!flag_inhibit_size_directive
6720 && DECL_SIZE (decl) != 0
6721 && !at_end && top_level
6722 && DECL_INITIAL (decl) == error_mark_node
6723 && !size_directive_output)
6727 size_directive_output = 1;
6728 size = int_size_in_bytes (TREE_TYPE (decl));
6729 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
6734 /* Return true if X in context CONTEXT is a small data address that can
6735 be rewritten as a LO_SUM. */
6738 mips_rewrite_small_data_p (rtx x, enum mips_symbol_context context)
6740 enum mips_symbol_type symbol_type;
/* Only gp-relative symbols can be rewritten, and only when we are emitting
   explicit relocation operators.  */
6742 return (TARGET_EXPLICIT_RELOCS
6743 && mips_symbolic_constant_p (x, context, &symbol_type)
6744 && symbol_type == SYMBOL_GP_RELATIVE);
6748 /* A for_each_rtx callback for mips_small_data_pattern_p. DATA is the
6749 containing MEM, or null if none. */
6752 mips_small_data_pattern_1 (rtx *loc, void *data)
6754 enum mips_symbol_context context;
/* A LO_SUM already carries an explicit relocation; skip its interior
   (recursing only into the base address) rather than reporting a match.  */
6756 if (GET_CODE (*loc) == LO_SUM)
/* NOTE(review): the MEM-handling lines between here and below were elided
   from this listing.  */
6761 if (for_each_rtx (&XEXP (*loc, 0), mips_small_data_pattern_1, *loc))
/* DATA is the containing MEM (or null); it determines whether the symbol is
   classified in a memory or address context.  */
6766 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
6767 return mips_rewrite_small_data_p (*loc, context);
6770 /* Return true if OP refers to small data symbols directly, not through
6774 mips_small_data_pattern_p (rtx op)
/* Walk OP looking for a rewritable small-data reference; nonzero if found.  */
6776 return for_each_rtx (&op, mips_small_data_pattern_1, 0);
6779 /* A for_each_rtx callback, used by mips_rewrite_small_data.
6780 DATA is the containing MEM, or null if none. */
6783 mips_rewrite_small_data_1 (rtx *loc, void *data)
6785 enum mips_symbol_context context;
/* NOTE(review): the MEM test preceding this recursion was elided from the
   listing; the recursion passes the MEM itself as DATA.  */
6789 for_each_rtx (&XEXP (*loc, 0), mips_rewrite_small_data_1, *loc);
6793 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
/* Replace a rewritable small-data reference with an explicit
   $gp-relative LO_SUM.  */
6794 if (mips_rewrite_small_data_p (*loc, context))
6795 *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
/* Don't descend into an existing (or just-created) LO_SUM.  */
6797 if (GET_CODE (*loc) == LO_SUM)
6803 /* If possible, rewrite OP so that it refers to small data using
6804 explicit relocations. */
6807 mips_rewrite_small_data (rtx op)
/* Work on a copy so the caller's pattern is not modified in place.
   NOTE(review): the trailing `return op;` was elided from this listing.  */
6809 op = copy_insn (op);
6810 for_each_rtx (&op, mips_rewrite_small_data_1, 0);
6814 /* Return true if the current function has an insn that implicitly
6818 mips_function_has_gp_insn (void)
6820 /* Don't bother rechecking if we found one last time. */
6821 if (!cfun->machine->has_gp_insn_p)
/* Scan the whole insn stream (including queued sequences) for any insn that
   either has a GOT attribute or matches a small-data pattern — both imply an
   implicit use of $gp.  */
6825 push_topmost_sequence ();
6826 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
/* NOTE(review): the INSN_P test opening this condition was elided from the
   listing.  */
6828 && GET_CODE (PATTERN (insn)) != USE
6829 && GET_CODE (PATTERN (insn)) != CLOBBER
6830 && (get_attr_got (insn) != GOT_UNSET
6831 || small_data_pattern (PATTERN (insn), VOIDmode)))
6833 pop_topmost_sequence ();
/* Cache the answer: nonzero INSN means the loop stopped at a gp user.  */
6835 cfun->machine->has_gp_insn_p = (insn != 0);
6837 return cfun->machine->has_gp_insn_p;
6841 /* Return the register that should be used as the global pointer
6842 within this function. Return 0 if the function doesn't need
6843 a global pointer. */
6846 mips_global_pointer (void)
6850 /* $gp is always available unless we're using a GOT. */
6851 if (!TARGET_USE_GOT)
6852 return GLOBAL_POINTER_REGNUM;
6854 /* We must always provide $gp when it is used implicitly. */
6855 if (!TARGET_EXPLICIT_RELOCS)
6856 return GLOBAL_POINTER_REGNUM;
6858 /* FUNCTION_PROFILER includes a jal macro, so we need to give it
6860 if (current_function_profile)
6861 return GLOBAL_POINTER_REGNUM;
6863 /* If the function has a nonlocal goto, $gp must hold the correct
6864 global pointer for the target function. */
6865 if (current_function_has_nonlocal_goto)
6866 return GLOBAL_POINTER_REGNUM;
6868 /* If the gp is never referenced, there's no need to initialize it.
6869 Note that reload can sometimes introduce constant pool references
6870 into a function that otherwise didn't need them. For example,
6871 suppose we have an instruction like:
6873 (set (reg:DF R1) (float:DF (reg:SI R2)))
6875 If R2 turns out to be constant such as 1, the instruction may have a
6876 REG_EQUAL note saying that R1 == 1.0. Reload then has the option of
6877 using this constant if R2 doesn't get allocated to a register.
6879 In cases like these, reload will have added the constant to the pool
6880 but no instruction will yet refer to it. */
6881 if (!df_regs_ever_live_p (GLOBAL_POINTER_REGNUM)
6882 && !current_function_uses_const_pool
6883 && !mips_function_has_gp_insn ())
6886 /* We need a global pointer, but perhaps we can use a call-clobbered
6887 register instead of $gp. */
/* Leaf functions can place the global pointer in any free call-clobbered
   GPR except the PIC call register; the loop below picks the first such
   register (the return statement inside the loop is elided here).  */
6888 if (TARGET_CALL_SAVED_GP && current_function_is_leaf)
6889 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
6890 if (!df_regs_ever_live_p (regno)
6891 && call_used_regs[regno]
6892 && !fixed_regs[regno]
6893 && regno != PIC_FUNCTION_ADDR_REGNUM)
6896 return GLOBAL_POINTER_REGNUM;
6900 /* Return true if the function return value MODE will get returned in a
6901 floating-point register. */
6904 mips_return_mode_in_fpr_p (enum machine_mode mode)
/* All float-like mode classes qualify, provided each unit fits in the
   hardware FP return size (UNITS_PER_HWFPVALUE).  */
6906 return ((GET_MODE_CLASS (mode) == MODE_FLOAT
6907 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
6908 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
6909 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);
6912 /* Return a two-character string representing a function floating-point
6913 return mode, used to name MIPS16 function stubs. */
6916 mips16_call_stub_mode_suffix (enum machine_mode mode)
/* NOTE(review): the returned string literals are elided from this view;
   a chain of mode comparisons selects the suffix for each FP mode.  */
6920 else if (mode == DFmode)
6922 else if (mode == SCmode)
6924 else if (mode == DCmode)
6926 else if (mode == V2SFmode)
6932 /* Return true if the current function returns its value in a floating-point
6933 register in MIPS16 mode. */
6936 mips16_cfun_returns_in_fpr_p (void)
6938 tree return_type = DECL_RESULT (current_function_decl);
/* Requires MIPS16 code with a hard-float ABI, a non-aggregate return
   value, and a mode that fits in an FP return register.  */
6939 return (TARGET_MIPS16
6940 && TARGET_HARD_FLOAT_ABI
6941 && !aggregate_value_p (return_type, current_function_decl)
6942 && mips_return_mode_in_fpr_p (DECL_MODE (return_type)));
6946 /* Return true if the current function must save REGNO. */
6949 mips_save_reg_p (unsigned int regno)
6951 /* We only need to save $gp if TARGET_CALL_SAVED_GP and only then
6952 if we have not chosen a call-clobbered substitute. */
6953 if (regno == GLOBAL_POINTER_REGNUM)
6954 return TARGET_CALL_SAVED_GP && cfun->machine->global_pointer == regno;
6956 /* Check call-saved registers. */
6957 if (df_regs_ever_live_p (regno) && !call_used_regs[regno])
6960 /* Save both registers in an FPR pair if either one is used. This is
6961 needed for the case when MIN_FPRS_PER_FMT == 1, which allows the odd
6962 register to be used without the even register. */
6963 if (FP_REG_P (regno)
6964 && MAX_FPRS_PER_FMT == 2
6965 && df_regs_ever_live_p (regno + 1)
6966 && !call_used_regs[regno + 1])
6969 /* We need to save the old frame pointer before setting up a new one. */
6970 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
6973 /* We need to save the incoming return address if it is ever clobbered
6974 within the function. */
6975 if (regno == GP_REG_FIRST + 31 && df_regs_ever_live_p (regno))
/* NOTE(review): the remaining checks appear to be MIPS16-specific
   (the enclosing TARGET_MIPS16 guard is elided from this view).  */
6980 /* $18 is a special case in mips16 code. It may be used to call
6981 a function which returns a floating point value, but it is
6982 marked in call_used_regs. */
6983 if (regno == GP_REG_FIRST + 18 && df_regs_ever_live_p (regno))
6986 /* $31 is also a special case. It will be used to copy a return
6987 value into the floating point registers if the return value is
6989 if (regno == GP_REG_FIRST + 31
6990 && mips16_cfun_returns_in_fpr_p ())
6997 /* Return the index of the lowest X in the range [0, SIZE) for which
6998 bit REGS[X] is set in MASK. Return SIZE if there is no such X. */
7001 mips16e_find_first_register (unsigned int mask, const unsigned char *regs,
/* Simple linear scan; the return statements are elided in this view.  */
7006 for (i = 0; i < size; i++)
7007 if (BITSET_P (mask, regs[i]))
7013 /* *MASK_PTR is a mask of general purpose registers and *GP_REG_SIZE_PTR
7014 is the number of bytes that they occupy. If *MASK_PTR contains REGS[X]
7015 for some X in [0, SIZE), adjust *MASK_PTR and *GP_REG_SIZE_PTR so that
7016 the same is true for all indexes (X, SIZE). */
7019 mips16e_mask_registers (unsigned int *mask_ptr, const unsigned char *regs,
7020 unsigned int size, HOST_WIDE_INT *gp_reg_size_ptr)
/* Find the first register of REGS already in the mask, then force every
   later entry into the mask, growing the save-area size for each
   register added.  This enforces the contiguous-range requirement of
   the MIPS16e SAVE/RESTORE instructions.  */
7024 i = mips16e_find_first_register (*mask_ptr, regs, size);
7025 for (i++; i < size; i++)
7026 if (!BITSET_P (*mask_ptr, regs[i]))
7028 *gp_reg_size_ptr += GET_MODE_SIZE (gpr_mode);
7029 *mask_ptr |= 1 << regs[i];
7033 /* Return the bytes needed to compute the frame pointer from the current
7034 stack pointer. SIZE is the size (in bytes) of the local variables.
7036 MIPS stack frames look like:
7038 Before call After call
7039 high +-----------------------+ +-----------------------+
7041 | caller's temps. | | caller's temps. |
7043 +-----------------------+ +-----------------------+
7045 | arguments on stack. | | arguments on stack. |
7047 +-----------------------+ +-----------------------+
7048 | 4 words to save | | 4 words to save |
7049 | arguments passed | | arguments passed |
7050 | in registers, even | | in registers, even |
7051 | if not passed. | | if not passed. |
7052 SP->+-----------------------+ VFP->+-----------------------+
7053 (VFP = SP+fp_sp_offset) | |\
7054 | fp register save | | fp_reg_size
7056 SP+gp_sp_offset->+-----------------------+
7058 | | gp register save | | gp_reg_size
7059 gp_reg_rounded | | |/
7060 | +-----------------------+
7061 \| alignment padding |
7062 +-----------------------+
7064 | local variables | | var_size
7066 +-----------------------+
7068 | alloca allocations |
7070 +-----------------------+
7072 cprestore_size | | GP save for V.4 abi |
7074 +-----------------------+
7076 | arguments on stack | |
7078 +-----------------------+ |
7079 | 4 words to save | | args_size
7080 | arguments passed | |
7081 | in registers, even | |
7082 | if not passed. | |
7083 low | (TARGET_OLDABI only) |/
7084 memory SP->+-----------------------+
7089 compute_frame_size (HOST_WIDE_INT size)
7092 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
7093 HOST_WIDE_INT var_size; /* # bytes that variables take up */
7094 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
7095 HOST_WIDE_INT cprestore_size; /* # bytes that the cprestore slot takes up */
7096 HOST_WIDE_INT gp_reg_rounded; /* # bytes needed to store gp after rounding */
7097 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
7098 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
7099 unsigned int mask; /* mask of saved gp registers */
7100 unsigned int fmask; /* mask of saved fp registers */
/* Decide which register will act as $gp before sizing the frame, since
   mips_save_reg_p consults cfun->machine->global_pointer.  */
7102 cfun->machine->global_pointer = mips_global_pointer ();
7108 var_size = MIPS_STACK_ALIGN (size);
7109 args_size = current_function_outgoing_args_size;
7110 cprestore_size = MIPS_STACK_ALIGN (STARTING_FRAME_OFFSET) - args_size;
7112 /* The space set aside by STARTING_FRAME_OFFSET isn't needed in leaf
7113 functions. If the function has local variables, we're committed
7114 to allocating it anyway. Otherwise reclaim it here. */
7115 if (var_size == 0 && current_function_is_leaf)
7116 cprestore_size = args_size = 0;
7118 /* The MIPS 3.0 linker does not like functions that dynamically
7119 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
7120 looks like we are trying to create a second frame pointer to the
7121 function, so allocate some stack space to make it happy. */
7123 if (args_size == 0 && current_function_calls_alloca)
7124 args_size = 4 * UNITS_PER_WORD;
7126 total_size = var_size + args_size + cprestore_size;
7128 /* Calculate space needed for gp registers. */
7129 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
7130 if (mips_save_reg_p (regno))
7132 gp_reg_size += GET_MODE_SIZE (gpr_mode);
7133 mask |= 1 << (regno - GP_REG_FIRST);
7136 /* We need to restore these for the handler. */
7137 if (current_function_calls_eh_return)
/* Add each EH data register to the GP save set until the target's
   list terminates with INVALID_REGNUM.  */
7142 regno = EH_RETURN_DATA_REGNO (i);
7143 if (regno == INVALID_REGNUM)
7145 gp_reg_size += GET_MODE_SIZE (gpr_mode);
7146 mask |= 1 << (regno - GP_REG_FIRST);
7150 /* The MIPS16e SAVE and RESTORE instructions have two ranges of registers:
7151 $a3-$a0 and $s2-$s8. If we save one register in the range, we must
7152 save all later registers too. */
7153 if (GENERATE_MIPS16E_SAVE_RESTORE)
7155 mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
7156 ARRAY_SIZE (mips16e_s2_s8_regs), &gp_reg_size);
7157 mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
7158 ARRAY_SIZE (mips16e_a0_a3_regs), &gp_reg_size);
7161 /* This loop must iterate over the same space as its companion in
7162 mips_for_each_saved_reg. */
7163 for (regno = (FP_REG_LAST - MAX_FPRS_PER_FMT + 1);
7164 regno >= FP_REG_FIRST;
7165 regno -= MAX_FPRS_PER_FMT)
7167 if (mips_save_reg_p (regno))
7169 fp_reg_size += MAX_FPRS_PER_FMT * UNITS_PER_FPREG;
7170 fmask |= ((1 << MAX_FPRS_PER_FMT) - 1) << (regno - FP_REG_FIRST);
7174 gp_reg_rounded = MIPS_STACK_ALIGN (gp_reg_size);
7175 total_size += gp_reg_rounded + MIPS_STACK_ALIGN (fp_reg_size);
7177 /* Add in the space required for saving incoming register arguments. */
7178 total_size += current_function_pretend_args_size;
7179 total_size += MIPS_STACK_ALIGN (cfun->machine->varargs_size);
7181 /* Save other computed information. */
7182 cfun->machine->frame.total_size = total_size;
7183 cfun->machine->frame.var_size = var_size;
7184 cfun->machine->frame.args_size = args_size;
7185 cfun->machine->frame.cprestore_size = cprestore_size;
7186 cfun->machine->frame.gp_reg_size = gp_reg_size;
7187 cfun->machine->frame.fp_reg_size = fp_reg_size;
7188 cfun->machine->frame.mask = mask;
7189 cfun->machine->frame.fmask = fmask;
7190 cfun->machine->frame.initialized = reload_completed;
7191 cfun->machine->frame.num_gp = gp_reg_size / UNITS_PER_WORD;
7192 cfun->machine->frame.num_fp = (fp_reg_size
7193 / (MAX_FPRS_PER_FMT * UNITS_PER_FPREG));
/* Record where the highest-numbered saved GPR lives, both as an offset
   from the bottom-of-frame $sp and from the frame top.  */
7197 HOST_WIDE_INT offset;
7199 if (GENERATE_MIPS16E_SAVE_RESTORE)
7200 /* MIPS16e SAVE and RESTORE instructions require the GP save area
7201 to be aligned at the high end with any padding at the low end.
7202 It is only safe to use this calculation for o32, where we never
7203 have pretend arguments, and where any varargs will be saved in
7204 the caller-allocated area rather than at the top of the frame. */
7205 offset = (total_size - GET_MODE_SIZE (gpr_mode));
7207 offset = (args_size + cprestore_size + var_size
7208 + gp_reg_size - GET_MODE_SIZE (gpr_mode));
7209 cfun->machine->frame.gp_sp_offset = offset;
7210 cfun->machine->frame.gp_save_offset = offset - total_size;
7214 cfun->machine->frame.gp_sp_offset = 0;
7215 cfun->machine->frame.gp_save_offset = 0;
/* Likewise for the highest-numbered saved FPR pair.  */
7220 HOST_WIDE_INT offset;
7222 offset = (args_size + cprestore_size + var_size
7223 + gp_reg_rounded + fp_reg_size
7224 - MAX_FPRS_PER_FMT * UNITS_PER_FPREG);
7225 cfun->machine->frame.fp_sp_offset = offset;
7226 cfun->machine->frame.fp_save_offset = offset - total_size;
7230 cfun->machine->frame.fp_sp_offset = 0;
7231 cfun->machine->frame.fp_save_offset = 0;
7234 /* Ok, we're done. */
7238 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame
7239 pointer or argument pointer. TO is either the stack pointer or
7240 hard frame pointer. */
7243 mips_initial_elimination_offset (int from, int to)
7245 HOST_WIDE_INT offset;
/* Recompute the frame layout so the offsets below are current.  */
7247 compute_frame_size (get_frame_size ());
7249 /* Set OFFSET to the offset from the stack pointer. */
7252 case FRAME_POINTER_REGNUM:
7256 case ARG_POINTER_REGNUM:
7257 offset = (cfun->machine->frame.total_size
7258 - current_function_pretend_args_size);
/* In MIPS16 code the hard frame pointer sits above the outgoing
   argument area, so subtract args_size when eliminating to it.  */
7265 if (TARGET_MIPS16 && to == HARD_FRAME_POINTER_REGNUM)
7266 offset -= cfun->machine->frame.args_size;
7271 /* Implement RETURN_ADDR_RTX. Note, we do not support moving
7272 back to a previous frame. */
7274 mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
/* Only COUNT == 0 is supported (the guard is elided in this view);
   the return address is the entry value of $31.  */
7279 return get_hard_reg_initial_val (Pmode, GP_REG_FIRST + 31);
7282 /* Use FN to save or restore register REGNO. MODE is the register's
7283 mode and OFFSET is the offset of its save slot from the current
/* Build a stack-relative MEM for the slot and let FN perform the move
   in whichever direction it implements.  */
7287 mips_save_restore_reg (enum machine_mode mode, int regno,
7288 HOST_WIDE_INT offset, mips_save_restore_fn fn)
7292 mem = gen_frame_mem (mode, plus_constant (stack_pointer_rtx, offset));
7294 fn (gen_rtx_REG (mode, regno), mem);
7298 /* Call FN for each register that is saved by the current function.
7299 SP_OFFSET is the offset of the current stack pointer from the start
7303 mips_for_each_saved_reg (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
7305 enum machine_mode fpr_mode;
7306 HOST_WIDE_INT offset;
7309 /* Save registers starting from high to low. The debuggers prefer at least
7310 the return register be stored at func+4, and also it allows us not to
7311 need a nop in the epilogue if at least one register is reloaded in
7312 addition to return address. */
7313 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
7314 for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
7315 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
7317 mips_save_restore_reg (gpr_mode, regno, offset, fn);
7318 offset -= GET_MODE_SIZE (gpr_mode);
7321 /* This loop must iterate over the same space as its companion in
7322 compute_frame_size. */
7323 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
/* FPRs are saved as pairs in DFmode unless the target only has
   single-precision registers.  */
7324 fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
7325 for (regno = (FP_REG_LAST - MAX_FPRS_PER_FMT + 1);
7326 regno >= FP_REG_FIRST;
7327 regno -= MAX_FPRS_PER_FMT)
7328 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
7330 mips_save_restore_reg (fpr_mode, regno, offset, fn);
7331 offset -= GET_MODE_SIZE (fpr_mode);
7335 /* If we're generating n32 or n64 abicalls, and the current function
7336 does not use $28 as its global pointer, emit a cplocal directive.
7337 Use pic_offset_table_rtx as the argument to the directive. */
7340 mips_output_cplocal (void)
/* Only needed when the assembler expands macros itself (no explicit
   relocs) and a non-default global pointer register was chosen.  */
7342 if (!TARGET_EXPLICIT_RELOCS
7343 && cfun->machine->global_pointer > 0
7344 && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
7345 output_asm_insn (".cplocal %+", 0);
7348 /* Return the style of GP load sequence that is being used for the
7349 current function. */
7351 enum mips_loadgp_style
7352 mips_current_loadgp_style (void)
/* No GOT, or no global pointer chosen: nothing to load (the LOADGP_NONE
   return is elided from this view).  */
7354 if (!TARGET_USE_GOT || cfun->machine->global_pointer == 0)
7360 if (TARGET_ABSOLUTE_ABICALLS)
7361 return LOADGP_ABSOLUTE;
7363 return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI;
7366 /* The __gnu_local_gp symbol.  Created lazily by mips_emit_loadgp for
7367 absolute abicalls; GTY(()) keeps it alive across garbage collection. */
7368 static GTY(()) rtx mips_gnu_local_gp;
7370 /* If we're generating n32 or n64 abicalls, emit instructions
7371 to set up the global pointer. */
7374 mips_emit_loadgp (void)
7376 rtx addr, offset, incoming_address, base, index;
7378 switch (mips_current_loadgp_style ())
7380 case LOADGP_ABSOLUTE:
/* Create the local __gnu_local_gp symbol on first use and load $gp
   directly from it.  */
7381 if (mips_gnu_local_gp == NULL)
7383 mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
7384 SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
7386 emit_insn (gen_loadgp_absolute (mips_gnu_local_gp));
/* NOTE(review): the case label here (presumably LOADGP_NEWABI) is
   elided from this view.  $gp is computed from the incoming function
   address in $25 plus a GOT-relative offset.  */
7390 addr = XEXP (DECL_RTL (current_function_decl), 0);
7391 offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
7392 incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
7393 emit_insn (gen_loadgp_newabi (offset, incoming_address));
7394 if (!TARGET_EXPLICIT_RELOCS)
7395 emit_insn (gen_loadgp_blockage ());
/* VxWorks RTP style: $gp comes from the GOTT_BASE/GOTT_INDEX pair.  */
7399 base = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_BASE));
7400 index = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_INDEX));
7401 emit_insn (gen_loadgp_rtp (base, index));
7402 if (!TARGET_EXPLICIT_RELOCS)
7403 emit_insn (gen_loadgp_blockage ());
7411 /* Set up the stack and frame (if desired) for the function. */
7414 mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
7417 HOST_WIDE_INT tsize = cfun->machine->frame.total_size;
7419 #ifdef SDB_DEBUGGING_INFO
7420 if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
7421 SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));
7424 /* In mips16 mode, we may need to generate a 32 bit to handle
7425 floating point arguments. The linker will arrange for any 32-bit
7426 functions to call this stub, which will then jump to the 16-bit
7429 && TARGET_HARD_FLOAT_ABI
7430 && current_function_args_info.fp_code != 0)
7431 build_mips16_function_stub (file);
7433 /* Select the mips16 mode for this function. */
7435 fprintf (file, "\t.set\tmips16\n");
7437 fprintf (file, "\t.set\tnomips16\n");
7439 if (!FUNCTION_NAME_ALREADY_DECLARED)
7441 /* Get the function name the same way that toplev.c does before calling
7442 assemble_start_function. This is needed so that the name used here
7443 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
7444 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
/* Emit the .ent directive and the function label ourselves.  */
7446 if (!flag_inhibit_size_directive)
7448 fputs ("\t.ent\t", file);
7449 assemble_name (file, fnname);
7453 assemble_name (file, fnname);
7454 fputs (":\n", file);
7457 /* Stop mips_file_end from treating this function as external. */
7458 if (TARGET_IRIX && mips_abi == ABI_32)
7459 TREE_ASM_WRITTEN (DECL_NAME (cfun->decl)) = 1;
7461 if (!flag_inhibit_size_directive)
7463 /* .frame FRAMEREG, FRAMESIZE, RETREG */
7465 "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
7466 "# vars= " HOST_WIDE_INT_PRINT_DEC ", regs= %d/%d"
7467 ", args= " HOST_WIDE_INT_PRINT_DEC
7468 ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
7469 (reg_names[(frame_pointer_needed)
7470 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM]),
7471 ((frame_pointer_needed && TARGET_MIPS16)
7472 ? tsize - cfun->machine->frame.args_size
7474 reg_names[GP_REG_FIRST + 31],
7475 cfun->machine->frame.var_size,
7476 cfun->machine->frame.num_gp,
7477 cfun->machine->frame.num_fp,
7478 cfun->machine->frame.args_size,
7479 cfun->machine->frame.cprestore_size);
7481 /* .mask MASK, GPOFFSET; .fmask FPOFFSET */
7482 fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
7483 cfun->machine->frame.mask,
7484 cfun->machine->frame.gp_save_offset);
7485 fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
7486 cfun->machine->frame.fmask,
7487 cfun->machine->frame.fp_save_offset);
7490 OLD_SP == *FRAMEREG + FRAMESIZE => can find old_sp from nominated FP reg.
7491 HIGHEST_GP_SAVED == *FRAMEREG + FRAMESIZE + GPOFFSET => can find saved regs. */
7494 if (mips_current_loadgp_style () == LOADGP_OLDABI)
7496 /* Handle the initialization of $gp for SVR4 PIC. */
7497 if (!cfun->machine->all_noreorder_p)
7498 output_asm_insn ("%(.cpload\t%^%)", 0);
7500 output_asm_insn ("%(.cpload\t%^\n\t%<", 0);
7502 else if (cfun->machine->all_noreorder_p)
7503 output_asm_insn ("%(%<", 0);
7505 /* Tell the assembler which register we're using as the global
7506 pointer. This is needed for thunks, since they can use either
7507 explicit relocs or assembler macros. */
7508 mips_output_cplocal ();
7511 /* Make the last instruction frame related and note that it performs
7512 the operation described by FRAME_PATTERN. */
7515 mips_set_frame_expr (rtx frame_pattern)
/* Attach a REG_FRAME_RELATED_EXPR note so dwarf2out describes the
   effect of FRAME_PATTERN rather than the literal insn pattern.  */
7519 insn = get_last_insn ();
7520 RTX_FRAME_RELATED_P (insn) = 1;
7521 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7527 /* Return a frame-related rtx that stores REG at MEM.
7528 REG must be a single register. */
7531 mips_frame_set (rtx mem, rtx reg)
7535 /* If we're saving the return address register and the dwarf return
7536 address column differs from the hard register number, adjust the
7537 note reg to refer to the former. */
7538 if (REGNO (reg) == GP_REG_FIRST + 31
7539 && DWARF_FRAME_RETURN_COLUMN != GP_REG_FIRST + 31)
7540 reg = gen_rtx_REG (GET_MODE (reg), DWARF_FRAME_RETURN_COLUMN)
/* Mark the SET itself as frame-related for the unwinder.  */;
7542 set = gen_rtx_SET (VOIDmode, mem, reg);
7543 RTX_FRAME_RELATED_P (set) = 1;
7549 /* Save register REG to MEM. Make the instruction frame-related. */
7552 mips_save_reg (rtx reg, rtx mem)
/* A 64-bit FP value without 64-bit FPRs must be stored as two 32-bit
   halves; emit the move, then describe both halves to the unwinder.  */
7554 if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
7558 if (mips_split_64bit_move_p (mem, reg))
7559 mips_split_64bit_move (mem, reg);
7561 mips_emit_move (mem, reg);
7563 x1 = mips_frame_set (mips_subword (mem, 0), mips_subword (reg, 0));
7564 x2 = mips_frame_set (mips_subword (mem, 1), mips_subword (reg, 1));
7565 mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
7570 && REGNO (reg) != GP_REG_FIRST + 31
7571 && !M16_REG_P (REGNO (reg)))
7573 /* Save a non-mips16 register by moving it through a temporary.
7574 We don't need to do this for $31 since there's a special
7575 instruction for it. */
7576 mips_emit_move (MIPS_PROLOGUE_TEMP (GET_MODE (reg)), reg);
7577 mips_emit_move (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
7580 mips_emit_move (mem, reg);
/* In all cases, record the save for the DWARF CFI machinery.  */
7582 mips_set_frame_expr (mips_frame_set (mem, reg));
7586 /* Return a move between register REGNO and memory location SP + OFFSET.
7587 Make the move a load if RESTORE_P, otherwise make it a frame-related
7591 mips16e_save_restore_reg (bool restore_p, HOST_WIDE_INT offset,
/* Build the SImode stack slot and register, then orient the SET:
   register <- memory for restores, frame-related store otherwise.  */
7596 mem = gen_frame_mem (SImode, plus_constant (stack_pointer_rtx, offset));
7597 reg = gen_rtx_REG (SImode, regno);
7599 ? gen_rtx_SET (VOIDmode, reg, mem)
7600 : mips_frame_set (mem, reg));
7603 /* Return RTL for a MIPS16e SAVE or RESTORE instruction; RESTORE_P says which.
7604 The instruction must:
7606 - Allocate or deallocate SIZE bytes in total; SIZE is known
7609 - Save or restore as many registers in *MASK_PTR as possible.
7610 The instruction saves the first registers at the top of the
7611 allocated area, with the other registers below it.
7613 - Save NARGS argument registers above the allocated area.
7615 (NARGS is always zero if RESTORE_P.)
7617 The SAVE and RESTORE instructions cannot save and restore all general
7618 registers, so there may be some registers left over for the caller to
7619 handle. Destructively modify *MASK_PTR so that it contains the registers
7620 that still need to be saved or restored. The caller can save these
7621 registers in the memory immediately below *OFFSET_PTR, which is a
7622 byte offset from the bottom of the allocated stack area. */
7625 mips16e_build_save_restore (bool restore_p, unsigned int *mask_ptr,
7626 HOST_WIDE_INT *offset_ptr, unsigned int nargs,
7630 HOST_WIDE_INT offset, top_offset;
7631 unsigned int i, regno;
/* MIPS16e SAVE/RESTORE cannot handle FPRs; the caller must not have
   any to save.  */
7634 gcc_assert (cfun->machine->frame.fp_reg_size == 0);
7636 /* Calculate the number of elements in the PARALLEL. We need one element
7637 for the stack adjustment, one for each argument register save, and one
7638 for each additional register move. */
7640 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
7641 if (BITSET_P (*mask_ptr, mips16e_save_restore_regs[i]))
7644 /* Create the final PARALLEL. */
7645 pattern = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (n));
7648 /* Add the stack pointer adjustment. */
7649 set = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7650 plus_constant (stack_pointer_rtx,
7651 restore_p ? size : -size));
7652 RTX_FRAME_RELATED_P (set) = 1;
7653 XVECEXP (pattern, 0, n++) = set;
7655 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
7656 top_offset = restore_p ? size : 0;
7658 /* Save the arguments. */
7659 for (i = 0; i < nargs; i++)
7661 offset = top_offset + i * GET_MODE_SIZE (gpr_mode);
7662 set = mips16e_save_restore_reg (restore_p, offset, GP_ARG_FIRST + i);
7663 XVECEXP (pattern, 0, n++) = set;
7666 /* Then fill in the other register moves. */
7667 offset = top_offset;
7668 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
7670 regno = mips16e_save_restore_regs[i];
7671 if (BITSET_P (*mask_ptr, regno))
7673 offset -= UNITS_PER_WORD;
7674 set = mips16e_save_restore_reg (restore_p, offset, regno);
7675 XVECEXP (pattern, 0, n++) = set;
7676 *mask_ptr &= ~(1 << regno);
7680 /* Tell the caller what offset it should use for the remaining registers. */
/* OFFSET - TOP_OFFSET is minus the number of bytes the instruction
   itself used at the top of the allocated area, so the first free slot
   below those saves is at SIZE + (OFFSET - TOP_OFFSET).  The previous
   version added SIZE a second time, yielding an offset beyond the
   allocated block.  */
7681 *offset_ptr = size + (offset - top_offset);
7683 gcc_assert (n == XVECLEN (pattern, 0));
7688 /* PATTERN is a PARALLEL whose first element adds ADJUST to the stack
7689 pointer. Return true if PATTERN matches the kind of instruction
7690 generated by mips16e_build_save_restore. If INFO is nonnull,
7691 initialize it when returning true. */
7694 mips16e_save_restore_pattern_p (rtx pattern, HOST_WIDE_INT adjust,
7695 struct mips16e_save_restore_info *info)
7697 unsigned int i, nargs, mask;
7698 HOST_WIDE_INT top_offset, save_offset, offset, extra;
7699 rtx set, reg, mem, base;
7702 if (!GENERATE_MIPS16E_SAVE_RESTORE)
7705 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
/* ADJUST > 0 means a RESTORE (deallocation); saves have ADJUST < 0.  */
7706 top_offset = adjust > 0 ? adjust : 0;
7708 /* Interpret all other members of the PARALLEL. */
7709 save_offset = top_offset - GET_MODE_SIZE (gpr_mode);
7713 for (n = 1; n < XVECLEN (pattern, 0); n++)
7715 /* Check that we have a SET. */
7716 set = XVECEXP (pattern, 0, n);
7717 if (GET_CODE (set) != SET)
7720 /* Check that the SET is a load (if restoring) or a store
7722 mem = adjust > 0 ? SET_SRC (set) : SET_DEST (set);
7726 /* Check that the address is the sum of the stack pointer and a
7727 possibly-zero constant offset. */
7728 mips_split_plus (XEXP (mem, 0), &base, &offset);
7729 if (base != stack_pointer_rtx)
7732 /* Check that SET's other operand is a register. */
7733 reg = adjust > 0 ? SET_DEST (set) : SET_SRC (set);
7737 /* Check for argument saves. */
7738 if (offset == top_offset + nargs * GET_MODE_SIZE (gpr_mode)
7739 && REGNO (reg) == GP_ARG_FIRST + nargs)
/* Otherwise it must be the next register in the SAVE/RESTORE list,
   one word below the previous save.  */
7741 else if (offset == save_offset)
7743 while (mips16e_save_restore_regs[i++] != REGNO (reg))
7744 if (i == ARRAY_SIZE (mips16e_save_restore_regs))
7747 mask |= 1 << REGNO (reg);
7748 save_offset -= GET_MODE_SIZE (gpr_mode);
7754 /* Check that the restrictions on register ranges are met. */
7756 mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
7757 ARRAY_SIZE (mips16e_s2_s8_regs), &extra);
7758 mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
7759 ARRAY_SIZE (mips16e_a0_a3_regs), &extra);
7763 /* Make sure that the topmost argument register is not saved twice.
7764 The checks above ensure that the same is then true for the other
7765 argument registers. */
7766 if (nargs > 0 && BITSET_P (mask, GP_ARG_FIRST + nargs - 1))
7769 /* Pass back information, if requested. */
7772 info->nargs = nargs;
7774 info->size = (adjust > 0 ? adjust : -adjust);
7780 /* Add a MIPS16e SAVE or RESTORE register-range argument to string S
7781 for the register range [MIN_REG, MAX_REG]. Return a pointer to
7782 the null terminator. */
7785 mips16e_add_register_range (char *s, unsigned int min_reg,
7786 unsigned int max_reg)
/* Emit ",$min-$max" for a real range, ",$min" for a single register.  */
7788 if (min_reg != max_reg)
7789 s += sprintf (s, ",%s-%s", reg_names[min_reg], reg_names[max_reg]);
7791 s += sprintf (s, ",%s", reg_names[min_reg]);
7795 /* Return the assembly instruction for a MIPS16e SAVE or RESTORE instruction.
7796 PATTERN and ADJUST are as for mips16e_save_restore_pattern_p. */
7799 mips16e_output_save_restore (rtx pattern, HOST_WIDE_INT adjust)
/* NOTE(review): returned buffer is static, so the result is only valid
   until the next call.  */
7801 static char buffer[300];
7803 struct mips16e_save_restore_info info;
7804 unsigned int i, end;
7807 /* Parse the pattern. */
7808 if (!mips16e_save_restore_pattern_p (pattern, adjust, &info))
7811 /* Add the mnemonic. */
7812 s = strcpy (buffer, adjust > 0 ? "restore\t" : "save\t");
7815 /* Save the arguments. */
7817 s += sprintf (s, "%s-%s,", reg_names[GP_ARG_FIRST],
7818 reg_names[GP_ARG_FIRST + info.nargs - 1]);
7819 else if (info.nargs == 1)
7820 s += sprintf (s, "%s,", reg_names[GP_ARG_FIRST]);
7822 /* Emit the amount of stack space to allocate or deallocate. */
7823 s += sprintf (s, "%d", (int) info.size);
7825 /* Save or restore $16. */
7826 if (BITSET_P (info.mask, 16))
7827 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 16]);
7829 /* Save or restore $17. */
7830 if (BITSET_P (info.mask, 17))
7831 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 17]);
7833 /* Save or restore registers in the range $s2...$s8, which
7834 mips16e_s2_s8_regs lists in decreasing order. Note that this
7835 is a software register range; the hardware registers are not
7836 numbered consecutively. */
7837 end = ARRAY_SIZE (mips16e_s2_s8_regs);
7838 i = mips16e_find_first_register (info.mask, mips16e_s2_s8_regs, end);
7840 s = mips16e_add_register_range (s, mips16e_s2_s8_regs[end - 1],
7841 mips16e_s2_s8_regs[i]);
7843 /* Save or restore registers in the range $a0...$a3. */
7844 end = ARRAY_SIZE (mips16e_a0_a3_regs);
7845 i = mips16e_find_first_register (info.mask, mips16e_a0_a3_regs, end);
7847 s = mips16e_add_register_range (s, mips16e_a0_a3_regs[i],
7848 mips16e_a0_a3_regs[end - 1]);
7850 /* Save or restore $31. */
7851 if (BITSET_P (info.mask, 31))
7852 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 31]);
7857 /* Return a simplified form of X using the register values in REG_VALUES.
7858 REG_VALUES[R] is the last value assigned to hard register R, or null
7859 if R has not been modified.
7861 This function is rather limited, but is good enough for our purposes. */
7864 mips16e_collect_propagate_value (rtx x, rtx *reg_values)
/* Replace constant-pool references by the constants themselves.  */
7868 x = avoid_constant_pool_reference (x);
/* Recursively simplify unary operations on the propagated operand.  */
7872 x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
7873 return simplify_gen_unary (GET_CODE (x), GET_MODE (x),
7874 x0, GET_MODE (XEXP (x, 0)));
7877 if (ARITHMETIC_P (x))
7879 x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
7880 x1 = mips16e_collect_propagate_value (XEXP (x, 1), reg_values);
7881 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), x0, x1);
/* Substitute a register's recorded value when it is stable.  */
7885 && reg_values[REGNO (x)]
7886 && !rtx_unstable_p (reg_values[REGNO (x)]))
7887 return reg_values[REGNO (x)];
7892 /* Return true if (set DEST SRC) stores an argument register into its
7893 caller-allocated save slot, storing the number of that argument
7894 register in *REGNO_PTR if so. REG_VALUES is as for
7895 mips16e_collect_propagate_value. */
7898 mips16e_collect_argument_save_p (rtx dest, rtx src, rtx *reg_values,
7899 unsigned int *regno_ptr)
7901 unsigned int argno, regno;
7902 HOST_WIDE_INT offset, required_offset;
7905 /* Check that this is a word-mode store. */
7906 if (!MEM_P (dest) || !REG_P (src) || GET_MODE (dest) != word_mode)
7909 /* Check that the register being saved is an unmodified argument
/* An entry in REG_VALUES means the register has been overwritten since
   function entry, so it no longer holds the incoming argument.  */
7911 regno = REGNO (src);
7912 if (regno < GP_ARG_FIRST || regno > GP_ARG_LAST || reg_values[regno])
7914 argno = regno - GP_ARG_FIRST;
7916 /* Check whether the address is an appropriate stack pointer or
7917 frame pointer access. The frame pointer is offset from the
7918 stack pointer by the size of the outgoing arguments. */
7919 addr = mips16e_collect_propagate_value (XEXP (dest, 0), reg_values);
7920 mips_split_plus (addr, &base, &offset);
7921 required_offset = cfun->machine->frame.total_size + argno * UNITS_PER_WORD;
7922 if (base == hard_frame_pointer_rtx)
7923 required_offset -= cfun->machine->frame.args_size;
7924 else if (base != stack_pointer_rtx)
7926 if (offset != required_offset)
7933 /* A subroutine of mips_expand_prologue, called only when generating
7934 MIPS16e SAVE instructions. Search the start of the function for any
7935 instructions that save argument registers into their caller-allocated
7936 save slots. Delete such instructions and return a value N such that
7937 saving [GP_ARG_FIRST, GP_ARG_FIRST + N) would make all the deleted
7938 instructions redundant. */
/* NOTE(review): incomplete extract (return type, braces, insn filtering and
   the "delete insn" lines are not visible).  One character-level fix applied
   below; all other tokens are kept byte-identical.  */
7941 mips16e_collect_argument_saves (void)
7943 rtx reg_values[FIRST_PSEUDO_REGISTER];
7944 rtx insn, next, set, dest, src;
7945 unsigned int nargs, regno;
7947 push_topmost_sequence ();
/* No register has a known value yet.  */
7949 memset (reg_values, 0, sizeof (reg_values));
7950 for (insn = get_insns (); insn; insn = next)
7952 next = NEXT_INSN (insn);
7959 set = PATTERN (insn);
7960 if (GET_CODE (set) != SET)
7963 dest = SET_DEST (set);
7964 src = SET_SRC (set);
/* FIX(review): "&regno" had been mangled into the registered-trademark
   sign by an HTML-entity mis-decoding ("&reg" -> (R)).  Restore the
   address-of operator so REGNO is written back by the callee.  */
7965 if (mips16e_collect_argument_save_p (dest, src, reg_values, &regno))
7967 if (!BITSET_P (cfun->machine->frame.mask, regno))
7970 nargs = MAX (nargs, (regno - GP_ARG_FIRST) + 1);
/* Track the last word-mode value assigned to each register so later
   addresses can be propagated through it.  */
7973 else if (REG_P (dest) && GET_MODE (dest) == word_mode)
7974 reg_values[REGNO (dest)]
7975 = mips16e_collect_propagate_value (src, reg_values);
7979 pop_topmost_sequence ();
7984 /* Expand the prologue into a bunch of separate insns. */
/* NOTE(review): incomplete extract -- return type, braces, several `else`
   branches and declarations are missing.  Code kept byte-identical.  */
7987 mips_expand_prologue (void)
/* Use the function's chosen global pointer register, if any.  */
7993 if (cfun->machine->global_pointer > 0)
7994 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
7996 size = compute_frame_size (get_frame_size ());
7998 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
7999 bytes beforehand; this is enough to cover the register save area
8000 without going out of range. */
8001 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
8003 HOST_WIDE_INT step1;
8005 step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
8007 if (GENERATE_MIPS16E_SAVE_RESTORE)
8009 HOST_WIDE_INT offset;
8010 unsigned int mask, regno;
8012 /* Try to merge argument stores into the save instruction. */
8013 nargs = mips16e_collect_argument_saves ();
8015 /* Build the save instruction. */
8016 mask = cfun->machine->frame.mask;
8017 insn = mips16e_build_save_restore (false, &mask, &offset,
8019 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
8022 /* Check if we need to save other registers. */
8023 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
8024 if (BITSET_P (mask, regno - GP_REG_FIRST))
8026 offset -= GET_MODE_SIZE (gpr_mode);
8027 mips_save_restore_reg (gpr_mode, regno, offset, mips_save_reg);
/* Non-MIPS16e path (branch header not visible): allocate STEP1 bytes
   and save each register individually.  */
8032 insn = gen_add3_insn (stack_pointer_rtx,
8035 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
8037 mips_for_each_saved_reg (size, mips_save_reg);
8041 /* Allocate the rest of the frame. */
8044 if (SMALL_OPERAND (-size))
8045 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
8047 GEN_INT (-size)))) = 1;
/* Large frames: materialize SIZE in the prologue temporary first.  */
8050 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
8053 /* There are no instructions to add or subtract registers
8054 from the stack pointer, so use the frame pointer as a
8055 temporary. We should always be using a frame pointer
8056 in this case anyway. */
8057 gcc_assert (frame_pointer_needed);
8058 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
8059 emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
8060 hard_frame_pointer_rtx,
8061 MIPS_PROLOGUE_TEMP (Pmode)));
8062 mips_emit_move (stack_pointer_rtx, hard_frame_pointer_rtx);
8065 emit_insn (gen_sub3_insn (stack_pointer_rtx,
8067 MIPS_PROLOGUE_TEMP (Pmode)));
8069 /* Describe the combined effect of the previous instructions. */
8071 (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8072 plus_constant (stack_pointer_rtx, -size)));
8076 /* Set up the frame pointer, if we're using one. In mips16 code,
8077 we point the frame pointer ahead of the outgoing argument area.
8078 This should allow more variables & incoming arguments to be
8079 accessed with unextended instructions. */
8080 if (frame_pointer_needed)
8082 if (TARGET_MIPS16 && cfun->machine->frame.args_size != 0)
8084 rtx offset = GEN_INT (cfun->machine->frame.args_size);
8085 if (SMALL_OPERAND (cfun->machine->frame.args_size))
8087 (emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
/* Offset too big for one add: go through the prologue temporary.  */
8092 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), offset);
8093 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
8094 emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
8095 hard_frame_pointer_rtx,
8096 MIPS_PROLOGUE_TEMP (Pmode)));
8098 (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
8099 plus_constant (stack_pointer_rtx,
8100 cfun->machine->frame.args_size)));
8104 RTX_FRAME_RELATED_P (mips_emit_move (hard_frame_pointer_rtx,
8105 stack_pointer_rtx)) = 1;
8108 mips_emit_loadgp ();
8110 /* If generating o32/o64 abicalls, save $gp on the stack. */
8111 if (TARGET_ABICALLS && TARGET_OLDABI && !current_function_is_leaf)
8112 emit_insn (gen_cprestore (GEN_INT (current_function_outgoing_args_size)));
8114 /* If we are profiling, make sure no instructions are scheduled before
8115 the call to mcount. */
8117 if (current_function_profile)
8118 emit_insn (gen_blockage ());
8121 /* Do any necessary cleanup after a function to restore stack, frame,
8124 #define RA_MASK BITMASK_HIGH /* 1 << 31 */
/* NOTE(review): incomplete extract -- return type, braces and the closing
   fputc/size-directive lines are missing.  Code kept byte-identical.  */
8127 mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
8128 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
8130 /* Reinstate the normal $gp. */
8131 SET_REGNO (pic_offset_table_rtx, GLOBAL_POINTER_REGNUM);
8132 mips_output_cplocal ();
/* Undo the ".set noreorder/nomacro" issued for all-noreorder functions.  */
8134 if (cfun->machine->all_noreorder_p)
8136 /* Avoid using %>%) since it adds excess whitespace. */
8137 output_asm_insn (".set\tmacro", 0);
8138 output_asm_insn (".set\treorder", 0);
8139 set_noreorder = set_nomacro = 0;
8142 if (!FUNCTION_NAME_ALREADY_DECLARED && !flag_inhibit_size_directive)
8146 /* Get the function name the same way that toplev.c does before calling
8147 assemble_start_function. This is needed so that the name used here
8148 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
8149 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
8150 fputs ("\t.end\t", file);
8151 assemble_name (file, fnname);
8156 /* Emit instructions to restore register REG from slot MEM. */
/* NOTE(review): incomplete extract -- return type, braces and the `else`
   keyword before the direct-move case are missing.  Code kept
   byte-identical.  */
8159 mips_restore_reg (rtx reg, rtx mem)
8161 /* There's no mips16 instruction to load $31 directly. Load into
8162 $7 instead and adjust the return insn appropriately. */
8163 if (TARGET_MIPS16 && REGNO (reg) == GP_REG_FIRST + 31)
8164 reg = gen_rtx_REG (GET_MODE (reg), 7);
8166 if (TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
8168 /* Can't restore directly; move through a temporary. */
8169 mips_emit_move (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
8170 mips_emit_move (reg, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
8173 mips_emit_move (reg, mem);
8177 /* Expand the epilogue into a bunch of separate insns. SIBCALL_P is true
8178 if this epilogue precedes a sibling call, false if it is for a normal
8179 "epilogue" pattern. */
/* NOTE(review): incomplete extract -- return type, braces, several
   declarations (base, target, adjust, restore) and `else` branches are
   missing.  Code kept byte-identical.  */
8182 mips_expand_epilogue (int sibcall_p)
8184 HOST_WIDE_INT step1, step2;
/* Fast path: nothing to restore, emit a plain return.  */
8187 if (!sibcall_p && mips_can_use_return_insn ())
8189 emit_jump_insn (gen_return ());
8193 /* In mips16 mode, if the return value should go into a floating-point
8194 register, we need to call a helper routine to copy it over. */
8195 if (mips16_cfun_returns_in_fpr_p ())
8204 enum machine_mode return_mode;
8206 return_type = DECL_RESULT (current_function_decl);
8207 return_mode = DECL_MODE (return_type);
/* Helper is named "__mips16_ret_<mode suffix>".  */
8209 name = ACONCAT (("__mips16_ret_",
8210 mips16_call_stub_mode_suffix (return_mode),
8212 id = get_identifier (name);
8213 func = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
8214 retval = gen_rtx_REG (return_mode, GP_RETURN);
8215 call = gen_call_value_internal (retval, func, const0_rtx);
8216 insn = emit_call_insn (call);
8217 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), retval);
8220 /* Split the frame into two. STEP1 is the amount of stack we should
8221 deallocate before restoring the registers. STEP2 is the amount we
8222 should deallocate afterwards.
8224 Start off by assuming that no registers need to be restored. */
8225 step1 = cfun->machine->frame.total_size;
8228 /* Work out which register holds the frame address. Account for the
8229 frame pointer offset used by mips16 code. */
8230 if (!frame_pointer_needed)
8231 base = stack_pointer_rtx;
8234 base = hard_frame_pointer_rtx;
8236 step1 -= cfun->machine->frame.args_size;
8239 /* If we need to restore registers, deallocate as much stack as
8240 possible in the second step without going out of range. */
8241 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
8243 step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
8247 /* Set TARGET to BASE + STEP1. */
8253 /* Get an rtx for STEP1 that we can add to BASE. */
8254 adjust = GEN_INT (step1);
8255 if (!SMALL_OPERAND (step1))
8257 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), adjust);
8258 adjust = MIPS_EPILOGUE_TEMP (Pmode);
8261 /* Normal mode code can copy the result straight into $sp. */
8263 target = stack_pointer_rtx;
8265 emit_insn (gen_add3_insn (target, base, adjust));
8268 /* Copy TARGET into the stack pointer. */
8269 if (target != stack_pointer_rtx)
8270 mips_emit_move (stack_pointer_rtx, target);
8272 /* If we're using addressing macros, $gp is implicitly used by all
8273 SYMBOL_REFs. We must emit a blockage insn before restoring $gp
8275 if (TARGET_CALL_SAVED_GP && !TARGET_EXPLICIT_RELOCS)
8276 emit_insn (gen_blockage ());
8278 if (GENERATE_MIPS16E_SAVE_RESTORE && cfun->machine->frame.mask != 0)
8280 unsigned int regno, mask;
8281 HOST_WIDE_INT offset;
8284 /* Generate the restore instruction. */
8285 mask = cfun->machine->frame.mask;
8286 restore = mips16e_build_save_restore (true, &mask, &offset, 0, step2);
8288 /* Restore any other registers manually. */
8289 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
8290 if (BITSET_P (mask, regno - GP_REG_FIRST))
8292 offset -= GET_MODE_SIZE (gpr_mode);
8293 mips_save_restore_reg (gpr_mode, regno, offset, mips_restore_reg);
8296 /* Restore the remaining registers and deallocate the final bit
8298 emit_insn (restore);
8302 /* Restore the registers. */
8303 mips_for_each_saved_reg (cfun->machine->frame.total_size - step2,
8306 /* Deallocate the final bit of the frame. */
8308 emit_insn (gen_add3_insn (stack_pointer_rtx,
8313 /* Add in the __builtin_eh_return stack adjustment. We need to
8314 use a temporary in mips16 code. */
8315 if (current_function_calls_eh_return)
8319 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
8320 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
8321 MIPS_EPILOGUE_TEMP (Pmode),
8322 EH_RETURN_STACKADJ_RTX));
8323 mips_emit_move (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
8326 emit_insn (gen_add3_insn (stack_pointer_rtx,
8328 EH_RETURN_STACKADJ_RTX));
8333 /* When generating MIPS16 code, the normal mips_for_each_saved_reg
8334 path will restore the return address into $7 rather than $31. */
8336 && !GENERATE_MIPS16E_SAVE_RESTORE
8337 && (cfun->machine->frame.mask & RA_MASK) != 0)
8338 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
8339 GP_REG_FIRST + 7)));
8341 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
8342 GP_REG_FIRST + 31)));
8346 /* Return nonzero if this function is known to have a null epilogue.
8347 This allows the optimizer to omit jumps to jumps if no stack
8351 mips_can_use_return_insn (void)
/* NOTE(review): incomplete extract -- the "return 0;" lines after each
   guard are missing.  Code kept byte-identical.  */
8353 if (! reload_completed)
8356 if (df_regs_ever_live_p (31) || current_function_profile)
8359 /* In mips16 mode, a function that returns a floating point value
8360 needs to arrange to copy the return value into the floating point
8362 if (mips16_cfun_returns_in_fpr_p ())
/* A null epilogue is possible exactly when the frame is empty.  */
8365 if (cfun->machine->frame.initialized)
8366 return cfun->machine->frame.total_size == 0;
8368 return compute_frame_size (get_frame_size ()) == 0;
8371 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
8372 in order to avoid duplicating too much logic from elsewhere. */
/* NOTE(review): incomplete extract -- return type, braces, the FUNCTION
   parameter line and several `else` branches are missing.  Code kept
   byte-identical.  */
8375 mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8376 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8379 rtx this, temp1, temp2, insn, fnaddr;
8381 /* Pretend to be a post-reload pass while generating rtl. */
8382 reload_completed = 1;
8384 /* Mark the end of the (empty) prologue. */
8385 emit_note (NOTE_INSN_PROLOGUE_END);
8387 /* Pick a global pointer. Use a call-clobbered register if
8388 TARGET_CALL_SAVED_GP, so that we can use a sibcall. */
8391 cfun->machine->global_pointer =
8392 TARGET_CALL_SAVED_GP ? 15 : GLOBAL_POINTER_REGNUM;
8394 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
8397 /* Set up the global pointer for n32 or n64 abicalls. If
8398 LOADGP_ABSOLUTE then the thunk does not use the gp and there is
8399 no need to load it.*/
8400 if (mips_current_loadgp_style () != LOADGP_ABSOLUTE
8401 || !targetm.binds_local_p (function))
8402 mips_emit_loadgp ();
8404 /* We need two temporary registers in some cases. */
8405 temp1 = gen_rtx_REG (Pmode, 2);
8406 temp2 = gen_rtx_REG (Pmode, 3);
8408 /* Find out which register contains the "this" pointer. */
8409 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8410 this = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
8412 this = gen_rtx_REG (Pmode, GP_ARG_FIRST);
8414 /* Add DELTA to THIS. */
8417 rtx offset = GEN_INT (delta);
8418 if (!SMALL_OPERAND (delta))
8420 mips_emit_move (temp1, offset);
8423 emit_insn (gen_add3_insn (this, this, offset));
8426 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
8427 if (vcall_offset != 0)
8431 /* Set TEMP1 to *THIS. */
8432 mips_emit_move (temp1, gen_rtx_MEM (Pmode, this));
8434 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
8435 addr = mips_add_offset (temp2, temp1, vcall_offset);
8437 /* Load the offset and add it to THIS. */
8438 mips_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
8439 emit_insn (gen_add3_insn (this, this, temp1));
8442 /* Jump to the target function. Use a sibcall if direct jumps are
8443 allowed, otherwise load the address into a register first. */
8444 fnaddr = XEXP (DECL_RTL (function), 0);
8445 if (TARGET_MIPS16 || TARGET_USE_GOT || SYMBOL_REF_LONG_CALL_P (fnaddr)
8446 || SYMBOL_REF_MIPS16_FUNC_P (fnaddr))
8448 /* This is messy. gas treats "la $25,foo" as part of a call
8449 sequence and may allow a global "foo" to be lazily bound.
8450 The general move patterns therefore reject this combination.
8452 In this context, lazy binding would actually be OK
8453 for TARGET_CALL_CLOBBERED_GP, but it's still wrong for
8454 TARGET_CALL_SAVED_GP; see mips_load_call_address.
8455 We must therefore load the address via a temporary
8456 register if mips_dangerous_for_la25_p.
8458 If we jump to the temporary register rather than $25, the assembler
8459 can use the move insn to fill the jump's delay slot. */
8460 if (TARGET_USE_PIC_FN_ADDR_REG
8461 && !mips_dangerous_for_la25_p (fnaddr))
8462 temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
8463 mips_load_call_address (temp1, fnaddr, true);
8465 if (TARGET_USE_PIC_FN_ADDR_REG
8466 && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
8467 mips_emit_move (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
8468 emit_jump_insn (gen_indirect_jump (temp1));
/* Direct sibcall path (branch header not visible here).  */
8472 insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
8473 SIBLING_CALL_P (insn) = 1;
8476 /* Run just enough of rest_of_compilation. This sequence was
8477 "borrowed" from alpha.c. */
8478 insn = get_insns ();
8479 insn_locators_alloc ();
8480 split_all_insns_noflow ();
8481 mips16_lay_out_constants ();
8482 shorten_branches (insn);
8483 final_start_function (insn, file, 1);
8484 final (insn, file, 1);
8485 final_end_function ();
8487 /* Clean up the vars set above. Note that final_end_function resets
8488 the global pointer for us. */
8489 reload_completed = 0;
8492 /* Implement TARGET_SELECT_RTX_SECTION. */
/* NOTE(review): return type and braces are not visible in this extract.  */
8495 mips_select_rtx_section (enum machine_mode mode, rtx x,
8496 unsigned HOST_WIDE_INT align)
8498 /* ??? Consider using mergeable small data sections. */
/* Small constants go in .sdata so they can be reached via $gp.  */
8499 if (mips_rtx_constant_in_small_data_p (mode))
8500 return get_named_section (NULL, ".sdata", 0);
8502 return default_elf_select_rtx_section (mode, x, align);
8505 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
8507 The complication here is that, with the combination TARGET_ABICALLS
8508 && !TARGET_GPWORD, jump tables will use absolute addresses, and should
8509 therefore not be included in the read-only part of a DSO. Handle such
8510 cases by selecting a normal data section instead of a read-only one.
8511 The logic apes that in default_function_rodata_section. */
/* NOTE(review): incomplete extract -- return type, braces and the lines
   that rewrite RNAME before get_section are missing.  Code kept
   byte-identical.  */
8514 mips_function_rodata_section (tree decl)
8516 if (!TARGET_ABICALLS || TARGET_GPWORD)
8517 return default_function_rodata_section (decl)
8519 if (decl && DECL_SECTION_NAME (decl))
8521 const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
8522 if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
8524 char *rname = ASTRDUP (name);
8526 return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
8528 else if (flag_function_sections && flag_data_sections
8529 && strncmp (name, ".text.", 6) == 0)
8531 char *rname = ASTRDUP (name);
/* Rewrite ".text.foo" into ".data.foo" in place.  */
8532 memcpy (rname + 1, "data", 4);
8533 return get_section (rname, SECTION_WRITE, decl);
8536 return data_section;
8539 /* Implement TARGET_IN_SMALL_DATA_P. This function controls whether
8540 locally-defined objects go in a small data section. It also controls
8541 the setting of the SYMBOL_REF_SMALL_P flag, which in turn helps
8542 mips_classify_symbol decide when to use %gp_rel(...)($gp) accesses. */
/* NOTE(review): incomplete extract -- return type, braces and the
   "return true/false" lines after each guard are missing.  Code kept
   byte-identical.  */
8545 mips_in_small_data_p (const_tree decl)
8549 if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
8552 /* We don't yet generate small-data references for -mabicalls or
8553 VxWorks RTP code. See the related -G handling in override_options. */
8554 if (TARGET_ABICALLS || TARGET_VXWORKS_RTP)
8557 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
8561 /* Reject anything that isn't in a known small-data section. */
8562 name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
8563 if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
8566 /* If a symbol is defined externally, the assembler will use the
8567 usual -G rules when deciding how to implement macros. */
8568 if (mips_lo_relocs[SYMBOL_GP_RELATIVE] || !DECL_EXTERNAL (decl))
8571 else if (TARGET_EMBEDDED_DATA)
8573 /* Don't put constants into the small data section: we want them
8574 to be in ROM rather than RAM. */
8575 if (TREE_CODE (decl) != VAR_DECL)
8578 if (TREE_READONLY (decl)
8579 && !TREE_SIDE_EFFECTS (decl)
8580 && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
8584 /* Enforce -mlocal-sdata. */
8585 if (!TARGET_LOCAL_SDATA && !TREE_PUBLIC (decl))
8588 /* Enforce -mextern-sdata. */
8589 if (!TARGET_EXTERN_SDATA && DECL_P (decl))
8591 if (DECL_EXTERNAL (decl))
8593 if (DECL_COMMON (decl) && DECL_INITIAL (decl) == NULL)
/* Finally, the object qualifies iff its size is within the -G limit.  */
8597 size = int_size_in_bytes (TREE_TYPE (decl));
8598 return (size > 0 && size <= mips_section_threshold);
8601 /* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
8602 anchors for small data: the GP register acts as an anchor in that
8603 case. We also don't want to use them for PC-relative accesses,
8604 where the PC acts as an anchor. */
/* NOTE(review): incomplete extract -- return type, braces and the
   return/default lines of the switch are missing.  Code kept
   byte-identical.  */
8607 mips_use_anchors_for_symbol_p (const_rtx symbol)
8609 switch (mips_classify_symbol (symbol, SYMBOL_CONTEXT_MEM))
8611 case SYMBOL_PC_RELATIVE:
8612 case SYMBOL_GP_RELATIVE:
8620 /* See whether VALTYPE is a record whose fields should be returned in
8621 floating-point registers. If so, return the number of fields and
8622 list them in FIELDS (which should have two elements). Return 0
8625 For n32 & n64, a structure with one or two fields is returned in
8626 floating-point registers as long as every field has a floating-point
/* NOTE(review): incomplete extract -- return type, braces, the ABI guard
   and the field-count checks are missing.  Code kept byte-identical.  */
8630 mips_fpr_return_fields (const_tree valtype, tree *fields)
8638 if (TREE_CODE (valtype) != RECORD_TYPE)
/* Walk the record's members; only FIELD_DECLs of REAL_TYPE qualify.  */
8642 for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
8644 if (TREE_CODE (field) != FIELD_DECL)
8647 if (TREE_CODE (TREE_TYPE (field)) != REAL_TYPE)
8653 fields[i++] = field;
8659 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
8660 a value in the most significant part of $2/$3 if:
8662 - the target is big-endian;
8664 - the value has a structure or union type (we generalize this to
8665 cover aggregates from other languages too); and
8667 - the structure is not returned in floating-point registers. */
/* NOTE(review): the return type, braces and the FIELDS declaration are not
   visible in this extract.  */
8670 mips_return_in_msb (const_tree valtype)
8674 return (TARGET_NEWABI
8675 && TARGET_BIG_ENDIAN
8676 && AGGREGATE_TYPE_P (valtype)
8677 && mips_fpr_return_fields (valtype, fields) == 0);
8681 /* Return a composite value in a pair of floating-point registers.
8682 MODE1 and OFFSET1 are the mode and byte offset for the first value,
8683 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
8686 For n32 & n64, $f0 always holds the first value and $f2 the second.
8687 Otherwise the values are packed together as closely as possible. */
/* NOTE(review): return type, braces and part of the PARALLEL construction
   are not visible in this extract.  Code kept byte-identical.  */
8690 mips_return_fpr_pair (enum machine_mode mode,
8691 enum machine_mode mode1, HOST_WIDE_INT offset1,
8692 enum machine_mode mode2, HOST_WIDE_INT offset2)
/* Register stride between the two FPR halves of the return value.  */
8696 inc = (TARGET_NEWABI ? 2 : MAX_FPRS_PER_FMT);
8697 return gen_rtx_PARALLEL
8700 gen_rtx_EXPR_LIST (VOIDmode,
8701 gen_rtx_REG (mode1, FP_RETURN),
8703 gen_rtx_EXPR_LIST (VOIDmode,
8704 gen_rtx_REG (mode2, FP_RETURN + inc),
8705 GEN_INT (offset2))));
8710 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
8711 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
8712 VALTYPE is null and MODE is the mode of the return value. */
/* NOTE(review): incomplete extract -- return type, braces, the VALTYPE
   null-check and several case labels are missing.  Code kept
   byte-identical.  */
8715 mips_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
8716 enum machine_mode mode)
8723 mode = TYPE_MODE (valtype);
8724 unsignedp = TYPE_UNSIGNED (valtype);
8726 /* Since we define TARGET_PROMOTE_FUNCTION_RETURN that returns
8727 true, we must promote the mode just as PROMOTE_MODE does. */
8728 mode = promote_mode (valtype, mode, &unsignedp, 1);
8730 /* Handle structures whose fields are returned in $f0/$f2. */
8731 switch (mips_fpr_return_fields (valtype, fields))
8734 return gen_rtx_REG (mode, FP_RETURN);
8737 return mips_return_fpr_pair (mode,
8738 TYPE_MODE (TREE_TYPE (fields[0])),
8739 int_byte_position (fields[0]),
8740 TYPE_MODE (TREE_TYPE (fields[1])),
8741 int_byte_position (fields[1]));
8744 /* If a value is passed in the most significant part of a register, see
8745 whether we have to round the mode up to a whole number of words. */
8746 if (mips_return_in_msb (valtype))
8748 HOST_WIDE_INT size = int_size_in_bytes (valtype);
8749 if (size % UNITS_PER_WORD != 0)
8751 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
8752 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
8756 /* For EABI, the class of return register depends entirely on MODE.
8757 For example, "struct { some_type x; }" and "union { some_type x; }"
8758 are returned in the same way as a bare "some_type" would be.
8759 Other ABIs only use FPRs for scalar, complex or vector types. */
8760 if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
8761 return gen_rtx_REG (mode, GP_RETURN);
8766 /* Handle long doubles for n32 & n64. */
8768 return mips_return_fpr_pair (mode,
8770 DImode, GET_MODE_SIZE (mode) / 2);
8772 if (mips_return_mode_in_fpr_p (mode))
8774 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
8775 return mips_return_fpr_pair (mode,
8776 GET_MODE_INNER (mode), 0,
8777 GET_MODE_INNER (mode),
8778 GET_MODE_SIZE (mode) / 2);
8780 return gen_rtx_REG (mode, FP_RETURN);
/* Default: return in the general-purpose return register.  */
8784 return gen_rtx_REG (mode, GP_RETURN);
8787 /* Return nonzero when an argument must be passed by reference. */
/* NOTE(review): return type, braces and the SIZE declaration are not
   visible in this extract.  Code kept byte-identical.  */
8790 mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
8791 enum machine_mode mode, const_tree type,
8792 bool named ATTRIBUTE_UNUSED)
8794 if (mips_abi == ABI_EABI)
8798 /* ??? How should SCmode be handled? */
8799 if (mode == DImode || mode == DFmode)
/* EABI: by reference if the size is unknown or wider than a word.  */
8802 size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
8803 return size == -1 || size > UNITS_PER_WORD;
8807 /* If we have a variable-sized parameter, we have no choice. */
8808 return targetm.calls.must_pass_in_stack (mode, type);
/* Implement TARGET_CALLEE_COPIES: under EABI the callee copies named
   by-reference arguments.  (Return type and braces are not visible in
   this extract.)  */
8813 mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
8814 enum machine_mode mode ATTRIBUTE_UNUSED,
8815 const_tree type ATTRIBUTE_UNUSED, bool named)
8817 return mips_abi == ABI_EABI && named;
8820 /* Return true if registers of class CLASS cannot change from mode FROM
/* NOTE(review): incomplete extract -- return type, braces, several
   "return true/false" lines and part of the final condition are missing.
   Code kept byte-identical.  */
8824 mips_cannot_change_mode_class (enum machine_mode from,
8825 enum machine_mode to, enum reg_class class)
/* Mode change crosses the single-word/multi-word boundary.  */
8827 if (MIN (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) <= UNITS_PER_WORD
8828 && MAX (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) > UNITS_PER_WORD)
8830 if (TARGET_BIG_ENDIAN)
8832 /* When a multi-word value is stored in paired floating-point
8833 registers, the first register always holds the low word.
8834 We therefore can't allow FPRs to change between single-word
8835 and multi-word modes. */
8836 if (MAX_FPRS_PER_FMT > 1 && reg_classes_intersect_p (FP_REGS, class))
8841 /* gcc assumes that each word of a multiword register can be accessed
8842 individually using SUBREGs. This is not true for floating-point
8843 registers if they are bigger than a word. */
8844 if (UNITS_PER_FPREG > UNITS_PER_WORD
8845 && GET_MODE_SIZE (from) > UNITS_PER_WORD
8846 && GET_MODE_SIZE (to) < UNITS_PER_FPREG
8847 && reg_classes_intersect_p (FP_REGS, class))
8850 /* Loading a 32-bit value into a 64-bit floating-point register
8851 will not sign-extend the value, despite what LOAD_EXTEND_OP says.
8852 We can't allow 64-bit float registers to change from SImode to
8857 && GET_MODE_SIZE (to) >= UNITS_PER_WORD
8858 && reg_classes_intersect_p (FP_REGS, class))
8864 /* Return true if X should not be moved directly into register $25.
8865 We need this because many versions of GAS will treat "la $25,foo" as
8866 part of a call sequence and so allow a global "foo" to be lazily bound. */
/* NOTE(review): return type, braces and one conjunct of the condition are
   not visible in this extract.  */
8869 mips_dangerous_for_la25_p (rtx x)
8871 return (!TARGET_EXPLICIT_RELOCS
8873 && GET_CODE (x) == SYMBOL_REF
8874 && mips_global_symbol_p (x));
8877 /* Implement PREFERRED_RELOAD_CLASS. */
/* NOTE(review): incomplete extract -- return type, braces and the returned
   class after each guard are missing.  Code kept byte-identical.  */
8880 mips_preferred_reload_class (rtx x, enum reg_class class)
/* Avoid $25 for values GAS might treat as lazily-bound call targets.  */
8882 if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, class))
8885 if (TARGET_HARD_FLOAT
8886 && FLOAT_MODE_P (GET_MODE (x))
8887 && reg_class_subset_p (FP_REGS, class))
8890 if (reg_class_subset_p (GR_REGS, class))
8893 if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, class))
8899 /* This function returns the register class required for a secondary
8900 register when copying between one of the registers in CLASS, and X,
8901 using MODE. If IN_P is nonzero, the copy is going from X to the
8902 register, otherwise the register is the source. A return value of
8903 NO_REGS means that no secondary register is required. */
/* NOTE(review): incomplete extract -- return type, braces, the REGNO
   declaration/initialization, and many "return ..." lines are missing.
   Code kept byte-identical.  */
8906 mips_secondary_reload_class (enum reg_class class,
8907 enum machine_mode mode, rtx x, int in_p)
8909 enum reg_class gr_regs = TARGET_MIPS16 ? M16_REGS : GR_REGS;
8913 if (REG_P (x)|| GET_CODE (x) == SUBREG)
8914 regno = true_regnum (x);
8916 gp_reg_p = TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
/* Never reload a lazily-bindable symbol through $25.  */
8918 if (mips_dangerous_for_la25_p (x))
8921 if (TEST_HARD_REG_BIT (reg_class_contents[(int) class], 25))
8925 /* Copying from HI or LO to anywhere other than a general register
8926 requires a general register.
8927 This rule applies to both the original HI/LO pair and the new
8928 DSP accumulators. */
8929 if (reg_class_subset_p (class, ACC_REGS))
8931 if (TARGET_MIPS16 && in_p)
8933 /* We can't really copy to HI or LO at all in mips16 mode. */
8936 return gp_reg_p ? NO_REGS : gr_regs;
8938 if (ACC_REG_P (regno))
8940 if (TARGET_MIPS16 && ! in_p)
8942 /* We can't really copy to HI or LO at all in mips16 mode. */
8945 return class == gr_regs ? NO_REGS : gr_regs;
8948 /* We can only copy a value to a condition code register from a
8949 floating point register, and even then we require a scratch
8950 floating point register. We can only copy a value out of a
8951 condition code register into a general register. */
8952 if (class == ST_REGS)
8956 return gp_reg_p ? NO_REGS : gr_regs;
8958 if (ST_REG_P (regno))
8962 return class == gr_regs ? NO_REGS : gr_regs;
8965 if (class == FP_REGS)
8969 /* In this case we can use lwc1, swc1, ldc1 or sdc1. */
8972 else if (CONSTANT_P (x) && GET_MODE_CLASS (mode) == MODE_FLOAT)
8974 /* We can use the l.s and l.d macros to load floating-point
8975 constants. ??? For l.s, we could probably get better
8976 code by returning GR_REGS here. */
8979 else if (gp_reg_p || x == CONST0_RTX (mode))
8981 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
8984 else if (FP_REG_P (regno))
8986 /* In this case we can use mov.s or mov.d. */
8991 /* Otherwise, we need to reload through an integer register. */
8996 /* In mips16 mode, going between memory and anything but M16_REGS
8997 requires an M16_REG. */
9000 if (class != M16_REGS && class != M16_NA_REGS)
9008 if (class == M16_REGS || class == M16_NA_REGS)
9017 /* Implement CLASS_MAX_NREGS.
9019 - UNITS_PER_FPREG controls the number of registers needed by FP_REGS.
9021 - ST_REGS always hold CCmode values, and CCmode values are
9022 considered to be 4 bytes wide.
9024 All other register classes are covered by UNITS_PER_WORD. Note that
9025 this is true even for unions of integer and float registers when the
9026 latter are smaller than the former. The only supported combination
9027 in which case this occurs is -mgp64 -msingle-float, which has 64-bit
9028 words but 32-bit float registers. A word-based calculation is correct
9029 in that case since -msingle-float disallows multi-FPR values. */
/* NOTE(review): return type and braces are not visible in this extract.  */
9032 mips_class_max_nregs (enum reg_class class ATTRIBUTE_UNUSED,
9033 enum machine_mode mode)
9035 if (class == ST_REGS)
9036 return (GET_MODE_SIZE (mode) + 3) / 4;
9037 else if (class == FP_REGS)
9038 return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
9040 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
/* Return true if MODE is a valid pointer mode: SImode always, DImode only
   on 64-bit targets.  (Return type and braces are not visible in this
   extract.)  */
9044 mips_valid_pointer_mode (enum machine_mode mode)
9046 return (mode == SImode || (TARGET_64BIT && mode == DImode));
9049 /* Target hook for vector_mode_supported_p. */
/* NOTE(review): incomplete extract -- return type, braces, the switch over
   MODE and the other case arms are missing.  Code kept byte-identical.  */
9052 mips_vector_mode_supported_p (enum machine_mode mode)
9057 return TARGET_PAIRED_SINGLE_FLOAT;
9068 /* If we can access small data directly (using gp-relative relocation
9069 operators) return the small data pointer, otherwise return null.
9071 For each mips16 function which refers to GP relative symbols, we
9072 use a pseudo register, initialized at the start of the function, to
9073 hold the $gp value. */
/* NOTE(review): incomplete extract -- return type, braces, the INSN/SCAN
   declarations and the NOTE_P test in the loop are missing.  Code kept
   byte-identical.  */
9076 mips16_gp_pseudo_reg (void)
/* Lazily create the pseudo on first use.  */
9078 if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
9079 cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
9081 /* Don't initialize the pseudo register if we are being called from
9082 the tree optimizers' cost-calculation routines. */
9083 if (!cfun->machine->initialized_mips16_gp_pseudo_p
9084 && (current_ir_type () != IR_GIMPLE || currently_expanding_to_rtl))
9088 /* We want to initialize this to a value which gcc will believe
9090 insn = gen_load_const_gp (cfun->machine->mips16_gp_pseudo_rtx);
9092 push_topmost_sequence ();
9093 /* We need to emit the initialization after the FUNCTION_BEG
9094 note, so that it will be integrated. */
9095 for (scan = get_insns (); scan != NULL_RTX; scan = NEXT_INSN (scan))
9097 && NOTE_KIND (scan) == NOTE_INSN_FUNCTION_BEG)
9099 if (scan == NULL_RTX)
9100 scan = get_insns ();
9101 insn = emit_insn_after (insn, scan);
9102 pop_topmost_sequence ();
/* Only ever emit the initialization once per function.  */
9104 cfun->machine->initialized_mips16_gp_pseudo_p = true;
9107 return cfun->machine->mips16_gp_pseudo_rtx;
9110 /* Write out code to move floating point arguments in or out of
9111 general registers. Output the instructions to FILE. FP_CODE is
9112 the code describing which arguments are present (see the comment at
9113 the definition of CUMULATIVE_ARGS in mips.h). FROM_FP_P is nonzero if
9114 we are copying from the floating point registers. */
/* NOTE(review): incomplete extract -- return type, braces, the F/S/GPARG/
   FPARG declarations, the mode-selection branches and the single-word
   condition are missing.  Code kept byte-identical.  */
9117 mips16_fp_args (FILE *file, int fp_code, int from_fp_p)
9122 CUMULATIVE_ARGS cum;
9124 /* This code only works for the original 32-bit ABI and the O64 ABI. */
9125 gcc_assert (TARGET_OLDABI);
9132 init_cumulative_args (&cum, NULL, NULL);
/* Each two-bit field of FP_CODE describes one FP argument.  */
9134 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
9136 enum machine_mode mode;
9137 struct mips_arg_info info;
9141 else if ((f & 3) == 2)
9146 mips_arg_info (&cum, mode, NULL, true, &info);
9147 gparg = mips_arg_regno (&info, false);
9148 fparg = mips_arg_regno (&info, true);
/* Single-register case (condition not visible here).  */
9151 fprintf (file, "\t%s\t%s,%s\n", s,
9152 reg_names[gparg], reg_names[fparg]);
9153 else if (TARGET_64BIT)
9154 fprintf (file, "\td%s\t%s,%s\n", s,
9155 reg_names[gparg], reg_names[fparg]);
9156 else if (ISA_HAS_MXHC1)
9157 /* -mips32r2 -mfp64 */
9158 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n",
9160 reg_names[gparg + (WORDS_BIG_ENDIAN ? 1 : 0)],
9162 from_fp_p ? "mfhc1" : "mthc1",
9163 reg_names[gparg + (WORDS_BIG_ENDIAN ? 0 : 1)],
9165 else if (TARGET_BIG_ENDIAN)
9166 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
9167 reg_names[gparg], reg_names[fparg + 1], s,
9168 reg_names[gparg + 1], reg_names[fparg]);
9170 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
9171 reg_names[gparg], reg_names[fparg], s,
9172 reg_names[gparg + 1], reg_names[fparg + 1]);
9174 function_arg_advance (&cum, mode, NULL, true);
9178 /* Build a mips16 function stub. This is used for functions which
9179 take arguments in the floating point registers. It is 32-bit code
9180 that moves the floating point args into the general registers, and
9181 then jumps to the 16-bit code. */
9184 build_mips16_function_stub (FILE *file)
9187 char *secname, *stubname;
9188 tree stubid, stubdecl;
/* Build the stub's section name (".mips16.fn.FNNAME") and symbol name
   ("__fn_stub_FNNAME") from the current function's assembler name.  */
9192 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
9193 secname = (char *) alloca (strlen (fnname) + 20);
9194 sprintf (secname, ".mips16.fn.%s", fnname);
9195 stubname = (char *) alloca (strlen (fnname) + 20);
9196 sprintf (stubname, "__fn_stub_%s", fnname);
9197 stubid = get_identifier (stubname);
9198 stubdecl = build_decl (FUNCTION_DECL, stubid,
9199 build_function_type (void_type_node, NULL_TREE));
9200 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
9201 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
/* Emit a human-readable comment listing the FP argument types.  */
9203 fprintf (file, "\t# Stub function for %s (", current_function_name ());
9205 for (f = (unsigned int) current_function_args_info.fp_code; f != 0; f >>= 2)
9207 fprintf (file, "%s%s",
9208 need_comma ? ", " : "",
9209 (f & 3) == 1 ? "float" : "double");
9212 fprintf (file, ")\n");
/* The stub itself is 32-bit code.  */
9214 fprintf (file, "\t.set\tnomips16\n");
9215 switch_to_section (function_section (stubdecl));
9216 ASM_OUTPUT_ALIGN (file, floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT));
9218 /* ??? If FUNCTION_NAME_ALREADY_DECLARED is defined, then we are
9219 within a .ent, and we cannot emit another .ent. */
9220 if (!FUNCTION_NAME_ALREADY_DECLARED)
9222 fputs ("\t.ent\t", file);
9223 assemble_name (file, stubname)
9227 assemble_name (file, stubname);
9228 fputs (":\n", file);
9230 /* We don't want the assembler to insert any nops here. */
9231 fprintf (file, "\t.set\tnoreorder\n")
9233 mips16_fp_args (file, current_function_args_info.fp_code, 1);
/* Load the real (mips16) function address into $1 and jump to it.  */
9235 fprintf (asm_out_file, "\t.set\tnoat\n");
9236 fprintf (asm_out_file, "\tla\t%s,", reg_names[GP_REG_FIRST + 1]);
9237 assemble_name (file, fnname);
9238 fprintf (file, "\n");
9239 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
9240 fprintf (asm_out_file, "\t.set\tat\n");
9242 /* Unfortunately, we can't fill the jump delay slot. We can't fill
9243 with one of the mfc1 instructions, because the result is not
9244 available for one instruction, so if the very first instruction
9245 in the function refers to the register, it will see the wrong
9247 fprintf (file, "\tnop\n");
9249 fprintf (file, "\t.set\treorder\n");
9251 if (!FUNCTION_NAME_ALREADY_DECLARED)
9253 fputs ("\t.end\t", file);
9254 assemble_name (file, stubname);
/* Return to the section of the function we are compiling.  */
9258 switch_to_section (function_section (current_function_decl));
9261 /* We keep a list of functions for which we have already built stubs
9262 in build_mips16_call_stub. */
/* Singly-linked list node; NOTE(review): the listing omits the other
   fields (the stub name and fpret flag are referenced as l->name and
   l->fpret below) — confirm against the full source.  */
9266 struct mips16_stub *next;
/* Head of the list of stubs built so far in this compilation.  */
9271 static struct mips16_stub *mips16_stubs;
9273 /* Emit code to return a double value from a mips16 stub. GPREG is the
9274 first GP reg to use, FPREG is the first FP reg to use. */
9277 mips16_fpret_double (int gpreg, int fpreg)
/* 64-bit FPRs: a single dmfc1 moves the whole double.  */
9280 fprintf (asm_out_file, "\tdmfc1\t%s,%s\n",
9281 reg_names[gpreg], reg_names[fpreg]);
9282 else if (TARGET_FLOAT64)
/* 64-bit FPRs with 32-bit GPRs: move low half with mfc1 and high
   half with mfhc1; which GPR gets which half depends on endianness.  */
9284 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9285 reg_names[gpreg + WORDS_BIG_ENDIAN],
9287 fprintf (asm_out_file, "\tmfhc1\t%s,%s\n",
9288 reg_names[gpreg + !WORDS_BIG_ENDIAN],
/* 32-bit FPRs: the double lives in an even/odd FPR pair; pick the
   mfc1 order to match the memory word order.  */
9293 if (TARGET_BIG_ENDIAN)
9295 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9296 reg_names[gpreg + 0],
9297 reg_names[fpreg + 1]);
9298 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9299 reg_names[gpreg + 1],
9300 reg_names[fpreg + 0]);
9304 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9305 reg_names[gpreg + 0],
9306 reg_names[fpreg + 0]);
9307 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9308 reg_names[gpreg + 1],
9309 reg_names[fpreg + 1]);
9314 /* Build a call stub for a mips16 call. A stub is needed if we are
9315 passing any floating point values which should go into the floating
9316 point registers. If we are, and the call turns out to be to a
9317 32-bit function, the stub will be used to move the values into the
9318 floating point registers before calling the 32-bit function. The
9319 linker will magically adjust the function call to either the 16-bit
9320 function or the 32-bit stub, depending upon where the function call
9321 is actually defined.
9323 Similarly, we need a stub if the return value might come back in a
9324 floating point register.
9326 RETVAL is the location of the return value, or null if this is
9327 a call rather than a call_value. FN is the address of the
9328 function and ARG_SIZE is the size of the arguments. FP_CODE
9329 is the code built by function_arg. This function returns a nonzero
9330 value if it builds the call instruction itself. */
9333 build_mips16_call_stub (rtx retval, rtx fn, rtx arg_size, int fp_code)
9337 char *secname, *stubname;
9338 struct mips16_stub *l;
9339 tree stubid, stubdecl;
9344 /* We don't need to do anything if we aren't in mips16 mode, or if
9345 we were invoked with the -msoft-float option. */
9346 if (!TARGET_MIPS16 || TARGET_SOFT_FLOAT_ABI)
9349 /* Figure out whether the value might come back in a floating point
9352 fpret = mips_return_mode_in_fpr_p (GET_MODE (retval));
9354 /* We don't need to do anything if there were no floating point
9355 arguments and the value will not be returned in a floating point
9357 if (fp_code == 0 && ! fpret)
9360 /* We don't need to do anything if this is a call to a special
9361 mips16 support function. */
9362 if (GET_CODE (fn) == SYMBOL_REF
9363 && strncmp (XSTR (fn, 0), "__mips16_", 9) == 0)
9366 /* This code will only work for o32 and o64 abis. The other ABI's
9367 require more sophisticated support. */
9368 gcc_assert (TARGET_OLDABI);
9370 /* If we're calling via a function pointer, then we must always call
9371 via a stub. There are magic stubs provided in libgcc.a for each
9372 of the required cases. Each of them expects the function address
9373 to arrive in register $2. */
9375 if (GET_CODE (fn) != SYMBOL_REF)
9381 /* ??? If this code is modified to support other ABI's, we need
9382 to handle PARALLEL return values here. */
/* Choose the libgcc stub name; when a value is returned in FPRs the
   name also encodes the return mode.  */
9385 sprintf (buf, "__mips16_call_stub_%s_%d",
9386 mips16_call_stub_mode_suffix (GET_MODE (retval)),
9389 sprintf (buf, "__mips16_call_stub_%d",
9392 id = get_identifier (buf);
9393 stub_fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
/* The libgcc stubs expect the target address in $2.  */
9395 mips_emit_move (gen_rtx_REG (Pmode, 2), fn);
9397 if (retval == NULL_RTX)
9398 insn = gen_call_internal (stub_fn, arg_size);
9400 insn = gen_call_value_internal (retval, stub_fn, arg_size);
9401 insn = emit_call_insn (insn);
9403 /* Put the register usage information on the CALL. */
9404 CALL_INSN_FUNCTION_USAGE (insn) =
9405 gen_rtx_EXPR_LIST (VOIDmode,
9406 gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 2)),
9407 CALL_INSN_FUNCTION_USAGE (insn));
9409 /* If we are handling a floating point return value, we need to
9410 save $18 in the function prologue. Putting a note on the
9411 call will mean that df_regs_ever_live_p ($18) will be true if the
9412 call is not eliminated, and we can check that in the prologue
9415 CALL_INSN_FUNCTION_USAGE (insn) =
9416 gen_rtx_EXPR_LIST (VOIDmode,
9417 gen_rtx_USE (VOIDmode,
9418 gen_rtx_REG (word_mode, 18)),
9419 CALL_INSN_FUNCTION_USAGE (insn));
9421 /* Return 1 to tell the caller that we've generated the call
9426 /* We know the function we are going to call. If we have already
9427 built a stub, we don't need to do anything further. */
9429 fnname = XSTR (fn, 0);
9430 for (l = mips16_stubs; l != NULL; l = l->next)
9431 if (strcmp (l->name, fnname) == 0)
9436 /* Build a special purpose stub. When the linker sees a
9437 function call in mips16 code, it will check where the target
9438 is defined. If the target is a 32-bit call, the linker will
9439 search for the section defined here. It can tell which
9440 symbol this section is associated with by looking at the
9441 relocation information (the name is unreliable, since this
9442 might be a static function). If such a section is found, the
9443 linker will redirect the call to the start of the magic
9446 If the function does not return a floating point value, the
9447 special stub section is named
9450 If the function does return a floating point value, the stub
9452 .mips16.call.fp.FNNAME
9455 secname = (char *) alloca (strlen (fnname) + 40);
9456 sprintf (secname, ".mips16.call.%s%s",
9459 stubname = (char *) alloca (strlen (fnname) + 20);
9460 sprintf (stubname, "__call_stub_%s%s",
9463 stubid = get_identifier (stubname);
9464 stubdecl = build_decl (FUNCTION_DECL, stubid,
9465 build_function_type (void_type_node, NULL_TREE));
9466 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
9467 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
/* Emit a human-readable comment describing the call signature.  */
9469 fprintf (asm_out_file, "\t# Stub function to call %s%s (",
9471 ? (GET_MODE (retval) == SFmode ? "float " : "double ")
9475 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
9477 fprintf (asm_out_file, "%s%s",
9478 need_comma ? ", " : "",
9479 (f & 3) == 1 ? "float" : "double");
9482 fprintf (asm_out_file, ")\n");
/* The stub is 32-bit code.  */
9484 fprintf (asm_out_file, "\t.set\tnomips16\n");
9485 assemble_start_function (stubdecl, stubname);
9487 if (!FUNCTION_NAME_ALREADY_DECLARED)
9489 fputs ("\t.ent\t", asm_out_file);
9490 assemble_name (asm_out_file, stubname);
9491 fputs ("\n", asm_out_file);
9493 assemble_name (asm_out_file, stubname);
9494 fputs (":\n", asm_out_file);
9497 /* We build the stub code by hand. That's the only way we can
9498 do it, since we can't generate 32-bit code during a 16-bit
9501 /* We don't want the assembler to insert any nops here. */
9502 fprintf (asm_out_file, "\t.set\tnoreorder\n");
/* Move the FP arguments from GPRs into FPRs.  */
9504 mips16_fp_args (asm_out_file, fp_code, 0);
/* No FP return value: just tail-jump to the real function via $1.  */
9508 fprintf (asm_out_file, "\t.set\tnoat\n");
9509 fprintf (asm_out_file, "\tla\t%s,%s\n", reg_names[GP_REG_FIRST + 1],
9511 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
9512 fprintf (asm_out_file, "\t.set\tat\n");
9513 /* Unfortunately, we can't fill the jump delay slot. We
9514 can't fill with one of the mtc1 instructions, because the
9515 result is not available for one instruction, so if the
9516 very first instruction in the function refers to the
9517 register, it will see the wrong value. */
9518 fprintf (asm_out_file, "\tnop\n");
/* FP return value: save the return address in $18, call the real
   function, then move the FPR result into GPRs before returning.  */
9522 fprintf (asm_out_file, "\tmove\t%s,%s\n",
9523 reg_names[GP_REG_FIRST + 18], reg_names[GP_REG_FIRST + 31]);
9524 fprintf (asm_out_file, "\tjal\t%s\n", fnname);
9525 /* As above, we can't fill the delay slot. */
9526 fprintf (asm_out_file, "\tnop\n");
9527 if (GET_MODE (retval) == SFmode)
9528 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9529 reg_names[GP_REG_FIRST + 2], reg_names[FP_REG_FIRST + 0]);
9530 else if (GET_MODE (retval) == SCmode)
/* Complex float: real part in $f0, imaginary part one FP format
   further on.  */
9532 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9533 reg_names[GP_REG_FIRST + 2],
9534 reg_names[FP_REG_FIRST + 0]);
9535 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9536 reg_names[GP_REG_FIRST + 3],
9537 reg_names[FP_REG_FIRST + MAX_FPRS_PER_FMT]);
9539 else if (GET_MODE (retval) == DFmode
9540 || GET_MODE (retval) == V2SFmode)
9542 mips16_fpret_double (GP_REG_FIRST + 2, FP_REG_FIRST + 0);
9544 else if (GET_MODE (retval) == DCmode)
9546 mips16_fpret_double (GP_REG_FIRST + 2,
9548 mips16_fpret_double (GP_REG_FIRST + 4,
9549 FP_REG_FIRST + MAX_FPRS_PER_FMT);
/* Remaining case: two-word value in an FPR pair, moved a word at a
   time with the order fixed by endianness.  */
9553 if (TARGET_BIG_ENDIAN)
9555 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9556 reg_names[GP_REG_FIRST + 2],
9557 reg_names[FP_REG_FIRST + 1]);
9558 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9559 reg_names[GP_REG_FIRST + 3],
9560 reg_names[FP_REG_FIRST + 0]);
9564 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9565 reg_names[GP_REG_FIRST + 2],
9566 reg_names[FP_REG_FIRST + 0]);
9567 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9568 reg_names[GP_REG_FIRST + 3],
9569 reg_names[FP_REG_FIRST + 1]);
/* Return to the caller through the saved return address in $18.  */
9572 fprintf (asm_out_file, "\tj\t%s\n", reg_names[GP_REG_FIRST + 18]);
9573 /* As above, we can't fill the delay slot. */
9574 fprintf (asm_out_file, "\tnop\n");
9577 fprintf (asm_out_file, "\t.set\treorder\n");
9579 #ifdef ASM_DECLARE_FUNCTION_SIZE
9580 ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
9583 if (!FUNCTION_NAME_ALREADY_DECLARED)
9585 fputs ("\t.end\t", asm_out_file);
9586 assemble_name (asm_out_file, stubname);
9587 fputs ("\n", asm_out_file);
9590 /* Record this stub. */
9591 l = (struct mips16_stub *) xmalloc (sizeof *l);
9592 l->name = xstrdup (fnname);
9594 l->next = mips16_stubs;
9598 /* If we expect a floating point return value, but we've built a
9599 stub which does not expect one, then we're in trouble. We can't
9600 use the existing stub, because it won't handle the floating point
9601 value. We can't build a new stub, because the linker won't know
9602 which stub to use for the various calls in this object file.
9603 Fortunately, this case is illegal, since it means that a function
9604 was declared in two different ways in a single compilation. */
9605 if (fpret && ! l->fpret)
9606 error ("cannot handle inconsistent calls to %qs", fnname);
/* Emit the actual call; the linker will redirect it to the stub if
   the target turns out to be 32-bit code.  */
9608 if (retval == NULL_RTX)
9609 insn = gen_call_internal_direct (fn, arg_size);
9611 insn = gen_call_value_internal_direct (retval, fn, arg_size);
9612 insn = emit_call_insn (insn);
9614 /* If we are calling a stub which handles a floating point return
9615 value, we need to arrange to save $18 in the prologue. We do
9616 this by marking the function call as using the register. The
9617 prologue will later see that it is used, and emit code to save
9620 CALL_INSN_FUNCTION_USAGE (insn) =
9621 gen_rtx_EXPR_LIST (VOIDmode,
9622 gen_rtx_USE (VOIDmode, gen_rtx_REG (word_mode, 18)),
9623 CALL_INSN_FUNCTION_USAGE (insn));
9625 /* Return 1 to tell the caller that we've generated the call
9630 /* An entry in the mips16 constant pool. VALUE is the pool constant,
9631 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
9633 struct mips16_constant {
9634 struct mips16_constant *next;
9637 enum machine_mode mode;
9640 /* Information about an incomplete mips16 constant pool. FIRST is the
9641 first constant, HIGHEST_ADDRESS is the highest address that the first
9642 byte of the pool can have, and INSN_ADDRESS is the current instruction
9645 struct mips16_constant_pool {
9646 struct mips16_constant *first;
9647 int highest_address;
9651 /* Add constant VALUE to POOL and return its label. MODE is the
9652 value's mode (used for CONST_INTs, etc.). */
9655 add_constant (struct mips16_constant_pool *pool,
9656 rtx value, enum machine_mode mode)
9658 struct mips16_constant **p, *c;
9659 bool first_of_size_p;
9661 /* See whether the constant is already in the pool. If so, return the
9662 existing label, otherwise leave P pointing to the place where the
9663 constant should be added.
9665 Keep the pool sorted in increasing order of mode size so that we can
9666 reduce the number of alignments needed. */
9667 first_of_size_p = true;
9668 for (p = &pool->first; *p != 0; p = &(*p)->next)
9670 if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
9672 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
9674 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
9675 first_of_size_p = false;
9678 /* In the worst case, the constant needed by the earliest instruction
9679 will end up at the end of the pool. The entire pool must then be
9680 accessible from that instruction.
9682 When adding the first constant, set the pool's highest address to
9683 the address of the first out-of-range byte. Adjust this address
9684 downwards each time a new constant is added. */
9685 if (pool->first == 0)
9686 /* For pc-relative lw, addiu and daddiu instructions, the base PC value
9687 is the address of the instruction with the lowest two bits clear.
9688 The base PC value for ld has the lowest three bits clear. Assume
9689 the worst case here. */
9690 pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
9691 pool->highest_address -= GET_MODE_SIZE (mode);
9692 if (first_of_size_p)
9693 /* Take into account the worst possible padding due to alignment. */
9694 pool->highest_address -= GET_MODE_SIZE (mode) - 1;
9696 /* Create a new entry. */
9697 c = (struct mips16_constant *) xmalloc (sizeof *c);
/* A fresh label identifies the pool slot; references to the constant
   are rewritten to point at this label.  */
9700 c->label = gen_label_rtx ();
9707 /* Output constant VALUE after instruction INSN and return the last
9708 instruction emitted. MODE is the mode of the constant. */
9711 dump_constants_1 (enum machine_mode mode, rtx value, rtx insn)
9713 switch (GET_MODE_CLASS (mode))
9717 rtx size = GEN_INT (GET_MODE_SIZE (mode));
9718 return emit_insn_after (gen_consttable_int (value, size), insn);
9722 return emit_insn_after (gen_consttable_float (value), insn);
9724 case MODE_VECTOR_FLOAT:
9725 case MODE_VECTOR_INT:
/* Vectors are emitted element by element, recursing on the inner
   (scalar) mode.  */
9728 for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
9729 insn = dump_constants_1 (GET_MODE_INNER (mode),
9730 CONST_VECTOR_ELT (value, i), insn);
9740 /* Dump out the constants in CONSTANTS after INSN. */
9743 dump_constants (struct mips16_constant *constants, rtx insn)
9745 struct mips16_constant *c, *next;
9749 for (c = constants; c != NULL; c = next)
9751 /* If necessary, increase the alignment of PC. */
9752 if (align < GET_MODE_SIZE (c->mode))
9754 int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
9755 insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
9757 align = GET_MODE_SIZE (c->mode);
/* Emit the entry's label, then its data.  */
9759 insn = emit_label_after (c->label, insn);
9760 insn = dump_constants_1 (c->mode, c->value, insn);
/* A barrier after the pool stops the flow of control from falling
   into the constant data.  */
9766 emit_barrier_after (insn);
9769 /* Return the length of instruction INSN. */
9772 mips16_insn_length (rtx insn)
9776 rtx body = PATTERN (insn);
/* Jump tables: the length is the number of entries times the entry
   size (given by the table's mode).  */
9777 if (GET_CODE (body) == ADDR_VEC)
9778 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
9779 if (GET_CODE (body) == ADDR_DIFF_VEC)
9780 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
/* Everything else: use the length computed from the "length" insn
   attribute.  */
9782 return get_attr_length (insn);
9785 /* Rewrite *X so that constant pool references refer to the constant's
9786 label instead. DATA points to the constant pool structure. */
9789 mips16_rewrite_pool_refs (rtx *x, void *data)
9791 struct mips16_constant_pool *pool = data;
9792 rtx base, offset, label;
9796 else if (!TARGET_MIPS16_TEXT_LOADS)
/* Split *X into a symbolic base plus constant offset; if the base is
   a constant-pool symbol, add the constant to our own pool and
   replace the reference with a pc-relative one to the pool label.  */
9799 split_const (*x, &base, &offset);
9800 if (GET_CODE (base) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (base))
9802 label = add_constant (pool, get_pool_constant (base),
9803 get_pool_mode (base));
9804 base = gen_rtx_LABEL_REF (Pmode, label);
9805 *x = mips_unspec_address_offset (base, offset, SYMBOL_PC_RELATIVE);
/* Returning -1 for CONST stops for_each_rtx from walking into an
   expression we have already handled.  */
9808 return GET_CODE (*x) == CONST ? -1 : 0;
9811 /* Build MIPS16 constant pools. */
9814 mips16_lay_out_constants (void)
9816 struct mips16_constant_pool pool;
9819 if (!TARGET_MIPS16_PCREL_LOADS)
/* Walk the insn stream tracking the current address, rewriting pool
   references and flushing the pool when it would go out of range.  */
9823 memset (&pool, 0, sizeof (pool));
9824 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9826 /* Rewrite constant pool references in INSN. */
9828 for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &pool);
9830 pool.insn_address += mips16_insn_length (insn);
9832 if (pool.first != NULL)
9834 /* If there are no natural barriers between the first user of
9835 the pool and the highest acceptable address, we'll need to
9836 create a new instruction to jump around the constant pool.
9837 In the worst case, this instruction will be 4 bytes long.
9839 If it's too late to do this transformation after INSN,
9840 do it immediately before INSN. */
9841 if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
9845 label = gen_label_rtx ();
9847 jump = emit_jump_insn_before (gen_jump (label), insn);
9848 JUMP_LABEL (jump) = label;
9849 LABEL_NUSES (label) = 1;
9850 barrier = emit_barrier_after (jump);
9852 emit_label_after (label, barrier);
9853 pool.insn_address += 4;
9856 /* See whether the constant pool is now out of range of the first
9857 user. If so, output the constants after the previous barrier.
9858 Note that any instructions between BARRIER and INSN (inclusive)
9859 will use negative offsets to refer to the pool. */
9860 if (pool.insn_address > pool.highest_address)
9862 dump_constants (pool.first, barrier);
9866 else if (BARRIER_P (insn))
/* Flush any remaining constants at the end of the function.  */
9870 dump_constants (pool.first, get_last_insn ());
9873 /* A temporary variable used by for_each_rtx callbacks, etc. */
9874 static rtx mips_sim_insn;
9876 /* A structure representing the state of the processor pipeline.
9877 Used by the mips_sim_* family of functions. */
9879 /* The maximum number of instructions that can be issued in a cycle.
9880 (Caches mips_issue_rate.) */
9881 unsigned int issue_rate;
9883 /* The current simulation time. */
9886 /* How many more instructions can be issued in the current cycle. */
9887 unsigned int insns_left;
9889 /* LAST_SET[X].INSN is the last instruction to set register X.
9890 LAST_SET[X].TIME is the time at which that instruction was issued.
9891 INSN is null if no instruction has yet set register X. */
9895 } last_set[FIRST_PSEUDO_REGISTER];
9897 /* The pipeline's current DFA state. */
9901 /* Reset STATE to the initial simulation state. */
9904 mips_sim_reset (struct mips_sim *state)
/* A fresh cycle has the full issue bandwidth available, no recorded
   register writes, and a reset DFA state.  */
9907 state->insns_left = state->issue_rate;
9908 memset (&state->last_set, 0, sizeof (state->last_set));
9909 state_reset (state->dfa_state);
9912 /* Initialize STATE before its first use. DFA_STATE points to an
9913 allocated but uninitialized DFA state. */
9916 mips_sim_init (struct mips_sim *state, state_t dfa_state)
9918 state->issue_rate = mips_issue_rate ();
9919 state->dfa_state = dfa_state;
9920 mips_sim_reset (state);
9923 /* Advance STATE by one clock cycle. */
9926 mips_sim_next_cycle (struct mips_sim *state)
/* Restore the cycle's issue budget and advance the DFA; passing a
   null insn to state_transition models a cycle boundary.  */
9929 state->insns_left = state->issue_rate;
9930 state_transition (state->dfa_state, 0);
9933 /* Advance simulation state STATE until instruction INSN can read
9937 mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
/* Check every hard register REG occupies (multi-word modes span
   several).  */
9941 for (i = 0; i < HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)); i++)
9942 if (state->last_set[REGNO (reg) + i].insn != 0)
/* Wait until the producing instruction's latency has elapsed.  */
9946 t = state->last_set[REGNO (reg) + i].time;
9947 t += insn_latency (state->last_set[REGNO (reg) + i].insn, insn);
9948 while (state->time < t)
9949 mips_sim_next_cycle (state);
9953 /* A for_each_rtx callback. If *X is a register, advance simulation state
9954 DATA until mips_sim_insn can read the register's value. */
9957 mips_sim_wait_regs_2 (rtx *x, void *data)
9960 mips_sim_wait_reg (data, mips_sim_insn, *x);
9964 /* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X. */
9967 mips_sim_wait_regs_1 (rtx *x, void *data)
9969 for_each_rtx (x, mips_sim_wait_regs_2, data);
9972 /* Advance simulation state STATE until all of INSN's register
9973 dependencies are satisfied. */
9976 mips_sim_wait_regs (struct mips_sim *state, rtx insn)
/* mips_sim_insn communicates INSN to the for_each_rtx callbacks,
   which take only (rtx *, void *).  */
9978 mips_sim_insn = insn;
9979 note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
9982 /* Advance simulation state STATE until the units required by
9983 instruction INSN are available. */
9986 mips_sim_wait_units (struct mips_sim *state, rtx insn)
/* Probe issue on a scratch copy of the DFA state so a failed attempt
   does not perturb the real state.  */
9990 tmp_state = alloca (state_size ());
9991 while (state->insns_left == 0
9992 || (memcpy (tmp_state, state->dfa_state, state_size ()),
9993 state_transition (tmp_state, insn) >= 0))
9994 mips_sim_next_cycle (state);
9997 /* Advance simulation state STATE until INSN is ready to issue. */
10000 mips_sim_wait_insn (struct mips_sim *state, rtx insn)
/* Both register dependencies and functional-unit availability must
   be satisfied before issue.  */
10002 mips_sim_wait_regs (state, insn);
10003 mips_sim_wait_units (state, insn);
10006 /* mips_sim_insn has just set X. Update the LAST_SET array
10007 in simulation state DATA. */
10010 mips_sim_record_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
10012 struct mips_sim *state;
/* Record the setter and issue time for every hard register X spans.  */
10017 for (i = 0; i < HARD_REGNO_NREGS (REGNO (x), GET_MODE (x)); i++)
10019 state->last_set[REGNO (x) + i].insn = mips_sim_insn;
10020 state->last_set[REGNO (x) + i].time = state->time;
10024 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
10025 can issue immediately (i.e., that mips_sim_wait_insn has already
10029 mips_sim_issue_insn (struct mips_sim *state, rtx insn)
10031 state_transition (state->dfa_state, insn);
10032 state->insns_left--;
/* Record the registers INSN sets so later readers can model the
   latency (mips_sim_insn feeds mips_sim_record_set).  */
10034 mips_sim_insn = insn;
10035 note_stores (PATTERN (insn), mips_sim_record_set, state);
10038 /* Simulate issuing a NOP in state STATE. */
10041 mips_sim_issue_nop (struct mips_sim *state)
/* A nop consumes an issue slot but has no register or unit effects.  */
10043 if (state->insns_left == 0)
10044 mips_sim_next_cycle (state);
10045 state->insns_left--;
10048 /* Update simulation state STATE so that it's ready to accept the instruction
10049 after INSN. INSN should be part of the main rtl chain, not a member of a
10053 mips_sim_finish_insn (struct mips_sim *state, rtx insn)
10055 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
10057 mips_sim_issue_nop (state);
10059 switch (GET_CODE (SEQ_BEGIN (insn)))
10063 /* We can't predict the processor state after a call or label. */
10064 mips_sim_reset (state);
10068 /* The delay slots of branch likely instructions are only executed
10069 when the branch is taken. Therefore, if the caller has simulated
10070 the delay slot instruction, STATE does not really reflect the state
10071 of the pipeline for the instruction after the delay slot. Also,
10072 branch likely instructions tend to incur a penalty when not taken,
10073 so there will probably be an extra delay between the branch and
10074 the instruction after the delay slot. */
10075 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
10076 mips_sim_reset (state);
10084 /* The VR4130 pipeline issues aligned pairs of instructions together,
10085 but it stalls the second instruction if it depends on the first.
10086 In order to cut down the amount of logic required, this dependence
10087 check is not based on a full instruction decode. Instead, any non-SPECIAL
10088 instruction is assumed to modify the register specified by bits 20-16
10089 (which is usually the "rt" field).
10091 In beq, beql, bne and bnel instructions, the rt field is actually an
10092 input, so we can end up with a false dependence between the branch
10093 and its delay slot. If this situation occurs in instruction INSN,
10094 try to avoid it by swapping rs and rt. */
10097 vr4130_avoid_branch_rt_conflict (rtx insn)
/* FIRST is the branch and SECOND its delay-slot instruction (for a
   SEQUENCE); for a lone insn SEQ_BEGIN/SEQ_END both yield INSN.  */
10101 first = SEQ_BEGIN (insn);
10102 second = SEQ_END (insn);
10104 && NONJUMP_INSN_P (second)
10105 && GET_CODE (PATTERN (first)) == SET
10106 && GET_CODE (SET_DEST (PATTERN (first))) == PC
10107 && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
10109 /* Check for the right kind of condition. */
10110 rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
10111 if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
10112 && REG_P (XEXP (cond, 0))
10113 && REG_P (XEXP (cond, 1))
10114 && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
10115 && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
10117 /* SECOND mentions the rt register but not the rs register. */
/* EQ/NE are symmetric, so swapping the operands preserves the
   branch condition while removing the false rt dependence.  */
10118 rtx tmp = XEXP (cond, 0);
10119 XEXP (cond, 0) = XEXP (cond, 1);
10120 XEXP (cond, 1) = tmp;
10125 /* Implement -mvr4130-align. Go through each basic block and simulate the
10126 processor pipeline. If we find that a pair of instructions could execute
10127 in parallel, and the first of those instruction is not 8-byte aligned,
10128 insert a nop to make it aligned. */
10131 vr4130_align_insns (void)
10133 struct mips_sim state;
10134 rtx insn, subinsn, last, last2, next;
10139 /* LAST is the last instruction before INSN to have a nonzero length.
10140 LAST2 is the last such instruction before LAST. */
10144 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
10147 mips_sim_init (&state, alloca (state_size ()));
10148 for (insn = get_insns (); insn != 0; insn = next)
10150 unsigned int length;
10152 next = NEXT_INSN (insn);
10154 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
10155 This isn't really related to the alignment pass, but we do it on
10156 the fly to avoid a separate instruction walk. */
10157 vr4130_avoid_branch_rt_conflict (insn);
10159 if (USEFUL_INSN_P (insn))
10160 FOR_EACH_SUBINSN (subinsn, insn)
10162 mips_sim_wait_insn (&state, subinsn);
10164 /* If we want this instruction to issue in parallel with the
10165 previous one, make sure that the previous instruction is
10166 aligned. There are several reasons why this isn't worthwhile
10167 when the second instruction is a call:
10169 - Calls are less likely to be performance critical,
10170 - There's a good chance that the delay slot can execute
10171 in parallel with the call.
10172 - The return address would then be unaligned.
10174 In general, if we're going to insert a nop between instructions
10175 X and Y, it's better to insert it immediately after X. That
10176 way, if the nop makes Y aligned, it will also align any labels
10177 between X and Y. */
10178 if (state.insns_left != state.issue_rate
10179 && !CALL_P (subinsn))
10181 if (subinsn == SEQ_BEGIN (insn) && aligned_p)
10183 /* SUBINSN is the first instruction in INSN and INSN is
10184 aligned. We want to align the previous instruction
10185 instead, so insert a nop between LAST2 and LAST.
10187 Note that LAST could be either a single instruction
10188 or a branch with a delay slot. In the latter case,
10189 LAST, like INSN, is already aligned, but the delay
10190 slot must have some extra delay that stops it from
10191 issuing at the same time as the branch. We therefore
10192 insert a nop before the branch in order to align its
10194 emit_insn_after (gen_nop (), last2);
10197 else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
10199 /* SUBINSN is the delay slot of INSN, but INSN is
10200 currently unaligned. Insert a nop between
10201 LAST and INSN to align it. */
10202 emit_insn_after (gen_nop (), last);
10206 mips_sim_issue_insn (&state, subinsn);
10208 mips_sim_finish_insn (&state, insn);
10210 /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
10211 length = get_attr_length (insn);
10214 /* If the instruction is an asm statement or multi-instruction
10215 mips.md patern, the length is only an estimate. Insert an
10216 8 byte alignment after it so that the following instructions
10217 can be handled correctly. */
10218 if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
10219 && (recog_memoized (insn) < 0 || length >= 8))
10221 next = emit_insn_after (gen_align (GEN_INT (3)), insn);
10222 next = NEXT_INSN (next);
10223 mips_sim_next_cycle (&state);
/* A 4-byte (odd-word) length flips the 8-byte alignment parity.  */
10226 else if (length & 4)
10227 aligned_p = !aligned_p;
10232 /* See whether INSN is an aligned label. */
10233 if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
10239 /* Subroutine of mips_reorg. If there is a hazard between INSN
10240 and a previous instruction, avoid it by inserting nops after
10243 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
10244 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
10245 before using the value of that register. *HILO_DELAY counts the
10246 number of instructions since the last hilo hazard (that is,
10247 the number of instructions since the last mflo or mfhi).
10249 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
10250 for the next instruction.
10252 LO_REG is an rtx for the LO register, used in dependence checking. */
10255 mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
10256 rtx *delayed_reg, rtx lo_reg)
10261 if (!INSN_P (insn))
10264 pattern = PATTERN (insn);
10266 /* Do not put the whole function in .set noreorder if it contains
10267 an asm statement. We don't know whether there will be hazards
10268 between the asm statement and the gcc-generated code. */
10269 if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
10270 cfun->machine->all_noreorder_p = false;
10272 /* Ignore zero-length instructions (barriers and the like). */
10273 ninsns = get_attr_length (insn) / 4;
10277 /* Work out how many nops are needed. Note that we only care about
10278 registers that are explicitly mentioned in the instruction's pattern.
10279 It doesn't matter that calls use the argument registers or that they
10280 clobber hi and lo. */
10281 if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
10282 nops = 2 - *hilo_delay;
10283 else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
10288 /* Insert the nops between this instruction and the previous one.
10289 Each new nop takes us further from the last hilo hazard. */
10290 *hilo_delay += nops;
10292 emit_insn_after (gen_hazard_nop (), after);
10294 /* Set up the state for the next instruction. */
10295 *hilo_delay += ninsns;
/* Query the insn's "hazard" attribute to see whether its destination
   register is delayed for the following instruction.  */
10297 if (INSN_CODE (insn) >= 0)
10298 switch (get_attr_hazard (insn))
10308 set = single_set (insn);
10309 gcc_assert (set != 0);
10310 *delayed_reg = SET_DEST (set);
10316 /* Go through the instruction stream and insert nops where necessary.
10317 See if the whole function can then be put into .set noreorder &
/* NOTE(review): elided listing -- the return type, local declarations
   (hilo_delay, i) and loop/body braces are not shown here.  */
10321 mips_avoid_hazards (void)
10323 rtx insn, last_insn, lo_reg, delayed_reg;
10326 /* Force all instructions to be split into their final form. */
10327 split_all_insns_noflow ();
10329 /* Recalculate instruction lengths without taking nops into account. */
10330 cfun->machine->ignore_hazard_length_p = true;
10331 shorten_branches (get_insns ());
/* Assume noreorder is possible, then rule it out case by case below.  */
10333 cfun->machine->all_noreorder_p = true;
10335 /* Profiled functions can't be all noreorder because the profiler
10336 support uses assembler macros. */
10337 if (current_function_profile)
10338 cfun->machine->all_noreorder_p = false;
10340 /* Code compiled with -mfix-vr4120 can't be all noreorder because
10341 we rely on the assembler to work around some errata. */
10342 if (TARGET_FIX_VR4120)
10343 cfun->machine->all_noreorder_p = false;
10345 /* The same is true for -mfix-vr4130 if we might generate mflo or
10346 mfhi instructions. Note that we avoid using mflo and mfhi if
10347 the VR4130 macc and dmacc instructions are available instead;
10348 see the *mfhilo_{si,di}_macc patterns. */
10349 if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
10350 cfun->machine->all_noreorder_p = false;
10355 lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
/* Walk the insn stream; SEQUENCEs (filled delay slots) are processed
   one sub-insn at a time so hazards inside them are handled too.  */
10357 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
10360 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
10361 for (i = 0; i &lt; XVECLEN (PATTERN (insn), 0); i++)
10362 mips_avoid_hazard (last_insn, XVECEXP (PATTERN (insn), 0, i),
10363 &amp;hilo_delay, &amp;delayed_reg, lo_reg);
10365 mips_avoid_hazard (last_insn, insn, &amp;hilo_delay,
10366 &amp;delayed_reg, lo_reg);
10373 /* Implement TARGET_MACHINE_DEPENDENT_REORG. */
/* NOTE(review): the function header (presumably "static void mips_reorg
   (void)") is elided from this listing, as are some branches.  */
10378 mips16_lay_out_constants ();
10379 if (TARGET_EXPLICIT_RELOCS)
/* Fill branch delay slots before the hazard pass so that nops are
   inserted against the final instruction stream.  */
10381 if (mips_flag_delayed_branch)
10382 dbr_schedule (get_insns ());
10383 mips_avoid_hazards ();
10384 if (TUNE_MIPS4130 &amp;&amp; TARGET_VR4130_ALIGN)
10385 vr4130_align_insns ();
10389 /* This function does three things:
10391 - Register the special divsi3 and modsi3 functions if -mfix-vr4120.
10392 - Register the mips16 hardware floating point stubs.
10393 - Register the gofast functions if selected using --enable-gofast. */
10395 #include "config/gofast.h"
/* NOTE(review): the "static void" line and braces are elided here.  */
10398 mips_init_libfuncs (void)
/* -mfix-vr4120: route 32-bit signed divide/modulo through library
   routines that work around the VR4120 divide errata.  */
10400 if (TARGET_FIX_VR4120)
10402 set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
10403 set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
/* MIPS16 with a hard-float ABI: FP operations go through out-of-line
   stubs, since MIPS16 code cannot encode FP instructions directly.  */
10406 if (TARGET_MIPS16 &amp;&amp; TARGET_HARD_FLOAT_ABI)
10408 set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
10409 set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
10410 set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
10411 set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");
10413 set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
10414 set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
10415 set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
10416 set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
10417 set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
10418 set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
10419 set_optab_libfunc (unord_optab, SFmode, "__mips16_unordsf2");
10421 set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
10422 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
10423 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__mips16_floatunsisf");
/* Double-precision stubs are only needed for -mdouble-float.  */
10425 if (TARGET_DOUBLE_FLOAT)
10427 set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
10428 set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
10429 set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
10430 set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");
10432 set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
10433 set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
10434 set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
10435 set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
10436 set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
10437 set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
10438 set_optab_libfunc (unord_optab, DFmode, "__mips16_unorddf2");
10440 set_conv_libfunc (sext_optab, DFmode, SFmode, "__mips16_extendsfdf2");
10441 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__mips16_truncdfsf2");
10443 set_conv_libfunc (sfix_optab, SImode, DFmode, "__mips16_fix_truncdfsi");
10444 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__mips16_floatsidf");
10445 set_conv_libfunc (ufloat_optab, DFmode, SImode, "__mips16_floatunsidf");
/* No-op unless GCC was configured with --enable-gofast.  */
10449 gofast_maybe_init_libfuncs ();
10452 /* Return a number assessing the cost of moving a register in class
10453 FROM to class TO. The classes are expressed using the enumeration
10454 values such as `GENERAL_REGS'. A value of 2 is the default; other
10455 values are interpreted relative to that.
10457 It is not required that the cost always equal 2 when FROM is the
10458 same as TO; on some machines it is expensive to move between
10459 registers if they are not general registers.
10461 If reload sees an insn consisting of a single `set' between two
10462 hard registers, and if `REGISTER_MOVE_COST' applied to their
10463 classes returns a value of 2, reload does not check to ensure that
10464 the constraints of the insn are met. Setting a cost of other than
10465 2 will allow reload to verify that the constraints are met. You
10466 should do this if the `movM' pattern's constraints do not allow
10469 ??? We make the cost of moving from HI/LO into general
10470 registers the same as for one of moving general registers to
10471 HI/LO for TARGET_MIPS16 in order to prevent allocating a
10472 pseudo to HI/LO. This might hurt optimizations though, it
10473 isn't clear if it is wise. And it might not work in all cases. We
10474 could solve the DImode LO reg problem by using a multiply, just
10475 like reload_{in,out}si. We could solve the SImode/HImode HI reg
10476 problem by using divide instructions. divu puts the remainder in
10477 the HI reg, so doing a divide by -1 will move the value in the HI
10478 reg for all values except -1. We could handle that case by using a
10479 signed divide, e.g. -1 / 2 (or maybe 1 / -2?). We'd have to emit
10480 a compare/branch to test the input value to see which instruction
10481 we need to use. This gets pretty messy, but it is feasible. */
/* NOTE(review): the concrete return values on most branches are elided
   from this listing; only the class-dispatch structure is visible.  */
10484 mips_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
10485 enum reg_class to, enum reg_class from)
10487 if (from == M16_REGS &amp;&amp; reg_class_subset_p (to, GENERAL_REGS))
10489 else if (from == M16_NA_REGS &amp;&amp; reg_class_subset_p (to, GENERAL_REGS))
10491 else if (reg_class_subset_p (from, GENERAL_REGS))
10493 if (to == M16_REGS)
10495 else if (to == M16_NA_REGS)
10497 else if (reg_class_subset_p (to, GENERAL_REGS))
10504 else if (to == FP_REGS)
10506 else if (reg_class_subset_p (to, ACC_REGS))
10513 else if (reg_class_subset_p (to, ALL_COP_REGS))
10518 else if (from == FP_REGS)
10520 if (reg_class_subset_p (to, GENERAL_REGS))
10522 else if (to == FP_REGS)
10524 else if (to == ST_REGS)
10527 else if (reg_class_subset_p (from, ACC_REGS))
10529 if (reg_class_subset_p (to, GENERAL_REGS))
10537 else if (from == ST_REGS &amp;&amp; reg_class_subset_p (to, GENERAL_REGS))
10539 else if (reg_class_subset_p (from, ALL_COP_REGS))
10545 ??? What cases are these? Shouldn't we return 2 here? */
10550 /* Return the length of INSN. LENGTH is the initial length computed by
10551 attributes in the machine-description file. */
/* NOTE(review): the return type, the LENGTH adjustments inside each
   branch and the switch cases are elided from this listing.  */
10554 mips_adjust_insn_length (rtx insn, int length)
10556 /* An unconditional jump has an unfilled delay slot if it is not part
10557 of a sequence. A conditional jump normally has a delay slot, but
10558 does not on MIPS16. */
10559 if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
10562 /* See how many nops might be needed to avoid hardware hazards. */
/* Only counts hazard nops when the hazard pass has not disabled
   hazard lengths (see cfun->machine->ignore_hazard_length_p).  */
10563 if (!cfun->machine->ignore_hazard_length_p &amp;&amp; INSN_CODE (insn) >= 0)
10564 switch (get_attr_hazard (insn))
10578 /* All MIPS16 instructions are a measly two bytes. */
10586 /* Return an asm sequence to start a noat block and load the address
10587 of a label into $1. */
/* NOTE(review): the return type line and the switch over mips_abi that
   selects among the templates below are elided from this listing.  */
10590 mips_output_load_label (void)
10592 if (TARGET_EXPLICIT_RELOCS)
/* GOT page/offset addressing; 32-bit vs 64-bit pointer variants.  */
10596 return "%[lw\t%@,%%got_page(%0)(%+)\n\taddiu\t%@,%@,%%got_ofst(%0)";
10599 return "%[ld\t%@,%%got_page(%0)(%+)\n\tdaddiu\t%@,%@,%%got_ofst(%0)";
/* Non-explicit-relocs GOT load; %# emits a nop in the load delay
   slot on ISAs that need one.  */
10602 if (ISA_HAS_LOAD_DELAY)
10603 return "%[lw\t%@,%%got(%0)(%+)%#\n\taddiu\t%@,%@,%%lo(%0)";
10604 return "%[lw\t%@,%%got(%0)(%+)\n\taddiu\t%@,%@,%%lo(%0)";
/* Fallback: let the assembler expand (d)la macros.  */
10608 if (Pmode == DImode)
10609 return "%[dla\t%@,%0";
10611 return "%[la\t%@,%0";
10615 /* Return the assembly code for INSN, which has the operands given by
10616 OPERANDS, and which branches to OPERANDS[1] if some condition is true.
10617 BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[1]
10618 is in range of a direct branch. BRANCH_IF_FALSE is an inverted
10619 version of BRANCH_IF_TRUE. */
/* NOTE(review): the return type line, braces and the length-based
   dispatch between the short and long forms are elided here.  */
10622 mips_output_conditional_branch (rtx insn, rtx *operands,
10623 const char *branch_if_true,
10624 const char *branch_if_false)
10626 unsigned int length;
10627 rtx taken, not_taken;
10629 length = get_attr_length (insn);
10632 /* Just a simple conditional branch. */
10633 mips_branch_likely = (final_sequence &amp;&amp; INSN_ANNULLED_BRANCH_P (insn));
10634 return branch_if_true;
10637 /* Generate a reversed branch around a direct jump. This fallback does
10638 not use branch-likely instructions. */
10639 mips_branch_likely = false;
10640 not_taken = gen_label_rtx ();
10641 taken = operands[1];
10643 /* Generate the reversed branch to NOT_TAKEN. */
10644 operands[1] = not_taken;
10645 output_asm_insn (branch_if_false, operands);
10647 /* If INSN has a delay slot, we must provide delay slots for both the
10648 branch to NOT_TAKEN and the conditional jump. We must also ensure
10649 that INSN's delay slot is executed in the appropriate cases. */
10650 if (final_sequence)
10652 /* This first delay slot will always be executed, so use INSN's
10653 delay slot if it is not annulled. */
10654 if (!INSN_ANNULLED_BRANCH_P (insn))
10656 final_scan_insn (XVECEXP (final_sequence, 0, 1),
10657 asm_out_file, optimize, 1, NULL);
/* Mark the delay-slot insn deleted so it is not output twice.  */
10658 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
10661 output_asm_insn ("nop", 0);
10662 fprintf (asm_out_file, "\n");
10665 /* Output the unconditional branch to TAKEN. */
10667 output_asm_insn ("j\t%0%/", &amp;taken);
/* Out-of-range target: load the label address and jump through $1.  */
10670 output_asm_insn (mips_output_load_label (), &amp;taken);
10671 output_asm_insn ("jr\t%@%]%/", 0);
10674 /* Now deal with its delay slot; see above. */
10675 if (final_sequence)
10677 /* This delay slot will only be executed if the branch is taken.
10678 Use INSN's delay slot if it is annulled. */
10679 if (INSN_ANNULLED_BRANCH_P (insn))
10681 final_scan_insn (XVECEXP (final_sequence, 0, 1),
10682 asm_out_file, optimize, 1, NULL);
10683 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
10686 output_asm_insn ("nop", 0);
10687 fprintf (asm_out_file, "\n");
10690 /* Output NOT_TAKEN. */
10691 (*targetm.asm_out.internal_label) (asm_out_file, "L",
10692 CODE_LABEL_NUMBER (not_taken));
10696 /* Return the assembly code for INSN, which branches to OPERANDS[1]
10697 if some ordered condition is true. The condition is given by
10698 OPERANDS[0] if !INVERTED_P, otherwise it is the inverse of
10699 OPERANDS[0]. OPERANDS[2] is the comparison's first operand;
10700 its second is always zero. */
/* NOTE(review): the return type line, braces and the case labels of
   the switch below are elided from this listing.  */
10703 mips_output_order_conditional_branch (rtx insn, rtx *operands, bool inverted_p)
10705 const char *branch[2];
10707 /* Make BRANCH[1] branch to OPERANDS[1] when the condition is true.
10708 Make BRANCH[0] branch on the inverse condition. */
10709 switch (GET_CODE (operands[0]))
10711 /* These cases are equivalent to comparisons against zero. */
10713 inverted_p = !inverted_p;
10714 /* Fall through. */
10716 branch[!inverted_p] = MIPS_BRANCH ("bne", "%2,%.,%1");
10717 branch[inverted_p] = MIPS_BRANCH ("beq", "%2,%.,%1");
10720 /* These cases are always true or always false. */
10722 inverted_p = !inverted_p;
10723 /* Fall through. */
/* beq $0,$0 is an unconditional branch; bne $0,$0 never branches.  */
10725 branch[!inverted_p] = MIPS_BRANCH ("beq", "%.,%.,%1");
10726 branch[inverted_p] = MIPS_BRANCH ("bne", "%.,%.,%1");
/* General case: branch on the sign/zero condition of operand 2.  */
10730 branch[!inverted_p] = MIPS_BRANCH ("b%C0z", "%2,%1");
10731 branch[inverted_p] = MIPS_BRANCH ("b%N0z", "%2,%1");
10734 return mips_output_conditional_branch (insn, operands, branch[1], branch[0]);
10737 /* Used to output div or ddiv instruction DIVISION, which has the operands
10738 given by OPERANDS. Add in a divide-by-zero check if needed.
10740 When working around R4000 and R4400 errata, we need to make sure that
10741 the division is not immediately followed by a shift[1][2]. We also
10742 need to stop the division from being put into a branch delay slot[3].
10743 The easiest way to avoid both problems is to add a nop after the
10744 division. When a divide-by-zero check is needed, this nop can be
10745 used to fill the branch delay slot.
10747 [1] If a double-word or a variable shift executes immediately
10748 after starting an integer division, the shift may give an
10749 incorrect result. See quotations of errata #16 and #28 from
10750 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
10751 in mips.md for details.
10753 [2] A similar bug to [1] exists for all revisions of the
10754 R4000 and the R4400 when run in an MC configuration.
10755 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
10757 "19. In this following sequence:
10759 ddiv (or ddivu or div or divu)
10760 dsll32 (or dsrl32, dsra32)
10762 if an MPT stall occurs, while the divide is slipping the cpu
10763 pipeline, then the following double shift would end up with an
10766 Workaround: The compiler needs to avoid generating any
10767 sequence with divide followed by extended double shift."
10769 This erratum is also present in "MIPS R4400MC Errata, Processor
10770 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
10771 & 3.0" as errata #10 and #4, respectively.
10773 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
10774 (also valid for MIPS R4000MC processors):
10776 "52. R4000SC: This bug does not apply for the R4000PC.
10778 There are two flavors of this bug:
10780 1) If the instruction just after divide takes an RF exception
10781 (tlb-refill, tlb-invalid) and gets an instruction cache
10782 miss (both primary and secondary) and the line which is
10783 currently in secondary cache at this index had the first
10784 data word, where the bits 5..2 are set, then R4000 would
10785 get a wrong result for the div.
10790 ------------------- # end-of page. -tlb-refill
10795 ------------------- # end-of page. -tlb-invalid
10798 2) If the divide is in the taken branch delay slot, where the
10799 target takes RF exception and gets an I-cache miss for the
10800 exception vector or where I-cache miss occurs for the
10801 target address, under the above mentioned scenarios, the
10802 div would get wrong results.
10805 j r2 # to next page mapped or unmapped
10806 div r8,r9 # this bug would be there as long
10807 # as there is an ICache miss and
10808 nop # the "data pattern" is present
10811 beq r0, r0, NextPage # to Next page
10815 This bug is present for div, divu, ddiv, and ddivu
10818 Workaround: For item 1), OS could make sure that the next page
10819 after the divide instruction is also mapped. For item 2), the
10820 compiler could make sure that the divide instruction is not in
10821 the branch delay slot."
10823 These processors have PRId values of 0x00004220 and 0x00004300 for
10824 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
/* NOTE(review): the return type line, the initialization of S (which
   presumably starts as DIVISION) and the final return are elided.  */
10827 mips_output_division (const char *division, rtx *operands)
/* R4000/R4400 errata: emit the division early so a nop can follow it;
   see the long erratum comment above this function.  */
10832 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
10834 output_asm_insn (s, operands);
10837 if (TARGET_CHECK_ZERO_DIV)
/* ISA supports conditional traps: branch-around break sequence.  */
10841 output_asm_insn (s, operands);
10842 s = "bnez\t%2,1f\n\tbreak\t7\n1:";
10844 else if (GENERATE_DIVIDE_TRAPS)
/* teq raises the divide-by-zero trap (code 7) directly.  */
10846 output_asm_insn (s, operands);
10847 s = "teq\t%2,%.,7";
10851 output_asm_insn ("%(bne\t%2,%.,1f", operands);
10852 output_asm_insn (s, operands);
10853 s = "break\t7%)\n1:";
10859 /* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
10860 with a final "000" replaced by "k". Ignore case.
10862 Note: this function is shared between GCC and GAS. */
/* NOTE(review): the "static bool" line and braces are elided here.  */
10865 mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
/* Advance both pointers over the common case-insensitive prefix.  */
10867 while (*given != 0 &amp;&amp; TOLOWER (*given) == TOLOWER (*canonical))
10868 given++, canonical++;
/* Match if both strings ended together, or if the remainder is the
   "000" vs "k" suffix equivalence (e.g. "r4000" matches "r4k").  */
10870 return ((*given == 0 &amp;&amp; *canonical == 0)
10871 || (strcmp (canonical, "000") == 0 &amp;&amp; strcasecmp (given, "k") == 0));
10875 /* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
10876 CPU name. We've traditionally allowed a lot of variation here.
10878 Note: this function is shared between GCC and GAS. */
/* NOTE(review): the "static bool" line, braces, early returns and the
   pointer increments that skip the prefixes are elided here.  */
10881 mips_matching_cpu_name_p (const char *canonical, const char *given)
10883 /* First see if the name matches exactly, or with a final "000"
10884 turned into "k". */
10885 if (mips_strict_matching_cpu_name_p (canonical, given))
10888 /* If not, try comparing based on numerical designation alone.
10889 See if GIVEN is an unadorned number, or 'r' followed by a number. */
10890 if (TOLOWER (*given) == 'r')
10892 if (!ISDIGIT (*given))
10895 /* Skip over some well-known prefixes in the canonical name,
10896 hoping to find a number there too. */
10897 if (TOLOWER (canonical[0]) == 'v' &amp;&amp; TOLOWER (canonical[1]) == 'r')
10899 else if (TOLOWER (canonical[0]) == 'r' &amp;&amp; TOLOWER (canonical[1]) == 'm')
10901 else if (TOLOWER (canonical[0]) == 'r')
/* Re-run the strict comparison on the de-prefixed names.  */
10904 return mips_strict_matching_cpu_name_p (canonical, given);
10908 /* Return the mips_cpu_info entry for the processor or ISA given
10909 by CPU_STRING. Return null if the string isn't recognized.
10911 A similar function exists in GAS. */
10913 static const struct mips_cpu_info *
/* NOTE(review): braces, the declaration of S and the return statements
   (including the final "not found" return) are elided here.  */
10914 mips_parse_cpu (const char *cpu_string)
10916 const struct mips_cpu_info *p;
10919 /* In the past, we allowed upper-case CPU names, but it doesn't
10920 work well with the multilib machinery. */
10921 for (s = cpu_string; *s != 0; s++)
10924 warning (0, "the cpu name must be lower case");
10928 /* 'from-abi' selects the most compatible architecture for the given
10929 ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs. For the
10930 EABIs, we have to decide whether we're using the 32-bit or 64-bit
10931 version. Look first at the -mgp options, if given, otherwise base
10932 the choice on MASK_64BIT in TARGET_DEFAULT. */
10933 if (strcasecmp (cpu_string, "from-abi") == 0)
10934 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
10935 : ABI_NEEDS_64BIT_REGS ? 3
10936 : (TARGET_64BIT ? 3 : 1));
10938 /* 'default' has traditionally been a no-op. Probably not very useful. */
10939 if (strcasecmp (cpu_string, "default") == 0)
/* Fall back to a linear search of the CPU table.  */
10942 for (p = mips_cpu_info_table; p->name != 0; p++)
10943 if (mips_matching_cpu_name_p (p->name, cpu_string))
10950 /* Return the processor associated with the given ISA level, or null
10951 if the ISA isn't valid. */
10953 static const struct mips_cpu_info *
/* NOTE(review): braces, the ISA comparison in the loop body and the
   return statements are elided from this listing.  */
10954 mips_cpu_info_from_isa (int isa)
10956 const struct mips_cpu_info *p;
10958 for (p = mips_cpu_info_table; p->name != 0; p++)
10965 /* Implement HARD_REGNO_NREGS. The size of FP registers is controlled
10966 by UNITS_PER_FPREG. The size of FP status registers is always 4, because
10967 they only hold condition code modes, and CCmode is always considered to
10968 be 4 bytes wide. All other registers are word sized. */
/* NOTE(review): the return type line and braces are elided here.  */
10971 mips_hard_regno_nregs (int regno, enum machine_mode mode)
/* FP status (condition-code) registers: 4 bytes each.  */
10973 if (ST_REG_P (regno))
10974 return ((GET_MODE_SIZE (mode) + 3) / 4);
/* Non-FP registers: one word each.  */
10975 else if (! FP_REG_P (regno))
10976 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
10978 return ((GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG);
10981 /* Implement TARGET_RETURN_IN_MEMORY. Under the old (i.e., 32 and O64 ABIs)
10982 all BLKmode objects are returned in memory. Under the new (N32 and
10983 64-bit MIPS ABIs) small structures are returned in a register.
10984 Objects with varying size must still be returned in memory, of
/* NOTE(review): the return type line, braces and the old-ABI/new-ABI
   dispatch condition are elided from this listing.  */
10988 mips_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
/* Old ABIs: any BLKmode value goes in memory.  */
10991 return (TYPE_MODE (type) == BLKmode);
/* New ABIs: in memory if larger than two words or variably sized
   (int_size_in_bytes returns -1 for variable-size types).  */
10993 return ((int_size_in_bytes (type) > (2 * UNITS_PER_WORD))
10994 || (int_size_in_bytes (type) == -1));
/* Implement TARGET_STRICT_ARGUMENT_NAMING: the new ABIs use strict
   argument naming; the old (O32/O64) ABIs do not.
   NOTE(review): the "static bool" line and braces are elided here.  */
10998 mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
11000 return !TARGET_OLDABI;
11003 /* Return true if INSN is a multiply-add or multiply-subtract
11004 instruction and PREV assigns to the accumulator operand. */
/* NOTE(review): the "static bool" line, braces, the extraction of the
   SET_SRC into X and the return statements are elided here.  */
11007 mips_linked_madd_p (rtx prev, rtx insn)
11011 x = single_set (insn);
/* madd form: (plus (mult a b) acc) where PREV sets ACC.  */
11017 if (GET_CODE (x) == PLUS
11018 &amp;&amp; GET_CODE (XEXP (x, 0)) == MULT
11019 &amp;&amp; reg_set_p (XEXP (x, 1), prev))
/* msub form: (minus acc (mult a b)) where PREV sets ACC.  */
11022 if (GET_CODE (x) == MINUS
11023 &amp;&amp; GET_CODE (XEXP (x, 1)) == MULT
11024 &amp;&amp; reg_set_p (XEXP (x, 0), prev))
11030 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
11031 that may clobber hi or lo. */
11033 static rtx mips_macc_chains_last_hilo;
11035 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
11036 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
/* NOTE(review): the "static void" line, braces and any recog guard are
   elided from this listing.  */
11039 mips_macc_chains_record (rtx insn)
11041 if (get_attr_may_clobber_hilo (insn))
11042 mips_macc_chains_last_hilo = insn;
11045 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
11046 has NREADY elements, looking for a multiply-add or multiply-subtract
11047 instruction that is cumulative with mips_macc_chains_last_hilo.
11048 If there is one, promote it ahead of anything else that might
11049 clobber hi or lo. */
/* NOTE(review): the "static void" line, local declarations (i, j),
   braces and the loop-exit breaks are elided here.  */
11052 mips_macc_chains_reorder (rtx *ready, int nready)
/* The ready queue is ordered with the next insn to issue LAST, so
   both scans run from the queue head downwards.  */
11056 if (mips_macc_chains_last_hilo != 0)
11057 for (i = nready - 1; i >= 0; i--)
11058 if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
11060 for (j = nready - 1; j > i; j--)
11061 if (recog_memoized (ready[j]) >= 0
11062 &amp;&amp; get_attr_may_clobber_hilo (ready[j]))
11064 mips_promote_ready (ready, i, j);
11071 /* The last instruction to be scheduled. */
11073 static rtx vr4130_last_insn;
11075 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
11076 points to an rtx that is initially an instruction. Nullify the rtx
11077 if the instruction uses the value of register X. */
/* NOTE(review): the "static void" line, braces and the additional REG_P
   check on X are elided from this listing.  */
11080 vr4130_true_reg_dependence_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
11082 rtx *insn_ptr = data;
11085 &amp;&amp; reg_referenced_p (x, PATTERN (*insn_ptr)))
11089 /* Return true if there is true register dependence between vr4130_last_insn
/* Walk everything vr4130_last_insn stores; the callback nulls INSN if
   it reads one of those registers, so a null INSN means "dependent".
   NOTE(review): the "static bool" line and the final return that tests
   INSN for null are elided here.  */
11093 vr4130_true_reg_dependence_p (rtx insn)
11095 note_stores (PATTERN (vr4130_last_insn),
11096 vr4130_true_reg_dependence_p_1, &amp;insn);
11100 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
11101 the ready queue and that INSN2 is the instruction after it, return
11102 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
11103 in which INSN1 and INSN2 can probably issue in parallel, but for
11104 which (INSN2, INSN1) should be less sensitive to instruction
11105 alignment than (INSN1, INSN2). See 4130.md for more details. */
/* NOTE(review): the "static bool" line, braces and several return
   statements are elided from this listing.  */
11108 vr4130_swap_insns_p (rtx insn1, rtx insn2)
11110 sd_iterator_def sd_it;
11113 /* Check for the following case:
11115 1) there is some other instruction X with an anti dependence on INSN1;
11116 2) X has a higher priority than INSN2; and
11117 3) X is an arithmetic instruction (and thus has no unit restrictions).
11119 If INSN1 is the last instruction blocking X, it would better to
11120 choose (INSN1, X) over (INSN2, INSN1). */
11121 FOR_EACH_DEP (insn1, SD_LIST_FORW, sd_it, dep)
11122 if (DEP_TYPE (dep) == REG_DEP_ANTI
11123 &amp;&amp; INSN_PRIORITY (DEP_CON (dep)) > INSN_PRIORITY (insn2)
11124 &amp;&amp; recog_memoized (DEP_CON (dep)) >= 0
11125 &amp;&amp; get_attr_vr4130_class (DEP_CON (dep)) == VR4130_CLASS_ALU)
11128 if (vr4130_last_insn != 0
11129 &amp;&amp; recog_memoized (insn1) >= 0
11130 &amp;&amp; recog_memoized (insn2) >= 0)
11132 /* See whether INSN1 and INSN2 use different execution units,
11133 or if they are both ALU-type instructions. If so, they can
11134 probably execute in parallel. */
11135 enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
11136 enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
11137 if (class1 != class2 || class1 == VR4130_CLASS_ALU)
11139 /* If only one of the instructions has a dependence on
11140 vr4130_last_insn, prefer to schedule the other one first. */
11141 bool dep1 = vr4130_true_reg_dependence_p (insn1);
11142 bool dep2 = vr4130_true_reg_dependence_p (insn2);
11146 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
11147 is not an ALU-type instruction and if INSN1 uses the same
11148 execution unit. (Note that if this condition holds, we already
11149 know that INSN2 uses a different execution unit.) */
11150 if (class1 != VR4130_CLASS_ALU
11151 &amp;&amp; recog_memoized (vr4130_last_insn) >= 0
11152 &amp;&amp; class1 == get_attr_vr4130_class (vr4130_last_insn))
11159 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
11160 queue with at least two instructions. Swap the first two if
11161 vr4130_swap_insns_p says that it could be worthwhile. */
/* NOTE(review): the "static void" line and braces are elided here.
   ready[nready - 1] is the head of the queue (next insn to issue).  */
11164 vr4130_reorder (rtx *ready, int nready)
11166 if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
11167 mips_promote_ready (ready, nready - 2, nready - 1);
11170 /* Remove the instruction at index LOWER from ready queue READY and
11171 reinsert it in front of the instruction at index HIGHER. LOWER must
/* NOTE(review): the "static void" line, braces and the declarations of
   NEW_HEAD and I are elided from this listing.  */
11175 mips_promote_ready (rtx *ready, int lower, int higher)
/* Rotate READY[LOWER..HIGHER] left by one, moving READY[LOWER] to
   index HIGHER (the more-preferred end of the queue).  */
11180 new_head = ready[lower];
11181 for (i = lower; i &lt; higher; i++)
11182 ready[i] = ready[i + 1];
11183 ready[i] = new_head;
11186 /* If the priority of the instruction at POS2 in the ready queue READY
11187 is within LIMIT units of that of the instruction at POS1, swap the
11188 instructions if POS2 is not already less than POS1. */
/* NOTE(review): the "static void" line, braces, the POS1 > POS2 part of
   the condition and the declaration of TEMP are elided here.  */
11191 mips_maybe_swap_ready (rtx *ready, int pos1, int pos2, int limit)
11194 &amp;&amp; INSN_PRIORITY (ready[pos1]) + limit >= INSN_PRIORITY (ready[pos2]))
11197 temp = ready[pos1];
11198 ready[pos1] = ready[pos2];
11199 ready[pos2] = temp;
11203 /* Record whether last 74k AGEN instruction was a load or store. */
11205 static enum attr_type mips_last_74k_agen_insn = TYPE_UNKNOWN;
11207 /* Initialize mips_last_74k_agen_insn from INSN. A null argument
11208 resets to TYPE_UNKNOWN state. */
/* NOTE(review): the "static void" line and braces are elided here.  */
11211 mips_74k_agen_init (rtx insn)
11213 if (!insn || !NONJUMP_INSN_P (insn))
11214 mips_last_74k_agen_insn = TYPE_UNKNOWN;
11215 else if (USEFUL_INSN_P (insn))
/* Only loads and stores update the recorded AGEN state; other
   instruction types leave it unchanged.  */
11217 enum attr_type type = get_attr_type (insn);
11218 if (type == TYPE_LOAD || type == TYPE_STORE)
11219 mips_last_74k_agen_insn = type;
11223 /* A TUNE_74K helper function. The 74K AGEN pipeline likes multiple
11224 loads to be grouped together, and multiple stores to be grouped
11225 together. Swap things around in the ready queue to make this happen. */
/* NOTE(review): the "static void" line, braces, the initialization of
   STORE_POS/LOAD_POS to -1 and the remaining case labels are elided.  */
11228 mips_74k_agen_reorder (rtx *ready, int nready)
11231 int store_pos, load_pos;
/* Scan from the queue head (index nready - 1) to find the first load
   and the first store that would issue.  */
11236 for (i = nready - 1; i >= 0; i--)
11238 rtx insn = ready[i];
11239 if (USEFUL_INSN_P (insn))
11240 switch (get_attr_type (insn))
11243 if (store_pos == -1)
11248 if (load_pos == -1)
/* Nothing to group unless the queue contains both kinds.  */
11257 if (load_pos == -1 || store_pos == -1)
11260 switch (mips_last_74k_agen_insn)
11263 /* Prefer to schedule loads since they have a higher latency. */
11265 /* Swap loads to the front of the queue. */
11266 mips_maybe_swap_ready (ready, load_pos, store_pos, 4);
11269 /* Swap stores to the front of the queue. */
11270 mips_maybe_swap_ready (ready, store_pos, load_pos, 4);
11277 /* Implement TARGET_SCHED_INIT. */
/* Reset all per-block scheduler state (macc chains, VR4130 tracking and
   74k AGEN grouping) at the start of each scheduling region.
   NOTE(review): the "static void" line and braces are elided here.  */
11280 mips_sched_init (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
11281 int max_ready ATTRIBUTE_UNUSED)
11283 mips_macc_chains_last_hilo = 0;
11284 vr4130_last_insn = 0;
11285 mips_74k_agen_init (NULL_RTX);
11288 /* Implement TARGET_SCHED_REORDER and TARGET_SCHED_REORDER2. */
/* NOTE(review): the "static int" line, braces and parts of the guard
   conditions (e.g. the *nreadyp size checks and TUNE_MIPS4130/TUNE_74K
   tests) are elided from this listing.  */
11291 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
11292 rtx *ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
/* Macc chaining runs before reload only.  */
11294 if (!reload_completed
11295 &amp;&amp; TUNE_MACC_CHAINS
11297 mips_macc_chains_reorder (ready, *nreadyp);
/* VR4130 pair reordering is only useful once register allocation has
   fixed the instructions, and only without explicit alignment.  */
11298 if (reload_completed
11300 &amp;&amp; !TARGET_VR4130_ALIGN
11302 vr4130_reorder (ready, *nreadyp);
11304 mips_74k_agen_reorder (ready, *nreadyp);
11305 return mips_issue_rate ();
11308 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
/* NOTE(review): the "static int" line, braces, the case labels of the
   switch and the returned MORE adjustments are elided here.  */
11311 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
11312 rtx insn, int more)
11315 mips_74k_agen_init (insn);
11316 switch (GET_CODE (PATTERN (insn)))
11320 /* Don't count USEs and CLOBBERs against the issue rate. */
/* For a real instruction, update the tuning trackers.  */
11325 if (!reload_completed &amp;&amp; TUNE_MACC_CHAINS)
11326 mips_macc_chains_record (insn);
11327 vr4130_last_insn = insn;
11333 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
11334 dependencies have no cost, except on the 20Kc where output-dependence
11335 is treated like input-dependence. */
/* NOTE(review): the "static int" line, braces, the 20Kc tune test and
   the return statements are elided from this listing.  */
11338 mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
11339 rtx dep ATTRIBUTE_UNUSED, int cost)
11341 if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
/* A nonzero note kind means an anti or output dependence; true
   (data) dependences have note kind 0 and keep COST.  */
11344 if (REG_NOTE_KIND (link) != 0)
11349 /* Return the number of instructions that can be issued per cycle. */
/* NOTE(review): the "static int" line, braces, the switch head over
   mips_tune and the numeric return values are elided here.  */
11352 mips_issue_rate (void)
11356 case PROCESSOR_74KC:
11357 case PROCESSOR_74KF2_1:
11358 case PROCESSOR_74KF1_1:
11359 case PROCESSOR_74KF3_2:
11360 /* The 74k is not strictly quad-issue cpu, but can be seen as one
11361 by the scheduler. It can issue 1 ALU, 1 AGEN and 2 FPU insns,
11362 but in reality only a maximum of 3 insns can be issued as the
11363 floating point load/stores also require a slot in the AGEN pipe. */
/* Dual-issue processors.  */
11366 case PROCESSOR_20KC:
11367 case PROCESSOR_R4130:
11368 case PROCESSOR_R5400:
11369 case PROCESSOR_R5500:
11370 case PROCESSOR_R7000:
11371 case PROCESSOR_R9000:
11374 case PROCESSOR_SB1:
11375 case PROCESSOR_SB1A:
11376 /* This is actually 4, but we get better performance if we claim 3.
11377 This is partly because of unwanted speculative code motion with the
11378 larger number, and partly because in most common cases we can't
11379 reach the theoretical max of 4. */
11387 /* Implements TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
11388 be as wide as the scheduling freedom in the DFA. */
/* NOTE(review): the "static int" line, braces, the SB-1 tune test and
   the return values are elided from this listing.  */
11391 mips_multipass_dfa_lookahead (void)
11393 /* Can schedule up to 4 of the 6 function units in any one cycle. */
11400 /* Implements a store data bypass check. We need this because the cprestore
11401 pattern is type store, but defined using an UNSPEC. This UNSPEC causes the
11402 default routine to abort. We just return false for that case. */
11403 /* ??? Should try to give a better result here than assuming false. */
11406 mips_store_data_bypass_p (rtx out_insn, rtx in_insn)
/* UNSPEC_VOLATILE patterns (the cprestore case described above) are
   handled specially; the early-return is in an elided line.  */
11408 if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE)
/* Note the inversion: this helper answers "does a bypass NOT apply",
   the opposite sense of store_data_bypass_p.  */
11411 return ! store_data_bypass_p (out_insn, in_insn);
11414 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
11415 return the first operand of the associated "pref" or "prefx" insn. */
11418 mips_prefetch_cookie (rtx write, rtx locality)
/* The hint encodes WRITE (0 for load, 1 for store) plus an offset
   selected by the LOCALITY value: streamed, normal, or retained.  */
11420 /* store_streamed / load_streamed. */
11421 if (INTVAL (locality) <= 0)
11422 return GEN_INT (INTVAL (write) + 4);
11424 /* store / load. */
11425 if (INTVAL (locality) <= 2)
11428 /* store_retained / load_retained. */
11429 return GEN_INT (INTVAL (write) + 6);
11432 /* MIPS builtin function support. */
/* Describes one __builtin_mips_* function: the instruction it expands
   to, how to expand it, its prototype, and the target flags that must
   be enabled for it to exist.  */
11434 struct builtin_description
11436 /* The code of the main .md file instruction. See mips_builtin_type
11437 for more information. */
11438 enum insn_code icode;
11440 /* The floating-point comparison code to use with ICODE, if any. */
11441 enum mips_fp_condition cond;
11443 /* The name of the builtin function. */
11446 /* Specifies how the function should be expanded. */
11447 enum mips_builtin_type builtin_type;
11449 /* The function's prototype. */
11450 enum mips_function_type function_type;
11452 /* The target flags required for this function. */
/* The macros below build builtin_description initializers.  They rely
   on token pasting to form both the CODE_FOR_mips_* insn code and the
   "__builtin_mips_*" function name from the same INSN argument.  */
11456 /* Define a MIPS_BUILTIN_DIRECT function for instruction CODE_FOR_mips_<INSN>.
11457 FUNCTION_TYPE and TARGET_FLAGS are builtin_description fields. */
11458 #define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
11459 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
11460 MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, TARGET_FLAGS }
11462 /* Define __builtin_mips_<INSN>_<COND>_{s,d}, both of which require
11464 #define CMP_SCALAR_BUILTINS(INSN, COND, TARGET_FLAGS) \
11465 { CODE_FOR_mips_ ## INSN ## _cond_s, MIPS_FP_COND_ ## COND, \
11466 "__builtin_mips_" #INSN "_" #COND "_s", \
11467 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, TARGET_FLAGS }, \
11468 { CODE_FOR_mips_ ## INSN ## _cond_d, MIPS_FP_COND_ ## COND, \
11469 "__builtin_mips_" #INSN "_" #COND "_d", \
11470 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, TARGET_FLAGS }
11472 /* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
11473 The lower and upper forms require TARGET_FLAGS while the any and all
11474 forms require MASK_MIPS3D. */
11475 #define CMP_PS_BUILTINS(INSN, COND, TARGET_FLAGS) \
11476 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11477 "__builtin_mips_any_" #INSN "_" #COND "_ps", \
11478 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
11479 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11480 "__builtin_mips_all_" #INSN "_" #COND "_ps", \
11481 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
11482 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11483 "__builtin_mips_lower_" #INSN "_" #COND "_ps", \
11484 MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }, \
11485 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11486 "__builtin_mips_upper_" #INSN "_" #COND "_ps", \
11487 MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }
11489 /* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s. The functions
11490 require MASK_MIPS3D. */
11491 #define CMP_4S_BUILTINS(INSN, COND) \
11492 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
11493 "__builtin_mips_any_" #INSN "_" #COND "_4s", \
11494 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11496 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
11497 "__builtin_mips_all_" #INSN "_" #COND "_4s", \
11498 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11501 /* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps. The comparison
11502 instruction requires TARGET_FLAGS. */
11503 #define MOVTF_BUILTINS(INSN, COND, TARGET_FLAGS) \
11504 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11505 "__builtin_mips_movt_" #INSN "_" #COND "_ps", \
11506 MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11508 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11509 "__builtin_mips_movf_" #INSN "_" #COND "_ps", \
11510 MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
/* CMP_BUILTINS instantiates every comparison-related builtin family
   (movt/movf, scalar, paired-single and 4s forms) for one c.cond.fmt
   condition; it is expanded once per condition via MIPS_FP_CONDITIONS
   in mips_bdesc below.  */
11513 /* Define all the builtins related to c.cond.fmt condition COND. */
11514 #define CMP_BUILTINS(COND) \
11515 MOVTF_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
11516 MOVTF_BUILTINS (cabs, COND, MASK_MIPS3D), \
11517 CMP_SCALAR_BUILTINS (cabs, COND, MASK_MIPS3D), \
11518 CMP_PS_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
11519 CMP_PS_BUILTINS (cabs, COND, MASK_MIPS3D), \
11520 CMP_4S_BUILTINS (c, COND), \
11521 CMP_4S_BUILTINS (cabs, COND)
/* Builtins for paired-single (MASK_PAIRED_SINGLE_FLOAT) and MIPS-3D
   (MASK_MIPS3D) instructions, plus all c.cond.fmt comparison builtins.  */
11523 static const struct builtin_description mips_bdesc[] =
11525 DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11526 DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11527 DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11528 DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11529 DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, MASK_PAIRED_SINGLE_FLOAT),
11530 DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11531 DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11532 DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11534 DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
11535 MASK_PAIRED_SINGLE_FLOAT),
11536 DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11537 DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11538 DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11539 DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11541 DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
11542 DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
11543 DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11544 DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
11545 DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
11546 DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11548 DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
11549 DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
11550 DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11551 DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
11552 DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
11553 DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
/* Expand CMP_BUILTINS once for every c.cond.fmt condition.  */
11555 MIPS_FP_CONDITIONS (CMP_BUILTINS)
11558 /* Builtin functions for the SB-1 processor. */
/* Map the builtin's expected CODE_FOR_mips_* name onto the generic
   V2SF square-root pattern.  */
11560 #define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
11562 static const struct builtin_description sb1_bdesc[] =
11564 DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT)
11567 /* Builtin functions for DSP ASE. */
/* These DSP builtins expand to generic vector add/sub/mul patterns
   rather than to mips-prefixed patterns; alias the expected names.  */
11569 #define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
11570 #define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
11571 #define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
11572 #define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
11573 #define CODE_FOR_mips_mul_ph CODE_FOR_mulv2hi3
11575 /* Define a MIPS_BUILTIN_DIRECT_NO_TARGET function for instruction
11576 CODE_FOR_mips_<INSN>. FUNCTION_TYPE and TARGET_FLAGS are
11577 builtin_description fields. */
11578 #define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
11579 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
11580 MIPS_BUILTIN_DIRECT_NO_TARGET, FUNCTION_TYPE, TARGET_FLAGS }
11582 /* Define __builtin_mips_bposge<VALUE>. <VALUE> is 32 for the MIPS32 DSP
11583 branch instruction. TARGET_FLAGS is a builtin_description field. */
11584 #define BPOSGE_BUILTIN(VALUE, TARGET_FLAGS) \
11585 { CODE_FOR_mips_bposge, 0, "__builtin_mips_bposge" #VALUE, \
11586 MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, TARGET_FLAGS }
/* DSP builtins available on both 32-bit and 64-bit targets; the
   accumulator-based builtins that are 32-bit only live in
   dsp_32only_bdesc instead.  */
11588 static const struct builtin_description dsp_bdesc[] =
11590 DIRECT_BUILTIN (addq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11591 DIRECT_BUILTIN (addq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11592 DIRECT_BUILTIN (addq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11593 DIRECT_BUILTIN (addu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11594 DIRECT_BUILTIN (addu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11595 DIRECT_BUILTIN (subq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11596 DIRECT_BUILTIN (subq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11597 DIRECT_BUILTIN (subq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11598 DIRECT_BUILTIN (subu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11599 DIRECT_BUILTIN (subu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11600 DIRECT_BUILTIN (addsc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11601 DIRECT_BUILTIN (addwc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11602 DIRECT_BUILTIN (modsub, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11603 DIRECT_BUILTIN (raddu_w_qb, MIPS_SI_FTYPE_V4QI, MASK_DSP),
11604 DIRECT_BUILTIN (absq_s_ph, MIPS_V2HI_FTYPE_V2HI, MASK_DSP),
11605 DIRECT_BUILTIN (absq_s_w, MIPS_SI_FTYPE_SI, MASK_DSP),
11606 DIRECT_BUILTIN (precrq_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
11607 DIRECT_BUILTIN (precrq_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
11608 DIRECT_BUILTIN (precrq_rs_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
11609 DIRECT_BUILTIN (precrqu_s_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
11610 DIRECT_BUILTIN (preceq_w_phl, MIPS_SI_FTYPE_V2HI, MASK_DSP),
11611 DIRECT_BUILTIN (preceq_w_phr, MIPS_SI_FTYPE_V2HI, MASK_DSP),
11612 DIRECT_BUILTIN (precequ_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11613 DIRECT_BUILTIN (precequ_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11614 DIRECT_BUILTIN (precequ_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11615 DIRECT_BUILTIN (precequ_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11616 DIRECT_BUILTIN (preceu_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11617 DIRECT_BUILTIN (preceu_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11618 DIRECT_BUILTIN (preceu_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11619 DIRECT_BUILTIN (preceu_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11620 DIRECT_BUILTIN (shll_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
11621 DIRECT_BUILTIN (shll_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11622 DIRECT_BUILTIN (shll_s_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11623 DIRECT_BUILTIN (shll_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11624 DIRECT_BUILTIN (shrl_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
11625 DIRECT_BUILTIN (shra_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11626 DIRECT_BUILTIN (shra_r_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11627 DIRECT_BUILTIN (shra_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11628 DIRECT_BUILTIN (muleu_s_ph_qbl, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
11629 DIRECT_BUILTIN (muleu_s_ph_qbr, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
11630 DIRECT_BUILTIN (mulq_rs_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11631 DIRECT_BUILTIN (muleq_s_w_phl, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
11632 DIRECT_BUILTIN (muleq_s_w_phr, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
11633 DIRECT_BUILTIN (bitrev, MIPS_SI_FTYPE_SI, MASK_DSP),
11634 DIRECT_BUILTIN (insv, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11635 DIRECT_BUILTIN (repl_qb, MIPS_V4QI_FTYPE_SI, MASK_DSP),
11636 DIRECT_BUILTIN (repl_ph, MIPS_V2HI_FTYPE_SI, MASK_DSP),
/* The cmp* builtins write the DSP condition-code bits rather than a
   GPR, hence DIRECT_NO_TARGET.  */
11637 DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
11638 DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
11639 DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
11640 DIRECT_BUILTIN (cmpgu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
11641 DIRECT_BUILTIN (cmpgu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
11642 DIRECT_BUILTIN (cmpgu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
11643 DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
11644 DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
11645 DIRECT_NO_TARGET_BUILTIN (cmp_le_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
11646 DIRECT_BUILTIN (pick_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11647 DIRECT_BUILTIN (pick_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11648 DIRECT_BUILTIN (packrl_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11649 DIRECT_NO_TARGET_BUILTIN (wrdsp, MIPS_VOID_FTYPE_SI_SI, MASK_DSP),
11650 DIRECT_BUILTIN (rddsp, MIPS_SI_FTYPE_SI, MASK_DSP),
11651 DIRECT_BUILTIN (lbux, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
11652 DIRECT_BUILTIN (lhx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
11653 DIRECT_BUILTIN (lwx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
11654 BPOSGE_BUILTIN (32, MASK_DSP),
11656 /* The following are for the MIPS DSP ASE REV 2. */
11657 DIRECT_BUILTIN (absq_s_qb, MIPS_V4QI_FTYPE_V4QI, MASK_DSPR2),
11658 DIRECT_BUILTIN (addu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11659 DIRECT_BUILTIN (addu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11660 DIRECT_BUILTIN (adduh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11661 DIRECT_BUILTIN (adduh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11662 DIRECT_BUILTIN (append, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
11663 DIRECT_BUILTIN (balign, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
11664 DIRECT_BUILTIN (cmpgdu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11665 DIRECT_BUILTIN (cmpgdu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11666 DIRECT_BUILTIN (cmpgdu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11667 DIRECT_BUILTIN (mul_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11668 DIRECT_BUILTIN (mul_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11669 DIRECT_BUILTIN (mulq_rs_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11670 DIRECT_BUILTIN (mulq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11671 DIRECT_BUILTIN (mulq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11672 DIRECT_BUILTIN (precr_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11673 DIRECT_BUILTIN (precr_sra_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
11674 DIRECT_BUILTIN (precr_sra_r_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
11675 DIRECT_BUILTIN (prepend, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
11676 DIRECT_BUILTIN (shra_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
11677 DIRECT_BUILTIN (shra_r_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
11678 DIRECT_BUILTIN (shrl_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSPR2),
11679 DIRECT_BUILTIN (subu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11680 DIRECT_BUILTIN (subu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11681 DIRECT_BUILTIN (subuh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11682 DIRECT_BUILTIN (subuh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11683 DIRECT_BUILTIN (addqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11684 DIRECT_BUILTIN (addqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11685 DIRECT_BUILTIN (addqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11686 DIRECT_BUILTIN (addqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11687 DIRECT_BUILTIN (subqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11688 DIRECT_BUILTIN (subqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11689 DIRECT_BUILTIN (subqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11690 DIRECT_BUILTIN (subqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2)
/* DSP builtins that operate on the 64-bit accumulators and are
   therefore only available on 32-bit targets (DI here is the
   accumulator pair).  */
11693 static const struct builtin_description dsp_32only_bdesc[] =
11695 DIRECT_BUILTIN (dpau_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
11696 DIRECT_BUILTIN (dpau_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
11697 DIRECT_BUILTIN (dpsu_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
11698 DIRECT_BUILTIN (dpsu_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
11699 DIRECT_BUILTIN (dpaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11700 DIRECT_BUILTIN (dpsq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11701 DIRECT_BUILTIN (mulsaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11702 DIRECT_BUILTIN (dpaq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
11703 DIRECT_BUILTIN (dpsq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
11704 DIRECT_BUILTIN (maq_s_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11705 DIRECT_BUILTIN (maq_s_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11706 DIRECT_BUILTIN (maq_sa_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11707 DIRECT_BUILTIN (maq_sa_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11708 DIRECT_BUILTIN (extr_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11709 DIRECT_BUILTIN (extr_r_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11710 DIRECT_BUILTIN (extr_rs_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11711 DIRECT_BUILTIN (extr_s_h, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11712 DIRECT_BUILTIN (extp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11713 DIRECT_BUILTIN (extpdp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11714 DIRECT_BUILTIN (shilo, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
11715 DIRECT_BUILTIN (mthlip, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
11717 /* The following are for the MIPS DSP ASE REV 2. */
11718 DIRECT_BUILTIN (dpa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11719 DIRECT_BUILTIN (dps_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11720 DIRECT_BUILTIN (madd, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
11721 DIRECT_BUILTIN (maddu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
11722 DIRECT_BUILTIN (msub, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
11723 DIRECT_BUILTIN (msubu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
11724 DIRECT_BUILTIN (mulsa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11725 DIRECT_BUILTIN (mult, MIPS_DI_FTYPE_SI_SI, MASK_DSPR2),
11726 DIRECT_BUILTIN (multu, MIPS_DI_FTYPE_USI_USI, MASK_DSPR2),
11727 DIRECT_BUILTIN (dpax_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11728 DIRECT_BUILTIN (dpsx_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11729 DIRECT_BUILTIN (dpaqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11730 DIRECT_BUILTIN (dpaqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11731 DIRECT_BUILTIN (dpsqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11732 DIRECT_BUILTIN (dpsqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2)
11735 /* This helps provide a mapping from builtin function codes to bdesc
11740 /* The builtin function table that this entry describes. */
11741 const struct builtin_description *bdesc;
11743 /* The number of entries in the builtin function table. */
11746 /* The target processor that supports these builtin functions.
11747 PROCESSOR_MAX means we enable them for all processors. */
11748 enum processor_type proc;
11750 /* If the target has these flags, this builtin function table
11751 will not be supported. */
11752 int unsupported_target_flags;
/* Registry of all builtin tables.  Order matters: function codes are
   assigned by concatenating the tables in this order (see the fcode
   lookup loop in mips_expand_builtin).  */
11755 static const struct bdesc_map bdesc_arrays[] =
11757 { mips_bdesc, ARRAY_SIZE (mips_bdesc), PROCESSOR_MAX, 0 },
11758 { sb1_bdesc, ARRAY_SIZE (sb1_bdesc), PROCESSOR_SB1, 0 },
11759 { dsp_bdesc, ARRAY_SIZE (dsp_bdesc), PROCESSOR_MAX, 0 },
11760 { dsp_32only_bdesc, ARRAY_SIZE (dsp_32only_bdesc), PROCESSOR_MAX,
11764 /* Take the argument ARGNUM of the arglist of EXP and convert it into a form
11765 suitable for input operand OP of instruction ICODE. Return the value. */
11768 mips_prepare_builtin_arg (enum insn_code icode,
11769 unsigned int op, tree exp, unsigned int argnum)
11772 enum machine_mode mode;
/* Expand the argument tree to rtl, then coerce it into a register if
   the raw form does not satisfy the operand's predicate.  */
11774 value = expand_normal (CALL_EXPR_ARG (exp, argnum));
11775 mode = insn_data[icode].operand[op].mode;
11776 if (!insn_data[icode].operand[op].predicate (value, mode))
11778 value = copy_to_mode_reg (mode, value);
11779 /* Check the predicate again. */
11780 if (!insn_data[icode].operand[op].predicate (value, mode))
/* Even a register did not satisfy the predicate: the user passed
   something fundamentally unsuitable (e.g. a non-constant where an
   immediate is required).  */
11782 error ("invalid argument to builtin function");
11790 /* Return an rtx suitable for output operand OP of instruction ICODE.
11791 If TARGET is non-null, try to use it where possible. */
11794 mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
11796 enum machine_mode mode;
/* Reuse TARGET only if it satisfies the operand's predicate;
   otherwise fall back to a fresh pseudo of the right mode.  */
11798 mode = insn_data[icode].operand[op].mode;
11799 if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
11800 target = gen_reg_rtx (mode);
11805 /* Expand builtin functions. This is called from TARGET_EXPAND_BUILTIN. */
11808 mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
11809 enum machine_mode mode ATTRIBUTE_UNUSED,
11810 int ignore ATTRIBUTE_UNUSED)
11812 enum insn_code icode;
11813 enum mips_builtin_type type;
11815 unsigned int fcode;
11816 const struct builtin_description *bdesc;
11817 const struct bdesc_map *m;
11819 fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
11820 fcode = DECL_FUNCTION_CODE (fndecl);
/* None of these builtins are usable from MIPS16 code; the guarding
   condition is in a line elided from this extract.  */
11824 error ("built-in function %qs not supported for MIPS16",
11825 IDENTIFIER_POINTER (DECL_NAME (fndecl)));
/* Function codes index the concatenation of the bdesc_arrays tables;
   walk the tables, decrementing FCODE per table, until the entry that
   owns this code is found.  */
11830 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
11832 if (fcode < m->size)
11835 icode = bdesc[fcode].icode;
11836 type = bdesc[fcode].builtin_type;
/* Dispatch on the expansion strategy recorded in the table.  */
11846 case MIPS_BUILTIN_DIRECT:
11847 return mips_expand_builtin_direct (icode, target, exp, true);
11849 case MIPS_BUILTIN_DIRECT_NO_TARGET:
11850 return mips_expand_builtin_direct (icode, target, exp, false);
11852 case MIPS_BUILTIN_MOVT:
11853 case MIPS_BUILTIN_MOVF:
11854 return mips_expand_builtin_movtf (type, icode, bdesc[fcode].cond,
11857 case MIPS_BUILTIN_CMP_ANY:
11858 case MIPS_BUILTIN_CMP_ALL:
11859 case MIPS_BUILTIN_CMP_UPPER:
11860 case MIPS_BUILTIN_CMP_LOWER:
11861 case MIPS_BUILTIN_CMP_SINGLE:
11862 return mips_expand_builtin_compare (type, icode, bdesc[fcode].cond,
11865 case MIPS_BUILTIN_BPOSGE32:
11866 return mips_expand_builtin_bposge (type, target);
11873 /* Init builtin functions. This is called from TARGET_INIT_BUILTIN. */
11876 mips_init_builtins (void)
11878 const struct builtin_description *d;
11879 const struct bdesc_map *m;
11880 tree types[(int) MIPS_MAX_FTYPE_MAX];
11881 tree V2SF_type_node;
11882 tree V2HI_type_node;
11883 tree V4QI_type_node;
11884 unsigned int offset;
11886 /* We have only builtins for -mpaired-single, -mips3d and -mdsp. */
11887 if (!TARGET_PAIRED_SINGLE_FLOAT && !TARGET_DSP)
/* Build the function-type nodes needed by the paired-single and
   MIPS-3D builtin tables, indexed by mips_function_type.  */
11890 if (TARGET_PAIRED_SINGLE_FLOAT)
11892 V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
11894 types[MIPS_V2SF_FTYPE_V2SF]
11895 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
11897 types[MIPS_V2SF_FTYPE_V2SF_V2SF]
11898 = build_function_type_list (V2SF_type_node,
11899 V2SF_type_node, V2SF_type_node, NULL_TREE);
11901 types[MIPS_V2SF_FTYPE_V2SF_V2SF_INT]
11902 = build_function_type_list (V2SF_type_node,
11903 V2SF_type_node, V2SF_type_node,
11904 integer_type_node, NULL_TREE);
11906 types[MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF]
11907 = build_function_type_list (V2SF_type_node,
11908 V2SF_type_node, V2SF_type_node,
11909 V2SF_type_node, V2SF_type_node, NULL_TREE);
11911 types[MIPS_V2SF_FTYPE_SF_SF]
11912 = build_function_type_list (V2SF_type_node,
11913 float_type_node, float_type_node, NULL_TREE);
11915 types[MIPS_INT_FTYPE_V2SF_V2SF]
11916 = build_function_type_list (integer_type_node,
11917 V2SF_type_node, V2SF_type_node, NULL_TREE);
11919 types[MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF]
11920 = build_function_type_list (integer_type_node,
11921 V2SF_type_node, V2SF_type_node,
11922 V2SF_type_node, V2SF_type_node, NULL_TREE);
11924 types[MIPS_INT_FTYPE_SF_SF]
11925 = build_function_type_list (integer_type_node,
11926 float_type_node, float_type_node, NULL_TREE);
11928 types[MIPS_INT_FTYPE_DF_DF]
11929 = build_function_type_list (integer_type_node,
11930 double_type_node, double_type_node, NULL_TREE);
11932 types[MIPS_SF_FTYPE_V2SF]
11933 = build_function_type_list (float_type_node, V2SF_type_node, NULL_TREE);
11935 types[MIPS_SF_FTYPE_SF]
11936 = build_function_type_list (float_type_node,
11937 float_type_node, NULL_TREE);
11939 types[MIPS_SF_FTYPE_SF_SF]
11940 = build_function_type_list (float_type_node,
11941 float_type_node, float_type_node, NULL_TREE);
11943 types[MIPS_DF_FTYPE_DF]
11944 = build_function_type_list (double_type_node,
11945 double_type_node, NULL_TREE);
11947 types[MIPS_DF_FTYPE_DF_DF]
11948 = build_function_type_list (double_type_node,
11949 double_type_node, double_type_node, NULL_TREE);
/* Function-type nodes for the DSP builtin tables (guarding condition
   is in a line elided from this extract).  */
11954 V2HI_type_node = build_vector_type_for_mode (intHI_type_node, V2HImode);
11955 V4QI_type_node = build_vector_type_for_mode (intQI_type_node, V4QImode);
11957 types[MIPS_V2HI_FTYPE_V2HI_V2HI]
11958 = build_function_type_list (V2HI_type_node,
11959 V2HI_type_node, V2HI_type_node,
11962 types[MIPS_SI_FTYPE_SI_SI]
11963 = build_function_type_list (intSI_type_node,
11964 intSI_type_node, intSI_type_node,
11967 types[MIPS_V4QI_FTYPE_V4QI_V4QI]
11968 = build_function_type_list (V4QI_type_node,
11969 V4QI_type_node, V4QI_type_node,
11972 types[MIPS_SI_FTYPE_V4QI]
11973 = build_function_type_list (intSI_type_node,
11977 types[MIPS_V2HI_FTYPE_V2HI]
11978 = build_function_type_list (V2HI_type_node,
11982 types[MIPS_SI_FTYPE_SI]
11983 = build_function_type_list (intSI_type_node,
11987 types[MIPS_V4QI_FTYPE_V2HI_V2HI]
11988 = build_function_type_list (V4QI_type_node,
11989 V2HI_type_node, V2HI_type_node,
11992 types[MIPS_V2HI_FTYPE_SI_SI]
11993 = build_function_type_list (V2HI_type_node,
11994 intSI_type_node, intSI_type_node,
11997 types[MIPS_SI_FTYPE_V2HI]
11998 = build_function_type_list (intSI_type_node,
12002 types[MIPS_V2HI_FTYPE_V4QI]
12003 = build_function_type_list (V2HI_type_node,
12007 types[MIPS_V4QI_FTYPE_V4QI_SI]
12008 = build_function_type_list (V4QI_type_node,
12009 V4QI_type_node, intSI_type_node,
12012 types[MIPS_V2HI_FTYPE_V2HI_SI]
12013 = build_function_type_list (V2HI_type_node,
12014 V2HI_type_node, intSI_type_node,
12017 types[MIPS_V2HI_FTYPE_V4QI_V2HI]
12018 = build_function_type_list (V2HI_type_node,
12019 V4QI_type_node, V2HI_type_node,
12022 types[MIPS_SI_FTYPE_V2HI_V2HI]
12023 = build_function_type_list (intSI_type_node,
12024 V2HI_type_node, V2HI_type_node,
12027 types[MIPS_DI_FTYPE_DI_V4QI_V4QI]
12028 = build_function_type_list (intDI_type_node,
12029 intDI_type_node, V4QI_type_node, V4QI_type_node,
12032 types[MIPS_DI_FTYPE_DI_V2HI_V2HI]
12033 = build_function_type_list (intDI_type_node,
12034 intDI_type_node, V2HI_type_node, V2HI_type_node,
12037 types[MIPS_DI_FTYPE_DI_SI_SI]
12038 = build_function_type_list (intDI_type_node,
12039 intDI_type_node, intSI_type_node, intSI_type_node,
12042 types[MIPS_V4QI_FTYPE_SI]
12043 = build_function_type_list (V4QI_type_node,
12047 types[MIPS_V2HI_FTYPE_SI]
12048 = build_function_type_list (V2HI_type_node,
12052 types[MIPS_VOID_FTYPE_V4QI_V4QI]
12053 = build_function_type_list (void_type_node,
12054 V4QI_type_node, V4QI_type_node,
12057 types[MIPS_SI_FTYPE_V4QI_V4QI]
12058 = build_function_type_list (intSI_type_node,
12059 V4QI_type_node, V4QI_type_node,
12062 types[MIPS_VOID_FTYPE_V2HI_V2HI]
12063 = build_function_type_list (void_type_node,
12064 V2HI_type_node, V2HI_type_node,
12067 types[MIPS_SI_FTYPE_DI_SI]
12068 = build_function_type_list (intSI_type_node,
12069 intDI_type_node, intSI_type_node,
12072 types[MIPS_DI_FTYPE_DI_SI]
12073 = build_function_type_list (intDI_type_node,
12074 intDI_type_node, intSI_type_node,
12077 types[MIPS_VOID_FTYPE_SI_SI]
12078 = build_function_type_list (void_type_node,
12079 intSI_type_node, intSI_type_node,
12082 types[MIPS_SI_FTYPE_PTR_SI]
12083 = build_function_type_list (intSI_type_node,
12084 ptr_type_node, intSI_type_node,
12087 types[MIPS_SI_FTYPE_VOID]
12088 = build_function_type (intSI_type_node, void_list_node);
/* Additional function types needed by the DSP REV 2 builtins
   (guarding condition elided from this extract).  */
12092 types[MIPS_V4QI_FTYPE_V4QI]
12093 = build_function_type_list (V4QI_type_node,
12097 types[MIPS_SI_FTYPE_SI_SI_SI]
12098 = build_function_type_list (intSI_type_node,
12099 intSI_type_node, intSI_type_node,
12100 intSI_type_node, NULL_TREE);
12102 types[MIPS_DI_FTYPE_DI_USI_USI]
12103 = build_function_type_list (intDI_type_node,
12105 unsigned_intSI_type_node,
12106 unsigned_intSI_type_node, NULL_TREE);
12108 types[MIPS_DI_FTYPE_SI_SI]
12109 = build_function_type_list (intDI_type_node,
12110 intSI_type_node, intSI_type_node,
12113 types[MIPS_DI_FTYPE_USI_USI]
12114 = build_function_type_list (intDI_type_node,
12115 unsigned_intSI_type_node,
12116 unsigned_intSI_type_node, NULL_TREE);
12118 types[MIPS_V2HI_FTYPE_SI_SI_SI]
12119 = build_function_type_list (V2HI_type_node,
12120 intSI_type_node, intSI_type_node,
12121 intSI_type_node, NULL_TREE);
12126 /* Iterate through all of the bdesc arrays, initializing all of the
12127 builtin functions. */
/* Function codes are D's index within its table plus the running
   OFFSET of all preceding tables; mips_expand_builtin reverses this
   mapping.  Entries are skipped when their processor or target-flag
   requirements are not met.  */
12130 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
12132 if ((m->proc == PROCESSOR_MAX || (m->proc == mips_arch))
12133 && (m->unsupported_target_flags & target_flags) == 0)
12134 for (d = m->bdesc; d < &m->bdesc[m->size]; d++)
12135 if ((d->target_flags & target_flags) == d->target_flags)
12136 add_builtin_function (d->name, types[d->function_type],
12137 d - m->bdesc + offset,
12138 BUILT_IN_MD, NULL, NULL);
12143 /* Expand a MIPS_BUILTIN_DIRECT function. ICODE is the code of the
12144 .md pattern and CALL is the function expr with arguments. TARGET,
12145 if nonnull, suggests a good place to put the result.
12146 HAS_TARGET indicates the function must return something. */
12149 mips_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
12152 rtx ops[MAX_RECOG_OPERANDS];
12158 /* We save target to ops[0]. */
12159 ops[0] = mips_prepare_builtin_target (icode, 0, target);
12163 /* We need to test if the arglist is not zero. Some instructions have extra
12164 clobber registers. */
12165 for (; i < insn_data[icode].n_operands && i <= call_expr_nargs (exp); i++, j++)
12166 ops[i] = mips_prepare_builtin_arg (icode, i, exp, j);
/* Emit the pattern with the operand count matching the .md pattern;
   the switch skeleton surrounding these emits is elided here.  */
12171 emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
12175 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
12179 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
12183 gcc_unreachable ();
12188 /* Expand a __builtin_mips_movt_*_ps() or __builtin_mips_movf_*_ps()
12189 function (TYPE says which). EXP is the tree for the function
12190 function, ICODE is the instruction that should be used to compare
12191 the first two arguments, and COND is the condition it should test.
12192 TARGET, if nonnull, suggests a good place to put the result. */
12195 mips_expand_builtin_movtf (enum mips_builtin_type type,
12196 enum insn_code icode, enum mips_fp_condition cond,
12197 rtx target, tree exp)
12199 rtx cmp_result, op0, op1;
/* First emit the comparison of arguments 0 and 1 under COND.  */
12201 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
12202 op0 = mips_prepare_builtin_arg (icode, 1, exp, 0);
12203 op1 = mips_prepare_builtin_arg (icode, 2, exp, 1);
12204 emit_insn (GEN_FCN (icode) (cmp_result, op0, op1, GEN_INT (cond)));
/* Then emit the conditional move of arguments 2 and 3.  The only
   difference between movt and movf is the operand order: movt swaps
   which argument feeds op0 and which feeds op1.  */
12206 icode = CODE_FOR_mips_cond_move_tf_ps;
12207 target = mips_prepare_builtin_target (icode, 0, target);
12208 if (type == MIPS_BUILTIN_MOVT)
12210 op1 = mips_prepare_builtin_arg (icode, 2, exp, 2);
12211 op0 = mips_prepare_builtin_arg (icode, 1, exp, 3);
12215 op0 = mips_prepare_builtin_arg (icode, 1, exp, 2);
12216 op1 = mips_prepare_builtin_arg (icode, 2, exp, 3);
12218 emit_insn (gen_mips_cond_move_tf_ps (target, op0, op1, cmp_result));
12222 /* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
12223 into TARGET otherwise. Return TARGET. */
12226 mips_builtin_branch_and_move (rtx condition, rtx target,
12227 rtx value_if_true, rtx value_if_false)
12229 rtx true_label, done_label;
12231 true_label = gen_label_rtx ();
12232 done_label = gen_label_rtx ();
12234 /* First assume that CONDITION is false. */
12235 mips_emit_move (target, value_if_false);
12237 /* Branch to TRUE_LABEL if CONDITION is true and DONE_LABEL otherwise. */
12238 emit_jump_insn (gen_condjump (condition, true_label));
12239 emit_jump_insn (gen_jump (done_label));
12242 /* Fix TARGET if CONDITION is true. */
12243 emit_label (true_label);
12244 mips_emit_move (target, value_if_true);
12246 emit_label (done_label);
12250 /* Expand a comparison builtin of type BUILTIN_TYPE. ICODE is the code
12251 of the comparison instruction and COND is the condition it should test.
12252 EXP is the function call and arguments and TARGET, if nonnull,
12253 suggests a good place to put the boolean result. */
12256 mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
12257 enum insn_code icode, enum mips_fp_condition cond,
12258 rtx target, tree exp)
12260 rtx offset, condition, cmp_result, ops[MAX_RECOG_OPERANDS];
/* The boolean result is always SImode.  */
12264 if (target == 0 || GET_MODE (target) != SImode)
12265 target = gen_reg_rtx (SImode);
12267 /* Prepare the operands to the comparison. */
12268 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
12269 for (i = 1; i < insn_data[icode].n_operands - 1; i++, j++)
12270 ops[i] = mips_prepare_builtin_arg (icode, i, exp, j);
/* Emit the comparison: two input operands for the ps/scalar forms,
   four for the 4s forms (operand counts 4 and 6 including the result
   and condition; the case labels are elided from this extract).  */
12272 switch (insn_data[icode].n_operands)
12275 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2], GEN_INT (cond)));
12279 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2],
12280 ops[3], ops[4], GEN_INT (cond)));
12284 gcc_unreachable ();
12287 /* If the comparison sets more than one register, we define the result
12288 to be 0 if all registers are false and -1 if all registers are true.
12289 The value of the complete result is indeterminate otherwise. */
12290 switch (builtin_type)
12292 case MIPS_BUILTIN_CMP_ALL:
/* all: true (1) unless some register is false, i.e. CMP_RESULT != -1.  */
12293 condition = gen_rtx_NE (VOIDmode, cmp_result, constm1_rtx);
12294 return mips_builtin_branch_and_move (condition, target,
12295 const0_rtx, const1_rtx);
12297 case MIPS_BUILTIN_CMP_UPPER:
12298 case MIPS_BUILTIN_CMP_LOWER:
/* upper/lower: test a single condition-code bit, selected by OFFSET.  */
12299 offset = GEN_INT (builtin_type == MIPS_BUILTIN_CMP_UPPER);
12300 condition = gen_single_cc (cmp_result, offset);
12301 return mips_builtin_branch_and_move (condition, target,
12302 const1_rtx, const0_rtx);
/* any/single: true if any register is set, i.e. CMP_RESULT != 0.  */
12305 condition = gen_rtx_NE (VOIDmode, cmp_result, const0_rtx);
12306 return mips_builtin_branch_and_move (condition, target,
12307 const1_rtx, const0_rtx);
12311 /* Expand a bposge builtin of type BUILTIN_TYPE. TARGET, if nonnull,
12312 suggests a good place to put the boolean result. */
12315 mips_expand_builtin_bposge (enum mips_builtin_type builtin_type, rtx target)
12317 rtx condition, cmp_result;
/* The boolean result must be an SImode register.  */
12320 if (target == 0 || GET_MODE (target) != SImode)
12321 target = gen_reg_rtx (SImode);
/* CMP_RESULT is the DSP control register field tested by bposge
   (CCDSP_PO_REGNUM in CCDSPmode).  */
12323 cmp_result = gen_rtx_REG (CCDSPmode, CCDSP_PO_REGNUM);
/* Select the threshold for the comparison; presumably 32 for
   MIPS_BUILTIN_BPOSGE32 and another value otherwise -- the assignments
   to CMP_VALUE are not visible in this extract.  */
12325 if (builtin_type == MIPS_BUILTIN_BPOSGE32)
/* TARGET = (pos >= CMP_VALUE) ? 1 : 0.  */
12330 condition = gen_rtx_GE (VOIDmode, cmp_result, GEN_INT (cmp_value));
12331 return mips_builtin_branch_and_move (condition, target,
12332 const1_rtx, const0_rtx);
12335 /* Return true if we should force MIPS16 mode for the function named by
12336 the SYMBOL_REF SYMBOL, which belongs to DECL and has type TYPE.
12337 FIRST is true if this is the first time handling this decl. */
12340 mips_use_mips16_mode_p (rtx symbol, tree decl, int first, tree type)
/* Decision order: explicit attributes, then nested-function
   inheritance, then -mflip-mips16 alternation, then the base setting.  */
12344 /* Explicit function attributes take precedence. */
12345 if (mips_mips16_type_p (type))
12347 if (mips_nomips16_type_p (type))
12350 /* A nested function should inherit the MIPS16 setting from its parent. */
/* The setting was already recorded in the parent's SYMBOL_REF flags
   when the parent was encoded.  */
12351 parent = decl_function_context (decl);
12353 return SYMBOL_REF_MIPS16_FUNC_P (XEXP (DECL_RTL (parent), 0));
12355 /* Handle -mflip-mips16. */
/* Built-in and compiler-generated functions are excluded from the
   alternation so that only user code flips.  */
12356 if (TARGET_FLIP_MIPS16
12357 && !DECL_BUILT_IN (decl)
12358 && !DECL_ARTIFICIAL (decl))
/* On repeat visits (!FIRST, presumably -- the guard is not visible in
   this extract) the decision must be stable:  */
12361 /* Use the setting we picked first time around. */
12362 return SYMBOL_REF_MIPS16_FUNC_P (symbol);
/* First visit: alternate the global flipper so successive functions
   get opposite settings.  */
12364 mips16_flipper = !mips16_flipper;
12365 if (mips16_flipper)
12366 return !mips_base_mips16;
/* Default: follow the command-line -mips16 setting.  */
12369 return mips_base_mips16;
12372 /* Set SYMBOL_REF_FLAGS for the SYMBOL_REF inside RTL, which belongs to DECL.
12373 FIRST is true if this is the first time handling this decl. */
12376 mips_encode_section_info (tree decl, rtx rtl, int first)
/* Implement TARGET_ENCODE_SECTION_INFO: run the generic encoding, then
   record MIPS-specific properties of function symbols in their
   SYMBOL_REF flags.  */
12378 default_encode_section_info (decl, rtl, first);
12380 if (TREE_CODE (decl) == FUNCTION_DECL)
12382 rtx symbol = XEXP (rtl, 0);
12383 tree type = TREE_TYPE (decl);
/* Mark functions that must be reached with an indirect (long) call:
   either -mlong-calls is in force and the function is not "near", or
   the function is explicitly "far".  */
12385 if ((TARGET_LONG_CALLS && !mips_near_type_p (type))
12386 || mips_far_type_p (type))
12387 SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
/* Record whether the function should be compiled as MIPS16.  */
12389 if (mips_use_mips16_mode_p (symbol, decl, first, type))
/* MIPS16 PIC is not implemented at this point; report it rather than
   generate wrong code.  */
12391 if (flag_pic || TARGET_ABICALLS)
12392 sorry ("MIPS16 PIC");
12394 SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_MIPS16_FUNC;
12399 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. Some code models use the incoming
12400 value of PIC_FUNCTION_ADDR_REGNUM to set up the global pointer. */
12403 mips_extra_live_on_entry (bitmap regs)
/* $25 ($t9) carries the function's own address into GOT-based ABIs so
   the prologue can compute $gp; absolute abicalls don't need it.  */
12405 if (TARGET_USE_GOT && !TARGET_ABSOLUTE_ABICALLS)
12406 bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
12409 /* SImode values are represented as sign-extended to DImode. */
12412 mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
/* Implement TARGET_MODE_REP_EXTENDED: on 64-bit targets, SImode values
   are kept sign-extended in DImode registers.  The fallback return for
   other mode pairs is not visible in this extract.  */
12414 if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
12415 return SIGN_EXTEND;
12420 /* MIPS implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
12423 mips_output_dwarf_dtprel (FILE *file, int size, rtx x)
/* Emit a DTP-relative relocation for X.  SIZE selects the directive;
   presumably 4 -> .dtprelword and 8 -> .dtpreldword -- the case labels
   are not visible in this extract.  */
12428 fputs ("\t.dtprelword\t", file);
12432 fputs ("\t.dtpreldword\t", file);
12436 gcc_unreachable ();
12438 output_addr_const (file, x);
/* The MIPS TLS ABI biases DTP-relative offsets by 0x8000.  */
12439 fputs ("+0x8000", file);
12442 #include "gt-mips.h"