1 /* Subroutines used for MIPS code generation.
2 Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
5 Contributed by A. Lichnewsky, lich@inria.inria.fr.
6 Changes by Michael Meissner, meissner@osf.org.
7 64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
8 Brendan Eich, brendan@microunity.com.
10 This file is part of GCC.
12 GCC is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 3, or (at your option)
17 GCC is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with GCC; see the file COPYING3. If not see
24 <http://www.gnu.org/licenses/>. */
28 #include "coretypes.h"
33 #include "hard-reg-set.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "insn-attr.h"
53 #include "target-def.h"
54 #include "integrate.h"
55 #include "langhooks.h"
56 #include "cfglayout.h"
57 #include "sched-int.h"
58 #include "tree-gimple.h"
61 /* True if X is an unspec wrapper around a SYMBOL_REF or LABEL_REF. */
62 #define UNSPEC_ADDRESS_P(X) \
63 (GET_CODE (X) == UNSPEC \
64 && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
65 && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
67 /* Extract the symbol or label from UNSPEC wrapper X. */
68 #define UNSPEC_ADDRESS(X) \
71 /* Extract the symbol type from UNSPEC wrapper X. */
72 #define UNSPEC_ADDRESS_TYPE(X) \
73 ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
75 /* The maximum distance between the top of the stack frame and the
76 value $sp has when we save and restore registers.
78 The value for normal-mode code must be a SMALL_OPERAND and must
79 preserve the maximum stack alignment. We therefore use a value
80 of 0x7ff0 in this case.
82 MIPS16e SAVE and RESTORE instructions can adjust the stack pointer by
83 up to 0x7f8 bytes and can usually save or restore all the registers
84 that we need to save or restore. (Note that we can only use these
85 instructions for o32, for which the stack alignment is 8 bytes.)
87 We use a maximum gap of 0x100 or 0x400 for MIPS16 code when SAVE and
88 RESTORE are not available. We can then use unextended instructions
89 to save and restore registers, and to allocate and deallocate the top
91 #define MIPS_MAX_FIRST_STACK_STEP \
92 (!TARGET_MIPS16 ? 0x7ff0 \
93 : GENERATE_MIPS16E_SAVE_RESTORE ? 0x7f8 \
94 : TARGET_64BIT ? 0x100 : 0x400)
96 /* True if INSN is a mips.md pattern or asm statement. */
97 #define USEFUL_INSN_P(INSN) \
99 && GET_CODE (PATTERN (INSN)) != USE \
100 && GET_CODE (PATTERN (INSN)) != CLOBBER \
101 && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
102 && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
104 /* If INSN is a delayed branch sequence, return the first instruction
105 in the sequence, otherwise return INSN itself. */
106 #define SEQ_BEGIN(INSN) \
107 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
108 ? XVECEXP (PATTERN (INSN), 0, 0) \
111 /* Likewise for the last instruction in a delayed branch sequence. */
112 #define SEQ_END(INSN) \
113 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
114 ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1) \
117 /* Execute the following loop body with SUBINSN set to each instruction
118 between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive. */
119 #define FOR_EACH_SUBINSN(SUBINSN, INSN) \
120 for ((SUBINSN) = SEQ_BEGIN (INSN); \
121 (SUBINSN) != NEXT_INSN (SEQ_END (INSN)); \
122 (SUBINSN) = NEXT_INSN (SUBINSN))
124 /* True if bit BIT is set in VALUE. */
125 #define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0)
127 /* Classifies an address.
130 A natural register + offset address. The register satisfies
131 mips_valid_base_register_p and the offset is a const_arith_operand.
134 A LO_SUM rtx. The first operand is a valid base register and
135 the second operand is a symbolic address.
138 A signed 16-bit constant address.
141 A constant symbolic address (equivalent to CONSTANT_SYMBOLIC). */
142 enum mips_address_type {
149 /* Classifies the prototype of a builtin function. */
150 enum mips_function_type
152 MIPS_V2SF_FTYPE_V2SF,
153 MIPS_V2SF_FTYPE_V2SF_V2SF,
154 MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
155 MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,
156 MIPS_V2SF_FTYPE_SF_SF,
157 MIPS_INT_FTYPE_V2SF_V2SF,
158 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF,
159 MIPS_INT_FTYPE_SF_SF,
160 MIPS_INT_FTYPE_DF_DF,
167 /* For MIPS DSP ASE */
169 MIPS_DI_FTYPE_DI_SI_SI,
170 MIPS_DI_FTYPE_DI_V2HI_V2HI,
171 MIPS_DI_FTYPE_DI_V4QI_V4QI,
173 MIPS_SI_FTYPE_PTR_SI,
177 MIPS_SI_FTYPE_V2HI_V2HI,
179 MIPS_SI_FTYPE_V4QI_V4QI,
182 MIPS_V2HI_FTYPE_SI_SI,
183 MIPS_V2HI_FTYPE_V2HI,
184 MIPS_V2HI_FTYPE_V2HI_SI,
185 MIPS_V2HI_FTYPE_V2HI_V2HI,
186 MIPS_V2HI_FTYPE_V4QI,
187 MIPS_V2HI_FTYPE_V4QI_V2HI,
189 MIPS_V4QI_FTYPE_V2HI_V2HI,
190 MIPS_V4QI_FTYPE_V4QI_SI,
191 MIPS_V4QI_FTYPE_V4QI_V4QI,
192 MIPS_VOID_FTYPE_SI_SI,
193 MIPS_VOID_FTYPE_V2HI_V2HI,
194 MIPS_VOID_FTYPE_V4QI_V4QI,
196 /* For MIPS DSP REV 2 ASE. */
197 MIPS_V4QI_FTYPE_V4QI,
198 MIPS_SI_FTYPE_SI_SI_SI,
199 MIPS_DI_FTYPE_DI_USI_USI,
201 MIPS_DI_FTYPE_USI_USI,
202 MIPS_V2HI_FTYPE_SI_SI_SI,
208 /* Specifies how a builtin function should be converted into rtl. */
209 enum mips_builtin_type
211 /* The builtin corresponds directly to an .md pattern. The return
212 value is mapped to operand 0 and the arguments are mapped to
213 operands 1 and above. */
216 /* The builtin corresponds directly to an .md pattern. There is no return
217 value and the arguments are mapped to operands 0 and above. */
218 MIPS_BUILTIN_DIRECT_NO_TARGET,
220 /* The builtin corresponds to a comparison instruction followed by
221 a mips_cond_move_tf_ps pattern. The first two arguments are the
222 values to compare and the second two arguments are the vector
223 operands for the movt.ps or movf.ps instruction (in assembly order). */
227 /* The builtin corresponds to a V2SF comparison instruction. Operand 0
228 of this instruction is the result of the comparison, which has mode
229 CCV2 or CCV4. The function arguments are mapped to operands 1 and
230 above. The function's return value is an SImode boolean that is
231 true under the following conditions:
233 MIPS_BUILTIN_CMP_ANY: one of the registers is true
234 MIPS_BUILTIN_CMP_ALL: all of the registers are true
235 MIPS_BUILTIN_CMP_LOWER: the first register is true
236 MIPS_BUILTIN_CMP_UPPER: the second register is true. */
237 MIPS_BUILTIN_CMP_ANY,
238 MIPS_BUILTIN_CMP_ALL,
239 MIPS_BUILTIN_CMP_UPPER,
240 MIPS_BUILTIN_CMP_LOWER,
242 /* As above, but the instruction only sets a single $fcc register. */
243 MIPS_BUILTIN_CMP_SINGLE,
245 /* For generating bposge32 branch instructions in MIPS32 DSP ASE. */
246 MIPS_BUILTIN_BPOSGE32
249 /* Invokes MACRO (COND) for each c.cond.fmt condition. */
250 #define MIPS_FP_CONDITIONS(MACRO) \
268 /* Enumerates the codes above as MIPS_FP_COND_<X>. */
269 #define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
270 enum mips_fp_condition {
271 MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
274 /* Index X provides the string representation of MIPS_FP_COND_<X>. */
275 #define STRINGIFY(X) #X
276 static const char *const mips_fp_conditions[] = {
277 MIPS_FP_CONDITIONS (STRINGIFY)
280 /* A function to save or store a register. The first argument is the
281 register and the second is the stack slot. */
282 typedef void (*mips_save_restore_fn) (rtx, rtx);
284 struct mips16_constant;
285 struct mips_arg_info;
286 struct mips_address_info;
287 struct mips_integer_op;
290 static enum mips_symbol_type mips_classify_symbol (rtx);
291 static bool mips_valid_base_register_p (rtx, enum machine_mode, int);
292 static bool mips_symbolic_address_p (enum mips_symbol_type, enum machine_mode);
293 static bool mips_classify_address (struct mips_address_info *, rtx,
294 enum machine_mode, int);
295 static bool mips_cannot_force_const_mem (rtx);
296 static bool mips_use_blocks_for_constant_p (enum machine_mode, rtx);
297 static int mips_symbol_insns (enum mips_symbol_type);
298 static bool mips16_unextended_reference_p (enum machine_mode mode, rtx, rtx);
299 static rtx mips_force_temporary (rtx, rtx);
300 static rtx mips_unspec_offset_high (rtx, rtx, rtx, enum mips_symbol_type);
301 static rtx mips_add_offset (rtx, rtx, HOST_WIDE_INT);
302 static unsigned int mips_build_shift (struct mips_integer_op *, HOST_WIDE_INT);
303 static unsigned int mips_build_lower (struct mips_integer_op *,
304 unsigned HOST_WIDE_INT);
305 static unsigned int mips_build_integer (struct mips_integer_op *,
306 unsigned HOST_WIDE_INT);
307 static void mips_legitimize_const_move (enum machine_mode, rtx, rtx);
308 static int m16_check_op (rtx, int, int, int);
309 static bool mips_rtx_costs (rtx, int, int, int *);
310 static int mips_address_cost (rtx);
311 static void mips_emit_compare (enum rtx_code *, rtx *, rtx *, bool);
312 static void mips_load_call_address (rtx, rtx, int);
313 static bool mips_function_ok_for_sibcall (tree, tree);
314 static void mips_block_move_straight (rtx, rtx, HOST_WIDE_INT);
315 static void mips_adjust_block_mem (rtx, HOST_WIDE_INT, rtx *, rtx *);
316 static void mips_block_move_loop (rtx, rtx, HOST_WIDE_INT);
317 static void mips_arg_info (const CUMULATIVE_ARGS *, enum machine_mode,
318 tree, int, struct mips_arg_info *);
319 static bool mips_get_unaligned_mem (rtx *, unsigned int, int, rtx *, rtx *);
320 static void mips_set_architecture (const struct mips_cpu_info *);
321 static void mips_set_tune (const struct mips_cpu_info *);
322 static bool mips_handle_option (size_t, const char *, int);
323 static struct machine_function *mips_init_machine_status (void);
324 static void print_operand_reloc (FILE *, rtx, const char **);
325 static void mips_file_start (void);
326 static bool mips_rewrite_small_data_p (rtx);
327 static int mips_small_data_pattern_1 (rtx *, void *);
328 static int mips_rewrite_small_data_1 (rtx *, void *);
329 static bool mips_function_has_gp_insn (void);
330 static unsigned int mips_global_pointer (void);
331 static bool mips_save_reg_p (unsigned int);
332 static void mips_save_restore_reg (enum machine_mode, int, HOST_WIDE_INT,
333 mips_save_restore_fn);
334 static void mips_for_each_saved_reg (HOST_WIDE_INT, mips_save_restore_fn);
335 static void mips_output_cplocal (void);
336 static void mips_emit_loadgp (void);
337 static void mips_output_function_prologue (FILE *, HOST_WIDE_INT);
338 static void mips_set_frame_expr (rtx);
339 static rtx mips_frame_set (rtx, rtx);
340 static void mips_save_reg (rtx, rtx);
341 static void mips_output_function_epilogue (FILE *, HOST_WIDE_INT);
342 static void mips_restore_reg (rtx, rtx);
343 static void mips_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
344 HOST_WIDE_INT, tree);
345 static int symbolic_expression_p (rtx);
346 static section *mips_select_rtx_section (enum machine_mode, rtx,
347 unsigned HOST_WIDE_INT);
348 static section *mips_function_rodata_section (tree);
349 static bool mips_in_small_data_p (tree);
350 static bool mips_use_anchors_for_symbol_p (rtx);
351 static int mips_fpr_return_fields (tree, tree *);
352 static bool mips_return_in_msb (tree);
353 static rtx mips_return_fpr_pair (enum machine_mode mode,
354 enum machine_mode mode1, HOST_WIDE_INT,
355 enum machine_mode mode2, HOST_WIDE_INT);
356 static rtx mips16_gp_pseudo_reg (void);
357 static void mips16_fp_args (FILE *, int, int);
358 static void build_mips16_function_stub (FILE *);
359 static rtx dump_constants_1 (enum machine_mode, rtx, rtx);
360 static void dump_constants (struct mips16_constant *, rtx);
361 static int mips16_insn_length (rtx);
362 static int mips16_rewrite_pool_refs (rtx *, void *);
363 static void mips16_lay_out_constants (void);
364 static void mips_sim_reset (struct mips_sim *);
365 static void mips_sim_init (struct mips_sim *, state_t);
366 static void mips_sim_next_cycle (struct mips_sim *);
367 static void mips_sim_wait_reg (struct mips_sim *, rtx, rtx);
368 static int mips_sim_wait_regs_2 (rtx *, void *);
369 static void mips_sim_wait_regs_1 (rtx *, void *);
370 static void mips_sim_wait_regs (struct mips_sim *, rtx);
371 static void mips_sim_wait_units (struct mips_sim *, rtx);
372 static void mips_sim_wait_insn (struct mips_sim *, rtx);
373 static void mips_sim_record_set (rtx, const_rtx, void *);
374 static void mips_sim_issue_insn (struct mips_sim *, rtx);
375 static void mips_sim_issue_nop (struct mips_sim *);
376 static void mips_sim_finish_insn (struct mips_sim *, rtx);
377 static void vr4130_avoid_branch_rt_conflict (rtx);
378 static void vr4130_align_insns (void);
379 static void mips_avoid_hazard (rtx, rtx, int *, rtx *, rtx);
380 static void mips_avoid_hazards (void);
381 static void mips_reorg (void);
382 static bool mips_strict_matching_cpu_name_p (const char *, const char *);
383 static bool mips_matching_cpu_name_p (const char *, const char *);
384 static const struct mips_cpu_info *mips_parse_cpu (const char *);
385 static const struct mips_cpu_info *mips_cpu_info_from_isa (int);
386 static bool mips_return_in_memory (tree, tree);
387 static bool mips_strict_argument_naming (CUMULATIVE_ARGS *);
388 static void mips_macc_chains_record (rtx);
389 static void mips_macc_chains_reorder (rtx *, int);
390 static void vr4130_true_reg_dependence_p_1 (rtx, const_rtx, void *);
391 static bool vr4130_true_reg_dependence_p (rtx);
392 static bool vr4130_swap_insns_p (rtx, rtx);
393 static void vr4130_reorder (rtx *, int);
394 static void mips_promote_ready (rtx *, int, int);
395 static int mips_sched_reorder (FILE *, int, rtx *, int *, int);
396 static int mips_variable_issue (FILE *, int, rtx, int);
397 static int mips_adjust_cost (rtx, rtx, rtx, int);
398 static int mips_issue_rate (void);
399 static int mips_multipass_dfa_lookahead (void);
400 static void mips_init_libfuncs (void);
401 static void mips_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
403 static tree mips_build_builtin_va_list (void);
404 static tree mips_gimplify_va_arg_expr (tree, tree, tree *, tree *);
405 static bool mips_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode mode,
407 static bool mips_callee_copies (CUMULATIVE_ARGS *, enum machine_mode mode,
409 static int mips_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode mode,
411 static bool mips_valid_pointer_mode (enum machine_mode);
412 static bool mips_vector_mode_supported_p (enum machine_mode);
413 static rtx mips_prepare_builtin_arg (enum insn_code, unsigned int, tree, unsigned int);
414 static rtx mips_prepare_builtin_target (enum insn_code, unsigned int, rtx);
415 static rtx mips_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
416 static void mips_init_builtins (void);
417 static rtx mips_expand_builtin_direct (enum insn_code, rtx, tree, bool);
418 static rtx mips_expand_builtin_movtf (enum mips_builtin_type,
419 enum insn_code, enum mips_fp_condition,
421 static rtx mips_expand_builtin_compare (enum mips_builtin_type,
422 enum insn_code, enum mips_fp_condition,
424 static rtx mips_expand_builtin_bposge (enum mips_builtin_type, rtx);
425 static void mips_encode_section_info (tree, rtx, int);
426 static void mips_extra_live_on_entry (bitmap);
427 static int mips_comp_type_attributes (tree, tree);
428 static int mips_mode_rep_extended (enum machine_mode, enum machine_mode);
429 static bool mips_offset_within_alignment_p (rtx, HOST_WIDE_INT);
430 static void mips_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
432 /* Structure to be filled in by compute_frame_size with register
433 save masks, and offsets for the current function. */
435 struct mips_frame_info GTY(())
437 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
438 HOST_WIDE_INT var_size; /* # bytes that variables take up */
439 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
440 HOST_WIDE_INT cprestore_size; /* # bytes that the .cprestore slot takes up */
441 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
442 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
443 unsigned int mask; /* mask of saved gp registers */
444 unsigned int fmask; /* mask of saved fp registers */
445 HOST_WIDE_INT gp_save_offset; /* offset from vfp to store gp registers */
446 HOST_WIDE_INT fp_save_offset; /* offset from vfp to store fp registers */
447 HOST_WIDE_INT gp_sp_offset; /* offset from new sp to store gp registers */
448 HOST_WIDE_INT fp_sp_offset; /* offset from new sp to store fp registers */
449 bool initialized; /* true if frame size already calculated */
450 int num_gp; /* number of gp registers saved */
451 int num_fp; /* number of fp registers saved */
454 struct machine_function GTY(()) {
455 /* Pseudo-reg holding the value of $28 in a mips16 function which
456 refers to GP relative global variables. */
457 rtx mips16_gp_pseudo_rtx;
459 /* The number of extra stack bytes taken up by register varargs.
460 This area is allocated by the callee at the very top of the frame. */
463 /* Current frame information, calculated by compute_frame_size. */
464 struct mips_frame_info frame;
466 /* The register to use as the global pointer within this function. */
467 unsigned int global_pointer;
469 /* True if mips_adjust_insn_length should ignore an instruction's
471 bool ignore_hazard_length_p;
473 /* True if the whole function is suitable for .set noreorder and
475 bool all_noreorder_p;
477 /* True if the function is known to have an instruction that needs $gp. */
480 /* True if we have emitted an instruction to initialize
481 mips16_gp_pseudo_rtx. */
482 bool initialized_mips16_gp_pseudo_p;
485 /* Information about a single argument. */
488 /* True if the argument is passed in a floating-point register, or
489 would have been if we hadn't run out of registers. */
492 /* The number of words passed in registers, rounded up. */
493 unsigned int reg_words;
495 /* For EABI, the offset of the first register from GP_ARG_FIRST or
496 FP_ARG_FIRST. For other ABIs, the offset of the first register from
497 the start of the ABI's argument structure (see the CUMULATIVE_ARGS
498 comment for details).
500 The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
502 unsigned int reg_offset;
504 /* The number of words that must be passed on the stack, rounded up. */
505 unsigned int stack_words;
507 /* The offset from the start of the stack overflow area of the argument's
508 first stack word. Only meaningful when STACK_WORDS is nonzero. */
509 unsigned int stack_offset;
513 /* Information about an address described by mips_address_type.
519 REG is the base register and OFFSET is the constant offset.
522 REG is the register that contains the high part of the address,
523 OFFSET is the symbolic address being referenced and SYMBOL_TYPE
524 is the type of OFFSET's symbol.
527 SYMBOL_TYPE is the type of symbol being referenced. */
529 struct mips_address_info
531 enum mips_address_type type;
534 enum mips_symbol_type symbol_type;
538 /* One stage in a constant building sequence. These sequences have
542 A = A CODE[1] VALUE[1]
543 A = A CODE[2] VALUE[2]
546 where A is an accumulator, each CODE[i] is a binary rtl operation
547 and each VALUE[i] is a constant integer. */
548 struct mips_integer_op {
550 unsigned HOST_WIDE_INT value;
554 /* The largest number of operations needed to load an integer constant.
555 The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
556 When the lowest bit is clear, we can try, but reject a sequence with
557 an extra SLL at the end. */
558 #define MIPS_MAX_INTEGER_OPS 7
560 /* Information about a MIPS16e SAVE or RESTORE instruction. */
561 struct mips16e_save_restore_info {
562 /* The number of argument registers saved by a SAVE instruction.
563 0 for RESTORE instructions. */
566 /* Bit X is set if the instruction saves or restores GPR X. */
569 /* The total number of bytes to allocate. */
573 /* Global variables for machine-dependent things. */
575 /* Threshold for data being put into the small data/bss area, instead
576 of the normal data area. */
577 int mips_section_threshold = -1;
579 /* Count the number of .file directives, so that .loc is up to date. */
580 int num_source_filenames = 0;
582 /* Count the number of sdb related labels are generated (to find block
583 start and end boundaries). */
584 int sdb_label_count = 0;
586 /* Next label # for each statement for Silicon Graphics IRIS systems. */
589 /* Name of the file containing the current function. */
590 const char *current_function_file = "";
592 /* Number of nested .set noreorder, noat, nomacro, and volatile requests. */
598 /* The next branch instruction is a branch likely, not branch normal. */
599 int mips_branch_likely;
601 /* The operands passed to the last cmpMM expander. */
604 /* The target cpu for code generation. */
605 enum processor_type mips_arch;
606 const struct mips_cpu_info *mips_arch_info;
608 /* The target cpu for optimization and scheduling. */
609 enum processor_type mips_tune;
610 const struct mips_cpu_info *mips_tune_info;
612 /* Which instruction set architecture to use. */
615 /* Which ABI to use. */
616 int mips_abi = MIPS_ABI_DEFAULT;
618 /* Cost information to use. */
619 const struct mips_rtx_cost_data *mips_cost;
621 /* Whether we are generating mips16 hard float code. In mips16 mode
622 we always set TARGET_SOFT_FLOAT; this variable is nonzero if
623 -msoft-float was not specified by the user, which means that we
624 should arrange to call mips32 hard floating point code. */
625 int mips16_hard_float;
627 /* The architecture selected by -mipsN. */
628 static const struct mips_cpu_info *mips_isa_info;
630 /* If TRUE, we split addresses into their high and low parts in the RTL. */
631 int mips_split_addresses;
633 /* Mode used for saving/restoring general purpose registers. */
634 static enum machine_mode gpr_mode;
636 /* Array giving truth value on whether or not a given hard register
637 can support a given mode. */
638 char mips_hard_regno_mode_ok[(int)MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
640 /* List of all MIPS punctuation characters used by print_operand. */
641 char mips_print_operand_punct[256];
643 /* Map GCC register number to debugger register number. */
644 int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
645 int mips_dwarf_regno[FIRST_PSEUDO_REGISTER];
647 /* A copy of the original flag_delayed_branch: see override_options. */
648 static int mips_flag_delayed_branch;
650 static GTY (()) int mips_output_filename_first_time = 1;
652 /* mips_split_p[X] is true if symbols of type X can be split by
653 mips_split_symbol(). */
654 bool mips_split_p[NUM_SYMBOL_TYPES];
656 /* mips_lo_relocs[X] is the relocation to use when a symbol of type X
657 appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
658 if they are matched by a special .md file pattern. */
659 static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];
661 /* Likewise for HIGHs. */
662 static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
664 /* Map hard register number to register class */
665 const enum reg_class mips_regno_to_class[] =
667 LEA_REGS, LEA_REGS, M16_NA_REGS, V1_REG,
668 M16_REGS, M16_REGS, M16_REGS, M16_REGS,
669 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
670 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
671 M16_NA_REGS, M16_NA_REGS, LEA_REGS, LEA_REGS,
672 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
673 T_REG, PIC_FN_ADDR_REG, LEA_REGS, LEA_REGS,
674 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
675 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
676 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
677 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
678 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
679 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
680 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
681 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
682 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
683 MD0_REG, MD1_REG, NO_REGS, ST_REGS,
684 ST_REGS, ST_REGS, ST_REGS, ST_REGS,
685 ST_REGS, ST_REGS, ST_REGS, NO_REGS,
686 NO_REGS, ALL_REGS, ALL_REGS, NO_REGS,
687 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
688 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
689 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
690 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
691 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
692 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
693 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
694 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
695 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
696 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
697 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
698 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
699 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
700 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
701 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
702 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
703 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
704 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
705 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
706 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
707 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
708 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
709 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
710 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
711 DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS,
712 DSP_ACC_REGS, DSP_ACC_REGS, ALL_REGS, ALL_REGS,
713 ALL_REGS, ALL_REGS, ALL_REGS, ALL_REGS
716 /* Table of machine dependent attributes. */
717 const struct attribute_spec mips_attribute_table[] =
719 { "long_call", 0, 0, false, true, true, NULL },
720 { "far", 0, 0, false, true, true, NULL },
721 { "near", 0, 0, false, true, true, NULL },
722 { NULL, 0, 0, false, false, false, NULL }
725 /* A table describing all the processors gcc knows about. Names are
726 matched in the order listed. The first mention of an ISA level is
727 taken as the canonical name for that ISA.
729 To ease comparison, please keep this table in the same order as
730 gas's mips_cpu_info_table[]. Please also make sure that
731 MIPS_ISA_LEVEL_SPEC handles all -march options correctly. */
732 const struct mips_cpu_info mips_cpu_info_table[] = {
733 /* Entries for generic ISAs */
734 { "mips1", PROCESSOR_R3000, 1 },
735 { "mips2", PROCESSOR_R6000, 2 },
736 { "mips3", PROCESSOR_R4000, 3 },
737 { "mips4", PROCESSOR_R8000, 4 },
738 { "mips32", PROCESSOR_4KC, 32 },
739 { "mips32r2", PROCESSOR_M4K, 33 },
740 { "mips64", PROCESSOR_5KC, 64 },
743 { "r3000", PROCESSOR_R3000, 1 },
744 { "r2000", PROCESSOR_R3000, 1 }, /* = r3000 */
745 { "r3900", PROCESSOR_R3900, 1 },
748 { "r6000", PROCESSOR_R6000, 2 },
751 { "r4000", PROCESSOR_R4000, 3 },
752 { "vr4100", PROCESSOR_R4100, 3 },
753 { "vr4111", PROCESSOR_R4111, 3 },
754 { "vr4120", PROCESSOR_R4120, 3 },
755 { "vr4130", PROCESSOR_R4130, 3 },
756 { "vr4300", PROCESSOR_R4300, 3 },
757 { "r4400", PROCESSOR_R4000, 3 }, /* = r4000 */
758 { "r4600", PROCESSOR_R4600, 3 },
759 { "orion", PROCESSOR_R4600, 3 }, /* = r4600 */
760 { "r4650", PROCESSOR_R4650, 3 },
763 { "r8000", PROCESSOR_R8000, 4 },
764 { "vr5000", PROCESSOR_R5000, 4 },
765 { "vr5400", PROCESSOR_R5400, 4 },
766 { "vr5500", PROCESSOR_R5500, 4 },
767 { "rm7000", PROCESSOR_R7000, 4 },
768 { "rm9000", PROCESSOR_R9000, 4 },
771 { "4kc", PROCESSOR_4KC, 32 },
772 { "4km", PROCESSOR_4KC, 32 }, /* = 4kc */
773 { "4kp", PROCESSOR_4KP, 32 },
774 { "4ksc", PROCESSOR_4KC, 32 },
776 /* MIPS32 Release 2 */
777 { "m4k", PROCESSOR_M4K, 33 },
778 { "4kec", PROCESSOR_4KC, 33 },
779 { "4kem", PROCESSOR_4KC, 33 },
780 { "4kep", PROCESSOR_4KP, 33 },
781 { "4ksd", PROCESSOR_4KC, 33 },
783 { "24kc", PROCESSOR_24KC, 33 },
784 { "24kf2_1", PROCESSOR_24KF2_1, 33 },
785 { "24kf", PROCESSOR_24KF2_1, 33 },
786 { "24kf1_1", PROCESSOR_24KF1_1, 33 },
787 { "24kfx", PROCESSOR_24KF1_1, 33 },
788 { "24kx", PROCESSOR_24KF1_1, 33 },
790 { "24kec", PROCESSOR_24KC, 33 }, /* 24K with DSP */
791 { "24kef2_1", PROCESSOR_24KF2_1, 33 },
792 { "24kef", PROCESSOR_24KF2_1, 33 },
793 { "24kef1_1", PROCESSOR_24KF1_1, 33 },
794 { "24kefx", PROCESSOR_24KF1_1, 33 },
795 { "24kex", PROCESSOR_24KF1_1, 33 },
797 { "34kc", PROCESSOR_24KC, 33 }, /* 34K with MT/DSP */
798 { "34kf2_1", PROCESSOR_24KF2_1, 33 },
799 { "34kf", PROCESSOR_24KF2_1, 33 },
800 { "34kf1_1", PROCESSOR_24KF1_1, 33 },
801 { "34kfx", PROCESSOR_24KF1_1, 33 },
802 { "34kx", PROCESSOR_24KF1_1, 33 },
804 { "74kc", PROCESSOR_74KC, 33 }, /* 74K with DSPr2 */
805 { "74kf2_1", PROCESSOR_74KF2_1, 33 },
806 { "74kf", PROCESSOR_74KF2_1, 33 },
807 { "74kf1_1", PROCESSOR_74KF1_1, 33 },
808 { "74kfx", PROCESSOR_74KF1_1, 33 },
809 { "74kx", PROCESSOR_74KF1_1, 33 },
810 { "74kf3_2", PROCESSOR_74KF3_2, 33 },
813 { "5kc", PROCESSOR_5KC, 64 },
814 { "5kf", PROCESSOR_5KF, 64 },
815 { "20kc", PROCESSOR_20KC, 64 },
816 { "sb1", PROCESSOR_SB1, 64 },
817 { "sb1a", PROCESSOR_SB1A, 64 },
818 { "sr71000", PROCESSOR_SR71000, 64 },
824 /* Default costs. If these are used for a processor we should look
825 up the actual costs. */
826 #define DEFAULT_COSTS COSTS_N_INSNS (6), /* fp_add */ \
827 COSTS_N_INSNS (7), /* fp_mult_sf */ \
828 COSTS_N_INSNS (8), /* fp_mult_df */ \
829 COSTS_N_INSNS (23), /* fp_div_sf */ \
830 COSTS_N_INSNS (36), /* fp_div_df */ \
831 COSTS_N_INSNS (10), /* int_mult_si */ \
832 COSTS_N_INSNS (10), /* int_mult_di */ \
833 COSTS_N_INSNS (69), /* int_div_si */ \
834 COSTS_N_INSNS (69), /* int_div_di */ \
835 2, /* branch_cost */ \
836 4 /* memory_latency */
838 /* Need to replace these with the costs of calling the appropriate
840 #define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */ \
841 COSTS_N_INSNS (256), /* fp_mult_sf */ \
842 COSTS_N_INSNS (256), /* fp_mult_df */ \
843 COSTS_N_INSNS (256), /* fp_div_sf */ \
844 COSTS_N_INSNS (256) /* fp_div_df */
846 static struct mips_rtx_cost_data const mips_rtx_cost_optimize_size =
848 COSTS_N_INSNS (1), /* fp_add */
849 COSTS_N_INSNS (1), /* fp_mult_sf */
850 COSTS_N_INSNS (1), /* fp_mult_df */
851 COSTS_N_INSNS (1), /* fp_div_sf */
852 COSTS_N_INSNS (1), /* fp_div_df */
853 COSTS_N_INSNS (1), /* int_mult_si */
854 COSTS_N_INSNS (1), /* int_mult_di */
855 COSTS_N_INSNS (1), /* int_div_si */
856 COSTS_N_INSNS (1), /* int_div_di */
858 4 /* memory_latency */
861 static struct mips_rtx_cost_data const mips_rtx_cost_data[PROCESSOR_MAX] =
864 COSTS_N_INSNS (2), /* fp_add */
865 COSTS_N_INSNS (4), /* fp_mult_sf */
866 COSTS_N_INSNS (5), /* fp_mult_df */
867 COSTS_N_INSNS (12), /* fp_div_sf */
868 COSTS_N_INSNS (19), /* fp_div_df */
869 COSTS_N_INSNS (12), /* int_mult_si */
870 COSTS_N_INSNS (12), /* int_mult_di */
871 COSTS_N_INSNS (35), /* int_div_si */
872 COSTS_N_INSNS (35), /* int_div_di */
874 4 /* memory_latency */
879 COSTS_N_INSNS (6), /* int_mult_si */
880 COSTS_N_INSNS (6), /* int_mult_di */
881 COSTS_N_INSNS (36), /* int_div_si */
882 COSTS_N_INSNS (36), /* int_div_di */
884 4 /* memory_latency */
888 COSTS_N_INSNS (36), /* int_mult_si */
889 COSTS_N_INSNS (36), /* int_mult_di */
890 COSTS_N_INSNS (37), /* int_div_si */
891 COSTS_N_INSNS (37), /* int_div_di */
893 4 /* memory_latency */
897 COSTS_N_INSNS (4), /* int_mult_si */
898 COSTS_N_INSNS (11), /* int_mult_di */
899 COSTS_N_INSNS (36), /* int_div_si */
900 COSTS_N_INSNS (68), /* int_div_di */
902 4 /* memory_latency */
905 COSTS_N_INSNS (4), /* fp_add */
906 COSTS_N_INSNS (4), /* fp_mult_sf */
907 COSTS_N_INSNS (5), /* fp_mult_df */
908 COSTS_N_INSNS (17), /* fp_div_sf */
909 COSTS_N_INSNS (32), /* fp_div_df */
910 COSTS_N_INSNS (4), /* int_mult_si */
911 COSTS_N_INSNS (11), /* int_mult_di */
912 COSTS_N_INSNS (36), /* int_div_si */
913 COSTS_N_INSNS (68), /* int_div_di */
915 4 /* memory_latency */
918 COSTS_N_INSNS (4), /* fp_add */
919 COSTS_N_INSNS (4), /* fp_mult_sf */
920 COSTS_N_INSNS (5), /* fp_mult_df */
921 COSTS_N_INSNS (17), /* fp_div_sf */
922 COSTS_N_INSNS (32), /* fp_div_df */
923 COSTS_N_INSNS (4), /* int_mult_si */
924 COSTS_N_INSNS (7), /* int_mult_di */
925 COSTS_N_INSNS (42), /* int_div_si */
926 COSTS_N_INSNS (72), /* int_div_di */
928 4 /* memory_latency */
932 COSTS_N_INSNS (5), /* int_mult_si */
933 COSTS_N_INSNS (5), /* int_mult_di */
934 COSTS_N_INSNS (41), /* int_div_si */
935 COSTS_N_INSNS (41), /* int_div_di */
937 4 /* memory_latency */
940 COSTS_N_INSNS (8), /* fp_add */
941 COSTS_N_INSNS (8), /* fp_mult_sf */
942 COSTS_N_INSNS (10), /* fp_mult_df */
943 COSTS_N_INSNS (34), /* fp_div_sf */
944 COSTS_N_INSNS (64), /* fp_div_df */
945 COSTS_N_INSNS (5), /* int_mult_si */
946 COSTS_N_INSNS (5), /* int_mult_di */
947 COSTS_N_INSNS (41), /* int_div_si */
948 COSTS_N_INSNS (41), /* int_div_di */
950 4 /* memory_latency */
953 COSTS_N_INSNS (4), /* fp_add */
954 COSTS_N_INSNS (4), /* fp_mult_sf */
955 COSTS_N_INSNS (5), /* fp_mult_df */
956 COSTS_N_INSNS (17), /* fp_div_sf */
957 COSTS_N_INSNS (32), /* fp_div_df */
958 COSTS_N_INSNS (5), /* int_mult_si */
959 COSTS_N_INSNS (5), /* int_mult_di */
960 COSTS_N_INSNS (41), /* int_div_si */
961 COSTS_N_INSNS (41), /* int_div_di */
963 4 /* memory_latency */
967 COSTS_N_INSNS (5), /* int_mult_si */
968 COSTS_N_INSNS (5), /* int_mult_di */
969 COSTS_N_INSNS (41), /* int_div_si */
970 COSTS_N_INSNS (41), /* int_div_di */
972 4 /* memory_latency */
975 COSTS_N_INSNS (8), /* fp_add */
976 COSTS_N_INSNS (8), /* fp_mult_sf */
977 COSTS_N_INSNS (10), /* fp_mult_df */
978 COSTS_N_INSNS (34), /* fp_div_sf */
979 COSTS_N_INSNS (64), /* fp_div_df */
980 COSTS_N_INSNS (5), /* int_mult_si */
981 COSTS_N_INSNS (5), /* int_mult_di */
982 COSTS_N_INSNS (41), /* int_div_si */
983 COSTS_N_INSNS (41), /* int_div_di */
985 4 /* memory_latency */
988 COSTS_N_INSNS (4), /* fp_add */
989 COSTS_N_INSNS (4), /* fp_mult_sf */
990 COSTS_N_INSNS (5), /* fp_mult_df */
991 COSTS_N_INSNS (17), /* fp_div_sf */
992 COSTS_N_INSNS (32), /* fp_div_df */
993 COSTS_N_INSNS (5), /* int_mult_si */
994 COSTS_N_INSNS (5), /* int_mult_di */
995 COSTS_N_INSNS (41), /* int_div_si */
996 COSTS_N_INSNS (41), /* int_div_di */
998 4 /* memory_latency */
1001 COSTS_N_INSNS (6), /* fp_add */
1002 COSTS_N_INSNS (6), /* fp_mult_sf */
1003 COSTS_N_INSNS (7), /* fp_mult_df */
1004 COSTS_N_INSNS (25), /* fp_div_sf */
1005 COSTS_N_INSNS (48), /* fp_div_df */
1006 COSTS_N_INSNS (5), /* int_mult_si */
1007 COSTS_N_INSNS (5), /* int_mult_di */
1008 COSTS_N_INSNS (41), /* int_div_si */
1009 COSTS_N_INSNS (41), /* int_div_di */
1010 1, /* branch_cost */
1011 4 /* memory_latency */
1017 COSTS_N_INSNS (2), /* fp_add */
1018 COSTS_N_INSNS (4), /* fp_mult_sf */
1019 COSTS_N_INSNS (5), /* fp_mult_df */
1020 COSTS_N_INSNS (12), /* fp_div_sf */
1021 COSTS_N_INSNS (19), /* fp_div_df */
1022 COSTS_N_INSNS (2), /* int_mult_si */
1023 COSTS_N_INSNS (2), /* int_mult_di */
1024 COSTS_N_INSNS (35), /* int_div_si */
1025 COSTS_N_INSNS (35), /* int_div_di */
1026 1, /* branch_cost */
1027 4 /* memory_latency */
1030 COSTS_N_INSNS (3), /* fp_add */
1031 COSTS_N_INSNS (5), /* fp_mult_sf */
1032 COSTS_N_INSNS (6), /* fp_mult_df */
1033 COSTS_N_INSNS (15), /* fp_div_sf */
1034 COSTS_N_INSNS (16), /* fp_div_df */
1035 COSTS_N_INSNS (17), /* int_mult_si */
1036 COSTS_N_INSNS (17), /* int_mult_di */
1037 COSTS_N_INSNS (38), /* int_div_si */
1038 COSTS_N_INSNS (38), /* int_div_di */
1039 2, /* branch_cost */
1040 6 /* memory_latency */
1043 COSTS_N_INSNS (6), /* fp_add */
1044 COSTS_N_INSNS (7), /* fp_mult_sf */
1045 COSTS_N_INSNS (8), /* fp_mult_df */
1046 COSTS_N_INSNS (23), /* fp_div_sf */
1047 COSTS_N_INSNS (36), /* fp_div_df */
1048 COSTS_N_INSNS (10), /* int_mult_si */
1049 COSTS_N_INSNS (10), /* int_mult_di */
1050 COSTS_N_INSNS (69), /* int_div_si */
1051 COSTS_N_INSNS (69), /* int_div_di */
1052 2, /* branch_cost */
1053 6 /* memory_latency */
1065 /* The only costs that appear to be updated here are
1066 integer multiplication. */
1068 COSTS_N_INSNS (4), /* int_mult_si */
1069 COSTS_N_INSNS (6), /* int_mult_di */
1070 COSTS_N_INSNS (69), /* int_div_si */
1071 COSTS_N_INSNS (69), /* int_div_di */
1072 1, /* branch_cost */
1073 4 /* memory_latency */
1085 COSTS_N_INSNS (6), /* fp_add */
1086 COSTS_N_INSNS (4), /* fp_mult_sf */
1087 COSTS_N_INSNS (5), /* fp_mult_df */
1088 COSTS_N_INSNS (23), /* fp_div_sf */
1089 COSTS_N_INSNS (36), /* fp_div_df */
1090 COSTS_N_INSNS (5), /* int_mult_si */
1091 COSTS_N_INSNS (5), /* int_mult_di */
1092 COSTS_N_INSNS (36), /* int_div_si */
1093 COSTS_N_INSNS (36), /* int_div_di */
1094 1, /* branch_cost */
1095 4 /* memory_latency */
1098 COSTS_N_INSNS (6), /* fp_add */
1099 COSTS_N_INSNS (5), /* fp_mult_sf */
1100 COSTS_N_INSNS (6), /* fp_mult_df */
1101 COSTS_N_INSNS (30), /* fp_div_sf */
1102 COSTS_N_INSNS (59), /* fp_div_df */
1103 COSTS_N_INSNS (3), /* int_mult_si */
1104 COSTS_N_INSNS (4), /* int_mult_di */
1105 COSTS_N_INSNS (42), /* int_div_si */
1106 COSTS_N_INSNS (74), /* int_div_di */
1107 1, /* branch_cost */
1108 4 /* memory_latency */
1111 COSTS_N_INSNS (6), /* fp_add */
1112 COSTS_N_INSNS (5), /* fp_mult_sf */
1113 COSTS_N_INSNS (6), /* fp_mult_df */
1114 COSTS_N_INSNS (30), /* fp_div_sf */
1115 COSTS_N_INSNS (59), /* fp_div_df */
1116 COSTS_N_INSNS (5), /* int_mult_si */
1117 COSTS_N_INSNS (9), /* int_mult_di */
1118 COSTS_N_INSNS (42), /* int_div_si */
1119 COSTS_N_INSNS (74), /* int_div_di */
1120 1, /* branch_cost */
1121 4 /* memory_latency */
1124 /* The only costs that are changed here are
1125 integer multiplication. */
1126 COSTS_N_INSNS (6), /* fp_add */
1127 COSTS_N_INSNS (7), /* fp_mult_sf */
1128 COSTS_N_INSNS (8), /* fp_mult_df */
1129 COSTS_N_INSNS (23), /* fp_div_sf */
1130 COSTS_N_INSNS (36), /* fp_div_df */
1131 COSTS_N_INSNS (5), /* int_mult_si */
1132 COSTS_N_INSNS (9), /* int_mult_di */
1133 COSTS_N_INSNS (69), /* int_div_si */
1134 COSTS_N_INSNS (69), /* int_div_di */
1135 1, /* branch_cost */
1136 4 /* memory_latency */
1142 /* The only costs that are changed here are
1143 integer multiplication. */
1144 COSTS_N_INSNS (6), /* fp_add */
1145 COSTS_N_INSNS (7), /* fp_mult_sf */
1146 COSTS_N_INSNS (8), /* fp_mult_df */
1147 COSTS_N_INSNS (23), /* fp_div_sf */
1148 COSTS_N_INSNS (36), /* fp_div_df */
1149 COSTS_N_INSNS (3), /* int_mult_si */
1150 COSTS_N_INSNS (8), /* int_mult_di */
1151 COSTS_N_INSNS (69), /* int_div_si */
1152 COSTS_N_INSNS (69), /* int_div_di */
1153 1, /* branch_cost */
1154 4 /* memory_latency */
1157 /* These costs are the same as the SB-1A below. */
1158 COSTS_N_INSNS (4), /* fp_add */
1159 COSTS_N_INSNS (4), /* fp_mult_sf */
1160 COSTS_N_INSNS (4), /* fp_mult_df */
1161 COSTS_N_INSNS (24), /* fp_div_sf */
1162 COSTS_N_INSNS (32), /* fp_div_df */
1163 COSTS_N_INSNS (3), /* int_mult_si */
1164 COSTS_N_INSNS (4), /* int_mult_di */
1165 COSTS_N_INSNS (36), /* int_div_si */
1166 COSTS_N_INSNS (68), /* int_div_di */
1167 1, /* branch_cost */
1168 4 /* memory_latency */
1171 /* These costs are the same as the SB-1 above. */
1172 COSTS_N_INSNS (4), /* fp_add */
1173 COSTS_N_INSNS (4), /* fp_mult_sf */
1174 COSTS_N_INSNS (4), /* fp_mult_df */
1175 COSTS_N_INSNS (24), /* fp_div_sf */
1176 COSTS_N_INSNS (32), /* fp_div_df */
1177 COSTS_N_INSNS (3), /* int_mult_si */
1178 COSTS_N_INSNS (4), /* int_mult_di */
1179 COSTS_N_INSNS (36), /* int_div_si */
1180 COSTS_N_INSNS (68), /* int_div_di */
1181 1, /* branch_cost */
1182 4 /* memory_latency */
1189 /* If a MIPS16e SAVE or RESTORE instruction saves or restores register
1190 mips16e_s2_s8_regs[X], it must also save the registers in indexes
1191 X + 1 onwards. Likewise mips16e_a0_a3_regs. */
1192 static const unsigned char mips16e_s2_s8_regs[] = {
1193 30, 23, 22, 21, 20, 19, 18
1195 static const unsigned char mips16e_a0_a3_regs[] = {
1199 /* A list of the registers that can be saved by the MIPS16e SAVE instruction,
1200 ordered from the uppermost in memory to the lowest in memory. */
1201 static const unsigned char mips16e_save_restore_regs[] = {
1202 31, 30, 23, 22, 21, 20, 19, 18, 17, 16, 7, 6, 5, 4
1205 /* Nonzero if -march should decide the default value of MASK_SOFT_FLOAT. */
1206 #ifndef MIPS_MARCH_CONTROLS_SOFT_FLOAT
1207 #define MIPS_MARCH_CONTROLS_SOFT_FLOAT 0
1210 /* Initialize the GCC target structure. */
1211 #undef TARGET_ASM_ALIGNED_HI_OP
1212 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
1213 #undef TARGET_ASM_ALIGNED_SI_OP
1214 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
1215 #undef TARGET_ASM_ALIGNED_DI_OP
1216 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
1218 #undef TARGET_ASM_FUNCTION_PROLOGUE
1219 #define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
1220 #undef TARGET_ASM_FUNCTION_EPILOGUE
1221 #define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
1222 #undef TARGET_ASM_SELECT_RTX_SECTION
1223 #define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
1224 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
1225 #define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
1227 #undef TARGET_SCHED_REORDER
1228 #define TARGET_SCHED_REORDER mips_sched_reorder
1229 #undef TARGET_SCHED_VARIABLE_ISSUE
1230 #define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
1231 #undef TARGET_SCHED_ADJUST_COST
1232 #define TARGET_SCHED_ADJUST_COST mips_adjust_cost
1233 #undef TARGET_SCHED_ISSUE_RATE
1234 #define TARGET_SCHED_ISSUE_RATE mips_issue_rate
1235 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1236 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
1237 mips_multipass_dfa_lookahead
1239 #undef TARGET_DEFAULT_TARGET_FLAGS
1240 #define TARGET_DEFAULT_TARGET_FLAGS \
1242 | TARGET_CPU_DEFAULT \
1243 | TARGET_ENDIAN_DEFAULT \
1244 | TARGET_FP_EXCEPTIONS_DEFAULT \
1245 | MASK_CHECK_ZERO_DIV \
1247 #undef TARGET_HANDLE_OPTION
1248 #define TARGET_HANDLE_OPTION mips_handle_option
1250 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1251 #define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
1253 #undef TARGET_VALID_POINTER_MODE
1254 #define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
1255 #undef TARGET_RTX_COSTS
1256 #define TARGET_RTX_COSTS mips_rtx_costs
1257 #undef TARGET_ADDRESS_COST
1258 #define TARGET_ADDRESS_COST mips_address_cost
1260 #undef TARGET_IN_SMALL_DATA_P
1261 #define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
1263 #undef TARGET_MACHINE_DEPENDENT_REORG
1264 #define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
1266 #undef TARGET_ASM_FILE_START
1267 #define TARGET_ASM_FILE_START mips_file_start
1268 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
1269 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
1271 #undef TARGET_INIT_LIBFUNCS
1272 #define TARGET_INIT_LIBFUNCS mips_init_libfuncs
1274 #undef TARGET_BUILD_BUILTIN_VA_LIST
1275 #define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
1276 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1277 #define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
1279 #undef TARGET_PROMOTE_FUNCTION_ARGS
1280 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
1281 #undef TARGET_PROMOTE_FUNCTION_RETURN
1282 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
1283 #undef TARGET_PROMOTE_PROTOTYPES
1284 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
1286 #undef TARGET_RETURN_IN_MEMORY
1287 #define TARGET_RETURN_IN_MEMORY mips_return_in_memory
1288 #undef TARGET_RETURN_IN_MSB
1289 #define TARGET_RETURN_IN_MSB mips_return_in_msb
1291 #undef TARGET_ASM_OUTPUT_MI_THUNK
1292 #define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
1293 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1294 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
1296 #undef TARGET_SETUP_INCOMING_VARARGS
1297 #define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
1298 #undef TARGET_STRICT_ARGUMENT_NAMING
1299 #define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
1300 #undef TARGET_MUST_PASS_IN_STACK
1301 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
1302 #undef TARGET_PASS_BY_REFERENCE
1303 #define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
1304 #undef TARGET_CALLEE_COPIES
1305 #define TARGET_CALLEE_COPIES mips_callee_copies
1306 #undef TARGET_ARG_PARTIAL_BYTES
1307 #define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
1309 #undef TARGET_MODE_REP_EXTENDED
1310 #define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended
1312 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1313 #define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
1315 #undef TARGET_INIT_BUILTINS
1316 #define TARGET_INIT_BUILTINS mips_init_builtins
1317 #undef TARGET_EXPAND_BUILTIN
1318 #define TARGET_EXPAND_BUILTIN mips_expand_builtin
1320 #undef TARGET_HAVE_TLS
1321 #define TARGET_HAVE_TLS HAVE_AS_TLS
1323 #undef TARGET_CANNOT_FORCE_CONST_MEM
1324 #define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem
1326 #undef TARGET_ENCODE_SECTION_INFO
1327 #define TARGET_ENCODE_SECTION_INFO mips_encode_section_info
1329 #undef TARGET_ATTRIBUTE_TABLE
1330 #define TARGET_ATTRIBUTE_TABLE mips_attribute_table
1332 #undef TARGET_EXTRA_LIVE_ON_ENTRY
1333 #define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry
1335 #undef TARGET_MIN_ANCHOR_OFFSET
1336 #define TARGET_MIN_ANCHOR_OFFSET -32768
1337 #undef TARGET_MAX_ANCHOR_OFFSET
1338 #define TARGET_MAX_ANCHOR_OFFSET 32767
1339 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1340 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
1341 #undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
1342 #define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p
1344 #undef TARGET_COMP_TYPE_ATTRIBUTES
1345 #define TARGET_COMP_TYPE_ATTRIBUTES mips_comp_type_attributes
1347 #ifdef HAVE_AS_DTPRELWORD
1348 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1349 #define TARGET_ASM_OUTPUT_DWARF_DTPREL mips_output_dwarf_dtprel
1352 struct gcc_target targetm = TARGET_INITIALIZER;
1355 /* Predicates to test for presence of "near" and "far"/"long_call"
1356 attributes on the given TYPE. */
1359 mips_near_type_p (tree type)
1361 return lookup_attribute ("near", TYPE_ATTRIBUTES (type)) != NULL;
1365 mips_far_type_p (tree type)
1367 return (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type)) != NULL
1368 || lookup_attribute ("far", TYPE_ATTRIBUTES (type)) != NULL);
1372 /* Return 0 if the attributes for two types are incompatible, 1 if they
1373 are compatible, and 2 if they are nearly compatible (which causes a
1374 warning to be generated). */
1377 mips_comp_type_attributes (tree type1, tree type2)
1379 /* Check for mismatch of non-default calling convention. */
1380 if (TREE_CODE (type1) != FUNCTION_TYPE)
1383 /* Disallow mixed near/far attributes. */
1384 if (mips_far_type_p (type1) && mips_near_type_p (type2))
1386 if (mips_near_type_p (type1) && mips_far_type_p (type2))
1392 /* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
1393 and *OFFSET_PTR. Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise. */
1396 mips_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr)
1398 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
1400 *base_ptr = XEXP (x, 0);
1401 *offset_ptr = INTVAL (XEXP (x, 1));
1410 /* Return true if SYMBOL_REF X is associated with a global symbol
1411 (in the STB_GLOBAL sense). */
1414 mips_global_symbol_p (rtx x)
1418 decl = SYMBOL_REF_DECL (x);
1420 return !SYMBOL_REF_LOCAL_P (x);
1422 /* Weakref symbols are not TREE_PUBLIC, but their targets are global
1423 or weak symbols. Relocations in the object file will be against
1424 the target symbol, so it's that symbol's binding that matters here. */
1425 return DECL_P (decl) && (TREE_PUBLIC (decl) || DECL_WEAK (decl));
1428 /* Return true if SYMBOL_REF X binds locally. */
1431 mips_symbol_binds_local_p (rtx x)
1433 return (SYMBOL_REF_DECL (x)
1434 ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
1435 : SYMBOL_REF_LOCAL_P (x));
1438 /* Classify symbol X, which must be a SYMBOL_REF or a LABEL_REF. */
1440 static enum mips_symbol_type
1441 mips_classify_symbol (rtx x)
1444 return SYMBOL_GOT_DISP;
1446 if (GET_CODE (x) == LABEL_REF)
1449 return SYMBOL_CONSTANT_POOL;
1450 if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
1451 return SYMBOL_GOT_PAGE_OFST;
1452 return SYMBOL_GENERAL;
1455 gcc_assert (GET_CODE (x) == SYMBOL_REF);
1457 if (SYMBOL_REF_TLS_MODEL (x))
1460 if (CONSTANT_POOL_ADDRESS_P (x))
1463 return SYMBOL_CONSTANT_POOL;
1465 if (!TARGET_EMBEDDED_DATA
1466 && GET_MODE_SIZE (get_pool_mode (x)) <= mips_section_threshold)
1467 return SYMBOL_SMALL_DATA;
1470 /* Do not use small-data accesses for weak symbols; they may end up
1472 if (SYMBOL_REF_SMALL_P (x)
1473 && !SYMBOL_REF_WEAK (x))
1474 return SYMBOL_SMALL_DATA;
1476 if (TARGET_ABICALLS)
1478 /* Don't use GOT accesses for locally-binding symbols; we can use
1479 %hi and %lo instead. */
1480 if (TARGET_ABSOLUTE_ABICALLS && mips_symbol_binds_local_p (x))
1481 return SYMBOL_GENERAL;
1483 /* There are three cases to consider:
1485 - o32 PIC (either with or without explicit relocs)
1486 - n32/n64 PIC without explicit relocs
1487 - n32/n64 PIC with explicit relocs
1489 In the first case, both local and global accesses will use an
1490 R_MIPS_GOT16 relocation. We must correctly predict which of
1491 the two semantics (local or global) the assembler and linker
1492 will apply. The choice depends on the symbol's binding rather
1493 than its visibility.
1495 In the second case, the assembler will not use R_MIPS_GOT16
1496 relocations, but it chooses between local and global accesses
1497 in the same way as for o32 PIC.
1499 In the third case we have more freedom since both forms of
1500 access will work for any kind of symbol. However, there seems
1501 little point in doing things differently. */
1502 if (mips_global_symbol_p (x))
1503 return SYMBOL_GOT_DISP;
1505 return SYMBOL_GOT_PAGE_OFST;
1508 return SYMBOL_GENERAL;
1511 /* Return true if OFFSET is within the range [0, ALIGN), where ALIGN
1512 is the alignment (in bytes) of SYMBOL_REF X. */
1515 mips_offset_within_alignment_p (rtx x, HOST_WIDE_INT offset)
1517 /* If for some reason we can't get the alignment for the
1518 symbol, initializing this to one means we will only accept
1520 HOST_WIDE_INT align = 1;
1523 /* Get the alignment of the symbol we're referring to. */
1524 t = SYMBOL_REF_DECL (x);
1526 align = DECL_ALIGN_UNIT (t);
1528 return offset >= 0 && offset < align;
1531 /* Return true if X is a symbolic constant that can be calculated in
1532 the same way as a bare symbol. If it is, store the type of the
1533 symbol in *SYMBOL_TYPE. */
1536 mips_symbolic_constant_p (rtx x, enum mips_symbol_type *symbol_type)
1540 split_const (x, &x, &offset);
1541 if (UNSPEC_ADDRESS_P (x))
1543 *symbol_type = UNSPEC_ADDRESS_TYPE (x);
1544 x = UNSPEC_ADDRESS (x);
1546 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1548 *symbol_type = mips_classify_symbol (x);
1549 if (*symbol_type == SYMBOL_TLS)
1555 if (offset == const0_rtx)
1558 /* Check whether a nonzero offset is valid for the underlying
1560 switch (*symbol_type)
1562 case SYMBOL_GENERAL:
1563 case SYMBOL_64_HIGH:
1566 /* If the target has 64-bit pointers and the object file only
1567 supports 32-bit symbols, the values of those symbols will be
1568 sign-extended. In this case we can't allow an arbitrary offset
1569 in case the 32-bit value X + OFFSET has a different sign from X. */
1570 if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
1571 return offset_within_block_p (x, INTVAL (offset));
1573 /* In other cases the relocations can handle any offset. */
1576 case SYMBOL_CONSTANT_POOL:
1577 /* Allow constant pool references to be converted to LABEL+CONSTANT.
1578 In this case, we no longer have access to the underlying constant,
1579 but the original symbol-based access was known to be valid. */
1580 if (GET_CODE (x) == LABEL_REF)
1585 case SYMBOL_SMALL_DATA:
1586 /* Make sure that the offset refers to something within the
1587 same object block. This should guarantee that the final
1588 PC- or GP-relative offset is within the 16-bit limit. */
1589 return offset_within_block_p (x, INTVAL (offset));
1591 case SYMBOL_GOT_PAGE_OFST:
1592 case SYMBOL_GOTOFF_PAGE:
1593 /* If the symbol is global, the GOT entry will contain the symbol's
1594 address, and we will apply a 16-bit offset after loading it.
1595 If the symbol is local, the linker should provide enough local
1596 GOT entries for a 16-bit offset, but larger offsets may lead
1598 return SMALL_INT (offset);
1602 /* There is no carry between the HI and LO REL relocations, so the
1603 offset is only valid if we know it won't lead to such a carry. */
1604 return mips_offset_within_alignment_p (x, INTVAL (offset));
1606 case SYMBOL_GOT_DISP:
1607 case SYMBOL_GOTOFF_DISP:
1608 case SYMBOL_GOTOFF_CALL:
1609 case SYMBOL_GOTOFF_LOADGP:
1612 case SYMBOL_GOTTPREL:
1621 /* This function is used to implement REG_MODE_OK_FOR_BASE_P. */
1624 mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode, int strict)
1626 if (!HARD_REGISTER_NUM_P (regno))
1630 regno = reg_renumber[regno];
1633 /* These fake registers will be eliminated to either the stack or
1634 hard frame pointer, both of which are usually valid base registers.
1635 Reload deals with the cases where the eliminated form isn't valid. */
1636 if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
1639 /* In mips16 mode, the stack pointer can only address word and doubleword
1640 values, nothing smaller. There are two problems here:
1642 (a) Instantiating virtual registers can introduce new uses of the
1643 stack pointer. If these virtual registers are valid addresses,
1644 the stack pointer should be too.
1646 (b) Most uses of the stack pointer are not made explicit until
1647 FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
1648 We don't know until that stage whether we'll be eliminating to the
1649 stack pointer (which needs the restriction) or the hard frame
1650 pointer (which doesn't).
1652 All in all, it seems more consistent to only enforce this restriction
1653 during and after reload. */
1654 if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
1655 return !strict || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1657 return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
1661 /* Return true if X is a valid base register for the given mode.
1662 Allow only hard registers if STRICT. */
1665 mips_valid_base_register_p (rtx x, enum machine_mode mode, int strict)
1667 if (!strict && GET_CODE (x) == SUBREG)
1671 && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict));
1675 /* Return true if symbols of type SYMBOL_TYPE can directly address a value
1676 with mode MODE. This is used for both symbolic and LO_SUM addresses. */
1679 mips_symbolic_address_p (enum mips_symbol_type symbol_type,
1680 enum machine_mode mode)
1682 switch (symbol_type)
1684 case SYMBOL_GENERAL:
1685 return !TARGET_MIPS16;
1687 case SYMBOL_SMALL_DATA:
1690 case SYMBOL_CONSTANT_POOL:
1691 /* PC-relative addressing is only available for lw and ld. */
1692 return GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1694 case SYMBOL_GOT_PAGE_OFST:
1697 case SYMBOL_GOT_DISP:
1698 /* The address will have to be loaded from the GOT first. */
1701 case SYMBOL_GOTOFF_PAGE:
1702 case SYMBOL_GOTOFF_DISP:
1703 case SYMBOL_GOTOFF_CALL:
1704 case SYMBOL_GOTOFF_LOADGP:
1709 case SYMBOL_GOTTPREL:
1711 case SYMBOL_64_HIGH:
1721 /* Return true if X is a valid address for machine mode MODE. If it is,
1722 fill in INFO appropriately. STRICT is true if we should only accept
1723 hard base registers. */
1726 mips_classify_address (struct mips_address_info *info, rtx x,
1727 enum machine_mode mode, int strict)
1729 switch (GET_CODE (x))
1733 info->type = ADDRESS_REG;
1735 info->offset = const0_rtx;
1736 return mips_valid_base_register_p (info->reg, mode, strict);
1739 info->type = ADDRESS_REG;
1740 info->reg = XEXP (x, 0);
1741 info->offset = XEXP (x, 1);
1742 return (mips_valid_base_register_p (info->reg, mode, strict)
1743 && const_arith_operand (info->offset, VOIDmode));
1746 info->type = ADDRESS_LO_SUM;
1747 info->reg = XEXP (x, 0);
1748 info->offset = XEXP (x, 1);
1749 return (mips_valid_base_register_p (info->reg, mode, strict)
1750 && mips_symbolic_constant_p (info->offset, &info->symbol_type)
1751 && mips_symbolic_address_p (info->symbol_type, mode)
1752 && mips_lo_relocs[info->symbol_type] != 0);
1755 /* Small-integer addresses don't occur very often, but they
1756 are legitimate if $0 is a valid base register. */
1757 info->type = ADDRESS_CONST_INT;
1758 return !TARGET_MIPS16 && SMALL_INT (x);
1763 info->type = ADDRESS_SYMBOLIC;
1764 return (mips_symbolic_constant_p (x, &info->symbol_type)
1765 && mips_symbolic_address_p (info->symbol_type, mode)
1766 && !mips_split_p[info->symbol_type]);
1773 /* Return true if X is a thread-local symbol. */
1776 mips_tls_operand_p (rtx x)
1778 return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
1781 /* Return true if X can not be forced into a constant pool. */
1784 mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
1786 return mips_tls_operand_p (*x);
1789 /* Return true if X can not be forced into a constant pool. */
1792 mips_cannot_force_const_mem (rtx x)
1798 /* As an optimization, reject constants that mips_legitimize_move
1801 Suppose we have a multi-instruction sequence that loads constant C
1802 into register R. If R does not get allocated a hard register, and
1803 R is used in an operand that allows both registers and memory
1804 references, reload will consider forcing C into memory and using
1805 one of the instruction's memory alternatives. Returning false
1806 here will force it to use an input reload instead. */
1807 if (GET_CODE (x) == CONST_INT)
1810 split_const (x, &base, &offset);
1811 if (symbolic_operand (base, VOIDmode) && SMALL_INT (offset))
1815 if (TARGET_HAVE_TLS && for_each_rtx (&x, &mips_tls_symbol_ref_1, 0))
1821 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. MIPS16 uses per-function
1822 constant pools, but normal-mode code doesn't need to. */
1825 mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
1826 rtx x ATTRIBUTE_UNUSED)
1828 return !TARGET_MIPS16;
1831 /* Return the number of instructions needed to load a symbol of the
1832 given type into a register. If valid in an address, the same number
1833 of instructions are needed for loads and stores. Treat extended
1834 mips16 instructions as two instructions. */
1837 mips_symbol_insns (enum mips_symbol_type type)
1841 case SYMBOL_GENERAL:
1842 /* In mips16 code, general symbols must be fetched from the
1847 /* When using 64-bit symbols, we need 5 preparatory instructions,
1850 lui $at,%highest(symbol)
1851 daddiu $at,$at,%higher(symbol)
1853 daddiu $at,$at,%hi(symbol)
1856 The final address is then $at + %lo(symbol). With 32-bit
1857 symbols we just need a preparatory lui. */
1858 return (ABI_HAS_64BIT_SYMBOLS ? 6 : 2);
1860 case SYMBOL_SMALL_DATA:
1864 case SYMBOL_CONSTANT_POOL:
1865 /* This case is for mips16 only. Assume we'll need an
1866 extended instruction. */
1869 case SYMBOL_GOT_PAGE_OFST:
1870 case SYMBOL_GOT_DISP:
1871 /* Unless -funit-at-a-time is in effect, we can't be sure whether
1872 the local/global classification is accurate. See override_options
1875 The worst cases are:
1877 (1) For local symbols when generating o32 or o64 code. The assembler
1883 ...and the final address will be $at + %lo(symbol).
1885 (2) For global symbols when -mxgot. The assembler will use:
1887 lui $at,%got_hi(symbol)
1890 ...and the final address will be $at + %got_lo(symbol). */
1893 case SYMBOL_GOTOFF_PAGE:
1894 case SYMBOL_GOTOFF_DISP:
1895 case SYMBOL_GOTOFF_CALL:
1896 case SYMBOL_GOTOFF_LOADGP:
1897 case SYMBOL_64_HIGH:
1903 case SYMBOL_GOTTPREL:
1905 /* Check whether the offset is a 16- or 32-bit value. */
1906 return mips_split_p[type] ? 2 : 1;
1909 /* We don't treat a bare TLS symbol as a constant. */
1915 /* Return true if X is a legitimate $sp-based address for mode MDOE. */
1918 mips_stack_address_p (rtx x, enum machine_mode mode)
1920 struct mips_address_info addr;
1922 return (mips_classify_address (&addr, x, mode, false)
1923 && addr.type == ADDRESS_REG
1924 && addr.reg == stack_pointer_rtx);
1927 /* Return true if a value at OFFSET bytes from BASE can be accessed
1928 using an unextended mips16 instruction. MODE is the mode of the
1931 Usually the offset in an unextended instruction is a 5-bit field.
1932 The offset is unsigned and shifted left once for HIs, twice
1933 for SIs, and so on. An exception is SImode accesses off the
1934 stack pointer, which have an 8-bit immediate field. */
1937 mips16_unextended_reference_p (enum machine_mode mode, rtx base, rtx offset)
1940 && GET_CODE (offset) == CONST_INT
1941 && INTVAL (offset) >= 0
1942 && (INTVAL (offset) & (GET_MODE_SIZE (mode) - 1)) == 0)
1944 if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
1945 return INTVAL (offset) < 256 * GET_MODE_SIZE (mode);
1946 return INTVAL (offset) < 32 * GET_MODE_SIZE (mode);
1952 /* Return the number of instructions needed to load or store a value
1953 of mode MODE at X. Return 0 if X isn't valid for MODE.
1955 For mips16 code, count extended instructions as two instructions. */
1958 mips_address_insns (rtx x, enum machine_mode mode)
1960 struct mips_address_info addr;
1963 if (mode == BLKmode)
1964 /* BLKmode is used for single unaligned loads and stores. */
1967 /* Each word of a multi-word value will be accessed individually. */
1968 factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
1970 if (mips_classify_address (&addr, x, mode, false))
1975 && !mips16_unextended_reference_p (mode, addr.reg, addr.offset))
1979 case ADDRESS_LO_SUM:
1980 return (TARGET_MIPS16 ? factor * 2 : factor);
1982 case ADDRESS_CONST_INT:
1985 case ADDRESS_SYMBOLIC:
1986 return factor * mips_symbol_insns (addr.symbol_type);
1992 /* Likewise for constant X. */
1995 mips_const_insns (rtx x)
1997 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
1998 enum mips_symbol_type symbol_type;
2001 switch (GET_CODE (x))
2005 || !mips_symbolic_constant_p (XEXP (x, 0), &symbol_type)
2006 || !mips_split_p[symbol_type])
2013 /* Unsigned 8-bit constants can be loaded using an unextended
2014 LI instruction. Unsigned 16-bit constants can be loaded
2015 using an extended LI. Negative constants must be loaded
2016 using LI and then negated. */
2017 return (INTVAL (x) >= 0 && INTVAL (x) < 256 ? 1
2018 : SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
2019 : INTVAL (x) > -256 && INTVAL (x) < 0 ? 2
2020 : SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
2023 return mips_build_integer (codes, INTVAL (x));
2027 return (!TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0);
2033 /* See if we can refer to X directly. */
2034 if (mips_symbolic_constant_p (x, &symbol_type))
2035 return mips_symbol_insns (symbol_type);
2037 /* Otherwise try splitting the constant into a base and offset.
2038 16-bit offsets can be added using an extra addiu. Larger offsets
2039 must be calculated separately and then added to the base. */
2040 split_const (x, &x, &offset);
2043 int n = mips_const_insns (x);
2046 if (SMALL_INT (offset))
2049 return n + 1 + mips_build_integer (codes, INTVAL (offset));
2056 return mips_symbol_insns (mips_classify_symbol (x));
2064 /* Return the number of instructions needed for memory reference X.
2065 Count extended mips16 instructions as two instructions. */
2068 mips_fetch_insns (rtx x)
2070 gcc_assert (MEM_P (x));
2071 return mips_address_insns (XEXP (x, 0), GET_MODE (x));
2075 /* Return the number of instructions needed for an integer division. */
2078 mips_idiv_insns (void)
2083 if (TARGET_CHECK_ZERO_DIV)
2085 if (GENERATE_DIVIDE_TRAPS)
2091 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
2096 /* This function is used to implement GO_IF_LEGITIMATE_ADDRESS. It
2097 returns a nonzero value if X is a legitimate address for a memory
2098 operand of the indicated MODE. STRICT is nonzero if this function
2099 is called during reload. */
2102 mips_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
2104 struct mips_address_info addr;
2106 return mips_classify_address (&addr, x, mode, strict);
2110 /* Copy VALUE to a register and return that register. If new psuedos
2111 are allowed, copy it into a new register, otherwise use DEST. */
2114 mips_force_temporary (rtx dest, rtx value)
2116 if (can_create_pseudo_p ())
2117 return force_reg (Pmode, value);
2120 emit_move_insn (copy_rtx (dest), value);
2126 /* Return a LO_SUM expression for ADDR. TEMP is as for mips_force_temporary
2127 and is used to load the high part into a register. */
2130 mips_split_symbol (rtx temp, rtx addr)
2135 high = mips_force_temporary (temp, gen_rtx_HIGH (Pmode, copy_rtx (addr)));
2136 else if (!can_create_pseudo_p ())
2138 emit_insn (gen_load_const_gp (copy_rtx (temp)));
2142 high = mips16_gp_pseudo_reg ();
2143 return gen_rtx_LO_SUM (Pmode, high, addr);
2147 /* Return an UNSPEC address with underlying address ADDRESS and symbol
2148 type SYMBOL_TYPE. */
2151 mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
2155 split_const (address, &base, &offset);
2156 base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
2157 UNSPEC_ADDRESS_FIRST + symbol_type);
2158 if (offset != const0_rtx)
2159 base = gen_rtx_PLUS (Pmode, base, offset);
2160 return gen_rtx_CONST (Pmode, base);
2164 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
2165 high part to BASE and return the result. Just return BASE otherwise.
2166 TEMP is available as a temporary register if needed.
2168 The returned expression can be used as the first operand to a LO_SUM. */
2171 mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
2172 enum mips_symbol_type symbol_type)
2174 if (mips_split_p[symbol_type])
2176 addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
2177 addr = mips_force_temporary (temp, addr);
2178 return mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
2184 /* Return a legitimate address for REG + OFFSET. TEMP is as for
2185 mips_force_temporary; it is only needed when OFFSET is not a
2189 mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
2191 if (!SMALL_OPERAND (offset))
2196 /* Load the full offset into a register so that we can use
2197 an unextended instruction for the address itself. */
2198 high = GEN_INT (offset);
2203 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH. */
2204 high = GEN_INT (CONST_HIGH_PART (offset));
2205 offset = CONST_LOW_PART (offset);
2207 high = mips_force_temporary (temp, high);
2208 reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
2210 return plus_constant (reg, offset);
2213 /* Emit a call to __tls_get_addr. SYM is the TLS symbol we are
2214 referencing, and TYPE is the symbol type to use (either global
2215 dynamic or local dynamic). V0 is an RTX for the return value
2216 location. The entire insn sequence is returned. */
2218 static GTY(()) rtx mips_tls_symbol;
2221 mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
2223 rtx insn, loc, tga, a0;
2225 a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
2227 if (!mips_tls_symbol)
2228 mips_tls_symbol = init_one_libfunc ("__tls_get_addr");
2230 loc = mips_unspec_address (sym, type);
2234 emit_insn (gen_rtx_SET (Pmode, a0,
2235 gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc)));
2236 tga = gen_rtx_MEM (Pmode, mips_tls_symbol);
2237 insn = emit_call_insn (gen_call_value (v0, tga, const0_rtx, const0_rtx));
2238 CONST_OR_PURE_CALL_P (insn) = 1;
2239 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), v0);
2240 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
2241 insn = get_insns ();
2248 /* Generate the code to access LOC, a thread local SYMBOL_REF. The
2249 return value will be a valid address and move_operand (either a REG
2253 mips_legitimize_tls_address (rtx loc)
2255 rtx dest, insn, v0, v1, tmp1, tmp2, eqv;
2256 enum tls_model model;
2258 v0 = gen_rtx_REG (Pmode, GP_RETURN);
2259 v1 = gen_rtx_REG (Pmode, GP_RETURN + 1);
2261 model = SYMBOL_REF_TLS_MODEL (loc);
2262 /* Only TARGET_ABICALLS code can have more than one module; other
2263 code must be be static and should not use a GOT. All TLS models
2264 reduce to local exec in this situation. */
2265 if (!TARGET_ABICALLS)
2266 model = TLS_MODEL_LOCAL_EXEC;
2270 case TLS_MODEL_GLOBAL_DYNAMIC:
2271 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSGD, v0);
2272 dest = gen_reg_rtx (Pmode);
2273 emit_libcall_block (insn, dest, v0, loc);
2276 case TLS_MODEL_LOCAL_DYNAMIC:
2277 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSLDM, v0);
2278 tmp1 = gen_reg_rtx (Pmode);
2280 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2281 share the LDM result with other LD model accesses. */
2282 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2284 emit_libcall_block (insn, tmp1, v0, eqv);
2286 tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_DTPREL);
2287 dest = gen_rtx_LO_SUM (Pmode, tmp2,
2288 mips_unspec_address (loc, SYMBOL_DTPREL));
2291 case TLS_MODEL_INITIAL_EXEC:
2292 tmp1 = gen_reg_rtx (Pmode);
2293 tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL);
2294 if (Pmode == DImode)
2296 emit_insn (gen_tls_get_tp_di (v1));
2297 emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2));
2301 emit_insn (gen_tls_get_tp_si (v1));
2302 emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2));
2304 dest = gen_reg_rtx (Pmode);
2305 emit_insn (gen_add3_insn (dest, tmp1, v1));
2308 case TLS_MODEL_LOCAL_EXEC:
2309 if (Pmode == DImode)
2310 emit_insn (gen_tls_get_tp_di (v1));
2312 emit_insn (gen_tls_get_tp_si (v1));
2314 tmp1 = mips_unspec_offset_high (NULL, v1, loc, SYMBOL_TPREL);
2315 dest = gen_rtx_LO_SUM (Pmode, tmp1,
2316 mips_unspec_address (loc, SYMBOL_TPREL));
2326 /* This function is used to implement LEGITIMIZE_ADDRESS. If *XLOC can
2327 be legitimized in a way that the generic machinery might not expect,
2328 put the new address in *XLOC and return true. MODE is the mode of
2329 the memory being accessed. */
2332 mips_legitimize_address (rtx *xloc, enum machine_mode mode)
2334 enum mips_symbol_type symbol_type;
2336 if (mips_tls_operand_p (*xloc))
2338 *xloc = mips_legitimize_tls_address (*xloc);
2342 /* See if the address can split into a high part and a LO_SUM. */
2343 if (mips_symbolic_constant_p (*xloc, &symbol_type)
2344 && mips_symbolic_address_p (symbol_type, mode)
2345 && mips_split_p[symbol_type])
2347 *xloc = mips_split_symbol (0, *xloc);
2351 if (GET_CODE (*xloc) == PLUS && GET_CODE (XEXP (*xloc, 1)) == CONST_INT)
2353 /* Handle REG + CONSTANT using mips_add_offset. */
2356 reg = XEXP (*xloc, 0);
2357 if (!mips_valid_base_register_p (reg, mode, 0))
2358 reg = copy_to_mode_reg (Pmode, reg);
2359 *xloc = mips_add_offset (0, reg, INTVAL (XEXP (*xloc, 1)));
2367 /* Subroutine of mips_build_integer (with the same interface).
2368 Assume that the final action in the sequence should be a left shift. */
2371 mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
2373 unsigned int i, shift;
2375 /* Shift VALUE right until its lowest bit is set. Shift arithmetically
2376 since signed numbers are easier to load than unsigned ones. */
2378 while ((value & 1) == 0)
2379 value /= 2, shift++;
2381 i = mips_build_integer (codes, value);
2382 codes[i].code = ASHIFT;
2383 codes[i].value = shift;
2388 /* As for mips_build_shift, but assume that the final action will be
2389 an IOR or PLUS operation. */
2392 mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
2394 unsigned HOST_WIDE_INT high;
2397 high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
2398 if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
2400 /* The constant is too complex to load with a simple lui/ori pair
2401 so our goal is to clear as many trailing zeros as possible.
2402 In this case, we know bit 16 is set and that the low 16 bits
2403 form a negative number. If we subtract that number from VALUE,
2404 we will clear at least the lowest 17 bits, maybe more. */
2405 i = mips_build_integer (codes, CONST_HIGH_PART (value));
2406 codes[i].code = PLUS;
2407 codes[i].value = CONST_LOW_PART (value);
2411 i = mips_build_integer (codes, high);
2412 codes[i].code = IOR;
2413 codes[i].value = value & 0xffff;
2419 /* Fill CODES with a sequence of rtl operations to load VALUE.
2420 Return the number of operations needed. */
2423 mips_build_integer (struct mips_integer_op *codes,
2424 unsigned HOST_WIDE_INT value)
2426 if (SMALL_OPERAND (value)
2427 || SMALL_OPERAND_UNSIGNED (value)
2428 || LUI_OPERAND (value))
2430 /* The value can be loaded with a single instruction. */
2431 codes[0].code = UNKNOWN;
2432 codes[0].value = value;
2435 else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
2437 /* Either the constant is a simple LUI/ORI combination or its
2438 lowest bit is set. We don't want to shift in this case. */
2439 return mips_build_lower (codes, value);
2441 else if ((value & 0xffff) == 0)
2443 /* The constant will need at least three actions. The lowest
2444 16 bits are clear, so the final action will be a shift. */
2445 return mips_build_shift (codes, value);
2449 /* The final action could be a shift, add or inclusive OR.
2450 Rather than use a complex condition to select the best
2451 approach, try both mips_build_shift and mips_build_lower
2452 and pick the one that gives the shortest sequence.
2453 Note that this case is only used once per constant. */
2454 struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
2455 unsigned int cost, alt_cost;
2457 cost = mips_build_shift (codes, value);
2458 alt_cost = mips_build_lower (alt_codes, value);
2459 if (alt_cost < cost)
2461 memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
2469 /* Load VALUE into DEST, using TEMP as a temporary register if need be. */
2472 mips_move_integer (rtx dest, rtx temp, unsigned HOST_WIDE_INT value)
2474 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2475 enum machine_mode mode;
2476 unsigned int i, cost;
2479 mode = GET_MODE (dest);
2480 cost = mips_build_integer (codes, value);
2482 /* Apply each binary operation to X. Invariant: X is a legitimate
2483 source operand for a SET pattern. */
2484 x = GEN_INT (codes[0].value);
2485 for (i = 1; i < cost; i++)
2487 if (!can_create_pseudo_p ())
2489 emit_insn (gen_rtx_SET (VOIDmode, temp, x));
2493 x = force_reg (mode, x);
2494 x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
2497 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
2501 /* Subroutine of mips_legitimize_move. Move constant SRC into register
2502 DEST given that SRC satisfies immediate_operand but doesn't satisfy
2506 mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
2510 /* Split moves of big integers into smaller pieces. */
2511 if (splittable_const_int_operand (src, mode))
2513 mips_move_integer (dest, dest, INTVAL (src));
2517 /* Split moves of symbolic constants into high/low pairs. */
2518 if (splittable_symbolic_operand (src, mode))
2520 emit_insn (gen_rtx_SET (VOIDmode, dest, mips_split_symbol (dest, src)));
2524 if (mips_tls_operand_p (src))
2526 emit_move_insn (dest, mips_legitimize_tls_address (src));
2530 /* If we have (const (plus symbol offset)), load the symbol first
2531 and then add in the offset. This is usually better than forcing
2532 the constant into memory, at least in non-mips16 code. */
2533 split_const (src, &base, &offset);
2535 && offset != const0_rtx
2536 && (can_create_pseudo_p () || SMALL_INT (offset)))
2538 base = mips_force_temporary (dest, base);
2539 emit_move_insn (dest, mips_add_offset (0, base, INTVAL (offset)));
2543 src = force_const_mem (mode, src);
2545 /* When using explicit relocs, constant pool references are sometimes
2546 not legitimate addresses. */
2547 if (!memory_operand (src, VOIDmode))
2548 src = replace_equiv_address (src, mips_split_symbol (dest, XEXP (src, 0)));
2549 emit_move_insn (dest, src);
2553 /* If (set DEST SRC) is not a valid instruction, emit an equivalent
2554 sequence that is valid. */
2557 mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
2559 if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
2561 emit_move_insn (dest, force_reg (mode, src));
2565 /* Check for individual, fully-reloaded mflo and mfhi instructions. */
2566 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
2567 && REG_P (src) && MD_REG_P (REGNO (src))
2568 && REG_P (dest) && GP_REG_P (REGNO (dest)))
2570 int other_regno = REGNO (src) == HI_REGNUM ? LO_REGNUM : HI_REGNUM;
2571 if (GET_MODE_SIZE (mode) <= 4)
2572 emit_insn (gen_mfhilo_si (gen_rtx_REG (SImode, REGNO (dest)),
2573 gen_rtx_REG (SImode, REGNO (src)),
2574 gen_rtx_REG (SImode, other_regno)));
2576 emit_insn (gen_mfhilo_di (gen_rtx_REG (DImode, REGNO (dest)),
2577 gen_rtx_REG (DImode, REGNO (src)),
2578 gen_rtx_REG (DImode, other_regno)));
2582 /* We need to deal with constants that would be legitimate
2583 immediate_operands but not legitimate move_operands. */
2584 if (CONSTANT_P (src) && !move_operand (src, mode))
2586 mips_legitimize_const_move (mode, dest, src);
2587 set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
2593 /* We need a lot of little routines to check constant values on the
2594 mips16. These are used to figure out how long the instruction will
2595 be. It would be much better to do this using constraints, but
2596 there aren't nearly enough letters available. */
2599 m16_check_op (rtx op, int low, int high, int mask)
2601 return (GET_CODE (op) == CONST_INT
2602 && INTVAL (op) >= low
2603 && INTVAL (op) <= high
2604 && (INTVAL (op) & mask) == 0);
2608 m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2610 return m16_check_op (op, 0x1, 0x8, 0);
2614 m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2616 return m16_check_op (op, - 0x8, 0x7, 0);
2620 m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2622 return m16_check_op (op, - 0x7, 0x8, 0);
2626 m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2628 return m16_check_op (op, - 0x10, 0xf, 0);
2632 m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2634 return m16_check_op (op, - 0xf, 0x10, 0);
2638 m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2640 return m16_check_op (op, (- 0x10) << 2, 0xf << 2, 3);
2644 m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2646 return m16_check_op (op, (- 0xf) << 2, 0x10 << 2, 3);
2650 m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2652 return m16_check_op (op, - 0x80, 0x7f, 0);
2656 m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2658 return m16_check_op (op, - 0x7f, 0x80, 0);
2662 m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2664 return m16_check_op (op, 0x0, 0xff, 0);
2668 m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2670 return m16_check_op (op, - 0xff, 0x0, 0);
2674 m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2676 return m16_check_op (op, - 0x1, 0xfe, 0);
2680 m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2682 return m16_check_op (op, 0x0, 0xff << 2, 3);
2686 m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2688 return m16_check_op (op, (- 0xff) << 2, 0x0, 3);
2692 m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2694 return m16_check_op (op, (- 0x80) << 3, 0x7f << 3, 7);
2698 m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2700 return m16_check_op (op, (- 0x7f) << 3, 0x80 << 3, 7);
2703 /* Return true if ADDR matches the pattern for the lwxs load scaled indexed
2704 address instruction. */
2707 mips_lwxs_address_p (rtx addr)
2710 && GET_CODE (addr) == PLUS
2711 && REG_P (XEXP (addr, 1)))
2713 rtx offset = XEXP (addr, 0);
2714 if (GET_CODE (offset) == MULT
2715 && REG_P (XEXP (offset, 0))
2716 && GET_CODE (XEXP (offset, 1)) == CONST_INT
2717 && INTVAL (XEXP (offset, 1)) == 4)
2724 mips_rtx_costs (rtx x, int code, int outer_code, int *total)
2726 enum machine_mode mode = GET_MODE (x);
2727 bool float_mode_p = FLOAT_MODE_P (mode);
2734 /* A number between 1 and 8 inclusive is efficient for a shift.
2735 Otherwise, we will need an extended instruction. */
2736 if ((outer_code) == ASHIFT || (outer_code) == ASHIFTRT
2737 || (outer_code) == LSHIFTRT)
2739 if (INTVAL (x) >= 1 && INTVAL (x) <= 8)
2742 *total = COSTS_N_INSNS (1);
2746 /* We can use cmpi for an xor with an unsigned 16-bit value. */
2747 if ((outer_code) == XOR
2748 && INTVAL (x) >= 0 && INTVAL (x) < 0x10000)
2754 /* We may be able to use slt or sltu for a comparison with a
2755 signed 16-bit value. (The boundary conditions aren't quite
2756 right, but this is just a heuristic anyhow.) */
2757 if (((outer_code) == LT || (outer_code) == LE
2758 || (outer_code) == GE || (outer_code) == GT
2759 || (outer_code) == LTU || (outer_code) == LEU
2760 || (outer_code) == GEU || (outer_code) == GTU)
2761 && INTVAL (x) >= -0x8000 && INTVAL (x) < 0x8000)
2767 /* Equality comparisons with 0 are cheap. */
2768 if (((outer_code) == EQ || (outer_code) == NE)
2775 /* Constants in the range 0...255 can be loaded with an unextended
2776 instruction. They are therefore as cheap as a register move.
2778 Given the choice between "li R1,0...255" and "move R1,R2"
2779 (where R2 is a known constant), it is usually better to use "li",
2780 since we do not want to unnecessarily extend the lifetime
2782 if (outer_code == SET
2784 && INTVAL (x) < 256)
2792 /* These can be used anywhere. */
2797 /* Otherwise fall through to the handling below because
2798 we'll need to construct the constant. */
2804 if (LEGITIMATE_CONSTANT_P (x))
2806 *total = COSTS_N_INSNS (1);
2811 /* The value will need to be fetched from the constant pool. */
2812 *total = CONSTANT_POOL_COST;
2818 /* If the address is legitimate, return the number of
2819 instructions it needs. */
2820 rtx addr = XEXP (x, 0);
2821 int n = mips_address_insns (addr, GET_MODE (x));
2824 *total = COSTS_N_INSNS (n + 1);
2827 /* Check for scaled indexed address. */
2828 if (mips_lwxs_address_p (addr))
2830 *total = COSTS_N_INSNS (2);
2833 /* Otherwise use the default handling. */
2838 *total = COSTS_N_INSNS (6);
2842 *total = COSTS_N_INSNS ((mode == DImode && !TARGET_64BIT) ? 2 : 1);
2848 if (mode == DImode && !TARGET_64BIT)
2850 *total = COSTS_N_INSNS (2);
2858 if (mode == DImode && !TARGET_64BIT)
2860 *total = COSTS_N_INSNS ((GET_CODE (XEXP (x, 1)) == CONST_INT)
2868 *total = COSTS_N_INSNS (1);
2870 *total = COSTS_N_INSNS (4);
2874 *total = COSTS_N_INSNS (1);
2881 *total = mips_cost->fp_add;
2885 else if (mode == DImode && !TARGET_64BIT)
2887 *total = COSTS_N_INSNS (4);
2893 if (mode == DImode && !TARGET_64BIT)
2895 *total = COSTS_N_INSNS (4);
2902 *total = mips_cost->fp_mult_sf;
2904 else if (mode == DFmode)
2905 *total = mips_cost->fp_mult_df;
2907 else if (mode == SImode)
2908 *total = mips_cost->int_mult_si;
2911 *total = mips_cost->int_mult_di;
2920 *total = mips_cost->fp_div_sf;
2922 *total = mips_cost->fp_div_df;
2931 *total = mips_cost->int_div_di;
2933 *total = mips_cost->int_div_si;
2938 /* A sign extend from SImode to DImode in 64-bit mode is often
2939 zero instructions, because the result can often be used
2940 directly by another instruction; we'll call it one. */
2941 if (TARGET_64BIT && mode == DImode
2942 && GET_MODE (XEXP (x, 0)) == SImode)
2943 *total = COSTS_N_INSNS (1);
2945 *total = COSTS_N_INSNS (2);
2949 if (TARGET_64BIT && mode == DImode
2950 && GET_MODE (XEXP (x, 0)) == SImode)
2951 *total = COSTS_N_INSNS (2);
2953 *total = COSTS_N_INSNS (1);
2957 case UNSIGNED_FLOAT:
2960 case FLOAT_TRUNCATE:
2962 *total = mips_cost->fp_add;
2970 /* Provide the costs of an addressing mode that contains ADDR.
2971 If ADDR is not a valid address, its cost is irrelevant. */
2974 mips_address_cost (rtx addr)
2976 return mips_address_insns (addr, SImode);
2979 /* Return one word of double-word value OP, taking into account the fixed
2980 endianness of certain registers. HIGH_P is true to select the high part,
2981 false to select the low part. */
2984 mips_subword (rtx op, int high_p)
2987 enum machine_mode mode;
2989 mode = GET_MODE (op);
2990 if (mode == VOIDmode)
2993 if (TARGET_BIG_ENDIAN ? !high_p : high_p)
2994 byte = UNITS_PER_WORD;
2998 if (FP_REG_RTX_P (op))
2999 return gen_rtx_REG (word_mode, high_p ? REGNO (op) + 1 : REGNO (op));
3002 return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
3004 return simplify_gen_subreg (word_mode, op, mode, byte);
3008 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
3011 mips_split_64bit_move_p (rtx dest, rtx src)
3016 /* FP->FP moves can be done in a single instruction. */
3017 if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
3020 /* Check for floating-point loads and stores. They can be done using
3021 ldc1 and sdc1 on MIPS II and above. */
3024 if (FP_REG_RTX_P (dest) && MEM_P (src))
3026 if (FP_REG_RTX_P (src) && MEM_P (dest))
3033 /* Split a 64-bit move from SRC to DEST assuming that
3034 mips_split_64bit_move_p holds.
3036 Moves into and out of FPRs cause some difficulty here. Such moves
3037 will always be DFmode, since paired FPRs are not allowed to store
3038 DImode values. The most natural representation would be two separate
3039 32-bit moves, such as:
3041 (set (reg:SI $f0) (mem:SI ...))
3042 (set (reg:SI $f1) (mem:SI ...))
3044 However, the second insn is invalid because odd-numbered FPRs are
3045 not allowed to store independent values. Use the patterns load_df_low,
3046 load_df_high and store_df_high instead. */
3049 mips_split_64bit_move (rtx dest, rtx src)
3051 if (FP_REG_RTX_P (dest))
3053 /* Loading an FPR from memory or from GPRs. */
3056 dest = gen_lowpart (DFmode, dest);
3057 emit_insn (gen_load_df_low (dest, mips_subword (src, 0)));
3058 emit_insn (gen_mthc1 (dest, mips_subword (src, 1),
3063 emit_insn (gen_load_df_low (copy_rtx (dest),
3064 mips_subword (src, 0)));
3065 emit_insn (gen_load_df_high (dest, mips_subword (src, 1),
3069 else if (FP_REG_RTX_P (src))
3071 /* Storing an FPR into memory or GPRs. */
3074 src = gen_lowpart (DFmode, src);
3075 emit_move_insn (mips_subword (dest, 0), mips_subword (src, 0));
3076 emit_insn (gen_mfhc1 (mips_subword (dest, 1), src));
3080 emit_move_insn (mips_subword (dest, 0), mips_subword (src, 0));
3081 emit_insn (gen_store_df_high (mips_subword (dest, 1), src));
3086 /* The operation can be split into two normal moves. Decide in
3087 which order to do them. */
3090 low_dest = mips_subword (dest, 0);
3091 if (REG_P (low_dest)
3092 && reg_overlap_mentioned_p (low_dest, src))
3094 emit_move_insn (mips_subword (dest, 1), mips_subword (src, 1));
3095 emit_move_insn (low_dest, mips_subword (src, 0));
3099 emit_move_insn (low_dest, mips_subword (src, 0));
3100 emit_move_insn (mips_subword (dest, 1), mips_subword (src, 1));
3105 /* Return the appropriate instructions to move SRC into DEST. Assume
3106 that SRC is operand 1 and DEST is operand 0. */
3109 mips_output_move (rtx dest, rtx src)
3111 enum rtx_code dest_code, src_code;
3114 dest_code = GET_CODE (dest);
3115 src_code = GET_CODE (src);
3116 dbl_p = (GET_MODE_SIZE (GET_MODE (dest)) == 8);
3118 if (dbl_p && mips_split_64bit_move_p (dest, src))
3121 if ((src_code == REG && GP_REG_P (REGNO (src)))
3122 || (!TARGET_MIPS16 && src == CONST0_RTX (GET_MODE (dest))))
3124 if (dest_code == REG)
3126 if (GP_REG_P (REGNO (dest)))
3127 return "move\t%0,%z1";
3129 if (MD_REG_P (REGNO (dest)))
3132 if (DSP_ACC_REG_P (REGNO (dest)))
3134 static char retval[] = "mt__\t%z1,%q0";
3135 retval[2] = reg_names[REGNO (dest)][4];
3136 retval[3] = reg_names[REGNO (dest)][5];
3140 if (FP_REG_P (REGNO (dest)))
3141 return (dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0");
3143 if (ALL_COP_REG_P (REGNO (dest)))
3145 static char retval[] = "dmtc_\t%z1,%0";
3147 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3148 return (dbl_p ? retval : retval + 1);
3151 if (dest_code == MEM)
3152 return (dbl_p ? "sd\t%z1,%0" : "sw\t%z1,%0");
3154 if (dest_code == REG && GP_REG_P (REGNO (dest)))
3156 if (src_code == REG)
3158 if (DSP_ACC_REG_P (REGNO (src)))
3160 static char retval[] = "mf__\t%0,%q1";
3161 retval[2] = reg_names[REGNO (src)][4];
3162 retval[3] = reg_names[REGNO (src)][5];
3166 if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
3167 return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
3169 if (FP_REG_P (REGNO (src)))
3170 return (dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1");
3172 if (ALL_COP_REG_P (REGNO (src)))
3174 static char retval[] = "dmfc_\t%0,%1";
3176 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3177 return (dbl_p ? retval : retval + 1);
3181 if (src_code == MEM)
3182 return (dbl_p ? "ld\t%0,%1" : "lw\t%0,%1");
3184 if (src_code == CONST_INT)
3186 /* Don't use the X format, because that will give out of
3187 range numbers for 64-bit hosts and 32-bit targets. */
3189 return "li\t%0,%1\t\t\t# %X1";
3191 if (INTVAL (src) >= 0 && INTVAL (src) <= 0xffff)
3194 if (INTVAL (src) < 0 && INTVAL (src) >= -0xffff)
3198 if (src_code == HIGH)
3199 return "lui\t%0,%h1";
3201 if (CONST_GP_P (src))
3202 return "move\t%0,%1";
3204 if (symbolic_operand (src, VOIDmode))
3205 return (dbl_p ? "dla\t%0,%1" : "la\t%0,%1");
3207 if (src_code == REG && FP_REG_P (REGNO (src)))
3209 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3211 if (GET_MODE (dest) == V2SFmode)
3212 return "mov.ps\t%0,%1";
3214 return (dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1");
3217 if (dest_code == MEM)
3218 return (dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0");
3220 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3222 if (src_code == MEM)
3223 return (dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1");
3225 if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
3227 static char retval[] = "l_c_\t%0,%1";
3229 retval[1] = (dbl_p ? 'd' : 'w');
3230 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3233 if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
3235 static char retval[] = "s_c_\t%1,%0";
3237 retval[1] = (dbl_p ? 'd' : 'w');
3238 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3244 /* Restore $gp from its save slot. Valid only when using o32 or
3248 mips_restore_gp (void)
3252 gcc_assert (TARGET_ABICALLS && TARGET_OLDABI);
3254 address = mips_add_offset (pic_offset_table_rtx,
3255 frame_pointer_needed
3256 ? hard_frame_pointer_rtx
3257 : stack_pointer_rtx,
3258 current_function_outgoing_args_size);
3259 slot = gen_rtx_MEM (Pmode, address);
3261 emit_move_insn (pic_offset_table_rtx, slot);
3262 if (!TARGET_EXPLICIT_RELOCS)
3263 emit_insn (gen_blockage ());
3266 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
3269 mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
3271 emit_insn (gen_rtx_SET (VOIDmode, target,
3272 gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
3275 /* Return true if CMP1 is a suitable second operand for relational
3276 operator CODE. See also the *sCC patterns in mips.md. */
3279 mips_relational_operand_ok_p (enum rtx_code code, rtx cmp1)
3285 return reg_or_0_operand (cmp1, VOIDmode);
3289 return !TARGET_MIPS16 && cmp1 == const1_rtx;
3293 return arith_operand (cmp1, VOIDmode);
3296 return sle_operand (cmp1, VOIDmode);
3299 return sleu_operand (cmp1, VOIDmode);
3306 /* Canonicalize LE or LEU comparisons into LT comparisons when
3307 possible to avoid extra instructions or inverting the
3311 mips_canonicalize_comparison (enum rtx_code *code, rtx *cmp1,
3312 enum machine_mode mode)
3314 HOST_WIDE_INT original, plus_one;
3316 if (GET_CODE (*cmp1) != CONST_INT)
3319 original = INTVAL (*cmp1);
3320 plus_one = trunc_int_for_mode ((unsigned HOST_WIDE_INT) original + 1, mode);
3325 if (original < plus_one)
3328 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3337 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3350 /* Compare CMP0 and CMP1 using relational operator CODE and store the
3351 result in TARGET. CMP0 and TARGET are register_operands that have
3352 the same integer mode. If INVERT_PTR is nonnull, it's OK to set
3353 TARGET to the inverse of the result and flip *INVERT_PTR instead. */
3356 mips_emit_int_relational (enum rtx_code code, bool *invert_ptr,
3357 rtx target, rtx cmp0, rtx cmp1)
3359 /* First see if there is a MIPS instruction that can do this operation
3360 with CMP1 in its current form. If not, try to canonicalize the
3361 comparison to LT. If that fails, try doing the same for the
3362 inverse operation. If that also fails, force CMP1 into a register
3364 if (mips_relational_operand_ok_p (code, cmp1))
3365 mips_emit_binary (code, target, cmp0, cmp1);
3366 else if (mips_canonicalize_comparison (&code, &cmp1, GET_MODE (target)))
3367 mips_emit_binary (code, target, cmp0, cmp1);
3370 enum rtx_code inv_code = reverse_condition (code);
3371 if (!mips_relational_operand_ok_p (inv_code, cmp1))
3373 cmp1 = force_reg (GET_MODE (cmp0), cmp1);
3374 mips_emit_int_relational (code, invert_ptr, target, cmp0, cmp1);
3376 else if (invert_ptr == 0)
3378 rtx inv_target = gen_reg_rtx (GET_MODE (target));
3379 mips_emit_binary (inv_code, inv_target, cmp0, cmp1);
3380 mips_emit_binary (XOR, target, inv_target, const1_rtx);
3384 *invert_ptr = !*invert_ptr;
3385 mips_emit_binary (inv_code, target, cmp0, cmp1);
3390 /* Return a register that is zero iff CMP0 and CMP1 are equal.
3391 The register will have the same mode as CMP0. */
3394 mips_zero_if_equal (rtx cmp0, rtx cmp1)
3396 if (cmp1 == const0_rtx)
3399 if (uns_arith_operand (cmp1, VOIDmode))
3400 return expand_binop (GET_MODE (cmp0), xor_optab,
3401 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3403 return expand_binop (GET_MODE (cmp0), sub_optab,
3404 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3407 /* Convert *CODE into a code that can be used in a floating-point
3408 scc instruction (c.<cond>.<fmt>). Return true if the values of
3409 the condition code registers will be inverted, with 0 indicating
3410 that the condition holds. */
3413 mips_reverse_fp_cond_p (enum rtx_code *code)
3420 *code = reverse_condition_maybe_unordered (*code);
3428 /* Convert a comparison into something that can be used in a branch or
3429 conditional move. cmp_operands[0] and cmp_operands[1] are the values
3430 being compared and *CODE is the code used to compare them.
3432 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
3433 If NEED_EQ_NE_P, then only EQ/NE comparisons against zero are possible,
3434 otherwise any standard branch condition can be used. The standard branch
3437 - EQ/NE between two registers.
3438 - any comparison between a register and zero. */
3441 mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
3443 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) == MODE_INT)
3445 if (!need_eq_ne_p && cmp_operands[1] == const0_rtx)
3447 *op0 = cmp_operands[0];
3448 *op1 = cmp_operands[1];
3450 else if (*code == EQ || *code == NE)
3454 *op0 = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3459 *op0 = cmp_operands[0];
3460 *op1 = force_reg (GET_MODE (*op0), cmp_operands[1]);
3465 /* The comparison needs a separate scc instruction. Store the
3466 result of the scc in *OP0 and compare it against zero. */
3467 bool invert = false;
3468 *op0 = gen_reg_rtx (GET_MODE (cmp_operands[0]));
3470 mips_emit_int_relational (*code, &invert, *op0,
3471 cmp_operands[0], cmp_operands[1]);
3472 *code = (invert ? EQ : NE);
3477 enum rtx_code cmp_code;
3479 /* Floating-point tests use a separate c.cond.fmt comparison to
3480 set a condition code register. The branch or conditional move
3481 will then compare that register against zero.
3483 Set CMP_CODE to the code of the comparison instruction and
3484 *CODE to the code that the branch or move should use. */
3486 *code = mips_reverse_fp_cond_p (&cmp_code) ? EQ : NE;
3488 ? gen_reg_rtx (CCmode)
3489 : gen_rtx_REG (CCmode, FPSW_REGNUM));
3491 mips_emit_binary (cmp_code, *op0, cmp_operands[0], cmp_operands[1]);
3495 /* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE.
3496 Store the result in TARGET and return true if successful.
3498 On 64-bit targets, TARGET may be wider than cmp_operands[0]. */
3501 mips_emit_scc (enum rtx_code code, rtx target)
3503 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) != MODE_INT)
3506 target = gen_lowpart (GET_MODE (cmp_operands[0]), target);
3507 if (code == EQ || code == NE)
3509 rtx zie = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3510 mips_emit_binary (code, target, zie, const0_rtx);
3513 mips_emit_int_relational (code, 0, target,
3514 cmp_operands[0], cmp_operands[1]);
3518 /* Emit the common code for doing conditional branches.
3519 operand[0] is the label to jump to.
3520 The comparison operands are saved away by cmp{si,di,sf,df}. */
3523 gen_conditional_branch (rtx *operands, enum rtx_code code)
3525 rtx op0, op1, condition;
3527 mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
3528 condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
3529 emit_jump_insn (gen_condjump (condition, operands[0]));
3534 (set temp (COND:CCV2 CMP_OP0 CMP_OP1))
3535 (set DEST (unspec [TRUE_SRC FALSE_SRC temp] UNSPEC_MOVE_TF_PS)) */
3538 mips_expand_vcondv2sf (rtx dest, rtx true_src, rtx false_src,
3539 enum rtx_code cond, rtx cmp_op0, rtx cmp_op1)
3544 reversed_p = mips_reverse_fp_cond_p (&cond);
3545 cmp_result = gen_reg_rtx (CCV2mode);
3546 emit_insn (gen_scc_ps (cmp_result,
3547 gen_rtx_fmt_ee (cond, VOIDmode, cmp_op0, cmp_op1)));
3549 emit_insn (gen_mips_cond_move_tf_ps (dest, false_src, true_src,
3552 emit_insn (gen_mips_cond_move_tf_ps (dest, true_src, false_src,
3556 /* Emit the common code for conditional moves. OPERANDS is the array
3557 of operands passed to the conditional move define_expand. */
3560 gen_conditional_move (rtx *operands)
3565 code = GET_CODE (operands[1]);
3566 mips_emit_compare (&code, &op0, &op1, true);
3567 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
3568 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3569 gen_rtx_fmt_ee (code,
3572 operands[2], operands[3])));
3575 /* Emit a conditional trap. OPERANDS is the array of operands passed to
3576 the conditional_trap expander. */
3579 mips_gen_conditional_trap (rtx *operands)
3582 enum rtx_code cmp_code = GET_CODE (operands[0]);
3583 enum machine_mode mode = GET_MODE (cmp_operands[0]);
3585 /* MIPS conditional trap machine instructions don't have GT or LE
3586 flavors, so we must invert the comparison and convert to LT and
3587 GE, respectively. */
3590 case GT: cmp_code = LT; break;
3591 case LE: cmp_code = GE; break;
3592 case GTU: cmp_code = LTU; break;
3593 case LEU: cmp_code = GEU; break;
3596 if (cmp_code == GET_CODE (operands[0]))
3598 op0 = cmp_operands[0];
3599 op1 = cmp_operands[1];
3603 op0 = cmp_operands[1];
3604 op1 = cmp_operands[0];
3606 op0 = force_reg (mode, op0);
3607 if (!arith_operand (op1, mode))
3608 op1 = force_reg (mode, op1);
3610 emit_insn (gen_rtx_TRAP_IF (VOIDmode,
3611 gen_rtx_fmt_ee (cmp_code, mode, op0, op1),
3615 /* Return true if calls to X can use R_MIPS_CALL* relocations. */
3618 mips_ok_for_lazy_binding_p (rtx x)
3620 return (TARGET_USE_GOT
3621 && GET_CODE (x) == SYMBOL_REF
3622 && !mips_symbol_binds_local_p (x));
3625 /* Load function address ADDR into register DEST. SIBCALL_P is true
3626 if the address is needed for a sibling call. */
3629 mips_load_call_address (rtx dest, rtx addr, int sibcall_p)
3631 /* If we're generating PIC, and this call is to a global function,
3632 try to allow its address to be resolved lazily. This isn't
3633 possible if TARGET_CALL_SAVED_GP since the value of $gp on entry
3634 to the stub would be our caller's gp, not ours. */
3635 if (TARGET_EXPLICIT_RELOCS
3636 && !(sibcall_p && TARGET_CALL_SAVED_GP)
3637 && mips_ok_for_lazy_binding_p (addr))
3639 rtx high, lo_sum_symbol;
3641 high = mips_unspec_offset_high (dest, pic_offset_table_rtx,
3642 addr, SYMBOL_GOTOFF_CALL);
3643 lo_sum_symbol = mips_unspec_address (addr, SYMBOL_GOTOFF_CALL);
3644 if (Pmode == SImode)
3645 emit_insn (gen_load_callsi (dest, high, lo_sum_symbol));
3647 emit_insn (gen_load_calldi (dest, high, lo_sum_symbol));
3650 emit_move_insn (dest, addr);
3654 /* Expand a call or call_value instruction. RESULT is where the
3655 result will go (null for calls), ADDR is the address of the
3656 function, ARGS_SIZE is the size of the arguments and AUX is
3657 the value passed to us by mips_function_arg. SIBCALL_P is true
3658 if we are expanding a sibling call, false if we're expanding
3662 mips_expand_call (rtx result, rtx addr, rtx args_size, rtx aux, int sibcall_p)
3664 rtx orig_addr, pattern, insn;
3667 if (!call_insn_operand (addr, VOIDmode))
3669 addr = gen_reg_rtx (Pmode);
3670 mips_load_call_address (addr, orig_addr, sibcall_p);
3673 if (mips16_hard_float
3674 && build_mips16_call_stub (result, addr, args_size,
3675 aux == 0 ? 0 : (int) GET_MODE (aux)))
3679 pattern = (sibcall_p
3680 ? gen_sibcall_internal (addr, args_size)
3681 : gen_call_internal (addr, args_size));
3682 else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
3686 reg1 = XEXP (XVECEXP (result, 0, 0), 0);
3687 reg2 = XEXP (XVECEXP (result, 0, 1), 0);
3690 ? gen_sibcall_value_multiple_internal (reg1, addr, args_size, reg2)
3691 : gen_call_value_multiple_internal (reg1, addr, args_size, reg2));
3694 pattern = (sibcall_p
3695 ? gen_sibcall_value_internal (result, addr, args_size)
3696 : gen_call_value_internal (result, addr, args_size));
3698 insn = emit_call_insn (pattern);
3700 /* Lazy-binding stubs require $gp to be valid on entry. */
3701 if (mips_ok_for_lazy_binding_p (orig_addr))
3702 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
3706 /* We can handle any sibcall when TARGET_SIBCALLS is true. */
3709 mips_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
3710 tree exp ATTRIBUTE_UNUSED)
3712 return TARGET_SIBCALLS;
3715 /* Emit code to move general operand SRC into condition-code
3716 register DEST. SCRATCH is a scratch TFmode float register.
3723 where FP1 and FP2 are single-precision float registers
3724 taken from SCRATCH. */
3727 mips_emit_fcc_reload (rtx dest, rtx src, rtx scratch)
3731 /* Change the source to SFmode. */
3733 src = adjust_address (src, SFmode, 0);
3734 else if (REG_P (src) || GET_CODE (src) == SUBREG)
3735 src = gen_rtx_REG (SFmode, true_regnum (src));
3737 fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
3738 fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + MAX_FPRS_PER_FMT);
3740 emit_move_insn (copy_rtx (fp1), src);
3741 emit_move_insn (copy_rtx (fp2), CONST0_RTX (SFmode));
3742 emit_insn (gen_slt_sf (dest, fp2, fp1));
3745 /* Emit code to change the current function's return address to
3746 ADDRESS. SCRATCH is available as a scratch register, if needed.
3747 ADDRESS and SCRATCH are both word-mode GPRs. */
3750 mips_set_return_address (rtx address, rtx scratch)
3754 compute_frame_size (get_frame_size ());
3755 gcc_assert ((cfun->machine->frame.mask >> 31) & 1);
3756 slot_address = mips_add_offset (scratch, stack_pointer_rtx,
3757 cfun->machine->frame.gp_sp_offset);
3759 emit_move_insn (gen_rtx_MEM (GET_MODE (address), slot_address), address);
3762 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
3763 Assume that the areas do not overlap. */
3766 mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
3768 HOST_WIDE_INT offset, delta;
3769 unsigned HOST_WIDE_INT bits;
3771 enum machine_mode mode;
3774 /* Work out how many bits to move at a time. If both operands have
3775 half-word alignment, it is usually better to move in half words.
3776 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
3777 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
3778 Otherwise move word-sized chunks. */
3779 if (MEM_ALIGN (src) == BITS_PER_WORD / 2
3780 && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
3781 bits = BITS_PER_WORD / 2;
3783 bits = BITS_PER_WORD;
3785 mode = mode_for_size (bits, MODE_INT, 0);
3786 delta = bits / BITS_PER_UNIT;
3788 /* Allocate a buffer for the temporary registers. */
3789 regs = alloca (sizeof (rtx) * length / delta);
3791 /* Load as many BITS-sized chunks as possible. Use a normal load if
3792 the source has enough alignment, otherwise use left/right pairs. */
3793 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
3795 regs[i] = gen_reg_rtx (mode);
3796 if (MEM_ALIGN (src) >= bits)
3797 emit_move_insn (regs[i], adjust_address (src, mode, offset));
3800 rtx part = adjust_address (src, BLKmode, offset);
3801 if (!mips_expand_unaligned_load (regs[i], part, bits, 0))
3806 /* Copy the chunks to the destination. */
3807 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
3808 if (MEM_ALIGN (dest) >= bits)
3809 emit_move_insn (adjust_address (dest, mode, offset), regs[i]);
3812 rtx part = adjust_address (dest, BLKmode, offset);
3813 if (!mips_expand_unaligned_store (part, regs[i], bits, 0))
3817 /* Mop up any left-over bytes. */
3818 if (offset < length)
3820 src = adjust_address (src, BLKmode, offset);
3821 dest = adjust_address (dest, BLKmode, offset);
3822 move_by_pieces (dest, src, length - offset,
3823 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
3827 #define MAX_MOVE_REGS 4
3828 #define MAX_MOVE_BYTES (MAX_MOVE_REGS * UNITS_PER_WORD)
3831 /* Helper function for doing a loop-based block operation on memory
3832 reference MEM. Each iteration of the loop will operate on LENGTH
3835 Create a new base register for use within the loop and point it to
3836 the start of MEM. Create a new memory reference that uses this
3837 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
3840 mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
3841 rtx *loop_reg, rtx *loop_mem)
3843 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
3845 /* Although the new mem does not refer to a known location,
3846 it does keep up to LENGTH bytes of alignment. */
3847 *loop_mem = change_address (mem, BLKmode, *loop_reg);
3848 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
3852 /* Move LENGTH bytes from SRC to DEST using a loop that moves MAX_MOVE_BYTES
3853 per iteration. LENGTH must be at least MAX_MOVE_BYTES. Assume that the
3854 memory regions do not overlap. */
3857 mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length)
3859 rtx label, src_reg, dest_reg, final_src;
3860 HOST_WIDE_INT leftover;
3862 leftover = length % MAX_MOVE_BYTES;
3865 /* Create registers and memory references for use within the loop. */
3866 mips_adjust_block_mem (src, MAX_MOVE_BYTES, &src_reg, &src);
3867 mips_adjust_block_mem (dest, MAX_MOVE_BYTES, &dest_reg, &dest);
3869 /* Calculate the value that SRC_REG should have after the last iteration
3871 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
3874 /* Emit the start of the loop. */
3875 label = gen_label_rtx ();
3878 /* Emit the loop body. */
3879 mips_block_move_straight (dest, src, MAX_MOVE_BYTES);
3881 /* Move on to the next block. */
3882 emit_move_insn (src_reg, plus_constant (src_reg, MAX_MOVE_BYTES));
3883 emit_move_insn (dest_reg, plus_constant (dest_reg, MAX_MOVE_BYTES));
3885 /* Emit the loop condition. */
3886 if (Pmode == DImode)
3887 emit_insn (gen_cmpdi (src_reg, final_src));
3889 emit_insn (gen_cmpsi (src_reg, final_src));
3890 emit_jump_insn (gen_bne (label));
3892 /* Mop up any left-over bytes. */
3894 mips_block_move_straight (dest, src, leftover);
3898 /* Expand a loop of synci insns for the address range [BEGIN, END). */
3901 mips_expand_synci_loop (rtx begin, rtx end)
3903 rtx inc, label, cmp, cmp_result;
3905 /* Load INC with the cache line size (rdhwr INC,$1). */
3906 inc = gen_reg_rtx (SImode);
3907 emit_insn (gen_rdhwr (inc, const1_rtx));
3909 /* Loop back to here. */
3910 label = gen_label_rtx ();
3913 emit_insn (gen_synci (begin));
3915 cmp = gen_reg_rtx (Pmode);
3916 mips_emit_binary (GTU, cmp, begin, end);
3918 mips_emit_binary (PLUS, begin, begin, inc);
3920 cmp_result = gen_rtx_EQ (VOIDmode, cmp, const0_rtx);
3921 emit_jump_insn (gen_condjump (cmp_result, label));
3924 /* Expand a movmemsi instruction. */
3927 mips_expand_block_move (rtx dest, rtx src, rtx length)
3929 if (GET_CODE (length) == CONST_INT)
3931 if (INTVAL (length) <= 2 * MAX_MOVE_BYTES)
3933 mips_block_move_straight (dest, src, INTVAL (length));
3938 mips_block_move_loop (dest, src, INTVAL (length));
3945 /* Argument support functions. */
3947 /* Initialize CUMULATIVE_ARGS for a function. */
3950 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
3951 rtx libname ATTRIBUTE_UNUSED)
3953 static CUMULATIVE_ARGS zero_cum;
3954 tree param, next_param;
3957 cum->prototype = (fntype && TYPE_ARG_TYPES (fntype));
3959 /* Determine if this function has variable arguments. This is
3960 indicated by the last argument being 'void_type_mode' if there
3961 are no variable arguments. The standard MIPS calling sequence
3962 passes all arguments in the general purpose registers in this case. */
3964 for (param = fntype ? TYPE_ARG_TYPES (fntype) : 0;
3965 param != 0; param = next_param)
3967 next_param = TREE_CHAIN (param);
3968 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
3969 cum->gp_reg_found = 1;
3974 /* Fill INFO with information about a single argument. CUM is the
3975 cumulative state for earlier arguments. MODE is the mode of this
3976 argument and TYPE is its type (if known). NAMED is true if this
3977 is a named (fixed) argument rather than a variable one. */
3980 mips_arg_info (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
3981 tree type, int named, struct mips_arg_info *info)
3983 bool doubleword_aligned_p;
3984 unsigned int num_bytes, num_words, max_regs;
3986 /* Work out the size of the argument. */
3987 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
3988 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3990 /* Decide whether it should go in a floating-point register, assuming
3991 one is free. Later code checks for availability.
3993 The checks against UNITS_PER_FPVALUE handle the soft-float and
3994 single-float cases. */
3998 /* The EABI conventions have traditionally been defined in terms
3999 of TYPE_MODE, regardless of the actual type. */
4000 info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
4001 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4002 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4007 /* Only leading floating-point scalars are passed in
4008 floating-point registers. We also handle vector floats the same
4009 say, which is OK because they are not covered by the standard ABI. */
4010 info->fpr_p = (!cum->gp_reg_found
4011 && cum->arg_number < 2
4012 && (type == 0 || SCALAR_FLOAT_TYPE_P (type)
4013 || VECTOR_FLOAT_TYPE_P (type))
4014 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4015 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4016 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4021 /* Scalar and complex floating-point types are passed in
4022 floating-point registers. */
4023 info->fpr_p = (named
4024 && (type == 0 || FLOAT_TYPE_P (type))
4025 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4026 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4027 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4028 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
4030 /* ??? According to the ABI documentation, the real and imaginary
4031 parts of complex floats should be passed in individual registers.
4032 The real and imaginary parts of stack arguments are supposed
4033 to be contiguous and there should be an extra word of padding
4036 This has two problems. First, it makes it impossible to use a
4037 single "void *" va_list type, since register and stack arguments
4038 are passed differently. (At the time of writing, MIPSpro cannot
4039 handle complex float varargs correctly.) Second, it's unclear
4040 what should happen when there is only one register free.
4042 For now, we assume that named complex floats should go into FPRs
4043 if there are two FPRs free, otherwise they should be passed in the
4044 same way as a struct containing two floats. */
4046 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4047 && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
4049 if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
4050 info->fpr_p = false;
4060 /* See whether the argument has doubleword alignment. */
4061 doubleword_aligned_p = FUNCTION_ARG_BOUNDARY (mode, type) > BITS_PER_WORD;
4063 /* Set REG_OFFSET to the register count we're interested in.
4064 The EABI allocates the floating-point registers separately,
4065 but the other ABIs allocate them like integer registers. */
4066 info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
4070 /* Advance to an even register if the argument is doubleword-aligned. */
4071 if (doubleword_aligned_p)
4072 info->reg_offset += info->reg_offset & 1;
4074 /* Work out the offset of a stack argument. */
4075 info->stack_offset = cum->stack_words;
4076 if (doubleword_aligned_p)
4077 info->stack_offset += info->stack_offset & 1;
4079 max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
4081 /* Partition the argument between registers and stack. */
4082 info->reg_words = MIN (num_words, max_regs);
4083 info->stack_words = num_words - info->reg_words;
4087 /* INFO describes an argument that is passed in a single-register value.
4088 Return the register it uses, assuming that FPRs are available if
4092 mips_arg_regno (const struct mips_arg_info *info, bool hard_float_p)
4094 if (!info->fpr_p || !hard_float_p)
4095 return GP_ARG_FIRST + info->reg_offset;
4096 else if (mips_abi == ABI_32 && TARGET_DOUBLE_FLOAT && info->reg_offset > 0)
4097 /* In o32, the second argument is always passed in $f14
4098 for TARGET_DOUBLE_FLOAT, regardless of whether the
4099 first argument was a word or doubleword. */
4100 return FP_ARG_FIRST + 2;
4102 return FP_ARG_FIRST + info->reg_offset;
4105 /* Implement FUNCTION_ARG_ADVANCE. */
4108 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4109 tree type, int named)
4111 struct mips_arg_info info;
4113 mips_arg_info (cum, mode, type, named, &info);
4116 cum->gp_reg_found = true;
4118 /* See the comment above the cumulative args structure in mips.h
4119 for an explanation of what this code does. It assumes the O32
4120 ABI, which passes at most 2 arguments in float registers. */
4121 if (cum->arg_number < 2 && info.fpr_p)
4122 cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);
4124 if (mips_abi != ABI_EABI || !info.fpr_p)
4125 cum->num_gprs = info.reg_offset + info.reg_words;
4126 else if (info.reg_words > 0)
4127 cum->num_fprs += MAX_FPRS_PER_FMT;
4129 if (info.stack_words > 0)
4130 cum->stack_words = info.stack_offset + info.stack_words;
4135 /* Implement FUNCTION_ARG. */
4138 function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4139 tree type, int named)
4141 struct mips_arg_info info;
4143 /* We will be called with a mode of VOIDmode after the last argument
4144 has been seen. Whatever we return will be passed to the call
4145 insn. If we need a mips16 fp_code, return a REG with the code
4146 stored as the mode. */
4147 if (mode == VOIDmode)
4149 if (TARGET_MIPS16 && cum->fp_code != 0)
4150 return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
4156 mips_arg_info (cum, mode, type, named, &info);
4158 /* Return straight away if the whole argument is passed on the stack. */
4159 if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
4163 && TREE_CODE (type) == RECORD_TYPE
4165 && TYPE_SIZE_UNIT (type)
4166 && host_integerp (TYPE_SIZE_UNIT (type), 1)
4169 /* The Irix 6 n32/n64 ABIs say that if any 64-bit chunk of the
4170 structure contains a double in its entirety, then that 64-bit
4171 chunk is passed in a floating point register. */
4174 /* First check to see if there is any such field. */
4175 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4176 if (TREE_CODE (field) == FIELD_DECL
4177 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
4178 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
4179 && host_integerp (bit_position (field), 0)
4180 && int_bit_position (field) % BITS_PER_WORD == 0)
4185 /* Now handle the special case by returning a PARALLEL
4186 indicating where each 64-bit chunk goes. INFO.REG_WORDS
4187 chunks are passed in registers. */
4189 HOST_WIDE_INT bitpos;
4192 /* assign_parms checks the mode of ENTRY_PARM, so we must
4193 use the actual mode here. */
4194 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
4197 field = TYPE_FIELDS (type);
4198 for (i = 0; i < info.reg_words; i++)
4202 for (; field; field = TREE_CHAIN (field))
4203 if (TREE_CODE (field) == FIELD_DECL
4204 && int_bit_position (field) >= bitpos)
4208 && int_bit_position (field) == bitpos
4209 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
4210 && !TARGET_SOFT_FLOAT
4211 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
4212 reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
4214 reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
4217 = gen_rtx_EXPR_LIST (VOIDmode, reg,
4218 GEN_INT (bitpos / BITS_PER_UNIT));
4220 bitpos += BITS_PER_WORD;
4226 /* Handle the n32/n64 conventions for passing complex floating-point
4227 arguments in FPR pairs. The real part goes in the lower register
4228 and the imaginary part goes in the upper register. */
4231 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4234 enum machine_mode inner;
4237 inner = GET_MODE_INNER (mode);
4238 reg = FP_ARG_FIRST + info.reg_offset;
4239 if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
4241 /* Real part in registers, imaginary part on stack. */
4242 gcc_assert (info.stack_words == info.reg_words);
4243 return gen_rtx_REG (inner, reg);
4247 gcc_assert (info.stack_words == 0);
4248 real = gen_rtx_EXPR_LIST (VOIDmode,
4249 gen_rtx_REG (inner, reg),
4251 imag = gen_rtx_EXPR_LIST (VOIDmode,
4253 reg + info.reg_words / 2),
4254 GEN_INT (GET_MODE_SIZE (inner)));
4255 return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
4259 return gen_rtx_REG (mode, mips_arg_regno (&info, TARGET_HARD_FLOAT));
4263 /* Implement TARGET_ARG_PARTIAL_BYTES. */
4266 mips_arg_partial_bytes (CUMULATIVE_ARGS *cum,
4267 enum machine_mode mode, tree type, bool named)
4269 struct mips_arg_info info;
4271 mips_arg_info (cum, mode, type, named, &info);
4272 return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
4276 /* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
4277 PARM_BOUNDARY bits of alignment, but will be given anything up
4278 to STACK_BOUNDARY bits if the type requires it. */
4281 function_arg_boundary (enum machine_mode mode, tree type)
4283 unsigned int alignment;
4285 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
4286 if (alignment < PARM_BOUNDARY)
4287 alignment = PARM_BOUNDARY;
4288 if (alignment > STACK_BOUNDARY)
4289 alignment = STACK_BOUNDARY;
4293 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
4294 upward rather than downward. In other words, return true if the
4295 first byte of the stack slot has useful data, false if the last
4299 mips_pad_arg_upward (enum machine_mode mode, tree type)
4301 /* On little-endian targets, the first byte of every stack argument
4302 is passed in the first byte of the stack slot. */
4303 if (!BYTES_BIG_ENDIAN)
4306 /* Otherwise, integral types are padded downward: the last byte of a
4307 stack argument is passed in the last byte of the stack slot. */
4309 ? INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type)
4310 : GET_MODE_CLASS (mode) == MODE_INT)
4313 /* Big-endian o64 pads floating-point arguments downward. */
4314 if (mips_abi == ABI_O64)
4315 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4318 /* Other types are padded upward for o32, o64, n32 and n64. */
4319 if (mips_abi != ABI_EABI)
4322 /* Arguments smaller than a stack slot are padded downward. */
4323 if (mode != BLKmode)
4324 return (GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY);
4326 return (int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT));
4330 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
4331 if the least significant byte of the register has useful data. Return
4332 the opposite if the most significant byte does. */
4335 mips_pad_reg_upward (enum machine_mode mode, tree type)
4337 /* No shifting is required for floating-point arguments. */
4338 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4339 return !BYTES_BIG_ENDIAN;
4341 /* Otherwise, apply the same padding to register arguments as we do
4342 to stack arguments. */
4343 return mips_pad_arg_upward (mode, type);
4347 mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4348 tree type, int *pretend_size ATTRIBUTE_UNUSED,
4351 CUMULATIVE_ARGS local_cum;
4352 int gp_saved, fp_saved;
4354 /* The caller has advanced CUM up to, but not beyond, the last named
4355 argument. Advance a local copy of CUM past the last "real" named
4356 argument, to find out how many registers are left over. */
4359 FUNCTION_ARG_ADVANCE (local_cum, mode, type, 1);
4361 /* Found out how many registers we need to save. */
4362 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
4363 fp_saved = (EABI_FLOAT_VARARGS_P
4364 ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
4373 ptr = plus_constant (virtual_incoming_args_rtx,
4374 REG_PARM_STACK_SPACE (cfun->decl)
4375 - gp_saved * UNITS_PER_WORD);
4376 mem = gen_rtx_MEM (BLKmode, ptr);
4377 set_mem_alias_set (mem, get_varargs_alias_set ());
4379 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
4384 /* We can't use move_block_from_reg, because it will use
4386 enum machine_mode mode;
4389 /* Set OFF to the offset from virtual_incoming_args_rtx of
4390 the first float register. The FP save area lies below
4391 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
4392 off = -gp_saved * UNITS_PER_WORD;
4393 off &= ~(UNITS_PER_FPVALUE - 1);
4394 off -= fp_saved * UNITS_PER_FPREG;
4396 mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
4398 for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS;
4399 i += MAX_FPRS_PER_FMT)
4403 ptr = plus_constant (virtual_incoming_args_rtx, off);
4404 mem = gen_rtx_MEM (mode, ptr);
4405 set_mem_alias_set (mem, get_varargs_alias_set ());
4406 emit_move_insn (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
4407 off += UNITS_PER_HWFPVALUE;
4411 if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
4412 cfun->machine->varargs_size = (gp_saved * UNITS_PER_WORD
4413 + fp_saved * UNITS_PER_FPREG);
4416 /* Create the va_list data type.
4417 We keep 3 pointers, and two offsets.
4418 Two pointers are to the overflow area, which starts at the CFA.
4419 One of these is constant, for addressing into the GPR save area below it.
4420 The other is advanced up the stack through the overflow region.
4421 The third pointer is to the GPR save area. Since the FPR save area
4422 is just below it, we can address FPR slots off this pointer.
4423 We also keep two one-byte offsets, which are to be subtracted from the
4424 constant pointers to yield addresses in the GPR and FPR save areas.
4425 These are downcounted as float or non-float arguments are used,
4426 and when they get to zero, the argument must be obtained from the
4428 If !EABI_FLOAT_VARARGS_P, then no FPR save area exists, and a single
4429 pointer is enough. It's started at the GPR save area, and is
4431 Note that the GPR save area is not constant size, due to optimization
4432 in the prologue. Hence, we can't use a design with two pointers
4433 and two offsets, although we could have designed this with two pointers
4434 and three offsets. */
4437 mips_build_builtin_va_list (void)
4439 if (EABI_FLOAT_VARARGS_P)
4441 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
4444 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
4446 f_ovfl = build_decl (FIELD_DECL, get_identifier ("__overflow_argptr"),
4448 f_gtop = build_decl (FIELD_DECL, get_identifier ("__gpr_top"),
4450 f_ftop = build_decl (FIELD_DECL, get_identifier ("__fpr_top"),
4452 f_goff = build_decl (FIELD_DECL, get_identifier ("__gpr_offset"),
4453 unsigned_char_type_node);
4454 f_foff = build_decl (FIELD_DECL, get_identifier ("__fpr_offset"),
4455 unsigned_char_type_node);
4456 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
4457 warn on every user file. */
4458 index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
4459 array = build_array_type (unsigned_char_type_node,
4460 build_index_type (index));
4461 f_res = build_decl (FIELD_DECL, get_identifier ("__reserved"), array);
4463 DECL_FIELD_CONTEXT (f_ovfl) = record;
4464 DECL_FIELD_CONTEXT (f_gtop) = record;
4465 DECL_FIELD_CONTEXT (f_ftop) = record;
4466 DECL_FIELD_CONTEXT (f_goff) = record;
4467 DECL_FIELD_CONTEXT (f_foff) = record;
4468 DECL_FIELD_CONTEXT (f_res) = record;
4470 TYPE_FIELDS (record) = f_ovfl;
4471 TREE_CHAIN (f_ovfl) = f_gtop;
4472 TREE_CHAIN (f_gtop) = f_ftop;
4473 TREE_CHAIN (f_ftop) = f_goff;
4474 TREE_CHAIN (f_goff) = f_foff;
4475 TREE_CHAIN (f_foff) = f_res;
4477 layout_type (record);
4480 else if (TARGET_IRIX && TARGET_IRIX6)
4481 /* On IRIX 6, this type is 'char *'. */
4482 return build_pointer_type (char_type_node);
4484 /* Otherwise, we use 'void *'. */
4485 return ptr_type_node;
4488 /* Implement va_start. */
4491 mips_va_start (tree valist, rtx nextarg)
4493 if (EABI_FLOAT_VARARGS_P)
4495 const CUMULATIVE_ARGS *cum;
4496 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4497 tree ovfl, gtop, ftop, goff, foff;
4499 int gpr_save_area_size;
4500 int fpr_save_area_size;
4503 cum = ¤t_function_args_info;
4505 = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
4507 = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
4509 f_ovfl = TYPE_FIELDS (va_list_type_node);
4510 f_gtop = TREE_CHAIN (f_ovfl);
4511 f_ftop = TREE_CHAIN (f_gtop);
4512 f_goff = TREE_CHAIN (f_ftop);
4513 f_foff = TREE_CHAIN (f_goff);
4515 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
4517 gtop = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4519 ftop = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4521 goff = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4523 foff = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4526 /* Emit code to initialize OVFL, which points to the next varargs
4527 stack argument. CUM->STACK_WORDS gives the number of stack
4528 words used by named arguments. */
4529 t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
4530 if (cum->stack_words > 0)
4531 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), t,
4532 size_int (cum->stack_words * UNITS_PER_WORD));
4533 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
4534 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4536 /* Emit code to initialize GTOP, the top of the GPR save area. */
4537 t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
4538 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (gtop), gtop, t);
4539 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4541 /* Emit code to initialize FTOP, the top of the FPR save area.
4542 This address is gpr_save_area_bytes below GTOP, rounded
4543 down to the next fp-aligned boundary. */
4544 t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
4545 fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
4546 fpr_offset &= ~(UNITS_PER_FPVALUE - 1);
4548 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ftop), t,
4549 size_int (-fpr_offset));
4550 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ftop), ftop, t);
4551 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4553 /* Emit code to initialize GOFF, the offset from GTOP of the
4554 next GPR argument. */
4555 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (goff), goff,
4556 build_int_cst (NULL_TREE, gpr_save_area_size));
4557 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4559 /* Likewise emit code to initialize FOFF, the offset from FTOP
4560 of the next FPR argument. */
4561 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (foff), foff,
4562 build_int_cst (NULL_TREE, fpr_save_area_size));
4563 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4567 nextarg = plus_constant (nextarg, -cfun->machine->varargs_size);
4568 std_expand_builtin_va_start (valist, nextarg);
4572 /* Implement va_arg. */
4575 mips_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
4577 HOST_WIDE_INT size, rsize;
4581 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
4584 type = build_pointer_type (type);
4586 size = int_size_in_bytes (type);
4587 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
4589 if (mips_abi != ABI_EABI || !EABI_FLOAT_VARARGS_P)
4590 addr = std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4593 /* Not a simple merged stack. */
4595 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4596 tree ovfl, top, off, align;
4597 HOST_WIDE_INT osize;
4600 f_ovfl = TYPE_FIELDS (va_list_type_node);
4601 f_gtop = TREE_CHAIN (f_ovfl);
4602 f_ftop = TREE_CHAIN (f_gtop);
4603 f_goff = TREE_CHAIN (f_ftop);
4604 f_foff = TREE_CHAIN (f_goff);
4606 /* We maintain separate pointers and offsets for floating-point
4607 and integer arguments, but we need similar code in both cases.
4610 TOP be the top of the register save area;
4611 OFF be the offset from TOP of the next register;
4612 ADDR_RTX be the address of the argument;
4613 RSIZE be the number of bytes used to store the argument
4614 when it's in the register save area;
4615 OSIZE be the number of bytes used to store it when it's
4616 in the stack overflow area; and
4617 PADDING be (BYTES_BIG_ENDIAN ? OSIZE - RSIZE : 0)
4619 The code we want is:
4621 1: off &= -rsize; // round down
4624 4: addr_rtx = top - off;
4629 9: ovfl += ((intptr_t) ovfl + osize - 1) & -osize;
4630 10: addr_rtx = ovfl + PADDING;
4634 [1] and [9] can sometimes be optimized away. */
4636 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
4639 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
4640 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
4642 top = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4644 off = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4647 /* When floating-point registers are saved to the stack,
4648 each one will take up UNITS_PER_HWFPVALUE bytes, regardless
4649 of the float's precision. */
4650 rsize = UNITS_PER_HWFPVALUE;
4652 /* Overflow arguments are padded to UNITS_PER_WORD bytes
4653 (= PARM_BOUNDARY bits). This can be different from RSIZE
4656 (1) On 32-bit targets when TYPE is a structure such as:
4658 struct s { float f; };
4660 Such structures are passed in paired FPRs, so RSIZE
4661 will be 8 bytes. However, the structure only takes
4662 up 4 bytes of memory, so OSIZE will only be 4.
4664 (2) In combinations such as -mgp64 -msingle-float
4665 -fshort-double. Doubles passed in registers
4666 will then take up 4 (UNITS_PER_HWFPVALUE) bytes,
4667 but those passed on the stack take up
4668 UNITS_PER_WORD bytes. */
4669 osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
4673 top = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4675 off = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4677 if (rsize > UNITS_PER_WORD)
4679 /* [1] Emit code for: off &= -rsize. */
4680 t = build2 (BIT_AND_EXPR, TREE_TYPE (off), off,
4681 build_int_cst (NULL_TREE, -rsize));
4682 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (off), off, t);
4683 gimplify_and_add (t, pre_p);
4688 /* [2] Emit code to branch if off == 0. */
4689 t = build2 (NE_EXPR, boolean_type_node, off,
4690 build_int_cst (TREE_TYPE (off), 0));
4691 addr = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
4693 /* [5] Emit code for: off -= rsize. We do this as a form of
4694 post-increment not available to C. Also widen for the
4695 coming pointer arithmetic. */
4696 t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
4697 t = build2 (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
4698 t = fold_convert (sizetype, t);
4699 t = fold_build1 (NEGATE_EXPR, sizetype, t);
4701 /* [4] Emit code for: addr_rtx = top - off. On big endian machines,
4702 the argument has RSIZE - SIZE bytes of leading padding. */
4703 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (top), top, t);
4704 if (BYTES_BIG_ENDIAN && rsize > size)
4706 u = size_int (rsize - size);
4707 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
4709 COND_EXPR_THEN (addr) = t;
4711 if (osize > UNITS_PER_WORD)
4713 /* [9] Emit: ovfl += ((intptr_t) ovfl + osize - 1) & -osize. */
4714 u = size_int (osize - 1);
4715 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), ovfl, u);
4716 t = fold_convert (sizetype, t);
4717 u = size_int (-osize);
4718 t = build2 (BIT_AND_EXPR, sizetype, t, u);
4719 t = fold_convert (TREE_TYPE (ovfl), t);
4720 align = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
4725 /* [10, 11]. Emit code to store ovfl in addr_rtx, then
4726 post-increment ovfl by osize. On big-endian machines,
4727 the argument has OSIZE - SIZE bytes of leading padding. */
4728 u = fold_convert (TREE_TYPE (ovfl),
4729 build_int_cst (NULL_TREE, osize));
4730 t = build2 (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
4731 if (BYTES_BIG_ENDIAN && osize > size)
4733 u = size_int (osize - size);
4734 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
4737 /* String [9] and [10,11] together. */
4739 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), align, t);
4740 COND_EXPR_ELSE (addr) = t;
4742 addr = fold_convert (build_pointer_type (type), addr);
4743 addr = build_va_arg_indirect_ref (addr);
4747 addr = build_va_arg_indirect_ref (addr);
4752 /* Return true if it is possible to use left/right accesses for a
4753 bitfield of WIDTH bits starting BITPOS bits into *OP. When
4754 returning true, update *OP, *LEFT and *RIGHT as follows:
4756 *OP is a BLKmode reference to the whole field.
4758 *LEFT is a QImode reference to the first byte if big endian or
4759 the last byte if little endian. This address can be used in the
4760 left-side instructions (lwl, swl, ldl, sdl).
4762 *RIGHT is a QImode reference to the opposite end of the field and
4763 can be used in the patterning right-side instruction. */
4766 mips_get_unaligned_mem (rtx *op, unsigned int width, int bitpos,
4767 rtx *left, rtx *right)
4771 /* Check that the operand really is a MEM. Not all the extv and
4772 extzv predicates are checked. */
4776 /* Check that the size is valid. */
4777 if (width != 32 && (!TARGET_64BIT || width != 64))
4780 /* We can only access byte-aligned values. Since we are always passed
4781 a reference to the first byte of the field, it is not necessary to
4782 do anything with BITPOS after this check. */
4783 if (bitpos % BITS_PER_UNIT != 0)
4786 /* Reject aligned bitfields: we want to use a normal load or store
4787 instead of a left/right pair. */
4788 if (MEM_ALIGN (*op) >= width)
4791 /* Adjust *OP to refer to the whole field. This also has the effect
4792 of legitimizing *OP's address for BLKmode, possibly simplifying it. */
4793 *op = adjust_address (*op, BLKmode, 0);
4794 set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
4796 /* Get references to both ends of the field. We deliberately don't
4797 use the original QImode *OP for FIRST since the new BLKmode one
4798 might have a simpler address. */
4799 first = adjust_address (*op, QImode, 0);
4800 last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
4802 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
4803 be the upper word and RIGHT the lower word. */
4804 if (TARGET_BIG_ENDIAN)
4805 *left = first, *right = last;
4807 *left = last, *right = first;
4813 /* Try to emit the equivalent of (set DEST (zero_extract SRC WIDTH BITPOS)).
4814 Return true on success. We only handle cases where zero_extract is
4815 equivalent to sign_extract. */
4818 mips_expand_unaligned_load (rtx dest, rtx src, unsigned int width, int bitpos)
4820 rtx left, right, temp;
4822 /* If TARGET_64BIT, the destination of a 32-bit load will be a
4823 paradoxical word_mode subreg. This is the only case in which
4824 we allow the destination to be larger than the source. */
4825 if (GET_CODE (dest) == SUBREG
4826 && GET_MODE (dest) == DImode
4827 && SUBREG_BYTE (dest) == 0
4828 && GET_MODE (SUBREG_REG (dest)) == SImode)
4829 dest = SUBREG_REG (dest);
4831 /* After the above adjustment, the destination must be the same
4832 width as the source. */
4833 if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
4836 if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
4839 temp = gen_reg_rtx (GET_MODE (dest));
4840 if (GET_MODE (dest) == DImode)
4842 emit_insn (gen_mov_ldl (temp, src, left));
4843 emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
4847 emit_insn (gen_mov_lwl (temp, src, left));
4848 emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
4854 /* Try to expand (set (zero_extract DEST WIDTH BITPOS) SRC). Return
4858 mips_expand_unaligned_store (rtx dest, rtx src, unsigned int width, int bitpos)
4861 enum machine_mode mode;
4863 if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
4866 mode = mode_for_size (width, MODE_INT, 0);
4867 src = gen_lowpart (mode, src);
4871 emit_insn (gen_mov_sdl (dest, src, left));
4872 emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
4876 emit_insn (gen_mov_swl (dest, src, left));
4877 emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
4882 /* Return true if X is a MEM with the same size as MODE. */
4885 mips_mem_fits_mode_p (enum machine_mode mode, rtx x)
4892 size = MEM_SIZE (x);
4893 return size && INTVAL (size) == GET_MODE_SIZE (mode);
4896 /* Return true if (zero_extract OP SIZE POSITION) can be used as the
4897 source of an "ext" instruction or the destination of an "ins"
4898 instruction. OP must be a register operand and the following
4899 conditions must hold:
4901 0 <= POSITION < GET_MODE_BITSIZE (GET_MODE (op))
4902 0 < SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
4903 0 < POSITION + SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
4905 Also reject lengths equal to a word as they are better handled
4906 by the move patterns. */
4909 mips_use_ins_ext_p (rtx op, rtx size, rtx position)
4911 HOST_WIDE_INT len, pos;
4913 if (!ISA_HAS_EXT_INS
4914 || !register_operand (op, VOIDmode)
4915 || GET_MODE_BITSIZE (GET_MODE (op)) > BITS_PER_WORD)
4918 len = INTVAL (size);
4919 pos = INTVAL (position);
4921 if (len <= 0 || len >= GET_MODE_BITSIZE (GET_MODE (op))
4922 || pos < 0 || pos + len > GET_MODE_BITSIZE (GET_MODE (op)))
4928 /* Set up globals to generate code for the ISA or processor
4929 described by INFO. */
4932 mips_set_architecture (const struct mips_cpu_info *info)
4936 mips_arch_info = info;
4937 mips_arch = info->cpu;
4938 mips_isa = info->isa;
4943 /* Likewise for tuning. */
4946 mips_set_tune (const struct mips_cpu_info *info)
4950 mips_tune_info = info;
4951 mips_tune = info->cpu;
4955 /* Implement TARGET_HANDLE_OPTION. */
4958 mips_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
4963 if (strcmp (arg, "32") == 0)
4965 else if (strcmp (arg, "o64") == 0)
4967 else if (strcmp (arg, "n32") == 0)
4969 else if (strcmp (arg, "64") == 0)
4971 else if (strcmp (arg, "eabi") == 0)
4972 mips_abi = ABI_EABI;
4979 return mips_parse_cpu (arg) != 0;
4982 mips_isa_info = mips_parse_cpu (ACONCAT (("mips", arg, NULL)));
4983 return mips_isa_info != 0;
4985 case OPT_mno_flush_func:
4986 mips_cache_flush_func = NULL;
4994 /* Set up the threshold for data to go into the small data area, instead
4995 of the normal data area, and detect any conflicts in the switches. */
4998 override_options (void)
5000 int i, start, regno;
5001 enum machine_mode mode;
5003 #ifdef SUBTARGET_OVERRIDE_OPTIONS
5004 SUBTARGET_OVERRIDE_OPTIONS;
5007 mips_section_threshold = g_switch_set ? g_switch_value : MIPS_DEFAULT_GVALUE;
5009 /* The following code determines the architecture and register size.
5010 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
5011 The GAS and GCC code should be kept in sync as much as possible. */
5013 if (mips_arch_string != 0)
5014 mips_set_architecture (mips_parse_cpu (mips_arch_string));
5016 if (mips_isa_info != 0)
5018 if (mips_arch_info == 0)
5019 mips_set_architecture (mips_isa_info);
5020 else if (mips_arch_info->isa != mips_isa_info->isa)
5021 error ("-%s conflicts with the other architecture options, "
5022 "which specify a %s processor",
5023 mips_isa_info->name,
5024 mips_cpu_info_from_isa (mips_arch_info->isa)->name);
5027 if (mips_arch_info == 0)
5029 #ifdef MIPS_CPU_STRING_DEFAULT
5030 mips_set_architecture (mips_parse_cpu (MIPS_CPU_STRING_DEFAULT));
5032 mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
5036 if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
5037 error ("-march=%s is not compatible with the selected ABI",
5038 mips_arch_info->name);
5040 /* Optimize for mips_arch, unless -mtune selects a different processor. */
5041 if (mips_tune_string != 0)
5042 mips_set_tune (mips_parse_cpu (mips_tune_string));
5044 if (mips_tune_info == 0)
5045 mips_set_tune (mips_arch_info);
5047 /* Set cost structure for the processor. */
5049 mips_cost = &mips_rtx_cost_optimize_size;
5051 mips_cost = &mips_rtx_cost_data[mips_tune];
5053 /* If the user hasn't specified a branch cost, use the processor's
5055 if (mips_branch_cost == 0)
5056 mips_branch_cost = mips_cost->branch_cost;
5058 if ((target_flags_explicit & MASK_64BIT) != 0)
5060 /* The user specified the size of the integer registers. Make sure
5061 it agrees with the ABI and ISA. */
5062 if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
5063 error ("-mgp64 used with a 32-bit processor");
5064 else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
5065 error ("-mgp32 used with a 64-bit ABI");
5066 else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
5067 error ("-mgp64 used with a 32-bit ABI");
5071 /* Infer the integer register size from the ABI and processor.
5072 Restrict ourselves to 32-bit registers if that's all the
5073 processor has, or if the ABI cannot handle 64-bit registers. */
5074 if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
5075 target_flags &= ~MASK_64BIT;
5077 target_flags |= MASK_64BIT;
5080 if ((target_flags_explicit & MASK_FLOAT64) != 0)
5082 /* Really, -mfp32 and -mfp64 are ornamental options. There's
5083 only one right answer here. */
5084 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
5085 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
5086 else if (!TARGET_64BIT && TARGET_FLOAT64
5087 && !(ISA_HAS_MXHC1 && mips_abi == ABI_32))
5088 error ("-mgp32 and -mfp64 can only be combined if the target"
5089 " supports the mfhc1 and mthc1 instructions");
5090 else if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
5091 error ("unsupported combination: %s", "-mfp64 -msingle-float");
5095 /* -msingle-float selects 32-bit float registers. Otherwise the
5096 float registers should be the same size as the integer ones. */
5097 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
5098 target_flags |= MASK_FLOAT64;
5100 target_flags &= ~MASK_FLOAT64;
5103 /* End of code shared with GAS. */
5105 if ((target_flags_explicit & MASK_LONG64) == 0)
5107 if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
5108 target_flags |= MASK_LONG64;
5110 target_flags &= ~MASK_LONG64;
5113 if (MIPS_MARCH_CONTROLS_SOFT_FLOAT
5114 && (target_flags_explicit & MASK_SOFT_FLOAT) == 0)
5116 /* For some configurations, it is useful to have -march control
5117 the default setting of MASK_SOFT_FLOAT. */
5118 switch ((int) mips_arch)
5120 case PROCESSOR_R4100:
5121 case PROCESSOR_R4111:
5122 case PROCESSOR_R4120:
5123 case PROCESSOR_R4130:
5124 target_flags |= MASK_SOFT_FLOAT;
5128 target_flags &= ~MASK_SOFT_FLOAT;
5134 flag_pcc_struct_return = 0;
5136 if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
5138 /* If neither -mbranch-likely nor -mno-branch-likely was given
5139 on the command line, set MASK_BRANCHLIKELY based on the target
5142 By default, we enable use of Branch Likely instructions on
5143 all architectures which support them with the following
5144 exceptions: when creating MIPS32 or MIPS64 code, and when
5145 tuning for architectures where their use tends to hurt
5148 The MIPS32 and MIPS64 architecture specifications say "Software
5149 is strongly encouraged to avoid use of Branch Likely
5150 instructions, as they will be removed from a future revision
5151 of the [MIPS32 and MIPS64] architecture." Therefore, we do not
5152 issue those instructions unless instructed to do so by
5154 if (ISA_HAS_BRANCHLIKELY
5155 && !(ISA_MIPS32 || ISA_MIPS32R2 || ISA_MIPS64)
5156 && !(TUNE_MIPS5500 || TUNE_SB1))
5157 target_flags |= MASK_BRANCHLIKELY;
5159 target_flags &= ~MASK_BRANCHLIKELY;
5161 if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
5162 warning (0, "generation of Branch Likely instructions enabled, but not supported by architecture");
5164 /* The effect of -mabicalls isn't defined for the EABI. */
5165 if (mips_abi == ABI_EABI && TARGET_ABICALLS)
5167 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
5168 target_flags &= ~MASK_ABICALLS;
5171 if (TARGET_ABICALLS)
5173 /* We need to set flag_pic for executables as well as DSOs
5174 because we may reference symbols that are not defined in
5175 the final executable. (MIPS does not use things like
5176 copy relocs, for example.)
5178 Also, there is a body of code that uses __PIC__ to distinguish
5179 between -mabicalls and -mno-abicalls code. */
5181 if (mips_section_threshold > 0)
5182 warning (0, "%<-G%> is incompatible with %<-mabicalls%>");
5185 if (TARGET_VXWORKS_RTP && mips_section_threshold > 0)
5186 warning (0, "-G and -mrtp are incompatible");
5188 /* mips_split_addresses is a half-way house between explicit
5189 relocations and the traditional assembler macros. It can
5190 split absolute 32-bit symbolic constants into a high/lo_sum
5191 pair but uses macros for other sorts of access.
5193 Like explicit relocation support for REL targets, it relies
5194 on GNU extensions in the assembler and the linker.
5196 Although this code should work for -O0, it has traditionally
5197 been treated as an optimization. */
5198 if (!TARGET_MIPS16 && TARGET_SPLIT_ADDRESSES
5199 && optimize && !flag_pic
5200 && !ABI_HAS_64BIT_SYMBOLS)
5201 mips_split_addresses = 1;
5203 mips_split_addresses = 0;
5205 /* -mvr4130-align is a "speed over size" optimization: it usually produces
5206 faster code, but at the expense of more nops. Enable it at -O3 and
5208 if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
5209 target_flags |= MASK_VR4130_ALIGN;
5211 /* When compiling for the mips16, we cannot use floating point. We
5212 record the original hard float value in mips16_hard_float. */
5215 if (TARGET_SOFT_FLOAT)
5216 mips16_hard_float = 0;
5218 mips16_hard_float = 1;
5219 target_flags |= MASK_SOFT_FLOAT;
5221 /* Don't run the scheduler before reload, since it tends to
5222 increase register pressure. */
5223 flag_schedule_insns = 0;
5225 /* Don't do hot/cold partitioning. The constant layout code expects
5226 the whole function to be in a single section. */
5227 flag_reorder_blocks_and_partition = 0;
5229 /* Silently disable -mexplicit-relocs since it doesn't apply
5230 to mips16 code. Even so, it would overly pedantic to warn
5231 about "-mips16 -mexplicit-relocs", especially given that
5232 we use a %gprel() operator. */
5233 target_flags &= ~MASK_EXPLICIT_RELOCS;
5236 /* When using explicit relocs, we call dbr_schedule from within
5238 if (TARGET_EXPLICIT_RELOCS)
5240 mips_flag_delayed_branch = flag_delayed_branch;
5241 flag_delayed_branch = 0;
5244 #ifdef MIPS_TFMODE_FORMAT
5245 REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
5248 /* Make sure that the user didn't turn off paired single support when
5249 MIPS-3D support is requested. */
5250 if (TARGET_MIPS3D && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
5251 && !TARGET_PAIRED_SINGLE_FLOAT)
5252 error ("-mips3d requires -mpaired-single");
5254 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
5256 target_flags |= MASK_PAIRED_SINGLE_FLOAT;
5258 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
5259 and TARGET_HARD_FLOAT are both true. */
5260 if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT))
5261 error ("-mips3d/-mpaired-single must be used with -mfp64 -mhard-float");
5263 /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
5265 if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_MIPS64)
5266 error ("-mips3d/-mpaired-single must be used with -mips64");
5268 /* If TARGET_DSPR2, enable MASK_DSP. */
5270 target_flags |= MASK_DSP;
5272 if (TARGET_MIPS16 && TARGET_DSP)
5273 error ("-mips16 and -mdsp cannot be used together");
5275 mips_print_operand_punct['?'] = 1;
5276 mips_print_operand_punct['#'] = 1;
5277 mips_print_operand_punct['/'] = 1;
5278 mips_print_operand_punct['&'] = 1;
5279 mips_print_operand_punct['!'] = 1;
5280 mips_print_operand_punct['*'] = 1;
5281 mips_print_operand_punct['@'] = 1;
5282 mips_print_operand_punct['.'] = 1;
5283 mips_print_operand_punct['('] = 1;
5284 mips_print_operand_punct[')'] = 1;
5285 mips_print_operand_punct['['] = 1;
5286 mips_print_operand_punct[']'] = 1;
5287 mips_print_operand_punct['<'] = 1;
5288 mips_print_operand_punct['>'] = 1;
5289 mips_print_operand_punct['{'] = 1;
5290 mips_print_operand_punct['}'] = 1;
5291 mips_print_operand_punct['^'] = 1;
5292 mips_print_operand_punct['$'] = 1;
5293 mips_print_operand_punct['+'] = 1;
5294 mips_print_operand_punct['~'] = 1;
5296 /* Set up array to map GCC register number to debug register number.
5297 Ignore the special purpose register numbers. */
5299 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5301 mips_dbx_regno[i] = INVALID_REGNUM;
5302 if (GP_REG_P (i) || FP_REG_P (i) || ALL_COP_REG_P (i))
5303 mips_dwarf_regno[i] = i;
5305 mips_dwarf_regno[i] = INVALID_REGNUM;
5308 start = GP_DBX_FIRST - GP_REG_FIRST;
5309 for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
5310 mips_dbx_regno[i] = i + start;
5312 start = FP_DBX_FIRST - FP_REG_FIRST;
5313 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
5314 mips_dbx_regno[i] = i + start;
5316 /* HI and LO debug registers use big-endian ordering. */
5317 mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
5318 mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
5319 mips_dwarf_regno[HI_REGNUM] = MD_REG_FIRST + 0;
5320 mips_dwarf_regno[LO_REGNUM] = MD_REG_FIRST + 1;
5321 for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
5323 mips_dwarf_regno[i + TARGET_LITTLE_ENDIAN] = i;
5324 mips_dwarf_regno[i + TARGET_BIG_ENDIAN] = i + 1;
5327 /* Set up array giving whether a given register can hold a given mode. */
5329 for (mode = VOIDmode;
5330 mode != MAX_MACHINE_MODE;
5331 mode = (enum machine_mode) ((int)mode + 1))
5333 register int size = GET_MODE_SIZE (mode);
5334 register enum mode_class class = GET_MODE_CLASS (mode);
5336 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5340 if (mode == CCV2mode)
5343 && (regno - ST_REG_FIRST) % 2 == 0);
5345 else if (mode == CCV4mode)
5348 && (regno - ST_REG_FIRST) % 4 == 0);
5350 else if (mode == CCmode)
5353 temp = (regno == FPSW_REGNUM);
5355 temp = (ST_REG_P (regno) || GP_REG_P (regno)
5356 || FP_REG_P (regno));
5359 else if (GP_REG_P (regno))
5360 temp = ((regno & 1) == 0 || size <= UNITS_PER_WORD);
5362 else if (FP_REG_P (regno))
5363 temp = ((((regno % MAX_FPRS_PER_FMT) == 0)
5364 || (MIN_FPRS_PER_FMT == 1
5365 && size <= UNITS_PER_FPREG))
5366 && (((class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT
5367 || class == MODE_VECTOR_FLOAT)
5368 && size <= UNITS_PER_FPVALUE)
5369 /* Allow integer modes that fit into a single
5370 register. We need to put integers into FPRs
5371 when using instructions like cvt and trunc.
5372 We can't allow sizes smaller than a word,
5373 the FPU has no appropriate load/store
5374 instructions for those. */
5375 || (class == MODE_INT
5376 && size >= MIN_UNITS_PER_WORD
5377 && size <= UNITS_PER_FPREG)
5378 /* Allow TFmode for CCmode reloads. */
5379 || (ISA_HAS_8CC && mode == TFmode)));
5381 else if (ACC_REG_P (regno))
5382 temp = (INTEGRAL_MODE_P (mode)
5383 && size <= UNITS_PER_WORD * 2
5384 && (size <= UNITS_PER_WORD
5385 || regno == MD_REG_FIRST
5386 || (DSP_ACC_REG_P (regno)
5387 && ((regno - DSP_ACC_REG_FIRST) & 1) == 0)));
5389 else if (ALL_COP_REG_P (regno))
5390 temp = (class == MODE_INT && size <= UNITS_PER_WORD);
5394 mips_hard_regno_mode_ok[(int)mode][regno] = temp;
5398 /* Save GPR registers in word_mode sized hunks. word_mode hasn't been
5399 initialized yet, so we can't use that here. */
5400 gpr_mode = TARGET_64BIT ? DImode : SImode;
5402 /* Provide default values for align_* for 64-bit targets. */
5403 if (TARGET_64BIT && !TARGET_MIPS16)
5405 if (align_loops == 0)
5407 if (align_jumps == 0)
5409 if (align_functions == 0)
5410 align_functions = 8;
5413 /* Function to allocate machine-dependent function status. */
5414 init_machine_status = &mips_init_machine_status;
5416 if (ABI_HAS_64BIT_SYMBOLS)
5418 if (TARGET_EXPLICIT_RELOCS)
5420 mips_split_p[SYMBOL_64_HIGH] = true;
5421 mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
5422 mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
5424 mips_split_p[SYMBOL_64_MID] = true;
5425 mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
5426 mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
5428 mips_split_p[SYMBOL_64_LOW] = true;
5429 mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
5430 mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
5432 mips_split_p[SYMBOL_GENERAL] = true;
5433 mips_lo_relocs[SYMBOL_GENERAL] = "%lo(";
5438 if (TARGET_EXPLICIT_RELOCS || mips_split_addresses)
5440 mips_split_p[SYMBOL_GENERAL] = true;
5441 mips_hi_relocs[SYMBOL_GENERAL] = "%hi(";
5442 mips_lo_relocs[SYMBOL_GENERAL] = "%lo(";
5448 /* The high part is provided by a pseudo copy of $gp. */
5449 mips_split_p[SYMBOL_SMALL_DATA] = true;
5450 mips_lo_relocs[SYMBOL_SMALL_DATA] = "%gprel(";
5453 if (TARGET_EXPLICIT_RELOCS)
5455 /* Small data constants are kept whole until after reload,
5456 then lowered by mips_rewrite_small_data. */
5457 mips_lo_relocs[SYMBOL_SMALL_DATA] = "%gp_rel(";
5459 mips_split_p[SYMBOL_GOT_PAGE_OFST] = true;
5462 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
5463 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%got_ofst(";
5467 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
5468 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%lo(";
5473 /* The HIGH and LO_SUM are matched by special .md patterns. */
5474 mips_split_p[SYMBOL_GOT_DISP] = true;
5476 mips_split_p[SYMBOL_GOTOFF_DISP] = true;
5477 mips_hi_relocs[SYMBOL_GOTOFF_DISP] = "%got_hi(";
5478 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_lo(";
5480 mips_split_p[SYMBOL_GOTOFF_CALL] = true;
5481 mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
5482 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
5487 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_disp(";
5489 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got(";
5490 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
5496 mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
5497 mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
5498 mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
5501 /* Thread-local relocation operators. */
5502 mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
5503 mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";
5504 mips_split_p[SYMBOL_DTPREL] = 1;
5505 mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
5506 mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";
5507 mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";
5508 mips_split_p[SYMBOL_TPREL] = 1;
5509 mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
5510 mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
5512 mips_lo_relocs[SYMBOL_HALF] = "%half(";
5514 /* We don't have a thread pointer access instruction on MIPS16, or
5515 appropriate TLS relocations. */
5517 targetm.have_tls = false;
5519 /* Default to working around R4000 errata only if the processor
5520 was selected explicitly. */
5521 if ((target_flags_explicit & MASK_FIX_R4000) == 0
5522 && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
5523 target_flags |= MASK_FIX_R4000;
5525 /* Default to working around R4400 errata only if the processor
5526 was selected explicitly. */
5527 if ((target_flags_explicit & MASK_FIX_R4400) == 0
5528 && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
5529 target_flags |= MASK_FIX_R4400;
5532 /* Swap the register information for registers I and I + 1, which
5533 currently have the wrong endianness. Note that the registers'
5534 fixedness and call-clobberedness might have been set on the
5538 mips_swap_registers (unsigned int i)
5543 #define SWAP_INT(X, Y) (tmpi = (X), (X) = (Y), (Y) = tmpi)
5544 #define SWAP_STRING(X, Y) (tmps = (X), (X) = (Y), (Y) = tmps)
5546 SWAP_INT (fixed_regs[i], fixed_regs[i + 1]);
5547 SWAP_INT (call_used_regs[i], call_used_regs[i + 1]);
5548 SWAP_INT (call_really_used_regs[i], call_really_used_regs[i + 1]);
5549 SWAP_STRING (reg_names[i], reg_names[i + 1]);
5555 /* Implement CONDITIONAL_REGISTER_USAGE. */
5558 mips_conditional_register_usage (void)
5564 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
5565 fixed_regs[regno] = call_used_regs[regno] = 1;
5567 if (!TARGET_HARD_FLOAT)
5571 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
5572 fixed_regs[regno] = call_used_regs[regno] = 1;
5573 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
5574 fixed_regs[regno] = call_used_regs[regno] = 1;
5576 else if (! ISA_HAS_8CC)
5580 /* We only have a single condition code register. We
5581 implement this by hiding all the condition code registers,
5582 and generating RTL that refers directly to ST_REG_FIRST. */
5583 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
5584 fixed_regs[regno] = call_used_regs[regno] = 1;
5586 /* In mips16 mode, we permit the $t temporary registers to be used
5587 for reload. We prohibit the unused $s registers, since they
5588 are caller saved, and saving them via a mips16 register would
5589 probably waste more time than just reloading the value. */
5592 fixed_regs[18] = call_used_regs[18] = 1;
5593 fixed_regs[19] = call_used_regs[19] = 1;
5594 fixed_regs[20] = call_used_regs[20] = 1;
5595 fixed_regs[21] = call_used_regs[21] = 1;
5596 fixed_regs[22] = call_used_regs[22] = 1;
5597 fixed_regs[23] = call_used_regs[23] = 1;
5598 fixed_regs[26] = call_used_regs[26] = 1;
5599 fixed_regs[27] = call_used_regs[27] = 1;
5600 fixed_regs[30] = call_used_regs[30] = 1;
5602 /* fp20-23 are now caller saved. */
5603 if (mips_abi == ABI_64)
5606 for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
5607 call_really_used_regs[regno] = call_used_regs[regno] = 1;
5609 /* Odd registers from fp21 to fp31 are now caller saved. */
5610 if (mips_abi == ABI_N32)
5613 for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
5614 call_really_used_regs[regno] = call_used_regs[regno] = 1;
5616 /* Make sure that double-register accumulator values are correctly
5617 ordered for the current endianness. */
5618 if (TARGET_LITTLE_ENDIAN)
5621 mips_swap_registers (MD_REG_FIRST);
5622 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno += 2)
5623 mips_swap_registers (regno);
5627 /* Allocate a chunk of memory for per-function machine-dependent data. */
5628 static struct machine_function *
5629 mips_init_machine_status (void)
5631 return ((struct machine_function *)
5632 ggc_alloc_cleared (sizeof (struct machine_function)));
5635 /* On the mips16, we want to allocate $24 (T_REG) before other
5636 registers for instructions for which it is possible. This helps
5637 avoid shuffling registers around in order to set up for an xor,
5638 encouraging the compiler to use a cmp instead. */
5641 mips_order_regs_for_local_alloc (void)
5645 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5646 reg_alloc_order[i] = i;
5650 /* It really doesn't matter where we put register 0, since it is
5651 a fixed register anyhow. */
5652 reg_alloc_order[0] = 24;
5653 reg_alloc_order[24] = 0;
5658 /* The MIPS debug format wants all automatic variables and arguments
5659 to be in terms of the virtual frame pointer (stack pointer before
5660 any adjustment in the function), while the MIPS 3.0 linker wants
5661 the frame pointer to be the stack pointer after the initial
5662 adjustment. So, we do the adjustment here. The arg pointer (which
5663 is eliminated) points to the virtual frame pointer, while the frame
5664 pointer (which may be eliminated) points to the stack pointer after
5665 the initial adjustments. */
5668 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
5670 rtx offset2 = const0_rtx;
5671 rtx reg = eliminate_constant_term (addr, &offset2);
5674 offset = INTVAL (offset2);
5676 if (reg == stack_pointer_rtx || reg == frame_pointer_rtx
5677 || reg == hard_frame_pointer_rtx)
5679 HOST_WIDE_INT frame_size = (!cfun->machine->frame.initialized)
5680 ? compute_frame_size (get_frame_size ())
5681 : cfun->machine->frame.total_size;
5683 /* MIPS16 frame is smaller */
5684 if (frame_pointer_needed && TARGET_MIPS16)
5685 frame_size -= cfun->machine->frame.args_size;
5687 offset = offset - frame_size;
5690 /* sdbout_parms does not want this to crash for unrecognized cases. */
5692 else if (reg != arg_pointer_rtx)
5693 fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
5700 /* Implement the PRINT_OPERAND macro. The MIPS-specific operand codes are:
5702 'X' OP is CONST_INT, prints 32 bits in hexadecimal format = "0x%08x",
5703 'x' OP is CONST_INT, prints 16 bits in hexadecimal format = "0x%04x",
5704 'h' OP is HIGH, prints %hi(X),
5705 'd' output integer constant in decimal,
5706 'z' if the operand is 0, use $0 instead of normal operand.
5707 'D' print second part of double-word register or memory operand.
5708 'L' print low-order register of double-word register operand.
5709 'M' print high-order register of double-word register operand.
5710 'C' print part of opcode for a branch condition.
5711 'F' print part of opcode for a floating-point branch condition.
5712 'N' print part of opcode for a branch condition, inverted.
5713 'W' print part of opcode for a floating-point branch condition, inverted.
5714 'T' print 'f' for (eq:CC ...), 't' for (ne:CC ...),
5715 'z' for (eq:?I ...), 'n' for (ne:?I ...).
5716 't' like 'T', but with the EQ/NE cases reversed
5717 'Y' for a CONST_INT X, print mips_fp_conditions[X]
5718 'Z' print the operand and a comma for ISA_HAS_8CC, otherwise print nothing
5719 'R' print the reloc associated with LO_SUM
5720 'q' print DSP accumulator registers
5722 The punctuation characters are:
5724 '(' Turn on .set noreorder
5725 ')' Turn on .set reorder
5726 '[' Turn on .set noat
5728 '<' Turn on .set nomacro
5729 '>' Turn on .set macro
5730 '{' Turn on .set volatile (not GAS)
5731 '}' Turn on .set novolatile (not GAS)
5732 '&' Turn on .set noreorder if filling delay slots
5733 '*' Turn on both .set noreorder and .set nomacro if filling delay slots
5734 '!' Turn on .set nomacro if filling delay slots
5735 '#' Print nop if in a .set noreorder section.
5736 '/' Like '#', but does nothing within a delayed branch sequence
5737 '?' Print 'l' if we are to use a branch likely instead of normal branch.
5738 '@' Print the name of the assembler temporary register (at or $1).
5739 '.' Print the name of the register with a hard-wired zero (zero or $0).
5740 '^' Print the name of the pic call-through register (t9 or $25).
5741 '$' Print the name of the stack pointer register (sp or $29).
5742 '+' Print the name of the gp register (usually gp or $28).
5743 '~' Output a branch alignment to LABEL_ALIGN(NULL). */
5746 print_operand (FILE *file, rtx op, int letter)
5748 register enum rtx_code code;
5750 if (PRINT_OPERAND_PUNCT_VALID_P (letter))
5755 if (mips_branch_likely)
5760 fputs (reg_names [GP_REG_FIRST + 1], file);
5764 fputs (reg_names [PIC_FUNCTION_ADDR_REGNUM], file);
5768 fputs (reg_names [GP_REG_FIRST + 0], file);
5772 fputs (reg_names[STACK_POINTER_REGNUM], file);
5776 fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
5780 if (final_sequence != 0 && set_noreorder++ == 0)
5781 fputs (".set\tnoreorder\n\t", file);
5785 if (final_sequence != 0)
5787 if (set_noreorder++ == 0)
5788 fputs (".set\tnoreorder\n\t", file);
5790 if (set_nomacro++ == 0)
5791 fputs (".set\tnomacro\n\t", file);
5796 if (final_sequence != 0 && set_nomacro++ == 0)
5797 fputs ("\n\t.set\tnomacro", file);
5801 if (set_noreorder != 0)
5802 fputs ("\n\tnop", file);
5806 /* Print an extra newline so that the delayed insn is separated
5807 from the following ones. This looks neater and is consistent
5808 with non-nop delayed sequences. */
5809 if (set_noreorder != 0 && final_sequence == 0)
5810 fputs ("\n\tnop\n", file);
5814 if (set_noreorder++ == 0)
5815 fputs (".set\tnoreorder\n\t", file);
5819 if (set_noreorder == 0)
5820 error ("internal error: %%) found without a %%( in assembler pattern");
5822 else if (--set_noreorder == 0)
5823 fputs ("\n\t.set\treorder", file);
5828 if (set_noat++ == 0)
5829 fputs (".set\tnoat\n\t", file);
5834 error ("internal error: %%] found without a %%[ in assembler pattern");
5835 else if (--set_noat == 0)
5836 fputs ("\n\t.set\tat", file);
5841 if (set_nomacro++ == 0)
5842 fputs (".set\tnomacro\n\t", file);
5846 if (set_nomacro == 0)
5847 error ("internal error: %%> found without a %%< in assembler pattern");
5848 else if (--set_nomacro == 0)
5849 fputs ("\n\t.set\tmacro", file);
5854 if (set_volatile++ == 0)
5855 fputs ("#.set\tvolatile\n\t", file);
5859 if (set_volatile == 0)
5860 error ("internal error: %%} found without a %%{ in assembler pattern");
5861 else if (--set_volatile == 0)
5862 fputs ("\n\t#.set\tnovolatile", file);
5868 if (align_labels_log > 0)
5869 ASM_OUTPUT_ALIGN (file, align_labels_log);
5874 error ("PRINT_OPERAND: unknown punctuation '%c'", letter);
5883 error ("PRINT_OPERAND null pointer");
5887 code = GET_CODE (op);
5892 case EQ: fputs ("eq", file); break;
5893 case NE: fputs ("ne", file); break;
5894 case GT: fputs ("gt", file); break;
5895 case GE: fputs ("ge", file); break;
5896 case LT: fputs ("lt", file); break;
5897 case LE: fputs ("le", file); break;
5898 case GTU: fputs ("gtu", file); break;
5899 case GEU: fputs ("geu", file); break;
5900 case LTU: fputs ("ltu", file); break;
5901 case LEU: fputs ("leu", file); break;
5903 fatal_insn ("PRINT_OPERAND, invalid insn for %%C", op);
5906 else if (letter == 'N')
5909 case EQ: fputs ("ne", file); break;
5910 case NE: fputs ("eq", file); break;
5911 case GT: fputs ("le", file); break;
5912 case GE: fputs ("lt", file); break;
5913 case LT: fputs ("ge", file); break;
5914 case LE: fputs ("gt", file); break;
5915 case GTU: fputs ("leu", file); break;
5916 case GEU: fputs ("ltu", file); break;
5917 case LTU: fputs ("geu", file); break;
5918 case LEU: fputs ("gtu", file); break;
5920 fatal_insn ("PRINT_OPERAND, invalid insn for %%N", op);
5923 else if (letter == 'F')
5926 case EQ: fputs ("c1f", file); break;
5927 case NE: fputs ("c1t", file); break;
5929 fatal_insn ("PRINT_OPERAND, invalid insn for %%F", op);
5932 else if (letter == 'W')
5935 case EQ: fputs ("c1t", file); break;
5936 case NE: fputs ("c1f", file); break;
5938 fatal_insn ("PRINT_OPERAND, invalid insn for %%W", op);
5941 else if (letter == 'h')
5943 if (GET_CODE (op) == HIGH)
5946 print_operand_reloc (file, op, mips_hi_relocs);
5949 else if (letter == 'R')
5950 print_operand_reloc (file, op, mips_lo_relocs);
5952 else if (letter == 'Y')
5954 if (GET_CODE (op) == CONST_INT
5955 && ((unsigned HOST_WIDE_INT) INTVAL (op)
5956 < ARRAY_SIZE (mips_fp_conditions)))
5957 fputs (mips_fp_conditions[INTVAL (op)], file);
5959 output_operand_lossage ("invalid %%Y value");
5962 else if (letter == 'Z')
5966 print_operand (file, op, 0);
5971 else if (letter == 'q')
5976 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
5978 regnum = REGNO (op);
5979 if (MD_REG_P (regnum))
5980 fprintf (file, "$ac0");
5981 else if (DSP_ACC_REG_P (regnum))
5982 fprintf (file, "$ac%c", reg_names[regnum][3]);
5984 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
5987 else if (code == REG || code == SUBREG)
5989 register int regnum;
5992 regnum = REGNO (op);
5994 regnum = true_regnum (op);
5996 if ((letter == 'M' && ! WORDS_BIG_ENDIAN)
5997 || (letter == 'L' && WORDS_BIG_ENDIAN)
6001 fprintf (file, "%s", reg_names[regnum]);
6004 else if (code == MEM)
6007 output_address (plus_constant (XEXP (op, 0), 4));
6009 output_address (XEXP (op, 0));
6012 else if (letter == 'x' && GET_CODE (op) == CONST_INT)
6013 fprintf (file, HOST_WIDE_INT_PRINT_HEX, 0xffff & INTVAL(op));
6015 else if (letter == 'X' && GET_CODE(op) == CONST_INT)
6016 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
6018 else if (letter == 'd' && GET_CODE(op) == CONST_INT)
6019 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (INTVAL(op)));
6021 else if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
6022 fputs (reg_names[GP_REG_FIRST], file);
6024 else if (letter == 'd' || letter == 'x' || letter == 'X')
6025 output_operand_lossage ("invalid use of %%d, %%x, or %%X");
6027 else if (letter == 'T' || letter == 't')
6029 int truth = (code == NE) == (letter == 'T');
6030 fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
6033 else if (CONST_GP_P (op))
6034 fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
6037 output_addr_const (file, op);
6041 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM.
6042 RELOCS is the array of relocations to use. */
6045 print_operand_reloc (FILE *file, rtx op, const char **relocs)
6047 enum mips_symbol_type symbol_type;
6051 if (!mips_symbolic_constant_p (op, &symbol_type) || relocs[symbol_type] == 0)
6052 fatal_insn ("PRINT_OPERAND, invalid operand for relocation", op);
6054 /* If OP uses an UNSPEC address, we want to print the inner symbol. */
6055 split_const (op, &base, &offset);
6056 if (UNSPEC_ADDRESS_P (base))
6057 op = plus_constant (UNSPEC_ADDRESS (base), INTVAL (offset));
6059 fputs (relocs[symbol_type], file);
6060 output_addr_const (file, op);
6061 for (p = relocs[symbol_type]; *p != 0; p++)
6066 /* Output address operand X to FILE. */
6069 print_operand_address (FILE *file, rtx x)
6071 struct mips_address_info addr;
6073 if (mips_classify_address (&addr, x, word_mode, true))
6077 print_operand (file, addr.offset, 0);
6078 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
6081 case ADDRESS_LO_SUM:
6082 print_operand (file, addr.offset, 'R');
6083 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
6086 case ADDRESS_CONST_INT:
6087 output_addr_const (file, x);
6088 fprintf (file, "(%s)", reg_names[0]);
6091 case ADDRESS_SYMBOLIC:
6092 output_addr_const (file, x);
6098 /* When using assembler macros, keep track of all of small-data externs
6099 so that mips_file_end can emit the appropriate declarations for them.
6101 In most cases it would be safe (though pointless) to emit .externs
6102 for other symbols too. One exception is when an object is within
6103 the -G limit but declared by the user to be in a section other
6104 than .sbss or .sdata. */
6107 mips_output_external (FILE *file, tree decl, const char *name)
6109 default_elf_asm_output_external (file, decl, name);
6111 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
6112 set in order to avoid putting out names that are never really
6114 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
6116 if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
6118 fputs ("\t.extern\t", file);
6119 assemble_name (file, name);
6120 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC "\n",
6121 int_size_in_bytes (TREE_TYPE (decl)));
6123 else if (TARGET_IRIX
6124 && mips_abi == ABI_32
6125 && TREE_CODE (decl) == FUNCTION_DECL)
6127 /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
6128 `.global name .text' directive for every used but
6129 undefined function. If we don't, the linker may perform
6130 an optimization (skipping over the insns that set $gp)
6131 when it is unsafe. */
6132 fputs ("\t.globl ", file);
6133 assemble_name (file, name);
6134 fputs (" .text\n", file);
6139 /* Emit a new filename to a stream. If we are smuggling stabs, try to
6140 put out a MIPS ECOFF file and a stab. */
6143 mips_output_filename (FILE *stream, const char *name)
6146 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
6148 if (write_symbols == DWARF2_DEBUG)
6150 else if (mips_output_filename_first_time)
6152 mips_output_filename_first_time = 0;
6153 num_source_filenames += 1;
6154 current_function_file = name;
6155 fprintf (stream, "\t.file\t%d ", num_source_filenames);
6156 output_quoted_string (stream, name);
6157 putc ('\n', stream);
6160 /* If we are emitting stabs, let dbxout.c handle this (except for
6161 the mips_output_filename_first_time case). */
6162 else if (write_symbols == DBX_DEBUG)
6165 else if (name != current_function_file
6166 && strcmp (name, current_function_file) != 0)
6168 num_source_filenames += 1;
6169 current_function_file = name;
6170 fprintf (stream, "\t.file\t%d ", num_source_filenames);
6171 output_quoted_string (stream, name);
6172 putc ('\n', stream);
6176 /* Output an ASCII string, in a space-saving way. PREFIX is the string
6177 that should be written before the opening quote, such as "\t.ascii\t"
6178 for real string data or "\t# " for a comment. */
6181 mips_output_ascii (FILE *stream, const char *string_param, size_t len,
6186 register const unsigned char *string =
6187 (const unsigned char *)string_param;
6189 fprintf (stream, "%s\"", prefix);
6190 for (i = 0; i < len; i++)
6192 register int c = string[i];
6196 if (c == '\\' || c == '\"')
6198 putc ('\\', stream);
6206 fprintf (stream, "\\%03o", c);
6210 if (cur_pos > 72 && i+1 < len)
6213 fprintf (stream, "\"\n%s\"", prefix);
6216 fprintf (stream, "\"\n");
6219 /* Implement TARGET_ASM_FILE_START. */
6222 mips_file_start (void)
6224 default_file_start ();
6228 /* Generate a special section to describe the ABI switches used to
6229 produce the resultant binary. This used to be done by the assembler
6230 setting bits in the ELF header's flags field, but we have run out of
6231 bits. GDB needs this information in order to be able to correctly
6232 debug these binaries. See the function mips_gdbarch_init() in
6233 gdb/mips-tdep.c. This is unnecessary for the IRIX 5/6 ABIs and
6234 causes unnecessary IRIX 6 ld warnings. */
6235 const char * abi_string = NULL;
6239 case ABI_32: abi_string = "abi32"; break;
6240 case ABI_N32: abi_string = "abiN32"; break;
6241 case ABI_64: abi_string = "abi64"; break;
6242 case ABI_O64: abi_string = "abiO64"; break;
6243 case ABI_EABI: abi_string = TARGET_64BIT ? "eabi64" : "eabi32"; break;
6247 /* Note - we use fprintf directly rather than calling switch_to_section
6248 because in this way we can avoid creating an allocated section. We
6249 do not want this section to take up any space in the running
6251 fprintf (asm_out_file, "\t.section .mdebug.%s\n", abi_string);
6253 /* There is no ELF header flag to distinguish long32 forms of the
6254 EABI from long64 forms. Emit a special section to help tools
6255 such as GDB. Do the same for o64, which is sometimes used with
6257 if (mips_abi == ABI_EABI || mips_abi == ABI_O64)
6258 fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n",
6259 TARGET_LONG64 ? 64 : 32);
6261 /* Restore the default section. */
6262 fprintf (asm_out_file, "\t.previous\n");
6264 #ifdef HAVE_AS_GNU_ATTRIBUTE
6265 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
6266 TARGET_HARD_FLOAT_ABI ? (TARGET_DOUBLE_FLOAT ? 1 : 2) : 3);
6270 /* Generate the pseudo ops that System V.4 wants. */
6271 if (TARGET_ABICALLS)
6272 fprintf (asm_out_file, "\t.abicalls\n");
6275 fprintf (asm_out_file, "\t.set\tmips16\n");
6277 if (flag_verbose_asm)
6278 fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
6280 mips_section_threshold, mips_arch_info->name, mips_isa);
6283 #ifdef BSS_SECTION_ASM_OP
6284 /* Implement ASM_OUTPUT_ALIGNED_BSS. This differs from the default only
6285 in the use of sbss. */
6288 mips_output_aligned_bss (FILE *stream, tree decl, const char *name,
6289 unsigned HOST_WIDE_INT size, int align)
6291 extern tree last_assemble_variable_decl;
6293 if (mips_in_small_data_p (decl))
6294 switch_to_section (get_named_section (NULL, ".sbss", 0));
6296 switch_to_section (bss_section);
6297 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
6298 last_assemble_variable_decl = decl;
6299 ASM_DECLARE_OBJECT_NAME (stream, name, decl);
6300 ASM_OUTPUT_SKIP (stream, size != 0 ? size : 1);
6304 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
6305 elfos.h version, but we also need to handle -muninit-const-in-rodata. */
6308 mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
6309 unsigned HOST_WIDE_INT size,
6312 /* If the target wants uninitialized const declarations in
6313 .rdata then don't put them in .comm. */
6314 if (TARGET_EMBEDDED_DATA && TARGET_UNINIT_CONST_IN_RODATA
6315 && TREE_CODE (decl) == VAR_DECL && TREE_READONLY (decl)
6316 && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
6318 if (TREE_PUBLIC (decl) && DECL_NAME (decl))
6319 targetm.asm_out.globalize_label (stream, name);
6321 switch_to_section (readonly_data_section);
6322 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
6323 mips_declare_object (stream, name, "",
6324 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
6328 mips_declare_common_object (stream, name, "\n\t.comm\t",
6332 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
6333 NAME is the name of the object and ALIGN is the required alignment
6334 in bytes. TAKES_ALIGNMENT_P is true if the directive takes a third
6335 alignment argument. */
6338 mips_declare_common_object (FILE *stream, const char *name,
6339 const char *init_string,
6340 unsigned HOST_WIDE_INT size,
6341 unsigned int align, bool takes_alignment_p)
6343 if (!takes_alignment_p)
6345 size += (align / BITS_PER_UNIT) - 1;
6346 size -= size % (align / BITS_PER_UNIT);
6347 mips_declare_object (stream, name, init_string,
6348 "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
6351 mips_declare_object (stream, name, init_string,
6352 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
6353 size, align / BITS_PER_UNIT);
6356 /* Emit either a label, .comm, or .lcomm directive. When using assembler
6357 macros, mark the symbol as written so that mips_file_end won't emit an
6358 .extern for it. STREAM is the output file, NAME is the name of the
6359 symbol, INIT_STRING is the string that should be written before the
6360 symbol and FINAL_STRING is the string that should be written after it.
6361 FINAL_STRING is a printf() format that consumes the remaining arguments. */
6364 mips_declare_object (FILE *stream, const char *name, const char *init_string,
6365 const char *final_string, ...)
6369 fputs (init_string, stream);
6370 assemble_name (stream, name);
6371 va_start (ap, final_string);
6372 vfprintf (stream, final_string, ap);
6375 if (!TARGET_EXPLICIT_RELOCS)
6377 tree name_tree = get_identifier (name);
6378 TREE_ASM_WRITTEN (name_tree) = 1;
6382 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
6383 extern int size_directive_output;
6385 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
6386 definitions except that it uses mips_declare_object() to emit the label. */
6389 mips_declare_object_name (FILE *stream, const char *name,
6390 tree decl ATTRIBUTE_UNUSED)
6392 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
6393 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
6396 size_directive_output = 0;
6397 if (!flag_inhibit_size_directive && DECL_SIZE (decl))
6401 size_directive_output = 1;
6402 size = int_size_in_bytes (TREE_TYPE (decl));
6403 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
6406 mips_declare_object (stream, name, "", ":\n");
6409 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */
6412 mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
6416 name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
6417 if (!flag_inhibit_size_directive
6418 && DECL_SIZE (decl) != 0
6419 && !at_end && top_level
6420 && DECL_INITIAL (decl) == error_mark_node
6421 && !size_directive_output)
6425 size_directive_output = 1;
6426 size = int_size_in_bytes (TREE_TYPE (decl));
6427 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
6432 /* Return true if X is a small data address that can be rewritten
6436 mips_rewrite_small_data_p (rtx x)
6438 enum mips_symbol_type symbol_type;
6440 return (TARGET_EXPLICIT_RELOCS
6441 && mips_symbolic_constant_p (x, &symbol_type)
6442 && symbol_type == SYMBOL_SMALL_DATA);
6446 /* A for_each_rtx callback for mips_small_data_pattern_p. */
6449 mips_small_data_pattern_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
6451 if (GET_CODE (*loc) == LO_SUM)
6454 return mips_rewrite_small_data_p (*loc);
6457 /* Return true if OP refers to small data symbols directly, not through
6461 mips_small_data_pattern_p (rtx op)
6463 return for_each_rtx (&op, mips_small_data_pattern_1, 0);
6466 /* A for_each_rtx callback, used by mips_rewrite_small_data. */
6469 mips_rewrite_small_data_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
6471 if (mips_rewrite_small_data_p (*loc))
6472 *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
6474 if (GET_CODE (*loc) == LO_SUM)
6480 /* If possible, rewrite OP so that it refers to small data using
6481 explicit relocations. */
6484 mips_rewrite_small_data (rtx op)
6486 op = copy_insn (op);
6487 for_each_rtx (&op, mips_rewrite_small_data_1, 0);
6491 /* Return true if the current function has an insn that implicitly
6495 mips_function_has_gp_insn (void)
6497 /* Don't bother rechecking if we found one last time. */
6498 if (!cfun->machine->has_gp_insn_p)
6502 push_topmost_sequence ();
6503 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6505 && GET_CODE (PATTERN (insn)) != USE
6506 && GET_CODE (PATTERN (insn)) != CLOBBER
6507 && (get_attr_got (insn) != GOT_UNSET
6508 || small_data_pattern (PATTERN (insn), VOIDmode)))
6510 pop_topmost_sequence ();
6512 cfun->machine->has_gp_insn_p = (insn != 0);
6514 return cfun->machine->has_gp_insn_p;
6518 /* Return the register that should be used as the global pointer
6519 within this function. Return 0 if the function doesn't need
6520 a global pointer. */
6523 mips_global_pointer (void)
6527 /* $gp is always available unless we're using a GOT. */
6528 if (!TARGET_USE_GOT)
6529 return GLOBAL_POINTER_REGNUM;
6531 /* We must always provide $gp when it is used implicitly. */
6532 if (!TARGET_EXPLICIT_RELOCS)
6533 return GLOBAL_POINTER_REGNUM;
6535 /* FUNCTION_PROFILER includes a jal macro, so we need to give it
6537 if (current_function_profile)
6538 return GLOBAL_POINTER_REGNUM;
6540 /* If the function has a nonlocal goto, $gp must hold the correct
6541 global pointer for the target function. */
6542 if (current_function_has_nonlocal_goto)
6543 return GLOBAL_POINTER_REGNUM;
6545 /* If the gp is never referenced, there's no need to initialize it.
6546 Note that reload can sometimes introduce constant pool references
6547 into a function that otherwise didn't need them. For example,
6548 suppose we have an instruction like:
6550 (set (reg:DF R1) (float:DF (reg:SI R2)))
6552 If R2 turns out to be constant such as 1, the instruction may have a
6553 REG_EQUAL note saying that R1 == 1.0. Reload then has the option of
6554 using this constant if R2 doesn't get allocated to a register.
6556 In cases like these, reload will have added the constant to the pool
6557 but no instruction will yet refer to it. */
6558 if (!df_regs_ever_live_p (GLOBAL_POINTER_REGNUM)
6559 && !current_function_uses_const_pool
6560 && !mips_function_has_gp_insn ())
6563 /* We need a global pointer, but perhaps we can use a call-clobbered
6564 register instead of $gp. */
6565 if (TARGET_CALL_SAVED_GP && current_function_is_leaf)
6566 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
6567 if (!df_regs_ever_live_p (regno)
6568 && call_used_regs[regno]
6569 && !fixed_regs[regno]
6570 && regno != PIC_FUNCTION_ADDR_REGNUM)
6573 return GLOBAL_POINTER_REGNUM;
6577 /* Return true if the function return value MODE will get returned in a
6578 floating-point register. */
6581 mips_return_mode_in_fpr_p (enum machine_mode mode)
6583 return ((GET_MODE_CLASS (mode) == MODE_FLOAT
6584 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
6585 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
6586 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);
6589 /* Return a two-character string representing a function floating-point
6590 return mode, used to name MIPS16 function stubs. */
6593 mips16_call_stub_mode_suffix (enum machine_mode mode)
6597 else if (mode == DFmode)
6599 else if (mode == SCmode)
6601 else if (mode == DCmode)
6603 else if (mode == V2SFmode)
6609 /* Return true if the current function returns its value in a floating-point
6610 register in MIPS16 mode. */
6613 mips16_cfun_returns_in_fpr_p (void)
6615 tree return_type = DECL_RESULT (current_function_decl);
6616 return (mips16_hard_float
6617 && !aggregate_value_p (return_type, current_function_decl)
6618 && mips_return_mode_in_fpr_p (DECL_MODE (return_type)));
6622 /* Return true if the current function must save REGNO. */
6625 mips_save_reg_p (unsigned int regno)
6627 /* We only need to save $gp if TARGET_CALL_SAVED_GP and only then
6628 if we have not chosen a call-clobbered substitute. */
6629 if (regno == GLOBAL_POINTER_REGNUM)
6630 return TARGET_CALL_SAVED_GP && cfun->machine->global_pointer == regno;
6632 /* Check call-saved registers. */
6633 if (df_regs_ever_live_p (regno) && !call_used_regs[regno])
6636 /* Save both registers in an FPR pair if either one is used. This is
6637 needed for the case when MIN_FPRS_PER_FMT == 1, which allows the odd
6638 register to be used without the even register. */
6639 if (FP_REG_P (regno)
6640 && MAX_FPRS_PER_FMT == 2
6641 && df_regs_ever_live_p (regno + 1)
6642 && !call_used_regs[regno + 1])
6645 /* We need to save the old frame pointer before setting up a new one. */
6646 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
6649 /* We need to save the incoming return address if it is ever clobbered
6650 within the function. */
6651 if (regno == GP_REG_FIRST + 31 && df_regs_ever_live_p (regno))
6656 /* $18 is a special case in mips16 code. It may be used to call
6657 a function which returns a floating point value, but it is
6658 marked in call_used_regs. */
6659 if (regno == GP_REG_FIRST + 18 && df_regs_ever_live_p (regno))
6662 /* $31 is also a special case. It will be used to copy a return
6663 value into the floating point registers if the return value is
6665 if (regno == GP_REG_FIRST + 31
6666 && mips16_cfun_returns_in_fpr_p ())
6673 /* Return the index of the lowest X in the range [0, SIZE) for which
6674 bit REGS[X] is set in MASK. Return SIZE if there is no such X. */
6677 mips16e_find_first_register (unsigned int mask, const unsigned char *regs,
6682 for (i = 0; i < size; i++)
6683 if (BITSET_P (mask, regs[i]))
6689 /* *MASK_PTR is a mask of general purpose registers and *GP_REG_SIZE_PTR
6690 is the number of bytes that they occupy. If *MASK_PTR contains REGS[X]
6691 for some X in [0, SIZE), adjust *MASK_PTR and *GP_REG_SIZE_PTR so that
6692 the same is true for all indexes (X, SIZE). */
6695 mips16e_mask_registers (unsigned int *mask_ptr, const unsigned char *regs,
6696 unsigned int size, HOST_WIDE_INT *gp_reg_size_ptr)
6700 i = mips16e_find_first_register (*mask_ptr, regs, size);
6701 for (i++; i < size; i++)
6702 if (!BITSET_P (*mask_ptr, regs[i]))
6704 *gp_reg_size_ptr += GET_MODE_SIZE (gpr_mode);
6705 *mask_ptr |= 1 << regs[i];
6709 /* Return the bytes needed to compute the frame pointer from the current
6710 stack pointer. SIZE is the size (in bytes) of the local variables.
6712 MIPS stack frames look like:
6714 Before call After call
6715 high +-----------------------+ +-----------------------+
6717 | caller's temps. | | caller's temps. |
6719 +-----------------------+ +-----------------------+
6721 | arguments on stack. | | arguments on stack. |
6723 +-----------------------+ +-----------------------+
6724 | 4 words to save | | 4 words to save |
6725 | arguments passed | | arguments passed |
6726 | in registers, even | | in registers, even |
6727 | if not passed. | | if not passed. |
6728 SP->+-----------------------+ VFP->+-----------------------+
6729 (VFP = SP+fp_sp_offset) | |\
6730 | fp register save | | fp_reg_size
6732 SP+gp_sp_offset->+-----------------------+
6734 | | gp register save | | gp_reg_size
6735 gp_reg_rounded | | |/
6736 | +-----------------------+
6737 \| alignment padding |
6738 +-----------------------+
6740 | local variables | | var_size
6742 +-----------------------+
6744 | alloca allocations |
6746 +-----------------------+
6748 cprestore_size | | GP save for V.4 abi |
6750 +-----------------------+
6752 | arguments on stack | |
6754 +-----------------------+ |
6755 | 4 words to save | | args_size
6756 | arguments passed | |
6757 | in registers, even | |
6758 | if not passed. | |
6759 low | (TARGET_OLDABI only) |/
6760 memory SP->+-----------------------+
6765 compute_frame_size (HOST_WIDE_INT size)
6768 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
6769 HOST_WIDE_INT var_size; /* # bytes that variables take up */
6770 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
6771 HOST_WIDE_INT cprestore_size; /* # bytes that the cprestore slot takes up */
6772 HOST_WIDE_INT gp_reg_rounded; /* # bytes needed to store gp after rounding */
6773 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
6774 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
6775 unsigned int mask; /* mask of saved gp registers */
6776 unsigned int fmask; /* mask of saved fp registers */
6778 cfun->machine->global_pointer = mips_global_pointer ();
6784 var_size = MIPS_STACK_ALIGN (size);
6785 args_size = current_function_outgoing_args_size;
6786 cprestore_size = MIPS_STACK_ALIGN (STARTING_FRAME_OFFSET) - args_size;
6788 /* The space set aside by STARTING_FRAME_OFFSET isn't needed in leaf
6789 functions. If the function has local variables, we're committed
6790 to allocating it anyway. Otherwise reclaim it here. */
6791 if (var_size == 0 && current_function_is_leaf)
6792 cprestore_size = args_size = 0;
6794 /* The MIPS 3.0 linker does not like functions that dynamically
6795 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
6796 looks like we are trying to create a second frame pointer to the
6797 function, so allocate some stack space to make it happy. */
6799 if (args_size == 0 && current_function_calls_alloca)
6800 args_size = 4 * UNITS_PER_WORD;
6802 total_size = var_size + args_size + cprestore_size;
6804 /* Calculate space needed for gp registers. */
6805 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
6806 if (mips_save_reg_p (regno))
6808 gp_reg_size += GET_MODE_SIZE (gpr_mode);
6809 mask |= 1 << (regno - GP_REG_FIRST);
6812 /* We need to restore these for the handler. */
6813 if (current_function_calls_eh_return)
6818 regno = EH_RETURN_DATA_REGNO (i);
6819 if (regno == INVALID_REGNUM)
6821 gp_reg_size += GET_MODE_SIZE (gpr_mode);
6822 mask |= 1 << (regno - GP_REG_FIRST);
6826 /* The MIPS16e SAVE and RESTORE instructions have two ranges of registers:
6827 $a3-$a0 and $s2-$s8. If we save one register in the range, we must
6828 save all later registers too. */
6829 if (GENERATE_MIPS16E_SAVE_RESTORE)
6831 mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
6832 ARRAY_SIZE (mips16e_s2_s8_regs), &gp_reg_size);
6833 mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
6834 ARRAY_SIZE (mips16e_a0_a3_regs), &gp_reg_size);
6837 /* This loop must iterate over the same space as its companion in
6838 mips_for_each_saved_reg. */
6839 for (regno = (FP_REG_LAST - MAX_FPRS_PER_FMT + 1);
6840 regno >= FP_REG_FIRST;
6841 regno -= MAX_FPRS_PER_FMT)
6843 if (mips_save_reg_p (regno))
6845 fp_reg_size += MAX_FPRS_PER_FMT * UNITS_PER_FPREG;
6846 fmask |= ((1 << MAX_FPRS_PER_FMT) - 1) << (regno - FP_REG_FIRST);
6850 gp_reg_rounded = MIPS_STACK_ALIGN (gp_reg_size);
6851 total_size += gp_reg_rounded + MIPS_STACK_ALIGN (fp_reg_size);
6853 /* Add in the space required for saving incoming register arguments. */
6854 total_size += current_function_pretend_args_size;
6855 total_size += MIPS_STACK_ALIGN (cfun->machine->varargs_size);
6857 /* Save other computed information. */
6858 cfun->machine->frame.total_size = total_size;
6859 cfun->machine->frame.var_size = var_size;
6860 cfun->machine->frame.args_size = args_size;
6861 cfun->machine->frame.cprestore_size = cprestore_size;
6862 cfun->machine->frame.gp_reg_size = gp_reg_size;
6863 cfun->machine->frame.fp_reg_size = fp_reg_size;
6864 cfun->machine->frame.mask = mask;
6865 cfun->machine->frame.fmask = fmask;
6866 cfun->machine->frame.initialized = reload_completed;
6867 cfun->machine->frame.num_gp = gp_reg_size / UNITS_PER_WORD;
6868 cfun->machine->frame.num_fp = (fp_reg_size
6869 / (MAX_FPRS_PER_FMT * UNITS_PER_FPREG));
6873 HOST_WIDE_INT offset;
6875 if (GENERATE_MIPS16E_SAVE_RESTORE)
6876 /* MIPS16e SAVE and RESTORE instructions require the GP save area
6877 to be aligned at the high end with any padding at the low end.
6878 It is only safe to use this calculation for o32, where we never
6879 have pretend arguments, and where any varargs will be saved in
6880 the caller-allocated area rather than at the top of the frame. */
6881 offset = (total_size - GET_MODE_SIZE (gpr_mode));
6883 offset = (args_size + cprestore_size + var_size
6884 + gp_reg_size - GET_MODE_SIZE (gpr_mode));
6885 cfun->machine->frame.gp_sp_offset = offset;
6886 cfun->machine->frame.gp_save_offset = offset - total_size;
6890 cfun->machine->frame.gp_sp_offset = 0;
6891 cfun->machine->frame.gp_save_offset = 0;
6896 HOST_WIDE_INT offset;
6898 offset = (args_size + cprestore_size + var_size
6899 + gp_reg_rounded + fp_reg_size
6900 - MAX_FPRS_PER_FMT * UNITS_PER_FPREG);
6901 cfun->machine->frame.fp_sp_offset = offset;
6902 cfun->machine->frame.fp_save_offset = offset - total_size;
6906 cfun->machine->frame.fp_sp_offset = 0;
6907 cfun->machine->frame.fp_save_offset = 0;
6910 /* Ok, we're done. */
6914 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame
6915 pointer or argument pointer. TO is either the stack pointer or
6916 hard frame pointer. */
6919 mips_initial_elimination_offset (int from, int to)
6921 HOST_WIDE_INT offset;
6923 compute_frame_size (get_frame_size ());
6925 /* Set OFFSET to the offset from the stack pointer. */
6928 case FRAME_POINTER_REGNUM:
6932 case ARG_POINTER_REGNUM:
6933 offset = (cfun->machine->frame.total_size
6934 - current_function_pretend_args_size);
6941 if (TARGET_MIPS16 && to == HARD_FRAME_POINTER_REGNUM)
6942 offset -= cfun->machine->frame.args_size;
6947 /* Implement RETURN_ADDR_RTX. Note, we do not support moving
6948 back to a previous frame. */
6950 mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
6955 return get_hard_reg_initial_val (Pmode, GP_REG_FIRST + 31);
6958 /* Use FN to save or restore register REGNO. MODE is the register's
6959 mode and OFFSET is the offset of its save slot from the current
6963 mips_save_restore_reg (enum machine_mode mode, int regno,
6964 HOST_WIDE_INT offset, mips_save_restore_fn fn)
6968 mem = gen_frame_mem (mode, plus_constant (stack_pointer_rtx, offset));
6970 fn (gen_rtx_REG (mode, regno), mem);
6974 /* Call FN for each register that is saved by the current function.
6975 SP_OFFSET is the offset of the current stack pointer from the start
6979 mips_for_each_saved_reg (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
6981 enum machine_mode fpr_mode;
6982 HOST_WIDE_INT offset;
6985 /* Save registers starting from high to low. The debuggers prefer at least
6986 the return register be stored at func+4, and also it allows us not to
6987 need a nop in the epilogue if at least one register is reloaded in
6988 addition to return address. */
6989 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
6990 for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
6991 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
6993 mips_save_restore_reg (gpr_mode, regno, offset, fn);
6994 offset -= GET_MODE_SIZE (gpr_mode);
6997 /* This loop must iterate over the same space as its companion in
6998 compute_frame_size. */
6999 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
7000 fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
7001 for (regno = (FP_REG_LAST - MAX_FPRS_PER_FMT + 1);
7002 regno >= FP_REG_FIRST;
7003 regno -= MAX_FPRS_PER_FMT)
7004 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
7006 mips_save_restore_reg (fpr_mode, regno, offset, fn);
7007 offset -= GET_MODE_SIZE (fpr_mode);
7011 /* If we're generating n32 or n64 abicalls, and the current function
7012 does not use $28 as its global pointer, emit a cplocal directive.
7013 Use pic_offset_table_rtx as the argument to the directive. */
7016 mips_output_cplocal (void)
7018 if (!TARGET_EXPLICIT_RELOCS
7019 && cfun->machine->global_pointer > 0
7020 && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
7021 output_asm_insn (".cplocal %+", 0);
7024 /* Return the style of GP load sequence that is being used for the
7025 current function. */
7027 enum mips_loadgp_style
7028 mips_current_loadgp_style (void)
7030 if (!TARGET_USE_GOT || cfun->machine->global_pointer == 0)
7036 if (TARGET_ABSOLUTE_ABICALLS)
7037 return LOADGP_ABSOLUTE;
7039 return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI;
7042 /* The __gnu_local_gp symbol. */
7044 static GTY(()) rtx mips_gnu_local_gp;
7046 /* If we're generating n32 or n64 abicalls, emit instructions
7047 to set up the global pointer. */
7050 mips_emit_loadgp (void)
7052 rtx addr, offset, incoming_address, base, index;
7054 switch (mips_current_loadgp_style ())
7056 case LOADGP_ABSOLUTE:
7057 if (mips_gnu_local_gp == NULL)
7059 mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
7060 SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
7062 emit_insn (gen_loadgp_absolute (mips_gnu_local_gp));
7066 addr = XEXP (DECL_RTL (current_function_decl), 0);
7067 offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
7068 incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
7069 emit_insn (gen_loadgp_newabi (offset, incoming_address));
7070 if (!TARGET_EXPLICIT_RELOCS)
7071 emit_insn (gen_loadgp_blockage ());
7075 base = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_BASE));
7076 index = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_INDEX));
7077 emit_insn (gen_loadgp_rtp (base, index));
7078 if (!TARGET_EXPLICIT_RELOCS)
7079 emit_insn (gen_loadgp_blockage ());
7087 /* Set up the stack and frame (if desired) for the function. */
7090 mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
7093 HOST_WIDE_INT tsize = cfun->machine->frame.total_size;
7095 #ifdef SDB_DEBUGGING_INFO
7096 if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
7097 SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));
7100 /* In mips16 mode, we may need to generate a 32 bit to handle
7101 floating point arguments. The linker will arrange for any 32-bit
7102 functions to call this stub, which will then jump to the 16-bit
7104 if (mips16_hard_float
7105 && current_function_args_info.fp_code != 0)
7106 build_mips16_function_stub (file);
7108 if (!FUNCTION_NAME_ALREADY_DECLARED)
7110 /* Get the function name the same way that toplev.c does before calling
7111 assemble_start_function. This is needed so that the name used here
7112 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
7113 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
7115 if (!flag_inhibit_size_directive)
7117 fputs ("\t.ent\t", file);
7118 assemble_name (file, fnname);
7122 assemble_name (file, fnname);
7123 fputs (":\n", file);
7126 /* Stop mips_file_end from treating this function as external. */
7127 if (TARGET_IRIX && mips_abi == ABI_32)
7128 TREE_ASM_WRITTEN (DECL_NAME (cfun->decl)) = 1;
7130 if (!flag_inhibit_size_directive)
7132 /* .frame FRAMEREG, FRAMESIZE, RETREG */
7134 "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
7135 "# vars= " HOST_WIDE_INT_PRINT_DEC ", regs= %d/%d"
7136 ", args= " HOST_WIDE_INT_PRINT_DEC
7137 ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
7138 (reg_names[(frame_pointer_needed)
7139 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM]),
7140 ((frame_pointer_needed && TARGET_MIPS16)
7141 ? tsize - cfun->machine->frame.args_size
7143 reg_names[GP_REG_FIRST + 31],
7144 cfun->machine->frame.var_size,
7145 cfun->machine->frame.num_gp,
7146 cfun->machine->frame.num_fp,
7147 cfun->machine->frame.args_size,
7148 cfun->machine->frame.cprestore_size);
7150 /* .mask MASK, GPOFFSET; .fmask FPOFFSET */
7151 fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
7152 cfun->machine->frame.mask,
7153 cfun->machine->frame.gp_save_offset);
7154 fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
7155 cfun->machine->frame.fmask,
7156 cfun->machine->frame.fp_save_offset);
7159 OLD_SP == *FRAMEREG + FRAMESIZE => can find old_sp from nominated FP reg.
7160 HIGHEST_GP_SAVED == *FRAMEREG + FRAMESIZE + GPOFFSET => can find saved regs. */
7163 if (mips_current_loadgp_style () == LOADGP_OLDABI)
7165 /* Handle the initialization of $gp for SVR4 PIC. */
7166 if (!cfun->machine->all_noreorder_p)
7167 output_asm_insn ("%(.cpload\t%^%)", 0);
7169 output_asm_insn ("%(.cpload\t%^\n\t%<", 0);
7171 else if (cfun->machine->all_noreorder_p)
7172 output_asm_insn ("%(%<", 0);
7174 /* Tell the assembler which register we're using as the global
7175 pointer. This is needed for thunks, since they can use either
7176 explicit relocs or assembler macros. */
7177 mips_output_cplocal ();
7180 /* Make the last instruction frame related and note that it performs
7181 the operation described by FRAME_PATTERN. */
7184 mips_set_frame_expr (rtx frame_pattern)
7188 insn = get_last_insn ();
7189 RTX_FRAME_RELATED_P (insn) = 1;
7190 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7196 /* Return a frame-related rtx that stores REG at MEM.
7197 REG must be a single register. */
7200 mips_frame_set (rtx mem, rtx reg)
7204 /* If we're saving the return address register and the dwarf return
7205 address column differs from the hard register number, adjust the
7206 note reg to refer to the former. */
7207 if (REGNO (reg) == GP_REG_FIRST + 31
7208 && DWARF_FRAME_RETURN_COLUMN != GP_REG_FIRST + 31)
7209 reg = gen_rtx_REG (GET_MODE (reg), DWARF_FRAME_RETURN_COLUMN);
7211 set = gen_rtx_SET (VOIDmode, mem, reg);
7212 RTX_FRAME_RELATED_P (set) = 1;
7218 /* Save register REG to MEM. Make the instruction frame-related. */
7221 mips_save_reg (rtx reg, rtx mem)
7223 if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
7227 if (mips_split_64bit_move_p (mem, reg))
7228 mips_split_64bit_move (mem, reg);
7230 emit_move_insn (mem, reg);
7232 x1 = mips_frame_set (mips_subword (mem, 0), mips_subword (reg, 0));
7233 x2 = mips_frame_set (mips_subword (mem, 1), mips_subword (reg, 1));
7234 mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
7239 && REGNO (reg) != GP_REG_FIRST + 31
7240 && !M16_REG_P (REGNO (reg)))
7242 /* Save a non-mips16 register by moving it through a temporary.
7243 We don't need to do this for $31 since there's a special
7244 instruction for it. */
7245 emit_move_insn (MIPS_PROLOGUE_TEMP (GET_MODE (reg)), reg);
7246 emit_move_insn (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
7249 emit_move_insn (mem, reg);
7251 mips_set_frame_expr (mips_frame_set (mem, reg));
7255 /* Return a move between register REGNO and memory location SP + OFFSET.
7256 Make the move a load if RESTORE_P, otherwise make it a frame-related
7260 mips16e_save_restore_reg (bool restore_p, HOST_WIDE_INT offset,
7265 mem = gen_frame_mem (SImode, plus_constant (stack_pointer_rtx, offset));
7266 reg = gen_rtx_REG (SImode, regno);
7268 ? gen_rtx_SET (VOIDmode, reg, mem)
7269 : mips_frame_set (mem, reg));
7272 /* Return RTL for a MIPS16e SAVE or RESTORE instruction; RESTORE_P says which.
7273 The instruction must:
7275 - Allocate or deallocate SIZE bytes in total; SIZE is known
7278 - Save or restore as many registers in *MASK_PTR as possible.
7279 The instruction saves the first registers at the top of the
7280 allocated area, with the other registers below it.
7282 - Save NARGS argument registers above the allocated area.
7284 (NARGS is always zero if RESTORE_P.)
7286 The SAVE and RESTORE instructions cannot save and restore all general
7287 registers, so there may be some registers left over for the caller to
7288 handle. Destructively modify *MASK_PTR so that it contains the registers
7289 that still need to be saved or restored. The caller can save these
7290 registers in the memory immediately below *OFFSET_PTR, which is a
7291 byte offset from the bottom of the allocated stack area. */
7294 mips16e_build_save_restore (bool restore_p, unsigned int *mask_ptr,
7295 HOST_WIDE_INT *offset_ptr, unsigned int nargs,
7299 HOST_WIDE_INT offset, top_offset;
7300 unsigned int i, regno;
7303 gcc_assert (cfun->machine->frame.fp_reg_size == 0);
7305 /* Calculate the number of elements in the PARALLEL. We need one element
7306 for the stack adjustment, one for each argument register save, and one
7307 for each additional register move. */
7309 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
7310 if (BITSET_P (*mask_ptr, mips16e_save_restore_regs[i]))
7313 /* Create the final PARALLEL. */
7314 pattern = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (n));
7317 /* Add the stack pointer adjustment. */
7318 set = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7319 plus_constant (stack_pointer_rtx,
7320 restore_p ? size : -size));
7321 RTX_FRAME_RELATED_P (set) = 1;
7322 XVECEXP (pattern, 0, n++) = set;
7324 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
7325 top_offset = restore_p ? size : 0;
7327 /* Save the arguments. */
7328 for (i = 0; i < nargs; i++)
7330 offset = top_offset + i * GET_MODE_SIZE (gpr_mode);
7331 set = mips16e_save_restore_reg (restore_p, offset, GP_ARG_FIRST + i);
7332 XVECEXP (pattern, 0, n++) = set;
7335 /* Then fill in the other register moves. */
7336 offset = top_offset;
7337 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
7339 regno = mips16e_save_restore_regs[i];
7340 if (BITSET_P (*mask_ptr, regno))
7342 offset -= UNITS_PER_WORD;
7343 set = mips16e_save_restore_reg (restore_p, offset, regno);
7344 XVECEXP (pattern, 0, n++) = set;
7345 *mask_ptr &= ~(1 << regno);
7349 /* Tell the caller what offset it should use for the remaining registers. */
7350 *offset_ptr = size + (offset - top_offset) + size;
7352 gcc_assert (n == XVECLEN (pattern, 0));
7357 /* PATTERN is a PARALLEL whose first element adds ADJUST to the stack
7358 pointer. Return true if PATTERN matches the kind of instruction
7359 generated by mips16e_build_save_restore. If INFO is nonnull,
7360 initialize it when returning true. */
7363 mips16e_save_restore_pattern_p (rtx pattern, HOST_WIDE_INT adjust,
7364 struct mips16e_save_restore_info *info)
7366 unsigned int i, nargs, mask;
7367 HOST_WIDE_INT top_offset, save_offset, offset, extra;
7368 rtx set, reg, mem, base;
7371 if (!GENERATE_MIPS16E_SAVE_RESTORE)
7374 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
7375 top_offset = adjust > 0 ? adjust : 0;
7377 /* Interpret all other members of the PARALLEL. */
7378 save_offset = top_offset - GET_MODE_SIZE (gpr_mode);
7382 for (n = 1; n < XVECLEN (pattern, 0); n++)
7384 /* Check that we have a SET. */
7385 set = XVECEXP (pattern, 0, n);
7386 if (GET_CODE (set) != SET)
7389 /* Check that the SET is a load (if restoring) or a store
7391 mem = adjust > 0 ? SET_SRC (set) : SET_DEST (set);
7395 /* Check that the address is the sum of the stack pointer and a
7396 possibly-zero constant offset. */
7397 mips_split_plus (XEXP (mem, 0), &base, &offset);
7398 if (base != stack_pointer_rtx)
7401 /* Check that SET's other operand is a register. */
7402 reg = adjust > 0 ? SET_DEST (set) : SET_SRC (set);
7406 /* Check for argument saves. */
7407 if (offset == top_offset + nargs * GET_MODE_SIZE (gpr_mode)
7408 && REGNO (reg) == GP_ARG_FIRST + nargs)
7410 else if (offset == save_offset)
7412 while (mips16e_save_restore_regs[i++] != REGNO (reg))
7413 if (i == ARRAY_SIZE (mips16e_save_restore_regs))
7416 mask |= 1 << REGNO (reg);
7417 save_offset -= GET_MODE_SIZE (gpr_mode);
7423 /* Check that the restrictions on register ranges are met. */
7425 mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
7426 ARRAY_SIZE (mips16e_s2_s8_regs), &extra);
7427 mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
7428 ARRAY_SIZE (mips16e_a0_a3_regs), &extra);
7432 /* Make sure that the topmost argument register is not saved twice.
7433 The checks above ensure that the same is then true for the other
7434 argument registers. */
7435 if (nargs > 0 && BITSET_P (mask, GP_ARG_FIRST + nargs - 1))
7438 /* Pass back information, if requested. */
7441 info->nargs = nargs;
7443 info->size = (adjust > 0 ? adjust : -adjust);
7449 /* Add a MIPS16e SAVE or RESTORE register-range argument to string S
7450 for the register range [MIN_REG, MAX_REG]. Return a pointer to
7451 the null terminator. */
7454 mips16e_add_register_range (char *s, unsigned int min_reg,
7455 unsigned int max_reg)
7457 if (min_reg != max_reg)
7458 s += sprintf (s, ",%s-%s", reg_names[min_reg], reg_names[max_reg]);
7460 s += sprintf (s, ",%s", reg_names[min_reg]);
7464 /* Return the assembly instruction for a MIPS16e SAVE or RESTORE instruction.
7465 PATTERN and ADJUST are as for mips16e_save_restore_pattern_p. */
7468 mips16e_output_save_restore (rtx pattern, HOST_WIDE_INT adjust)
7470 static char buffer[300];
7472 struct mips16e_save_restore_info info;
7473 unsigned int i, end;
7476 /* Parse the pattern. */
7477 if (!mips16e_save_restore_pattern_p (pattern, adjust, &info))
7480 /* Add the mnemonic. */
7481 s = strcpy (buffer, adjust > 0 ? "restore\t" : "save\t");
7484 /* Save the arguments. */
7486 s += sprintf (s, "%s-%s,", reg_names[GP_ARG_FIRST],
7487 reg_names[GP_ARG_FIRST + info.nargs - 1]);
7488 else if (info.nargs == 1)
7489 s += sprintf (s, "%s,", reg_names[GP_ARG_FIRST]);
7491 /* Emit the amount of stack space to allocate or deallocate. */
7492 s += sprintf (s, "%d", (int) info.size);
7494 /* Save or restore $16. */
7495 if (BITSET_P (info.mask, 16))
7496 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 16]);
7498 /* Save or restore $17. */
7499 if (BITSET_P (info.mask, 17))
7500 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 17]);
7502 /* Save or restore registers in the range $s2...$s8, which
7503 mips16e_s2_s8_regs lists in decreasing order. Note that this
7504 is a software register range; the hardware registers are not
7505 numbered consecutively. */
7506 end = ARRAY_SIZE (mips16e_s2_s8_regs);
7507 i = mips16e_find_first_register (info.mask, mips16e_s2_s8_regs, end);
7509 s = mips16e_add_register_range (s, mips16e_s2_s8_regs[end - 1],
7510 mips16e_s2_s8_regs[i]);
7512 /* Save or restore registers in the range $a0...$a3. */
7513 end = ARRAY_SIZE (mips16e_a0_a3_regs);
7514 i = mips16e_find_first_register (info.mask, mips16e_a0_a3_regs, end);
7516 s = mips16e_add_register_range (s, mips16e_a0_a3_regs[i],
7517 mips16e_a0_a3_regs[end - 1]);
7519 /* Save or restore $31. */
7520 if (BITSET_P (info.mask, 31))
7521 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 31]);
7526 /* Return a simplified form of X using the register values in REG_VALUES.
7527 REG_VALUES[R] is the last value assigned to hard register R, or null
7528 if R has not been modified.
7530 This function is rather limited, but is good enough for our purposes. */
7533 mips16e_collect_propagate_value (rtx x, rtx *reg_values)
7537 x = avoid_constant_pool_reference (x);
7541 x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
7542 return simplify_gen_unary (GET_CODE (x), GET_MODE (x),
7543 x0, GET_MODE (XEXP (x, 0)));
7546 if (ARITHMETIC_P (x))
7548 x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
7549 x1 = mips16e_collect_propagate_value (XEXP (x, 1), reg_values);
7550 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), x0, x1);
7554 && reg_values[REGNO (x)]
7555 && !rtx_unstable_p (reg_values[REGNO (x)]))
7556 return reg_values[REGNO (x)];
7561 /* Return true if (set DEST SRC) stores an argument register into its
7562 caller-allocated save slot, storing the number of that argument
7563 register in *REGNO_PTR if so. REG_VALUES is as for
7564 mips16e_collect_propagate_value. */
7567 mips16e_collect_argument_save_p (rtx dest, rtx src, rtx *reg_values,
7568 unsigned int *regno_ptr)
7570 unsigned int argno, regno;
7571 HOST_WIDE_INT offset, required_offset;
7574 /* Check that this is a word-mode store. */
7575 if (!MEM_P (dest) || !REG_P (src) || GET_MODE (dest) != word_mode)
7578 /* Check that the register being saved is an unmodified argument
7580 regno = REGNO (src);
7581 if (regno < GP_ARG_FIRST || regno > GP_ARG_LAST || reg_values[regno])
7583 argno = regno - GP_ARG_FIRST;
7585 /* Check whether the address is an appropriate stack pointer or
7586 frame pointer access. The frame pointer is offset from the
7587 stack pointer by the size of the outgoing arguments. */
7588 addr = mips16e_collect_propagate_value (XEXP (dest, 0), reg_values);
7589 mips_split_plus (addr, &base, &offset);
7590 required_offset = cfun->machine->frame.total_size + argno * UNITS_PER_WORD;
7591 if (base == hard_frame_pointer_rtx)
7592 required_offset -= cfun->machine->frame.args_size;
7593 else if (base != stack_pointer_rtx)
7595 if (offset != required_offset)
7602 /* A subroutine of mips_expand_prologue, called only when generating
7603 MIPS16e SAVE instructions. Search the start of the function for any
7604 instructions that save argument registers into their caller-allocated
7605 save slots. Delete such instructions and return a value N such that
7606 saving [GP_ARG_FIRST, GP_ARG_FIRST + N) would make all the deleted
7607 instructions redundant. */
7610 mips16e_collect_argument_saves (void)
7612 rtx reg_values[FIRST_PSEUDO_REGISTER];
7613 rtx insn, next, set, dest, src;
7614 unsigned int nargs, regno;
7616 push_topmost_sequence ();
7618 memset (reg_values, 0, sizeof (reg_values));
7619 for (insn = get_insns (); insn; insn = next)
7621 next = NEXT_INSN (insn);
7628 set = PATTERN (insn);
7629 if (GET_CODE (set) != SET)
7632 dest = SET_DEST (set);
7633 src = SET_SRC (set);
7634 if (mips16e_collect_argument_save_p (dest, src, reg_values, ®no))
7636 if (!BITSET_P (cfun->machine->frame.mask, regno))
7639 nargs = MAX (nargs, (regno - GP_ARG_FIRST) + 1);
7642 else if (REG_P (dest) && GET_MODE (dest) == word_mode)
7643 reg_values[REGNO (dest)]
7644 = mips16e_collect_propagate_value (src, reg_values);
7648 pop_topmost_sequence ();
7653 /* Expand the prologue into a bunch of separate insns. */
7656 mips_expand_prologue (void)
7662 if (cfun->machine->global_pointer > 0)
7663 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
7665 size = compute_frame_size (get_frame_size ());
7667 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
7668 bytes beforehand; this is enough to cover the register save area
7669 without going out of range. */
7670 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
7672 HOST_WIDE_INT step1;
7674 step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
7676 if (GENERATE_MIPS16E_SAVE_RESTORE)
7678 HOST_WIDE_INT offset;
7679 unsigned int mask, regno;
7681 /* Try to merge argument stores into the save instruction. */
7682 nargs = mips16e_collect_argument_saves ();
7684 /* Build the save instruction. */
7685 mask = cfun->machine->frame.mask;
7686 insn = mips16e_build_save_restore (false, &mask, &offset,
7688 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
7691 /* Check if we need to save other registers. */
7692 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
7693 if (BITSET_P (mask, regno - GP_REG_FIRST))
7695 offset -= GET_MODE_SIZE (gpr_mode);
7696 mips_save_restore_reg (gpr_mode, regno, offset, mips_save_reg);
7701 insn = gen_add3_insn (stack_pointer_rtx,
7704 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
7706 mips_for_each_saved_reg (size, mips_save_reg);
7710 /* Allocate the rest of the frame. */
7713 if (SMALL_OPERAND (-size))
7714 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
7716 GEN_INT (-size)))) = 1;
7719 emit_move_insn (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
7722 /* There are no instructions to add or subtract registers
7723 from the stack pointer, so use the frame pointer as a
7724 temporary. We should always be using a frame pointer
7725 in this case anyway. */
7726 gcc_assert (frame_pointer_needed);
7727 emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
7728 emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
7729 hard_frame_pointer_rtx,
7730 MIPS_PROLOGUE_TEMP (Pmode)));
7731 emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
7734 emit_insn (gen_sub3_insn (stack_pointer_rtx,
7736 MIPS_PROLOGUE_TEMP (Pmode)));
7738 /* Describe the combined effect of the previous instructions. */
7740 (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7741 plus_constant (stack_pointer_rtx, -size)));
7745 /* Set up the frame pointer, if we're using one. In mips16 code,
7746 we point the frame pointer ahead of the outgoing argument area.
7747 This should allow more variables & incoming arguments to be
7748 accessed with unextended instructions. */
7749 if (frame_pointer_needed)
7751 if (TARGET_MIPS16 && cfun->machine->frame.args_size != 0)
7753 rtx offset = GEN_INT (cfun->machine->frame.args_size);
7754 if (SMALL_OPERAND (cfun->machine->frame.args_size))
7756 (emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
7761 emit_move_insn (MIPS_PROLOGUE_TEMP (Pmode), offset);
7762 emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
7763 emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
7764 hard_frame_pointer_rtx,
7765 MIPS_PROLOGUE_TEMP (Pmode)));
7767 (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
7768 plus_constant (stack_pointer_rtx,
7769 cfun->machine->frame.args_size)));
7773 RTX_FRAME_RELATED_P (emit_move_insn (hard_frame_pointer_rtx,
7774 stack_pointer_rtx)) = 1;
7777 mips_emit_loadgp ();
7779 /* If generating o32/o64 abicalls, save $gp on the stack. */
7780 if (TARGET_ABICALLS && TARGET_OLDABI && !current_function_is_leaf)
7781 emit_insn (gen_cprestore (GEN_INT (current_function_outgoing_args_size)));
7783 /* If we are profiling, make sure no instructions are scheduled before
7784 the call to mcount. */
7786 if (current_function_profile)
7787 emit_insn (gen_blockage ());
7790 /* Do any necessary cleanup after a function to restore stack, frame,
7793 #define RA_MASK BITMASK_HIGH /* 1 << 31 */
7796 mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
7797 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
7799 /* Reinstate the normal $gp. */
7800 SET_REGNO (pic_offset_table_rtx, GLOBAL_POINTER_REGNUM);
7801 mips_output_cplocal ();
7803 if (cfun->machine->all_noreorder_p)
7805 /* Avoid using %>%) since it adds excess whitespace. */
7806 output_asm_insn (".set\tmacro", 0);
7807 output_asm_insn (".set\treorder", 0);
7808 set_noreorder = set_nomacro = 0;
7811 if (!FUNCTION_NAME_ALREADY_DECLARED && !flag_inhibit_size_directive)
7815 /* Get the function name the same way that toplev.c does before calling
7816 assemble_start_function. This is needed so that the name used here
7817 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
7818 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
7819 fputs ("\t.end\t", file);
7820 assemble_name (file, fnname);
7825 /* Emit instructions to restore register REG from slot MEM. */
7828 mips_restore_reg (rtx reg, rtx mem)
7830 /* There's no mips16 instruction to load $31 directly. Load into
7831 $7 instead and adjust the return insn appropriately. */
7832 if (TARGET_MIPS16 && REGNO (reg) == GP_REG_FIRST + 31)
7833 reg = gen_rtx_REG (GET_MODE (reg), 7);
7835 if (TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
7837 /* Can't restore directly; move through a temporary. */
7838 emit_move_insn (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
7839 emit_move_insn (reg, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
7842 emit_move_insn (reg, mem);
7846 /* Expand the epilogue into a bunch of separate insns. SIBCALL_P is true
7847 if this epilogue precedes a sibling call, false if it is for a normal
7848 "epilogue" pattern. */
7851 mips_expand_epilogue (int sibcall_p)
7853 HOST_WIDE_INT step1, step2;
7856 if (!sibcall_p && mips_can_use_return_insn ())
7858 emit_jump_insn (gen_return ());
7862 /* In mips16 mode, if the return value should go into a floating-point
7863 register, we need to call a helper routine to copy it over. */
7864 if (mips16_cfun_returns_in_fpr_p ())
7873 enum machine_mode return_mode;
7875 return_type = DECL_RESULT (current_function_decl);
7876 return_mode = DECL_MODE (return_type);
7878 name = ACONCAT (("__mips16_ret_",
7879 mips16_call_stub_mode_suffix (return_mode),
7881 id = get_identifier (name);
7882 func = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
7883 retval = gen_rtx_REG (return_mode, GP_RETURN);
7884 call = gen_call_value_internal (retval, func, const0_rtx);
7885 insn = emit_call_insn (call);
7886 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), retval);
7889 /* Split the frame into two. STEP1 is the amount of stack we should
7890 deallocate before restoring the registers. STEP2 is the amount we
7891 should deallocate afterwards.
7893 Start off by assuming that no registers need to be restored. */
7894 step1 = cfun->machine->frame.total_size;
7897 /* Work out which register holds the frame address. Account for the
7898 frame pointer offset used by mips16 code. */
7899 if (!frame_pointer_needed)
7900 base = stack_pointer_rtx;
7903 base = hard_frame_pointer_rtx;
7905 step1 -= cfun->machine->frame.args_size;
7908 /* If we need to restore registers, deallocate as much stack as
7909 possible in the second step without going out of range. */
7910 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
7912 step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
7916 /* Set TARGET to BASE + STEP1. */
7922 /* Get an rtx for STEP1 that we can add to BASE. */
7923 adjust = GEN_INT (step1);
7924 if (!SMALL_OPERAND (step1))
7926 emit_move_insn (MIPS_EPILOGUE_TEMP (Pmode), adjust);
7927 adjust = MIPS_EPILOGUE_TEMP (Pmode);
7930 /* Normal mode code can copy the result straight into $sp. */
7932 target = stack_pointer_rtx;
7934 emit_insn (gen_add3_insn (target, base, adjust));
7937 /* Copy TARGET into the stack pointer. */
7938 if (target != stack_pointer_rtx)
7939 emit_move_insn (stack_pointer_rtx, target);
7941 /* If we're using addressing macros, $gp is implicitly used by all
7942 SYMBOL_REFs. We must emit a blockage insn before restoring $gp
7944 if (TARGET_CALL_SAVED_GP && !TARGET_EXPLICIT_RELOCS)
7945 emit_insn (gen_blockage ());
7947 if (GENERATE_MIPS16E_SAVE_RESTORE && cfun->machine->frame.mask != 0)
7949 unsigned int regno, mask;
7950 HOST_WIDE_INT offset;
7953 /* Generate the restore instruction. */
7954 mask = cfun->machine->frame.mask;
7955 restore = mips16e_build_save_restore (true, &mask, &offset, 0, step2);
7957 /* Restore any other registers manually. */
7958 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
7959 if (BITSET_P (mask, regno - GP_REG_FIRST))
7961 offset -= GET_MODE_SIZE (gpr_mode);
7962 mips_save_restore_reg (gpr_mode, regno, offset, mips_restore_reg);
7965 /* Restore the remaining registers and deallocate the final bit
7967 emit_insn (restore);
7971 /* Restore the registers. */
7972 mips_for_each_saved_reg (cfun->machine->frame.total_size - step2,
7975 /* Deallocate the final bit of the frame. */
7977 emit_insn (gen_add3_insn (stack_pointer_rtx,
7982 /* Add in the __builtin_eh_return stack adjustment. We need to
7983 use a temporary in mips16 code. */
7984 if (current_function_calls_eh_return)
7988 emit_move_insn (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
7989 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
7990 MIPS_EPILOGUE_TEMP (Pmode),
7991 EH_RETURN_STACKADJ_RTX));
7992 emit_move_insn (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
7995 emit_insn (gen_add3_insn (stack_pointer_rtx,
7997 EH_RETURN_STACKADJ_RTX));
8002 /* When generating MIPS16 code, the normal mips_for_each_saved_reg
8003 path will restore the return address into $7 rather than $31. */
8005 && !GENERATE_MIPS16E_SAVE_RESTORE
8006 && (cfun->machine->frame.mask & RA_MASK) != 0)
8007 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
8008 GP_REG_FIRST + 7)));
8010 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
8011 GP_REG_FIRST + 31)));
8015 /* Return nonzero if this function is known to have a null epilogue.
8016 This allows the optimizer to omit jumps to jumps if no stack
8020 mips_can_use_return_insn (void)
8022 if (! reload_completed)
8025 if (df_regs_ever_live_p (31) || current_function_profile)
8028 /* In mips16 mode, a function that returns a floating point value
8029 needs to arrange to copy the return value into the floating point
8031 if (mips16_cfun_returns_in_fpr_p ())
8034 if (cfun->machine->frame.initialized)
8035 return cfun->machine->frame.total_size == 0;
8037 return compute_frame_size (get_frame_size ()) == 0;
8040 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
8041 in order to avoid duplicating too much logic from elsewhere. */
8044 mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8045 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8048 rtx this, temp1, temp2, insn, fnaddr;
8050 /* Pretend to be a post-reload pass while generating rtl. */
8051 reload_completed = 1;
8053 /* Mark the end of the (empty) prologue. */
8054 emit_note (NOTE_INSN_PROLOGUE_END);
8056 /* Pick a global pointer. Use a call-clobbered register if
8057 TARGET_CALL_SAVED_GP, so that we can use a sibcall. */
8060 cfun->machine->global_pointer =
8061 TARGET_CALL_SAVED_GP ? 15 : GLOBAL_POINTER_REGNUM;
8063 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
8067 /* Set up the global pointer for n32 or n64 abicalls. If
8068 LOADGP_ABSOLUTE then the thunk does not use the gp and there is
8069 no need to load it.*/
8070 if (mips_current_loadgp_style () != LOADGP_ABSOLUTE
8071 || !targetm.binds_local_p (function))
8072 mips_emit_loadgp ();
8074 /* We need two temporary registers in some cases. */
8075 temp1 = gen_rtx_REG (Pmode, 2);
8076 temp2 = gen_rtx_REG (Pmode, 3);
8078 /* Find out which register contains the "this" pointer. */
8079 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8080 this = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
8082 this = gen_rtx_REG (Pmode, GP_ARG_FIRST);
8084 /* Add DELTA to THIS. */
8087 rtx offset = GEN_INT (delta);
8088 if (!SMALL_OPERAND (delta))
8090 emit_move_insn (temp1, offset);
8093 emit_insn (gen_add3_insn (this, this, offset));
8096 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
8097 if (vcall_offset != 0)
8101 /* Set TEMP1 to *THIS. */
8102 emit_move_insn (temp1, gen_rtx_MEM (Pmode, this));
8104 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
8105 addr = mips_add_offset (temp2, temp1, vcall_offset);
8107 /* Load the offset and add it to THIS. */
8108 emit_move_insn (temp1, gen_rtx_MEM (Pmode, addr));
8109 emit_insn (gen_add3_insn (this, this, temp1));
8112 /* Jump to the target function. Use a sibcall if direct jumps are
8113 allowed, otherwise load the address into a register first. */
8114 fnaddr = XEXP (DECL_RTL (function), 0);
8115 if (TARGET_MIPS16 || TARGET_USE_GOT || SYMBOL_REF_LONG_CALL_P (fnaddr))
8117 /* This is messy. gas treats "la $25,foo" as part of a call
8118 sequence and may allow a global "foo" to be lazily bound.
8119 The general move patterns therefore reject this combination.
8121 In this context, lazy binding would actually be OK
8122 for TARGET_CALL_CLOBBERED_GP, but it's still wrong for
8123 TARGET_CALL_SAVED_GP; see mips_load_call_address.
8124 We must therefore load the address via a temporary
8125 register if mips_dangerous_for_la25_p.
8127 If we jump to the temporary register rather than $25, the assembler
8128 can use the move insn to fill the jump's delay slot. */
8129 if (TARGET_USE_PIC_FN_ADDR_REG
8130 && !mips_dangerous_for_la25_p (fnaddr))
8131 temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
8132 mips_load_call_address (temp1, fnaddr, true);
8134 if (TARGET_USE_PIC_FN_ADDR_REG
8135 && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
8136 emit_move_insn (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
8137 emit_jump_insn (gen_indirect_jump (temp1));
8141 insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
8142 SIBLING_CALL_P (insn) = 1;
8145 /* Run just enough of rest_of_compilation. This sequence was
8146 "borrowed" from alpha.c. */
8147 insn = get_insns ();
8148 insn_locators_alloc ();
8149 split_all_insns_noflow ();
8151 mips16_lay_out_constants ();
8152 shorten_branches (insn);
8153 final_start_function (insn, file, 1);
8154 final (insn, file, 1);
8155 final_end_function ();
8157 /* Clean up the vars set above. Note that final_end_function resets
8158 the global pointer for us. */
8159 reload_completed = 0;
8162 /* Returns nonzero if X contains a SYMBOL_REF. */
8165 symbolic_expression_p (rtx x)
8167 if (GET_CODE (x) == SYMBOL_REF)
8170 if (GET_CODE (x) == CONST)
8171 return symbolic_expression_p (XEXP (x, 0));
8174 return symbolic_expression_p (XEXP (x, 0));
8176 if (ARITHMETIC_P (x))
8177 return (symbolic_expression_p (XEXP (x, 0))
8178 || symbolic_expression_p (XEXP (x, 1)));
8183 /* Choose the section to use for the constant rtx expression X that has
8187 mips_select_rtx_section (enum machine_mode mode, rtx x,
8188 unsigned HOST_WIDE_INT align)
8192 /* In mips16 mode, the constant table always goes in the same section
8193 as the function, so that constants can be loaded using PC relative
8195 return function_section (current_function_decl);
8197 else if (TARGET_EMBEDDED_DATA)
8199 /* For embedded applications, always put constants in read-only data,
8200 in order to reduce RAM usage. */
8201 return mergeable_constant_section (mode, align, 0);
8205 /* For hosted applications, always put constants in small data if
8206 possible, as this gives the best performance. */
8207 /* ??? Consider using mergeable small data sections. */
8209 if (GET_MODE_SIZE (mode) <= (unsigned) mips_section_threshold
8210 && mips_section_threshold > 0)
8211 return get_named_section (NULL, ".sdata", 0);
8212 else if (flag_pic && symbolic_expression_p (x))
8213 return get_named_section (NULL, ".data.rel.ro", 3);
8215 return mergeable_constant_section (mode, align, 0);
8219 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
8221 The complication here is that, with the combination TARGET_ABICALLS
8222 && !TARGET_GPWORD, jump tables will use absolute addresses, and should
8223 therefore not be included in the read-only part of a DSO. Handle such
8224 cases by selecting a normal data section instead of a read-only one.
8225 The logic apes that in default_function_rodata_section. */
8228 mips_function_rodata_section (tree decl)
8230 if (!TARGET_ABICALLS || TARGET_GPWORD)
8231 return default_function_rodata_section (decl);
8233 if (decl && DECL_SECTION_NAME (decl))
8235 const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
8236 if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
8238 char *rname = ASTRDUP (name);
8240 return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
8242 else if (flag_function_sections && flag_data_sections
8243 && strncmp (name, ".text.", 6) == 0)
8245 char *rname = ASTRDUP (name);
8246 memcpy (rname + 1, "data", 4);
8247 return get_section (rname, SECTION_WRITE, decl);
8250 return data_section;
8253 /* Implement TARGET_IN_SMALL_DATA_P. This function controls whether
8254 locally-defined objects go in a small data section. It also controls
8255 the setting of the SYMBOL_REF_SMALL_P flag, which in turn helps
8256 mips_classify_symbol decide when to use %gp_rel(...)($gp) accesses. */
8259 mips_in_small_data_p (tree decl)
8263 if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
8266 /* We don't yet generate small-data references for -mabicalls or
8267 VxWorks RTP code. See the related -G handling in override_options. */
8268 if (TARGET_ABICALLS || TARGET_VXWORKS_RTP)
8271 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
8275 /* Reject anything that isn't in a known small-data section. */
8276 name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
8277 if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
8280 /* If a symbol is defined externally, the assembler will use the
8281 usual -G rules when deciding how to implement macros. */
8282 if (TARGET_EXPLICIT_RELOCS || !DECL_EXTERNAL (decl))
8285 else if (TARGET_EMBEDDED_DATA)
8287 /* Don't put constants into the small data section: we want them
8288 to be in ROM rather than RAM. */
8289 if (TREE_CODE (decl) != VAR_DECL)
8292 if (TREE_READONLY (decl)
8293 && !TREE_SIDE_EFFECTS (decl)
8294 && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
8298 size = int_size_in_bytes (TREE_TYPE (decl));
8299 return (size > 0 && size <= mips_section_threshold);
8302 /* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
8303 anchors for small data: the GP register acts as an anchor in that
8304 case. We also don't want to use them for PC-relative accesses,
8305 where the PC acts as an anchor. */
8308 mips_use_anchors_for_symbol_p (rtx symbol)
8310 switch (mips_classify_symbol (symbol))
8312 case SYMBOL_CONSTANT_POOL:
8313 case SYMBOL_SMALL_DATA:
8321 /* See whether VALTYPE is a record whose fields should be returned in
8322 floating-point registers. If so, return the number of fields and
8323 list them in FIELDS (which should have two elements). Return 0
8326 For n32 & n64, a structure with one or two fields is returned in
8327 floating-point registers as long as every field has a floating-point
8331 mips_fpr_return_fields (tree valtype, tree *fields)
8339 if (TREE_CODE (valtype) != RECORD_TYPE)
8343 for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
8345 if (TREE_CODE (field) != FIELD_DECL)
8348 if (TREE_CODE (TREE_TYPE (field)) != REAL_TYPE)
8354 fields[i++] = field;
8360 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
8361 a value in the most significant part of $2/$3 if:
8363 - the target is big-endian;
8365 - the value has a structure or union type (we generalize this to
8366 cover aggregates from other languages too); and
8368 - the structure is not returned in floating-point registers. */
8371 mips_return_in_msb (tree valtype)
8375 return (TARGET_NEWABI
8376 && TARGET_BIG_ENDIAN
8377 && AGGREGATE_TYPE_P (valtype)
8378 && mips_fpr_return_fields (valtype, fields) == 0);
8382 /* Return a composite value in a pair of floating-point registers.
8383 MODE1 and OFFSET1 are the mode and byte offset for the first value,
8384 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
8387 For n32 & n64, $f0 always holds the first value and $f2 the second.
8388 Otherwise the values are packed together as closely as possible. */
8391 mips_return_fpr_pair (enum machine_mode mode,
8392 enum machine_mode mode1, HOST_WIDE_INT offset1,
8393 enum machine_mode mode2, HOST_WIDE_INT offset2)
8397 inc = (TARGET_NEWABI ? 2 : MAX_FPRS_PER_FMT);
8398 return gen_rtx_PARALLEL
8401 gen_rtx_EXPR_LIST (VOIDmode,
8402 gen_rtx_REG (mode1, FP_RETURN),
8404 gen_rtx_EXPR_LIST (VOIDmode,
8405 gen_rtx_REG (mode2, FP_RETURN + inc),
8406 GEN_INT (offset2))));
8411 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
8412 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
8413 VALTYPE is null and MODE is the mode of the return value. */
8416 mips_function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
8417 enum machine_mode mode)
8424 mode = TYPE_MODE (valtype);
8425 unsignedp = TYPE_UNSIGNED (valtype);
8427 /* Since we define TARGET_PROMOTE_FUNCTION_RETURN that returns
8428 true, we must promote the mode just as PROMOTE_MODE does. */
8429 mode = promote_mode (valtype, mode, &unsignedp, 1);
8431 /* Handle structures whose fields are returned in $f0/$f2. */
8432 switch (mips_fpr_return_fields (valtype, fields))
8435 return gen_rtx_REG (mode, FP_RETURN);
8438 return mips_return_fpr_pair (mode,
8439 TYPE_MODE (TREE_TYPE (fields[0])),
8440 int_byte_position (fields[0]),
8441 TYPE_MODE (TREE_TYPE (fields[1])),
8442 int_byte_position (fields[1]));
8445 /* If a value is passed in the most significant part of a register, see
8446 whether we have to round the mode up to a whole number of words. */
8447 if (mips_return_in_msb (valtype))
8449 HOST_WIDE_INT size = int_size_in_bytes (valtype);
8450 if (size % UNITS_PER_WORD != 0)
8452 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
8453 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
8457 /* For EABI, the class of return register depends entirely on MODE.
8458 For example, "struct { some_type x; }" and "union { some_type x; }"
8459 are returned in the same way as a bare "some_type" would be.
8460 Other ABIs only use FPRs for scalar, complex or vector types. */
8461 if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
8462 return gen_rtx_REG (mode, GP_RETURN);
8467 /* Handle long doubles for n32 & n64. */
8469 return mips_return_fpr_pair (mode,
8471 DImode, GET_MODE_SIZE (mode) / 2);
8473 if (mips_return_mode_in_fpr_p (mode))
8475 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
8476 return mips_return_fpr_pair (mode,
8477 GET_MODE_INNER (mode), 0,
8478 GET_MODE_INNER (mode),
8479 GET_MODE_SIZE (mode) / 2);
8481 return gen_rtx_REG (mode, FP_RETURN);
8485 return gen_rtx_REG (mode, GP_RETURN);
8488 /* Return nonzero when an argument must be passed by reference. */
8491 mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
8492 enum machine_mode mode, tree type,
8493 bool named ATTRIBUTE_UNUSED)
8495 if (mips_abi == ABI_EABI)
8499 /* ??? How should SCmode be handled? */
8500 if (mode == DImode || mode == DFmode)
8503 size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
8504 return size == -1 || size > UNITS_PER_WORD;
8508 /* If we have a variable-sized parameter, we have no choice. */
8509 return targetm.calls.must_pass_in_stack (mode, type);
8514 mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
8515 enum machine_mode mode ATTRIBUTE_UNUSED,
8516 tree type ATTRIBUTE_UNUSED, bool named)
8518 return mips_abi == ABI_EABI && named;
8521 /* Return true if registers of class CLASS cannot change from mode FROM
8525 mips_cannot_change_mode_class (enum machine_mode from,
8526 enum machine_mode to, enum reg_class class)
8528 if (MIN (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) <= UNITS_PER_WORD
8529 && MAX (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) > UNITS_PER_WORD)
8531 if (TARGET_BIG_ENDIAN)
8533 /* When a multi-word value is stored in paired floating-point
8534 registers, the first register always holds the low word.
8535 We therefore can't allow FPRs to change between single-word
8536 and multi-word modes. */
8537 if (MAX_FPRS_PER_FMT > 1 && reg_classes_intersect_p (FP_REGS, class))
8542 /* gcc assumes that each word of a multiword register can be accessed
8543 individually using SUBREGs. This is not true for floating-point
8544 registers if they are bigger than a word. */
8545 if (UNITS_PER_FPREG > UNITS_PER_WORD
8546 && GET_MODE_SIZE (from) > UNITS_PER_WORD
8547 && GET_MODE_SIZE (to) < UNITS_PER_FPREG
8548 && reg_classes_intersect_p (FP_REGS, class))
8551 /* Loading a 32-bit value into a 64-bit floating-point register
8552 will not sign-extend the value, despite what LOAD_EXTEND_OP says.
8553 We can't allow 64-bit float registers to change from SImode to
8558 && GET_MODE_SIZE (to) >= UNITS_PER_WORD
8559 && reg_classes_intersect_p (FP_REGS, class))
8565 /* Return true if X should not be moved directly into register $25.
8566 We need this because many versions of GAS will treat "la $25,foo" as
8567 part of a call sequence and so allow a global "foo" to be lazily bound. */
8570 mips_dangerous_for_la25_p (rtx x)
8572 return (!TARGET_EXPLICIT_RELOCS
8574 && GET_CODE (x) == SYMBOL_REF
8575 && mips_global_symbol_p (x));
8578 /* Implement PREFERRED_RELOAD_CLASS. */
8581 mips_preferred_reload_class (rtx x, enum reg_class class)
8583 if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, class))
8586 if (TARGET_HARD_FLOAT
8587 && FLOAT_MODE_P (GET_MODE (x))
8588 && reg_class_subset_p (FP_REGS, class))
8591 if (reg_class_subset_p (GR_REGS, class))
8594 if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, class))
8600 /* This function returns the register class required for a secondary
8601 register when copying between one of the registers in CLASS, and X,
8602 using MODE. If IN_P is nonzero, the copy is going from X to the
8603 register, otherwise the register is the source. A return value of
8604 NO_REGS means that no secondary register is required. */
8607 mips_secondary_reload_class (enum reg_class class,
8608 enum machine_mode mode, rtx x, int in_p)
8610 enum reg_class gr_regs = TARGET_MIPS16 ? M16_REGS : GR_REGS;
8614 if (REG_P (x)|| GET_CODE (x) == SUBREG)
8615 regno = true_regnum (x);
8617 gp_reg_p = TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
8619 if (mips_dangerous_for_la25_p (x))
8622 if (TEST_HARD_REG_BIT (reg_class_contents[(int) class], 25))
8626 /* Copying from HI or LO to anywhere other than a general register
8627 requires a general register.
8628 This rule applies to both the original HI/LO pair and the new
8629 DSP accumulators. */
8630 if (reg_class_subset_p (class, ACC_REGS))
8632 if (TARGET_MIPS16 && in_p)
8634 /* We can't really copy to HI or LO at all in mips16 mode. */
8637 return gp_reg_p ? NO_REGS : gr_regs;
8639 if (ACC_REG_P (regno))
8641 if (TARGET_MIPS16 && ! in_p)
8643 /* We can't really copy to HI or LO at all in mips16 mode. */
8646 return class == gr_regs ? NO_REGS : gr_regs;
8649 /* We can only copy a value to a condition code register from a
8650 floating point register, and even then we require a scratch
8651 floating point register. We can only copy a value out of a
8652 condition code register into a general register. */
8653 if (class == ST_REGS)
8657 return gp_reg_p ? NO_REGS : gr_regs;
8659 if (ST_REG_P (regno))
8663 return class == gr_regs ? NO_REGS : gr_regs;
8666 if (class == FP_REGS)
8670 /* In this case we can use lwc1, swc1, ldc1 or sdc1. */
8673 else if (CONSTANT_P (x) && GET_MODE_CLASS (mode) == MODE_FLOAT)
8675 /* We can use the l.s and l.d macros to load floating-point
8676 constants. ??? For l.s, we could probably get better
8677 code by returning GR_REGS here. */
8680 else if (gp_reg_p || x == CONST0_RTX (mode))
8682 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
8685 else if (FP_REG_P (regno))
8687 /* In this case we can use mov.s or mov.d. */
8692 /* Otherwise, we need to reload through an integer register. */
8697 /* In mips16 mode, going between memory and anything but M16_REGS
8698 requires an M16_REG. */
8701 if (class != M16_REGS && class != M16_NA_REGS)
8709 if (class == M16_REGS || class == M16_NA_REGS)
8718 /* Implement CLASS_MAX_NREGS.
8720 - UNITS_PER_FPREG controls the number of registers needed by FP_REGS.
8722 - ST_REGS are always hold CCmode values, and CCmode values are
8723 considered to be 4 bytes wide.
8725 All other register classes are covered by UNITS_PER_WORD. Note that
8726 this is true even for unions of integer and float registers when the
8727 latter are smaller than the former. The only supported combination
8728 in which case this occurs is -mgp64 -msingle-float, which has 64-bit
8729 words but 32-bit float registers. A word-based calculation is correct
8730 in that case since -msingle-float disallows multi-FPR values. */
8733 mips_class_max_nregs (enum reg_class class ATTRIBUTE_UNUSED,
8734 enum machine_mode mode)
8736 if (class == ST_REGS)
8737 return (GET_MODE_SIZE (mode) + 3) / 4;
8738 else if (class == FP_REGS)
8739 return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
8741 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
8745 mips_valid_pointer_mode (enum machine_mode mode)
8747 return (mode == SImode || (TARGET_64BIT && mode == DImode));
8750 /* Target hook for vector_mode_supported_p. */
8753 mips_vector_mode_supported_p (enum machine_mode mode)
8758 return TARGET_PAIRED_SINGLE_FLOAT;
8769 /* If we can access small data directly (using gp-relative relocation
8770 operators) return the small data pointer, otherwise return null.
8772 For each mips16 function which refers to GP relative symbols, we
8773 use a pseudo register, initialized at the start of the function, to
8774 hold the $gp value. */
8777 mips16_gp_pseudo_reg (void)
8779 if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
8780 cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
8782 /* Don't initialize the pseudo register if we are being called from
8783 the tree optimizers' cost-calculation routines. */
8784 if (!cfun->machine->initialized_mips16_gp_pseudo_p
8785 && current_ir_type () != IR_GIMPLE)
8789 /* We want to initialize this to a value which gcc will believe
8791 insn = gen_load_const_gp (cfun->machine->mips16_gp_pseudo_rtx);
8793 push_topmost_sequence ();
8794 /* We need to emit the initialization after the FUNCTION_BEG
8795 note, so that it will be integrated. */
8796 for (scan = get_insns (); scan != NULL_RTX; scan = NEXT_INSN (scan))
8798 && NOTE_KIND (scan) == NOTE_INSN_FUNCTION_BEG)
8800 if (scan == NULL_RTX)
8801 scan = get_insns ();
8802 insn = emit_insn_after (insn, scan);
8803 pop_topmost_sequence ();
8805 cfun->machine->initialized_mips16_gp_pseudo_p = true;
8808 return cfun->machine->mips16_gp_pseudo_rtx;
8811 /* Write out code to move floating point arguments in or out of
8812 general registers. Output the instructions to FILE. FP_CODE is
8813 the code describing which arguments are present (see the comment at
8814 the definition of CUMULATIVE_ARGS in mips.h). FROM_FP_P is nonzero if
8815 we are copying from the floating point registers. */
8818 mips16_fp_args (FILE *file, int fp_code, int from_fp_p)
8823 CUMULATIVE_ARGS cum;
8825 /* This code only works for the original 32-bit ABI and the O64 ABI. */
8826 gcc_assert (TARGET_OLDABI);
8833 init_cumulative_args (&cum, NULL, NULL);
8835 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
8837 enum machine_mode mode;
8838 struct mips_arg_info info;
8842 else if ((f & 3) == 2)
8847 mips_arg_info (&cum, mode, NULL, true, &info);
8848 gparg = mips_arg_regno (&info, false);
8849 fparg = mips_arg_regno (&info, true);
8852 fprintf (file, "\t%s\t%s,%s\n", s,
8853 reg_names[gparg], reg_names[fparg]);
8854 else if (TARGET_64BIT)
8855 fprintf (file, "\td%s\t%s,%s\n", s,
8856 reg_names[gparg], reg_names[fparg]);
8857 else if (ISA_HAS_MXHC1)
8858 /* -mips32r2 -mfp64 */
8859 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n",
8861 reg_names[gparg + (WORDS_BIG_ENDIAN ? 1 : 0)],
8863 from_fp_p ? "mfhc1" : "mthc1",
8864 reg_names[gparg + (WORDS_BIG_ENDIAN ? 0 : 1)],
8866 else if (TARGET_BIG_ENDIAN)
8867 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
8868 reg_names[gparg], reg_names[fparg + 1], s,
8869 reg_names[gparg + 1], reg_names[fparg]);
8871 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
8872 reg_names[gparg], reg_names[fparg], s,
8873 reg_names[gparg + 1], reg_names[fparg + 1]);
8875 function_arg_advance (&cum, mode, NULL, true);
8879 /* Build a mips16 function stub. This is used for functions which
8880 take arguments in the floating point registers. It is 32-bit code
8881 that moves the floating point args into the general registers, and
8882 then jumps to the 16-bit code. */
8885 build_mips16_function_stub (FILE *file)
8888 char *secname, *stubname;
8889 tree stubid, stubdecl;
8893 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
8894 secname = (char *) alloca (strlen (fnname) + 20);
8895 sprintf (secname, ".mips16.fn.%s", fnname);
8896 stubname = (char *) alloca (strlen (fnname) + 20);
8897 sprintf (stubname, "__fn_stub_%s", fnname);
8898 stubid = get_identifier (stubname);
8899 stubdecl = build_decl (FUNCTION_DECL, stubid,
8900 build_function_type (void_type_node, NULL_TREE));
8901 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
8902 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
8904 fprintf (file, "\t# Stub function for %s (", current_function_name ());
8906 for (f = (unsigned int) current_function_args_info.fp_code; f != 0; f >>= 2)
8908 fprintf (file, "%s%s",
8909 need_comma ? ", " : "",
8910 (f & 3) == 1 ? "float" : "double");
8913 fprintf (file, ")\n");
8915 fprintf (file, "\t.set\tnomips16\n");
8916 switch_to_section (function_section (stubdecl));
8917 ASM_OUTPUT_ALIGN (file, floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT));
8919 /* ??? If FUNCTION_NAME_ALREADY_DECLARED is defined, then we are
8920 within a .ent, and we cannot emit another .ent. */
8921 if (!FUNCTION_NAME_ALREADY_DECLARED)
8923 fputs ("\t.ent\t", file);
8924 assemble_name (file, stubname);
8928 assemble_name (file, stubname);
8929 fputs (":\n", file);
8931 /* We don't want the assembler to insert any nops here. */
8932 fprintf (file, "\t.set\tnoreorder\n");
8934 mips16_fp_args (file, current_function_args_info.fp_code, 1);
8936 fprintf (asm_out_file, "\t.set\tnoat\n");
8937 fprintf (asm_out_file, "\tla\t%s,", reg_names[GP_REG_FIRST + 1]);
8938 assemble_name (file, fnname);
8939 fprintf (file, "\n");
8940 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
8941 fprintf (asm_out_file, "\t.set\tat\n");
8943 /* Unfortunately, we can't fill the jump delay slot. We can't fill
8944 with one of the mfc1 instructions, because the result is not
8945 available for one instruction, so if the very first instruction
8946 in the function refers to the register, it will see the wrong
8948 fprintf (file, "\tnop\n");
8950 fprintf (file, "\t.set\treorder\n");
8952 if (!FUNCTION_NAME_ALREADY_DECLARED)
8954 fputs ("\t.end\t", file);
8955 assemble_name (file, stubname);
8959 fprintf (file, "\t.set\tmips16\n");
8961 switch_to_section (function_section (current_function_decl));
8964 /* We keep a list of functions for which we have already built stubs
8965 in build_mips16_call_stub. */
8969 struct mips16_stub *next;
8974 static struct mips16_stub *mips16_stubs;
8976 /* Emit code to return a double value from a mips16 stub. GPREG is the
8977 first GP reg to use, FPREG is the first FP reg to use. */
8980 mips16_fpret_double (int gpreg, int fpreg)
8983 fprintf (asm_out_file, "\tdmfc1\t%s,%s\n",
8984 reg_names[gpreg], reg_names[fpreg]);
8985 else if (TARGET_FLOAT64)
8987 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8988 reg_names[gpreg + WORDS_BIG_ENDIAN],
8990 fprintf (asm_out_file, "\tmfhc1\t%s,%s\n",
8991 reg_names[gpreg + !WORDS_BIG_ENDIAN],
8996 if (TARGET_BIG_ENDIAN)
8998 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8999 reg_names[gpreg + 0],
9000 reg_names[fpreg + 1]);
9001 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9002 reg_names[gpreg + 1],
9003 reg_names[fpreg + 0]);
9007 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9008 reg_names[gpreg + 0],
9009 reg_names[fpreg + 0]);
9010 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9011 reg_names[gpreg + 1],
9012 reg_names[fpreg + 1]);
9017 /* Build a call stub for a mips16 call. A stub is needed if we are
9018 passing any floating point values which should go into the floating
9019 point registers. If we are, and the call turns out to be to a
9020 32-bit function, the stub will be used to move the values into the
9021 floating point registers before calling the 32-bit function. The
9022 linker will magically adjust the function call to either the 16-bit
9023 function or the 32-bit stub, depending upon where the function call
9024 is actually defined.
9026 Similarly, we need a stub if the return value might come back in a
9027 floating point register.
9029 RETVAL is the location of the return value, or null if this is
9030 a call rather than a call_value. FN is the address of the
9031 function and ARG_SIZE is the size of the arguments. FP_CODE
9032 is the code built by function_arg. This function returns a nonzero
9033 value if it builds the call instruction itself. */
9036 build_mips16_call_stub (rtx retval, rtx fn, rtx arg_size, int fp_code)
9040 char *secname, *stubname;
9041 struct mips16_stub *l;
9042 tree stubid, stubdecl;
9046 /* We don't need to do anything if we aren't in mips16 mode, or if
9047 we were invoked with the -msoft-float option. */
9048 if (!mips16_hard_float)
9051 /* Figure out whether the value might come back in a floating point
9054 fpret = mips_return_mode_in_fpr_p (GET_MODE (retval));
9056 /* We don't need to do anything if there were no floating point
9057 arguments and the value will not be returned in a floating point
9059 if (fp_code == 0 && ! fpret)
9062 /* We don't need to do anything if this is a call to a special
9063 mips16 support function. */
9064 if (GET_CODE (fn) == SYMBOL_REF
9065 && strncmp (XSTR (fn, 0), "__mips16_", 9) == 0)
9068 /* This code will only work for o32 and o64 abis. The other ABI's
9069 require more sophisticated support. */
9070 gcc_assert (TARGET_OLDABI);
9072 /* If we're calling via a function pointer, then we must always call
9073 via a stub. There are magic stubs provided in libgcc.a for each
9074 of the required cases. Each of them expects the function address
9075 to arrive in register $2. */
9077 if (GET_CODE (fn) != SYMBOL_REF)
9083 /* ??? If this code is modified to support other ABI's, we need
9084 to handle PARALLEL return values here. */
9087 sprintf (buf, "__mips16_call_stub_%s_%d",
9088 mips16_call_stub_mode_suffix (GET_MODE (retval)),
9091 sprintf (buf, "__mips16_call_stub_%d",
9094 id = get_identifier (buf);
9095 stub_fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
9097 emit_move_insn (gen_rtx_REG (Pmode, 2), fn);
9099 if (retval == NULL_RTX)
9100 insn = gen_call_internal (stub_fn, arg_size);
9102 insn = gen_call_value_internal (retval, stub_fn, arg_size);
9103 insn = emit_call_insn (insn);
9105 /* Put the register usage information on the CALL. */
9106 CALL_INSN_FUNCTION_USAGE (insn) =
9107 gen_rtx_EXPR_LIST (VOIDmode,
9108 gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 2)),
9109 CALL_INSN_FUNCTION_USAGE (insn));
9111 /* If we are handling a floating point return value, we need to
9112 save $18 in the function prologue. Putting a note on the
9113 call will mean that df_regs_ever_live_p ($18) will be true if the
9114 call is not eliminated, and we can check that in the prologue
9117 CALL_INSN_FUNCTION_USAGE (insn) =
9118 gen_rtx_EXPR_LIST (VOIDmode,
9119 gen_rtx_USE (VOIDmode,
9120 gen_rtx_REG (word_mode, 18)),
9121 CALL_INSN_FUNCTION_USAGE (insn));
9123 /* Return 1 to tell the caller that we've generated the call
9128 /* We know the function we are going to call. If we have already
9129 built a stub, we don't need to do anything further. */
9131 fnname = XSTR (fn, 0);
9132 for (l = mips16_stubs; l != NULL; l = l->next)
9133 if (strcmp (l->name, fnname) == 0)
9138 /* Build a special purpose stub. When the linker sees a
9139 function call in mips16 code, it will check where the target
9140 is defined. If the target is a 32-bit call, the linker will
9141 search for the section defined here. It can tell which
9142 symbol this section is associated with by looking at the
9143 relocation information (the name is unreliable, since this
9144 might be a static function). If such a section is found, the
9145 linker will redirect the call to the start of the magic
9148 If the function does not return a floating point value, the
9149 special stub section is named
9152 If the function does return a floating point value, the stub
9154 .mips16.call.fp.FNNAME
9157 secname = (char *) alloca (strlen (fnname) + 40);
9158 sprintf (secname, ".mips16.call.%s%s",
9161 stubname = (char *) alloca (strlen (fnname) + 20);
9162 sprintf (stubname, "__call_stub_%s%s",
9165 stubid = get_identifier (stubname);
9166 stubdecl = build_decl (FUNCTION_DECL, stubid,
9167 build_function_type (void_type_node, NULL_TREE));
9168 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
9169 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
9171 fprintf (asm_out_file, "\t# Stub function to call %s%s (",
9173 ? (GET_MODE (retval) == SFmode ? "float " : "double ")
9177 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
9179 fprintf (asm_out_file, "%s%s",
9180 need_comma ? ", " : "",
9181 (f & 3) == 1 ? "float" : "double");
9184 fprintf (asm_out_file, ")\n");
9186 fprintf (asm_out_file, "\t.set\tnomips16\n");
9187 assemble_start_function (stubdecl, stubname);
9189 if (!FUNCTION_NAME_ALREADY_DECLARED)
9191 fputs ("\t.ent\t", asm_out_file);
9192 assemble_name (asm_out_file, stubname);
9193 fputs ("\n", asm_out_file);
9195 assemble_name (asm_out_file, stubname);
9196 fputs (":\n", asm_out_file);
9199 /* We build the stub code by hand. That's the only way we can
9200 do it, since we can't generate 32-bit code during a 16-bit
9203 /* We don't want the assembler to insert any nops here. */
9204 fprintf (asm_out_file, "\t.set\tnoreorder\n");
9206 mips16_fp_args (asm_out_file, fp_code, 0);
9210 fprintf (asm_out_file, "\t.set\tnoat\n");
9211 fprintf (asm_out_file, "\tla\t%s,%s\n", reg_names[GP_REG_FIRST + 1],
9213 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
9214 fprintf (asm_out_file, "\t.set\tat\n");
9215 /* Unfortunately, we can't fill the jump delay slot. We
9216 can't fill with one of the mtc1 instructions, because the
9217 result is not available for one instruction, so if the
9218 very first instruction in the function refers to the
9219 register, it will see the wrong value. */
9220 fprintf (asm_out_file, "\tnop\n");
9224 fprintf (asm_out_file, "\tmove\t%s,%s\n",
9225 reg_names[GP_REG_FIRST + 18], reg_names[GP_REG_FIRST + 31]);
9226 fprintf (asm_out_file, "\tjal\t%s\n", fnname);
9227 /* As above, we can't fill the delay slot. */
9228 fprintf (asm_out_file, "\tnop\n");
9229 if (GET_MODE (retval) == SFmode)
9230 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9231 reg_names[GP_REG_FIRST + 2], reg_names[FP_REG_FIRST + 0]);
9232 else if (GET_MODE (retval) == SCmode)
9234 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9235 reg_names[GP_REG_FIRST + 2],
9236 reg_names[FP_REG_FIRST + 0]);
9237 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9238 reg_names[GP_REG_FIRST + 3],
9239 reg_names[FP_REG_FIRST + MAX_FPRS_PER_FMT]);
9241 else if (GET_MODE (retval) == DFmode
9242 || GET_MODE (retval) == V2SFmode)
9244 mips16_fpret_double (GP_REG_FIRST + 2, FP_REG_FIRST + 0);
9246 else if (GET_MODE (retval) == DCmode)
9248 mips16_fpret_double (GP_REG_FIRST + 2,
9250 mips16_fpret_double (GP_REG_FIRST + 4,
9251 FP_REG_FIRST + MAX_FPRS_PER_FMT);
9255 if (TARGET_BIG_ENDIAN)
9257 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9258 reg_names[GP_REG_FIRST + 2],
9259 reg_names[FP_REG_FIRST + 1]);
9260 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9261 reg_names[GP_REG_FIRST + 3],
9262 reg_names[FP_REG_FIRST + 0]);
9266 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9267 reg_names[GP_REG_FIRST + 2],
9268 reg_names[FP_REG_FIRST + 0]);
9269 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9270 reg_names[GP_REG_FIRST + 3],
9271 reg_names[FP_REG_FIRST + 1]);
9274 fprintf (asm_out_file, "\tj\t%s\n", reg_names[GP_REG_FIRST + 18]);
9275 /* As above, we can't fill the delay slot. */
9276 fprintf (asm_out_file, "\tnop\n");
9279 fprintf (asm_out_file, "\t.set\treorder\n");
9281 #ifdef ASM_DECLARE_FUNCTION_SIZE
9282 ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
9285 if (!FUNCTION_NAME_ALREADY_DECLARED)
9287 fputs ("\t.end\t", asm_out_file);
9288 assemble_name (asm_out_file, stubname);
9289 fputs ("\n", asm_out_file);
9292 fprintf (asm_out_file, "\t.set\tmips16\n");
9294 /* Record this stub. */
9295 l = (struct mips16_stub *) xmalloc (sizeof *l);
9296 l->name = xstrdup (fnname);
9298 l->next = mips16_stubs;
9302 /* If we expect a floating point return value, but we've built a
9303 stub which does not expect one, then we're in trouble. We can't
9304 use the existing stub, because it won't handle the floating point
9305 value. We can't build a new stub, because the linker won't know
9306 which stub to use for the various calls in this object file.
9307 Fortunately, this case is illegal, since it means that a function
9308 was declared in two different ways in a single compilation. */
9309 if (fpret && ! l->fpret)
9310 error ("cannot handle inconsistent calls to %qs", fnname);
9312 /* If we are calling a stub which handles a floating point return
9313 value, we need to arrange to save $18 in the prologue. We do
9314 this by marking the function call as using the register. The
9315 prologue will later see that it is used, and emit code to save
9322 if (retval == NULL_RTX)
9323 insn = gen_call_internal (fn, arg_size);
9325 insn = gen_call_value_internal (retval, fn, arg_size);
9326 insn = emit_call_insn (insn);
9328 CALL_INSN_FUNCTION_USAGE (insn) =
9329 gen_rtx_EXPR_LIST (VOIDmode,
9330 gen_rtx_USE (VOIDmode, gen_rtx_REG (word_mode, 18)),
9331 CALL_INSN_FUNCTION_USAGE (insn));
9333 /* Return 1 to tell the caller that we've generated the call
9338 /* Return 0 to let the caller generate the call insn. */
9342 /* An entry in the mips16 constant pool. VALUE is the pool constant,
9343 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
9345 struct mips16_constant {
9346 struct mips16_constant *next;
9349 enum machine_mode mode;
9352 /* Information about an incomplete mips16 constant pool. FIRST is the
9353 first constant, HIGHEST_ADDRESS is the highest address that the first
9354 byte of the pool can have, and INSN_ADDRESS is the current instruction
9357 struct mips16_constant_pool {
9358 struct mips16_constant *first;
9359 int highest_address;
9363 /* Add constant VALUE to POOL and return its label. MODE is the
9364 value's mode (used for CONST_INTs, etc.). */
9367 add_constant (struct mips16_constant_pool *pool,
9368 rtx value, enum machine_mode mode)
9370 struct mips16_constant **p, *c;
9371 bool first_of_size_p;
9373 /* See whether the constant is already in the pool. If so, return the
9374 existing label, otherwise leave P pointing to the place where the
9375 constant should be added.
9377 Keep the pool sorted in increasing order of mode size so that we can
9378 reduce the number of alignments needed. */
9379 first_of_size_p = true;
9380 for (p = &pool->first; *p != 0; p = &(*p)->next)
9382 if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
9384 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
9386 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
9387 first_of_size_p = false;
9390 /* In the worst case, the constant needed by the earliest instruction
9391 will end up at the end of the pool. The entire pool must then be
9392 accessible from that instruction.
9394 When adding the first constant, set the pool's highest address to
9395 the address of the first out-of-range byte. Adjust this address
9396 downwards each time a new constant is added. */
9397 if (pool->first == 0)
9398 /* For pc-relative lw, addiu and daddiu instructions, the base PC value
9399 is the address of the instruction with the lowest two bits clear.
9400 The base PC value for ld has the lowest three bits clear. Assume
9401 the worst case here. */
9402 pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
9403 pool->highest_address -= GET_MODE_SIZE (mode);
9404 if (first_of_size_p)
9405 /* Take into account the worst possible padding due to alignment. */
9406 pool->highest_address -= GET_MODE_SIZE (mode) - 1;
9408 /* Create a new entry. */
9409 c = (struct mips16_constant *) xmalloc (sizeof *c);
9412 c->label = gen_label_rtx ();
9419 /* Output constant VALUE after instruction INSN and return the last
9420 instruction emitted. MODE is the mode of the constant. */
9423 dump_constants_1 (enum machine_mode mode, rtx value, rtx insn)
9425 switch (GET_MODE_CLASS (mode))
9429 rtx size = GEN_INT (GET_MODE_SIZE (mode));
9430 return emit_insn_after (gen_consttable_int (value, size), insn);
9434 return emit_insn_after (gen_consttable_float (value), insn);
9436 case MODE_VECTOR_FLOAT:
9437 case MODE_VECTOR_INT:
9440 for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
9441 insn = dump_constants_1 (GET_MODE_INNER (mode),
9442 CONST_VECTOR_ELT (value, i), insn);
9452 /* Dump out the constants in CONSTANTS after INSN. */
9455 dump_constants (struct mips16_constant *constants, rtx insn)
9457 struct mips16_constant *c, *next;
9461 for (c = constants; c != NULL; c = next)
9463 /* If necessary, increase the alignment of PC. */
9464 if (align < GET_MODE_SIZE (c->mode))
9466 int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
9467 insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
9469 align = GET_MODE_SIZE (c->mode);
9471 insn = emit_label_after (c->label, insn);
9472 insn = dump_constants_1 (c->mode, c->value, insn);
9478 emit_barrier_after (insn);
9481 /* Return the length of instruction INSN. */
9484 mips16_insn_length (rtx insn)
9488 rtx body = PATTERN (insn);
9489 if (GET_CODE (body) == ADDR_VEC)
9490 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
9491 if (GET_CODE (body) == ADDR_DIFF_VEC)
9492 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
9494 return get_attr_length (insn);
9497 /* Rewrite *X so that constant pool references refer to the constant's
9498 label instead. DATA points to the constant pool structure. */
9501 mips16_rewrite_pool_refs (rtx *x, void *data)
9503 struct mips16_constant_pool *pool = data;
9504 if (GET_CODE (*x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (*x))
9505 *x = gen_rtx_LABEL_REF (Pmode, add_constant (pool,
9506 get_pool_constant (*x),
9507 get_pool_mode (*x)));
9511 /* Build MIPS16 constant pools. */
9514 mips16_lay_out_constants (void)
9516 struct mips16_constant_pool pool;
9520 memset (&pool, 0, sizeof (pool));
9521 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9523 /* Rewrite constant pool references in INSN. */
9525 for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &pool);
9527 pool.insn_address += mips16_insn_length (insn);
9529 if (pool.first != NULL)
9531 /* If there are no natural barriers between the first user of
9532 the pool and the highest acceptable address, we'll need to
9533 create a new instruction to jump around the constant pool.
9534 In the worst case, this instruction will be 4 bytes long.
9536 If it's too late to do this transformation after INSN,
9537 do it immediately before INSN. */
9538 if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
9542 label = gen_label_rtx ();
9544 jump = emit_jump_insn_before (gen_jump (label), insn);
9545 JUMP_LABEL (jump) = label;
9546 LABEL_NUSES (label) = 1;
9547 barrier = emit_barrier_after (jump);
9549 emit_label_after (label, barrier);
9550 pool.insn_address += 4;
9553 /* See whether the constant pool is now out of range of the first
9554 user. If so, output the constants after the previous barrier.
9555 Note that any instructions between BARRIER and INSN (inclusive)
9556 will use negative offsets to refer to the pool. */
9557 if (pool.insn_address > pool.highest_address)
9559 dump_constants (pool.first, barrier);
9563 else if (BARRIER_P (insn))
9567 dump_constants (pool.first, get_last_insn ());
9570 /* A temporary variable used by for_each_rtx callbacks, etc. */
9571 static rtx mips_sim_insn;
9573 /* A structure representing the state of the processor pipeline.
9574 Used by the mips_sim_* family of functions. */
9576 /* The maximum number of instructions that can be issued in a cycle.
9577 (Caches mips_issue_rate.) */
9578 unsigned int issue_rate;
9580 /* The current simulation time. */
9583 /* How many more instructions can be issued in the current cycle. */
9584 unsigned int insns_left;
9586 /* LAST_SET[X].INSN is the last instruction to set register X.
9587 LAST_SET[X].TIME is the time at which that instruction was issued.
9588 INSN is null if no instruction has yet set register X. */
9592 } last_set[FIRST_PSEUDO_REGISTER];
9594 /* The pipeline's current DFA state. */
9598 /* Reset STATE to the initial simulation state. */
9601 mips_sim_reset (struct mips_sim *state)
9604 state->insns_left = state->issue_rate;
9605 memset (&state->last_set, 0, sizeof (state->last_set));
9606 state_reset (state->dfa_state);
9609 /* Initialize STATE before its first use. DFA_STATE points to an
9610 allocated but uninitialized DFA state. */
9613 mips_sim_init (struct mips_sim *state, state_t dfa_state)
9615 state->issue_rate = mips_issue_rate ();
9616 state->dfa_state = dfa_state;
9617 mips_sim_reset (state);
9620 /* Advance STATE by one clock cycle. */
9623 mips_sim_next_cycle (struct mips_sim *state)
9626 state->insns_left = state->issue_rate;
9627 state_transition (state->dfa_state, 0);
9630 /* Advance simulation state STATE until instruction INSN can read
9634 mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
9638 for (i = 0; i < HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)); i++)
9639 if (state->last_set[REGNO (reg) + i].insn != 0)
9643 t = state->last_set[REGNO (reg) + i].time;
9644 t += insn_latency (state->last_set[REGNO (reg) + i].insn, insn);
9645 while (state->time < t)
9646 mips_sim_next_cycle (state);
9650 /* A for_each_rtx callback. If *X is a register, advance simulation state
9651 DATA until mips_sim_insn can read the register's value. */
9654 mips_sim_wait_regs_2 (rtx *x, void *data)
9657 mips_sim_wait_reg (data, mips_sim_insn, *x);
9661 /* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X. */
9664 mips_sim_wait_regs_1 (rtx *x, void *data)
9666 for_each_rtx (x, mips_sim_wait_regs_2, data);
9669 /* Advance simulation state STATE until all of INSN's register
9670 dependencies are satisfied. */
9673 mips_sim_wait_regs (struct mips_sim *state, rtx insn)
9675 mips_sim_insn = insn;
9676 note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
9679 /* Advance simulation state STATE until the units required by
9680 instruction INSN are available. */
9683 mips_sim_wait_units (struct mips_sim *state, rtx insn)
9687 tmp_state = alloca (state_size ());
9688 while (state->insns_left == 0
9689 || (memcpy (tmp_state, state->dfa_state, state_size ()),
9690 state_transition (tmp_state, insn) >= 0))
9691 mips_sim_next_cycle (state);
9694 /* Advance simulation state STATE until INSN is ready to issue. */
9697 mips_sim_wait_insn (struct mips_sim *state, rtx insn)
9699 mips_sim_wait_regs (state, insn);
9700 mips_sim_wait_units (state, insn);
9703 /* mips_sim_insn has just set X. Update the LAST_SET array
9704 in simulation state DATA. */
9707 mips_sim_record_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
9709 struct mips_sim *state;
9714 for (i = 0; i < HARD_REGNO_NREGS (REGNO (x), GET_MODE (x)); i++)
9716 state->last_set[REGNO (x) + i].insn = mips_sim_insn;
9717 state->last_set[REGNO (x) + i].time = state->time;
9721 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
9722 can issue immediately (i.e., that mips_sim_wait_insn has already
9726 mips_sim_issue_insn (struct mips_sim *state, rtx insn)
9728 state_transition (state->dfa_state, insn);
9729 state->insns_left--;
9731 mips_sim_insn = insn;
9732 note_stores (PATTERN (insn), mips_sim_record_set, state);
9735 /* Simulate issuing a NOP in state STATE. */
9738 mips_sim_issue_nop (struct mips_sim *state)
9740 if (state->insns_left == 0)
9741 mips_sim_next_cycle (state);
9742 state->insns_left--;
9745 /* Update simulation state STATE so that it's ready to accept the instruction
9746 after INSN. INSN should be part of the main rtl chain, not a member of a
9750 mips_sim_finish_insn (struct mips_sim *state, rtx insn)
9752 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
9754 mips_sim_issue_nop (state);
9756 switch (GET_CODE (SEQ_BEGIN (insn)))
9760 /* We can't predict the processor state after a call or label. */
9761 mips_sim_reset (state);
9765 /* The delay slots of branch likely instructions are only executed
9766 when the branch is taken. Therefore, if the caller has simulated
9767 the delay slot instruction, STATE does not really reflect the state
9768 of the pipeline for the instruction after the delay slot. Also,
9769 branch likely instructions tend to incur a penalty when not taken,
9770 so there will probably be an extra delay between the branch and
9771 the instruction after the delay slot. */
9772 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
9773 mips_sim_reset (state);
9781 /* The VR4130 pipeline issues aligned pairs of instructions together,
9782 but it stalls the second instruction if it depends on the first.
9783 In order to cut down the amount of logic required, this dependence
9784 check is not based on a full instruction decode. Instead, any non-SPECIAL
9785 instruction is assumed to modify the register specified by bits 20-16
9786 (which is usually the "rt" field).
9788 In beq, beql, bne and bnel instructions, the rt field is actually an
9789 input, so we can end up with a false dependence between the branch
9790 and its delay slot. If this situation occurs in instruction INSN,
9791 try to avoid it by swapping rs and rt. */
9794 vr4130_avoid_branch_rt_conflict (rtx insn)
9798 first = SEQ_BEGIN (insn);
9799 second = SEQ_END (insn);
9801 && NONJUMP_INSN_P (second)
9802 && GET_CODE (PATTERN (first)) == SET
9803 && GET_CODE (SET_DEST (PATTERN (first))) == PC
9804 && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
9806 /* Check for the right kind of condition. */
9807 rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
9808 if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
9809 && REG_P (XEXP (cond, 0))
9810 && REG_P (XEXP (cond, 1))
9811 && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
9812 && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
9814 /* SECOND mentions the rt register but not the rs register. */
9815 rtx tmp = XEXP (cond, 0);
9816 XEXP (cond, 0) = XEXP (cond, 1);
9817 XEXP (cond, 1) = tmp;
9822 /* Implement -mvr4130-align. Go through each basic block and simulate the
9823 processor pipeline. If we find that a pair of instructions could execute
9824 in parallel, and the first of those instruction is not 8-byte aligned,
9825 insert a nop to make it aligned. */
9828 vr4130_align_insns (void)
9830 struct mips_sim state;
9831 rtx insn, subinsn, last, last2, next;
9836 /* LAST is the last instruction before INSN to have a nonzero length.
9837 LAST2 is the last such instruction before LAST. */
9841 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
9844 mips_sim_init (&state, alloca (state_size ()));
9845 for (insn = get_insns (); insn != 0; insn = next)
9847 unsigned int length;
9849 next = NEXT_INSN (insn);
9851 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
9852 This isn't really related to the alignment pass, but we do it on
9853 the fly to avoid a separate instruction walk. */
9854 vr4130_avoid_branch_rt_conflict (insn);
9856 if (USEFUL_INSN_P (insn))
9857 FOR_EACH_SUBINSN (subinsn, insn)
9859 mips_sim_wait_insn (&state, subinsn);
9861 /* If we want this instruction to issue in parallel with the
9862 previous one, make sure that the previous instruction is
9863 aligned. There are several reasons why this isn't worthwhile
9864 when the second instruction is a call:
9866 - Calls are less likely to be performance critical,
9867 - There's a good chance that the delay slot can execute
9868 in parallel with the call.
9869 - The return address would then be unaligned.
9871 In general, if we're going to insert a nop between instructions
9872 X and Y, it's better to insert it immediately after X. That
9873 way, if the nop makes Y aligned, it will also align any labels
9875 if (state.insns_left != state.issue_rate
9876 && !CALL_P (subinsn))
9878 if (subinsn == SEQ_BEGIN (insn) && aligned_p)
9880 /* SUBINSN is the first instruction in INSN and INSN is
9881 aligned. We want to align the previous instruction
9882 instead, so insert a nop between LAST2 and LAST.
9884 Note that LAST could be either a single instruction
9885 or a branch with a delay slot. In the latter case,
9886 LAST, like INSN, is already aligned, but the delay
9887 slot must have some extra delay that stops it from
9888 issuing at the same time as the branch. We therefore
9889 insert a nop before the branch in order to align its
9891 emit_insn_after (gen_nop (), last2);
9894 else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
9896 /* SUBINSN is the delay slot of INSN, but INSN is
9897 currently unaligned. Insert a nop between
9898 LAST and INSN to align it. */
9899 emit_insn_after (gen_nop (), last);
9903 mips_sim_issue_insn (&state, subinsn);
9905 mips_sim_finish_insn (&state, insn);
9907 /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
9908 length = get_attr_length (insn);
9911 /* If the instruction is an asm statement or multi-instruction
9912 mips.md patern, the length is only an estimate. Insert an
9913 8 byte alignment after it so that the following instructions
9914 can be handled correctly. */
9915 if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
9916 && (recog_memoized (insn) < 0 || length >= 8))
9918 next = emit_insn_after (gen_align (GEN_INT (3)), insn);
9919 next = NEXT_INSN (next);
9920 mips_sim_next_cycle (&state);
9923 else if (length & 4)
9924 aligned_p = !aligned_p;
9929 /* See whether INSN is an aligned label. */
9930 if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
9936 /* Subroutine of mips_reorg. If there is a hazard between INSN
9937 and a previous instruction, avoid it by inserting nops after
9940 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
9941 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
9942 before using the value of that register. *HILO_DELAY counts the
9943 number of instructions since the last hilo hazard (that is,
9944 the number of instructions since the last mflo or mfhi).
9946 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
9947 for the next instruction.
9949 LO_REG is an rtx for the LO register, used in dependence checking. */
9952 mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
9953 rtx *delayed_reg, rtx lo_reg)
9961 pattern = PATTERN (insn);
9963 /* Do not put the whole function in .set noreorder if it contains
9964 an asm statement. We don't know whether there will be hazards
9965 between the asm statement and the gcc-generated code. */
9966 if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
9967 cfun->machine->all_noreorder_p = false;
9969 /* Ignore zero-length instructions (barriers and the like). */
9970 ninsns = get_attr_length (insn) / 4;
9974 /* Work out how many nops are needed. Note that we only care about
9975 registers that are explicitly mentioned in the instruction's pattern.
9976 It doesn't matter that calls use the argument registers or that they
9977 clobber hi and lo. */
9978 if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
9979 nops = 2 - *hilo_delay;
9980 else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
9985 /* Insert the nops between this instruction and the previous one.
9986 Each new nop takes us further from the last hilo hazard. */
9987 *hilo_delay += nops;
9989 emit_insn_after (gen_hazard_nop (), after);
9991 /* Set up the state for the next instruction. */
9992 *hilo_delay += ninsns;
9994 if (INSN_CODE (insn) >= 0)
9995 switch (get_attr_hazard (insn))
10005 set = single_set (insn);
10006 gcc_assert (set != 0);
10007 *delayed_reg = SET_DEST (set);
10013 /* Go through the instruction stream and insert nops where necessary.
10014 See if the whole function can then be put into .set noreorder &
10018 mips_avoid_hazards (void)
10020 rtx insn, last_insn, lo_reg, delayed_reg;
10023 /* Force all instructions to be split into their final form. */
10024 split_all_insns_noflow ();
10026 /* Recalculate instruction lengths without taking nops into account. */
10027 cfun->machine->ignore_hazard_length_p = true;
10028 shorten_branches (get_insns ());
10030 cfun->machine->all_noreorder_p = true;
10032 /* Profiled functions can't be all noreorder because the profiler
10033 support uses assembler macros. */
10034 if (current_function_profile)
10035 cfun->machine->all_noreorder_p = false;
10037 /* Code compiled with -mfix-vr4120 can't be all noreorder because
10038 we rely on the assembler to work around some errata. */
10039 if (TARGET_FIX_VR4120)
10040 cfun->machine->all_noreorder_p = false;
10042 /* The same is true for -mfix-vr4130 if we might generate mflo or
10043 mfhi instructions. Note that we avoid using mflo and mfhi if
10044 the VR4130 macc and dmacc instructions are available instead;
10045 see the *mfhilo_{si,di}_macc patterns. */
10046 if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
10047 cfun->machine->all_noreorder_p = false;
10052 lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
10054 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
10057 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
10058 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
10059 mips_avoid_hazard (last_insn, XVECEXP (PATTERN (insn), 0, i),
10060 &hilo_delay, &delayed_reg, lo_reg);
10062 mips_avoid_hazard (last_insn, insn, &hilo_delay,
10063 &delayed_reg, lo_reg);
10070 /* Implement TARGET_MACHINE_DEPENDENT_REORG. */
10076 mips16_lay_out_constants ();
10077 else if (TARGET_EXPLICIT_RELOCS)
10079 if (mips_flag_delayed_branch)
10080 dbr_schedule (get_insns ());
10081 mips_avoid_hazards ();
10082 if (TUNE_MIPS4130 && TARGET_VR4130_ALIGN)
10083 vr4130_align_insns ();
10087 /* This function does three things:
10089 - Register the special divsi3 and modsi3 functions if -mfix-vr4120.
10090 - Register the mips16 hardware floating point stubs.
10091 - Register the gofast functions if selected using --enable-gofast. */
10093 #include "config/gofast.h"
10096 mips_init_libfuncs (void)
10098 if (TARGET_FIX_VR4120)
10100 set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
10101 set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
10104 if (mips16_hard_float)
10106 set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
10107 set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
10108 set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
10109 set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");
10111 set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
10112 set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
10113 set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
10114 set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
10115 set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
10116 set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
10117 set_optab_libfunc (unord_optab, SFmode, "__mips16_unordsf2");
10119 set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
10120 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
10121 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__mips16_floatunsisf");
10123 if (TARGET_DOUBLE_FLOAT)
10125 set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
10126 set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
10127 set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
10128 set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");
10130 set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
10131 set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
10132 set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
10133 set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
10134 set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
10135 set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
10136 set_optab_libfunc (unord_optab, DFmode, "__mips16_unorddf2");
10138 set_conv_libfunc (sext_optab, DFmode, SFmode, "__mips16_extendsfdf2");
10139 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__mips16_truncdfsf2");
10141 set_conv_libfunc (sfix_optab, SImode, DFmode, "__mips16_fix_truncdfsi");
10142 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__mips16_floatsidf");
10143 set_conv_libfunc (ufloat_optab, DFmode, SImode, "__mips16_floatunsidf");
10147 gofast_maybe_init_libfuncs ();
10150 /* Return a number assessing the cost of moving a register in class
10151 FROM to class TO. The classes are expressed using the enumeration
10152 values such as `GENERAL_REGS'. A value of 2 is the default; other
10153 values are interpreted relative to that.
10155 It is not required that the cost always equal 2 when FROM is the
10156 same as TO; on some machines it is expensive to move between
10157 registers if they are not general registers.
10159 If reload sees an insn consisting of a single `set' between two
10160 hard registers, and if `REGISTER_MOVE_COST' applied to their
10161 classes returns a value of 2, reload does not check to ensure that
10162 the constraints of the insn are met. Setting a cost of other than
10163 2 will allow reload to verify that the constraints are met. You
10164 should do this if the `movM' pattern's constraints do not allow
10167 ??? We make the cost of moving from HI/LO into general
10168 registers the same as for one of moving general registers to
10169 HI/LO for TARGET_MIPS16 in order to prevent allocating a
10170 pseudo to HI/LO. This might hurt optimizations though, it
10171 isn't clear if it is wise. And it might not work in all cases. We
10172 could solve the DImode LO reg problem by using a multiply, just
10173 like reload_{in,out}si. We could solve the SImode/HImode HI reg
10174 problem by using divide instructions. divu puts the remainder in
10175 the HI reg, so doing a divide by -1 will move the value in the HI
10176 reg for all values except -1. We could handle that case by using a
10177 signed divide, e.g. -1 / 2 (or maybe 1 / -2?). We'd have to emit
10178 a compare/branch to test the input value to see which instruction
10179 we need to use. This gets pretty messy, but it is feasible. */
10182 mips_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
10183 enum reg_class to, enum reg_class from)
10185 if (from == M16_REGS && reg_class_subset_p (to, GENERAL_REGS))
10187 else if (from == M16_NA_REGS && reg_class_subset_p (to, GENERAL_REGS))
10189 else if (reg_class_subset_p (from, GENERAL_REGS))
10191 if (to == M16_REGS)
10193 else if (to == M16_NA_REGS)
10195 else if (reg_class_subset_p (to, GENERAL_REGS))
10202 else if (to == FP_REGS)
10204 else if (reg_class_subset_p (to, ACC_REGS))
10211 else if (reg_class_subset_p (to, ALL_COP_REGS))
10216 else if (from == FP_REGS)
10218 if (reg_class_subset_p (to, GENERAL_REGS))
10220 else if (to == FP_REGS)
10222 else if (to == ST_REGS)
10225 else if (reg_class_subset_p (from, ACC_REGS))
10227 if (reg_class_subset_p (to, GENERAL_REGS))
10235 else if (from == ST_REGS && reg_class_subset_p (to, GENERAL_REGS))
10237 else if (reg_class_subset_p (from, ALL_COP_REGS))
10243 ??? What cases are these? Shouldn't we return 2 here? */
10248 /* Return the length of INSN. LENGTH is the initial length computed by
10249 attributes in the machine-description file. */
10252 mips_adjust_insn_length (rtx insn, int length)
10254 /* A unconditional jump has an unfilled delay slot if it is not part
10255 of a sequence. A conditional jump normally has a delay slot, but
10256 does not on MIPS16. */
10257 if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
10260 /* See how many nops might be needed to avoid hardware hazards. */
10261 if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
10262 switch (get_attr_hazard (insn))
10276 /* All MIPS16 instructions are a measly two bytes. */
10284 /* Return an asm sequence to start a noat block and load the address
10285 of a label into $1. */
10288 mips_output_load_label (void)
10290 if (TARGET_EXPLICIT_RELOCS)
10294 return "%[lw\t%@,%%got_page(%0)(%+)\n\taddiu\t%@,%@,%%got_ofst(%0)";
10297 return "%[ld\t%@,%%got_page(%0)(%+)\n\tdaddiu\t%@,%@,%%got_ofst(%0)";
10300 if (ISA_HAS_LOAD_DELAY)
10301 return "%[lw\t%@,%%got(%0)(%+)%#\n\taddiu\t%@,%@,%%lo(%0)";
10302 return "%[lw\t%@,%%got(%0)(%+)\n\taddiu\t%@,%@,%%lo(%0)";
10306 if (Pmode == DImode)
10307 return "%[dla\t%@,%0";
10309 return "%[la\t%@,%0";
10313 /* Return the assembly code for INSN, which has the operands given by
10314 OPERANDS, and which branches to OPERANDS[1] if some condition is true.
10315 BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[1]
10316 is in range of a direct branch. BRANCH_IF_FALSE is an inverted
10317 version of BRANCH_IF_TRUE. */
10320 mips_output_conditional_branch (rtx insn, rtx *operands,
10321 const char *branch_if_true,
10322 const char *branch_if_false)
10324 unsigned int length;
10325 rtx taken, not_taken;
10327 length = get_attr_length (insn);
10330 /* Just a simple conditional branch. */
10331 mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
10332 return branch_if_true;
10335 /* Generate a reversed branch around a direct jump. This fallback does
10336 not use branch-likely instructions. */
10337 mips_branch_likely = false;
10338 not_taken = gen_label_rtx ();
10339 taken = operands[1];
10341 /* Generate the reversed branch to NOT_TAKEN. */
10342 operands[1] = not_taken;
10343 output_asm_insn (branch_if_false, operands);
10345 /* If INSN has a delay slot, we must provide delay slots for both the
10346 branch to NOT_TAKEN and the conditional jump. We must also ensure
10347 that INSN's delay slot is executed in the appropriate cases. */
10348 if (final_sequence)
10350 /* This first delay slot will always be executed, so use INSN's
10351 delay slot if is not annulled. */
10352 if (!INSN_ANNULLED_BRANCH_P (insn))
10354 final_scan_insn (XVECEXP (final_sequence, 0, 1),
10355 asm_out_file, optimize, 1, NULL);
10356 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
10359 output_asm_insn ("nop", 0);
10360 fprintf (asm_out_file, "\n");
10363 /* Output the unconditional branch to TAKEN. */
10365 output_asm_insn ("j\t%0%/", &taken);
10368 output_asm_insn (mips_output_load_label (), &taken);
10369 output_asm_insn ("jr\t%@%]%/", 0);
10372 /* Now deal with its delay slot; see above. */
10373 if (final_sequence)
10375 /* This delay slot will only be executed if the branch is taken.
10376 Use INSN's delay slot if is annulled. */
10377 if (INSN_ANNULLED_BRANCH_P (insn))
10379 final_scan_insn (XVECEXP (final_sequence, 0, 1),
10380 asm_out_file, optimize, 1, NULL);
10381 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
10384 output_asm_insn ("nop", 0);
10385 fprintf (asm_out_file, "\n");
10388 /* Output NOT_TAKEN. */
10389 (*targetm.asm_out.internal_label) (asm_out_file, "L",
10390 CODE_LABEL_NUMBER (not_taken));
10394 /* Return the assembly code for INSN, which branches to OPERANDS[1]
10395 if some ordered condition is true. The condition is given by
10396 OPERANDS[0] if !INVERTED_P, otherwise it is the inverse of
10397 OPERANDS[0]. OPERANDS[2] is the comparison's first operand;
10398 its second is always zero. */
10401 mips_output_order_conditional_branch (rtx insn, rtx *operands, bool inverted_p)
10403 const char *branch[2];
10405 /* Make BRANCH[1] branch to OPERANDS[1] when the condition is true.
10406 Make BRANCH[0] branch on the inverse condition. */
10407 switch (GET_CODE (operands[0]))
10409 /* These cases are equivalent to comparisons against zero. */
10411 inverted_p = !inverted_p;
10412 /* Fall through. */
10414 branch[!inverted_p] = MIPS_BRANCH ("bne", "%2,%.,%1");
10415 branch[inverted_p] = MIPS_BRANCH ("beq", "%2,%.,%1");
10418 /* These cases are always true or always false. */
10420 inverted_p = !inverted_p;
10421 /* Fall through. */
10423 branch[!inverted_p] = MIPS_BRANCH ("beq", "%.,%.,%1");
10424 branch[inverted_p] = MIPS_BRANCH ("bne", "%.,%.,%1");
10428 branch[!inverted_p] = MIPS_BRANCH ("b%C0z", "%2,%1");
10429 branch[inverted_p] = MIPS_BRANCH ("b%N0z", "%2,%1");
10432 return mips_output_conditional_branch (insn, operands, branch[1], branch[0]);
10435 /* Used to output div or ddiv instruction DIVISION, which has the operands
10436 given by OPERANDS. Add in a divide-by-zero check if needed.
10438 When working around R4000 and R4400 errata, we need to make sure that
10439 the division is not immediately followed by a shift[1][2]. We also
10440 need to stop the division from being put into a branch delay slot[3].
10441 The easiest way to avoid both problems is to add a nop after the
10442 division. When a divide-by-zero check is needed, this nop can be
10443 used to fill the branch delay slot.
10445 [1] If a double-word or a variable shift executes immediately
10446 after starting an integer division, the shift may give an
10447 incorrect result. See quotations of errata #16 and #28 from
10448 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
10449 in mips.md for details.
10451 [2] A similar bug to [1] exists for all revisions of the
10452 R4000 and the R4400 when run in an MC configuration.
10453 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
10455 "19. In this following sequence:
10457 ddiv (or ddivu or div or divu)
10458 dsll32 (or dsrl32, dsra32)
10460 if an MPT stall occurs, while the divide is slipping the cpu
10461 pipeline, then the following double shift would end up with an
10464 Workaround: The compiler needs to avoid generating any
10465 sequence with divide followed by extended double shift."
10467 This erratum is also present in "MIPS R4400MC Errata, Processor
10468 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
10469 & 3.0" as errata #10 and #4, respectively.
10471 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
10472 (also valid for MIPS R4000MC processors):
10474 "52. R4000SC: This bug does not apply for the R4000PC.
10476 There are two flavors of this bug:
10478 1) If the instruction just after divide takes an RF exception
10479 (tlb-refill, tlb-invalid) and gets an instruction cache
10480 miss (both primary and secondary) and the line which is
10481 currently in secondary cache at this index had the first
10482 data word, where the bits 5..2 are set, then R4000 would
10483 get a wrong result for the div.
10488 ------------------- # end-of page. -tlb-refill
10493 ------------------- # end-of page. -tlb-invalid
10496 2) If the divide is in the taken branch delay slot, where the
10497 target takes RF exception and gets an I-cache miss for the
10498 exception vector or where I-cache miss occurs for the
10499 target address, under the above mentioned scenarios, the
10500 div would get wrong results.
10503 j r2 # to next page mapped or unmapped
10504 div r8,r9 # this bug would be there as long
10505 # as there is an ICache miss and
10506 nop # the "data pattern" is present
10509 beq r0, r0, NextPage # to Next page
10513 This bug is present for div, divu, ddiv, and ddivu
10516 Workaround: For item 1), OS could make sure that the next page
10517 after the divide instruction is also mapped. For item 2), the
10518 compiler could make sure that the divide instruction is not in
10519 the branch delay slot."
10521 These processors have PRId values of 0x00004220 and 0x00004300 for
10522 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
10525 mips_output_division (const char *division, rtx *operands)
10530 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
10532 output_asm_insn (s, operands);
10535 if (TARGET_CHECK_ZERO_DIV)
10539 output_asm_insn (s, operands);
10540 s = "bnez\t%2,1f\n\tbreak\t7\n1:";
10542 else if (GENERATE_DIVIDE_TRAPS)
10544 output_asm_insn (s, operands);
10545 s = "teq\t%2,%.,7";
10549 output_asm_insn ("%(bne\t%2,%.,1f", operands);
10550 output_asm_insn (s, operands);
10551 s = "break\t7%)\n1:";
10557 /* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
10558 with a final "000" replaced by "k". Ignore case.
10560 Note: this function is shared between GCC and GAS. */
10563 mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
10565 while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
10566 given++, canonical++;
10568 return ((*given == 0 && *canonical == 0)
10569 || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
10573 /* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
10574 CPU name. We've traditionally allowed a lot of variation here.
10576 Note: this function is shared between GCC and GAS. */
10579 mips_matching_cpu_name_p (const char *canonical, const char *given)
10581 /* First see if the name matches exactly, or with a final "000"
10582 turned into "k". */
10583 if (mips_strict_matching_cpu_name_p (canonical, given))
10586 /* If not, try comparing based on numerical designation alone.
10587 See if GIVEN is an unadorned number, or 'r' followed by a number. */
10588 if (TOLOWER (*given) == 'r')
10590 if (!ISDIGIT (*given))
10593 /* Skip over some well-known prefixes in the canonical name,
10594 hoping to find a number there too. */
10595 if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
10597 else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
10599 else if (TOLOWER (canonical[0]) == 'r')
10602 return mips_strict_matching_cpu_name_p (canonical, given);
10606 /* Return the mips_cpu_info entry for the processor or ISA given
10607 by CPU_STRING. Return null if the string isn't recognized.
10609 A similar function exists in GAS. */
10611 static const struct mips_cpu_info *
10612 mips_parse_cpu (const char *cpu_string)
10614 const struct mips_cpu_info *p;
10617 /* In the past, we allowed upper-case CPU names, but it doesn't
10618 work well with the multilib machinery. */
10619 for (s = cpu_string; *s != 0; s++)
10622 warning (0, "the cpu name must be lower case");
10626 /* 'from-abi' selects the most compatible architecture for the given
10627 ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs. For the
10628 EABIs, we have to decide whether we're using the 32-bit or 64-bit
10629 version. Look first at the -mgp options, if given, otherwise base
10630 the choice on MASK_64BIT in TARGET_DEFAULT. */
10631 if (strcasecmp (cpu_string, "from-abi") == 0)
10632 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
10633 : ABI_NEEDS_64BIT_REGS ? 3
10634 : (TARGET_64BIT ? 3 : 1));
10636 /* 'default' has traditionally been a no-op. Probably not very useful. */
10637 if (strcasecmp (cpu_string, "default") == 0)
10640 for (p = mips_cpu_info_table; p->name != 0; p++)
10641 if (mips_matching_cpu_name_p (p->name, cpu_string))
10648 /* Return the processor associated with the given ISA level, or null
10649 if the ISA isn't valid. */
10651 static const struct mips_cpu_info *
10652 mips_cpu_info_from_isa (int isa)
10654 const struct mips_cpu_info *p;
10656 for (p = mips_cpu_info_table; p->name != 0; p++)
10663 /* Implement HARD_REGNO_NREGS. The size of FP registers is controlled
10664 by UNITS_PER_FPREG. The size of FP status registers is always 4, because
10665 they only hold condition code modes, and CCmode is always considered to
10666 be 4 bytes wide. All other registers are word sized. */
10669 mips_hard_regno_nregs (int regno, enum machine_mode mode)
10671 if (ST_REG_P (regno))
10672 return ((GET_MODE_SIZE (mode) + 3) / 4);
10673 else if (! FP_REG_P (regno))
10674 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
10676 return ((GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG);
10679 /* Implement TARGET_RETURN_IN_MEMORY. Under the old (i.e., 32 and O64 ABIs)
10680 all BLKmode objects are returned in memory. Under the new (N32 and
10681 64-bit MIPS ABIs) small structures are returned in a register.
10682 Objects with varying size must still be returned in memory, of
10686 mips_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
10689 return (TYPE_MODE (type) == BLKmode);
10691 return ((int_size_in_bytes (type) > (2 * UNITS_PER_WORD))
10692 || (int_size_in_bytes (type) == -1));
10696 mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
10698 return !TARGET_OLDABI;
10701 /* Return true if INSN is a multiply-add or multiply-subtract
10702 instruction and PREV assigns to the accumulator operand. */
10705 mips_linked_madd_p (rtx prev, rtx insn)
10709 x = single_set (insn);
10715 if (GET_CODE (x) == PLUS
10716 && GET_CODE (XEXP (x, 0)) == MULT
10717 && reg_set_p (XEXP (x, 1), prev))
10720 if (GET_CODE (x) == MINUS
10721 && GET_CODE (XEXP (x, 1)) == MULT
10722 && reg_set_p (XEXP (x, 0), prev))
10728 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
10729 that may clobber hi or lo. */
10731 static rtx mips_macc_chains_last_hilo;
10733 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
10734 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
10737 mips_macc_chains_record (rtx insn)
10739 if (get_attr_may_clobber_hilo (insn))
10740 mips_macc_chains_last_hilo = insn;
10743 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
10744 has NREADY elements, looking for a multiply-add or multiply-subtract
10745 instruction that is cumulative with mips_macc_chains_last_hilo.
10746 If there is one, promote it ahead of anything else that might
10747 clobber hi or lo. */
10750 mips_macc_chains_reorder (rtx *ready, int nready)
10754 if (mips_macc_chains_last_hilo != 0)
10755 for (i = nready - 1; i >= 0; i--)
10756 if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
10758 for (j = nready - 1; j > i; j--)
10759 if (recog_memoized (ready[j]) >= 0
10760 && get_attr_may_clobber_hilo (ready[j]))
10762 mips_promote_ready (ready, i, j);
10769 /* The last instruction to be scheduled. */
10771 static rtx vr4130_last_insn;
10773 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
10774 points to an rtx that is initially an instruction. Nullify the rtx
10775 if the instruction uses the value of register X. */
10778 vr4130_true_reg_dependence_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
10780 rtx *insn_ptr = data;
10783 && reg_referenced_p (x, PATTERN (*insn_ptr)))
10787 /* Return true if there is true register dependence between vr4130_last_insn
10791 vr4130_true_reg_dependence_p (rtx insn)
10793 note_stores (PATTERN (vr4130_last_insn),
10794 vr4130_true_reg_dependence_p_1, &insn);
10798 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
10799 the ready queue and that INSN2 is the instruction after it, return
10800 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
10801 in which INSN1 and INSN2 can probably issue in parallel, but for
10802 which (INSN2, INSN1) should be less sensitive to instruction
10803 alignment than (INSN1, INSN2). See 4130.md for more details. */
10806 vr4130_swap_insns_p (rtx insn1, rtx insn2)
10810 /* Check for the following case:
10812 1) there is some other instruction X with an anti dependence on INSN1;
10813 2) X has a higher priority than INSN2; and
10814 3) X is an arithmetic instruction (and thus has no unit restrictions).
10816 If INSN1 is the last instruction blocking X, it would better to
10817 choose (INSN1, X) over (INSN2, INSN1). */
10818 FOR_EACH_DEP_LINK (dep, INSN_FORW_DEPS (insn1))
10819 if (DEP_LINK_KIND (dep) == REG_DEP_ANTI
10820 && INSN_PRIORITY (DEP_LINK_CON (dep)) > INSN_PRIORITY (insn2)
10821 && recog_memoized (DEP_LINK_CON (dep)) >= 0
10822 && get_attr_vr4130_class (DEP_LINK_CON (dep)) == VR4130_CLASS_ALU)
10825 if (vr4130_last_insn != 0
10826 && recog_memoized (insn1) >= 0
10827 && recog_memoized (insn2) >= 0)
10829 /* See whether INSN1 and INSN2 use different execution units,
10830 or if they are both ALU-type instructions. If so, they can
10831 probably execute in parallel. */
10832 enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
10833 enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
10834 if (class1 != class2 || class1 == VR4130_CLASS_ALU)
10836 /* If only one of the instructions has a dependence on
10837 vr4130_last_insn, prefer to schedule the other one first. */
10838 bool dep1 = vr4130_true_reg_dependence_p (insn1);
10839 bool dep2 = vr4130_true_reg_dependence_p (insn2);
10843 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
10844 is not an ALU-type instruction and if INSN1 uses the same
10845 execution unit. (Note that if this condition holds, we already
10846 know that INSN2 uses a different execution unit.) */
10847 if (class1 != VR4130_CLASS_ALU
10848 && recog_memoized (vr4130_last_insn) >= 0
10849 && class1 == get_attr_vr4130_class (vr4130_last_insn))
10856 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
10857 queue with at least two instructions. Swap the first two if
10858 vr4130_swap_insns_p says that it could be worthwhile. */
10861 vr4130_reorder (rtx *ready, int nready)
10863 if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
10864 mips_promote_ready (ready, nready - 2, nready - 1);
10867 /* Remove the instruction at index LOWER from ready queue READY and
10868 reinsert it in front of the instruction at index HIGHER. LOWER must
10872 mips_promote_ready (rtx *ready, int lower, int higher)
10877 new_head = ready[lower];
10878 for (i = lower; i < higher; i++)
10879 ready[i] = ready[i + 1];
10880 ready[i] = new_head;
10883 /* Implement TARGET_SCHED_REORDER. */
10886 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10887 rtx *ready, int *nreadyp, int cycle)
10889 if (!reload_completed && TUNE_MACC_CHAINS)
10892 mips_macc_chains_last_hilo = 0;
10894 mips_macc_chains_reorder (ready, *nreadyp);
10896 if (reload_completed && TUNE_MIPS4130 && !TARGET_VR4130_ALIGN)
10899 vr4130_last_insn = 0;
10901 vr4130_reorder (ready, *nreadyp);
10903 return mips_issue_rate ();
10906 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
10909 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10910 rtx insn, int more)
10912 switch (GET_CODE (PATTERN (insn)))
10916 /* Don't count USEs and CLOBBERs against the issue rate. */
10921 if (!reload_completed && TUNE_MACC_CHAINS)
10922 mips_macc_chains_record (insn);
10923 vr4130_last_insn = insn;
10929 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
10930 dependencies have no cost, except on the 20Kc where output-dependence
10931 is treated like input-dependence. */
10934 mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
10935 rtx dep ATTRIBUTE_UNUSED, int cost)
10937 if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
10940 if (REG_NOTE_KIND (link) != 0)
10945 /* Return the number of instructions that can be issued per cycle. */
10948 mips_issue_rate (void)
10952 case PROCESSOR_74KC:
10953 case PROCESSOR_74KF2_1:
10954 case PROCESSOR_74KF1_1:
10955 case PROCESSOR_74KF3_2:
10956 /* The 74k is not strictly quad-issue cpu, but can be seen as one
10957 by the scheduler. It can issue 1 ALU, 1 AGEN and 2 FPU insns,
10958 but in reality only a maximum of 3 insns can be issued as the
10959 floating point load/stores also require a slot in the AGEN pipe. */
10962 case PROCESSOR_20KC:
10963 case PROCESSOR_R4130:
10964 case PROCESSOR_R5400:
10965 case PROCESSOR_R5500:
10966 case PROCESSOR_R7000:
10967 case PROCESSOR_R9000:
10970 case PROCESSOR_SB1:
10971 case PROCESSOR_SB1A:
10972 /* This is actually 4, but we get better performance if we claim 3.
10973 This is partly because of unwanted speculative code motion with the
10974 larger number, and partly because in most common cases we can't
10975 reach the theoretical max of 4. */
10983 /* Implements TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
10984 be as wide as the scheduling freedom in the DFA. */
10987 mips_multipass_dfa_lookahead (void)
10989 /* Can schedule up to 4 of the 6 function units in any one cycle. */
10996 /* Implements a store data bypass check. We need this because the cprestore
10997 pattern is type store, but defined using an UNSPEC. This UNSPEC causes the
10998 default routine to abort. We just return false for that case. */
10999 /* ??? Should try to give a better result here than assuming false. */
11002 mips_store_data_bypass_p (rtx out_insn, rtx in_insn)
11004 if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE)
11007 return ! store_data_bypass_p (out_insn, in_insn);
11010 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
11011 return the first operand of the associated "pref" or "prefx" insn. */
11014 mips_prefetch_cookie (rtx write, rtx locality)
11016 /* store_streamed / load_streamed. */
11017 if (INTVAL (locality) <= 0)
11018 return GEN_INT (INTVAL (write) + 4);
11020 /* store / load. */
11021 if (INTVAL (locality) <= 2)
11024 /* store_retained / load_retained. */
11025 return GEN_INT (INTVAL (write) + 6);
11028 /* MIPS builtin function support. */
11030 struct builtin_description
11032 /* The code of the main .md file instruction. See mips_builtin_type
11033 for more information. */
11034 enum insn_code icode;
11036 /* The floating-point comparison code to use with ICODE, if any. */
11037 enum mips_fp_condition cond;
11039 /* The name of the builtin function. */
11042 /* Specifies how the function should be expanded. */
11043 enum mips_builtin_type builtin_type;
11045 /* The function's prototype. */
11046 enum mips_function_type function_type;
11048 /* The target flags required for this function. */
11052 /* Define a MIPS_BUILTIN_DIRECT function for instruction CODE_FOR_mips_<INSN>.
11053 FUNCTION_TYPE and TARGET_FLAGS are builtin_description fields. */
11054 #define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
11055 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
11056 MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, TARGET_FLAGS }
11058 /* Define __builtin_mips_<INSN>_<COND>_{s,d}, both of which require
11060 #define CMP_SCALAR_BUILTINS(INSN, COND, TARGET_FLAGS) \
11061 { CODE_FOR_mips_ ## INSN ## _cond_s, MIPS_FP_COND_ ## COND, \
11062 "__builtin_mips_" #INSN "_" #COND "_s", \
11063 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, TARGET_FLAGS }, \
11064 { CODE_FOR_mips_ ## INSN ## _cond_d, MIPS_FP_COND_ ## COND, \
11065 "__builtin_mips_" #INSN "_" #COND "_d", \
11066 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, TARGET_FLAGS }
11068 /* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
11069 The lower and upper forms require TARGET_FLAGS while the any and all
11070 forms require MASK_MIPS3D. */
11071 #define CMP_PS_BUILTINS(INSN, COND, TARGET_FLAGS) \
11072 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11073 "__builtin_mips_any_" #INSN "_" #COND "_ps", \
11074 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
11075 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11076 "__builtin_mips_all_" #INSN "_" #COND "_ps", \
11077 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
11078 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11079 "__builtin_mips_lower_" #INSN "_" #COND "_ps", \
11080 MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }, \
11081 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11082 "__builtin_mips_upper_" #INSN "_" #COND "_ps", \
11083 MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }
11085 /* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s. The functions
11086 require MASK_MIPS3D. */
11087 #define CMP_4S_BUILTINS(INSN, COND) \
11088 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
11089 "__builtin_mips_any_" #INSN "_" #COND "_4s", \
11090 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11092 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
11093 "__builtin_mips_all_" #INSN "_" #COND "_4s", \
11094 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11097 /* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps. The comparison
11098 instruction requires TARGET_FLAGS. */
11099 #define MOVTF_BUILTINS(INSN, COND, TARGET_FLAGS) \
11100 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11101 "__builtin_mips_movt_" #INSN "_" #COND "_ps", \
11102 MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11104 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11105 "__builtin_mips_movf_" #INSN "_" #COND "_ps", \
11106 MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11109 /* Define all the builtins related to c.cond.fmt condition COND. */
11110 #define CMP_BUILTINS(COND) \
11111 MOVTF_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
11112 MOVTF_BUILTINS (cabs, COND, MASK_MIPS3D), \
11113 CMP_SCALAR_BUILTINS (cabs, COND, MASK_MIPS3D), \
11114 CMP_PS_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
11115 CMP_PS_BUILTINS (cabs, COND, MASK_MIPS3D), \
11116 CMP_4S_BUILTINS (c, COND), \
11117 CMP_4S_BUILTINS (cabs, COND)
11119 static const struct builtin_description mips_bdesc[] =
11121 DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11122 DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11123 DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11124 DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11125 DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, MASK_PAIRED_SINGLE_FLOAT),
11126 DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11127 DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11128 DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11130 DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
11131 MASK_PAIRED_SINGLE_FLOAT),
11132 DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11133 DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11134 DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11135 DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11137 DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
11138 DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
11139 DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11140 DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
11141 DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
11142 DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11144 DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
11145 DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
11146 DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11147 DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
11148 DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
11149 DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11151 MIPS_FP_CONDITIONS (CMP_BUILTINS)
11154 /* Builtin functions for the SB-1 processor. */
11156 #define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
11158 static const struct builtin_description sb1_bdesc[] =
11160 DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT)
11163 /* Builtin functions for DSP ASE. */
11165 #define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
11166 #define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
11167 #define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
11168 #define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
11169 #define CODE_FOR_mips_mul_ph CODE_FOR_mulv2hi3
11171 /* Define a MIPS_BUILTIN_DIRECT_NO_TARGET function for instruction
11172 CODE_FOR_mips_<INSN>. FUNCTION_TYPE and TARGET_FLAGS are
11173 builtin_description fields. */
11174 #define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
11175 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
11176 MIPS_BUILTIN_DIRECT_NO_TARGET, FUNCTION_TYPE, TARGET_FLAGS }
11178 /* Define __builtin_mips_bposge<VALUE>. <VALUE> is 32 for the MIPS32 DSP
11179 branch instruction. TARGET_FLAGS is a builtin_description field. */
11180 #define BPOSGE_BUILTIN(VALUE, TARGET_FLAGS) \
11181 { CODE_FOR_mips_bposge, 0, "__builtin_mips_bposge" #VALUE, \
11182 MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, TARGET_FLAGS }
11184 static const struct builtin_description dsp_bdesc[] =
11186 DIRECT_BUILTIN (addq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11187 DIRECT_BUILTIN (addq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11188 DIRECT_BUILTIN (addq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11189 DIRECT_BUILTIN (addu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11190 DIRECT_BUILTIN (addu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11191 DIRECT_BUILTIN (subq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11192 DIRECT_BUILTIN (subq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11193 DIRECT_BUILTIN (subq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11194 DIRECT_BUILTIN (subu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11195 DIRECT_BUILTIN (subu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11196 DIRECT_BUILTIN (addsc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11197 DIRECT_BUILTIN (addwc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11198 DIRECT_BUILTIN (modsub, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11199 DIRECT_BUILTIN (raddu_w_qb, MIPS_SI_FTYPE_V4QI, MASK_DSP),
11200 DIRECT_BUILTIN (absq_s_ph, MIPS_V2HI_FTYPE_V2HI, MASK_DSP),
11201 DIRECT_BUILTIN (absq_s_w, MIPS_SI_FTYPE_SI, MASK_DSP),
11202 DIRECT_BUILTIN (precrq_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
11203 DIRECT_BUILTIN (precrq_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
11204 DIRECT_BUILTIN (precrq_rs_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
11205 DIRECT_BUILTIN (precrqu_s_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
11206 DIRECT_BUILTIN (preceq_w_phl, MIPS_SI_FTYPE_V2HI, MASK_DSP),
11207 DIRECT_BUILTIN (preceq_w_phr, MIPS_SI_FTYPE_V2HI, MASK_DSP),
11208 DIRECT_BUILTIN (precequ_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11209 DIRECT_BUILTIN (precequ_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11210 DIRECT_BUILTIN (precequ_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11211 DIRECT_BUILTIN (precequ_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11212 DIRECT_BUILTIN (preceu_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11213 DIRECT_BUILTIN (preceu_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11214 DIRECT_BUILTIN (preceu_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11215 DIRECT_BUILTIN (preceu_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11216 DIRECT_BUILTIN (shll_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
11217 DIRECT_BUILTIN (shll_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11218 DIRECT_BUILTIN (shll_s_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11219 DIRECT_BUILTIN (shll_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11220 DIRECT_BUILTIN (shrl_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
11221 DIRECT_BUILTIN (shra_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11222 DIRECT_BUILTIN (shra_r_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11223 DIRECT_BUILTIN (shra_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11224 DIRECT_BUILTIN (muleu_s_ph_qbl, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
11225 DIRECT_BUILTIN (muleu_s_ph_qbr, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
11226 DIRECT_BUILTIN (mulq_rs_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11227 DIRECT_BUILTIN (muleq_s_w_phl, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
11228 DIRECT_BUILTIN (muleq_s_w_phr, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
11229 DIRECT_BUILTIN (bitrev, MIPS_SI_FTYPE_SI, MASK_DSP),
11230 DIRECT_BUILTIN (insv, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11231 DIRECT_BUILTIN (repl_qb, MIPS_V4QI_FTYPE_SI, MASK_DSP),
11232 DIRECT_BUILTIN (repl_ph, MIPS_V2HI_FTYPE_SI, MASK_DSP),
11233 DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
11234 DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
11235 DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
11236 DIRECT_BUILTIN (cmpgu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
11237 DIRECT_BUILTIN (cmpgu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
11238 DIRECT_BUILTIN (cmpgu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
11239 DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
11240 DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
11241 DIRECT_NO_TARGET_BUILTIN (cmp_le_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
11242 DIRECT_BUILTIN (pick_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11243 DIRECT_BUILTIN (pick_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11244 DIRECT_BUILTIN (packrl_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11245 DIRECT_NO_TARGET_BUILTIN (wrdsp, MIPS_VOID_FTYPE_SI_SI, MASK_DSP),
11246 DIRECT_BUILTIN (rddsp, MIPS_SI_FTYPE_SI, MASK_DSP),
11247 DIRECT_BUILTIN (lbux, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
11248 DIRECT_BUILTIN (lhx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
11249 DIRECT_BUILTIN (lwx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
11250 BPOSGE_BUILTIN (32, MASK_DSP),
11252 /* The following are for the MIPS DSP ASE REV 2. */
11253 DIRECT_BUILTIN (absq_s_qb, MIPS_V4QI_FTYPE_V4QI, MASK_DSPR2),
11254 DIRECT_BUILTIN (addu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11255 DIRECT_BUILTIN (addu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11256 DIRECT_BUILTIN (adduh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11257 DIRECT_BUILTIN (adduh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11258 DIRECT_BUILTIN (append, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
11259 DIRECT_BUILTIN (balign, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
11260 DIRECT_BUILTIN (cmpgdu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11261 DIRECT_BUILTIN (cmpgdu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11262 DIRECT_BUILTIN (cmpgdu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11263 DIRECT_BUILTIN (mul_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11264 DIRECT_BUILTIN (mul_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11265 DIRECT_BUILTIN (mulq_rs_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11266 DIRECT_BUILTIN (mulq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11267 DIRECT_BUILTIN (mulq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11268 DIRECT_BUILTIN (precr_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11269 DIRECT_BUILTIN (precr_sra_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
11270 DIRECT_BUILTIN (precr_sra_r_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
11271 DIRECT_BUILTIN (prepend, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
11272 DIRECT_BUILTIN (shra_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
11273 DIRECT_BUILTIN (shra_r_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
11274 DIRECT_BUILTIN (shrl_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSPR2),
11275 DIRECT_BUILTIN (subu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11276 DIRECT_BUILTIN (subu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11277 DIRECT_BUILTIN (subuh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11278 DIRECT_BUILTIN (subuh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11279 DIRECT_BUILTIN (addqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11280 DIRECT_BUILTIN (addqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11281 DIRECT_BUILTIN (addqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11282 DIRECT_BUILTIN (addqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11283 DIRECT_BUILTIN (subqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11284 DIRECT_BUILTIN (subqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11285 DIRECT_BUILTIN (subqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11286 DIRECT_BUILTIN (subqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2)
11289 static const struct builtin_description dsp_32only_bdesc[] =
11291 DIRECT_BUILTIN (dpau_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
11292 DIRECT_BUILTIN (dpau_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
11293 DIRECT_BUILTIN (dpsu_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
11294 DIRECT_BUILTIN (dpsu_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
11295 DIRECT_BUILTIN (dpaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11296 DIRECT_BUILTIN (dpsq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11297 DIRECT_BUILTIN (mulsaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11298 DIRECT_BUILTIN (dpaq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
11299 DIRECT_BUILTIN (dpsq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
11300 DIRECT_BUILTIN (maq_s_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11301 DIRECT_BUILTIN (maq_s_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11302 DIRECT_BUILTIN (maq_sa_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11303 DIRECT_BUILTIN (maq_sa_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11304 DIRECT_BUILTIN (extr_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11305 DIRECT_BUILTIN (extr_r_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11306 DIRECT_BUILTIN (extr_rs_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11307 DIRECT_BUILTIN (extr_s_h, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11308 DIRECT_BUILTIN (extp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11309 DIRECT_BUILTIN (extpdp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11310 DIRECT_BUILTIN (shilo, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
11311 DIRECT_BUILTIN (mthlip, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
11313 /* The following are for the MIPS DSP ASE REV 2. */
11314 DIRECT_BUILTIN (dpa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11315 DIRECT_BUILTIN (dps_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11316 DIRECT_BUILTIN (madd, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
11317 DIRECT_BUILTIN (maddu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
11318 DIRECT_BUILTIN (msub, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
11319 DIRECT_BUILTIN (msubu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
11320 DIRECT_BUILTIN (mulsa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11321 DIRECT_BUILTIN (mult, MIPS_DI_FTYPE_SI_SI, MASK_DSPR2),
11322 DIRECT_BUILTIN (multu, MIPS_DI_FTYPE_USI_USI, MASK_DSPR2),
11323 DIRECT_BUILTIN (dpax_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11324 DIRECT_BUILTIN (dpsx_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11325 DIRECT_BUILTIN (dpaqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11326 DIRECT_BUILTIN (dpaqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11327 DIRECT_BUILTIN (dpsqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11328 DIRECT_BUILTIN (dpsqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2)
11331 /* This helps provide a mapping from builtin function codes to bdesc
11336 /* The builtin function table that this entry describes. */
11337 const struct builtin_description *bdesc;
11339 /* The number of entries in the builtin function table. */
11342 /* The target processor that supports these builtin functions.
11343 PROCESSOR_MAX means we enable them for all processors. */
11344 enum processor_type proc;
11346 /* If the target has these flags, this builtin function table
11347 will not be supported. */
11348 int unsupported_target_flags;
11351 static const struct bdesc_map bdesc_arrays[] =
11353 { mips_bdesc, ARRAY_SIZE (mips_bdesc), PROCESSOR_MAX, 0 },
11354 { sb1_bdesc, ARRAY_SIZE (sb1_bdesc), PROCESSOR_SB1, 0 },
11355 { dsp_bdesc, ARRAY_SIZE (dsp_bdesc), PROCESSOR_MAX, 0 },
11356 { dsp_32only_bdesc, ARRAY_SIZE (dsp_32only_bdesc), PROCESSOR_MAX,
11360 /* Take the argument ARGNUM of the arglist of EXP and convert it into a form
11361 suitable for input operand OP of instruction ICODE. Return the value. */
11364 mips_prepare_builtin_arg (enum insn_code icode,
11365 unsigned int op, tree exp, unsigned int argnum)
11368 enum machine_mode mode;
11370 value = expand_normal (CALL_EXPR_ARG (exp, argnum));
11371 mode = insn_data[icode].operand[op].mode;
11372 if (!insn_data[icode].operand[op].predicate (value, mode))
11374 value = copy_to_mode_reg (mode, value);
11375 /* Check the predicate again. */
11376 if (!insn_data[icode].operand[op].predicate (value, mode))
11378 error ("invalid argument to builtin function");
11386 /* Return an rtx suitable for output operand OP of instruction ICODE.
11387 If TARGET is non-null, try to use it where possible. */
11390 mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
11392 enum machine_mode mode;
11394 mode = insn_data[icode].operand[op].mode;
11395 if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
11396 target = gen_reg_rtx (mode);
11401 /* Expand builtin functions. This is called from TARGET_EXPAND_BUILTIN. */
11404 mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
11405 enum machine_mode mode ATTRIBUTE_UNUSED,
11406 int ignore ATTRIBUTE_UNUSED)
11408 enum insn_code icode;
11409 enum mips_builtin_type type;
11411 unsigned int fcode;
11412 const struct builtin_description *bdesc;
11413 const struct bdesc_map *m;
11415 fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
11416 fcode = DECL_FUNCTION_CODE (fndecl);
11419 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
11421 if (fcode < m->size)
11424 icode = bdesc[fcode].icode;
11425 type = bdesc[fcode].builtin_type;
11435 case MIPS_BUILTIN_DIRECT:
11436 return mips_expand_builtin_direct (icode, target, exp, true);
11438 case MIPS_BUILTIN_DIRECT_NO_TARGET:
11439 return mips_expand_builtin_direct (icode, target, exp, false);
11441 case MIPS_BUILTIN_MOVT:
11442 case MIPS_BUILTIN_MOVF:
11443 return mips_expand_builtin_movtf (type, icode, bdesc[fcode].cond,
11446 case MIPS_BUILTIN_CMP_ANY:
11447 case MIPS_BUILTIN_CMP_ALL:
11448 case MIPS_BUILTIN_CMP_UPPER:
11449 case MIPS_BUILTIN_CMP_LOWER:
11450 case MIPS_BUILTIN_CMP_SINGLE:
11451 return mips_expand_builtin_compare (type, icode, bdesc[fcode].cond,
11454 case MIPS_BUILTIN_BPOSGE32:
11455 return mips_expand_builtin_bposge (type, target);
11462 /* Init builtin functions. This is called from TARGET_INIT_BUILTIN. */
11465 mips_init_builtins (void)
11467 const struct builtin_description *d;
11468 const struct bdesc_map *m;
11469 tree types[(int) MIPS_MAX_FTYPE_MAX];
11470 tree V2SF_type_node;
11471 tree V2HI_type_node;
11472 tree V4QI_type_node;
11473 unsigned int offset;
11475 /* We have only builtins for -mpaired-single, -mips3d and -mdsp. */
11476 if (!TARGET_PAIRED_SINGLE_FLOAT && !TARGET_DSP)
11479 if (TARGET_PAIRED_SINGLE_FLOAT)
11481 V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
11483 types[MIPS_V2SF_FTYPE_V2SF]
11484 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
11486 types[MIPS_V2SF_FTYPE_V2SF_V2SF]
11487 = build_function_type_list (V2SF_type_node,
11488 V2SF_type_node, V2SF_type_node, NULL_TREE);
11490 types[MIPS_V2SF_FTYPE_V2SF_V2SF_INT]
11491 = build_function_type_list (V2SF_type_node,
11492 V2SF_type_node, V2SF_type_node,
11493 integer_type_node, NULL_TREE);
11495 types[MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF]
11496 = build_function_type_list (V2SF_type_node,
11497 V2SF_type_node, V2SF_type_node,
11498 V2SF_type_node, V2SF_type_node, NULL_TREE);
11500 types[MIPS_V2SF_FTYPE_SF_SF]
11501 = build_function_type_list (V2SF_type_node,
11502 float_type_node, float_type_node, NULL_TREE);
11504 types[MIPS_INT_FTYPE_V2SF_V2SF]
11505 = build_function_type_list (integer_type_node,
11506 V2SF_type_node, V2SF_type_node, NULL_TREE);
11508 types[MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF]
11509 = build_function_type_list (integer_type_node,
11510 V2SF_type_node, V2SF_type_node,
11511 V2SF_type_node, V2SF_type_node, NULL_TREE);
11513 types[MIPS_INT_FTYPE_SF_SF]
11514 = build_function_type_list (integer_type_node,
11515 float_type_node, float_type_node, NULL_TREE);
11517 types[MIPS_INT_FTYPE_DF_DF]
11518 = build_function_type_list (integer_type_node,
11519 double_type_node, double_type_node, NULL_TREE);
11521 types[MIPS_SF_FTYPE_V2SF]
11522 = build_function_type_list (float_type_node, V2SF_type_node, NULL_TREE);
11524 types[MIPS_SF_FTYPE_SF]
11525 = build_function_type_list (float_type_node,
11526 float_type_node, NULL_TREE);
11528 types[MIPS_SF_FTYPE_SF_SF]
11529 = build_function_type_list (float_type_node,
11530 float_type_node, float_type_node, NULL_TREE);
11532 types[MIPS_DF_FTYPE_DF]
11533 = build_function_type_list (double_type_node,
11534 double_type_node, NULL_TREE);
11536 types[MIPS_DF_FTYPE_DF_DF]
11537 = build_function_type_list (double_type_node,
11538 double_type_node, double_type_node, NULL_TREE);
11543 V2HI_type_node = build_vector_type_for_mode (intHI_type_node, V2HImode);
11544 V4QI_type_node = build_vector_type_for_mode (intQI_type_node, V4QImode);
11546 types[MIPS_V2HI_FTYPE_V2HI_V2HI]
11547 = build_function_type_list (V2HI_type_node,
11548 V2HI_type_node, V2HI_type_node,
11551 types[MIPS_SI_FTYPE_SI_SI]
11552 = build_function_type_list (intSI_type_node,
11553 intSI_type_node, intSI_type_node,
11556 types[MIPS_V4QI_FTYPE_V4QI_V4QI]
11557 = build_function_type_list (V4QI_type_node,
11558 V4QI_type_node, V4QI_type_node,
11561 types[MIPS_SI_FTYPE_V4QI]
11562 = build_function_type_list (intSI_type_node,
11566 types[MIPS_V2HI_FTYPE_V2HI]
11567 = build_function_type_list (V2HI_type_node,
11571 types[MIPS_SI_FTYPE_SI]
11572 = build_function_type_list (intSI_type_node,
11576 types[MIPS_V4QI_FTYPE_V2HI_V2HI]
11577 = build_function_type_list (V4QI_type_node,
11578 V2HI_type_node, V2HI_type_node,
11581 types[MIPS_V2HI_FTYPE_SI_SI]
11582 = build_function_type_list (V2HI_type_node,
11583 intSI_type_node, intSI_type_node,
11586 types[MIPS_SI_FTYPE_V2HI]
11587 = build_function_type_list (intSI_type_node,
11591 types[MIPS_V2HI_FTYPE_V4QI]
11592 = build_function_type_list (V2HI_type_node,
11596 types[MIPS_V4QI_FTYPE_V4QI_SI]
11597 = build_function_type_list (V4QI_type_node,
11598 V4QI_type_node, intSI_type_node,
11601 types[MIPS_V2HI_FTYPE_V2HI_SI]
11602 = build_function_type_list (V2HI_type_node,
11603 V2HI_type_node, intSI_type_node,
11606 types[MIPS_V2HI_FTYPE_V4QI_V2HI]
11607 = build_function_type_list (V2HI_type_node,
11608 V4QI_type_node, V2HI_type_node,
11611 types[MIPS_SI_FTYPE_V2HI_V2HI]
11612 = build_function_type_list (intSI_type_node,
11613 V2HI_type_node, V2HI_type_node,
11616 types[MIPS_DI_FTYPE_DI_V4QI_V4QI]
11617 = build_function_type_list (intDI_type_node,
11618 intDI_type_node, V4QI_type_node, V4QI_type_node,
11621 types[MIPS_DI_FTYPE_DI_V2HI_V2HI]
11622 = build_function_type_list (intDI_type_node,
11623 intDI_type_node, V2HI_type_node, V2HI_type_node,
11626 types[MIPS_DI_FTYPE_DI_SI_SI]
11627 = build_function_type_list (intDI_type_node,
11628 intDI_type_node, intSI_type_node, intSI_type_node,
11631 types[MIPS_V4QI_FTYPE_SI]
11632 = build_function_type_list (V4QI_type_node,
11636 types[MIPS_V2HI_FTYPE_SI]
11637 = build_function_type_list (V2HI_type_node,
11641 types[MIPS_VOID_FTYPE_V4QI_V4QI]
11642 = build_function_type_list (void_type_node,
11643 V4QI_type_node, V4QI_type_node,
11646 types[MIPS_SI_FTYPE_V4QI_V4QI]
11647 = build_function_type_list (intSI_type_node,
11648 V4QI_type_node, V4QI_type_node,
11651 types[MIPS_VOID_FTYPE_V2HI_V2HI]
11652 = build_function_type_list (void_type_node,
11653 V2HI_type_node, V2HI_type_node,
11656 types[MIPS_SI_FTYPE_DI_SI]
11657 = build_function_type_list (intSI_type_node,
11658 intDI_type_node, intSI_type_node,
11661 types[MIPS_DI_FTYPE_DI_SI]
11662 = build_function_type_list (intDI_type_node,
11663 intDI_type_node, intSI_type_node,
11666 types[MIPS_VOID_FTYPE_SI_SI]
11667 = build_function_type_list (void_type_node,
11668 intSI_type_node, intSI_type_node,
11671 types[MIPS_SI_FTYPE_PTR_SI]
11672 = build_function_type_list (intSI_type_node,
11673 ptr_type_node, intSI_type_node,
11676 types[MIPS_SI_FTYPE_VOID]
11677 = build_function_type (intSI_type_node, void_list_node);
11681 types[MIPS_V4QI_FTYPE_V4QI]
11682 = build_function_type_list (V4QI_type_node,
11686 types[MIPS_SI_FTYPE_SI_SI_SI]
11687 = build_function_type_list (intSI_type_node,
11688 intSI_type_node, intSI_type_node,
11689 intSI_type_node, NULL_TREE);
11691 types[MIPS_DI_FTYPE_DI_USI_USI]
11692 = build_function_type_list (intDI_type_node,
11694 unsigned_intSI_type_node,
11695 unsigned_intSI_type_node, NULL_TREE);
11697 types[MIPS_DI_FTYPE_SI_SI]
11698 = build_function_type_list (intDI_type_node,
11699 intSI_type_node, intSI_type_node,
11702 types[MIPS_DI_FTYPE_USI_USI]
11703 = build_function_type_list (intDI_type_node,
11704 unsigned_intSI_type_node,
11705 unsigned_intSI_type_node, NULL_TREE);
11707 types[MIPS_V2HI_FTYPE_SI_SI_SI]
11708 = build_function_type_list (V2HI_type_node,
11709 intSI_type_node, intSI_type_node,
11710 intSI_type_node, NULL_TREE);
11715 /* Iterate through all of the bdesc arrays, initializing all of the
11716 builtin functions. */
11719 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
11721 if ((m->proc == PROCESSOR_MAX || (m->proc == mips_arch))
11722 && (m->unsupported_target_flags & target_flags) == 0)
11723 for (d = m->bdesc; d < &m->bdesc[m->size]; d++)
11724 if ((d->target_flags & target_flags) == d->target_flags)
11725 add_builtin_function (d->name, types[d->function_type],
11726 d - m->bdesc + offset,
11727 BUILT_IN_MD, NULL, NULL);
11732 /* Expand a MIPS_BUILTIN_DIRECT function. ICODE is the code of the
11733 .md pattern and CALL is the function expr with arguments. TARGET,
11734 if nonnull, suggests a good place to put the result.
11735 HAS_TARGET indicates the function must return something. */
11738 mips_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
11741 rtx ops[MAX_RECOG_OPERANDS];
11747 /* We save target to ops[0]. */
11748 ops[0] = mips_prepare_builtin_target (icode, 0, target);
11752 /* We need to test if the arglist is not zero. Some instructions have extra
11753 clobber registers. */
11754 for (; i < insn_data[icode].n_operands && i <= call_expr_nargs (exp); i++, j++)
11755 ops[i] = mips_prepare_builtin_arg (icode, i, exp, j);
11760 emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
11764 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
11768 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
11772 gcc_unreachable ();
11777 /* Expand a __builtin_mips_movt_*_ps() or __builtin_mips_movf_*_ps()
11778 function (TYPE says which). EXP is the tree for the function
11779 function, ICODE is the instruction that should be used to compare
11780 the first two arguments, and COND is the condition it should test.
11781 TARGET, if nonnull, suggests a good place to put the result. */
11784 mips_expand_builtin_movtf (enum mips_builtin_type type,
11785 enum insn_code icode, enum mips_fp_condition cond,
11786 rtx target, tree exp)
11788 rtx cmp_result, op0, op1;
11790 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
11791 op0 = mips_prepare_builtin_arg (icode, 1, exp, 0);
11792 op1 = mips_prepare_builtin_arg (icode, 2, exp, 1);
11793 emit_insn (GEN_FCN (icode) (cmp_result, op0, op1, GEN_INT (cond)));
11795 icode = CODE_FOR_mips_cond_move_tf_ps;
11796 target = mips_prepare_builtin_target (icode, 0, target);
11797 if (type == MIPS_BUILTIN_MOVT)
11799 op1 = mips_prepare_builtin_arg (icode, 2, exp, 2);
11800 op0 = mips_prepare_builtin_arg (icode, 1, exp, 3);
11804 op0 = mips_prepare_builtin_arg (icode, 1, exp, 2);
11805 op1 = mips_prepare_builtin_arg (icode, 2, exp, 3);
11807 emit_insn (gen_mips_cond_move_tf_ps (target, op0, op1, cmp_result));
11811 /* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
11812 into TARGET otherwise. Return TARGET. */
11815 mips_builtin_branch_and_move (rtx condition, rtx target,
11816 rtx value_if_true, rtx value_if_false)
11818 rtx true_label, done_label;
11820 true_label = gen_label_rtx ();
11821 done_label = gen_label_rtx ();
11823 /* First assume that CONDITION is false. */
11824 emit_move_insn (target, value_if_false);
11826 /* Branch to TRUE_LABEL if CONDITION is true and DONE_LABEL otherwise. */
11827 emit_jump_insn (gen_condjump (condition, true_label));
11828 emit_jump_insn (gen_jump (done_label));
11831 /* Fix TARGET if CONDITION is true. */
11832 emit_label (true_label);
11833 emit_move_insn (target, value_if_true);
11835 emit_label (done_label);
11839 /* Expand a comparison builtin of type BUILTIN_TYPE. ICODE is the code
11840 of the comparison instruction and COND is the condition it should test.
11841 EXP is the function call and arguments and TARGET, if nonnull,
11842 suggests a good place to put the boolean result. */
11845 mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
11846 enum insn_code icode, enum mips_fp_condition cond,
11847 rtx target, tree exp)
11849 rtx offset, condition, cmp_result, ops[MAX_RECOG_OPERANDS];
11853 if (target == 0 || GET_MODE (target) != SImode)
11854 target = gen_reg_rtx (SImode);
11856 /* Prepare the operands to the comparison. */
11857 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
11858 for (i = 1; i < insn_data[icode].n_operands - 1; i++, j++)
11859 ops[i] = mips_prepare_builtin_arg (icode, i, exp, j);
11861 switch (insn_data[icode].n_operands)
11864 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2], GEN_INT (cond)));
11868 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2],
11869 ops[3], ops[4], GEN_INT (cond)));
11873 gcc_unreachable ();
11876 /* If the comparison sets more than one register, we define the result
11877 to be 0 if all registers are false and -1 if all registers are true.
11878 The value of the complete result is indeterminate otherwise. */
11879 switch (builtin_type)
11881 case MIPS_BUILTIN_CMP_ALL:
11882 condition = gen_rtx_NE (VOIDmode, cmp_result, constm1_rtx);
11883 return mips_builtin_branch_and_move (condition, target,
11884 const0_rtx, const1_rtx);
11886 case MIPS_BUILTIN_CMP_UPPER:
11887 case MIPS_BUILTIN_CMP_LOWER:
11888 offset = GEN_INT (builtin_type == MIPS_BUILTIN_CMP_UPPER);
11889 condition = gen_single_cc (cmp_result, offset);
11890 return mips_builtin_branch_and_move (condition, target,
11891 const1_rtx, const0_rtx);
11894 condition = gen_rtx_NE (VOIDmode, cmp_result, const0_rtx);
11895 return mips_builtin_branch_and_move (condition, target,
11896 const1_rtx, const0_rtx);
11900 /* Expand a bposge builtin of type BUILTIN_TYPE. TARGET, if nonnull,
11901 suggests a good place to put the boolean result. */
11904 mips_expand_builtin_bposge (enum mips_builtin_type builtin_type, rtx target)
11906 rtx condition, cmp_result;
11909 if (target == 0 || GET_MODE (target) != SImode)
11910 target = gen_reg_rtx (SImode);
11912 cmp_result = gen_rtx_REG (CCDSPmode, CCDSP_PO_REGNUM);
11914 if (builtin_type == MIPS_BUILTIN_BPOSGE32)
11919 condition = gen_rtx_GE (VOIDmode, cmp_result, GEN_INT (cmp_value));
11920 return mips_builtin_branch_and_move (condition, target,
11921 const1_rtx, const0_rtx);
11924 /* Set SYMBOL_REF_FLAGS for the SYMBOL_REF inside RTL, which belongs to DECL.
11925 FIRST is true if this is the first time handling this decl. */
11928 mips_encode_section_info (tree decl, rtx rtl, int first)
11930 default_encode_section_info (decl, rtl, first);
11932 if (TREE_CODE (decl) == FUNCTION_DECL)
11934 rtx symbol = XEXP (rtl, 0);
11936 if ((TARGET_LONG_CALLS && !mips_near_type_p (TREE_TYPE (decl)))
11937 || mips_far_type_p (TREE_TYPE (decl)))
11938 SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
11942 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. Some code models use the incoming
11943 value of PIC_FUNCTION_ADDR_REGNUM to set up the global pointer. */
11946 mips_extra_live_on_entry (bitmap regs)
11948 if (TARGET_USE_GOT && !TARGET_ABSOLUTE_ABICALLS)
11949 bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
11952 /* SImode values are represented as sign-extended to DImode. */
11955 mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
11957 if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
11958 return SIGN_EXTEND;
11963 /* MIPS implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
11966 mips_output_dwarf_dtprel (FILE *file, int size, rtx x)
11971 fputs ("\t.dtprelword\t", file);
11975 fputs ("\t.dtpreldword\t", file);
11979 gcc_unreachable ();
11981 output_addr_const (file, x);
11982 fputs ("+0x8000", file);
11985 #include "gt-mips.h"