/* Definitions for code generation pass of GNU compiler.
- Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+ Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
Free Software Foundation, Inc.
This file is part of GCC.
int insn_code;
};
+/* For optabs that can widen from one machine mode to another: a full
+   NUM_MACHINE_MODES x NUM_MACHINE_MODES table of handlers indexed by
+   (to_mode, from_mode).  Allocated on demand (see
+   set_widening_optab_handler) since most optabs never widen.  */
+struct widening_optab_handlers
+{
+ struct optab_handlers handlers[NUM_MACHINE_MODES][NUM_MACHINE_MODES];
+};
+
struct optab_d
{
  enum rtx_code code;
  void (*libcall_gen)(struct optab_d *, const char *name, char suffix,
enum machine_mode);
  struct optab_handlers handlers[NUM_MACHINE_MODES];
+ /* Lazily-allocated two-dimensional handler table for widening forms
+    of this optab; NULL until set_widening_optab_handler records the
+    first entry with to_mode != from_mode.  */
+ struct widening_optab_handlers *widening;
};
typedef struct optab_d * optab;
OTI_ffs,
OTI_clz,
OTI_ctz,
+ OTI_clrsb,
OTI_popcount,
OTI_parity,
/* Square root */
OTI_vec_set,
/* Extract specified field of vector operand. */
OTI_vec_extract,
- /* Extract even/odd fields of vector operands. */
- OTI_vec_extract_even,
- OTI_vec_extract_odd,
- /* Interleave fields of vector operands. */
- OTI_vec_interleave_high,
- OTI_vec_interleave_low,
/* Initialize vector operand. */
OTI_vec_init,
/* Whole vector shift. The shift amount is in bits. */
OTI_vec_widen_umult_lo,
OTI_vec_widen_smult_hi,
OTI_vec_widen_smult_lo,
+ /* Widening shift left.
+ The high/low part of the resulting vector is returned. */
+ OTI_vec_widen_ushiftl_hi,
+ OTI_vec_widen_ushiftl_lo,
+ OTI_vec_widen_sshiftl_hi,
+ OTI_vec_widen_sshiftl_lo,
/* Extract and widen the high/low part of a vector of signed or
floating point elements. */
OTI_vec_unpacks_hi,
/* Perform a raise to the power of integer. */
OTI_powi,
+ /* Atomic compare and swap. */
+ OTI_sync_compare_and_swap,
+
+ /* Atomic exchange with acquire semantics. */
+ OTI_sync_lock_test_and_set,
+
+ /* This second set is atomic operations in which we return the value
+ that existed in memory before the operation. */
+ OTI_sync_old_add,
+ OTI_sync_old_sub,
+ OTI_sync_old_ior,
+ OTI_sync_old_and,
+ OTI_sync_old_xor,
+ OTI_sync_old_nand,
+
+ /* This third set is atomic operations in which we return the value
+ that resulted after performing the operation. */
+ OTI_sync_new_add,
+ OTI_sync_new_sub,
+ OTI_sync_new_ior,
+ OTI_sync_new_and,
+ OTI_sync_new_xor,
+ OTI_sync_new_nand,
+
OTI_MAX
};
#define ffs_optab (&optab_table[OTI_ffs])
#define clz_optab (&optab_table[OTI_clz])
#define ctz_optab (&optab_table[OTI_ctz])
+#define clrsb_optab (&optab_table[OTI_clrsb])
#define popcount_optab (&optab_table[OTI_popcount])
#define parity_optab (&optab_table[OTI_parity])
#define sqrt_optab (&optab_table[OTI_sqrt])
#define vec_set_optab (&optab_table[OTI_vec_set])
#define vec_extract_optab (&optab_table[OTI_vec_extract])
-#define vec_extract_even_optab (&optab_table[OTI_vec_extract_even])
-#define vec_extract_odd_optab (&optab_table[OTI_vec_extract_odd])
-#define vec_interleave_high_optab (&optab_table[OTI_vec_interleave_high])
-#define vec_interleave_low_optab (&optab_table[OTI_vec_interleave_low])
#define vec_init_optab (&optab_table[OTI_vec_init])
#define vec_shl_optab (&optab_table[OTI_vec_shl])
#define vec_shr_optab (&optab_table[OTI_vec_shr])
#define vec_widen_umult_lo_optab (&optab_table[OTI_vec_widen_umult_lo])
#define vec_widen_smult_hi_optab (&optab_table[OTI_vec_widen_smult_hi])
#define vec_widen_smult_lo_optab (&optab_table[OTI_vec_widen_smult_lo])
+#define vec_widen_ushiftl_hi_optab (&optab_table[OTI_vec_widen_ushiftl_hi])
+#define vec_widen_ushiftl_lo_optab (&optab_table[OTI_vec_widen_ushiftl_lo])
+#define vec_widen_sshiftl_hi_optab (&optab_table[OTI_vec_widen_sshiftl_hi])
+#define vec_widen_sshiftl_lo_optab (&optab_table[OTI_vec_widen_sshiftl_lo])
#define vec_unpacks_hi_optab (&optab_table[OTI_vec_unpacks_hi])
#define vec_unpacks_lo_optab (&optab_table[OTI_vec_unpacks_lo])
#define vec_unpacku_hi_optab (&optab_table[OTI_vec_unpacku_hi])
#define powi_optab (&optab_table[OTI_powi])
+#define sync_compare_and_swap_optab \
+ (&optab_table[(int) OTI_sync_compare_and_swap])
+#define sync_lock_test_and_set_optab \
+ (&optab_table[(int) OTI_sync_lock_test_and_set])
+#define sync_old_add_optab (&optab_table[(int) OTI_sync_old_add])
+#define sync_old_sub_optab (&optab_table[(int) OTI_sync_old_sub])
+#define sync_old_ior_optab (&optab_table[(int) OTI_sync_old_ior])
+#define sync_old_and_optab (&optab_table[(int) OTI_sync_old_and])
+#define sync_old_xor_optab (&optab_table[(int) OTI_sync_old_xor])
+#define sync_old_nand_optab (&optab_table[(int) OTI_sync_old_nand])
+#define sync_new_add_optab (&optab_table[(int) OTI_sync_new_add])
+#define sync_new_sub_optab (&optab_table[(int) OTI_sync_new_sub])
+#define sync_new_ior_optab (&optab_table[(int) OTI_sync_new_ior])
+#define sync_new_and_optab (&optab_table[(int) OTI_sync_new_and])
+#define sync_new_xor_optab (&optab_table[(int) OTI_sync_new_xor])
+#define sync_new_nand_optab (&optab_table[(int) OTI_sync_new_nand])
+
/* Conversion optabs have their own table and indexes. */
enum convert_optab_index
{
COI_satfract,
COI_satfractuns,
+ COI_vec_load_lanes,
+ COI_vec_store_lanes,
+
+ /* Vector conditional operations. */
+ COI_vcond,
+ COI_vcondu,
+
COI_MAX
};
#define fractuns_optab (&convert_optab_table[COI_fractuns])
#define satfract_optab (&convert_optab_table[COI_satfract])
#define satfractuns_optab (&convert_optab_table[COI_satfractuns])
+#define vec_load_lanes_optab (&convert_optab_table[COI_vec_load_lanes])
+#define vec_store_lanes_optab (&convert_optab_table[COI_vec_store_lanes])
+#define vcond_optab (&convert_optab_table[(int) COI_vcond])
+#define vcondu_optab (&convert_optab_table[(int) COI_vcondu])
/* Contains the optab used for each rtx code. */
extern optab code_to_optab[NUM_RTX_CODE + 1];
DOI_reload_in,
DOI_reload_out,
- /* Vector conditional operations. */
- DOI_vcond,
- DOI_vcondu,
-
/* Block move operation. */
DOI_movmem,
DOI_cmpstrn,
DOI_cmpmem,
- /* Synchronization primitives. This first set is atomic operation for
- which we don't care about the resulting value. */
+ /* Atomic clear with release semantics. */
+ DOI_sync_lock_release,
+
+ /* Atomic operation with no resulting value. */
DOI_sync_add,
DOI_sync_sub,
DOI_sync_ior,
DOI_sync_xor,
DOI_sync_nand,
- /* This second set is atomic operations in which we return the value
- that existed in memory before the operation. */
- DOI_sync_old_add,
- DOI_sync_old_sub,
- DOI_sync_old_ior,
- DOI_sync_old_and,
- DOI_sync_old_xor,
- DOI_sync_old_nand,
-
- /* This third set is atomic operations in which we return the value
- that resulted after performing the operation. */
- DOI_sync_new_add,
- DOI_sync_new_sub,
- DOI_sync_new_ior,
- DOI_sync_new_and,
- DOI_sync_new_xor,
- DOI_sync_new_nand,
-
- /* Atomic compare and swap. */
- DOI_sync_compare_and_swap,
-
- /* Atomic exchange with acquire semantics. */
- DOI_sync_lock_test_and_set,
-
- /* Atomic clear with release semantics. */
- DOI_sync_lock_release,
+ /* Atomic operations with memory model parameters. */
+ DOI_atomic_exchange,
+ DOI_atomic_compare_and_swap,
+ DOI_atomic_load,
+ DOI_atomic_store,
+ DOI_atomic_add_fetch,
+ DOI_atomic_sub_fetch,
+ DOI_atomic_and_fetch,
+ DOI_atomic_nand_fetch,
+ DOI_atomic_xor_fetch,
+ DOI_atomic_or_fetch,
+ DOI_atomic_fetch_add,
+ DOI_atomic_fetch_sub,
+ DOI_atomic_fetch_and,
+ DOI_atomic_fetch_nand,
+ DOI_atomic_fetch_xor,
+ DOI_atomic_fetch_or,
+ DOI_atomic_add,
+ DOI_atomic_sub,
+ DOI_atomic_and,
+ DOI_atomic_nand,
+ DOI_atomic_xor,
+ DOI_atomic_or,
+ DOI_atomic_always_lock_free,
+ DOI_atomic_is_lock_free,
+ DOI_atomic_thread_fence,
+ DOI_atomic_signal_fence,
+
+ /* Vector permutation. */
+ DOI_vec_perm,
+ DOI_vec_perm_const,
DOI_MAX
};
#endif
#define reload_in_optab (&direct_optab_table[(int) DOI_reload_in])
#define reload_out_optab (&direct_optab_table[(int) DOI_reload_out])
-#define vcond_optab (&direct_optab_table[(int) DOI_vcond])
-#define vcondu_optab (&direct_optab_table[(int) DOI_vcondu])
#define movmem_optab (&direct_optab_table[(int) DOI_movmem])
#define setmem_optab (&direct_optab_table[(int) DOI_setmem])
#define cmpstr_optab (&direct_optab_table[(int) DOI_cmpstr])
#define cmpstrn_optab (&direct_optab_table[(int) DOI_cmpstrn])
#define cmpmem_optab (&direct_optab_table[(int) DOI_cmpmem])
+#define sync_lock_release_optab \
+ (&direct_optab_table[(int) DOI_sync_lock_release])
#define sync_add_optab (&direct_optab_table[(int) DOI_sync_add])
#define sync_sub_optab (&direct_optab_table[(int) DOI_sync_sub])
#define sync_ior_optab (&direct_optab_table[(int) DOI_sync_ior])
#define sync_and_optab (&direct_optab_table[(int) DOI_sync_and])
#define sync_xor_optab (&direct_optab_table[(int) DOI_sync_xor])
#define sync_nand_optab (&direct_optab_table[(int) DOI_sync_nand])
-#define sync_old_add_optab (&direct_optab_table[(int) DOI_sync_old_add])
-#define sync_old_sub_optab (&direct_optab_table[(int) DOI_sync_old_sub])
-#define sync_old_ior_optab (&direct_optab_table[(int) DOI_sync_old_ior])
-#define sync_old_and_optab (&direct_optab_table[(int) DOI_sync_old_and])
-#define sync_old_xor_optab (&direct_optab_table[(int) DOI_sync_old_xor])
-#define sync_old_nand_optab (&direct_optab_table[(int) DOI_sync_old_nand])
-#define sync_new_add_optab (&direct_optab_table[(int) DOI_sync_new_add])
-#define sync_new_sub_optab (&direct_optab_table[(int) DOI_sync_new_sub])
-#define sync_new_ior_optab (&direct_optab_table[(int) DOI_sync_new_ior])
-#define sync_new_and_optab (&direct_optab_table[(int) DOI_sync_new_and])
-#define sync_new_xor_optab (&direct_optab_table[(int) DOI_sync_new_xor])
-#define sync_new_nand_optab (&direct_optab_table[(int) DOI_sync_new_nand])
-#define sync_compare_and_swap_optab \
- (&direct_optab_table[(int) DOI_sync_compare_and_swap])
-#define sync_lock_test_and_set_optab \
- (&direct_optab_table[(int) DOI_sync_lock_test_and_set])
-#define sync_lock_release_optab \
- (&direct_optab_table[(int) DOI_sync_lock_release])
+
+#define atomic_exchange_optab \
+ (&direct_optab_table[(int) DOI_atomic_exchange])
+#define atomic_compare_and_swap_optab \
+ (&direct_optab_table[(int) DOI_atomic_compare_and_swap])
+#define atomic_load_optab \
+ (&direct_optab_table[(int) DOI_atomic_load])
+#define atomic_store_optab \
+ (&direct_optab_table[(int) DOI_atomic_store])
+#define atomic_add_fetch_optab \
+ (&direct_optab_table[(int) DOI_atomic_add_fetch])
+#define atomic_sub_fetch_optab \
+ (&direct_optab_table[(int) DOI_atomic_sub_fetch])
+#define atomic_and_fetch_optab \
+ (&direct_optab_table[(int) DOI_atomic_and_fetch])
+#define atomic_nand_fetch_optab \
+ (&direct_optab_table[(int) DOI_atomic_nand_fetch])
+#define atomic_xor_fetch_optab \
+ (&direct_optab_table[(int) DOI_atomic_xor_fetch])
+#define atomic_or_fetch_optab \
+ (&direct_optab_table[(int) DOI_atomic_or_fetch])
+#define atomic_fetch_add_optab \
+ (&direct_optab_table[(int) DOI_atomic_fetch_add])
+#define atomic_fetch_sub_optab \
+ (&direct_optab_table[(int) DOI_atomic_fetch_sub])
+#define atomic_fetch_and_optab \
+ (&direct_optab_table[(int) DOI_atomic_fetch_and])
+#define atomic_fetch_nand_optab \
+ (&direct_optab_table[(int) DOI_atomic_fetch_nand])
+#define atomic_fetch_xor_optab \
+ (&direct_optab_table[(int) DOI_atomic_fetch_xor])
+#define atomic_fetch_or_optab \
+ (&direct_optab_table[(int) DOI_atomic_fetch_or])
+#define atomic_add_optab \
+ (&direct_optab_table[(int) DOI_atomic_add])
+#define atomic_sub_optab \
+ (&direct_optab_table[(int) DOI_atomic_sub])
+#define atomic_and_optab \
+ (&direct_optab_table[(int) DOI_atomic_and])
+#define atomic_nand_optab \
+ (&direct_optab_table[(int) DOI_atomic_nand])
+#define atomic_xor_optab \
+ (&direct_optab_table[(int) DOI_atomic_xor])
+#define atomic_or_optab \
+ (&direct_optab_table[(int) DOI_atomic_or])
+#define atomic_always_lock_free_optab \
+ (&direct_optab_table[(int) DOI_atomic_always_lock_free])
+#define atomic_is_lock_free_optab \
+ (&direct_optab_table[(int) DOI_atomic_is_lock_free])
+#define atomic_thread_fence_optab \
+ (&direct_optab_table[(int) DOI_atomic_thread_fence])
+#define atomic_signal_fence_optab \
+ (&direct_optab_table[(int) DOI_atomic_signal_fence])
+
+/* Vector permutation optabs.  Cast the index to int for consistency
+   with every other direct_optab_table accessor in this file.  */
+#define vec_perm_optab (&direct_optab_table[(int) DOI_vec_perm])
+#define vec_perm_const_optab (&direct_optab_table[(int) DOI_vec_perm_const])
\f
/* Target-dependent globals. */
struct target_optabs {
extern rtx expand_binop (enum machine_mode, optab, rtx, rtx, rtx, int,
enum optab_methods);
+extern rtx simplify_expand_binop (enum machine_mode mode, optab binoptab,
+ rtx op0, rtx op1, rtx target, int unsignedp,
+ enum optab_methods methods);
+
extern bool force_expand_binop (enum machine_mode, optab, rtx, rtx, rtx, int,
enum optab_methods);
/* Generate an instruction with a given INSN_CODE with an output and
an input. */
-extern void emit_unop_insn (int, rtx, rtx, enum rtx_code);
-extern bool maybe_emit_unop_insn (int, rtx, rtx, enum rtx_code);
+extern void emit_unop_insn (enum insn_code, rtx, rtx, enum rtx_code);
+extern bool maybe_emit_unop_insn (enum insn_code, rtx, rtx, enum rtx_code);
+
+/* Find a widening optab even if it doesn't widen as much as we want. */
+#define find_widening_optab_handler(A,B,C,D) \
+ find_widening_optab_handler_and_mode (A, B, C, D, NULL)
+extern enum insn_code find_widening_optab_handler_and_mode (optab,
+ enum machine_mode,
+ enum machine_mode,
+ int,
+ enum machine_mode *);
/* An extra flag to control optab_for_tree_code's behavior. This is needed to
distinguish between machines with a vector shift that takes a scalar for the
extern void set_conv_libfunc (convert_optab, enum machine_mode,
enum machine_mode, const char *);
+/* Call this to install all of the __sync libcalls up to size MAX. */
+extern void init_sync_libfuncs (int max);
+
/* Generate code for a FIXED_CONVERT_EXPR. */
extern void expand_fixed_convert (rtx, rtx, int, int);
/* Generate code for a FLOAT_EXPR. */
extern void expand_float (rtx, rtx, int);
+/* Return the insn_code for a FLOAT_EXPR.  */
+extern enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
+
+/* Return true if there is an inline compare and swap pattern.  */
+extern bool can_compare_and_swap_p (enum machine_mode, bool);
+
+/* Return true if there is an inline atomic exchange pattern.  */
+extern bool can_atomic_exchange_p (enum machine_mode, bool);
+
+/* Generate code for a compare and swap.  */
+extern bool expand_atomic_compare_and_swap (rtx *, rtx *, rtx, rtx, rtx, bool,
+                                            enum memmodel, enum memmodel);
+
+/* Generate memory barriers.  */
+extern void expand_mem_thread_fence (enum memmodel);
+extern void expand_mem_signal_fence (enum memmodel);
+
+/* Check whether an operation represented by the code CODE is a
+   convert operation that is supported by the target platform in
+   vector form.  */
+extern bool supportable_convert_operation (enum tree_code, tree, tree, tree *,
+                                           enum tree_code *);
+
/* Generate code for a FIX_EXPR. */
extern void expand_fix (rtx, rtx, int);
extern rtx expand_widening_mult (enum machine_mode, rtx, rtx, rtx, int, optab);
/* Return tree if target supports vector operations for COND_EXPR. */
-bool expand_vec_cond_expr_p (tree, enum machine_mode);
+bool expand_vec_cond_expr_p (tree, tree);
/* Generate code for VEC_COND_EXPR. */
extern rtx expand_vec_cond_expr (tree, tree, tree, tree, rtx);
/* Generate code for VEC_LSHIFT_EXPR and VEC_RSHIFT_EXPR. */
extern rtx expand_vec_shift_expr (sepops, rtx);
+/* Return tree if target supports vector operations for VEC_PERM_EXPR. */
+extern bool can_vec_perm_p (enum machine_mode, bool, const unsigned char *);
+
+/* Generate code for VEC_PERM_EXPR. */
+extern rtx expand_vec_perm (enum machine_mode, rtx, rtx, rtx, rtx);
+
/* Return the insn used to implement mode MODE of OP, or CODE_FOR_nothing
if the target does not have such an insn. */
+ (int) CODE_FOR_nothing);
}
+/* Like optab_handler, but for widening operations that have a TO_MODE
+   and a FROM_MODE.  Returns CODE_FOR_nothing when no handler has been
+   recorded for the mode pair.  */
+
+static inline enum insn_code
+widening_optab_handler (optab op, enum machine_mode to_mode,
+                        enum machine_mode from_mode)
+{
+  /* A same-mode (or mode-less source) query is just an ordinary
+     single-mode optab lookup.  */
+  if (to_mode == from_mode || from_mode == VOIDmode)
+    return optab_handler (op, to_mode);
+
+  /* Stored codes are biased by CODE_FOR_nothing, so undo the bias
+     when reading an entry back.  */
+  if (op->widening)
+    return (enum insn_code) (op->widening->handlers[(int) to_mode]
+                             [(int) from_mode].insn_code
+                             + (int) CODE_FOR_nothing);
+
+  /* No widening table was ever allocated for this optab.  */
+  return CODE_FOR_nothing;
+}
+
/* Record that insn CODE should be used to implement mode MODE of OP. */
static inline void
op->handlers[(int) mode].insn_code = (int) code - (int) CODE_FOR_nothing;
}
+/* Like set_optab_handler, but for widening operations that have a TO_MODE
+ and a FROM_MODE. */
+
+static inline void
+set_widening_optab_handler (optab op, enum machine_mode to_mode,
+ enum machine_mode from_mode, enum insn_code code)
+{
+ /* A non-widening entry lives in the ordinary single-mode table.  */
+ if (to_mode == from_mode)
+ set_optab_handler (op, to_mode, code);
+ else
+ {
+ /* Allocate the two-mode table on first use.  xcalloc zero-fills
+    it, and since entries are stored biased by CODE_FOR_nothing,
+    an untouched (zero) entry reads back as CODE_FOR_nothing from
+    widening_optab_handler.  */
+ if (op->widening == NULL)
+ op->widening = (struct widening_optab_handlers *)
+ xcalloc (1, sizeof (struct widening_optab_handlers));
+
+ op->widening->handlers[(int) to_mode][(int) from_mode].insn_code
+ = (int) code - (int) CODE_FOR_nothing;
+ }
+}
+
/* Return the insn used to perform conversion OP from mode FROM_MODE
to mode TO_MODE; return CODE_FOR_nothing if the target does not have
such an insn. */
extern rtx optab_libfunc (optab optab, enum machine_mode mode);
extern rtx convert_optab_libfunc (convert_optab optab, enum machine_mode mode1,
enum machine_mode mode2);
+
+extern bool insn_operand_matches (enum insn_code icode, unsigned int opno,
+ rtx operand);
+
+/* Describes the type of an expand_operand. Each value is associated
+ with a create_*_operand function; see the comments above those
+ functions for details. */
+enum expand_operand_type {
+ EXPAND_FIXED, /* Use the rtx exactly as given; see create_fixed_operand.  */
+ EXPAND_OUTPUT, /* Output in a required mode; see create_output_operand.  */
+ EXPAND_INPUT, /* Input already in mode MODE; see create_input_operand.  */
+ EXPAND_CONVERT_TO, /* Convert VALUE to MODE first; see
+    create_convert_operand_to.  */
+ EXPAND_CONVERT_FROM, /* Backend may convert VALUE; see
+    create_convert_operand_from.  */
+ EXPAND_ADDRESS, /* A Pmode address; see create_address_operand.  */
+ EXPAND_INTEGER /* An expand-time integer constant; see
+    create_integer_operand.  */
+};
+
+/* Information about an operand for instruction expansion. */
+struct expand_operand {
+ /* The type of operand. */
+ ENUM_BITFIELD (expand_operand_type) type : 8;
+
+ /* True if any conversion should treat VALUE as being unsigned
+ rather than signed. Only meaningful for certain types. */
+ unsigned int unsigned_p : 1;
+
+ /* Unused; available for future use. */
+ unsigned int unused : 7;
+
+ /* The mode passed to the convert_*_operand function. It has a
+ type-dependent meaning. */
+ ENUM_BITFIELD (machine_mode) mode : 16;
+
+ /* The value of the operand. */
+ rtx value;
+};
+
+/* Initialize OP with the given fields.  Initialise the other fields
+   to their default values.  This is the common initializer that all
+   of the create_*_operand wrappers below delegate to.  */
+
+static inline void
+create_expand_operand (struct expand_operand *op,
+ enum expand_operand_type type,
+ rtx value, enum machine_mode mode,
+ bool unsigned_p)
+{
+ op->type = type;
+ op->unsigned_p = unsigned_p;
+ op->unused = 0;
+ op->mode = mode;
+ op->value = value;
+}
+
+/* Make OP describe an operand that must use rtx X, even if X is volatile. */
+
+static inline void
+create_fixed_operand (struct expand_operand *op, rtx x)
+{
+ create_expand_operand (op, EXPAND_FIXED, x, VOIDmode, false);
+}
+
+/* Make OP describe an output operand that must have mode MODE.
+ X, if nonnull, is a suggestion for where the output should be stored.
+ It is OK for VALUE to be inconsistent with MODE, although it will just
+ be ignored in that case. */
+
+static inline void
+create_output_operand (struct expand_operand *op, rtx x,
+ enum machine_mode mode)
+{
+ create_expand_operand (op, EXPAND_OUTPUT, x, mode, false);
+}
+
+/* Make OP describe an input operand that must have mode MODE and
+ value VALUE; MODE cannot be VOIDmode. The backend may request that
+ VALUE be copied into a different kind of rtx before being passed
+ as an operand. */
+
+static inline void
+create_input_operand (struct expand_operand *op, rtx value,
+ enum machine_mode mode)
+{
+ create_expand_operand (op, EXPAND_INPUT, value, mode, false);
+}
+
+/* Like create_input_operand, except that VALUE must first be converted
+ to mode MODE. UNSIGNED_P says whether VALUE is unsigned. */
+
+static inline void
+create_convert_operand_to (struct expand_operand *op, rtx value,
+ enum machine_mode mode, bool unsigned_p)
+{
+ create_expand_operand (op, EXPAND_CONVERT_TO, value, mode, unsigned_p);
+}
+
+/* Make OP describe an input operand that should have the same value
+ as VALUE, after any mode conversion that the backend might request.
+ If VALUE is a CONST_INT, it should be treated as having mode MODE.
+ UNSIGNED_P says whether VALUE is unsigned. */
+
+static inline void
+create_convert_operand_from (struct expand_operand *op, rtx value,
+ enum machine_mode mode, bool unsigned_p)
+{
+ create_expand_operand (op, EXPAND_CONVERT_FROM, value, mode, unsigned_p);
+}
+
+extern void create_convert_operand_from_type (struct expand_operand *op,
+ rtx value, tree type);
+
+/* Make OP describe an input Pmode address operand. VALUE is the value
+ of the address, but it may need to be converted to Pmode first. */
+
+static inline void
+create_address_operand (struct expand_operand *op, rtx value)
+{
+ create_expand_operand (op, EXPAND_ADDRESS, value, Pmode, false);
+}
+
+/* Make OP describe an input operand that has value INTVAL and that has
+ no inherent mode. This function should only be used for operands that
+ are always expand-time constants. The backend may request that INTVAL
+ be copied into a different kind of rtx, but it must specify the mode
+ of that rtx if so. */
+
+static inline void
+create_integer_operand (struct expand_operand *op, HOST_WIDE_INT intval)
+{
+ /* VOIDmode here matches the "no inherent mode" contract above.  */
+ create_expand_operand (op, EXPAND_INTEGER, GEN_INT (intval), VOIDmode, false);
+}
+
+extern bool valid_multiword_target_p (rtx);
+
+extern bool maybe_legitimize_operands (enum insn_code icode,
+ unsigned int opno, unsigned int nops,
+ struct expand_operand *ops);
+extern rtx maybe_gen_insn (enum insn_code icode, unsigned int nops,
+ struct expand_operand *ops);
+extern bool maybe_expand_insn (enum insn_code icode, unsigned int nops,
+ struct expand_operand *ops);
+extern bool maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
+ struct expand_operand *ops);
+extern void expand_insn (enum insn_code icode, unsigned int nops,
+ struct expand_operand *ops);
+extern void expand_jump_insn (enum insn_code icode, unsigned int nops,
+ struct expand_operand *ops);
+
+extern rtx prepare_operand (enum insn_code, rtx, int, enum machine_mode,
+ enum machine_mode, int);
+
#endif /* GCC_OPTABS_H */