/* This structure is used by move_by_pieces to describe the move to
be performed. */
-struct move_by_pieces
+struct move_by_pieces_d
{
rtx to;
rtx to_addr;
/* This structure is used by store_by_pieces to describe the clear to
be performed. */
-struct store_by_pieces
+struct store_by_pieces_d
{
rtx to;
rtx to_addr;
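The _d suffix on these tags appears to be namespace hygiene rather than a functional change: move_by_pieces and store_by_pieces are also function names in this file, so the old code leaned on C's separate tag namespace, which C++ does not honor the same way. A minimal sketch of the clash being avoided (hypothetical bodies):

    struct move_by_pieces { int len; };    /* old tag          */
    static void move_by_pieces (void) {}   /* same identifier  */
    /* Valid C: tags live in their own namespace.  In C++ the
       function declaration hides the class name, forcing
       elaborated 'struct move_by_pieces' references at every use.  */

Renaming the tag to move_by_pieces_d keeps the two identifiers distinct under both languages.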
unsigned int,
unsigned int);
static void move_by_pieces_1 (rtx (*) (rtx, ...), enum machine_mode,
- struct move_by_pieces *);
+ struct move_by_pieces_d *);
static bool block_move_libcall_safe_for_call_parm (void);
static bool emit_block_move_via_movmem (rtx, rtx, rtx, unsigned, unsigned, HOST_WIDE_INT);
static tree emit_block_move_libcall_fn (int);
static void emit_block_move_via_loop (rtx, rtx, rtx, unsigned);
static rtx clear_by_pieces_1 (void *, HOST_WIDE_INT, enum machine_mode);
static void clear_by_pieces (rtx, unsigned HOST_WIDE_INT, unsigned int);
-static void store_by_pieces_1 (struct store_by_pieces *, unsigned int);
+static void store_by_pieces_1 (struct store_by_pieces_d *, unsigned int);
static void store_by_pieces_2 (rtx (*) (rtx, ...), enum machine_mode,
- struct store_by_pieces *);
+ struct store_by_pieces_d *);
static tree clear_storage_libcall_fn (int);
static rtx compress_float_constant (rtx, rtx);
static rtx get_subtarget (rtx);
if (unsignedp && GET_MODE_CLASS (mode) == MODE_INT
&& GET_MODE_BITSIZE (mode) == 2 * HOST_BITS_PER_WIDE_INT
- && GET_CODE (x) == CONST_INT && INTVAL (x) < 0)
+ && CONST_INT_P (x) && INTVAL (x) < 0)
{
HOST_WIDE_INT val = INTVAL (x);
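Most hunks in this patch are the same mechanical substitution, so it is worth noting once: CONST_INT_P is the predicate macro from rtl.h, and it expands to exactly the test it replaces, so no behavior changes.

    /* rtl.h */
    #define CONST_INT_P(X) (GET_CODE (X) == CONST_INT)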
non-volatile MEM. Except for the constant case where MODE is no
wider than HOST_BITS_PER_WIDE_INT, we must be narrowing the operand. */
- if ((GET_CODE (x) == CONST_INT
+ if ((CONST_INT_P (x)
&& GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
|| (GET_MODE_CLASS (mode) == MODE_INT
&& GET_MODE_CLASS (oldmode) == MODE_INT
/* ??? If we don't know OLDMODE, we have to assume here that
X does not need sign- or zero-extension. This may not be
the case, but it's the best we can do. */
- if (GET_CODE (x) == CONST_INT && oldmode != VOIDmode
+ if (CONST_INT_P (x) && oldmode != VOIDmode
&& GET_MODE_SIZE (mode) > GET_MODE_SIZE (oldmode))
{
HOST_WIDE_INT val = INTVAL (x);
move_by_pieces (rtx to, rtx from, unsigned HOST_WIDE_INT len,
unsigned int align, int endp)
{
- struct move_by_pieces data;
+ struct move_by_pieces_d data;
rtx to_addr, from_addr = XEXP (from, 0);
unsigned int max_size = MOVE_MAX_PIECES + 1;
enum machine_mode mode = VOIDmode, tmode;
static void
move_by_pieces_1 (rtx (*genfun) (rtx, ...), enum machine_mode mode,
- struct move_by_pieces *data)
+ struct move_by_pieces_d *data)
{
unsigned int size = GET_MODE_SIZE (mode);
rtx to1 = NULL_RTX, from1;
/* Set MEM_SIZE as appropriate for this block copy. The main place this
can be incorrect is coming from __builtin_memcpy. */
- if (GET_CODE (size) == CONST_INT)
+ if (CONST_INT_P (size))
{
if (INTVAL (size) == 0)
return 0;
set_mem_size (y, size);
}
- if (GET_CODE (size) == CONST_INT && MOVE_BY_PIECES_P (INTVAL (size), align))
+ if (CONST_INT_P (size) && MOVE_BY_PIECES_P (INTVAL (size), align))
move_by_pieces (x, y, INTVAL (size), align, 0);
else if (emit_block_move_via_movmem (x, y, size, align,
expected_align, expected_size))
here because if SIZE is less than the mode mask, as it is
returned by the macro, it will definitely be less than the
actual mode mask. */
- && ((GET_CODE (size) == CONST_INT
+ && ((CONST_INT_P (size)
&& ((unsigned HOST_WIDE_INT) INTVAL (size)
<= (GET_MODE_MASK (mode) >> 1)))
|| GET_MODE_BITSIZE (mode) >= BITS_PER_WORD)
rtx (*constfun) (void *, HOST_WIDE_INT, enum machine_mode),
void *constfundata, unsigned int align, bool memsetp, int endp)
{
- struct store_by_pieces data;
+ struct store_by_pieces_d data;
if (len == 0)
{
static void
clear_by_pieces (rtx to, unsigned HOST_WIDE_INT len, unsigned int align)
{
- struct store_by_pieces data;
+ struct store_by_pieces_d data;
if (len == 0)
return;
rtx with BLKmode). ALIGN is maximum alignment we can assume. */
static void
-store_by_pieces_1 (struct store_by_pieces *data ATTRIBUTE_UNUSED,
+store_by_pieces_1 (struct store_by_pieces_d *data ATTRIBUTE_UNUSED,
unsigned int align ATTRIBUTE_UNUSED)
{
rtx to_addr = XEXP (data->to, 0);
static void
store_by_pieces_2 (rtx (*genfun) (rtx, ...), enum machine_mode mode,
- struct store_by_pieces *data)
+ struct store_by_pieces_d *data)
{
unsigned int size = GET_MODE_SIZE (mode);
rtx to1, cst;
/* If OBJECT is not BLKmode and SIZE is the same size as its mode,
just move a zero. Otherwise, do this a piece at a time. */
if (mode != BLKmode
- && GET_CODE (size) == CONST_INT
+ && CONST_INT_P (size)
&& INTVAL (size) == (HOST_WIDE_INT) GET_MODE_SIZE (mode))
{
rtx zero = CONST0_RTX (mode);
align = MEM_ALIGN (object);
- if (GET_CODE (size) == CONST_INT
+ if (CONST_INT_P (size)
&& CLEAR_BY_PIECES_P (INTVAL (size), align))
clear_by_pieces (object, INTVAL (size), align);
else if (set_storage_via_setmem (object, size, const0_rtx, align,
for returning pointers, we could end up generating incorrect code. */
object_tree = make_tree (ptr_type_node, object);
- if (GET_CODE (val) != CONST_INT)
+ if (!CONST_INT_P (val))
val = convert_to_mode (TYPE_MODE (integer_type_node), val, 1);
size_tree = make_tree (sizetype, size);
val_tree = make_tree (integer_type_node, val);
HOST_BITS_PER_WIDE_INT here because if SIZE is less than
the mode mask, as it is returned by the macro, it will
definitely be less than the actual mode mask. */
- && ((GET_CODE (size) == CONST_INT
+ && ((CONST_INT_P (size)
&& ((unsigned HOST_WIDE_INT) INTVAL (size)
<= (GET_MODE_MASK (mode) >> 1)))
|| GET_MODE_BITSIZE (mode) >= BITS_PER_WORD)
HOST_WIDE_INT val;
gcc_assert (GET_CODE (expr) == PLUS || GET_CODE (expr) == MINUS);
- gcc_assert (GET_CODE (XEXP (expr, 1)) == CONST_INT);
+ gcc_assert (CONST_INT_P (XEXP (expr, 1)));
val = INTVAL (XEXP (expr, 1));
if (GET_CODE (expr) == MINUS)
val = -val;
}
else
{
- if (GET_CODE (size) == CONST_INT)
+ if (CONST_INT_P (size))
temp = plus_constant (virtual_outgoing_args_rtx,
-INTVAL (size) - (below ? 0 : extra));
else if (extra != 0 && !below)
on the stack for alignment purposes. */
if (args_addr == 0
&& PUSH_ARGS
- && GET_CODE (size) == CONST_INT
+ && CONST_INT_P (size)
&& skip == 0
&& MEM_ALIGN (xinner) >= align
&& (MOVE_BY_PIECES_P ((unsigned) INTVAL (size) - used, align))
/* Deduct words put into registers from the size we must copy. */
if (partial != 0)
{
- if (GET_CODE (size) == CONST_INT)
+ if (CONST_INT_P (size))
size = GEN_INT (INTVAL (size) - used);
else
size = expand_binop (GET_MODE (size), sub_optab, size,
temp = push_block (size, extra, where_pad == downward);
extra = 0;
}
- else if (GET_CODE (args_so_far) == CONST_INT)
+ else if (CONST_INT_P (args_so_far))
temp = memory_address (BLKmode,
plus_constant (args_addr,
skip + INTVAL (args_so_far)));
else
#endif
{
- if (GET_CODE (args_so_far) == CONST_INT)
+ if (CONST_INT_P (args_so_far))
addr
= memory_address (mode,
plus_constant (args_addr,
rtx temp;
rtx alt_rtl = NULL_RTX;
int dont_return_target = 0;
+ location_t loc = EXPR_LOCATION (exp);
if (VOID_TYPE_P (TREE_TYPE (exp)))
{
(TYPE_MODE (TREE_TYPE (exp)),
SUBREG_PROMOTED_UNSIGNED_P (target));
- exp = fold_convert (ntype, exp);
+ exp = fold_convert_loc (loc, ntype, exp);
}
- exp = fold_convert (lang_hooks.types.type_for_mode
- (GET_MODE (SUBREG_REG (target)),
- SUBREG_PROMOTED_UNSIGNED_P (target)),
- exp);
+ exp = fold_convert_loc (loc, lang_hooks.types.type_for_mode
+ (GET_MODE (SUBREG_REG (target)),
+ SUBREG_PROMOTED_UNSIGNED_P (target)),
+ exp);
inner_target = SUBREG_REG (target);
}
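The location plumbing follows one pattern throughout: the fold builders now take an explicit location_t first argument, and the unlocated names survive as thin wrappers that pass UNKNOWN_LOCATION, roughly:

    /* tree.h, approximately */
    #define fold_convert(T1, T2) \
      fold_convert_loc (UNKNOWN_LOCATION, (T1), (T2))
    #define size_binop(CODE, T1, T2) \
      size_binop_loc (UNKNOWN_LOCATION, (CODE), (T1), (T2))

Passing EXPR_LOCATION (exp) instead, as above, keeps the original source position on whatever trees the folders construct.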
type of the string, which is actually the size of the target. */
rtx size = expr_size (exp);
- if (GET_CODE (size) == CONST_INT
+ if (CONST_INT_P (size)
&& INTVAL (size) < TREE_STRING_LENGTH (exp))
emit_block_move (target, temp, size,
(call_param_p
{
/* Compute the size of the data to copy from the string. */
tree copy_size
- = size_binop (MIN_EXPR,
- make_tree (sizetype, size),
- size_int (TREE_STRING_LENGTH (exp)));
+ = size_binop_loc (loc, MIN_EXPR,
+ make_tree (sizetype, size),
+ size_int (TREE_STRING_LENGTH (exp)));
rtx copy_size_rtx
= expand_expr (copy_size, NULL_RTX, VOIDmode,
(call_param_p
/* Figure out how much is left in TARGET that we have to clear.
Do all calculations in ptr_mode. */
- if (GET_CODE (copy_size_rtx) == CONST_INT)
+ if (CONST_INT_P (copy_size_rtx))
{
size = plus_constant (size, -INTVAL (copy_size_rtx));
target = adjust_address (target, BLKmode,
enum machine_mode mode;
HOST_WIDE_INT bitsize;
HOST_WIDE_INT bitpos;
- int unsignedp;
rtx xtarget = target;
if (cleared && initializer_zerop (value))
continue;
- unsignedp = TYPE_UNSIGNED (elttype);
mode = TYPE_MODE (elttype);
if (mode == BLKmode)
bitsize = (host_integerp (TYPE_SIZE (elttype), 1)
tree exit_cond;
expand_normal (hi_index);
- unsignedp = TYPE_UNSIGNED (domain);
index = build_decl (EXPR_LOCATION (exp),
VAR_DECL, NULL_TREE, domain);
-
- index_r
- = gen_reg_rtx (promote_mode (domain, DECL_MODE (index),
- &unsignedp, 0));
+ index_r = gen_reg_rtx (promote_decl_mode (index, NULL));
SET_DECL_RTL (index, index_r);
store_expr (lo_index, index_r, 0, false);
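promote_decl_mode packages the decl-kind test that the deleted promote_mode call spelled out by hand; NULL is a valid second argument when, as here, only the promoted mode is wanted. For reference, the declaration (in expr.h in this era):

    extern enum machine_mode promote_decl_mode (const_tree, int *);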
{
tree aligned_size = TREE_OPERAND (exp, 3);
tree elmt_type = TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 0)));
+ location_t loc = EXPR_LOCATION (exp);
/* If a size was specified in the ARRAY_REF, it's the size measured
in alignment units of the element type. So multiply by that value. */
/* ??? tree_ssa_useless_type_conversion will eliminate casts to
sizetype from another type of the same width and signedness. */
if (TREE_TYPE (aligned_size) != sizetype)
- aligned_size = fold_convert (sizetype, aligned_size);
- return size_binop (MULT_EXPR, aligned_size,
- size_int (TYPE_ALIGN_UNIT (elmt_type)));
+ aligned_size = fold_convert_loc (loc, sizetype, aligned_size);
+ return size_binop_loc (loc, MULT_EXPR, aligned_size,
+ size_int (TYPE_ALIGN_UNIT (elmt_type)));
}
/* Otherwise, take the size from that of the element type. Substitute
{
tree aligned_offset = TREE_OPERAND (exp, 2);
tree field = TREE_OPERAND (exp, 1);
+ location_t loc = EXPR_LOCATION (exp);
/* If an offset was specified in the COMPONENT_REF, it's the offset measured
in units of DECL_OFFSET_ALIGN / BITS_PER_UNIT. So multiply by that
/* ??? tree_ssa_useless_type_conversion will eliminate casts to
sizetype from another type of the same width and signedness. */
if (TREE_TYPE (aligned_offset) != sizetype)
- aligned_offset = fold_convert (sizetype, aligned_offset);
- return size_binop (MULT_EXPR, aligned_offset,
- size_int (DECL_OFFSET_ALIGN (field) / BITS_PER_UNIT));
+ aligned_offset = fold_convert_loc (loc, sizetype, aligned_offset);
+ return size_binop_loc (loc, MULT_EXPR, aligned_offset,
+ size_int (DECL_OFFSET_ALIGN (field)
+ / BITS_PER_UNIT));
}
/* Otherwise, take the offset from that of the field. Substitute
op2 = XEXP (value, 1);
if (!CONSTANT_P (op2) && !(REG_P (op2) && op2 != subtarget))
subtarget = 0;
- if (code == MINUS && GET_CODE (op2) == CONST_INT)
+ if (code == MINUS && CONST_INT_P (op2))
{
code = PLUS;
op2 = negate_rtx (GET_MODE (value), op2);
constant first and then add the other value. This allows virtual
register instantiation to simply modify the constant rather than
creating another one around this addition. */
- if (code == PLUS && GET_CODE (op2) == CONST_INT
+ if (code == PLUS && CONST_INT_P (op2)
&& GET_CODE (XEXP (value, 0)) == PLUS
&& REG_P (XEXP (XEXP (value, 0), 0))
&& REGNO (XEXP (XEXP (value, 0), 0)) >= FIRST_VIRTUAL_REGISTER
tree fn = built_in_decls [BUILT_IN_EMUTLS_GET_ADDRESS];
tree arg = build_fold_addr_expr_with_type (emuvar, ptr_type_node);
tree arglist = build_tree_list (NULL_TREE, arg);
- tree call = build_function_call_expr (fn, arglist);
+ tree call = build_function_call_expr (UNKNOWN_LOCATION, fn, arglist);
return fold_convert (build_pointer_type (TREE_TYPE (var)), call);
}
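build_function_call_expr acquired a leading location_t parameter in the same location-passing series; UNKNOWN_LOCATION fits here because this call is synthesized and has no source position of its own. The new shape:

    /* builtins.c */
    tree build_function_call_expr (location_t, tree, tree);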
\f
bool reduce_bit_field;
gimple subexp0_def, subexp1_def;
tree top0, top1;
+ location_t loc = EXPR_LOCATION (exp);
#define REDUCE_BIT_FIELD(expr) (reduce_bit_field \
? reduce_to_bit_field_precision ((expr), \
target, \
&& TREE_CODE (exp) == VAR_DECL
&& DECL_THREAD_LOCAL_P (exp))
{
- exp = build_fold_indirect_ref (emutls_var_address (exp));
+ exp = build_fold_indirect_ref_loc (loc, emutls_var_address (exp));
return expand_expr_real_1 (exp, target, tmode, modifier, NULL);
}
/* Get the signedness used for this variable. Ensure we get the
same mode we got when the variable was declared. */
- pmode = promote_mode (type, DECL_MODE (exp), &unsignedp,
- (TREE_CODE (exp) == RESULT_DECL
- || TREE_CODE (exp) == PARM_DECL) ? 1 : 0);
+ pmode = promote_decl_mode (exp, &unsignedp);
gcc_assert (GET_MODE (decl_rtl) == pmode);
temp = gen_lowpart_SUBREG (mode, decl_rtl);
{
tree type_for_mode = lang_hooks.types.type_for_mode (mode, 1);
if (type_for_mode)
- tmp = fold_unary (VIEW_CONVERT_EXPR, type_for_mode, exp);
+ tmp = fold_unary_loc (loc, VIEW_CONVERT_EXPR, type_for_mode, exp);
}
if (!tmp)
tmp = build_constructor_from_list (type,
{
tree index1 = index;
tree low_bound = array_ref_low_bound (exp);
- index1 = fold_convert (sizetype, TREE_OPERAND (exp, 1));
+ index1 = fold_convert_loc (loc, sizetype,
+ TREE_OPERAND (exp, 1));
/* Optimize the special-case of a zero lower bound.
+INDEX), which becomes (ARRAY+255+INDEX). Oops!) */
if (! integer_zerop (low_bound))
- index1 = size_diffop (index1, fold_convert (sizetype,
- low_bound));
+ index1 = size_diffop_loc (loc, index1,
+ fold_convert_loc (loc, sizetype,
+ low_bound));
if (0 > compare_tree_int (index1,
TREE_STRING_LENGTH (init)))
/* Make sure to sign-extend the sizetype offset in a POINTER_PLUS_EXPR
if sizetype precision is smaller than pointer precision. */
if (TYPE_PRECISION (sizetype) < TYPE_PRECISION (type))
- exp = build2 (PLUS_EXPR, type,
- TREE_OPERAND (exp, 0),
- fold_convert (type,
- fold_convert (ssizetype,
- TREE_OPERAND (exp, 1))));
+ exp
+ = build2 (PLUS_EXPR, type,
+ TREE_OPERAND (exp, 0),
+ fold_convert_loc (loc, type,
+ fold_convert_loc (loc, ssizetype,
+ TREE_OPERAND (exp, 1))));
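A minimal sketch of why the ssizetype detour matters, assuming a 32-bit sizetype, 64-bit pointers, and an LP64 host for the illustration:

    unsigned int off = 0xffffffffu;  /* sizetype offset meaning -1    */
    long wrong = (long) off;         /* zero-extends: 4294967295      */
    long right = (long) (int) off;   /* via signed type: -1           */

Converting through ssizetype first makes the widening to pointer precision a sign-extension, which is what POINTER_PLUS_EXPR's semantics require.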
case PLUS_EXPR:
/* Check if this is a case for multiplication and addition. */
/* If the last operand is a CONST_INT, use plus_constant of
the negated constant. Else make the MINUS. */
- if (GET_CODE (op1) == CONST_INT)
+ if (CONST_INT_P (op1))
return REDUCE_BIT_FIELD (plus_constant (op0, - INTVAL (op1)));
else
return REDUCE_BIT_FIELD (gen_rtx_MINUS (mode, op0, op1));
subtarget, &op0, &op1, modifier);
/* Convert A - const to A + (-const). */
- if (GET_CODE (op1) == CONST_INT)
+ if (CONST_INT_P (op1))
{
op1 = negate_rtx (mode, op1);
return REDUCE_BIT_FIELD (simplify_gen_binary (PLUS, mode, op0, op1));
temp = do_store_flag (exp,
modifier != EXPAND_STACK_PARM ? target : NULL_RTX,
tmode != VOIDmode ? tmode : mode);
- if (temp != 0)
+ if (temp)
return temp;
- /* For foo != 0, load foo, and if it is nonzero load 1 instead. */
- if (code == NE_EXPR && integer_zerop (TREE_OPERAND (exp, 1))
- && original_target
- && REG_P (original_target)
- && (GET_MODE (original_target)
- == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
- {
- temp = expand_expr (TREE_OPERAND (exp, 0), original_target,
- VOIDmode, EXPAND_NORMAL);
-
- /* If temp is constant, we can just compute the result. */
- if (GET_CODE (temp) == CONST_INT)
- {
- if (INTVAL (temp) != 0)
- emit_move_insn (target, const1_rtx);
- else
- emit_move_insn (target, const0_rtx);
-
- return target;
- }
-
- if (temp != original_target)
- {
- enum machine_mode mode1 = GET_MODE (temp);
- if (mode1 == VOIDmode)
- mode1 = tmode != VOIDmode ? tmode : mode;
-
- temp = copy_to_mode_reg (mode1, temp);
- }
-
- op1 = gen_label_rtx ();
- emit_cmp_and_jump_insns (temp, const0_rtx, EQ, NULL_RTX,
- GET_MODE (temp), unsignedp, op1);
- emit_move_insn (temp, const1_rtx);
- emit_label (op1);
- return temp;
- }
+ /* Use a compare and a jump for BLKmode comparisons, or for function
+ type comparisons if HAVE_canonicalize_funcptr_for_compare. */
- /* If no set-flag instruction, must generate a conditional store
- into a temporary variable. Drop through and handle this
- like && and ||. */
/* Although TRUTH_{AND,OR}IF_EXPR aren't present in GIMPLE, they
are occasionally created by folding during expansion. */
case TRUTH_ANDIF_EXPR:
if (target && GET_MODE (target) != GET_MODE (exp))
target = 0;
/* For constant values, reduce using build_int_cst_type. */
- if (GET_CODE (exp) == CONST_INT)
+ if (CONST_INT_P (exp))
{
HOST_WIDE_INT value = INTVAL (exp);
tree t = build_int_cst_type (type, value);
}
\f
/* Generate code to calculate EXP using a store-flag instruction
- and return an rtx for the result. EXP is either a comparison
- or a TRUTH_NOT_EXPR whose operand is a comparison.
+ and return an rtx for the result. EXP is a comparison.
If TARGET is nonzero, store the result there if convenient.
tree arg0, arg1, type;
tree tem;
enum machine_mode operand_mode;
- int invert = 0;
int unsignedp;
rtx op0, op1;
rtx subtarget = target;
- rtx result, label;
-
- /* If this is a TRUTH_NOT_EXPR, set a flag indicating we must invert the
- result at the end. We can't simply invert the test since it would
- have already been inverted if it were valid. This case occurs for
- some floating-point comparisons. */
-
- if (TREE_CODE (exp) == TRUTH_NOT_EXPR)
- invert = 1, exp = TREE_OPERAND (exp, 0);
+ location_t loc = EXPR_LOCATION (exp);
arg0 = TREE_OPERAND (exp, 0);
arg1 = TREE_OPERAND (exp, 1);
&& integer_pow2p (TREE_OPERAND (arg0, 1)))
{
tree type = lang_hooks.types.type_for_mode (mode, unsignedp);
- return expand_expr (fold_single_bit_test (code == NE ? NE_EXPR : EQ_EXPR,
+ return expand_expr (fold_single_bit_test (loc,
+ code == NE ? NE_EXPR : EQ_EXPR,
arg0, arg1, type),
target, VOIDmode, EXPAND_NORMAL);
}
- /* Now see if we are likely to be able to do this. Return if not. */
- if (! can_compare_p (code, operand_mode, ccp_store_flag))
- return 0;
-
if (! get_subtarget (target)
|| GET_MODE (subtarget) != operand_mode)
subtarget = 0;
if (target == 0)
target = gen_reg_rtx (mode);
- result = emit_store_flag (target, code, op0, op1,
- operand_mode, unsignedp, 1);
-
- if (result)
- {
- if (invert)
- result = expand_binop (mode, xor_optab, result, const1_rtx,
- result, 0, OPTAB_LIB_WIDEN);
- return result;
- }
-
- /* If this failed, we have to do this with set/compare/jump/set code. */
- if (!REG_P (target)
- || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
- target = gen_reg_rtx (GET_MODE (target));
-
- emit_move_insn (target, invert ? const0_rtx : const1_rtx);
- label = gen_label_rtx ();
- do_compare_rtx_and_jump (op0, op1, code, unsignedp, operand_mode, NULL_RTX,
- NULL_RTX, label);
-
- emit_move_insn (target, invert ? const1_rtx : const0_rtx);
- emit_label (label);
-
- return target;
+ /* Try a cstore if possible. */
+ return emit_store_flag_force (target, code, op0, op1,
+ operand_mode, unsignedp, 1);
}
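The deleted set/compare/jump/set fallback is exactly what emit_store_flag_force provides: it behaves like emit_store_flag but never returns NULL_RTX, synthesizing the branchy sequence itself when no cstore pattern applies. That is also why the can_compare_p early return above could go. Its contract, from expmed.c:

    extern rtx emit_store_flag_force (rtx target, enum rtx_code code,
                                      rtx op0, rtx op1,
                                      enum machine_mode mode,
                                      int unsignedp, int normalizep);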
\f