+ if (mode == GET_MODE_2XWIDER_MODE (innermode))
+ {
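+ /* MODE is exactly twice as wide as the operand mode.  If the target
+ has a widening multiply of the required signedness (this_optab),
+ expand the narrow operands and let the common binop path emit it.  */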
+ if (optab_handler (this_optab, mode)->insn_code != CODE_FOR_nothing)
+ {
+ if (TREE_CODE (subexp1) == INTEGER_CST)
+ expand_operands (top0, subexp1, NULL_RTX, &op0, &op1,
+ EXPAND_NORMAL);
+ else
+ expand_operands (top0, top1, NULL_RTX, &op0, &op1,
+ EXPAND_NORMAL);
+ goto binop3;
+ }
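+ /* Otherwise, if only the widening multiply of the opposite
+ signedness is available and we are widening from word_mode, use
+ it and correct the high half of the product afterwards with
+ expand_mult_highpart_adjust.  */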
+ else if (optab_handler (other_optab, mode)->insn_code != CODE_FOR_nothing
+ && innermode == word_mode)
+ {
+ rtx htem, hipart;
+ op0 = expand_normal (top0);
+ if (TREE_CODE (subexp1) == INTEGER_CST)
+ op1 = convert_modes (innermode, mode,
+ expand_normal (subexp1), unsignedp);
+ else
+ op1 = expand_normal (top1);
+ temp = expand_binop (mode, other_optab, op0, op1, target,
+ unsignedp, OPTAB_LIB_WIDEN);
+ hipart = gen_highpart (innermode, temp);
+ htem = expand_mult_highpart_adjust (innermode, hipart,
+ op0, op1, hipart,
+ zextend_p);
+ if (htem != hipart)
+ emit_move_insn (hipart, htem);
+ return REDUCE_BIT_FIELD (temp);
+ }
+ }
+ }
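+ /* No widening multiply pattern applies; expand the original operands
+ and fall back to the general multiply expander.  */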
+ expand_operands (subexp0, subexp1, subtarget, &op0, &op1, EXPAND_NORMAL);
+ return REDUCE_BIT_FIELD (expand_mult (mode, op0, op1, target, unsignedp));
+
+ case TRUNC_DIV_EXPR:
+ case FLOOR_DIV_EXPR:
+ case CEIL_DIV_EXPR:
+ case ROUND_DIV_EXPR:
+ case EXACT_DIV_EXPR:
+ /* If this is a fixed-point operation, then we cannot use the code
+ below because "expand_divmod" doesn't support sat/no-sat fixed-point
+ divisions. */
+ if (ALL_FIXED_POINT_MODE_P (mode))
+ goto binop;
+
+ if (modifier == EXPAND_STACK_PARM)
+ target = 0;
+ /* Possible optimization: compute the dividend with EXPAND_SUM;
+ then, if the divisor is constant, we can optimize the case
+ where some terms of the dividend have coefficients divisible by it.  */
+ expand_operands (treeop0, treeop1,
+ subtarget, &op0, &op1, EXPAND_NORMAL);
+ return expand_divmod (0, code, mode, op0, op1, target, unsignedp);
+
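+ /* Division of real (floating-point) operands needs no special handling
+ here; the generic binop path picks the right optab.  */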
+ case RDIV_EXPR:
+ goto binop;
+
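+ /* Remainders also go through expand_divmod; a first argument
+ (rem_flag) of 1 requests the remainder rather than the quotient.  */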
+ case TRUNC_MOD_EXPR:
+ case FLOOR_MOD_EXPR:
+ case CEIL_MOD_EXPR:
+ case ROUND_MOD_EXPR:
+ if (modifier == EXPAND_STACK_PARM)
+ target = 0;
+ expand_operands (treeop0, treeop1,
+ subtarget, &op0, &op1, EXPAND_NORMAL);
+ return expand_divmod (1, code, mode, op0, op1, target, unsignedp);
+
+ case FIXED_CONVERT_EXPR:
+ op0 = expand_normal (treeop0);
+ if (target == 0 || modifier == EXPAND_STACK_PARM)
+ target = gen_reg_rtx (mode);
+
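+ /* Use the unsigned form of the conversion when either end of the
+ conversion is an unsigned integer type; TYPE_SATURATING selects
+ between the saturating and non-saturating variants.  */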
+ if ((TREE_CODE (TREE_TYPE (treeop0)) == INTEGER_TYPE
+ && TYPE_UNSIGNED (TREE_TYPE (treeop0)))
+ || (TREE_CODE (type) == INTEGER_TYPE && TYPE_UNSIGNED (type)))
+ expand_fixed_convert (target, op0, 1, TYPE_SATURATING (type));
+ else
+ expand_fixed_convert (target, op0, 0, TYPE_SATURATING (type));
+ return target;
+
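+ /* Conversion of a floating-point value to an integer, truncating
+ toward zero.  */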
+ case FIX_TRUNC_EXPR:
+ op0 = expand_normal (treeop0);
+ if (target == 0 || modifier == EXPAND_STACK_PARM)
+ target = gen_reg_rtx (mode);
+ expand_fix (target, op0, unsignedp);
+ return target;
+
+ case FLOAT_EXPR:
+ op0 = expand_normal (treeop0);
+ if (target == 0 || modifier == EXPAND_STACK_PARM)
+ target = gen_reg_rtx (mode);
+ /* expand_float can't figure out what to do if FROM has VOIDmode.
+ So give it the correct mode. With -O, cse will optimize this. */
+ if (GET_MODE (op0) == VOIDmode)
+ op0 = copy_to_mode_reg (TYPE_MODE (TREE_TYPE (treeop0)),
+ op0);
+ expand_float (target, op0,
+ TYPE_UNSIGNED (TREE_TYPE (treeop0)));
+ return target;
+
+ case NEGATE_EXPR:
+ op0 = expand_expr (treeop0, subtarget,
+ VOIDmode, EXPAND_NORMAL);
+ if (modifier == EXPAND_STACK_PARM)
+ target = 0;
+ temp = expand_unop (mode,
+ optab_for_tree_code (NEGATE_EXPR, type,
+ optab_default),
+ op0, target, 0);
+ gcc_assert (temp);
+ return REDUCE_BIT_FIELD (temp);
+
+ case ABS_EXPR:
+ op0 = expand_expr (treeop0, subtarget,
+ VOIDmode, EXPAND_NORMAL);
+ if (modifier == EXPAND_STACK_PARM)
+ target = 0;
+
+ /* ABS_EXPR is not valid for complex arguments. */
+ gcc_assert (GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
+ && GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT);
+
+ /* Unsigned abs is simply the operand. Testing here means we don't
+ risk generating incorrect code below. */
+ if (TYPE_UNSIGNED (type))
+ return op0;
+
+ return expand_abs (mode, op0, target, unsignedp,
+ safe_from_p (target, treeop0, 1));
+
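+ /* Make sure TARGET is something we can safely compute into and
+ compare against: not a volatile MEM, not the wrong mode, and not
+ a hard register.  */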
+ case MAX_EXPR:
+ case MIN_EXPR:
+ target = original_target;
+ if (target == 0
+ || modifier == EXPAND_STACK_PARM
+ || (MEM_P (target) && MEM_VOLATILE_P (target))
+ || GET_MODE (target) != mode
+ || (REG_P (target)
+ && REGNO (target) < FIRST_PSEUDO_REGISTER))
+ target = gen_reg_rtx (mode);
+ expand_operands (treeop0, treeop1,
+ target, &op0, &op1, EXPAND_NORMAL);
+
+ /* First try to do it with a special MIN or MAX instruction.
+ If that does not win, use a conditional jump to select the proper
+ value. */
+ this_optab = optab_for_tree_code (code, type, optab_default);
+ temp = expand_binop (mode, this_optab, op0, op1, target, unsignedp,
+ OPTAB_WIDEN);
+ if (temp != 0)
+ return temp;
+
+ /* At this point, a MEM target is no longer useful; we will get better
+ code without it. */
+
+ if (! REG_P (target))
+ target = gen_reg_rtx (mode);
+
+ /* If op1 was placed in target, swap op0 and op1. */
+ if (target != op0 && target == op1)
+ {
+ temp = op0;
+ op0 = op1;
+ op1 = temp;
+ }
+
+ /* We generate better code and avoid problems with op1 mentioning
+ target by forcing op1 into a pseudo if it isn't a constant. */
+ if (! CONSTANT_P (op1))
+ op1 = force_reg (mode, op1);
+
+ {
+ enum rtx_code comparison_code;
+ rtx cmpop1 = op1;
+
+ if (code == MAX_EXPR)
+ comparison_code = unsignedp ? GEU : GE;
+ else
+ comparison_code = unsignedp ? LEU : LE;
+
+ /* Canonicalize to comparisons against 0. */
+ if (op1 == const1_rtx)
+ {
+ /* Converting (a >= 1 ? a : 1) into (a > 0 ? a : 1)
+ or (a != 0 ? a : 1) for unsigned.
+ For MIN we are safe converting (a <= 1 ? a : 1)
+ into (a <= 0 ? a : 1).  */
+ cmpop1 = const0_rtx;
+ if (code == MAX_EXPR)
+ comparison_code = unsignedp ? NE : GT;
+ }
+ if (op1 == constm1_rtx && !unsignedp)
+ {
+ /* Converting (a >= -1 ? a : -1) into (a >= 0 ? a : -1)
+ and (a <= -1 ? a : -1) into (a < 0 ? a : -1).  */
+ cmpop1 = const0_rtx;
+ if (code == MIN_EXPR)
+ comparison_code = LT;
+ }
+#ifdef HAVE_conditional_move
+ /* Use a conditional move if possible. */
+ if (can_conditionally_move_p (mode))
+ {
+ rtx insn;
+
+ /* ??? Same problem as in expmed.c: emit_conditional_move
+ forces a stack adjustment via compare_from_rtx, and we
+ lose the stack adjustment if the sequence we are about
+ to create is discarded. */
+ do_pending_stack_adjust ();
+
+ start_sequence ();
+
+ /* Try to emit the conditional move. */
+ insn = emit_conditional_move (target, comparison_code,
+ op0, cmpop1, mode,
+ op0, op1, mode,
+ unsignedp);
+
+ /* If we could do the conditional move, emit the sequence,
+ and return. */
+ if (insn)
+ {
+ rtx seq = get_insns ();
+ end_sequence ();
+ emit_insn (seq);
+ return target;
+ }
+
+ /* Otherwise discard the sequence and fall back to code with
+ branches. */
+ end_sequence ();
+ }
+#endif
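+ /* Fall back to a compare and branch: put op0 in TARGET, then skip
+ the move of op1 when op0 already satisfies the MIN/MAX comparison.  */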
+ if (target != op0)
+ emit_move_insn (target, op0);
+
+ temp = gen_label_rtx ();
+ do_compare_rtx_and_jump (target, cmpop1, comparison_code,
+ unsignedp, mode, NULL_RTX, NULL_RTX, temp,
+ -1);
+ }
+ emit_move_insn (target, op1);
+ emit_label (temp);
+ return target;
+
+ case BIT_NOT_EXPR:
+ op0 = expand_expr (treeop0, subtarget,
+ VOIDmode, EXPAND_NORMAL);
+ if (modifier == EXPAND_STACK_PARM)
+ target = 0;
+ temp = expand_unop (mode, one_cmpl_optab, op0, target, 1);
+ gcc_assert (temp);
+ return temp;
+
+ /* ??? Can optimize bitwise operations with one arg constant.
+ Can optimize (a bitwise1 n) bitwise2 (a bitwise3 b)
+ and (a bitwise1 b) bitwise2 b (etc.),
+ but that is probably not worthwhile.  */
+
+ /* BIT_AND_EXPR is for bitwise ANDing. TRUTH_AND_EXPR is for ANDing two
+ boolean values when both operands are to be evaluated in all cases. In
+ general it is fastest to do TRUTH_AND_EXPR by computing both operands
+ as actual zero-or-one values and then bitwise ANDing them. In cases where
+ there cannot be any side effects, better code would be made by
+ treating TRUTH_AND_EXPR like TRUTH_ANDIF_EXPR; but the question is
+ how to recognize those cases. */
+
+ case TRUTH_AND_EXPR:
+ code = BIT_AND_EXPR;
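+ /* FALLTHRU */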
+ case BIT_AND_EXPR:
+ goto binop;
+
+ case TRUTH_OR_EXPR:
+ code = BIT_IOR_EXPR;
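+ /* FALLTHRU */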
+ case BIT_IOR_EXPR:
+ goto binop;
+
+ case TRUTH_XOR_EXPR:
+ code = BIT_XOR_EXPR;
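+ /* FALLTHRU */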
+ case BIT_XOR_EXPR:
+ goto binop;
+
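+ /* A rotate in a mode wider than the type's precision would bring
+ padding bits into the result, so only full-precision (or vector)
+ rotates are expanded here.  */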
+ case LROTATE_EXPR:
+ case RROTATE_EXPR:
+ gcc_assert (VECTOR_MODE_P (TYPE_MODE (type))
+ || (GET_MODE_PRECISION (TYPE_MODE (type))
+ == TYPE_PRECISION (type)));
+ /* fall through */
+
+ case LSHIFT_EXPR:
+ case RSHIFT_EXPR:
+ /* If this is a fixed-point operation, then we cannot use the code
+ below because "expand_shift" doesn't support sat/no-sat fixed-point
+ shifts. */
+ if (ALL_FIXED_POINT_MODE_P (mode))
+ goto binop;
+
+ if (! safe_from_p (subtarget, treeop1, 1))
+ subtarget = 0;
+ if (modifier == EXPAND_STACK_PARM)
+ target = 0;
+ op0 = expand_expr (treeop0, subtarget,
+ VOIDmode, EXPAND_NORMAL);
+ temp = expand_shift (code, mode, op0, treeop1, target,
+ unsignedp);
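+ /* A left shift can move bits above the precision of a
+ reduced-precision type, so truncate the result back if needed.  */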
+ if (code == LSHIFT_EXPR)
+ temp = REDUCE_BIT_FIELD (temp);
+ return temp;