/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "flags.h"
#include "insn-config.h"
#include "expr.h"
#include "optabs.h"
#include "real.h"
#include "recog.h"
#include "langhooks.h"
#include "df.h"
#include "target.h"
static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT, rtx);
static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT, rtx);
static rtx extract_fixed_bit_field (enum machine_mode, rtx,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, rtx, int);
static rtx mask_rtx (enum machine_mode, int, int, int);
static rtx lshift_value (enum machine_mode, rtx, int, int);
static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, int);
static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx);
static rtx expand_smod_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
static rtx expand_sdiv_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
/* Test whether a value is zero or a power of two.  */
#define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
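/* For instance, x == 8 (binary 1000) gives 1000 & 0111 == 0, while
   x == 6 (binary 0110) gives 0110 & 0101 == 0100 != 0.  Note that
   x == 0 also passes the test, hence the _OR_ZERO in the name.  */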

/* Nonzero means divides or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   sequences.  */

static bool sdiv_pow2_cheap[2][NUM_MACHINE_MODES];
static bool smod_pow2_cheap[2][NUM_MACHINE_MODES];

#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
#endif

/* For compilers that support multiple targets with different word sizes,
   MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD.  An example
   is the H8/300(H) compiler.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif

/* Reduce conditional compilation elsewhere.  */
#ifndef HAVE_insv
#define HAVE_insv	0
#define CODE_FOR_insv	CODE_FOR_nothing
#define gen_insv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extv
#define HAVE_extv	0
#define CODE_FOR_extv	CODE_FOR_nothing
#define gen_extv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extzv
#define HAVE_extzv	0
#define CODE_FOR_extzv	CODE_FOR_nothing
#define gen_extzv(a,b,c,d) NULL_RTX
#endif

/* Cost of various pieces of RTL.  Note that some of these are indexed by
   shift count and some by mode.  */
static int zero_cost[2];
static int add_cost[2][NUM_MACHINE_MODES];
static int neg_cost[2][NUM_MACHINE_MODES];
static int shift_cost[2][NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
static int shiftadd_cost[2][NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
static int shiftsub_cost[2][NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
static int mul_cost[2][NUM_MACHINE_MODES];
static int sdiv_cost[2][NUM_MACHINE_MODES];
static int udiv_cost[2][NUM_MACHINE_MODES];
static int mul_widen_cost[2][NUM_MACHINE_MODES];
static int mul_highpart_cost[2][NUM_MACHINE_MODES];

void
init_expmed (void)
{
  struct
  {
    struct rtx_def reg;		rtunion reg_fld[2];
    struct rtx_def plus;	rtunion plus_fld1;
    struct rtx_def neg;
    struct rtx_def mult;	rtunion mult_fld1;
    struct rtx_def sdiv;	rtunion sdiv_fld1;
    struct rtx_def udiv;	rtunion udiv_fld1;
    struct rtx_def zext;
    struct rtx_def sdiv_32;	rtunion sdiv_32_fld1;
    struct rtx_def smod_32;	rtunion smod_32_fld1;
    struct rtx_def wide_mult;	rtunion wide_mult_fld1;
    struct rtx_def wide_lshr;	rtunion wide_lshr_fld1;
    struct rtx_def wide_trunc;
    struct rtx_def shift;	rtunion shift_fld1;
    struct rtx_def shift_mult;	rtunion shift_mult_fld1;
    struct rtx_def shift_add;	rtunion shift_add_fld1;
    struct rtx_def shift_sub;	rtunion shift_sub_fld1;
  } all;

  rtx pow2[MAX_BITS_PER_WORD];
  rtx cint[MAX_BITS_PER_WORD];
  int m, n;
  enum machine_mode mode, wider_mode;
  int speed;

  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      pow2[m] = GEN_INT ((HOST_WIDE_INT) 1 << m);
      cint[m] = GEN_INT (m);
    }

  memset (&all, 0, sizeof all);

  PUT_CODE (&all.reg, REG);
  /* Avoid using hard regs in ways which may be unsupported.  */
  SET_REGNO (&all.reg, LAST_VIRTUAL_REGISTER + 1);

  PUT_CODE (&all.plus, PLUS);
  XEXP (&all.plus, 0) = &all.reg;
  XEXP (&all.plus, 1) = &all.reg;

  PUT_CODE (&all.neg, NEG);
  XEXP (&all.neg, 0) = &all.reg;

  PUT_CODE (&all.mult, MULT);
  XEXP (&all.mult, 0) = &all.reg;
  XEXP (&all.mult, 1) = &all.reg;

  PUT_CODE (&all.sdiv, DIV);
  XEXP (&all.sdiv, 0) = &all.reg;
  XEXP (&all.sdiv, 1) = &all.reg;

  PUT_CODE (&all.udiv, UDIV);
  XEXP (&all.udiv, 0) = &all.reg;
  XEXP (&all.udiv, 1) = &all.reg;

  PUT_CODE (&all.sdiv_32, DIV);
  XEXP (&all.sdiv_32, 0) = &all.reg;
  XEXP (&all.sdiv_32, 1) = 32 < MAX_BITS_PER_WORD ? cint[32] : GEN_INT (32);

  PUT_CODE (&all.smod_32, MOD);
  XEXP (&all.smod_32, 0) = &all.reg;
  XEXP (&all.smod_32, 1) = XEXP (&all.sdiv_32, 1);

  PUT_CODE (&all.zext, ZERO_EXTEND);
  XEXP (&all.zext, 0) = &all.reg;

  PUT_CODE (&all.wide_mult, MULT);
  XEXP (&all.wide_mult, 0) = &all.zext;
  XEXP (&all.wide_mult, 1) = &all.zext;

  PUT_CODE (&all.wide_lshr, LSHIFTRT);
  XEXP (&all.wide_lshr, 0) = &all.wide_mult;

  PUT_CODE (&all.wide_trunc, TRUNCATE);
  XEXP (&all.wide_trunc, 0) = &all.wide_lshr;

  PUT_CODE (&all.shift, ASHIFT);
  XEXP (&all.shift, 0) = &all.reg;

  PUT_CODE (&all.shift_mult, MULT);
  XEXP (&all.shift_mult, 0) = &all.reg;

  PUT_CODE (&all.shift_add, PLUS);
  XEXP (&all.shift_add, 0) = &all.shift_mult;
  XEXP (&all.shift_add, 1) = &all.reg;

  PUT_CODE (&all.shift_sub, MINUS);
  XEXP (&all.shift_sub, 0) = &all.shift_mult;
  XEXP (&all.shift_sub, 1) = &all.reg;

  for (speed = 0; speed < 2; speed++)
    {
      crtl->maybe_hot_insn_p = speed;
      zero_cost[speed] = rtx_cost (const0_rtx, 0, speed);

      for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
	   mode != VOIDmode;
	   mode = GET_MODE_WIDER_MODE (mode))
	{
	  PUT_MODE (&all.reg, mode);
	  PUT_MODE (&all.plus, mode);
	  PUT_MODE (&all.neg, mode);
	  PUT_MODE (&all.mult, mode);
	  PUT_MODE (&all.sdiv, mode);
	  PUT_MODE (&all.udiv, mode);
	  PUT_MODE (&all.sdiv_32, mode);
	  PUT_MODE (&all.smod_32, mode);
	  PUT_MODE (&all.wide_trunc, mode);
	  PUT_MODE (&all.shift, mode);
	  PUT_MODE (&all.shift_mult, mode);
	  PUT_MODE (&all.shift_add, mode);
	  PUT_MODE (&all.shift_sub, mode);

	  add_cost[speed][mode] = rtx_cost (&all.plus, SET, speed);
	  neg_cost[speed][mode] = rtx_cost (&all.neg, SET, speed);
	  mul_cost[speed][mode] = rtx_cost (&all.mult, SET, speed);
	  sdiv_cost[speed][mode] = rtx_cost (&all.sdiv, SET, speed);
	  udiv_cost[speed][mode] = rtx_cost (&all.udiv, SET, speed);

	  sdiv_pow2_cheap[speed][mode] = (rtx_cost (&all.sdiv_32, SET, speed)
					  <= 2 * add_cost[speed][mode]);
	  smod_pow2_cheap[speed][mode] = (rtx_cost (&all.smod_32, SET, speed)
					  <= 4 * add_cost[speed][mode]);
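
	  /* The thresholds above are heuristics: a division or modulus
	     by a power of two counts as "cheap" when the target expands
	     it for no more than two (respectively four) additions;
	     otherwise the pow2 paths fall back to other sequences.
	     The factors 2 and 4 are tuning constants.  */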

	  wider_mode = GET_MODE_WIDER_MODE (mode);
	  if (wider_mode != VOIDmode)
	    {
	      PUT_MODE (&all.zext, wider_mode);
	      PUT_MODE (&all.wide_mult, wider_mode);
	      PUT_MODE (&all.wide_lshr, wider_mode);
	      XEXP (&all.wide_lshr, 1) = GEN_INT (GET_MODE_BITSIZE (mode));

	      mul_widen_cost[speed][wider_mode]
		= rtx_cost (&all.wide_mult, SET, speed);
	      mul_highpart_cost[speed][mode]
		= rtx_cost (&all.wide_trunc, SET, speed);
	    }

	  shift_cost[speed][mode][0] = 0;
	  shiftadd_cost[speed][mode][0] = shiftsub_cost[speed][mode][0]
	    = add_cost[speed][mode];

	  n = MIN (MAX_BITS_PER_WORD, GET_MODE_BITSIZE (mode));
	  for (m = 1; m < n; m++)
	    {
	      XEXP (&all.shift, 1) = cint[m];
	      XEXP (&all.shift_mult, 1) = pow2[m];

	      shift_cost[speed][mode][m] = rtx_cost (&all.shift, SET, speed);
	      shiftadd_cost[speed][mode][m] = rtx_cost (&all.shift_add, SET, speed);
	      shiftsub_cost[speed][mode][m] = rtx_cost (&all.shift_sub, SET, speed);
	    }
	}
    }

  default_rtl_profile ();
}

/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (enum machine_mode mode, rtx x)
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}

/* Report on the availability of insv/extv/extzv and the desired mode
   of each of their operands.  Returns MAX_MACHINE_MODE if HAVE_foo
   is false; else the mode of the specified operand.  If OPNO is -1,
   all the caller cares about is whether the insn is available.  */

enum machine_mode
mode_for_extraction (enum extraction_pattern pattern, int opno)
{
  const struct insn_data *data;

  switch (pattern)
    {
    case EP_insv:
      if (HAVE_insv)
	{
	  data = &insn_data[CODE_FOR_insv];
	  break;
	}
      return MAX_MACHINE_MODE;

    case EP_extv:
      if (HAVE_extv)
	{
	  data = &insn_data[CODE_FOR_extv];
	  break;
	}
      return MAX_MACHINE_MODE;

    case EP_extzv:
      if (HAVE_extzv)
	{
	  data = &insn_data[CODE_FOR_extzv];
	  break;
	}
      return MAX_MACHINE_MODE;

    default:
      gcc_unreachable ();
    }

  if (opno == -1)
    return VOIDmode;

  /* Everyone who uses this function used to follow it with
     if (result == VOIDmode) result = word_mode; */
  if (data->operand[opno].mode == VOIDmode)
    return word_mode;
  return data->operand[opno].mode;
}

/* Return true if X, of mode MODE, matches the predicate for operand
   OPNO of instruction ICODE.  Allow volatile memories, regardless of
   the ambient volatile_ok setting.  */

static bool
check_predicate_volatile_ok (enum insn_code icode, int opno,
			     rtx x, enum machine_mode mode)
{
  bool save_volatile_ok, result;

  save_volatile_ok = volatile_ok;
  volatile_ok = true;
  result = insn_data[(int) icode].operand[opno].predicate (x, mode);
  volatile_ok = save_volatile_ok;
  return result;
}

/* A subroutine of store_bit_field, with the same arguments.  Return true
   if the operation could be implemented.

   If FALLBACK_P is true, fall back to store_fixed_bit_field if we have
   no other way of implementing the operation.  If FALLBACK_P is false,
   return false instead.  */

static bool
store_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
		   unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
		   rtx value, bool fallback_p)
{
  unsigned int unit
    = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset, bitpos;
  rtx op0 = str_rtx;
  int byte_offset;
  rtx orig_value;

  enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
	 but I think that is a mistake.  WORDS_BIG_ENDIAN is
	 meaningful at a much higher level; when structures are copied
	 between memory and regs, the higher-numbered regs
	 always get higher addresses.  */
      int inner_mode_size = GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)));
      int outer_mode_size = GET_MODE_SIZE (GET_MODE (op0));

      byte_offset = 0;

      /* Paradoxical subregs need special handling on big endian machines.  */
      if (SUBREG_BYTE (op0) == 0 && inner_mode_size < outer_mode_size)
	{
	  int difference = inner_mode_size - outer_mode_size;

	  if (WORDS_BIG_ENDIAN)
	    byte_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    byte_offset += difference % UNITS_PER_WORD;
	}
      else
	byte_offset = SUBREG_BYTE (op0);

      bitnum += byte_offset * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }

  /* No action is needed if the target is a register and if the field
     lies completely outside that register.  This can occur if the source
     code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
    return true;

  /* Use vec_set patterns for inserting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && (optab_handler (vec_set_optab, GET_MODE (op0))->insn_code
	  != CODE_FOR_nothing)
      && fieldmode == GET_MODE_INNER (GET_MODE (op0))
      && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
      && !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      int icode = (int) optab_handler (vec_set_optab, outermode)->insn_code;
      int pos = bitnum / GET_MODE_BITSIZE (innermode);
      rtx rtxpos = GEN_INT (pos);
      rtx src = value;
      rtx dest = op0;
      rtx pat, seq;
      enum machine_mode mode0 = insn_data[icode].operand[0].mode;
      enum machine_mode mode1 = insn_data[icode].operand[1].mode;
      enum machine_mode mode2 = insn_data[icode].operand[2].mode;

      start_sequence ();

      if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
	src = copy_to_mode_reg (mode1, src);

      if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
	rtxpos = copy_to_mode_reg (mode1, rtxpos);

      /* We could handle this, but we should always be called with a pseudo
	 for our targets and all insns should take them as outputs.  */
      gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
		  && (*insn_data[icode].operand[1].predicate) (src, mode1)
		  && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));
      pat = GEN_FCN (icode) (dest, src, rtxpos);
      seq = get_insns ();
      end_sequence ();
      if (pat)
	{
	  emit_insn (seq);
	  emit_insn (pat);
	  return true;
	}
    }

  /* If the target is a register, overwriting the entire object, or storing
     a full-word or multi-word field can be done with just a SUBREG.

     If the target is memory, storing any naturally aligned field can be
     done with a simple store.  For targets that support fast unaligned
     memory, any naturally sized, unit aligned field can be done directly.  */

  offset = bitnum / unit;
  bitpos = bitnum % unit;
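  /* For example, with a MEM (UNIT == BITS_PER_UNIT == 8), BITNUM == 37
     gives OFFSET == 4 and BITPOS == 5: the field starts five bits into
     the fifth byte.  With a REG, UNIT is BITS_PER_WORD and the same
     arithmetic selects a word and a bit within it.  */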
  byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
		+ (offset * UNITS_PER_WORD);

  if (bitpos == 0
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (!MEM_P (op0)
	  ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
	      || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
	     && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
	  : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
	     || (offset * BITS_PER_UNIT % bitsize == 0
		 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
    {
      if (MEM_P (op0))
	op0 = adjust_address (op0, fieldmode, offset);
      else if (GET_MODE (op0) != fieldmode)
	op0 = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0),
				   byte_offset);
      emit_move_insn (op0, value);
      return true;
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  This must come after the entire register case above,
     since that case is valid for any mode.  The following cases are only
     valid for integral modes.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
	if (MEM_P (op0))
	  op0 = adjust_address (op0, imode, 0);
	else
	  {
	    gcc_assert (imode != BLKmode);
	    op0 = gen_lowpart (imode, op0);
	  }
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (MEM_P (op0))
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && !MEM_P (op0)
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (!MEM_P (op0)
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (optab_handler (movstrict_optab, fieldmode)->insn_code
	  != CODE_FOR_nothing))
    {
      int icode = optab_handler (movstrict_optab, fieldmode)->insn_code;
      rtx insn;
      rtx start = get_last_insn ();

      /* Get appropriate low part of the value being stored.  */
      if (GET_CODE (value) == CONST_INT || REG_P (value))
	value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
		 || GET_CODE (value) == LABEL_REF
		 || GET_CODE (value) == CONST))
	value = convert_to_mode (fieldmode, value, 0);

      if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
	value = copy_to_mode_reg (fieldmode, value);

      if (GET_CODE (op0) == SUBREG)
	{
	  /* Else we've got some float mode source being extracted into
	     a different float mode destination -- this combination of
	     subregs results in Severe Tire Damage.  */
	  gcc_assert (GET_MODE (SUBREG_REG (op0)) == fieldmode
		      || GET_MODE_CLASS (fieldmode) == MODE_INT
		      || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
	  op0 = SUBREG_REG (op0);
	}

      insn = (GEN_FCN (icode)
	      (gen_rtx_SUBREG (fieldmode, op0,
			       (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
			       + (offset * UNITS_PER_WORD)),
	       value));
      if (insn)
	{
	  emit_insn (insn);
	  return true;
	}
      delete_insns_since (start);
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
	 in the order least significant first.
	 This is because the most significant word is the one which may
	 be less than full.
	 However, only do that if the value is not BLKmode.  */

      unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;
      rtx last;

      /* This is the mode we must force value to, so that there will be enough
	 subwords to extract.  Note that fieldmode will often (always?) be
	 VOIDmode, because that is what store_field uses to indicate that this
	 is a bit field, but passing VOIDmode to operand_subword_force
	 will result in an abort.  */
      fieldmode = GET_MODE (value);
      if (fieldmode == VOIDmode)
	fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);

      last = get_last_insn ();
      for (i = 0; i < nwords; i++)
	{
	  /* If I is 0, use the low-order word in both field and target;
	     if I is 1, use the next to lowest word; and so on.  */
	  unsigned int wordnum = (backwards ? nwords - i - 1 : i);
	  unsigned int bit_offset = (backwards
				     ? MAX ((int) bitsize - ((int) i + 1)
					    * BITS_PER_WORD,
					    0)
				     : (int) i * BITS_PER_WORD);
	  rtx value_word = operand_subword_force (value, wordnum, fieldmode);

	  if (!store_bit_field_1 (op0, MIN (BITS_PER_WORD,
					    bitsize - i * BITS_PER_WORD),
				  bitnum + bit_offset, word_mode,
				  value_word, fallback_p))
	    {
	      delete_insns_since (last);
	      return false;
	    }
	}
      return true;
    }

  /* From here on we can assume that the field to be stored in is
     a full-word (whatever type that is), since it is shorter than a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (!MEM_P (op0))
    {
      if (offset != 0
	  || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
	{
	  if (!REG_P (op0))
	    {
	      /* Since this is a destination (lvalue), we can't copy
		 it to a pseudo.  We can remove a SUBREG that does not
		 change the size of the operand.  Such a SUBREG may
		 have been added above.  */
	      gcc_assert (GET_CODE (op0) == SUBREG
			  && (GET_MODE_SIZE (GET_MODE (op0))
			      == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))));
	      op0 = SUBREG_REG (op0);
	    }
	  op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
				op0, (offset * UNITS_PER_WORD));
	}
      offset = 0;
    }

  /* If VALUE has a floating-point or complex mode, access it as an
     integer of the corresponding size.  This can occur on a machine
     with 64 bit registers that uses SFmode for float.  It can also
     occur for unaligned float or complex fields.  */

  orig_value = value;
  if (GET_MODE (value) != VOIDmode
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
    {
      value = gen_reg_rtx (int_mode_for_mode (GET_MODE (value)));
      emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
    }

  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && bitsize > 0
      && GET_MODE_BITSIZE (op_mode) >= bitsize
      && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
	    && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode)))
      && insn_data[CODE_FOR_insv].operand[1].predicate (GEN_INT (bitsize),
							VOIDmode)
      && check_predicate_volatile_ok (CODE_FOR_insv, 0, op0, VOIDmode))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;

      /* Add OFFSET into OP0's address.  */
      if (MEM_P (xop0))
	xop0 = adjust_address (xop0, byte_mode, offset);

      /* If xop0 is a register, we need it in OP_MODE
	 to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
	/* We can't just change the mode, because this might clobber op0,
	   and we will need the original value of op0 if insv fails.  */
	xop0 = gen_rtx_SUBREG (op_mode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
      if (REG_P (xop0) && GET_MODE (xop0) != op_mode)
	xop0 = gen_rtx_SUBREG (op_mode, xop0, 0);

      /* On big-endian machines, we count bits from the most significant.
	 If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	xbitpos = unit - bitsize - xbitpos;
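      /* E.g. with UNIT == 32, BITSIZE == 8 and XBITPOS == 4, a
	 most-significant-first position of 4 becomes a
	 least-significant-first position of 32 - 8 - 4 == 20
	 (the flip is its own inverse).  */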

      /* We have been counting XBITPOS within UNIT.
	 Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && !MEM_P (xop0))
	xbitpos += GET_MODE_BITSIZE (op_mode) - unit;

      unit = GET_MODE_BITSIZE (op_mode);

      /* Convert VALUE to op_mode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != op_mode)
	{
	  if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
	    {
	      /* Optimization: Don't bother really extending VALUE
		 if it has all the bits we will actually use.  However,
		 if we must narrow it, be sure we do it correctly.  */

	      if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (op_mode))
		{
		  rtx tmp;

		  tmp = simplify_subreg (op_mode, value1, GET_MODE (value), 0);
		  if (! tmp)
		    tmp = simplify_gen_subreg (op_mode,
					       force_reg (GET_MODE (value),
							  value1),
					       GET_MODE (value), 0);
		  value1 = tmp;
		}
	      else
		value1 = gen_lowpart (op_mode, value1);
	    }
	  else if (GET_CODE (value) == CONST_INT)
	    value1 = gen_int_mode (INTVAL (value), op_mode);
	  else
	    /* Parse phase is supposed to make VALUE's data type
	       match that of the component reference, which is a type
	       at least as wide as the field; so VALUE should have
	       a mode that corresponds to that type.  */
	    gcc_assert (CONSTANT_P (value));
	}

      /* If this machine's insv insists on a register,
	 get VALUE1 into a register.  */
      if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
	     (value1, op_mode)))
	value1 = force_reg (op_mode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
	{
	  emit_insn (pat);
	  return true;
	}
      delete_insns_since (last);
    }

  /* If OP0 is a memory, try copying it to a register and seeing if a
     cheap register alternative is available.  */
  if (HAVE_insv && MEM_P (op0))
    {
      enum machine_mode bestmode;

      /* Get the mode to use for inserting into this field.  If OP0 is
	 BLKmode, get the smallest mode consistent with the alignment.  If
	 OP0 is a non-BLKmode object that is no wider than OP_MODE, use its
	 mode.  Otherwise, use the smallest mode containing the field.  */

      if (GET_MODE (op0) == BLKmode
	  || (op_mode != MAX_MACHINE_MODE
	      && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (op_mode)))
	bestmode = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0),
				  (op_mode == MAX_MACHINE_MODE
				   ? VOIDmode : op_mode),
				  MEM_VOLATILE_P (op0));
      else
	bestmode = GET_MODE (op0);

      if (bestmode != VOIDmode
	  && GET_MODE_SIZE (bestmode) >= GET_MODE_SIZE (fieldmode)
	  && !(SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
	       && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
	{
	  rtx last, tempreg, xop0;
	  unsigned HOST_WIDE_INT xoffset, xbitpos;

	  last = get_last_insn ();

	  /* Adjust address to point to the containing unit of
	     that mode.  Compute the offset as a multiple of this unit,
	     counting in bytes.  */
	  unit = GET_MODE_BITSIZE (bestmode);
	  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
	  xbitpos = bitnum % unit;
	  xop0 = adjust_address (op0, bestmode, xoffset);

	  /* Fetch that unit, store the bitfield in it, then store
	     the unit.  */
	  tempreg = copy_to_reg (xop0);
	  if (store_bit_field_1 (tempreg, bitsize, xbitpos,
				 fieldmode, orig_value, false))
	    {
	      emit_move_insn (xop0, tempreg);
	      return true;
	    }
	  delete_insns_since (last);
	}
    }

  if (!fallback_p)
    return false;

  store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
  return true;
}

/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.  */

void
store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
		 unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
		 rtx value)
{
  if (!store_bit_field_1 (str_rtx, bitsize, bitnum, fieldmode, value, true))
    gcc_unreachable ();
}

/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0 except offset by OFFSET bytes.
     (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
    (If OP0 is a register, it may be a full word or a narrower mode,
     but BITPOS still counts within a full word,
     which is significant on bigendian machines.)  */

static void
store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT offset,
		       unsigned HOST_WIDE_INT bitsize,
		       unsigned HOST_WIDE_INT bitpos, rtx value)
{
  enum machine_mode mode;
  unsigned int total_bits = BITS_PER_WORD;
  rtx temp;
  int all_zero = 0;
  int all_one = 0;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    {
      gcc_assert (!offset);
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
	{
	  store_split_bit_field (op0, bitsize, bitpos, value);
	  return;
	}
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
	 includes the entire field.  If such a mode would be larger than
	 a word, we won't be doing the extraction the normal way.
	 We don't want a mode bigger than the destination.  */

      mode = GET_MODE (op0);
      if (GET_MODE_BITSIZE (mode) == 0
	  || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
	mode = word_mode;
      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
			    MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
	{
	  /* The only way this should occur is if the field spans word
	     boundaries.  */
	  store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
				 value);
	  return;
	}

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
	 be in the range 0 to total_bits-1, and put any excess bytes in
	 OFFSET.  */
      if (bitpos >= total_bits)
	{
	  offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
	  bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
		     * BITS_PER_UNIT);
	}

      /* Get ref to an aligned byte, halfword, or word containing the field.
	 Adjust BITPOS to be position within a word,
	 and OFFSET to be the offset of that word.
	 Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);

  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;
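  /* For example, with TOTAL_BITS == 32, BITSIZE == 8 and BITPOS == 4
     counted from the msb, the field occupies bits 20..27 counted from
     the lsb, so the new BITPOS is 32 - 8 - 4 == 20.  */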

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (GET_CODE (value) == CONST_INT)
    {
      HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
	v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
	all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
		&& v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
	       || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
	all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
		      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
	value = convert_to_mode (mode, value, 1);

      if (must_and)
	value = expand_binop (mode, and_optab, value,
			      mask_rtx (mode, 0, bitsize, 0),
			      NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
	value = expand_shift (LSHIFT_EXPR, mode, value,
			      build_int_cst (NULL_TREE, bitpos), NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */
  /* We keep the intermediates in registers to allow CSE to combine
     consecutive bitfield assignments.  */

  temp = force_reg (mode, op0);

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, temp,
			   mask_rtx (mode, bitpos, bitsize, 1),
			   NULL_RTX, 1, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);
    }

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    {
      temp = expand_binop (mode, ior_optab, temp, value,
			   NULL_RTX, 1, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);
    }

  if (op0 != temp)
    {
      op0 = copy_rtx (op0);
      emit_move_insn (op0, temp);
    }
}

/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
		       unsigned HOST_WIDE_INT bitpos, rtx value)
{
  unsigned int unit;
  unsigned int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
	value = word;
      else
	value = gen_lowpart_common (word_mode,
				    force_reg (GET_MODE (value) != VOIDmode
					       ? GET_MODE (value)
					       : word_mode, value));
    }

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
	 store_fixed_bit_field will call us again, and we will mutually
	 recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);
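      /* For instance, an 8-bit field starting at THISPOS == 6 in a
	 unit of 8 is stored as two pieces: first MIN (8, 8 - 6) == 2
	 bits, then the remaining 6 bits on the next iteration.  */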

      if (BYTES_BIG_ENDIAN)
	{
	  int total_bits;

	  /* We must do an endian conversion exactly the same way as it is
	     done in extract_bit_field, so that the two calls to
	     extract_fixed_bit_field will have comparable arguments.  */
	  if (!MEM_P (value) || GET_MODE (value) == BLKmode)
	    total_bits = BITS_PER_WORD;
	  else
	    total_bits = GET_MODE_BITSIZE (GET_MODE (value));

	  /* Fetch successively less significant portions.  */
	  if (GET_CODE (value) == CONST_INT)
	    part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
			     >> (bitsize - bitsdone - thissize))
			    & (((HOST_WIDE_INT) 1 << thissize) - 1));
	  else
	    /* The args are chosen so that the last part includes the
	       lsb.  Give extract_bit_field the value it needs (with
	       endianness compensation) to fetch the piece we want.  */
	    part = extract_fixed_bit_field (word_mode, value, 0, thissize,
					    total_bits - bitsize + bitsdone,
					    NULL_RTX, 1);
	}
      else
	{
	  /* Fetch successively more significant portions.  */
	  if (GET_CODE (value) == CONST_INT)
	    part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
			     >> bitsdone)
			    & (((HOST_WIDE_INT) 1 << thissize) - 1));
	  else
	    part = extract_fixed_bit_field (word_mode, value, 0, thissize,
					    bitsdone, NULL_RTX, 1);
	}

      /* If OP0 is a register, then handle OFFSET here.

	 When handling multiword bitfields, extract_bit_field may pass
	 down a word_mode SUBREG of a larger REG for a bitfield that actually
	 crosses a word boundary.  Thus, for a SUBREG, we must find
	 the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
	{
	  int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
	  word = operand_subword_force (SUBREG_REG (op0), word_offset,
					GET_MODE (SUBREG_REG (op0)));
	  offset = 0;
	}
      else if (REG_P (op0))
	{
	  word = operand_subword_force (op0, offset, GET_MODE (op0));
	  offset = 0;
	}
      else
	word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
	 store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
			     thispos, part);
      bitsdone += thissize;
    }
}

/* A subroutine of extract_bit_field_1 that converts return value X
   to either MODE or TMODE.  MODE, TMODE and UNSIGNEDP are arguments
   to extract_bit_field.  */

static rtx
convert_extracted_bit_field (rtx x, enum machine_mode mode,
			     enum machine_mode tmode, bool unsignedp)
{
  if (GET_MODE (x) == tmode || GET_MODE (x) == mode)
    return x;

  /* If the x mode is not a scalar integral, first convert to the
     integer mode of that size and then access it as a floating-point
     value via a SUBREG.  */
  if (!SCALAR_INT_MODE_P (tmode))
    {
      enum machine_mode smode;

      smode = mode_for_size (GET_MODE_BITSIZE (tmode), MODE_INT, 0);
      x = convert_to_mode (smode, x, unsignedp);
      x = force_reg (smode, x);
      return gen_lowpart (tmode, x);
    }
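  /* E.g. a DFmode result is first produced in the integer mode of the
     same width (DImode on most targets) and then punned to DFmode with
     gen_lowpart; convert_to_mode would instead perform a float
     conversion rather than preserve the bit pattern.  */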

  return convert_to_mode (tmode, x, unsignedp);
}

/* A subroutine of extract_bit_field, with the same arguments.
   If FALLBACK_P is true, fall back to extract_fixed_bit_field
   if we can find no other means of implementing the operation.
   If FALLBACK_P is false, return NULL instead.  */

static rtx
extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
		     unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
		     enum machine_mode mode, enum machine_mode tmode,
		     bool fallback_p)
{
  unsigned int unit
    = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset, bitpos;
  rtx op0 = str_rtx;
  enum machine_mode int_mode;
  enum machine_mode ext_mode;
  enum machine_mode mode1;
  enum insn_code icode;
  int byte_offset;

  if (tmode == VOIDmode)
    tmode = mode;

  while (GET_CODE (op0) == SUBREG)
    {
      bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }

  /* If we have an out-of-bounds access to a register, just return an
     uninitialized register of the required mode.  This can occur if the
     source code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
    return gen_reg_rtx (tmode);

  if (REG_P (op0)
      && mode == GET_MODE (op0)
      && bitnum == 0
      && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
    {
      /* We're trying to extract a full register from itself.  */
      return op0;
    }

  /* See if we can get a better vector mode before extracting.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && GET_MODE_INNER (GET_MODE (op0)) != tmode)
    {
      enum machine_mode new_mode;
      int nunits = GET_MODE_NUNITS (GET_MODE (op0));

      if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
	new_mode = MIN_MODE_VECTOR_FLOAT;
      else if (GET_MODE_CLASS (tmode) == MODE_FRACT)
	new_mode = MIN_MODE_VECTOR_FRACT;
      else if (GET_MODE_CLASS (tmode) == MODE_UFRACT)
	new_mode = MIN_MODE_VECTOR_UFRACT;
      else if (GET_MODE_CLASS (tmode) == MODE_ACCUM)
	new_mode = MIN_MODE_VECTOR_ACCUM;
      else if (GET_MODE_CLASS (tmode) == MODE_UACCUM)
	new_mode = MIN_MODE_VECTOR_UACCUM;
      else
	new_mode = MIN_MODE_VECTOR_INT;

      for (; new_mode != VOIDmode ; new_mode = GET_MODE_WIDER_MODE (new_mode))
	if (GET_MODE_NUNITS (new_mode) == nunits
	    && GET_MODE_SIZE (new_mode) == GET_MODE_SIZE (GET_MODE (op0))
	    && targetm.vector_mode_supported_p (new_mode))
	  break;
      if (new_mode != VOIDmode)
	op0 = gen_lowpart (new_mode, op0);
    }

  /* Use vec_extract patterns for extracting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && (optab_handler (vec_extract_optab, GET_MODE (op0))->insn_code
	  != CODE_FOR_nothing)
      && ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
	  == bitnum / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      int icode = (int) optab_handler (vec_extract_optab, outermode)->insn_code;
      unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode);
      rtx rtxpos = GEN_INT (pos);
      rtx src = op0;
      rtx dest = NULL, pat, seq;
      enum machine_mode mode0 = insn_data[icode].operand[0].mode;
      enum machine_mode mode1 = insn_data[icode].operand[1].mode;
      enum machine_mode mode2 = insn_data[icode].operand[2].mode;

      if (innermode == tmode || innermode == mode)
	dest = target;

      if (!dest)
	dest = gen_reg_rtx (innermode);

      start_sequence ();

      if (! (*insn_data[icode].operand[0].predicate) (dest, mode0))
	dest = copy_to_mode_reg (mode0, dest);

      if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
	src = copy_to_mode_reg (mode1, src);

      if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
	rtxpos = copy_to_mode_reg (mode1, rtxpos);

      /* We could handle this, but we should always be called with a pseudo
	 for our targets and all insns should take them as outputs.  */
      gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
		  && (*insn_data[icode].operand[1].predicate) (src, mode1)
		  && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));

      pat = GEN_FCN (icode) (dest, src, rtxpos);
      seq = get_insns ();
      end_sequence ();
      if (pat)
	{
	  emit_insn (seq);
	  emit_insn (pat);
	  if (mode0 != mode)
	    return gen_lowpart (tmode, dest);
	  return dest;
	}
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
	if (MEM_P (op0))
	  op0 = adjust_address (op0, imode, 0);
	else if (imode != BLKmode)
	  {
	    op0 = gen_lowpart (imode, op0);

	    /* If we got a SUBREG, force it into a register since we
	       aren't going to be able to do another SUBREG on it.  */
	    if (GET_CODE (op0) == SUBREG)
	      op0 = force_reg (imode, op0);
	  }
	else if (REG_P (op0))
	  {
	    rtx reg, subreg;
	    imode = smallest_mode_for_size (GET_MODE_BITSIZE (GET_MODE (op0)),
					    MODE_INT);
	    reg = gen_reg_rtx (imode);
	    subreg = gen_lowpart_SUBREG (GET_MODE (op0), reg);
	    emit_move_insn (subreg, op0);
	    op0 = reg;
	    bitnum += SUBREG_BYTE (subreg) * BITS_PER_UNIT;
	  }
	else
	  {
	    rtx mem = assign_stack_temp (GET_MODE (op0),
					 GET_MODE_SIZE (GET_MODE (op0)), 0);
	    emit_move_insn (mem, op0);
	    op0 = adjust_address (mem, BLKmode, 0);
	  }
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (MEM_P (op0))
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* Extraction of a full-word or multi-word value from a structure
     in a register or aligned memory can be done with just a SUBREG.
     A subword value in the least significant part of a register
     can also be extracted with a SUBREG.  For this, we need the
     byte offset of the value in op0.  */

  bitpos = bitnum % unit;
  offset = bitnum / unit;
  byte_offset = bitpos / BITS_PER_UNIT + offset * UNITS_PER_WORD;
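  /* In the register case UNIT is BITS_PER_WORD, so OFFSET selects the
     containing word and BITPOS / BITS_PER_UNIT the byte within it;
     BYTE_OFFSET is used below when deciding whether a plain SUBREG
     suffices.  */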

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && !MEM_P (op0)
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* Only scalar integer modes can be converted via subregs.  There is an
     additional problem for FP modes here in that they can have a precision
     which is different from the size.  mode_for_size uses precision, but
     we want a mode based on the size, so we must avoid calling it for FP
     modes.  */
  mode1 = (SCALAR_INT_MODE_P (tmode)
	   ? mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0)
	   : mode);

  if (((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
	&& bitpos % BITS_PER_WORD == 0)
       || (mode1 != BLKmode
	   /* ??? The big endian test here is wrong.  This is correct
	      if the value is in a register, and if mode_for_size is not
	      the same mode as op0.  This causes us to get unnecessarily
	      inefficient code from the Thumb port when -mbig-endian.  */
	   && (BYTES_BIG_ENDIAN
	       ? bitpos + bitsize == BITS_PER_WORD
	       : bitpos == 0)))
      && ((!MEM_P (op0)
	   && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode1),
				     GET_MODE_BITSIZE (GET_MODE (op0)))
	   && GET_MODE_SIZE (mode1) != 0
	   && byte_offset % GET_MODE_SIZE (mode1) == 0)
	  || (MEM_P (op0)
	      && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
		  || (offset * BITS_PER_UNIT % bitsize == 0
		      && MEM_ALIGN (op0) % bitsize == 0)))))
    {
      if (MEM_P (op0))
	op0 = adjust_address (op0, mode1, offset);
      else if (mode1 != GET_MODE (op0))
	{
	  rtx sub = simplify_gen_subreg (mode1, op0, GET_MODE (op0),
					 byte_offset);
	  if (sub == NULL)
	    goto no_subreg_mode_swap;
	  op0 = sub;
	}
      if (mode1 != mode)
	return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }
 no_subreg_mode_swap:

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
	 in the order least significant first.
	 This is because the most significant word is the one which may
	 be less than full.  */

      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      if (target == 0 || !REG_P (target))
	target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_clobber (target);

      for (i = 0; i < nwords; i++)
	{
	  /* If I is 0, use the low-order word in both field and target;
	     if I is 1, use the next to lowest word; and so on.  */
	  /* Word number in TARGET to use.  */
	  unsigned int wordnum
	    = (WORDS_BIG_ENDIAN
	       ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
	       : i);
	  /* Offset from start of field in OP0.  */
	  unsigned int bit_offset = (WORDS_BIG_ENDIAN
				     ? MAX (0, ((int) bitsize - ((int) i + 1)
						* (int) BITS_PER_WORD))
				     : (int) i * BITS_PER_WORD);
	  rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
	  rtx result_part
	    = extract_bit_field (op0, MIN (BITS_PER_WORD,
					   bitsize - i * BITS_PER_WORD),
				 bitnum + bit_offset, 1, target_part, mode,
				 word_mode);

	  gcc_assert (target_part);

	  if (result_part != target_part)
	    emit_move_insn (target_part, result_part);
	}

      if (unsignedp)
	{
	  /* Unless we've filled TARGET, the upper regs in a multi-reg value
	     need to be zero'd out.  */
	  if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
	    {
	      unsigned int i, total_words;

	      total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
	      for (i = nwords; i < total_words; i++)
		emit_move_insn
		  (operand_subword (target,
				    WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
				    1, VOIDmode),
		   const0_rtx);
	    }
	  return target;
	}

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
			     build_int_cst (NULL_TREE,
					    GET_MODE_BITSIZE (mode) - bitsize),
			     NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
			   build_int_cst (NULL_TREE,
					  GET_MODE_BITSIZE (mode) - bitsize),
			   NULL_RTX, 0);
    }

  /* From here on we know the desired field is smaller than a word.  */

  /* Check if there is a correspondingly-sized integer field, so we can
     safely extract it as one size of integer, if necessary; then
     truncate or extend to the size that is wanted; then use SUBREGs or
     convert_to_mode to get one of the modes we really wanted.  */

  int_mode = int_mode_for_mode (tmode);
  if (int_mode == BLKmode)
    int_mode = int_mode_for_mode (mode);
  /* Should probably push op0 out to memory and then do a load.  */
  gcc_assert (int_mode != BLKmode);

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */
  if (!MEM_P (op0))
    {
      if (offset != 0
	  || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
	{
	  if (!REG_P (op0))
	    op0 = copy_to_reg (op0);
	  op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
				op0, (offset * UNITS_PER_WORD));
	}
      offset = 0;
    }

  /* Now OFFSET is nonzero only for memory operands.  */
  ext_mode = mode_for_extraction (unsignedp ? EP_extzv : EP_extv, 0);
  icode = unsignedp ? CODE_FOR_extzv : CODE_FOR_extv;
  if (ext_mode != MAX_MACHINE_MODE
      && bitsize > 0
      && GET_MODE_BITSIZE (ext_mode) >= bitsize
      /* If op0 is a register, we need it in EXT_MODE to make it
	 acceptable to the format of ext(z)v.  */
      && !(GET_CODE (op0) == SUBREG && GET_MODE (op0) != ext_mode)
      && !((REG_P (op0) || GET_CODE (op0) == SUBREG)
	   && (bitsize + bitpos > GET_MODE_BITSIZE (ext_mode)))
      && check_predicate_volatile_ok (icode, 1, op0, GET_MODE (op0)))
    {
      unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
      rtx bitsize_rtx, bitpos_rtx;
      rtx last = get_last_insn ();
      rtx xop0 = op0;
      rtx xtarget = target;
      rtx xspec_target = target;
      rtx xspec_target_subreg = 0;
      rtx pat;

      /* If op0 is a register, we need it in EXT_MODE to make it
	 acceptable to the format of ext(z)v.  */
      if (REG_P (xop0) && GET_MODE (xop0) != ext_mode)
	xop0 = gen_rtx_SUBREG (ext_mode, xop0, 0);
      if (MEM_P (xop0))
	/* Get ref to first byte containing part of the field.  */
	xop0 = adjust_address (xop0, byte_mode, xoffset);

      /* On big-endian machines, we count bits from the most significant.
	 If the bit field insn does not, we must invert.  */
      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	xbitpos = unit - bitsize - xbitpos;

      /* Now convert from counting within UNIT to counting in EXT_MODE.  */
      if (BITS_BIG_ENDIAN && !MEM_P (xop0))
	xbitpos += GET_MODE_BITSIZE (ext_mode) - unit;

      unit = GET_MODE_BITSIZE (ext_mode);

      if (xtarget == 0)
	xtarget = xspec_target = gen_reg_rtx (tmode);

      if (GET_MODE (xtarget) != ext_mode)
	{
	  if (REG_P (xtarget))
	    {
	      xtarget = gen_lowpart (ext_mode, xtarget);
	      if (GET_MODE_SIZE (ext_mode)
		  > GET_MODE_SIZE (GET_MODE (xspec_target)))
		xspec_target_subreg = xtarget;
	    }
	  else
	    xtarget = gen_reg_rtx (ext_mode);
	}

      /* If this machine's ext(z)v insists on a register target,
	 make sure we have one.  */
      if (!insn_data[(int) icode].operand[0].predicate (xtarget, ext_mode))
	xtarget = gen_reg_rtx (ext_mode);

      bitsize_rtx = GEN_INT (bitsize);
      bitpos_rtx = GEN_INT (xbitpos);

      pat = (unsignedp
	     ? gen_extzv (xtarget, xop0, bitsize_rtx, bitpos_rtx)
	     : gen_extv (xtarget, xop0, bitsize_rtx, bitpos_rtx));
      if (pat)
	{
	  emit_insn (pat);
	  if (xtarget == xspec_target)
	    return xtarget;
	  if (xtarget == xspec_target_subreg)
	    return xspec_target;
	  return convert_extracted_bit_field (xtarget, mode, tmode, unsignedp);
	}
      delete_insns_since (last);
    }

  /* If OP0 is a memory, try copying it to a register and seeing if a
     cheap register alternative is available.  */
  if (ext_mode != MAX_MACHINE_MODE && MEM_P (op0))
    {
      enum machine_mode bestmode;

      /* Get the mode to use for inserting into this field.  If
	 OP0 is BLKmode, get the smallest mode consistent with the
	 alignment.  If OP0 is a non-BLKmode object that is no
	 wider than EXT_MODE, use its mode.  Otherwise, use the
	 smallest mode containing the field.  */

      if (GET_MODE (op0) == BLKmode
	  || (ext_mode != MAX_MACHINE_MODE
	      && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (ext_mode)))
	bestmode = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0),
				  (ext_mode == MAX_MACHINE_MODE
				   ? VOIDmode : ext_mode),
				  MEM_VOLATILE_P (op0));
      else
	bestmode = GET_MODE (op0);

      if (bestmode != VOIDmode
	  && !(SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
	       && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
	{
	  unsigned HOST_WIDE_INT xoffset, xbitpos;

	  /* Compute the offset as a multiple of this unit,
	     counting in bytes.  */
	  unit = GET_MODE_BITSIZE (bestmode);
	  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
	  xbitpos = bitnum % unit;

	  /* Make sure the register is big enough for the whole field.  */
	  if (xoffset * BITS_PER_UNIT + unit
	      >= offset * BITS_PER_UNIT + bitsize)
	    {
	      rtx last, result, xop0;

	      last = get_last_insn ();

	      /* Fetch it to a register in that size.  */
	      xop0 = adjust_address (op0, bestmode, xoffset);
	      xop0 = force_reg (bestmode, xop0);
	      result = extract_bit_field_1 (xop0, bitsize, xbitpos,
					    unsignedp, target,
					    mode, tmode, false);
	      if (result)
		return result;

	      delete_insns_since (last);
	    }
	}
    }

  if (!fallback_p)
    return NULL;

  target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
				    bitpos, target, unsignedp);
  return convert_extracted_bit_field (target, mode, tmode, unsignedp);
}

/* Generate code to extract a byte-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */

rtx
extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
		   unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
		   enum machine_mode mode, enum machine_mode tmode)
{
  return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp,
			      target, mode, tmode, true);
}

/* Extract a bit field using shifts and boolean operations
   Returns an rtx to represent the value.
   OP0 addresses a register (word) or memory (byte).
   BITPOS says which bit within the word or byte the bit field starts in.
   OFFSET says how many bytes farther the bit field starts;
     it is 0 if OP0 is a register.
   BITSIZE says how many bits long the bit field is.
   (If OP0 is a register, it may be narrower than a full word,
     but BITPOS still counts within a full word,
     which is significant on bigendian machines.)

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.  */

static rtx
extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
			 unsigned HOST_WIDE_INT offset,
			 unsigned HOST_WIDE_INT bitsize,
			 unsigned HOST_WIDE_INT bitpos, rtx target,
			 int unsignedp)
{
  unsigned int total_bits = BITS_PER_WORD;
  enum machine_mode mode;

  if (GET_CODE (op0) == SUBREG || REG_P (op0))
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
	return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
	 includes the entire field.  If such a mode would be larger than
	 a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
			    MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
	/* The only way this should occur is if the field spans word
	   boundaries.  */
	return extract_split_bit_field (op0, bitsize,
					bitpos + offset * BITS_PER_UNIT,
					unsignedp);

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
	 be in the range 0 to total_bits-1, and put any excess bytes in
	 OFFSET.  */
      if (bitpos >= total_bits)
	{
	  offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
	  bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
		     * BITS_PER_UNIT);
	}

      /* Get ref to an aligned byte, halfword, or word containing the field.
	 Adjust BITPOS to be position within a word,
	 and OFFSET to be the offset of that word.
	 Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb and that of OP0.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */

  if (unsignedp)
    {
      if (bitpos)
	{
	  /* If the field does not already start at the lsb,
	     shift it so it does.  */
	  tree amount = build_int_cst (NULL_TREE, bitpos);
	  /* Maybe propagate the target for the shift.  */
	  /* But not if we will return it--could confuse integrate.c.  */
	  rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
	  if (tmode != mode) subtarget = 0;
	  op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
	}
      /* Convert the value to the desired mode.  */
      if (mode != tmode)
	op0 = convert_to_mode (tmode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
	 mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitpos + bitsize)
	return expand_binop (GET_MODE (op0), and_optab, op0,
			     mask_rtx (GET_MODE (op0), 0, bitsize, 0),
			     target, 1, OPTAB_LIB_WIDEN);
      return op0;
    }

  /* To extract a signed bit-field, first shift its msb to the msb of the word,
     then arithmetic-shift its lsb to the lsb of the word.  */
  op0 = force_reg (mode, op0);
  if (mode != tmode)
    target = 0;

  /* Find the narrowest integer mode that contains the field.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
      {
	op0 = convert_to_mode (mode, op0, 0);
	break;
      }

  if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
    {
      tree amount
	= build_int_cst (NULL_TREE,
			 GET_MODE_BITSIZE (mode) - (bitsize + bitpos));
      /* Maybe propagate the target for the shift.  */
      rtx subtarget = (target != 0 && REG_P (target) ? target : 0);

      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }

  return expand_shift (RSHIFT_EXPR, mode, op0,
		       build_int_cst (NULL_TREE,
				      GET_MODE_BITSIZE (mode) - bitsize),
		       target, 0);
}

/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
   of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
   complement of that if COMPLEMENT.  The mask is truncated if
   necessary to the width of mode MODE.  The mask is zero-extended if
   BITSIZE+BITPOS is too small for MODE.  */
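/* For example, mask_rtx (SImode, 8, 4, 0) is 0x00000f00 (four ones
   shifted left by eight), and with COMPLEMENT nonzero it would be
   0xfffff0ff.  */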

static rtx
mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement)
{
  HOST_WIDE_INT masklow, maskhigh;

  if (bitsize == 0)
    masklow = 0;
  else if (bitpos < HOST_BITS_PER_WIDE_INT)
    masklow = (HOST_WIDE_INT) -1 << bitpos;
  else
    masklow = 0;

  if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
    masklow &= ((unsigned HOST_WIDE_INT) -1
		>> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));

  if (bitpos <= HOST_BITS_PER_WIDE_INT)
    maskhigh = -1;
  else
    maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);

  if (bitsize == 0)
    maskhigh = 0;
  else if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
    maskhigh &= ((unsigned HOST_WIDE_INT) -1
		 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
  else
    maskhigh = 0;

  if (complement)
    {
      maskhigh = ~maskhigh;
      masklow = ~masklow;
    }

  return immed_double_const (masklow, maskhigh, mode);
}

/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
   VALUE truncated to BITSIZE bits and then shifted left BITPOS bits.  */
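/* For example, lshift_value (SImode, GEN_INT (0x1234), 8, 8) keeps the
   low eight bits (0x34) and shifts them left eight, giving 0x3400.  */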

static rtx
lshift_value (enum machine_mode mode, rtx value, int bitpos, int bitsize)
{
  unsigned HOST_WIDE_INT v = INTVAL (value);
  HOST_WIDE_INT low, high;

  if (bitsize < HOST_BITS_PER_WIDE_INT)
    v &= ~((HOST_WIDE_INT) -1 << bitsize);

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      low = v << bitpos;
      high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
    }
  else
    {
      low = 0;
      high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
    }

  return immed_double_const (low, high, mode);
}
1867 /* Extract a bit field that is split across two words
1868 and return an RTX for the result.
1870 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1871 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
1872 UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend. */
1875 extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
1876 unsigned HOST_WIDE_INT bitpos, int unsignedp)
1879 unsigned int bitsdone = 0;
1880 rtx result = NULL_RTX;
1883 /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
1885 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
1886 unit = BITS_PER_WORD;
1888 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
1890 while (bitsdone < bitsize)
1892 unsigned HOST_WIDE_INT thissize;
1894 unsigned HOST_WIDE_INT thispos;
1895 unsigned HOST_WIDE_INT offset;
1897 offset = (bitpos + bitsdone) / unit;
1898 thispos = (bitpos + bitsdone) % unit;
      /* THISSIZE must not overrun a word boundary.  Otherwise,
         extract_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
1903 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1904 thissize = MIN (thissize, unit - thispos);
1906 /* If OP0 is a register, then handle OFFSET here.
1908 When handling multiword bitfields, extract_bit_field may pass
1909 down a word_mode SUBREG of a larger REG for a bitfield that actually
1910 crosses a word boundary. Thus, for a SUBREG, we must find
1911 the current word starting from the base register. */
1912 if (GET_CODE (op0) == SUBREG)
1914 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1915 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1916 GET_MODE (SUBREG_REG (op0)));
1919 else if (REG_P (op0))
1921 word = operand_subword_force (op0, offset, GET_MODE (op0));
      /* Extract the parts in bit-counting order, whose meaning is
         determined by the byte order (BYTES_BIG_ENDIAN).
1929 OFFSET is in UNITs, and UNIT is in bits.
1930 extract_fixed_bit_field wants offset in bytes. */
1931 part = extract_fixed_bit_field (word_mode, word,
1932 offset * unit / BITS_PER_UNIT,
1933 thissize, thispos, 0, 1);
1934 bitsdone += thissize;
      /* Shift this part into place for the result.  */
      if (BYTES_BIG_ENDIAN)
        {
          if (bitsize != bitsdone)
            part = expand_shift (LSHIFT_EXPR, word_mode, part,
                                 build_int_cst (NULL_TREE, bitsize - bitsdone),
                                 0, 1);
        }
      else
        {
          if (bitsdone != thissize)
            part = expand_shift (LSHIFT_EXPR, word_mode, part,
                                 build_int_cst (NULL_TREE,
                                                bitsdone - thissize), 0, 1);
        }
1955 /* Combine the parts with bitwise or. This works
1956 because we extracted each part as an unsigned bit field. */
      result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
                             OPTAB_LIB_WIDEN);
1963 /* Unsigned bit field: we are done. */
1966 /* Signed bit field: sign-extend with two arithmetic shifts. */
  result = expand_shift (LSHIFT_EXPR, word_mode, result,
                         build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
                         NULL_RTX, 0);
  return expand_shift (RSHIFT_EXPR, word_mode, result,
                       build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
                       NULL_RTX, 0);
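/* The two shifts just emitted are the usual sign-extension idiom.  As a
   sketch in plain C, assuming a 64-bit word, wrap-around left shifts and
   an arithmetic right shift on signed types:

     int64_t sign_extend (int64_t field, int bitsize)
     {
       int shift = 64 - bitsize;
       return (field << shift) >> shift;
     }

   e.g. sign_extend (0x7, 3) == -1.  */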
1975 /* Try to read the low bits of SRC as an rvalue of mode MODE, preserving
1976 the bit pattern. SRC_MODE is the mode of SRC; if this is smaller than
1977 MODE, fill the upper bits with zeros. Fail if the layout of either
1978 mode is unknown (as for CC modes) or if the extraction would involve
   unprofitable mode punning.  Return the value on success, otherwise
   return null_rtx.
1982 This is different from gen_lowpart* in these respects:
1984 - the returned value must always be considered an rvalue
     - when MODE is wider than SRC_MODE, the extraction involves
       a zero extension
1989 - when MODE is smaller than SRC_MODE, the extraction involves
1990 a truncation (and is thus subject to TRULY_NOOP_TRUNCATION).
1992 In other words, this routine performs a computation, whereas the
   gen_lowpart* routines are conceptually lvalue or rvalue subreg
   operations.  */
1997 extract_low_bits (enum machine_mode mode, enum machine_mode src_mode, rtx src)
1999 enum machine_mode int_mode, src_int_mode;
2001 if (mode == src_mode)
2004 if (CONSTANT_P (src))
2006 /* simplify_gen_subreg can't be used here, as if simplify_subreg
         fails, it will happily create (subreg (symbol_ref)) or similar
         invalid SUBREGs.  */
2009 unsigned int byte = subreg_lowpart_offset (mode, src_mode);
      rtx ret = simplify_subreg (mode, src, src_mode, byte);
      if (ret)
        return ret;
2014 if (GET_MODE (src) == VOIDmode
          || !validate_subreg (mode, src_mode, src, byte))
        return NULL_RTX;
2018 src = force_reg (GET_MODE (src), src);
2019 return gen_rtx_SUBREG (mode, src, byte);
2022 if (GET_MODE_CLASS (mode) == MODE_CC || GET_MODE_CLASS (src_mode) == MODE_CC)
2025 if (GET_MODE_BITSIZE (mode) == GET_MODE_BITSIZE (src_mode)
2026 && MODES_TIEABLE_P (mode, src_mode))
      rtx x = gen_lowpart_common (mode, src);
      if (x)
        return x;
2033 src_int_mode = int_mode_for_mode (src_mode);
2034 int_mode = int_mode_for_mode (mode);
  if (src_int_mode == BLKmode || int_mode == BLKmode)
    return NULL_RTX;

  if (!MODES_TIEABLE_P (src_int_mode, src_mode))
    return NULL_RTX;
  if (!MODES_TIEABLE_P (int_mode, mode))
    return NULL_RTX;
2043 src = gen_lowpart (src_int_mode, src);
2044 src = convert_modes (int_mode, src_int_mode, src, true);
2045 src = gen_lowpart (mode, src);
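/* Usage sketch, assuming the usual 32/64-bit mode sizes:
   extract_low_bits (SImode, DImode, x) reads the low 32 bits of X as an
   SImode rvalue, while extract_low_bits (DImode, SImode, y) zero-extends
   Y to 64 bits.  Either call yields NULL_RTX when the required mode
   punning is unavailable or unprofitable.  */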
2049 /* Add INC into TARGET. */
2052 expand_inc (rtx target, rtx inc)
2054 rtx value = expand_binop (GET_MODE (target), add_optab,
2056 target, 0, OPTAB_LIB_WIDEN);
2057 if (value != target)
2058 emit_move_insn (target, value);
2061 /* Subtract DEC from TARGET. */
2064 expand_dec (rtx target, rtx dec)
2066 rtx value = expand_binop (GET_MODE (target), sub_optab,
2068 target, 0, OPTAB_LIB_WIDEN);
2069 if (value != target)
2070 emit_move_insn (target, value);
2073 /* Output a shift instruction for expression code CODE,
2074 with SHIFTED being the rtx for the value to shift,
2075 and AMOUNT the tree for the amount to shift by.
2076 Store the result in the rtx TARGET, if that is convenient.
2077 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2078 Return the rtx for where the value is. */
2081 expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
2082 tree amount, rtx target, int unsignedp)
2085 int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
2086 int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
2087 optab lshift_optab = ashl_optab;
2088 optab rshift_arith_optab = ashr_optab;
2089 optab rshift_uns_optab = lshr_optab;
2090 optab lrotate_optab = rotl_optab;
2091 optab rrotate_optab = rotr_optab;
2092 enum machine_mode op1_mode;
2094 bool speed = optimize_insn_for_speed_p ();
2096 op1 = expand_normal (amount);
2097 op1_mode = GET_MODE (op1);
2099 /* Determine whether the shift/rotate amount is a vector, or scalar. If the
2100 shift amount is a vector, use the vector/vector shift patterns. */
2101 if (VECTOR_MODE_P (mode) && VECTOR_MODE_P (op1_mode))
2103 lshift_optab = vashl_optab;
2104 rshift_arith_optab = vashr_optab;
2105 rshift_uns_optab = vlshr_optab;
2106 lrotate_optab = vrotl_optab;
2107 rrotate_optab = vrotr_optab;
2110 /* Previously detected shift-counts computed by NEGATE_EXPR
     and shifted in the other direction; but that does not work
     on all machines.  */
2114 if (SHIFT_COUNT_TRUNCATED)
2116 if (GET_CODE (op1) == CONST_INT
2117 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
2118 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
2119 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
2120 % GET_MODE_BITSIZE (mode));
2121 else if (GET_CODE (op1) == SUBREG
2122 && subreg_lowpart_p (op1))
2123 op1 = SUBREG_REG (op1);
2126 if (op1 == const0_rtx)
  /* Check whether it's cheaper to implement a left shift by a constant
     bit count with a sequence of additions.  */
2131 if (code == LSHIFT_EXPR
2132 && GET_CODE (op1) == CONST_INT
2134 && INTVAL (op1) < GET_MODE_BITSIZE (mode)
2135 && INTVAL (op1) < MAX_BITS_PER_WORD
2136 && shift_cost[speed][mode][INTVAL (op1)] > INTVAL (op1) * add_cost[speed][mode]
2137 && shift_cost[speed][mode][INTVAL (op1)] != MAX_COST)
2140 for (i = 0; i < INTVAL (op1); i++)
2142 temp = force_reg (mode, shifted);
2143 shifted = expand_binop (mode, add_optab, temp, temp, NULL_RTX,
2144 unsignedp, OPTAB_LIB_WIDEN);
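  /* E.g. when shift_cost for a shift by 2 exceeds twice add_cost, the
     loop above emits x << 2 as two self-additions:
       t = x + x;  t = t + t;  */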
2149 for (attempt = 0; temp == 0 && attempt < 3; attempt++)
2151 enum optab_methods methods;
      if (attempt == 0)
        methods = OPTAB_DIRECT;
      else if (attempt == 1)
        methods = OPTAB_WIDEN;
      else
        methods = OPTAB_LIB_WIDEN;
2162 /* Widening does not work for rotation. */
          if (methods == OPTAB_WIDEN)
            continue;
2165 else if (methods == OPTAB_LIB_WIDEN)
2167 /* If we have been unable to open-code this by a rotation,
2168 do it as the IOR of two shifts. I.e., to rotate A
2169 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
2170 where C is the bitsize of A.
2172 It is theoretically possible that the target machine might
2173 not be able to perform either shift and hence we would
2174 be making two libcalls rather than just the one for the
2175 shift (similarly if IOR could not be done). We will allow
             this extremely unlikely lossage to avoid complicating the
             code.  */
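          /* As a sketch, for a hypothetical 32-bit mode and a nonzero
             rotate count n (n == 0 was handled above), the emitted
             computation corresponds to

               uint32_t rotl32 (uint32_t a, int n)
               {
                 return (a << n) | (a >> (32 - n));
               }

             with uint32_t standing in for the actual mode.  */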
2179 rtx subtarget = target == shifted ? 0 : target;
2180 tree new_amount, other_amount;
2182 tree type = TREE_TYPE (amount);
2183 if (GET_MODE (op1) != TYPE_MODE (type)
2184 && GET_MODE (op1) != VOIDmode)
2185 op1 = convert_to_mode (TYPE_MODE (type), op1, 1);
2186 new_amount = make_tree (type, op1);
          other_amount
            = fold_build2 (MINUS_EXPR, type,
                           build_int_cst (type, GET_MODE_BITSIZE (mode)),
                           new_amount);
2192 shifted = force_reg (mode, shifted);
2194 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
2195 mode, shifted, new_amount, 0, 1);
2196 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
2197 mode, shifted, other_amount, subtarget, 1);
2198 return expand_binop (mode, ior_optab, temp, temp1, target,
2199 unsignedp, methods);
2202 temp = expand_binop (mode,
2203 left ? lrotate_optab : rrotate_optab,
2204 shifted, op1, target, unsignedp, methods);
2207 temp = expand_binop (mode,
2208 left ? lshift_optab : rshift_uns_optab,
2209 shifted, op1, target, unsignedp, methods);
2211 /* Do arithmetic shifts.
2212 Also, if we are going to widen the operand, we can just as well
2213 use an arithmetic right-shift instead of a logical one. */
2214 if (temp == 0 && ! rotate
2215 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2217 enum optab_methods methods1 = methods;
2219 /* If trying to widen a log shift to an arithmetic shift,
2220 don't accept an arithmetic shift of the same size. */
      if (unsignedp)
        methods1 = OPTAB_MUST_WIDEN;
2224 /* Arithmetic shift */
2226 temp = expand_binop (mode,
2227 left ? lshift_optab : rshift_arith_optab,
2228 shifted, op1, target, unsignedp, methods1);
2231 /* We used to try extzv here for logical right shifts, but that was
2232 only useful for one machine, the VAX, and caused poor code
2233 generation there for lshrdi3, so the code was deleted and a
2234 define_expand for lshrsi3 was added to vax.md. */
2254 /* This structure holds the "cost" of a multiply sequence. The
2255 "cost" field holds the total rtx_cost of every operator in the
2256 synthetic multiplication sequence, hence cost(a op b) is defined
2257 as rtx_cost(op) + cost(a) + cost(b), where cost(leaf) is zero.
2258 The "latency" field holds the minimum possible latency of the
2259 synthetic multiply, on a hypothetical infinitely parallel CPU.
2260 This is the critical path, or the maximum height, of the expression
2261 tree which is the sum of rtx_costs on the most expensive path from
2262 any leaf to the root. Hence latency(a op b) is defined as zero for
2263 leaves and rtx_cost(op) + max(latency(a), latency(b)) otherwise. */
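/* For example, with every operation costing 1, ((a + b) + c) + d has
   cost 3 and latency 3, whereas (a + b) + (c + d) also has cost 3 but
   latency 2, because the two inner additions can execute in parallel.  */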
2266 short cost; /* Total rtx_cost of the multiplication sequence. */
2267 short latency; /* The latency of the multiplication sequence. */
/* This macro is used to compare a pointer to a mult_cost against a
2271 single integer "rtx_cost" value. This is equivalent to the macro
2272 CHEAPER_MULT_COST(X,Z) where Z = {Y,Y}. */
2273 #define MULT_COST_LESS(X,Y) ((X)->cost < (Y) \
2274 || ((X)->cost == (Y) && (X)->latency < (Y)))
2276 /* This macro is used to compare two pointers to mult_costs against
2277 each other. The macro returns true if X is cheaper than Y.
2278 Currently, the cheaper of two mult_costs is the one with the
2279 lower "cost". If "cost"s are tied, the lower latency is cheaper. */
2280 #define CHEAPER_MULT_COST(X,Y) ((X)->cost < (Y)->cost \
2281 || ((X)->cost == (Y)->cost \
2282 && (X)->latency < (Y)->latency))
2284 /* This structure records a sequence of operations.
2285 `ops' is the number of operations recorded.
2286 `cost' is their total cost.
2287 The operations are stored in `op' and the corresponding
2288 logarithms of the integer coefficients in `log'.
2290 These are the operations:
2291 alg_zero total := 0;
2292 alg_m total := multiplicand;
2293 alg_shift total := total * coeff
2294 alg_add_t_m2 total := total + multiplicand * coeff;
2295 alg_sub_t_m2 total := total - multiplicand * coeff;
2296 alg_add_factor total := total * coeff + total;
2297 alg_sub_factor total := total * coeff - total;
2298 alg_add_t2_m total := total * coeff + multiplicand;
2299 alg_sub_t2_m total := total * coeff - multiplicand;
2301 The first operand must be either alg_zero or alg_m. */
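/* For example, multiplication by 10 can be recorded as

     op[0] = alg_m                     total := x
     op[1] = alg_add_t2_m  (log == 2)  total := total*4 + x   -- 5*x
     op[2] = alg_shift     (log == 1)  total := total*2       -- 10*x  */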
2305 struct mult_cost cost;
2307 /* The size of the OP and LOG fields are not directly related to the
2308 word size, but the worst-case algorithms will be if we have few
2309 consecutive ones or zeros, i.e., a multiplicand like 10101010101...
2310 In that case we will generate shift-by-2, add, shift-by-2, add,...,
2311 in total wordsize operations. */
2312 enum alg_code op[MAX_BITS_PER_WORD];
2313 char log[MAX_BITS_PER_WORD];
2316 /* The entry for our multiplication cache/hash table. */
2317 struct alg_hash_entry {
2318 /* The number we are multiplying by. */
2319 unsigned HOST_WIDE_INT t;
2321 /* The mode in which we are multiplying something by T. */
2322 enum machine_mode mode;
2324 /* The best multiplication algorithm for t. */
2327 /* The cost of multiplication if ALG_CODE is not alg_impossible.
2328 Otherwise, the cost within which multiplication by T is
2330 struct mult_cost cost;
  /* Optimized for speed?  */
2336 /* The number of cache/hash entries. */
2337 #if HOST_BITS_PER_WIDE_INT == 64
2338 #define NUM_ALG_HASH_ENTRIES 1031
2340 #define NUM_ALG_HASH_ENTRIES 307
2343 /* Each entry of ALG_HASH caches alg_code for some integer. This is
   actually a hash table.  If we have a collision, the older
2345 entry is kicked out. */
2346 static struct alg_hash_entry alg_hash[NUM_ALG_HASH_ENTRIES];
2348 /* Indicates the type of fixup needed after a constant multiplication.
2349 BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that
2350 the result should be negated, and ADD_VARIANT means that the
2351 multiplicand should be added to the result. */
2352 enum mult_variant {basic_variant, negate_variant, add_variant};
2354 static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
2355 const struct mult_cost *, enum machine_mode mode);
2356 static bool choose_mult_variant (enum machine_mode, HOST_WIDE_INT,
2357 struct algorithm *, enum mult_variant *, int);
2358 static rtx expand_mult_const (enum machine_mode, rtx, HOST_WIDE_INT, rtx,
2359 const struct algorithm *, enum mult_variant);
2360 static unsigned HOST_WIDE_INT choose_multiplier (unsigned HOST_WIDE_INT, int,
2361 int, rtx *, int *, int *);
2362 static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
2363 static rtx extract_high_half (enum machine_mode, rtx);
2364 static rtx expand_mult_highpart (enum machine_mode, rtx, rtx, rtx, int, int);
2365 static rtx expand_mult_highpart_optab (enum machine_mode, rtx, rtx, rtx,
2367 /* Compute and return the best algorithm for multiplying by T.
   The algorithm must cost less than COST_LIMIT.
   If retval.cost >= COST_LIMIT, no algorithm was found and all
   other fields of the returned struct are undefined.
2371 MODE is the machine mode of the multiplication. */
2374 synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
2375 const struct mult_cost *cost_limit, enum machine_mode mode)
2378 struct algorithm *alg_in, *best_alg;
2379 struct mult_cost best_cost;
2380 struct mult_cost new_limit;
2381 int op_cost, op_latency;
2382 unsigned HOST_WIDE_INT q;
2383 int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode));
2385 bool cache_hit = false;
2386 enum alg_code cache_alg = alg_zero;
2387 bool speed = optimize_insn_for_speed_p ();
  /* Indicate that no algorithm is yet found.  If no algorithm
     is found, this value will be returned to indicate failure.  */
2391 alg_out->cost.cost = cost_limit->cost + 1;
2392 alg_out->cost.latency = cost_limit->latency + 1;
2394 if (cost_limit->cost < 0
      || (cost_limit->cost == 0 && cost_limit->latency <= 0))
    return;
2398 /* Restrict the bits of "t" to the multiplication's mode. */
2399 t &= GET_MODE_MASK (mode);
2401 /* t == 1 can be done in zero cost. */
2405 alg_out->cost.cost = 0;
2406 alg_out->cost.latency = 0;
2407 alg_out->op[0] = alg_m;
  /* t == 0 sometimes has a cost.  If it does, and it exceeds our limit,
     fail now.  */
2415 if (MULT_COST_LESS (cost_limit, zero_cost[speed]))
2420 alg_out->cost.cost = zero_cost[speed];
2421 alg_out->cost.latency = zero_cost[speed];
2422 alg_out->op[0] = alg_zero;
2427 /* We'll be needing a couple extra algorithm structures now. */
2429 alg_in = XALLOCA (struct algorithm);
2430 best_alg = XALLOCA (struct algorithm);
2431 best_cost = *cost_limit;
2433 /* Compute the hash index. */
2434 hash_index = (t ^ (unsigned int) mode ^ (speed * 256)) % NUM_ALG_HASH_ENTRIES;
2436 /* See if we already know what to do for T. */
2437 if (alg_hash[hash_index].t == t
      && alg_hash[hash_index].mode == mode
2440 && alg_hash[hash_index].speed == speed
2441 && alg_hash[hash_index].alg != alg_unknown)
2443 cache_alg = alg_hash[hash_index].alg;
2445 if (cache_alg == alg_impossible)
2447 /* The cache tells us that it's impossible to synthesize
2448 multiplication by T within alg_hash[hash_index].cost. */
2449 if (!CHEAPER_MULT_COST (&alg_hash[hash_index].cost, cost_limit))
2450 /* COST_LIMIT is at least as restrictive as the one
2451 recorded in the hash table, in which case we have no
           hope of synthesizing a multiplication.  Just return.  */
2456 /* If we get here, COST_LIMIT is less restrictive than the
2457 one recorded in the hash table, so we may be able to
2458 synthesize a multiplication. Proceed as if we didn't
2459 have the cache entry. */
2463 if (CHEAPER_MULT_COST (cost_limit, &alg_hash[hash_index].cost))
2464 /* The cached algorithm shows that this multiplication
2465 requires more cost than COST_LIMIT. Just return. This
2466 way, we don't clobber this cache entry with
2467 alg_impossible but retain useful information. */
2479 goto do_alg_addsub_t_m2;
2481 case alg_add_factor:
2482 case alg_sub_factor:
2483 goto do_alg_addsub_factor;
2486 goto do_alg_add_t2_m;
2489 goto do_alg_sub_t2_m;
2497 /* If we have a group of zero bits at the low-order part of T, try
2498 multiplying by the remaining bits and then doing a shift. */
2503 m = floor_log2 (t & -t); /* m = number of low zero bits */
2507 /* The function expand_shift will choose between a shift and
2508 a sequence of additions, so the observed cost is given as
2509 MIN (m * add_cost[speed][mode], shift_cost[speed][mode][m]). */
2510 op_cost = m * add_cost[speed][mode];
2511 if (shift_cost[speed][mode][m] < op_cost)
2512 op_cost = shift_cost[speed][mode][m];
2513 new_limit.cost = best_cost.cost - op_cost;
2514 new_limit.latency = best_cost.latency - op_cost;
2515 synth_mult (alg_in, q, &new_limit, mode);
2517 alg_in->cost.cost += op_cost;
2518 alg_in->cost.latency += op_cost;
2519 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2521 struct algorithm *x;
2522 best_cost = alg_in->cost;
2523 x = alg_in, alg_in = best_alg, best_alg = x;
2524 best_alg->log[best_alg->ops] = m;
2525 best_alg->op[best_alg->ops] = alg_shift;
2532 /* If we have an odd number, add or subtract one. */
2535 unsigned HOST_WIDE_INT w;
2538 for (w = 1; (w & t) != 0; w <<= 1)
      /* If T was -1, then W will be zero after the loop.  This is another
         case where T ends with ...111.  Handling this case with T + 1 and
         subtracting 1 produces slightly better code and selects an
         algorithm much faster than treating it like the ...0111 case
         below.  */
2547 /* Reject the case where t is 3.
2548 Thus we prefer addition in that case. */
2551 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2553 op_cost = add_cost[speed][mode];
2554 new_limit.cost = best_cost.cost - op_cost;
2555 new_limit.latency = best_cost.latency - op_cost;
2556 synth_mult (alg_in, t + 1, &new_limit, mode);
2558 alg_in->cost.cost += op_cost;
2559 alg_in->cost.latency += op_cost;
2560 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2562 struct algorithm *x;
2563 best_cost = alg_in->cost;
2564 x = alg_in, alg_in = best_alg, best_alg = x;
2565 best_alg->log[best_alg->ops] = 0;
2566 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2571 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2573 op_cost = add_cost[speed][mode];
2574 new_limit.cost = best_cost.cost - op_cost;
2575 new_limit.latency = best_cost.latency - op_cost;
2576 synth_mult (alg_in, t - 1, &new_limit, mode);
2578 alg_in->cost.cost += op_cost;
2579 alg_in->cost.latency += op_cost;
2580 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2582 struct algorithm *x;
2583 best_cost = alg_in->cost;
2584 x = alg_in, alg_in = best_alg, best_alg = x;
2585 best_alg->log[best_alg->ops] = 0;
2586 best_alg->op[best_alg->ops] = alg_add_t_m2;
2593 /* Look for factors of t of the form
2594 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2595 If we find such a factor, we can multiply by t using an algorithm that
2596 multiplies by q, shift the result by m and add/subtract it to itself.
2598 We search for large factors first and loop down, even if large factors
2599 are less probable than small; if we find a large factor we will find a
2600 good sequence quickly, and therefore be able to prune (by decreasing
2601 COST_LIMIT) the search. */
2603 do_alg_addsub_factor:
2604 for (m = floor_log2 (t - 1); m >= 2; m--)
2606 unsigned HOST_WIDE_INT d;
2608 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2609 if (t % d == 0 && t > d && m < maxm
2610 && (!cache_hit || cache_alg == alg_add_factor))
2612 /* If the target has a cheap shift-and-add instruction use
2613 that in preference to a shift insn followed by an add insn.
2614 Assume that the shift-and-add is "atomic" with a latency
2615 equal to its cost, otherwise assume that on superscalar
2616 hardware the shift may be executed concurrently with the
2617 earlier steps in the algorithm. */
2618 op_cost = add_cost[speed][mode] + shift_cost[speed][mode][m];
2619 if (shiftadd_cost[speed][mode][m] < op_cost)
2621 op_cost = shiftadd_cost[speed][mode][m];
2622 op_latency = op_cost;
2625 op_latency = add_cost[speed][mode];
2627 new_limit.cost = best_cost.cost - op_cost;
2628 new_limit.latency = best_cost.latency - op_latency;
2629 synth_mult (alg_in, t / d, &new_limit, mode);
2631 alg_in->cost.cost += op_cost;
2632 alg_in->cost.latency += op_latency;
2633 if (alg_in->cost.latency < op_cost)
2634 alg_in->cost.latency = op_cost;
2635 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2637 struct algorithm *x;
2638 best_cost = alg_in->cost;
2639 x = alg_in, alg_in = best_alg, best_alg = x;
2640 best_alg->log[best_alg->ops] = m;
2641 best_alg->op[best_alg->ops] = alg_add_factor;
2643 /* Other factors will have been taken care of in the recursion. */
2647 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2648 if (t % d == 0 && t > d && m < maxm
2649 && (!cache_hit || cache_alg == alg_sub_factor))
2651 /* If the target has a cheap shift-and-subtract insn use
2652 that in preference to a shift insn followed by a sub insn.
2653 Assume that the shift-and-sub is "atomic" with a latency
             equal to its cost, otherwise assume that on superscalar
2655 hardware the shift may be executed concurrently with the
2656 earlier steps in the algorithm. */
2657 op_cost = add_cost[speed][mode] + shift_cost[speed][mode][m];
2658 if (shiftsub_cost[speed][mode][m] < op_cost)
2660 op_cost = shiftsub_cost[speed][mode][m];
2661 op_latency = op_cost;
2664 op_latency = add_cost[speed][mode];
2666 new_limit.cost = best_cost.cost - op_cost;
2667 new_limit.latency = best_cost.latency - op_latency;
2668 synth_mult (alg_in, t / d, &new_limit, mode);
2670 alg_in->cost.cost += op_cost;
2671 alg_in->cost.latency += op_latency;
2672 if (alg_in->cost.latency < op_cost)
2673 alg_in->cost.latency = op_cost;
2674 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2676 struct algorithm *x;
2677 best_cost = alg_in->cost;
2678 x = alg_in, alg_in = best_alg, best_alg = x;
2679 best_alg->log[best_alg->ops] = m;
2680 best_alg->op[best_alg->ops] = alg_sub_factor;
2688 /* Try shift-and-add (load effective address) instructions,
2689 i.e. do a*3, a*5, a*9. */
2696 if (m >= 0 && m < maxm)
2698 op_cost = shiftadd_cost[speed][mode][m];
2699 new_limit.cost = best_cost.cost - op_cost;
2700 new_limit.latency = best_cost.latency - op_cost;
2701 synth_mult (alg_in, (t - 1) >> m, &new_limit, mode);
2703 alg_in->cost.cost += op_cost;
2704 alg_in->cost.latency += op_cost;
2705 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2707 struct algorithm *x;
2708 best_cost = alg_in->cost;
2709 x = alg_in, alg_in = best_alg, best_alg = x;
2710 best_alg->log[best_alg->ops] = m;
2711 best_alg->op[best_alg->ops] = alg_add_t2_m;
2721 if (m >= 0 && m < maxm)
2723 op_cost = shiftsub_cost[speed][mode][m];
2724 new_limit.cost = best_cost.cost - op_cost;
2725 new_limit.latency = best_cost.latency - op_cost;
2726 synth_mult (alg_in, (t + 1) >> m, &new_limit, mode);
2728 alg_in->cost.cost += op_cost;
2729 alg_in->cost.latency += op_cost;
2730 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2732 struct algorithm *x;
2733 best_cost = alg_in->cost;
2734 x = alg_in, alg_in = best_alg, best_alg = x;
2735 best_alg->log[best_alg->ops] = m;
2736 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2744 /* If best_cost has not decreased, we have not found any algorithm. */
2745 if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
2747 /* We failed to find an algorithm. Record alg_impossible for
2748 this case (that is, <T, MODE, COST_LIMIT>) so that next time
2749 we are asked to find an algorithm for T within the same or
         lower COST_LIMIT, we can immediately return to the
         caller.  */
2752 alg_hash[hash_index].t = t;
2753 alg_hash[hash_index].mode = mode;
2754 alg_hash[hash_index].speed = speed;
2755 alg_hash[hash_index].alg = alg_impossible;
2756 alg_hash[hash_index].cost = *cost_limit;
2760 /* Cache the result. */
2763 alg_hash[hash_index].t = t;
2764 alg_hash[hash_index].mode = mode;
2765 alg_hash[hash_index].speed = speed;
2766 alg_hash[hash_index].alg = best_alg->op[best_alg->ops];
2767 alg_hash[hash_index].cost.cost = best_cost.cost;
2768 alg_hash[hash_index].cost.latency = best_cost.latency;
  /* If the sequence is too long for `struct algorithm'
     to record, make this search fail.  */
2773 if (best_alg->ops == MAX_BITS_PER_WORD)
2776 /* Copy the algorithm from temporary space to the space at alg_out.
2777 We avoid using structure assignment because the majority of
2778 best_alg is normally undefined, and this is a critical function. */
2779 alg_out->ops = best_alg->ops + 1;
2780 alg_out->cost = best_cost;
2781 memcpy (alg_out->op, best_alg->op,
2782 alg_out->ops * sizeof *alg_out->op);
2783 memcpy (alg_out->log, best_alg->log,
2784 alg_out->ops * sizeof *alg_out->log);
2787 /* Find the cheapest way of multiplying a value of mode MODE by VAL.
2788 Try three variations:
2790 - a shift/add sequence based on VAL itself
2791 - a shift/add sequence based on -VAL, followed by a negation
2792 - a shift/add sequence based on VAL - 1, followed by an addition.
2794 Return true if the cheapest of these cost less than MULT_COST,
2795 describing the algorithm in *ALG and final fixup in *VARIANT. */
2798 choose_mult_variant (enum machine_mode mode, HOST_WIDE_INT val,
2799 struct algorithm *alg, enum mult_variant *variant,
2802 struct algorithm alg2;
2803 struct mult_cost limit;
2805 bool speed = optimize_insn_for_speed_p ();
2807 /* Fail quickly for impossible bounds. */
2811 /* Ensure that mult_cost provides a reasonable upper bound.
2812 Any constant multiplication can be performed with less
2813 than 2 * bits additions. */
2814 op_cost = 2 * GET_MODE_BITSIZE (mode) * add_cost[speed][mode];
2815 if (mult_cost > op_cost)
2816 mult_cost = op_cost;
2818 *variant = basic_variant;
2819 limit.cost = mult_cost;
2820 limit.latency = mult_cost;
2821 synth_mult (alg, val, &limit, mode);
  /* This works only if the inverted value actually fits in an
     `unsigned int'.  */
2825 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2827 op_cost = neg_cost[speed][mode];
2828 if (MULT_COST_LESS (&alg->cost, mult_cost))
2830 limit.cost = alg->cost.cost - op_cost;
2831 limit.latency = alg->cost.latency - op_cost;
2835 limit.cost = mult_cost - op_cost;
2836 limit.latency = mult_cost - op_cost;
2839 synth_mult (&alg2, -val, &limit, mode);
2840 alg2.cost.cost += op_cost;
2841 alg2.cost.latency += op_cost;
2842 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2843 *alg = alg2, *variant = negate_variant;
2846 /* This proves very useful for division-by-constant. */
2847 op_cost = add_cost[speed][mode];
2848 if (MULT_COST_LESS (&alg->cost, mult_cost))
2850 limit.cost = alg->cost.cost - op_cost;
2851 limit.latency = alg->cost.latency - op_cost;
2855 limit.cost = mult_cost - op_cost;
2856 limit.latency = mult_cost - op_cost;
2859 synth_mult (&alg2, val - 1, &limit, mode);
2860 alg2.cost.cost += op_cost;
2861 alg2.cost.latency += op_cost;
2862 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2863 *alg = alg2, *variant = add_variant;
2865 return MULT_COST_LESS (&alg->cost, mult_cost);
2868 /* A subroutine of expand_mult, used for constant multiplications.
2869 Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
2870 convenient. Use the shift/add sequence described by ALG and apply
2871 the final fixup specified by VARIANT. */
2874 expand_mult_const (enum machine_mode mode, rtx op0, HOST_WIDE_INT val,
2875 rtx target, const struct algorithm *alg,
2876 enum mult_variant variant)
2878 HOST_WIDE_INT val_so_far;
2879 rtx insn, accum, tem;
2881 enum machine_mode nmode;
  /* Avoid referencing memory over and over, and invalid sharing
     of rtl.  */
2885 op0 = force_reg (mode, op0);
2887 /* ACCUM starts out either as OP0 or as a zero, depending on
2888 the first operation. */
2890 if (alg->op[0] == alg_zero)
2892 accum = copy_to_mode_reg (mode, const0_rtx);
2895 else if (alg->op[0] == alg_m)
2897 accum = copy_to_mode_reg (mode, op0);
2903 for (opno = 1; opno < alg->ops; opno++)
2905 int log = alg->log[opno];
2906 rtx shift_subtarget = optimize ? 0 : accum;
2908 = (opno == alg->ops - 1 && target != 0 && variant != add_variant
2911 rtx accum_target = optimize ? 0 : accum;
2913 switch (alg->op[opno])
2916 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2917 build_int_cst (NULL_TREE, log),
2923 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2924 build_int_cst (NULL_TREE, log),
2926 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2927 add_target ? add_target : accum_target);
2928 val_so_far += (HOST_WIDE_INT) 1 << log;
2932 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2933 build_int_cst (NULL_TREE, log),
2935 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2936 add_target ? add_target : accum_target);
2937 val_so_far -= (HOST_WIDE_INT) 1 << log;
2941 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2942 build_int_cst (NULL_TREE, log),
2945 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2946 add_target ? add_target : accum_target);
2947 val_so_far = (val_so_far << log) + 1;
2951 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2952 build_int_cst (NULL_TREE, log),
2953 shift_subtarget, 0);
2954 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2955 add_target ? add_target : accum_target);
2956 val_so_far = (val_so_far << log) - 1;
2959 case alg_add_factor:
2960 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2961 build_int_cst (NULL_TREE, log),
2963 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2964 add_target ? add_target : accum_target);
2965 val_so_far += val_so_far << log;
2968 case alg_sub_factor:
2969 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2970 build_int_cst (NULL_TREE, log),
2972 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
2974 ? add_target : (optimize ? 0 : tem)));
2975 val_so_far = (val_so_far << log) - val_so_far;
2982 /* Write a REG_EQUAL note on the last insn so that we can cse
2983 multiplication sequences. Note that if ACCUM is a SUBREG,
     we've set the inner register and must properly indicate
     that.  */
2987 tem = op0, nmode = mode;
2988 if (GET_CODE (accum) == SUBREG)
2990 nmode = GET_MODE (SUBREG_REG (accum));
2991 tem = gen_lowpart (nmode, op0);
2994 insn = get_last_insn ();
2995 set_unique_reg_note (insn, REG_EQUAL,
2996 gen_rtx_MULT (nmode, tem,
2997 GEN_INT (val_so_far)));
3000 if (variant == negate_variant)
3002 val_so_far = -val_so_far;
3003 accum = expand_unop (mode, neg_optab, accum, target, 0);
3005 else if (variant == add_variant)
3007 val_so_far = val_so_far + 1;
3008 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
3011 /* Compare only the bits of val and val_so_far that are significant
3012 in the result mode, to avoid sign-/zero-extension confusion. */
3013 val &= GET_MODE_MASK (mode);
3014 val_so_far &= GET_MODE_MASK (mode);
3015 gcc_assert (val == val_so_far);
3020 /* Perform a multiplication and return an rtx for the result.
3021 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3022 TARGET is a suggestion for where to store the result (an rtx).
3024 We check specially for a constant integer as OP1.
3025 If you want this check for OP0 as well, then before calling
3026 you should swap the two operands if OP0 would be constant. */
3029 expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3032 enum mult_variant variant;
3033 struct algorithm algorithm;
3035 bool speed = optimize_insn_for_speed_p ();
  /* Handling const0_rtx here allows us to use zero as a rogue value for
     coeff below.  */
3039 if (op1 == const0_rtx)
3041 if (op1 == const1_rtx)
3043 if (op1 == constm1_rtx)
3044 return expand_unop (mode,
3045 GET_MODE_CLASS (mode) == MODE_INT
3046 && !unsignedp && flag_trapv
3047 ? negv_optab : neg_optab,
3050 /* These are the operations that are potentially turned into a sequence
3051 of shifts and additions. */
3052 if (SCALAR_INT_MODE_P (mode)
3053 && (unsignedp || !flag_trapv))
3055 HOST_WIDE_INT coeff = 0;
3056 rtx fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
3058 /* synth_mult does an `unsigned int' multiply. As long as the mode is
3059 less than or equal in size to `unsigned int' this doesn't matter.
3060 If the mode is larger than `unsigned int', then synth_mult works
3061 only if the constant value exactly fits in an `unsigned int' without
3062 any truncation. This means that multiplying by negative values does
3063 not work; results are off by 2^32 on a 32 bit machine. */
3065 if (GET_CODE (op1) == CONST_INT)
3067 /* Attempt to handle multiplication of DImode values by negative
3068 coefficients, by performing the multiplication by a positive
3069 multiplier and then inverting the result. */
3070 if (INTVAL (op1) < 0
3071 && GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
          /* It's safe to use -INTVAL (op1) even for INT_MIN, as the
3074 result is interpreted as an unsigned coefficient.
3075 Exclude cost of op0 from max_cost to match the cost
3076 calculation of the synth_mult. */
3077 max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET, speed)
3078 - neg_cost[speed][mode];
3080 && choose_mult_variant (mode, -INTVAL (op1), &algorithm,
3081 &variant, max_cost))
3083 rtx temp = expand_mult_const (mode, op0, -INTVAL (op1),
3084 NULL_RTX, &algorithm,
3086 return expand_unop (mode, neg_optab, temp, target, 0);
3089 else coeff = INTVAL (op1);
3091 else if (GET_CODE (op1) == CONST_DOUBLE)
3093 /* If we are multiplying in DImode, it may still be a win
3094 to try to work with shifts and adds. */
3095 if (CONST_DOUBLE_HIGH (op1) == 0
3096 && CONST_DOUBLE_LOW (op1) > 0)
3097 coeff = CONST_DOUBLE_LOW (op1);
3098 else if (CONST_DOUBLE_LOW (op1) == 0
3099 && EXACT_POWER_OF_2_OR_ZERO_P (CONST_DOUBLE_HIGH (op1)))
3101 int shift = floor_log2 (CONST_DOUBLE_HIGH (op1))
3102 + HOST_BITS_PER_WIDE_INT;
3103 return expand_shift (LSHIFT_EXPR, mode, op0,
3104 build_int_cst (NULL_TREE, shift),
3109 /* We used to test optimize here, on the grounds that it's better to
3110 produce a smaller program when -O is not used. But this causes
     such a terrible slowdown sometimes that it seems better to always
     use synth_mult.  */
3115 /* Special case powers of two. */
3116 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3117 return expand_shift (LSHIFT_EXPR, mode, op0,
3118 build_int_cst (NULL_TREE, floor_log2 (coeff)),
3121 /* Exclude cost of op0 from max_cost to match the cost
3122 calculation of the synth_mult. */
3123 max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET, speed);
3124 if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3126 return expand_mult_const (mode, op0, coeff, target,
3127 &algorithm, variant);
3131 if (GET_CODE (op0) == CONST_DOUBLE)
3138 /* Expand x*2.0 as x+x. */
3139 if (GET_CODE (op1) == CONST_DOUBLE
3140 && SCALAR_FLOAT_MODE_P (mode))
3143 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
3145 if (REAL_VALUES_EQUAL (d, dconst2))
3147 op0 = force_reg (GET_MODE (op0), op0);
3148 return expand_binop (mode, add_optab, op0, op0,
3149 target, unsignedp, OPTAB_LIB_WIDEN);
3153 /* This used to use umul_optab if unsigned, but for non-widening multiply
3154 there is no difference between signed and unsigned. */
3155 op0 = expand_binop (mode,
3157 && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
3158 ? smulv_optab : smul_optab,
3159 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
3164 /* Return the smallest n such that 2**n >= X. */
3167 ceil_log2 (unsigned HOST_WIDE_INT x)
3169 return floor_log2 (x - 1) + 1;
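/* e.g. ceil_log2 (4) == 2 and ceil_log2 (5) == 3; this relies on
   floor_log2 (0) being -1 so that ceil_log2 (1) == 0.  */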
3172 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
3173 replace division by D, and put the least significant N bits of the result
3174 in *MULTIPLIER_PTR and return the most significant bit.
3176 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
3177 needed precision is in PRECISION (should be <= N).
3179 PRECISION should be as small as possible so this function can choose
3180 multiplier more freely.
3182 The rounded-up logarithm of D is placed in *lgup_ptr. A shift count that
3183 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
3185 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
3186 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
3189 unsigned HOST_WIDE_INT
3190 choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
3191 rtx *multiplier_ptr, int *post_shift_ptr, int *lgup_ptr)
3193 HOST_WIDE_INT mhigh_hi, mlow_hi;
3194 unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
3195 int lgup, post_shift;
3197 unsigned HOST_WIDE_INT nl, dummy1;
3198 HOST_WIDE_INT nh, dummy2;
3200 /* lgup = ceil(log2(divisor)); */
3201 lgup = ceil_log2 (d);
3203 gcc_assert (lgup <= n);
  pow = n + lgup;
  pow2 = n + lgup - precision;
  /* We could handle this with some effort, but this case is much
     better handled directly with a scc insn, so rely on caller using
     that.  */
3211 gcc_assert (pow != 2 * HOST_BITS_PER_WIDE_INT);
3213 /* mlow = 2^(N + lgup)/d */
3214 if (pow >= HOST_BITS_PER_WIDE_INT)
3216 nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
3222 nl = (unsigned HOST_WIDE_INT) 1 << pow;
3224 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
3225 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
  /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
3228 if (pow2 >= HOST_BITS_PER_WIDE_INT)
3229 nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
3231 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
3232 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
3233 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
3235 gcc_assert (!mhigh_hi || nh - d < d);
3236 gcc_assert (mhigh_hi <= 1 && mlow_hi <= 1);
3237 /* Assert that mlow < mhigh. */
3238 gcc_assert (mlow_hi < mhigh_hi
3239 || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo));
3241 /* If precision == N, then mlow, mhigh exceed 2^N
3242 (but they do not exceed 2^(N+1)). */
3244 /* Reduce to lowest terms. */
3245 for (post_shift = lgup; post_shift > 0; post_shift--)
3247 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
3248 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
3258 *post_shift_ptr = post_shift;
3260 if (n < HOST_BITS_PER_WIDE_INT)
3262 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
3263 *multiplier_ptr = GEN_INT (mhigh_lo & mask);
3264 return mhigh_lo >= mask;
3268 *multiplier_ptr = GEN_INT (mhigh_lo);
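/* A classic worked instance: for N == 32, PRECISION == 32 and D == 3 the
   routine arrives at the multiplier 0xAAAAAAAB (== ceil (2^33 / 3)) with
   a post shift of 1, so that for any uint32_t x

     x / 3 == (uint32_t) (((uint64_t) x * 0xAAAAAAAB) >> 33);

   the rounding error never reaches the truncated bits.  */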
/* Compute the inverse of X mod 2**N, i.e., find Y such that X * Y is
   congruent to 1 (mod 2**N).  */
3276 static unsigned HOST_WIDE_INT
3277 invert_mod2n (unsigned HOST_WIDE_INT x, int n)
3279 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
3281 /* The algorithm notes that the choice y = x satisfies
3282 x*y == 1 mod 2^3, since x is assumed odd.
3283 Each iteration doubles the number of bits of significance in y. */
3285 unsigned HOST_WIDE_INT mask;
3286 unsigned HOST_WIDE_INT y = x;
3289 mask = (n == HOST_BITS_PER_WIDE_INT
3290 ? ~(unsigned HOST_WIDE_INT) 0
3291 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
3295 y = y * (2 - x*y) & mask; /* Modulo 2^N */
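/* The doubling step is Newton's iteration for 1/x: if x*y == 1 + m*2^k,
   then x * (y*(2 - x*y)) == (1 + m*2^k)*(1 - m*2^k) == 1 - m^2*2^(2k),
   so each pass at least doubles the number of correct low-order bits.
   e.g. invert_mod2n (3, 8) == 171, since 3 * 171 == 513 == 2*256 + 1.  */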
/* Emit code to adjust ADJ_OPERAND after a multiplication of the wrong
   signedness flavor of OP0 and OP1.  ADJ_OPERAND is already the high half
   of the product OP0 x OP1.  If UNSIGNEDP is nonzero, adjust the signed
   product to become unsigned; if UNSIGNEDP is zero, adjust the unsigned
   product to become signed.
3307 The result is put in TARGET if that is convenient.
3309 MODE is the mode of operation. */
3312 expand_mult_highpart_adjust (enum machine_mode mode, rtx adj_operand, rtx op0,
3313 rtx op1, rtx target, int unsignedp)
3316 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
3318 tem = expand_shift (RSHIFT_EXPR, mode, op0,
                      build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
                      NULL_RTX, 0);
3321 tem = expand_and (mode, tem, op1, NULL_RTX);
  adj_operand
    = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
                     adj_operand);
3326 tem = expand_shift (RSHIFT_EXPR, mode, op1,
                      build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
                      NULL_RTX, 0);
3329 tem = expand_and (mode, tem, op0, NULL_RTX);
  target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
                          target);
  return target;
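/* The identity behind these fixups: reading the same W-bit pattern X as
   unsigned (Xu) or signed (Xs) gives Xu == Xs + (Xs < 0 ? 2^W : 0), so

     uhigh (a, b) == shigh (a, b) + (a < 0 ? b : 0) + (b < 0 ? a : 0)

   modulo 2^W.  The arithmetic shifts by W-1 above build an all-ones mask
   for each negative operand, and the ANDs select the matching summand.  */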
3336 /* Subroutine of expand_mult_highpart. Return the MODE high part of OP. */
3339 extract_high_half (enum machine_mode mode, rtx op)
3341 enum machine_mode wider_mode;
3343 if (mode == word_mode)
3344 return gen_highpart (mode, op);
3346 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3348 wider_mode = GET_MODE_WIDER_MODE (mode);
3349 op = expand_shift (RSHIFT_EXPR, wider_mode, op,
3350 build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode)), 0, 1);
3351 return convert_modes (mode, wider_mode, op, 0);
3354 /* Like expand_mult_highpart, but only consider using a multiplication
3355 optab. OP1 is an rtx for the constant operand. */
3358 expand_mult_highpart_optab (enum machine_mode mode, rtx op0, rtx op1,
3359 rtx target, int unsignedp, int max_cost)
3361 rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode);
3362 enum machine_mode wider_mode;
3366 bool speed = optimize_insn_for_speed_p ();
3368 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3370 wider_mode = GET_MODE_WIDER_MODE (mode);
3371 size = GET_MODE_BITSIZE (mode);
3373 /* Firstly, try using a multiplication insn that only generates the needed
3374 high part of the product, and in the sign flavor of unsignedp. */
3375 if (mul_highpart_cost[speed][mode] < max_cost)
3377 moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
3378 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3379 unsignedp, OPTAB_DIRECT);
3384 /* Secondly, same as above, but use sign flavor opposite of unsignedp.
3385 Need to adjust the result after the multiplication. */
3386 if (size - 1 < BITS_PER_WORD
3387 && (mul_highpart_cost[speed][mode] + 2 * shift_cost[speed][mode][size-1]
3388 + 4 * add_cost[speed][mode] < max_cost))
3390 moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
3391 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3392 unsignedp, OPTAB_DIRECT);
3394 /* We used the wrong signedness. Adjust the result. */
3395 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3399 /* Try widening multiplication. */
3400 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
3401 if (optab_handler (moptab, wider_mode)->insn_code != CODE_FOR_nothing
3402 && mul_widen_cost[speed][wider_mode] < max_cost)
3404 tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
3405 unsignedp, OPTAB_WIDEN);
3407 return extract_high_half (mode, tem);
  /* Try widening the mode and performing a non-widening multiplication.  */
3411 if (optab_handler (smul_optab, wider_mode)->insn_code != CODE_FOR_nothing
3412 && size - 1 < BITS_PER_WORD
3413 && mul_cost[speed][wider_mode] + shift_cost[speed][mode][size-1] < max_cost)
3415 rtx insns, wop0, wop1;
3417 /* We need to widen the operands, for example to ensure the
3418 constant multiplier is correctly sign or zero extended.
3419 Use a sequence to clean-up any instructions emitted by
3420 the conversions if things don't work out. */
3422 wop0 = convert_modes (wider_mode, mode, op0, unsignedp);
3423 wop1 = convert_modes (wider_mode, mode, op1, unsignedp);
3424 tem = expand_binop (wider_mode, smul_optab, wop0, wop1, 0,
3425 unsignedp, OPTAB_WIDEN);
3426 insns = get_insns ();
3432 return extract_high_half (mode, tem);
3436 /* Try widening multiplication of opposite signedness, and adjust. */
3437 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
3438 if (optab_handler (moptab, wider_mode)->insn_code != CODE_FOR_nothing
3439 && size - 1 < BITS_PER_WORD
3440 && (mul_widen_cost[speed][wider_mode] + 2 * shift_cost[speed][mode][size-1]
3441 + 4 * add_cost[speed][mode] < max_cost))
3443 tem = expand_binop (wider_mode, moptab, op0, narrow_op1,
3444 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
3447 tem = extract_high_half (mode, tem);
3448 /* We used the wrong signedness. Adjust the result. */
3449 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3457 /* Emit code to multiply OP0 and OP1 (where OP1 is an integer constant),
3458 putting the high half of the result in TARGET if that is convenient,
   and return where the result is.  If the operation cannot be performed,
   return 0.
3462 MODE is the mode of operation and result.
3464 UNSIGNEDP nonzero means unsigned multiply.
3466 MAX_COST is the total allowed cost for the expanded RTL. */
3469 expand_mult_highpart (enum machine_mode mode, rtx op0, rtx op1,
3470 rtx target, int unsignedp, int max_cost)
3472 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
3473 unsigned HOST_WIDE_INT cnst1;
3475 bool sign_adjust = false;
3476 enum mult_variant variant;
3477 struct algorithm alg;
3479 bool speed = optimize_insn_for_speed_p ();
3481 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
  /* We can't support modes wider than HOST_BITS_PER_WIDE_INT.  */
3483 gcc_assert (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT);
3485 cnst1 = INTVAL (op1) & GET_MODE_MASK (mode);
3487 /* We can't optimize modes wider than BITS_PER_WORD.
3488 ??? We might be able to perform double-word arithmetic if
3489 mode == word_mode, however all the cost calculations in
3490 synth_mult etc. assume single-word operations. */
3491 if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
3492 return expand_mult_highpart_optab (mode, op0, op1, target,
3493 unsignedp, max_cost);
3495 extra_cost = shift_cost[speed][mode][GET_MODE_BITSIZE (mode) - 1];
3497 /* Check whether we try to multiply by a negative constant. */
3498 if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
3501 extra_cost += add_cost[speed][mode];
3504 /* See whether shift/add multiplication is cheap enough. */
3505 if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
3506 max_cost - extra_cost))
3508 /* See whether the specialized multiplication optabs are
3509 cheaper than the shift/add version. */
3510 tem = expand_mult_highpart_optab (mode, op0, op1, target, unsignedp,
3511 alg.cost.cost + extra_cost);
3515 tem = convert_to_mode (wider_mode, op0, unsignedp);
3516 tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
3517 tem = extract_high_half (mode, tem);
3519 /* Adjust result for signedness. */
3521 tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);
3525 return expand_mult_highpart_optab (mode, op0, op1, target,
3526 unsignedp, max_cost);
3530 /* Expand signed modulus of OP0 by a power of two D in mode MODE. */
3533 expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
3535 unsigned HOST_WIDE_INT masklow, maskhigh;
3536 rtx result, temp, shift, label;
3539 logd = floor_log2 (d);
3540 result = gen_reg_rtx (mode);
3542 /* Avoid conditional branches when they're expensive. */
3543 if (BRANCH_COST (optimize_insn_for_speed_p (), false) >= 2
3544 && optimize_insn_for_speed_p ())
      rtx signmask = emit_store_flag (result, LT, op0, const0_rtx,
                                      mode, 0, -1);
3550 signmask = force_reg (mode, signmask);
3551 masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
3552 shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd);
3554 /* Use the rtx_cost of a LSHIFTRT instruction to determine
3555 which instruction sequence to use. If logical right shifts
         are expensive then use 2 XORs, 2 SUBs and an AND, otherwise
3557 use a LSHIFTRT, 1 ADD, 1 SUB and an AND. */
3559 temp = gen_rtx_LSHIFTRT (mode, result, shift);
3560 if (optab_handler (lshr_optab, mode)->insn_code == CODE_FOR_nothing
3561 || rtx_cost (temp, SET, optimize_insn_for_speed_p ()) > COSTS_N_INSNS (2))
3563 temp = expand_binop (mode, xor_optab, op0, signmask,
3564 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3565 temp = expand_binop (mode, sub_optab, temp, signmask,
3566 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3567 temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
3568 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3569 temp = expand_binop (mode, xor_optab, temp, signmask,
3570 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3571 temp = expand_binop (mode, sub_optab, temp, signmask,
3572 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3576 signmask = expand_binop (mode, lshr_optab, signmask, shift,
3577 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3578 signmask = force_reg (mode, signmask);
3580 temp = expand_binop (mode, add_optab, op0, signmask,
3581 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3582 temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
3583 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3584 temp = expand_binop (mode, sub_optab, temp, signmask,
3585 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3591 /* Mask contains the mode's signbit and the significant bits of the
3592 modulus. By including the signbit in the operation, many targets
     can avoid an explicit compare operation in the following comparison
     against zero.  */
3596 masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
3597 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3599 masklow |= (HOST_WIDE_INT) -1 << (GET_MODE_BITSIZE (mode) - 1);
3603 maskhigh = (HOST_WIDE_INT) -1
3604 << (GET_MODE_BITSIZE (mode) - HOST_BITS_PER_WIDE_INT - 1);
3606 temp = expand_binop (mode, and_optab, op0,
3607 immed_double_const (masklow, maskhigh, mode),
3608 result, 1, OPTAB_LIB_WIDEN);
3610 emit_move_insn (result, temp);
3612 label = gen_label_rtx ();
3613 do_cmp_and_jump (result, const0_rtx, GE, mode, label);
3615 temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
3616 0, OPTAB_LIB_WIDEN);
3617 masklow = (HOST_WIDE_INT) -1 << logd;
3619 temp = expand_binop (mode, ior_optab, temp,
3620 immed_double_const (masklow, maskhigh, mode),
3621 result, 1, OPTAB_LIB_WIDEN);
3622 temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
3623 0, OPTAB_LIB_WIDEN);
3625 emit_move_insn (result, temp);
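/* A sketch of one branch-free shape emitted above (the add/and/sub
   flavor used when logical right shifts are cheap), specialized to
   d == 8 and a 32-bit int, assuming an arithmetic right shift of
   negative values:

     int32_t smod8 (int32_t x)
     {
       int32_t s = (x >> 31) & 7;     -- 7 if x < 0, else 0
       return ((x + s) & 7) - s;
     }

   e.g. smod8 (-9) == -1, matching C's truncating division.  */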
3630 /* Expand signed division of OP0 by a power of two D in mode MODE.
3631 This routine is only called for positive values of D. */
3634 expand_sdiv_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
3640 logd = floor_log2 (d);
3641 shift = build_int_cst (NULL_TREE, logd);
3644 && BRANCH_COST (optimize_insn_for_speed_p (),
3647 temp = gen_reg_rtx (mode);
3648 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, 1);
3649 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3650 0, OPTAB_LIB_WIDEN);
3651 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
3654 #ifdef HAVE_conditional_move
3655 if (BRANCH_COST (optimize_insn_for_speed_p (), false)
3660 /* ??? emit_conditional_move forces a stack adjustment via
     compare_from_rtx, so if the sequence is discarded, it will
3662 be lost. Do it now instead. */
3663 do_pending_stack_adjust ();
3666 temp2 = copy_to_mode_reg (mode, op0);
3667 temp = expand_binop (mode, add_optab, temp2, GEN_INT (d-1),
3668 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3669 temp = force_reg (mode, temp);
3671 /* Construct "temp2 = (temp2 < 0) ? temp : temp2". */
3672 temp2 = emit_conditional_move (temp2, LT, temp2, const0_rtx,
3673 mode, temp, temp2, mode, 0);
3676 rtx seq = get_insns ();
3679 return expand_shift (RSHIFT_EXPR, mode, temp2, shift, NULL_RTX, 0);
3685 if (BRANCH_COST (optimize_insn_for_speed_p (),
3688 int ushift = GET_MODE_BITSIZE (mode) - logd;
3690 temp = gen_reg_rtx (mode);
3691 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, -1);
3692 if (shift_cost[optimize_insn_for_speed_p ()][mode][ushift] > COSTS_N_INSNS (1))
3693 temp = expand_binop (mode, and_optab, temp, GEN_INT (d - 1),
3694 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3696 temp = expand_shift (RSHIFT_EXPR, mode, temp,
                             build_int_cst (NULL_TREE, ushift),
                             NULL_RTX, 1);
3699 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3700 0, OPTAB_LIB_WIDEN);
3701 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
3704 label = gen_label_rtx ();
3705 temp = copy_to_mode_reg (mode, op0);
3706 do_cmp_and_jump (temp, const0_rtx, GE, mode, label);
3707 expand_inc (temp, GEN_INT (d - 1));
3709 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
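/* A sketch of the branch-free shape above (store-flag, logical shift,
   add, arithmetic shift), specialized to d == 8 and a 32-bit int:

     int32_t sdiv8 (int32_t x)
     {
       int32_t bias = (int32_t) ((uint32_t) (x >> 31) >> 29);  -- 7 if x < 0
       return (x + bias) >> 3;
     }

   e.g. sdiv8 (-9) == -1, i.e. truncation toward zero.  */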
3712 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
3713 if that is convenient, and returning where the result is.
3714 You may request either the quotient or the remainder as the result;
3715 specify REM_FLAG nonzero to get the remainder.
3717 CODE is the expression code for which kind of division this is;
3718 it controls how rounding is done. MODE is the machine mode to use.
3719 UNSIGNEDP nonzero means do unsigned division. */
3721 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
3722 and then correct it by or'ing in missing high bits
3723 if result of ANDI is nonzero.
3724 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
3725 This could optimize to a bfexts instruction.
   But C doesn't use these operations, so their optimizations are
   left for later.  */
3728 /* ??? For modulo, we don't actually need the highpart of the first product,
3729 the low part will do nicely. And for small divisors, the second multiply
3730 can also be a low-part only multiply or even be completely left out.
3731 E.g. to calculate the remainder of a division by 3 with a 32 bit
3732 multiply, multiply with 0x55555556 and extract the upper two bits;
3733 the result is exact for inputs up to 0x1fffffff.
3734 The input range can be reduced by using cross-sum rules.
3735 For odd divisors >= 3, the following table gives right shift counts
3736 so that if a number is shifted by an integer multiple of the given
3737 amount, the remainder stays the same:
3738 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
3739 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
3740 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
3741 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
3742 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
3744 Cross-sum rules for even numbers can be derived by leaving as many bits
3745 to the right alone as the divisor has zeros to the right.
3746 E.g. if x is an unsigned 32 bit number:
3747 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
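   A sketch of the remainder-by-3 trick described above, assuming 32-bit
   unsigned arithmetic that wraps modulo 2^32:

     uint32_t mod3 (uint32_t x)     -- exact for x <= 0x1fffffff
     {
       return (x * 0x55555556u) >> 30;
     }  */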
3751 expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
3752 rtx op0, rtx op1, rtx target, int unsignedp)
3754 enum machine_mode compute_mode;
3756 rtx quotient = 0, remainder = 0;
3760 optab optab1, optab2;
3761 int op1_is_constant, op1_is_pow2 = 0;
3762 int max_cost, extra_cost;
3763 static HOST_WIDE_INT last_div_const = 0;
3764 static HOST_WIDE_INT ext_op1;
3765 bool speed = optimize_insn_for_speed_p ();
3767 op1_is_constant = GET_CODE (op1) == CONST_INT;
3768 if (op1_is_constant)
3770 ext_op1 = INTVAL (op1);
3772 ext_op1 &= GET_MODE_MASK (mode);
3773 op1_is_pow2 = ((EXACT_POWER_OF_2_OR_ZERO_P (ext_op1)
3774 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1))));
3778 This is the structure of expand_divmod:
3780 First comes code to fix up the operands so we can perform the operations
3781 correctly and efficiently.
3783 Second comes a switch statement with code specific for each rounding mode.
3784 For some special operands this code emits all RTL for the desired
3785 operation, for other cases, it generates only a quotient and stores it in
3786 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
3787 to indicate that it has not done anything.
3789 Last comes code that finishes the operation. If QUOTIENT is set and
3790 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
3791 QUOTIENT is not set, it is computed using trunc rounding.
3793 We try to generate special code for division and remainder when OP1 is a
3794 constant. If |OP1| = 2**n we can use shifts and some other fast
3795 operations. For other values of OP1, we compute a carefully selected
3796 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
3799 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
3800 half of the product. Different strategies for generating the product are
3801 implemented in expand_mult_highpart.
3803 If what we actually want is the remainder, we generate that by another
3804 by-constant multiplication and a subtraction. */
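/* A minimal sketch of the multiply-by-reciprocal strategy just
   described, for SImode and OP1 == 3 (not compiled; udiv3 is a
   hypothetical helper, not the code this function emits).
   0xAAAAAAAB == ceil (2**33 / 3), and taking the upper half of the
   widening multiply plus one more right shift yields the exact
   quotient for every 32-bit input.  */
#if 0
#include <stdint.h>

static uint32_t
udiv3 (uint32_t x)
{
  /* The 64-bit multiply stands in for expand_mult_highpart.  */
  return (uint32_t) (((uint64_t) x * 0xAAAAAAABu) >> 33);
}
#endif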
3806 /* We shouldn't be called with OP1 == const1_rtx, but some of the
3807 code below will malfunction if we are, so check here and handle
3808 the special case if so. */
3809 if (op1 == const1_rtx)
3810 return rem_flag ? const0_rtx : op0;
3812 /* When dividing by -1, we could get an overflow.
3813 negv_optab can handle overflows. */
3814 if (! unsignedp && op1 == constm1_rtx)
3818 return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
3819 ? negv_optab : neg_optab, op0, target, 0);
3823 /* Don't use the function value register as a target
3824 since we have to read it as well as write it,
3825 and function-inlining gets confused by this. */
3826 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
3827 /* Don't clobber an operand while doing a multi-step calculation. */
3828 || ((rem_flag || op1_is_constant)
3829 && (reg_mentioned_p (target, op0)
3830 || (MEM_P (op0) && MEM_P (target))))
3831 || reg_mentioned_p (target, op1)
3832 || (MEM_P (op1) && MEM_P (target))))
3835 /* Get the mode in which to perform this computation. Normally it will
3836 be MODE, but sometimes we can't do the desired operation in MODE.
3837 If so, pick a wider mode in which we can do the operation. Convert
3838 to that mode at the start to avoid repeated conversions.
3840 First see what operations we need. These depend on the expression
3841 we are evaluating. (We assume that divxx3 insns exist under the
3842 same conditions as modxx3 insns, and that these insns don't normally
3843 fail. If these assumptions are not correct, we may generate less
3844 efficient code in some cases.)
3846 Then see if we find a mode in which we can open-code that operation
3847 (either a division, modulus, or shift). Finally, check for the smallest
3848 mode for which we can do the operation with a library call. */
3850 /* We might want to refine this now that we have division-by-constant
3851 optimization. Since expand_mult_highpart tries so many variants, it is
3852 not straightforward to generalize this. Maybe we should make an array
3853 of possible modes in init_expmed? Save this for GCC 2.7. */
3855 optab1 = ((op1_is_pow2 && op1 != const0_rtx)
3856 ? (unsignedp ? lshr_optab : ashr_optab)
3857 : (unsignedp ? udiv_optab : sdiv_optab));
3858 optab2 = ((op1_is_pow2 && op1 != const0_rtx)
3860 : (unsignedp ? udivmod_optab : sdivmod_optab));
3862 for (compute_mode = mode; compute_mode != VOIDmode;
3863 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3864 if (optab_handler (optab1, compute_mode)->insn_code != CODE_FOR_nothing
3865 || optab_handler (optab2, compute_mode)->insn_code != CODE_FOR_nothing)
3868 if (compute_mode == VOIDmode)
3869 for (compute_mode = mode; compute_mode != VOIDmode;
3870 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3871 if (optab_libfunc (optab1, compute_mode)
3872 || optab_libfunc (optab2, compute_mode))
3875 /* If we still couldn't find a mode, use MODE, but expand_binop will probably fail. */
3877 if (compute_mode == VOIDmode)
3878 compute_mode = mode;
3880 if (target && GET_MODE (target) == compute_mode)
3883 tquotient = gen_reg_rtx (compute_mode);
3885 size = GET_MODE_BITSIZE (compute_mode);
3886 #if 0
3887 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3888 (mode), and thereby get better code when OP1 is a constant. Do that
3889 later. It will require going over all usages of SIZE below. */
3890 size = GET_MODE_BITSIZE (mode);
3891 #endif
3893 /* Only deduct something for a REM if the last divide done was
3894 for a different constant. Then set the constant of the last divide. */
3896 max_cost = unsignedp ? udiv_cost[speed][compute_mode] : sdiv_cost[speed][compute_mode];
3897 if (rem_flag && ! (last_div_const != 0 && op1_is_constant
3898 && INTVAL (op1) == last_div_const))
3899 max_cost -= mul_cost[speed][compute_mode] + add_cost[speed][compute_mode];
3901 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
3903 /* Now convert to the best mode to use. */
3904 if (compute_mode != mode)
3906 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
3907 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
3909 /* convert_modes may have placed op1 into a register, so we
3910 must recompute the following. */
3911 op1_is_constant = GET_CODE (op1) == CONST_INT;
3912 op1_is_pow2 = (op1_is_constant
3913 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3915 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
3918 /* If one of the operands is a volatile MEM, copy it into a register. */
3920 if (MEM_P (op0) && MEM_VOLATILE_P (op0))
3921 op0 = force_reg (compute_mode, op0);
3922 if (MEM_P (op1) && MEM_VOLATILE_P (op1))
3923 op1 = force_reg (compute_mode, op1);
3925 /* If we need the remainder or if OP1 is constant, we need to
3926 put OP0 in a register in case it has any queued subexpressions. */
3927 if (rem_flag || op1_is_constant)
3928 op0 = force_reg (compute_mode, op0);
3930 last = get_last_insn ();
3932 /* Promote floor rounding to trunc rounding for unsigned operations. */
3935 if (code == FLOOR_DIV_EXPR)
3936 code = TRUNC_DIV_EXPR;
3937 if (code == FLOOR_MOD_EXPR)
3938 code = TRUNC_MOD_EXPR;
3939 if (code == EXACT_DIV_EXPR && op1_is_pow2)
3940 code = TRUNC_DIV_EXPR;
3943 if (op1 != const0_rtx)
3946 case TRUNC_MOD_EXPR:
3947 case TRUNC_DIV_EXPR:
3948 if (op1_is_constant)
3952 unsigned HOST_WIDE_INT mh;
3953 int pre_shift, post_shift;
3956 unsigned HOST_WIDE_INT d = (INTVAL (op1)
3957 & GET_MODE_MASK (compute_mode));
3959 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3961 pre_shift = floor_log2 (d);
3965 = expand_binop (compute_mode, and_optab, op0,
3966 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3970 return gen_lowpart (mode, remainder);
3972 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3973 build_int_cst (NULL_TREE,
3977 else if (size <= HOST_BITS_PER_WIDE_INT)
3979 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
3981 /* Most significant bit of divisor is set; emit an scc insn. */
3983 quotient = emit_store_flag (tquotient, GEU, op0, op1,
3984 compute_mode, 1, 1);
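/* E.g. in SImode (illustrative): x / 0xc0000000 can only be 0 or 1,
   so the division collapses to the flag x >= 0xc0000000.  */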
3990 /* Find a suitable multiplier and right shift count
3991 instead of multiplying with D. */
3993 mh = choose_multiplier (d, size, size,
3994 &ml, &post_shift, &dummy);
3996 /* If the suggested multiplier is more than SIZE bits,
3997 we can do better for even divisors, using an
3998 initial right shift. */
3999 if (mh != 0 && (d & 1) == 0)
4001 pre_shift = floor_log2 (d & -d);
4002 mh = choose_multiplier (d >> pre_shift, size,
4004 &ml, &post_shift, &dummy);
4014 if (post_shift - 1 >= BITS_PER_WORD)
4018 = (shift_cost[speed][compute_mode][post_shift - 1]
4019 + shift_cost[speed][compute_mode][1]
4020 + 2 * add_cost[speed][compute_mode]);
4021 t1 = expand_mult_highpart (compute_mode, op0, ml,
4023 max_cost - extra_cost);
4026 t2 = force_operand (gen_rtx_MINUS (compute_mode,
4030 (RSHIFT_EXPR, compute_mode, t2,
4031 build_int_cst (NULL_TREE, 1),
4033 t4 = force_operand (gen_rtx_PLUS (compute_mode,
4036 quotient = expand_shift
4037 (RSHIFT_EXPR, compute_mode, t4,
4038 build_int_cst (NULL_TREE, post_shift - 1),
4045 if (pre_shift >= BITS_PER_WORD
4046 || post_shift >= BITS_PER_WORD)
4050 (RSHIFT_EXPR, compute_mode, op0,
4051 build_int_cst (NULL_TREE, pre_shift),
4054 = (shift_cost[speed][compute_mode][pre_shift]
4055 + shift_cost[speed][compute_mode][post_shift]);
4056 t2 = expand_mult_highpart (compute_mode, t1, ml,
4058 max_cost - extra_cost);
4061 quotient = expand_shift
4062 (RSHIFT_EXPR, compute_mode, t2,
4063 build_int_cst (NULL_TREE, post_shift),
4068 else /* Mode too wide to use tricky code. */
4071 insn = get_last_insn ();
4073 && (set = single_set (insn)) != 0
4074 && SET_DEST (set) == quotient)
4075 set_unique_reg_note (insn,
4077 gen_rtx_UDIV (compute_mode, op0, op1));
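/* Sketch of the sequence the mh != 0 branch above produces for SImode
   and d == 7 (kept out of compilation; udiv7 is a hypothetical
   helper).  choose_multiplier yields ml == 0x24924925 with
   post_shift == 3; the multiplier needs SIZE + 1 bits, hence the
   subtract/halve/add fixup around the highpart multiply.  */
#if 0
#include <stdint.h>

static uint32_t
udiv7 (uint32_t x)
{
  uint32_t t1 = (uint32_t) (((uint64_t) x * 0x24924925u) >> 32);
  uint32_t t2 = (x - t1) >> 1;   /* fold in the 33rd multiplier bit */
  return (t1 + t2) >> 2;         /* shift by post_shift - 1 */
}
#endif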
4079 else /* TRUNC_DIV, signed */
4081 unsigned HOST_WIDE_INT ml;
4082 int lgup, post_shift;
4084 HOST_WIDE_INT d = INTVAL (op1);
4085 unsigned HOST_WIDE_INT abs_d;
4087 /* Since d might be INT_MIN, we have to cast to
4088 unsigned HOST_WIDE_INT before negating to avoid
4089 undefined signed overflow. */
4091 ? (unsigned HOST_WIDE_INT) d
4092 : - (unsigned HOST_WIDE_INT) d);
4094 /* n rem d = n rem -d */
4095 if (rem_flag && d < 0)
4098 op1 = gen_int_mode (abs_d, compute_mode);
4104 quotient = expand_unop (compute_mode, neg_optab, op0,
4106 else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
4108 /* This case is not handled correctly below. */
4109 quotient = emit_store_flag (tquotient, EQ, op0, op1,
4110 compute_mode, 1, 1);
4114 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
4115 && (rem_flag ? smod_pow2_cheap[speed][compute_mode]
4116 : sdiv_pow2_cheap[speed][compute_mode])
4117 /* We assume that the cheap metric is true if the
4118 optab has an expander for this mode. */
4119 && ((optab_handler ((rem_flag ? smod_optab
4121 compute_mode)->insn_code
4122 != CODE_FOR_nothing)
4123 || (optab_handler(sdivmod_optab,
4125 ->insn_code != CODE_FOR_nothing)))
4127 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
4131 remainder = expand_smod_pow2 (compute_mode, op0, d);
4133 return gen_lowpart (mode, remainder);
4136 if (sdiv_pow2_cheap[speed][compute_mode]
4137 && ((optab_handler (sdiv_optab, compute_mode)->insn_code
4138 != CODE_FOR_nothing)
4139 || (optab_handler (sdivmod_optab, compute_mode)->insn_code
4140 != CODE_FOR_nothing)))
4141 quotient = expand_divmod (0, TRUNC_DIV_EXPR,
4143 gen_int_mode (abs_d,
4147 quotient = expand_sdiv_pow2 (compute_mode, op0, abs_d);
4149 /* We have computed OP0 / abs(OP1). If OP1 is negative,
4150 negate the quotient. */
4153 insn = get_last_insn ();
4155 && (set = single_set (insn)) != 0
4156 && SET_DEST (set) == quotient
4157 && abs_d < ((unsigned HOST_WIDE_INT) 1
4158 << (HOST_BITS_PER_WIDE_INT - 1)))
4159 set_unique_reg_note (insn,
4161 gen_rtx_DIV (compute_mode,
4168 quotient = expand_unop (compute_mode, neg_optab,
4169 quotient, quotient, 0);
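/* What expand_sdiv_pow2 amounts to for SImode and abs_d == 4
   (illustrative only; assumes an arithmetic right shift of negative
   values): bias negative dividends by d - 1 so the shift truncates
   toward zero rather than toward minus infinity.  */
#if 0
#include <stdint.h>

static int32_t
sdiv4 (int32_t x)
{
  int32_t bias = (x >> 31) & 3;  /* 3 for negative x, else 0 */
  return (x + bias) >> 2;        /* e.g. sdiv4 (-7) == -1 */
}
#endif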
4172 else if (size <= HOST_BITS_PER_WIDE_INT)
4174 choose_multiplier (abs_d, size, size - 1,
4175 &mlr, &post_shift, &lgup);
4176 ml = (unsigned HOST_WIDE_INT) INTVAL (mlr);
4177 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
4181 if (post_shift >= BITS_PER_WORD
4182 || size - 1 >= BITS_PER_WORD)
4185 extra_cost = (shift_cost[speed][compute_mode][post_shift]
4186 + shift_cost[speed][compute_mode][size - 1]
4187 + add_cost[speed][compute_mode]);
4188 t1 = expand_mult_highpart (compute_mode, op0, mlr,
4190 max_cost - extra_cost);
4194 (RSHIFT_EXPR, compute_mode, t1,
4195 build_int_cst (NULL_TREE, post_shift),
4198 (RSHIFT_EXPR, compute_mode, op0,
4199 build_int_cst (NULL_TREE, size - 1),
4203 = force_operand (gen_rtx_MINUS (compute_mode,
4208 = force_operand (gen_rtx_MINUS (compute_mode,
4216 if (post_shift >= BITS_PER_WORD
4217 || size - 1 >= BITS_PER_WORD)
4220 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
4221 mlr = gen_int_mode (ml, compute_mode);
4222 extra_cost = (shift_cost[speed][compute_mode][post_shift]
4223 + shift_cost[speed][compute_mode][size - 1]
4224 + 2 * add_cost[speed][compute_mode]);
4225 t1 = expand_mult_highpart (compute_mode, op0, mlr,
4227 max_cost - extra_cost);
4230 t2 = force_operand (gen_rtx_PLUS (compute_mode,
4234 (RSHIFT_EXPR, compute_mode, t2,
4235 build_int_cst (NULL_TREE, post_shift),
4238 (RSHIFT_EXPR, compute_mode, op0,
4239 build_int_cst (NULL_TREE, size - 1),
4243 = force_operand (gen_rtx_MINUS (compute_mode,
4248 = force_operand (gen_rtx_MINUS (compute_mode,
4253 else /* Mode too wide to use tricky code. */
4256 insn = get_last_insn ();
4258 && (set = single_set (insn)) != 0
4259 && SET_DEST (set) == quotient)
4260 set_unique_reg_note (insn,
4262 gen_rtx_DIV (compute_mode, op0, op1));
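/* Sketch of the ml >= 2**(size - 1) branch above for SImode and
   d == 7 (not compiled; sdiv7 is a hypothetical helper, and an
   arithmetic right shift is assumed).  The multiplier 0x92492493 is
   negative as a signed value, so op0 is added back after the
   highpart multiply, and the sign of op0 supplies the final
   truncation correction.  */
#if 0
#include <stdint.h>

static int32_t
sdiv7 (int32_t x)
{
  int32_t t1 = (int32_t) (((int64_t) x * (int32_t) 0x92492493) >> 32);
  int32_t t2 = (t1 + x) >> 2;    /* post_shift == 2 */
  return t2 - (x >> 31);         /* x >> 31 is -1 for x < 0, else 0 */
}
#endif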
4267 delete_insns_since (last);
4270 case FLOOR_DIV_EXPR:
4271 case FLOOR_MOD_EXPR:
4272 /* We will come here only for signed operations. */
4273 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
4275 unsigned HOST_WIDE_INT mh;
4276 int pre_shift, lgup, post_shift;
4277 HOST_WIDE_INT d = INTVAL (op1);
4282 /* We could just as easily deal with negative constants here,
4283 but it does not seem worth the trouble for GCC 2.6. */
4284 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
4286 pre_shift = floor_log2 (d);
4289 remainder = expand_binop (compute_mode, and_optab, op0,
4290 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
4291 remainder, 0, OPTAB_LIB_WIDEN);
4293 return gen_lowpart (mode, remainder);
4295 quotient = expand_shift
4296 (RSHIFT_EXPR, compute_mode, op0,
4297 build_int_cst (NULL_TREE, pre_shift),
4304 mh = choose_multiplier (d, size, size - 1,
4305 &ml, &post_shift, &lgup);
4308 if (post_shift < BITS_PER_WORD
4309 && size - 1 < BITS_PER_WORD)
4312 (RSHIFT_EXPR, compute_mode, op0,
4313 build_int_cst (NULL_TREE, size - 1),
4315 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
4316 NULL_RTX, 0, OPTAB_WIDEN);
4317 extra_cost = (shift_cost[speed][compute_mode][post_shift]
4318 + shift_cost[speed][compute_mode][size - 1]
4319 + 2 * add_cost[speed][compute_mode]);
4320 t3 = expand_mult_highpart (compute_mode, t2, ml,
4322 max_cost - extra_cost);
4326 (RSHIFT_EXPR, compute_mode, t3,
4327 build_int_cst (NULL_TREE, post_shift),
4329 quotient = expand_binop (compute_mode, xor_optab,
4330 t4, t1, tquotient, 0,
4338 rtx nsign, t1, t2, t3, t4;
4339 t1 = force_operand (gen_rtx_PLUS (compute_mode,
4340 op0, constm1_rtx), NULL_RTX);
4341 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
4343 nsign = expand_shift
4344 (RSHIFT_EXPR, compute_mode, t2,
4345 build_int_cst (NULL_TREE, size - 1),
4347 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
4349 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
4354 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
4356 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4365 delete_insns_since (last);
4367 /* Try using an instruction that produces both the quotient and
4368 remainder, using truncation. We can easily compensate the quotient
4369 or remainder to get floor rounding, once we have the remainder.
4370 Notice that we also compute the final remainder value here,
4371 and return the result right away. */
4372 if (target == 0 || GET_MODE (target) != compute_mode)
4373 target = gen_reg_rtx (compute_mode);
4378 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4379 quotient = gen_reg_rtx (compute_mode);
4384 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4385 remainder = gen_reg_rtx (compute_mode);
4388 if (expand_twoval_binop (sdivmod_optab, op0, op1,
4389 quotient, remainder, 0))
4391 /* This could be computed with a branch-less sequence.
4392 Save that for later. */
4394 rtx label = gen_label_rtx ();
4395 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
4396 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4397 NULL_RTX, 0, OPTAB_WIDEN);
4398 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
4399 expand_dec (quotient, const1_rtx);
4400 expand_inc (remainder, op1);
4402 return gen_lowpart (mode, rem_flag ? remainder : quotient);
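/* The floor compensation above, restated in plain C (illustrative;
   floor_div is a hypothetical helper): truncating division rounds
   toward zero, so the quotient drops by one exactly when the
   remainder is nonzero and the operands' signs differ.  */
#if 0
static int
floor_div (int n, int d)
{
  int q = n / d, r = n % d;   /* C division truncates toward zero */
  if (r != 0 && (n ^ d) < 0)
    q -= 1;                   /* e.g. floor_div (-7, 2) == -4 */
  return q;
}
#endif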
4405 /* No luck with division elimination or divmod. Have to do it
4406 by conditionally adjusting op0 *and* the result. */
4408 rtx label1, label2, label3, label4, label5;
4412 quotient = gen_reg_rtx (compute_mode);
4413 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4414 label1 = gen_label_rtx ();
4415 label2 = gen_label_rtx ();
4416 label3 = gen_label_rtx ();
4417 label4 = gen_label_rtx ();
4418 label5 = gen_label_rtx ();
4419 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4420 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
4421 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4422 quotient, 0, OPTAB_LIB_WIDEN);
4423 if (tem != quotient)
4424 emit_move_insn (quotient, tem);
4425 emit_jump_insn (gen_jump (label5));
4427 emit_label (label1);
4428 expand_inc (adjusted_op0, const1_rtx);
4429 emit_jump_insn (gen_jump (label4));
4431 emit_label (label2);
4432 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
4433 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4434 quotient, 0, OPTAB_LIB_WIDEN);
4435 if (tem != quotient)
4436 emit_move_insn (quotient, tem);
4437 emit_jump_insn (gen_jump (label5));
4439 emit_label (label3);
4440 expand_dec (adjusted_op0, const1_rtx);
4441 emit_label (label4);
4442 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4443 quotient, 0, OPTAB_LIB_WIDEN);
4444 if (tem != quotient)
4445 emit_move_insn (quotient, tem);
4446 expand_dec (quotient, const1_rtx);
4447 emit_label (label5);
4455 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
4458 unsigned HOST_WIDE_INT d = INTVAL (op1);
4459 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4460 build_int_cst (NULL_TREE, floor_log2 (d)),
4462 t2 = expand_binop (compute_mode, and_optab, op0,
4464 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4465 t3 = gen_reg_rtx (compute_mode);
4466 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4467 compute_mode, 1, 1);
4471 lab = gen_label_rtx ();
4472 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4473 expand_inc (t1, const1_rtx);
4478 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4484 /* Try using an instruction that produces both the quotient and
4485 remainder, using truncation. We can easily compensate the
4486 quotient or remainder to get ceiling rounding, once we have the
4487 remainder. Notice that we also compute the final remainder
4488 value here, and return the result right away. */
4489 if (target == 0 || GET_MODE (target) != compute_mode)
4490 target = gen_reg_rtx (compute_mode);
4494 remainder = (REG_P (target)
4495 ? target : gen_reg_rtx (compute_mode));
4496 quotient = gen_reg_rtx (compute_mode);
4500 quotient = (REG_P (target)
4501 ? target : gen_reg_rtx (compute_mode));
4502 remainder = gen_reg_rtx (compute_mode);
4505 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
4508 /* This could be computed with a branch-less sequence.
4509 Save that for later. */
4510 rtx label = gen_label_rtx ();
4511 do_cmp_and_jump (remainder, const0_rtx, EQ,
4512 compute_mode, label);
4513 expand_inc (quotient, const1_rtx);
4514 expand_dec (remainder, op1);
4516 return gen_lowpart (mode, rem_flag ? remainder : quotient);
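/* The unsigned ceiling compensation above, in plain C (illustrative;
   ceil_udiv is a hypothetical helper): any nonzero remainder rounds
   the truncating quotient up.  */
#if 0
static unsigned
ceil_udiv (unsigned n, unsigned d)
{
  unsigned q = n / d, r = n % d;
  return r != 0 ? q + 1 : q;   /* e.g. ceil_udiv (7, 2) == 4 */
}
#endif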
4519 /* No luck with division elimination or divmod. Have to do it
4520 by conditionally adjusting op0 *and* the result. */
4523 rtx adjusted_op0, tem;
4525 quotient = gen_reg_rtx (compute_mode);
4526 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4527 label1 = gen_label_rtx ();
4528 label2 = gen_label_rtx ();
4529 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
4530 compute_mode, label1);
4531 emit_move_insn (quotient, const0_rtx);
4532 emit_jump_insn (gen_jump (label2));
4534 emit_label (label1);
4535 expand_dec (adjusted_op0, const1_rtx);
4536 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
4537 quotient, 1, OPTAB_LIB_WIDEN);
4538 if (tem != quotient)
4539 emit_move_insn (quotient, tem);
4540 expand_inc (quotient, const1_rtx);
4541 emit_label (label2);
4546 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
4547 && INTVAL (op1) >= 0)
4549 /* This is extremely similar to the code for the unsigned case
4550 above. For 2.7 we should merge these variants, but for
4551 2.6.1 I don't want to touch the code for unsigned since that
4552 gets used in C. The signed case will only be used by other languages (Ada). */
4556 unsigned HOST_WIDE_INT d = INTVAL (op1);
4557 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4558 build_int_cst (NULL_TREE, floor_log2 (d)),
4560 t2 = expand_binop (compute_mode, and_optab, op0,
4562 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4563 t3 = gen_reg_rtx (compute_mode);
4564 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4565 compute_mode, 1, 1);
4569 lab = gen_label_rtx ();
4570 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4571 expand_inc (t1, const1_rtx);
4576 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4582 /* Try using an instruction that produces both the quotient and
4583 remainder, using truncation. We can easily compensate the
4584 quotient or remainder to get ceiling rounding, once we have the
4585 remainder. Notice that we also compute the final remainder
4586 value here, and return the result right away. */
4587 if (target == 0 || GET_MODE (target) != compute_mode)
4588 target = gen_reg_rtx (compute_mode);
4591 remainder = (REG_P (target)
4592 ? target : gen_reg_rtx (compute_mode));
4593 quotient = gen_reg_rtx (compute_mode);
4597 quotient = (REG_P (target)
4598 ? target : gen_reg_rtx (compute_mode));
4599 remainder = gen_reg_rtx (compute_mode);
4602 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
4605 /* This could be computed with a branch-less sequence.
4606 Save that for later. */
4608 rtx label = gen_label_rtx ();
4609 do_cmp_and_jump (remainder, const0_rtx, EQ,
4610 compute_mode, label);
4611 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4612 NULL_RTX, 0, OPTAB_WIDEN);
4613 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
4614 expand_inc (quotient, const1_rtx);
4615 expand_dec (remainder, op1);
4617 return gen_lowpart (mode, rem_flag ? remainder : quotient);
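/* The signed variant of the same compensation (illustrative;
   ceil_sdiv is a hypothetical helper): the quotient only moves up
   when truncation rounded it down, i.e. when the remainder is
   nonzero and the operands have the same sign.  */
#if 0
static int
ceil_sdiv (int n, int d)
{
  int q = n / d, r = n % d;
  if (r != 0 && (n ^ d) >= 0)
    q += 1;    /* e.g. ceil_sdiv (7, 2) == 4, ceil_sdiv (-7, 2) == -3 */
  return q;
}
#endif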
4620 /* No luck with division elimination or divmod. Have to do it
4621 by conditionally adjusting op0 *and* the result. */
4623 rtx label1, label2, label3, label4, label5;
4627 quotient = gen_reg_rtx (compute_mode);
4628 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4629 label1 = gen_label_rtx ();
4630 label2 = gen_label_rtx ();
4631 label3 = gen_label_rtx ();
4632 label4 = gen_label_rtx ();
4633 label5 = gen_label_rtx ();
4634 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4635 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
4636 compute_mode, label1);
4637 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4638 quotient, 0, OPTAB_LIB_WIDEN);
4639 if (tem != quotient)
4640 emit_move_insn (quotient, tem);
4641 emit_jump_insn (gen_jump (label5));
4643 emit_label (label1);
4644 expand_dec (adjusted_op0, const1_rtx);
4645 emit_jump_insn (gen_jump (label4));
4647 emit_label (label2);
4648 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
4649 compute_mode, label3);
4650 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4651 quotient, 0, OPTAB_LIB_WIDEN);
4652 if (tem != quotient)
4653 emit_move_insn (quotient, tem);
4654 emit_jump_insn (gen_jump (label5));
4656 emit_label (label3);
4657 expand_inc (adjusted_op0, const1_rtx);
4658 emit_label (label4);
4659 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4660 quotient, 0, OPTAB_LIB_WIDEN);
4661 if (tem != quotient)
4662 emit_move_insn (quotient, tem);
4663 expand_inc (quotient, const1_rtx);
4664 emit_label (label5);
4669 case EXACT_DIV_EXPR:
4670 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
4672 HOST_WIDE_INT d = INTVAL (op1);
4673 unsigned HOST_WIDE_INT ml;
4677 pre_shift = floor_log2 (d & -d);
4678 ml = invert_mod2n (d >> pre_shift, size);
4679 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4680 build_int_cst (NULL_TREE, pre_shift),
4681 NULL_RTX, unsignedp);
4682 quotient = expand_mult (compute_mode, t1,
4683 gen_int_mode (ml, compute_mode),
4686 insn = get_last_insn ();
4687 set_unique_reg_note (insn,
4689 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
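/* Sketch of the exact-division transformation for SImode and d == 7
   (kept out of compilation; exact_div7 is a hypothetical helper).
   invert_mod2n (7, 32) == 0xB6DB6DB7 because
   7 * 0xB6DB6DB7 == 1 (mod 2**32), so when the dividend is known to
   be a multiple of 7, a single non-widening multiply recovers the
   quotient.  */
#if 0
#include <stdint.h>

static uint32_t
exact_div7 (uint32_t x)
{
  /* Only valid when x % 7 == 0, e.g. exact_div7 (21) == 3.  */
  return x * 0xB6DB6DB7u;
}
#endif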
4695 case ROUND_DIV_EXPR:
4696 case ROUND_MOD_EXPR:
4701 label = gen_label_rtx ();
4702 quotient = gen_reg_rtx (compute_mode);
4703 remainder = gen_reg_rtx (compute_mode);
4704 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
4707 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
4708 quotient, 1, OPTAB_LIB_WIDEN);
4709 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
4710 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4711 remainder, 1, OPTAB_LIB_WIDEN);
4713 tem = plus_constant (op1, -1);
4714 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4715 build_int_cst (NULL_TREE, 1),
4717 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
4718 expand_inc (quotient, const1_rtx);
4719 expand_dec (remainder, op1);
4724 rtx abs_rem, abs_op1, tem, mask;
4726 label = gen_label_rtx ();
4727 quotient = gen_reg_rtx (compute_mode);
4728 remainder = gen_reg_rtx (compute_mode);
4729 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
4732 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
4733 quotient, 0, OPTAB_LIB_WIDEN);
4734 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
4735 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4736 remainder, 0, OPTAB_LIB_WIDEN);
4738 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
4739 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
4740 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
4741 build_int_cst (NULL_TREE, 1),
4743 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
4744 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4745 NULL_RTX, 0, OPTAB_WIDEN);
4746 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4747 build_int_cst (NULL_TREE, size - 1),
4749 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
4750 NULL_RTX, 0, OPTAB_WIDEN);
4751 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4752 NULL_RTX, 0, OPTAB_WIDEN);
4753 expand_inc (quotient, tem);
4754 tem = expand_binop (compute_mode, xor_optab, mask, op1,
4755 NULL_RTX, 0, OPTAB_WIDEN);
4756 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4757 NULL_RTX, 0, OPTAB_WIDEN);
4758 expand_dec (remainder, tem);
4761 return gen_lowpart (mode, rem_flag ? remainder : quotient);
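/* The unsigned round-to-nearest compensation above, in plain C
   (illustrative; round_udiv is a hypothetical helper): the quotient
   is bumped exactly when the remainder exceeds (d - 1) / 2, the same
   comparison emitted before the LEU jump.  */
#if 0
static unsigned
round_udiv (unsigned n, unsigned d)
{
  unsigned q = n / d, r = n % d;
  return r > (d - 1) / 2 ? q + 1 : q;   /* e.g. round_udiv (8, 3) == 3 */
}
#endif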
4769 if (target && GET_MODE (target) != compute_mode)
4774 /* Try to produce the remainder without producing the quotient.
4775 If we seem to have a divmod pattern that does not require widening,
4776 don't try widening here. We should really have a WIDEN argument
4777 to expand_twoval_binop, since what we'd really like to do here is
4778 1) try a mod insn in compute_mode
4779 2) try a divmod insn in compute_mode
4780 3) try a div insn in compute_mode and multiply-subtract to get the remainder
4782 4) try the same things with widening allowed. */
4784 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4787 ((optab_handler (optab2, compute_mode)->insn_code
4788 != CODE_FOR_nothing)
4789 ? OPTAB_DIRECT : OPTAB_WIDEN));
4792 /* No luck there. Can we do remainder and divide at once
4793 without a library call? */
4794 remainder = gen_reg_rtx (compute_mode);
4795 if (! expand_twoval_binop ((unsignedp
4799 NULL_RTX, remainder, unsignedp))
4804 return gen_lowpart (mode, remainder);
4807 /* Produce the quotient. Try a quotient insn, but not a library call.
4808 If we have a divmod in this mode, use it in preference to widening
4809 the div (for this test we assume it will not fail). Note that optab2
4810 is set to the one of the two optabs that the call below will use. */
4812 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
4813 op0, op1, rem_flag ? NULL_RTX : target,
4815 ((optab_handler (optab2, compute_mode)->insn_code
4816 != CODE_FOR_nothing)
4817 ? OPTAB_DIRECT : OPTAB_WIDEN));
4821 /* No luck there. Try a quotient-and-remainder insn,
4822 keeping the quotient alone. */
4823 quotient = gen_reg_rtx (compute_mode);
4824 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
4826 quotient, NULL_RTX, unsignedp))
4830 /* Still no luck. If we are not computing the remainder,
4831 use a library call for the quotient. */
4832 quotient = sign_expand_binop (compute_mode,
4833 udiv_optab, sdiv_optab,
4835 unsignedp, OPTAB_LIB_WIDEN);
4842 if (target && GET_MODE (target) != compute_mode)
4847 /* No divide instruction either. Use library for remainder. */
4848 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4850 unsignedp, OPTAB_LIB_WIDEN);
4851 /* No remainder function. Try a quotient-and-remainder
4852 function, keeping the remainder. */
4855 remainder = gen_reg_rtx (compute_mode);
4856 if (!expand_twoval_binop_libfunc
4857 (unsignedp ? udivmod_optab : sdivmod_optab,
4859 NULL_RTX, remainder,
4860 unsignedp ? UMOD : MOD))
4861 remainder = NULL_RTX;
4866 /* We divided. Now finish doing X - Y * (X / Y). */
4867 remainder = expand_mult (compute_mode, quotient, op1,
4868 NULL_RTX, unsignedp);
4869 remainder = expand_binop (compute_mode, sub_optab, op0,
4870 remainder, target, unsignedp,
4875 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4878 /* Return a tree node with data type TYPE, describing the value of X.
4879 Usually this is a VAR_DECL, if there is no obvious better choice.
4880 X may be an expression; however, we only support those expressions
4881 generated by loop.c. */
4884 make_tree (tree type, rtx x)
4888 switch (GET_CODE (x))
4892 HOST_WIDE_INT hi = 0;
4895 && !(TYPE_UNSIGNED (type)
4896 && (GET_MODE_BITSIZE (TYPE_MODE (type))
4897 < HOST_BITS_PER_WIDE_INT)))
4900 t = build_int_cst_wide (type, INTVAL (x), hi);
4906 if (GET_MODE (x) == VOIDmode)
4907 t = build_int_cst_wide (type,
4908 CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
4913 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
4914 t = build_real (type, d);
4921 int units = CONST_VECTOR_NUNITS (x);
4922 tree itype = TREE_TYPE (type);
4927 /* Build a tree with vector elements. */
4928 for (i = units - 1; i >= 0; --i)
4930 rtx elt = CONST_VECTOR_ELT (x, i);
4931 t = tree_cons (NULL_TREE, make_tree (itype, elt), t);
4934 return build_vector (type, t);
4938 return fold_build2 (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4939 make_tree (type, XEXP (x, 1)));
4942 return fold_build2 (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4943 make_tree (type, XEXP (x, 1)));
4946 return fold_build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0)));
4949 return fold_build2 (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
4950 make_tree (type, XEXP (x, 1)));
4953 return fold_build2 (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
4954 make_tree (type, XEXP (x, 1)));
4957 t = unsigned_type_for (type);
4958 return fold_convert (type, build2 (RSHIFT_EXPR, t,
4959 make_tree (t, XEXP (x, 0)),
4960 make_tree (type, XEXP (x, 1))));
4963 t = signed_type_for (type);
4964 return fold_convert (type, build2 (RSHIFT_EXPR, t,
4965 make_tree (t, XEXP (x, 0)),
4966 make_tree (type, XEXP (x, 1))));
4969 if (TREE_CODE (type) != REAL_TYPE)
4970 t = signed_type_for (type);
4974 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
4975 make_tree (t, XEXP (x, 0)),
4976 make_tree (t, XEXP (x, 1))));
4978 t = unsigned_type_for (type);
4979 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
4980 make_tree (t, XEXP (x, 0)),
4981 make_tree (t, XEXP (x, 1))));
4985 t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
4986 GET_CODE (x) == ZERO_EXTEND);
4987 return fold_convert (type, make_tree (t, XEXP (x, 0)));
4990 return make_tree (type, XEXP (x, 0));
4993 t = SYMBOL_REF_DECL (x);
4995 return fold_convert (type, build_fold_addr_expr (t));
4996 /* else fall through. */
4999 t = build_decl (VAR_DECL, NULL_TREE, type);
5001 /* If TYPE is a POINTER_TYPE, X might be Pmode with TYPE_MODE being
5002 ptr_mode. So convert. */
5003 if (POINTER_TYPE_P (type))
5004 x = convert_memory_address (TYPE_MODE (type), x);
5006 /* Note that we do *not* use SET_DECL_RTL here, because we do not
5007 want set_decl_rtl to go adjusting REG_ATTRS for this temporary. */
5008 t->decl_with_rtl.rtl = x;
5014 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
5015 and returning TARGET.
5017 If TARGET is 0, a pseudo-register or constant is returned. */
5020 expand_and (enum machine_mode mode, rtx op0, rtx op1, rtx target)
5024 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
5025 tem = simplify_binary_operation (AND, mode, op0, op1);
5027 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
5031 else if (tem != target)
5032 emit_move_insn (target, tem);
5036 /* Helper function for emit_store_flag. */
5038 emit_store_flag_1 (rtx target, rtx subtarget, enum machine_mode mode,
5042 enum machine_mode target_mode = GET_MODE (target);
5044 /* If we are converting to a wider mode, first convert to
5045 TARGET_MODE, then normalize. This produces better combining
5046 opportunities on machines that have a SIGN_EXTRACT when we are
5047 testing a single bit. This mostly benefits the 68k.
5049 If STORE_FLAG_VALUE does not have the sign bit set when
5050 interpreted in MODE, we can do this conversion as unsigned, which
5051 is usually more efficient. */
5052 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
5054 convert_move (target, subtarget,
5055 (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
5056 && 0 == (STORE_FLAG_VALUE
5057 & ((HOST_WIDE_INT) 1
5058 << (GET_MODE_BITSIZE (mode) -1))));
5065 /* If we want to keep subexpressions around, don't reuse our last target. */
5070 /* Now normalize to the proper value in MODE. Sometimes we don't
5071 have to do anything. */
5072 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
5074 /* STORE_FLAG_VALUE might be the most negative number, so write
5075 the comparison this way to avoid a compile-time warning. */
5076 else if (- normalizep == STORE_FLAG_VALUE)
5077 op0 = expand_unop (mode, neg_optab, op0, subtarget, 0);
5079 /* We don't want to use STORE_FLAG_VALUE < 0 below since this makes
5080 it hard to use a value of just the sign bit due to ANSI integer
5081 constant typing rules. */
5082 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5083 && (STORE_FLAG_VALUE
5084 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))))
5085 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
5086 size_int (GET_MODE_BITSIZE (mode) - 1), subtarget,
5090 gcc_assert (STORE_FLAG_VALUE & 1);
5092 op0 = expand_and (mode, op0, const1_rtx, subtarget);
5093 if (normalizep == -1)
5094 op0 = expand_unop (mode, neg_optab, op0, op0, 0);
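/* For instance, if STORE_FLAG_VALUE were 0x80000000 in SImode, a 0/1
   result comes from a logical right shift of the sign bit by 31, and
   a 0/-1 result from the corresponding arithmetic shift
   (illustrative values).  */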
5097 /* If we were converting to a smaller mode, do the conversion now. */
5098 if (target_mode != mode)
5100 convert_move (target, op0, 0);
5107 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1,
5108 storing the result in TARGET. Normally return TARGET.
5109 Return 0 if that cannot be done.
5111 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
5112 it is VOIDmode, they cannot both be CONST_INT.
5114 UNSIGNEDP is for the case where we have to widen the operands
5115 to perform the operation. It says to use zero-extension.
5117 NORMALIZEP is 1 if we should convert the result to be either zero
5118 or one. NORMALIZEP is -1 if we should convert the result to be
5119 either zero or -1. If NORMALIZEP is zero, the result will be left
5120 "raw" out of the scc insn. */
5123 emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
5124 enum machine_mode mode, int unsignedp, int normalizep)
5127 enum insn_code icode;
5128 enum machine_mode compare_mode;
5129 enum machine_mode target_mode = GET_MODE (target);
5131 rtx last = get_last_insn ();
5132 rtx pattern, comparison;
5135 code = unsigned_condition (code);
5137 /* If one operand is constant, make it the second one. Only do this
5138 if the other operand is not constant as well. */
5140 if (swap_commutative_operands_p (op0, op1))
5145 code = swap_condition (code);
5148 if (mode == VOIDmode)
5149 mode = GET_MODE (op0);
5151 /* For some comparisons with 1 and -1, we can convert this to
5152 comparisons with zero. This will often produce more opportunities for
5153 store-flag insns. */
5158 if (op1 == const1_rtx)
5159 op1 = const0_rtx, code = LE;
5162 if (op1 == constm1_rtx)
5163 op1 = const0_rtx, code = LT;
5166 if (op1 == const1_rtx)
5167 op1 = const0_rtx, code = GT;
5170 if (op1 == constm1_rtx)
5171 op1 = const0_rtx, code = GE;
5174 if (op1 == const1_rtx)
5175 op1 = const0_rtx, code = NE;
5178 if (op1 == const1_rtx)
5179 op1 = const0_rtx, code = EQ;
5185 /* If we are comparing a double-word integer with zero or -1, we can
5186 convert the comparison into one involving a single word. */
5187 if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
5188 && GET_MODE_CLASS (mode) == MODE_INT
5189 && (!MEM_P (op0) || ! MEM_VOLATILE_P (op0)))
5191 if ((code == EQ || code == NE)
5192 && (op1 == const0_rtx || op1 == constm1_rtx))
5194 rtx op00, op01, op0both;
5196 /* Do a logical OR or AND of the two words and compare the result. */
5198 op00 = simplify_gen_subreg (word_mode, op0, mode, 0);
5199 op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD);
5200 op0both = expand_binop (word_mode,
5201 op1 == const0_rtx ? ior_optab : and_optab,
5202 op00, op01, NULL_RTX, unsignedp,
5206 return emit_store_flag (target, code, op0both, op1, word_mode,
5207 unsignedp, normalizep);
5209 else if ((code == LT || code == GE) && op1 == const0_rtx)
5213 /* If testing the sign bit, can just test on high word. */
5214 op0h = simplify_gen_subreg (word_mode, op0, mode,
5215 subreg_highpart_offset (word_mode,
5217 return emit_store_flag (target, code, op0h, op1, word_mode,
5218 unsignedp, normalizep);
5222 /* If this is A < 0 or A >= 0, we can do this by taking the ones
5223 complement of A (for GE) and shifting the sign bit to the low bit. */
5224 if (op1 == const0_rtx && (code == LT || code == GE)
5225 && GET_MODE_CLASS (mode) == MODE_INT
5226 && (normalizep || STORE_FLAG_VALUE == 1
5227 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5228 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
5229 == ((unsigned HOST_WIDE_INT) 1
5230 << (GET_MODE_BITSIZE (mode) - 1))))))
5234 /* If the result is to be wider than OP0, it is best to convert it
5235 first. If it is to be narrower, it is *incorrect* to convert it first. */
5237 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
5239 op0 = convert_modes (target_mode, mode, op0, 0);
5243 if (target_mode != mode)
5247 op0 = expand_unop (mode, one_cmpl_optab, op0,
5248 ((STORE_FLAG_VALUE == 1 || normalizep)
5249 ? 0 : subtarget), 0);
5251 if (STORE_FLAG_VALUE == 1 || normalizep)
5252 /* If we are supposed to produce a 0/1 value, we want to do
5253 a logical shift from the sign bit to the low-order bit; for
5254 a -1/0 value, we do an arithmetic shift. */
5255 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
5256 size_int (GET_MODE_BITSIZE (mode) - 1),
5257 subtarget, normalizep != -1);
5259 if (mode != target_mode)
5260 op0 = convert_modes (target_mode, mode, op0, 0);
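/* In plain 32-bit C (illustrative): (A < 0) is (unsigned) A >> 31,
   and (A >= 0) is (unsigned) ~A >> 31; that is what the one's
   complement and shift emitted above compute.  */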
5265 icode = setcc_gen_code[(int) code];
5267 if (icode != CODE_FOR_nothing)
5269 insn_operand_predicate_fn pred;
5271 /* We think we may be able to do this with an scc insn. Emit the
5272 comparison and then the scc insn. */
5274 do_pending_stack_adjust ();
5275 last = get_last_insn ();
5278 = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX);
5279 if (CONSTANT_P (comparison))
5281 switch (GET_CODE (comparison))
5284 if (comparison == const0_rtx)
5288 #ifdef FLOAT_STORE_FLAG_VALUE
5290 if (comparison == CONST0_RTX (GET_MODE (comparison)))
5298 if (normalizep == 1)
5300 if (normalizep == -1)
5302 return const_true_rtx;
5305 /* The code of COMPARISON may not match CODE if compare_from_rtx
5306 decided to swap its operands and reverse the original code.
5308 We know that compare_from_rtx returns either a CONST_INT or
5309 a new comparison code, so it is safe to just extract the
5310 code from COMPARISON. */
5311 code = GET_CODE (comparison);
5313 /* Get a reference to the target in the proper mode for this insn. */
5314 compare_mode = insn_data[(int) icode].operand[0].mode;
5316 pred = insn_data[(int) icode].operand[0].predicate;
5317 if (optimize || ! (*pred) (subtarget, compare_mode))
5318 subtarget = gen_reg_rtx (compare_mode);
5320 pattern = GEN_FCN (icode) (subtarget);
5323 emit_insn (pattern);
5324 return emit_store_flag_1 (target, subtarget, compare_mode,
5330 /* We don't have an scc insn, so try a cstore insn. */
5332 for (compare_mode = mode; compare_mode != VOIDmode;
5333 compare_mode = GET_MODE_WIDER_MODE (compare_mode))
5335 icode = optab_handler (cstore_optab, compare_mode)->insn_code;
5336 if (icode != CODE_FOR_nothing)
5340 if (icode != CODE_FOR_nothing)
5342 enum machine_mode result_mode
5343 = insn_data[(int) icode].operand[0].mode;
5344 rtx cstore_op0 = op0;
5345 rtx cstore_op1 = op1;
5347 do_pending_stack_adjust ();
5348 last = get_last_insn ();
5350 if (compare_mode != mode)
5352 cstore_op0 = convert_modes (compare_mode, mode, cstore_op0,
5354 cstore_op1 = convert_modes (compare_mode, mode, cstore_op1,
5358 if (!insn_data[(int) icode].operand[2].predicate (cstore_op0,
5360 cstore_op0 = copy_to_mode_reg (compare_mode, cstore_op0);
5362 if (!insn_data[(int) icode].operand[3].predicate (cstore_op1,
5364 cstore_op1 = copy_to_mode_reg (compare_mode, cstore_op1);
5366 comparison = gen_rtx_fmt_ee (code, result_mode, cstore_op0,
5370 if (optimize || !(insn_data[(int) icode].operand[0].predicate
5371 (subtarget, result_mode)))
5372 subtarget = gen_reg_rtx (result_mode);
5374 pattern = GEN_FCN (icode) (subtarget, comparison, cstore_op0,
5379 emit_insn (pattern);
5380 return emit_store_flag_1 (target, subtarget, result_mode,
5386 delete_insns_since (last);
5388 /* If optimizing, use different pseudo registers for each insn, instead
5389 of reusing the same pseudo. This leads to better CSE, but slows
5390 down the compiler, since there are more pseudos. */
5391 subtarget = (!optimize
5392 && (target_mode == mode)) ? target : NULL_RTX;
5394 /* If we reached here, we can't do this with a scc insn. However, there
5395 are some comparisons that can be done directly. For example, if
5396 this is an equality comparison of integers, we can try to exclusive-or
5397 (or subtract) the two operands and use a recursive call to try the
5398 comparison with zero. Don't do any of these cases if branches are very cheap. */
5401 if (BRANCH_COST (optimize_insn_for_speed_p (),
5403 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
5404 && op1 != const0_rtx)
5406 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
5410 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
5413 tem = emit_store_flag (target, code, tem, const0_rtx,
5414 mode, unsignedp, normalizep);
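/* I.e. (A == B) is recast as ((A ^ B) == 0) or ((A - B) == 0),
   reducing an equality test against an arbitrary operand to one
   against zero.  */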
5416 delete_insns_since (last);
5420 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
5421 the constant zero. Reject all other comparisons at this point. Only
5422 do LE and GT if branches are expensive since they are expensive on
5423 2-operand machines. */
5425 if (BRANCH_COST (optimize_insn_for_speed_p (),
5427 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
5428 || (code != EQ && code != NE
5429 && (BRANCH_COST (optimize_insn_for_speed_p (),
5430 false) <= 1 || (code != LE && code != GT))))
5433 /* See what we need to return. We can only return a 1, -1, or the sign bit. */
5436 if (normalizep == 0)
5438 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
5439 normalizep = STORE_FLAG_VALUE;
5441 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5442 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
5443 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
5449 /* Try to put the result of the comparison in the sign bit. Assume we can't
5450 do the necessary operation below. */
5454 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
5455 the sign bit set. */
5459 /* This is destructive, so SUBTARGET can't be OP0. */
5460 if (rtx_equal_p (subtarget, op0))
5463 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
5466 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
5470 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
5471 number of bits in the mode of OP0, minus one. */
5475 if (rtx_equal_p (subtarget, op0))
5478 tem = expand_shift (RSHIFT_EXPR, mode, op0,
5479 size_int (GET_MODE_BITSIZE (mode) - 1),
5481 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
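/* In plain 32-bit C (illustrative): the sign bit of (A | (A - 1)) is
   the value of A <= 0, and the sign bit of ((A >> 31) - A) is the
   value of A > 0; the normalization shift at the end extracts it.  */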
5485 if (code == EQ || code == NE)
5487 /* For EQ or NE, one way to do the comparison is to apply an operation
5488 that converts the operand into a positive number if it is nonzero
5489 or zero if it was originally zero. Then, for EQ, we subtract 1 and
5490 for NE we negate. This puts the result in the sign bit. Then we
5491 normalize with a shift, if needed.
5493 Two operations that can do the above actions are ABS and FFS, so try
5494 them. If that doesn't work, and MODE is smaller than a full word,
5495 we can use zero-extension to the wider mode (an unsigned conversion)
5496 as the operation. */
5498 /* Note that ABS doesn't yield a positive number for INT_MIN, but
5499 that is compensated by the subsequent overflow when subtracting one. */
5502 if (optab_handler (abs_optab, mode)->insn_code != CODE_FOR_nothing)
5503 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
5504 else if (optab_handler (ffs_optab, mode)->insn_code != CODE_FOR_nothing)
5505 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
5506 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5508 tem = convert_modes (word_mode, mode, op0, 1);
5515 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
5518 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
5521 /* If we couldn't do it that way, for NE we can "or" the two's complement
5522 of the value with itself. For EQ, we take the one's complement of
5523 that "or", which is an extra insn, so we only handle EQ if branches
5528 || BRANCH_COST (optimize_insn_for_speed_p (),
5531 if (rtx_equal_p (subtarget, op0))
5534 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
5535 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
5538 if (tem && code == EQ)
5539 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
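/* In plain 32-bit C (illustrative): the sign bit of (A | -A) is
   A != 0; the extra one's complement above flips it for EQ.  */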
5543 if (tem && normalizep)
5544 tem = expand_shift (RSHIFT_EXPR, mode, tem,
5545 size_int (GET_MODE_BITSIZE (mode) - 1),
5546 subtarget, normalizep == 1);
5550 if (GET_MODE (tem) != target_mode)
5552 convert_move (target, tem, 0);
5555 else if (!subtarget)
5557 emit_move_insn (target, tem);
5562 delete_insns_since (last);
5567 /* Like emit_store_flag, but always succeeds. */
5570 emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
5571 enum machine_mode mode, int unsignedp, int normalizep)
5575 /* First see if emit_store_flag can do the job. */
5576 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
5580 if (normalizep == 0)
5583 /* If this failed, we have to do this with set/compare/jump/set code. */
5586 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
5587 target = gen_reg_rtx (GET_MODE (target));
5589 emit_move_insn (target, const1_rtx);
5590 label = gen_label_rtx ();
5591 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
5594 emit_move_insn (target, const0_rtx);
5600 /* Perform possibly multi-word comparison and conditional jump to LABEL
5601 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE. This is
5602 now a thin wrapper around do_compare_rtx_and_jump. */
5605 do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, enum machine_mode mode,
5608 int unsignedp = (op == LTU || op == LEU || op == GTU || op == GEU);
5609 do_compare_rtx_and_jump (arg1, arg2, op, unsignedp, mode,
5610 NULL_RTX, NULL_RTX, label);