/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "flags.h"
#include "insn-config.h"
#include "expr.h"
#include "optabs.h"
#include "real.h"
#include "recog.h"
#include "langhooks.h"
#include "df.h"
#include "target.h"
static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT, rtx);
static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT, rtx);
static rtx extract_fixed_bit_field (enum machine_mode, rtx,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, rtx, int);
static rtx mask_rtx (enum machine_mode, int, int, int);
static rtx lshift_value (enum machine_mode, rtx, int, int);
static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, int);
static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx);
static rtx expand_smod_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
static rtx expand_sdiv_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
/* Test whether a value is zero or a power of two.  */
#define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
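/* Editor's note (illustration, not original source): subtracting 1 from
   a power of two clears its single set bit and sets every bit below it,
   so ANDing the two yields zero exactly for powers of two and for zero:

     EXACT_POWER_OF_2_OR_ZERO_P (8)    ((8 & 7) == 0)    -> true
     EXACT_POWER_OF_2_OR_ZERO_P (12)   ((12 & 11) == 8)  -> false
     EXACT_POWER_OF_2_OR_ZERO_P (0)    ((0 & -1) == 0)   -> true  */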
/* Nonzero means divides or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   instructions.  */

static bool sdiv_pow2_cheap[2][NUM_MACHINE_MODES];
static bool smod_pow2_cheap[2][NUM_MACHINE_MODES];
#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
#endif
/* For compilers that support multiple targets with different word sizes,
   MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD.  An example
   is the H8/300(H) compiler.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif
/* Reduce conditional compilation elsewhere.  */
#ifndef HAVE_insv
#define HAVE_insv 0
#define CODE_FOR_insv CODE_FOR_nothing
#define gen_insv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extv
#define HAVE_extv 0
#define CODE_FOR_extv CODE_FOR_nothing
#define gen_extv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extzv
#define HAVE_extzv 0
#define CODE_FOR_extzv CODE_FOR_nothing
#define gen_extzv(a,b,c,d) NULL_RTX
#endif
/* Cost of various pieces of RTL.  Note that some of these are indexed by
   shift count and some by mode.  */
static int zero_cost[2];
static int add_cost[2][NUM_MACHINE_MODES];
static int neg_cost[2][NUM_MACHINE_MODES];
static int shift_cost[2][NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
static int shiftadd_cost[2][NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
static int shiftsub0_cost[2][NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
static int shiftsub1_cost[2][NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
static int mul_cost[2][NUM_MACHINE_MODES];
static int sdiv_cost[2][NUM_MACHINE_MODES];
static int udiv_cost[2][NUM_MACHINE_MODES];
static int mul_widen_cost[2][NUM_MACHINE_MODES];
static int mul_highpart_cost[2][NUM_MACHINE_MODES];
void
init_expmed (void)
{
  struct
  {
    struct rtx_def reg;        rtunion reg_fld[2];
    struct rtx_def plus;       rtunion plus_fld1;
    struct rtx_def neg;
    struct rtx_def mult;       rtunion mult_fld1;
    struct rtx_def sdiv;       rtunion sdiv_fld1;
    struct rtx_def udiv;       rtunion udiv_fld1;
    struct rtx_def zext;
    struct rtx_def sdiv_32;    rtunion sdiv_32_fld1;
    struct rtx_def smod_32;    rtunion smod_32_fld1;
    struct rtx_def wide_mult;  rtunion wide_mult_fld1;
    struct rtx_def wide_lshr;  rtunion wide_lshr_fld1;
    struct rtx_def wide_trunc;
    struct rtx_def shift;      rtunion shift_fld1;
    struct rtx_def shift_mult; rtunion shift_mult_fld1;
    struct rtx_def shift_add;  rtunion shift_add_fld1;
    struct rtx_def shift_sub0; rtunion shift_sub0_fld1;
    struct rtx_def shift_sub1; rtunion shift_sub1_fld1;
  } all;

  rtx pow2[MAX_BITS_PER_WORD];
  rtx cint[MAX_BITS_PER_WORD];
  int m, n;
  enum machine_mode mode, wider_mode;
  int speed;

  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      pow2[m] = GEN_INT ((HOST_WIDE_INT) 1 << m);
      cint[m] = GEN_INT (m);
    }

  memset (&all, 0, sizeof all);
  PUT_CODE (&all.reg, REG);
  /* Avoid using hard regs in ways which may be unsupported.  */
  SET_REGNO (&all.reg, LAST_VIRTUAL_REGISTER + 1);

  PUT_CODE (&all.plus, PLUS);
  XEXP (&all.plus, 0) = &all.reg;
  XEXP (&all.plus, 1) = &all.reg;

  PUT_CODE (&all.neg, NEG);
  XEXP (&all.neg, 0) = &all.reg;

  PUT_CODE (&all.mult, MULT);
  XEXP (&all.mult, 0) = &all.reg;
  XEXP (&all.mult, 1) = &all.reg;

  PUT_CODE (&all.sdiv, DIV);
  XEXP (&all.sdiv, 0) = &all.reg;
  XEXP (&all.sdiv, 1) = &all.reg;

  PUT_CODE (&all.udiv, UDIV);
  XEXP (&all.udiv, 0) = &all.reg;
  XEXP (&all.udiv, 1) = &all.reg;

  PUT_CODE (&all.sdiv_32, DIV);
  XEXP (&all.sdiv_32, 0) = &all.reg;
  XEXP (&all.sdiv_32, 1) = 32 < MAX_BITS_PER_WORD ? cint[32] : GEN_INT (32);

  PUT_CODE (&all.smod_32, MOD);
  XEXP (&all.smod_32, 0) = &all.reg;
  XEXP (&all.smod_32, 1) = XEXP (&all.sdiv_32, 1);

  PUT_CODE (&all.zext, ZERO_EXTEND);
  XEXP (&all.zext, 0) = &all.reg;

  PUT_CODE (&all.wide_mult, MULT);
  XEXP (&all.wide_mult, 0) = &all.zext;
  XEXP (&all.wide_mult, 1) = &all.zext;

  PUT_CODE (&all.wide_lshr, LSHIFTRT);
  XEXP (&all.wide_lshr, 0) = &all.wide_mult;

  PUT_CODE (&all.wide_trunc, TRUNCATE);
  XEXP (&all.wide_trunc, 0) = &all.wide_lshr;

  PUT_CODE (&all.shift, ASHIFT);
  XEXP (&all.shift, 0) = &all.reg;

  PUT_CODE (&all.shift_mult, MULT);
  XEXP (&all.shift_mult, 0) = &all.reg;

  PUT_CODE (&all.shift_add, PLUS);
  XEXP (&all.shift_add, 0) = &all.shift_mult;
  XEXP (&all.shift_add, 1) = &all.reg;

  PUT_CODE (&all.shift_sub0, MINUS);
  XEXP (&all.shift_sub0, 0) = &all.shift_mult;
  XEXP (&all.shift_sub0, 1) = &all.reg;

  PUT_CODE (&all.shift_sub1, MINUS);
  XEXP (&all.shift_sub1, 0) = &all.reg;
  XEXP (&all.shift_sub1, 1) = &all.shift_mult;
  for (speed = 0; speed < 2; speed++)
    {
      crtl->maybe_hot_insn_p = speed;
      zero_cost[speed] = rtx_cost (const0_rtx, SET, speed);

      for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
           mode != VOIDmode;
           mode = GET_MODE_WIDER_MODE (mode))
        {
          PUT_MODE (&all.reg, mode);
          PUT_MODE (&all.plus, mode);
          PUT_MODE (&all.neg, mode);
          PUT_MODE (&all.mult, mode);
          PUT_MODE (&all.sdiv, mode);
          PUT_MODE (&all.udiv, mode);
          PUT_MODE (&all.sdiv_32, mode);
          PUT_MODE (&all.smod_32, mode);
          PUT_MODE (&all.wide_trunc, mode);
          PUT_MODE (&all.shift, mode);
          PUT_MODE (&all.shift_mult, mode);
          PUT_MODE (&all.shift_add, mode);
          PUT_MODE (&all.shift_sub0, mode);
          PUT_MODE (&all.shift_sub1, mode);

          add_cost[speed][mode] = rtx_cost (&all.plus, SET, speed);
          neg_cost[speed][mode] = rtx_cost (&all.neg, SET, speed);
          mul_cost[speed][mode] = rtx_cost (&all.mult, SET, speed);
          sdiv_cost[speed][mode] = rtx_cost (&all.sdiv, SET, speed);
          udiv_cost[speed][mode] = rtx_cost (&all.udiv, SET, speed);

          sdiv_pow2_cheap[speed][mode] = (rtx_cost (&all.sdiv_32, SET, speed)
                                          <= 2 * add_cost[speed][mode]);
          smod_pow2_cheap[speed][mode] = (rtx_cost (&all.smod_32, SET, speed)
                                          <= 4 * add_cost[speed][mode]);
          wider_mode = GET_MODE_WIDER_MODE (mode);
          if (wider_mode != VOIDmode)
            {
              PUT_MODE (&all.zext, wider_mode);
              PUT_MODE (&all.wide_mult, wider_mode);
              PUT_MODE (&all.wide_lshr, wider_mode);
              XEXP (&all.wide_lshr, 1) = GEN_INT (GET_MODE_BITSIZE (mode));

              mul_widen_cost[speed][wider_mode]
                = rtx_cost (&all.wide_mult, SET, speed);
              mul_highpart_cost[speed][mode]
                = rtx_cost (&all.wide_trunc, SET, speed);
            }

          shift_cost[speed][mode][0] = 0;
          shiftadd_cost[speed][mode][0] = shiftsub0_cost[speed][mode][0]
            = shiftsub1_cost[speed][mode][0] = add_cost[speed][mode];

          n = MIN (MAX_BITS_PER_WORD, GET_MODE_BITSIZE (mode));
          for (m = 1; m < n; m++)
            {
              XEXP (&all.shift, 1) = cint[m];
              XEXP (&all.shift_mult, 1) = pow2[m];

              shift_cost[speed][mode][m] = rtx_cost (&all.shift, SET, speed);
              shiftadd_cost[speed][mode][m] = rtx_cost (&all.shift_add, SET, speed);
              shiftsub0_cost[speed][mode][m] = rtx_cost (&all.shift_sub0, SET, speed);
              shiftsub1_cost[speed][mode][m] = rtx_cost (&all.shift_sub1, SET, speed);
            }
        }
    }
  default_rtl_profile ();
}
/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (enum machine_mode mode, rtx x)
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}
/* Report on the availability of insv/extv/extzv and the desired mode
   of each of their operands.  Returns MAX_MACHINE_MODE if HAVE_foo
   is false; else the mode of the specified operand.  If OPNO is -1,
   all the caller cares about is whether the insn is available.  */

enum machine_mode
mode_for_extraction (enum extraction_pattern pattern, int opno)
{
  const struct insn_data *data;

  switch (pattern)
    {
    case EP_insv:
      if (HAVE_insv)
        {
          data = &insn_data[CODE_FOR_insv];
          break;
        }
      return MAX_MACHINE_MODE;

    case EP_extv:
      if (HAVE_extv)
        {
          data = &insn_data[CODE_FOR_extv];
          break;
        }
      return MAX_MACHINE_MODE;

    case EP_extzv:
      if (HAVE_extzv)
        {
          data = &insn_data[CODE_FOR_extzv];
          break;
        }
      return MAX_MACHINE_MODE;

    default:
      gcc_unreachable ();
    }

  if (opno == -1)
    return VOIDmode;

  /* Everyone who uses this function used to follow it with
     if (result == VOIDmode) result = word_mode; */
  if (data->operand[opno].mode == VOIDmode)
    return word_mode;
  return data->operand[opno].mode;
}
/* Return true if X, of mode MODE, matches the predicate for operand
   OPNO of instruction ICODE.  Allow volatile memories, regardless of
   the ambient volatile_ok setting.  */

static bool
check_predicate_volatile_ok (enum insn_code icode, int opno,
                             rtx x, enum machine_mode mode)
{
  bool save_volatile_ok, result;

  save_volatile_ok = volatile_ok;
  volatile_ok = true;
  result = insn_data[(int) icode].operand[opno].predicate (x, mode);
  volatile_ok = save_volatile_ok;

  return result;
}
/* A subroutine of store_bit_field, with the same arguments.  Return true
   if the operation could be implemented.

   If FALLBACK_P is true, fall back to store_fixed_bit_field if we have
   no other way of implementing the operation.  If FALLBACK_P is false,
   return false instead.  */

static bool
store_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                   unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
                   rtx value, bool fallback_p)
{
  unsigned int unit
    = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset, bitpos;
  rtx op0 = str_rtx;
  int byte_offset;
  rtx orig_value;

  enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
         but I think that is a mistake.  WORDS_BIG_ENDIAN is
         meaningful at a much higher level; when structures are copied
         between memory and regs, the higher-numbered regs
         always get higher addresses.  */
      int inner_mode_size = GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)));
      int outer_mode_size = GET_MODE_SIZE (GET_MODE (op0));

      byte_offset = 0;

      /* Paradoxical subregs need special handling on big endian machines.  */
      if (SUBREG_BYTE (op0) == 0 && inner_mode_size < outer_mode_size)
        {
          int difference = inner_mode_size - outer_mode_size;

          if (WORDS_BIG_ENDIAN)
            byte_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            byte_offset += difference % UNITS_PER_WORD;
        }
      else
        byte_offset = SUBREG_BYTE (op0);

      bitnum += byte_offset * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }
  /* No action is needed if the target is a register and if the field
     lies completely outside that register.  This can occur if the source
     code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
    return true;

  /* Use vec_set patterns for inserting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && (optab_handler (vec_set_optab, GET_MODE (op0))->insn_code
          != CODE_FOR_nothing)
      && fieldmode == GET_MODE_INNER (GET_MODE (op0))
      && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
      && !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      int icode = (int) optab_handler (vec_set_optab, outermode)->insn_code;
      int pos = bitnum / GET_MODE_BITSIZE (innermode);
      rtx rtxpos = GEN_INT (pos);
      rtx src = value;
      rtx dest = op0;
      rtx pat, seq;
      enum machine_mode mode0 = insn_data[icode].operand[0].mode;
      enum machine_mode mode1 = insn_data[icode].operand[1].mode;
      enum machine_mode mode2 = insn_data[icode].operand[2].mode;

      start_sequence ();

      if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
        src = copy_to_mode_reg (mode1, src);

      if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
        rtxpos = copy_to_mode_reg (mode2, rtxpos);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
                  && (*insn_data[icode].operand[1].predicate) (src, mode1)
                  && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));
      pat = GEN_FCN (icode) (dest, src, rtxpos);
      seq = get_insns ();
      end_sequence ();
      if (pat)
        {
          emit_insn (seq);
          emit_insn (pat);
          return true;
        }
    }
  /* If the target is a register, overwriting the entire object, or storing
     a full-word or multi-word field can be done with just a SUBREG.

     If the target is memory, storing any naturally aligned field can be
     done with a simple store.  For targets that support fast unaligned
     memory, any naturally sized, unit aligned field can be done directly.  */

  offset = bitnum / unit;
  bitpos = bitnum % unit;
  byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
                + (offset * UNITS_PER_WORD);
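  /* Editor's note (worked example, not original source): for a REG target
     on a 32-bit word machine (BITS_PER_WORD == 32, UNITS_PER_WORD == 4),
     UNIT is 32, so BITNUM == 37 gives OFFSET == 1 (the second word),
     BITPOS == 5 (bit 5 within that word), and
     byte_offset == (37 % 32) / 8 + 1 * 4 == 4.  */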
  if (bitpos == 0
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (!MEM_P (op0)
          ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
              || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
             && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
          : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
             || (offset * BITS_PER_UNIT % bitsize == 0
                 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
    {
      if (MEM_P (op0))
        op0 = adjust_address (op0, fieldmode, offset);
      else if (GET_MODE (op0) != fieldmode)
        op0 = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0),
                                   byte_offset);
      emit_move_insn (op0, value);
      return true;
    }
  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  This must come after the entire register case above,
     since that case is valid for any mode.  The following cases are only
     valid for integral modes.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (MEM_P (op0))
          op0 = adjust_address (op0, imode, 0);
        else
          {
            gcc_assert (imode != BLKmode);
            op0 = gen_lowpart (imode, op0);
          }
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (MEM_P (op0))
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && !MEM_P (op0)
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (!MEM_P (op0)
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (optab_handler (movstrict_optab, fieldmode)->insn_code
          != CODE_FOR_nothing))
    {
      int icode = optab_handler (movstrict_optab, fieldmode)->insn_code;
      rtx insn;
      rtx start = get_last_insn ();
      rtx arg0 = op0;

      /* Get appropriate low part of the value being stored.  */
      if (GET_CODE (value) == CONST_INT || REG_P (value))
        value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
                 || GET_CODE (value) == LABEL_REF
                 || GET_CODE (value) == CONST))
        value = convert_to_mode (fieldmode, value, 0);

      if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
        value = copy_to_mode_reg (fieldmode, value);

      if (GET_CODE (op0) == SUBREG)
        {
          /* Else we've got some float mode source being extracted into
             a different float mode destination -- this combination of
             subregs results in Severe Tire Damage.  */
          gcc_assert (GET_MODE (SUBREG_REG (op0)) == fieldmode
                      || GET_MODE_CLASS (fieldmode) == MODE_INT
                      || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
          arg0 = SUBREG_REG (op0);
        }

      insn = (GEN_FCN (icode)
              (gen_rtx_SUBREG (fieldmode, arg0,
                               (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
                               + (offset * UNITS_PER_WORD)),
               value));
      if (insn)
        {
          emit_insn (insn);
          return true;
        }
      delete_insns_since (start);
    }
  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.
         However, only do that if the value is not BLKmode.  */

      unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;
      rtx last;

      /* This is the mode we must force value to, so that there will be enough
         subwords to extract.  Note that fieldmode will often (always?) be
         VOIDmode, because that is what store_field uses to indicate that this
         is a bit field, but passing VOIDmode to operand_subword_force
         is not allowed.  */
      fieldmode = GET_MODE (value);
      if (fieldmode == VOIDmode)
        fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);

      last = get_last_insn ();
      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          unsigned int wordnum = (backwards ? nwords - i - 1 : i);
          unsigned int bit_offset = (backwards
                                     ? MAX ((int) bitsize - ((int) i + 1)
                                            * BITS_PER_WORD,
                                            0)
                                     : (int) i * BITS_PER_WORD);
          rtx value_word = operand_subword_force (value, wordnum, fieldmode);

          if (!store_bit_field_1 (op0, MIN (BITS_PER_WORD,
                                            bitsize - i * BITS_PER_WORD),
                                  bitnum + bit_offset, word_mode,
                                  value_word, fallback_p))
            {
              delete_insns_since (last);
              return false;
            }
        }
      return true;
    }
  /* From here on we can assume that the field to be stored in is
     a full-word (whatever type that is), since it is shorter than a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (!MEM_P (op0))
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (!REG_P (op0))
            {
              /* Since this is a destination (lvalue), we can't copy
                 it to a pseudo.  We can remove a SUBREG that does not
                 change the size of the operand.  Such a SUBREG may
                 have been added above.  */
              gcc_assert (GET_CODE (op0) == SUBREG
                          && (GET_MODE_SIZE (GET_MODE (op0))
                              == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))));
              op0 = SUBREG_REG (op0);
            }
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, (offset * UNITS_PER_WORD));
        }
      offset = 0;
    }

  /* If VALUE has a floating-point or complex mode, access it as an
     integer of the corresponding size.  This can occur on a machine
     with 64 bit registers that uses SFmode for float.  It can also
     occur for unaligned float or complex fields.  */

  orig_value = value;
  if (GET_MODE (value) != VOIDmode
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
    {
      value = gen_reg_rtx (int_mode_for_mode (GET_MODE (value)));
      emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
    }
  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && bitsize > 0
      && GET_MODE_BITSIZE (op_mode) >= bitsize
      && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
            && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode)))
      && insn_data[CODE_FOR_insv].operand[1].predicate (GEN_INT (bitsize),
                                                        VOIDmode)
      && check_predicate_volatile_ok (CODE_FOR_insv, 0, op0, VOIDmode))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;

      /* Add OFFSET into OP0's address.  */
      if (MEM_P (xop0))
        xop0 = adjust_address (xop0, byte_mode, offset);

      /* If xop0 is a register, we need it in OP_MODE
         to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
        /* We can't just change the mode, because this might clobber op0,
           and we will need the original value of op0 if insv fails.  */
        xop0 = gen_rtx_SUBREG (op_mode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
      if (REG_P (xop0) && GET_MODE (xop0) != op_mode)
        xop0 = gen_rtx_SUBREG (op_mode, xop0, 0);

      /* On big-endian machines, we count bits from the most significant.
         If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
        xbitpos = unit - bitsize - xbitpos;
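      /* Editor's note (illustration, not original source): BITS_BIG_ENDIAN
         describes how the insv pattern itself numbers bits, while
         BYTES_BIG_ENDIAN describes memory layout; when the two disagree,
         a position counted from one end of the unit must be recounted
         from the other end, which is what the subtraction above does.  */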
      /* We have been counting XBITPOS within UNIT.
         Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && !MEM_P (xop0))
        xbitpos += GET_MODE_BITSIZE (op_mode) - unit;

      unit = GET_MODE_BITSIZE (op_mode);

      /* Convert VALUE to op_mode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != op_mode)
        {
          if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
            {
              /* Optimization: Don't bother really extending VALUE
                 if it has all the bits we will actually use.  However,
                 if we must narrow it, be sure we do it correctly.  */

              if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (op_mode))
                {
                  rtx tmp;

                  tmp = simplify_subreg (op_mode, value1, GET_MODE (value), 0);
                  if (! tmp)
                    tmp = simplify_gen_subreg (op_mode,
                                               force_reg (GET_MODE (value),
                                                          value1),
                                               GET_MODE (value), 0);
                  value1 = tmp;
                }
              else
                value1 = gen_lowpart (op_mode, value1);
            }
          else if (GET_CODE (value) == CONST_INT)
            value1 = gen_int_mode (INTVAL (value), op_mode);
          else
            /* Parse phase is supposed to make VALUE's data type
               match that of the component reference, which is a type
               at least as wide as the field; so VALUE should have
               a mode that corresponds to that type.  */
            gcc_assert (CONSTANT_P (value));
        }

      /* If this machine's insv insists on a register,
         get VALUE1 into a register.  */
      if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
             (value1, op_mode)))
        value1 = force_reg (op_mode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
        {
          emit_insn (pat);

          /* If the mode of the insertion is wider than the mode of the
             target register we created a paradoxical subreg for the
             target.  Truncate the paradoxical subreg of the target to
             itself properly.  */
          if (!TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (GET_MODE (op0)),
                                      GET_MODE_BITSIZE (op_mode))
              && (REG_P (xop0)
                  || GET_CODE (xop0) == SUBREG))
            convert_move (op0, xop0, true);
          return true;
        }
      delete_insns_since (last);
    }
  /* If OP0 is a memory, try copying it to a register and seeing if a
     cheap register alternative is available.  */
  if (HAVE_insv && MEM_P (op0))
    {
      enum machine_mode bestmode;

      /* Get the mode to use for inserting into this field.  If OP0 is
         BLKmode, get the smallest mode consistent with the alignment.  If
         OP0 is a non-BLKmode object that is no wider than OP_MODE, use its
         mode.  Otherwise, use the smallest mode containing the field.  */

      if (GET_MODE (op0) == BLKmode
          || (op_mode != MAX_MACHINE_MODE
              && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (op_mode)))
        bestmode = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0),
                                  (op_mode == MAX_MACHINE_MODE
                                   ? VOIDmode : op_mode),
                                  MEM_VOLATILE_P (op0));
      else
        bestmode = GET_MODE (op0);

      if (bestmode != VOIDmode
          && GET_MODE_SIZE (bestmode) >= GET_MODE_SIZE (fieldmode)
          && !(SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
               && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
        {
          rtx last, tempreg, xop0;
          unsigned HOST_WIDE_INT xoffset, xbitpos;

          last = get_last_insn ();

          /* Adjust address to point to the containing unit of
             that mode.  Compute the offset as a multiple of this unit,
             counting in bytes.  */
          unit = GET_MODE_BITSIZE (bestmode);
          xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
          xbitpos = bitnum % unit;
          xop0 = adjust_address (op0, bestmode, xoffset);

          /* Fetch that unit, store the bitfield in it, then store
             the unit.  */
          tempreg = copy_to_reg (xop0);
          if (store_bit_field_1 (tempreg, bitsize, xbitpos,
                                 fieldmode, orig_value, false))
            {
              emit_move_insn (xop0, tempreg);
              return true;
            }
          delete_insns_since (last);
        }
    }

  if (!fallback_p)
    return false;

  store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
  return true;
}
/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.  */

void
store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                 unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
                 rtx value)
{
  if (!store_bit_field_1 (str_rtx, bitsize, bitnum, fieldmode, value, true))
    gcc_unreachable ();
}
/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0 except offset by OFFSET bytes.
     (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
     (If OP0 is a register, it may be a full word or a narrower mode,
      but BITPOS still counts within a full word,
      which is significant on bigendian machines.)  */

static void
store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT offset,
                       unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitpos, rtx value)
{
  enum machine_mode mode;
  unsigned int total_bits = BITS_PER_WORD;
  rtx temp;
  int all_zero = 0;
  int all_one = 0;
  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    {
      gcc_assert (!offset);
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        {
          store_split_bit_field (op0, bitsize, bitpos, value);
          return;
        }
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.
         We don't want a mode bigger than the destination.  */

      mode = GET_MODE (op0);
      if (GET_MODE_BITSIZE (mode) == 0
          || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
        mode = word_mode;
      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        {
          /* The only way this should occur is if the field spans word
             boundaries.  */
          store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
                                 value);
          return;
        }

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }
  mode = GET_MODE (op0);

  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;
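  /* Editor's note (worked example, not original source): with
     TOTAL_BITS == 32, an 8-bit field at msb-relative BITPOS 4 occupies
     bits 4..11 counted from the msb, which are bits 20..27 counted from
     the lsb, so the line above rewrites BITPOS as 32 - 8 - 4 == 20.  */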
  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (GET_CODE (value) == CONST_INT)
    {
      HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
        v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
        all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
                && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
               || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
        all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
                      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
        value = convert_to_mode (mode, value, 1);

      if (must_and)
        value = expand_binop (mode, and_optab, value,
                              mask_rtx (mode, 0, bitsize, 0),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
        value = expand_shift (LSHIFT_EXPR, mode, value,
                              build_int_cst (NULL_TREE, bitpos), NULL_RTX, 1);
    }
  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */
  /* We keep the intermediates in registers to allow CSE to combine
     consecutive bitfield assignments.  */

  temp = force_reg (mode, op0);

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, temp,
                           mask_rtx (mode, bitpos, bitsize, 1),
                           NULL_RTX, 1, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);
    }

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    {
      temp = expand_binop (mode, ior_optab, temp, value,
                           NULL_RTX, 1, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);
    }

  if (op0 != temp)
    {
      op0 = copy_rtx (op0);
      emit_move_insn (op0, temp);
    }
}
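/* Editor's note (summary example, not original source): the sequence
   above implements the read-modify-write

     op0 = (op0 & ~MASK) | ((value & ((1 << bitsize) - 1)) << bitpos);

   e.g. storing 0x5 into a 4-bit field at BITPOS 8 of an SImode word
   ANDs with ~0x00000f00 and then IORs with 0x00000500; the AND is
   skipped when VALUE is all ones and the IOR when VALUE is zero.  */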
/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitpos, rtx value)
{
  unsigned int unit;
  unsigned int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
        value = word;
      else
        value = gen_lowpart_common (word_mode,
                                    force_reg (GET_MODE (value) != VOIDmode
                                               ? GET_MODE (value)
                                               : word_mode, value));
    }
  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         store_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);
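      /* Editor's note (worked example, not original source): storing an
         8-bit field at BITPOS 28 with UNIT == 32 takes two iterations:
         first OFFSET == 0, THISPOS == 28, THISSIZE == MIN (8, 32 - 28) == 4,
         then OFFSET == 1, THISPOS == 0, THISSIZE == 4 for the remaining
         bits in the next word.  */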
      if (BYTES_BIG_ENDIAN)
        {
          int total_bits;

          /* We must do an endian conversion exactly the same way as it is
             done in extract_bit_field, so that the two calls to
             extract_fixed_bit_field will have comparable arguments.  */
          if (!MEM_P (value) || GET_MODE (value) == BLKmode)
            total_bits = BITS_PER_WORD;
          else
            total_bits = GET_MODE_BITSIZE (GET_MODE (value));

          /* Fetch successively less significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> (bitsize - bitsdone - thissize))
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            /* The args are chosen so that the last part includes the
               lsb.  Give extract_bit_field the value it needs (with
               endianness compensation) to fetch the piece we want.  */
            part = extract_fixed_bit_field (word_mode, value, 0, thissize,
                                            total_bits - bitsize + bitsdone,
                                            NULL_RTX, 1);
        }
      else
        {
          /* Fetch successively more significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> bitsdone)
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            part = extract_fixed_bit_field (word_mode, value, 0, thissize,
                                            bitsdone, NULL_RTX, 1);
        }

      /* If OP0 is a register, then handle OFFSET here.

         When handling multiword bitfields, extract_bit_field may pass
         down a word_mode SUBREG of a larger REG for a bitfield that actually
         crosses a word boundary.  Thus, for a SUBREG, we must find
         the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
        {
          int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
          word = operand_subword_force (SUBREG_REG (op0), word_offset,
                                        GET_MODE (SUBREG_REG (op0)));
          offset = 0;
        }
      else if (REG_P (op0))
        {
          word = operand_subword_force (op0, offset, GET_MODE (op0));
          offset = 0;
        }
      else
        word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
         store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
                             thispos, part);
      bitsdone += thissize;
    }
}
/* A subroutine of extract_bit_field_1 that converts return value X
   to either MODE or TMODE.  MODE, TMODE and UNSIGNEDP are arguments
   to extract_bit_field.  */

static rtx
convert_extracted_bit_field (rtx x, enum machine_mode mode,
                             enum machine_mode tmode, bool unsignedp)
{
  if (GET_MODE (x) == tmode || GET_MODE (x) == mode)
    return x;

  /* If the x mode is not a scalar integral, first convert to the
     integer mode of that size and then access it as a floating-point
     value via a SUBREG.  */
  if (!SCALAR_INT_MODE_P (tmode))
    {
      enum machine_mode smode;

      smode = mode_for_size (GET_MODE_BITSIZE (tmode), MODE_INT, 0);
      x = convert_to_mode (smode, x, unsignedp);
      x = force_reg (smode, x);
      return gen_lowpart (tmode, x);
    }

  return convert_to_mode (tmode, x, unsignedp);
}
/* A subroutine of extract_bit_field, with the same arguments.
   If FALLBACK_P is true, fall back to extract_fixed_bit_field
   if we can find no other means of implementing the operation.
   If FALLBACK_P is false, return NULL instead.  */

static rtx
extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                     unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
                     enum machine_mode mode, enum machine_mode tmode,
                     bool fallback_p)
{
  unsigned int unit
    = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset, bitpos;
  rtx op0 = str_rtx;
  enum machine_mode int_mode;
  enum machine_mode ext_mode;
  enum machine_mode mode1;
  enum insn_code icode;
  int byte_offset;

  if (tmode == VOIDmode)
    tmode = mode;
  while (GET_CODE (op0) == SUBREG)
    {
      bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }

  /* If we have an out-of-bounds access to a register, just return an
     uninitialized register of the required mode.  This can occur if the
     source code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
    return gen_reg_rtx (tmode);

  if (REG_P (op0)
      && mode == GET_MODE (op0)
      && bitnum == 0
      && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
    {
      /* We're trying to extract a full register from itself.  */
      return op0;
    }

  /* See if we can get a better vector mode before extracting.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && GET_MODE_INNER (GET_MODE (op0)) != tmode)
    {
      enum machine_mode new_mode;
      int nunits = GET_MODE_NUNITS (GET_MODE (op0));

      if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
        new_mode = MIN_MODE_VECTOR_FLOAT;
      else if (GET_MODE_CLASS (tmode) == MODE_FRACT)
        new_mode = MIN_MODE_VECTOR_FRACT;
      else if (GET_MODE_CLASS (tmode) == MODE_UFRACT)
        new_mode = MIN_MODE_VECTOR_UFRACT;
      else if (GET_MODE_CLASS (tmode) == MODE_ACCUM)
        new_mode = MIN_MODE_VECTOR_ACCUM;
      else if (GET_MODE_CLASS (tmode) == MODE_UACCUM)
        new_mode = MIN_MODE_VECTOR_UACCUM;
      else
        new_mode = MIN_MODE_VECTOR_INT;

      for (; new_mode != VOIDmode ; new_mode = GET_MODE_WIDER_MODE (new_mode))
        if (GET_MODE_NUNITS (new_mode) == nunits
            && GET_MODE_SIZE (new_mode) == GET_MODE_SIZE (GET_MODE (op0))
            && targetm.vector_mode_supported_p (new_mode))
          break;
      if (new_mode != VOIDmode)
        op0 = gen_lowpart (new_mode, op0);
    }
  /* Use vec_extract patterns for extracting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && (optab_handler (vec_extract_optab, GET_MODE (op0))->insn_code
          != CODE_FOR_nothing)
      && ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
          == bitnum / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      int icode = (int) optab_handler (vec_extract_optab, outermode)->insn_code;
      unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode);
      rtx rtxpos = GEN_INT (pos);
      rtx src = op0;
      rtx dest = NULL, pat, seq;
      enum machine_mode mode0 = insn_data[icode].operand[0].mode;
      enum machine_mode mode1 = insn_data[icode].operand[1].mode;
      enum machine_mode mode2 = insn_data[icode].operand[2].mode;

      if (innermode == tmode || innermode == mode)
        dest = target;

      if (!dest)
        dest = gen_reg_rtx (innermode);

      start_sequence ();

      if (! (*insn_data[icode].operand[0].predicate) (dest, mode0))
        dest = copy_to_mode_reg (mode0, dest);

      if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
        src = copy_to_mode_reg (mode1, src);

      if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
        rtxpos = copy_to_mode_reg (mode2, rtxpos);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
                  && (*insn_data[icode].operand[1].predicate) (src, mode1)
                  && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));

      pat = GEN_FCN (icode) (dest, src, rtxpos);
      seq = get_insns ();
      end_sequence ();
      if (pat)
        {
          emit_insn (seq);
          emit_insn (pat);
          if (mode0 != mode)
            return gen_lowpart (tmode, dest);
          return dest;
        }
    }
  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (MEM_P (op0))
          op0 = adjust_address (op0, imode, 0);
        else if (imode != BLKmode)
          {
            op0 = gen_lowpart (imode, op0);

            /* If we got a SUBREG, force it into a register since we
               aren't going to be able to do another SUBREG on it.  */
            if (GET_CODE (op0) == SUBREG)
              op0 = force_reg (imode, op0);
          }
        else if (REG_P (op0))
          {
            rtx reg, subreg;
            imode = smallest_mode_for_size (GET_MODE_BITSIZE (GET_MODE (op0)),
                                            MODE_INT);
            reg = gen_reg_rtx (imode);
            subreg = gen_lowpart_SUBREG (GET_MODE (op0), reg);
            emit_move_insn (subreg, op0);
            op0 = reg;
            bitnum += SUBREG_BYTE (subreg) * BITS_PER_UNIT;
          }
        else
          {
            rtx mem = assign_stack_temp (GET_MODE (op0),
                                         GET_MODE_SIZE (GET_MODE (op0)), 0);
            emit_move_insn (mem, op0);
            op0 = adjust_address (mem, BLKmode, 0);
          }
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (MEM_P (op0))
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* Extraction of a full-word or multi-word value from a structure
     in a register or aligned memory can be done with just a SUBREG.
     A subword value in the least significant part of a register
     can also be extracted with a SUBREG.  For this, we need the
     byte offset of the value in op0.  */
  bitpos = bitnum % unit;
  offset = bitnum / unit;
  byte_offset = bitpos / BITS_PER_UNIT + offset * UNITS_PER_WORD;

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && !MEM_P (op0)
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* Only scalar integer modes can be converted via subregs.  There is an
     additional problem for FP modes here in that they can have a precision
     which is different from the size.  mode_for_size uses precision, but
     we want a mode based on the size, so we must avoid calling it for FP
     modes.  */
  mode1 = (SCALAR_INT_MODE_P (tmode)
           ? mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0)
           : mode);

  if (((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
        && bitpos % BITS_PER_WORD == 0)
       || (mode1 != BLKmode
           /* ??? The big endian test here is wrong.  This is correct
              if the value is in a register, and if mode_for_size is not
              the same mode as op0.  This causes us to get unnecessarily
              inefficient code from the Thumb port when -mbig-endian.  */
           && (BYTES_BIG_ENDIAN
               ? bitpos + bitsize == BITS_PER_WORD
               : bitpos == 0)))
      && ((!MEM_P (op0)
           && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode1),
                                     GET_MODE_BITSIZE (GET_MODE (op0)))
           && GET_MODE_SIZE (mode1) != 0
           && byte_offset % GET_MODE_SIZE (mode1) == 0)
          || (MEM_P (op0)
              && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
                  || (offset * BITS_PER_UNIT % bitsize == 0
                      && MEM_ALIGN (op0) % bitsize == 0)))))
    {
      if (MEM_P (op0))
        op0 = adjust_address (op0, mode1, offset);
      else if (mode1 != GET_MODE (op0))
        {
          rtx sub = simplify_gen_subreg (mode1, op0, GET_MODE (op0),
                                         byte_offset);
          if (sub == NULL)
            goto no_subreg_mode_swap;
          op0 = sub;
        }
      if (mode1 != mode)
        return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }
 no_subreg_mode_swap:
  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.  */

      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      if (target == 0 || !REG_P (target))
        target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_clobber (target);

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          /* Word number in TARGET to use.  */
          unsigned int wordnum
            = (WORDS_BIG_ENDIAN
               ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
               : i);
          /* Offset from start of field in OP0.  */
          unsigned int bit_offset = (WORDS_BIG_ENDIAN
                                     ? MAX (0, ((int) bitsize - ((int) i + 1)
                                                * (int) BITS_PER_WORD))
                                     : (int) i * BITS_PER_WORD);
          rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
          rtx result_part
            = extract_bit_field (op0, MIN (BITS_PER_WORD,
                                           bitsize - i * BITS_PER_WORD),
                                 bitnum + bit_offset, 1, target_part, mode,
                                 word_mode);

          gcc_assert (target_part);

          if (result_part != target_part)
            emit_move_insn (target_part, result_part);
        }

      if (unsignedp)
        {
          /* Unless we've filled TARGET, the upper regs in a multi-reg value
             need to be zero'd out.  */
          if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
            {
              unsigned int i, total_words;

              total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
              for (i = nwords; i < total_words; i++)
                emit_move_insn
                  (operand_subword (target,
                                    WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
                                    1, VOIDmode),
                   const0_rtx);
            }
          return target;
        }

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
                             build_int_cst (NULL_TREE,
                                            GET_MODE_BITSIZE (mode) - bitsize),
                             NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
                           build_int_cst (NULL_TREE,
                                          GET_MODE_BITSIZE (mode) - bitsize),
                           NULL_RTX, 0);
    }
  /* From here on we know the desired field is smaller than a word.  */

  /* Check if there is a correspondingly-sized integer field, so we can
     safely extract it as one size of integer, if necessary; then
     truncate or extend to the size that is wanted; then use SUBREGs or
     convert_to_mode to get one of the modes we really wanted.  */

  int_mode = int_mode_for_mode (tmode);
  if (int_mode == BLKmode)
    int_mode = int_mode_for_mode (mode);
  /* Should probably push op0 out to memory and then do a load.  */
  gcc_assert (int_mode != BLKmode);

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */
  if (!MEM_P (op0))
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (!REG_P (op0))
            op0 = copy_to_reg (op0);
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, (offset * UNITS_PER_WORD));
        }
      offset = 0;
    }
  /* Now OFFSET is nonzero only for memory operands.  */
  ext_mode = mode_for_extraction (unsignedp ? EP_extzv : EP_extv, 0);
  icode = unsignedp ? CODE_FOR_extzv : CODE_FOR_extv;
  if (ext_mode != MAX_MACHINE_MODE
      && bitsize > 0
      && GET_MODE_BITSIZE (ext_mode) >= bitsize
      /* If op0 is a register, we need it in EXT_MODE to make it
         acceptable to the format of ext(z)v.  */
      && !(GET_CODE (op0) == SUBREG && GET_MODE (op0) != ext_mode)
      && !((REG_P (op0) || GET_CODE (op0) == SUBREG)
           && (bitsize + bitpos > GET_MODE_BITSIZE (ext_mode)))
      && check_predicate_volatile_ok (icode, 1, op0, GET_MODE (op0)))
    {
      unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
      rtx bitsize_rtx, bitpos_rtx;
      rtx last = get_last_insn ();
      rtx xop0 = op0;
      rtx xtarget = target;
      rtx xspec_target = target;
      rtx xspec_target_subreg = 0;
      rtx pat;

      /* If op0 is a register, we need it in EXT_MODE to make it
         acceptable to the format of ext(z)v.  */
      if (REG_P (xop0) && GET_MODE (xop0) != ext_mode)
        xop0 = gen_rtx_SUBREG (ext_mode, xop0, 0);
      if (MEM_P (xop0))
        /* Get ref to first byte containing part of the field.  */
        xop0 = adjust_address (xop0, byte_mode, xoffset);

      /* On big-endian machines, we count bits from the most significant.
         If the bit field insn does not, we must invert.  */
      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
        xbitpos = unit - bitsize - xbitpos;

      /* Now convert from counting within UNIT to counting in EXT_MODE.  */
      if (BITS_BIG_ENDIAN && !MEM_P (xop0))
        xbitpos += GET_MODE_BITSIZE (ext_mode) - unit;

      unit = GET_MODE_BITSIZE (ext_mode);

      if (xtarget == 0)
        xtarget = xspec_target = gen_reg_rtx (tmode);

      if (GET_MODE (xtarget) != ext_mode)
        {
          /* Don't use LHS paradoxical subreg if explicit truncation is needed
             between the mode of the extraction (word_mode) and the target
             mode.  Instead, create a temporary and use convert_move to set
             the target.  */
          if (REG_P (xtarget)
              && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (GET_MODE (xtarget)),
                                        GET_MODE_BITSIZE (ext_mode)))
            {
              xtarget = gen_lowpart (ext_mode, xtarget);
              if (GET_MODE_SIZE (ext_mode)
                  > GET_MODE_SIZE (GET_MODE (xspec_target)))
                xspec_target_subreg = xtarget;
            }
          else
            xtarget = gen_reg_rtx (ext_mode);
        }

      /* If this machine's ext(z)v insists on a register target,
         make sure we have one.  */
      if (!insn_data[(int) icode].operand[0].predicate (xtarget, ext_mode))
        xtarget = gen_reg_rtx (ext_mode);

      bitsize_rtx = GEN_INT (bitsize);
      bitpos_rtx = GEN_INT (xbitpos);

      pat = (unsignedp
             ? gen_extzv (xtarget, xop0, bitsize_rtx, bitpos_rtx)
             : gen_extv (xtarget, xop0, bitsize_rtx, bitpos_rtx));
      if (pat)
        {
          emit_insn (pat);
          if (xtarget == xspec_target)
            return xtarget;
          if (xtarget == xspec_target_subreg)
            return xspec_target;
          return convert_extracted_bit_field (xtarget, mode, tmode, unsignedp);
        }
      delete_insns_since (last);
    }
  /* If OP0 is a memory, try copying it to a register and seeing if a
     cheap register alternative is available.  */
  if (ext_mode != MAX_MACHINE_MODE && MEM_P (op0))
    {
      enum machine_mode bestmode;

      /* Get the mode to use for inserting into this field.  If
         OP0 is BLKmode, get the smallest mode consistent with the
         alignment.  If OP0 is a non-BLKmode object that is no
         wider than EXT_MODE, use its mode.  Otherwise, use the
         smallest mode containing the field.  */

      if (GET_MODE (op0) == BLKmode
          || (ext_mode != MAX_MACHINE_MODE
              && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (ext_mode)))
        bestmode = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0),
                                  (ext_mode == MAX_MACHINE_MODE
                                   ? VOIDmode : ext_mode),
                                  MEM_VOLATILE_P (op0));
      else
        bestmode = GET_MODE (op0);

      if (bestmode != VOIDmode
          && !(SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
               && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
        {
          unsigned HOST_WIDE_INT xoffset, xbitpos;

          /* Compute the offset as a multiple of this unit,
             counting in bytes.  */
          unit = GET_MODE_BITSIZE (bestmode);
          xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
          xbitpos = bitnum % unit;

          /* Make sure the register is big enough for the whole field.  */
          if (xoffset * BITS_PER_UNIT + unit
              >= offset * BITS_PER_UNIT + bitsize)
            {
              rtx last, result, xop0;

              last = get_last_insn ();

              /* Fetch it to a register in that size.  */
              xop0 = adjust_address (op0, bestmode, xoffset);
              xop0 = force_reg (bestmode, xop0);
              result = extract_bit_field_1 (xop0, bitsize, xbitpos,
                                            unsignedp, target,
                                            mode, tmode, false);
              if (result)
                return result;

              delete_insns_since (last);
            }
        }
    }

  if (!fallback_p)
    return NULL;

  target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                    bitpos, target, unsignedp);
  return convert_extracted_bit_field (target, mode, tmode, unsignedp);
}
/* Generate code to extract a byte-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */

rtx
extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                   unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
                   enum machine_mode mode, enum machine_mode tmode)
{
  return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp,
                              target, mode, tmode, true);
}
/* Extract a bit field using shifts and boolean operations.
   Returns an rtx to represent the value.
   OP0 addresses a register (word) or memory (byte).
   BITPOS says which bit within the word or byte the bit field starts in.
   OFFSET says how many bytes farther the bit field starts;
     it is 0 if OP0 is a register.
   BITSIZE says how many bits long the bit field is.
     (If OP0 is a register, it may be narrower than a full word,
      but BITPOS still counts within a full word,
      which is significant on bigendian machines.)

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.  */

static rtx
extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
                         unsigned HOST_WIDE_INT offset,
                         unsigned HOST_WIDE_INT bitsize,
                         unsigned HOST_WIDE_INT bitpos, rtx target,
                         int unsignedp)
{
  unsigned int total_bits = BITS_PER_WORD;
  enum machine_mode mode;

  if (GET_CODE (op0) == SUBREG || REG_P (op0))
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        /* The only way this should occur is if the field spans word
           boundaries.  */
        return extract_split_bit_field (op0, bitsize,
                                        bitpos + offset * BITS_PER_UNIT,
                                        unsignedp);

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }
  mode = GET_MODE (op0);

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb and that of OP0.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */

  if (unsignedp)
    {
      if (bitpos)
        {
          /* If the field does not already start at the lsb,
             shift it so it does.  */
          tree amount = build_int_cst (NULL_TREE, bitpos);
          /* Maybe propagate the target for the shift.  */
          /* But not if we will return it--could confuse integrate.c.  */
          rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
          if (tmode != mode) subtarget = 0;
          op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
        }
      /* Convert the value to the desired mode.  */
      if (mode != tmode)
        op0 = convert_to_mode (tmode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
         mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitpos + bitsize)
        return expand_binop (GET_MODE (op0), and_optab, op0,
                             mask_rtx (GET_MODE (op0), 0, bitsize, 0),
                             target, 1, OPTAB_LIB_WIDEN);
      return op0;
    }
  /* To extract a signed bit-field, first shift its msb to the msb of the word,
     then arithmetic-shift its lsb to the lsb of the word.  */
  op0 = force_reg (mode, op0);
  if (mode != tmode)
    target = 0;

  /* Find the narrowest integer mode that contains the field.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
      {
        op0 = convert_to_mode (mode, op0, 0);
        break;
      }

  if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
    {
      tree amount
        = build_int_cst (NULL_TREE,
                         GET_MODE_BITSIZE (mode) - (bitsize + bitpos));
      /* Maybe propagate the target for the shift.  */
      rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }

  return expand_shift (RSHIFT_EXPR, mode, op0,
                       build_int_cst (NULL_TREE,
                                      GET_MODE_BITSIZE (mode) - bitsize),
                       target, 0);
}
/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
   of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
   complement of that if COMPLEMENT.  The mask is truncated if
   necessary to the width of mode MODE.  The mask is zero-extended if
   BITSIZE+BITPOS is too small for MODE.  */

static rtx
mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement)
{
  HOST_WIDE_INT masklow, maskhigh;

  if (bitsize == 0)
    masklow = 0;
  else if (bitpos < HOST_BITS_PER_WIDE_INT)
    masklow = (HOST_WIDE_INT) -1 << bitpos;
  else
    masklow = 0;

  if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
    masklow &= ((unsigned HOST_WIDE_INT) -1
                >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));

  if (bitpos <= HOST_BITS_PER_WIDE_INT)
    maskhigh = -1;
  else
    maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);

  if (bitsize == 0)
    maskhigh = 0;
  else if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
    maskhigh &= ((unsigned HOST_WIDE_INT) -1
                 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
  else
    maskhigh = 0;

  if (complement)
    {
      masklow = ~masklow;
      maskhigh = ~maskhigh;
    }

  return immed_double_const (masklow, maskhigh, mode);
}
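/* Editor's note (worked example, not original source): with a 64-bit
   HOST_WIDE_INT, mask_rtx (SImode, 8, 4, 0) computes
   masklow == ((HOST_WIDE_INT) -1 << 8)
              & ((unsigned HOST_WIDE_INT) -1 >> 52) == 0xf00,
   i.e. four ones preceded by eight zeros; with COMPLEMENT nonzero the
   result is the SImode value 0xfffff0ff.  */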
1866 /* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
1867 VALUE truncated to BITSIZE bits and then shifted left BITPOS bits. */
1870 lshift_value (enum machine_mode mode, rtx value, int bitpos, int bitsize)
1872 unsigned HOST_WIDE_INT v = INTVAL (value);
1873 HOST_WIDE_INT low, high;
1875 if (bitsize < HOST_BITS_PER_WIDE_INT)
1876 v &= ~((HOST_WIDE_INT) -1 << bitsize);
1878 if (bitpos < HOST_BITS_PER_WIDE_INT)
1881 high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
1886 high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
1889 return immed_double_const (low, high, mode);
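/* Example (editorial, assuming 32-bit HOST_WIDE_INT): VALUE = 0x1ff,
   BITPOS = 40, BITSIZE = 4.  V is truncated to 0xf; since BITPOS >=
   HOST_BITS_PER_WIDE_INT, low = 0 and high = 0xf << 8, giving the
   64-bit constant 0x00000f0000000000.  */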
1892 /* Extract a bit field that is split across two words
1893 and return an RTX for the result.
1895 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1896 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
1897 UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend. */
1900 extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
1901 unsigned HOST_WIDE_INT bitpos, int unsignedp)
1904 unsigned int bitsdone = 0;
1905 rtx result = NULL_RTX;
1908 /* Make sure UNIT isn't larger than BITS_PER_WORD; we can only handle that
1910 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
1911 unit = BITS_PER_WORD;
1913 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
1915 while (bitsdone < bitsize)
1917 unsigned HOST_WIDE_INT thissize;
1919 unsigned HOST_WIDE_INT thispos;
1920 unsigned HOST_WIDE_INT offset;
1922 offset = (bitpos + bitsdone) / unit;
1923 thispos = (bitpos + bitsdone) % unit;
1925 /* THISSIZE must not overrun a word boundary. Otherwise,
1926 extract_fixed_bit_field will call us again, and we will mutually
1928 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1929 thissize = MIN (thissize, unit - thispos);
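      /* E.g. (editorial): BITSIZE = 20, BITPOS = 28, UNIT = 32: the
	 first iteration extracts THISSIZE = 4 bits (bits 28..31 of
	 word 0), the second the remaining 16 bits from word 1.  */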
1931 /* If OP0 is a register, then handle OFFSET here.
1933 When handling multiword bitfields, extract_bit_field may pass
1934 down a word_mode SUBREG of a larger REG for a bitfield that actually
1935 crosses a word boundary. Thus, for a SUBREG, we must find
1936 the current word starting from the base register. */
1937 if (GET_CODE (op0) == SUBREG)
1939 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1940 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1941 GET_MODE (SUBREG_REG (op0)));
1944 else if (REG_P (op0))
1946 word = operand_subword_force (op0, offset, GET_MODE (op0));
1952 /* Extract the parts in bit-counting order,
1953 whose meaning is determined by BYTES_PER_UNIT.
1954 OFFSET is in UNITs, and UNIT is in bits.
1955 extract_fixed_bit_field wants offset in bytes. */
1956 part = extract_fixed_bit_field (word_mode, word,
1957 offset * unit / BITS_PER_UNIT,
1958 thissize, thispos, 0, 1);
1959 bitsdone += thissize;
1961 /* Shift this part into place for the result. */
1962 if (BYTES_BIG_ENDIAN)
1964 if (bitsize != bitsdone)
1965 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1966 build_int_cst (NULL_TREE, bitsize - bitsdone),
1971 if (bitsdone != thissize)
1972 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1973 build_int_cst (NULL_TREE,
1974 bitsdone - thissize), 0, 1);
1980 /* Combine the parts with bitwise or. This works
1981 because we extracted each part as an unsigned bit field. */
1982 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
1988 /* Unsigned bit field: we are done. */
1991 /* Signed bit field: sign-extend with two arithmetic shifts. */
1992 result = expand_shift (LSHIFT_EXPR, word_mode, result,
1993 build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
1995 return expand_shift (RSHIFT_EXPR, word_mode, result,
1996 build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
2000 /* Try to read the low bits of SRC as an rvalue of mode MODE, preserving
2001 the bit pattern. SRC_MODE is the mode of SRC; if this is smaller than
2002 MODE, fill the upper bits with zeros. Fail if the layout of either
2003 mode is unknown (as for CC modes) or if the extraction would involve
2004 unprofitable mode punning. Return the value on success, otherwise
2007 This is different from gen_lowpart* in these respects:
2009 - the returned value must always be considered an rvalue
2011 - when MODE is wider than SRC_MODE, the extraction involves
2014 - when MODE is smaller than SRC_MODE, the extraction involves
2015 a truncation (and is thus subject to TRULY_NOOP_TRUNCATION).
2017 In other words, this routine performs a computation, whereas the
2018 gen_lowpart* routines are conceptually lvalue or rvalue subreg
2022 extract_low_bits (enum machine_mode mode, enum machine_mode src_mode, rtx src)
2024 enum machine_mode int_mode, src_int_mode;
2026 if (mode == src_mode)
2029 if (CONSTANT_P (src))
2031 /* simplify_gen_subreg can't be used here, because if simplify_subreg
2032 fails, it will happily create (subreg (symbol_ref)) or similar
2034 unsigned int byte = subreg_lowpart_offset (mode, src_mode);
2035 rtx ret = simplify_subreg (mode, src, src_mode, byte);
2039 if (GET_MODE (src) == VOIDmode
2040 || !validate_subreg (mode, src_mode, src, byte))
2043 src = force_reg (GET_MODE (src), src);
2044 return gen_rtx_SUBREG (mode, src, byte);
2047 if (GET_MODE_CLASS (mode) == MODE_CC || GET_MODE_CLASS (src_mode) == MODE_CC)
2050 if (GET_MODE_BITSIZE (mode) == GET_MODE_BITSIZE (src_mode)
2051 && MODES_TIEABLE_P (mode, src_mode))
2053 rtx x = gen_lowpart_common (mode, src);
2058 src_int_mode = int_mode_for_mode (src_mode);
2059 int_mode = int_mode_for_mode (mode);
2060 if (src_int_mode == BLKmode || int_mode == BLKmode)
2063 if (!MODES_TIEABLE_P (src_int_mode, src_mode))
2065 if (!MODES_TIEABLE_P (int_mode, mode))
2068 src = gen_lowpart (src_int_mode, src);
2069 src = convert_modes (int_mode, src_int_mode, src, true);
2070 src = gen_lowpart (mode, src);
2074 /* Add INC into TARGET. */
2077 expand_inc (rtx target, rtx inc)
2079 rtx value = expand_binop (GET_MODE (target), add_optab,
2081 target, 0, OPTAB_LIB_WIDEN);
2082 if (value != target)
2083 emit_move_insn (target, value);
2086 /* Subtract DEC from TARGET. */
2089 expand_dec (rtx target, rtx dec)
2091 rtx value = expand_binop (GET_MODE (target), sub_optab,
2093 target, 0, OPTAB_LIB_WIDEN);
2094 if (value != target)
2095 emit_move_insn (target, value);
2098 /* Output a shift instruction for expression code CODE,
2099 with SHIFTED being the rtx for the value to shift,
2100 and AMOUNT the tree for the amount to shift by.
2101 Store the result in the rtx TARGET, if that is convenient.
2102 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2103 Return the rtx for where the value is. */
2106 expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
2107 tree amount, rtx target, int unsignedp)
2110 int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
2111 int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
2112 optab lshift_optab = ashl_optab;
2113 optab rshift_arith_optab = ashr_optab;
2114 optab rshift_uns_optab = lshr_optab;
2115 optab lrotate_optab = rotl_optab;
2116 optab rrotate_optab = rotr_optab;
2117 enum machine_mode op1_mode;
2119 bool speed = optimize_insn_for_speed_p ();
2121 op1 = expand_normal (amount);
2122 op1_mode = GET_MODE (op1);
2124 /* Determine whether the shift/rotate amount is a vector or a scalar.  If the
2125 shift amount is a vector, use the vector/vector shift patterns. */
2126 if (VECTOR_MODE_P (mode) && VECTOR_MODE_P (op1_mode))
2128 lshift_optab = vashl_optab;
2129 rshift_arith_optab = vashr_optab;
2130 rshift_uns_optab = vlshr_optab;
2131 lrotate_optab = vrotl_optab;
2132 rrotate_optab = vrotr_optab;
2135 /* Previously detected shift-counts computed by NEGATE_EXPR
2136 and shifted in the other direction; but that does not work
2139 if (SHIFT_COUNT_TRUNCATED)
2141 if (GET_CODE (op1) == CONST_INT
2142 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
2143 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
2144 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
2145 % GET_MODE_BITSIZE (mode));
2146 else if (GET_CODE (op1) == SUBREG
2147 && subreg_lowpart_p (op1)
2148 && INTEGRAL_MODE_P (GET_MODE (SUBREG_REG (op1))))
2149 op1 = SUBREG_REG (op1);
2152 if (op1 == const0_rtx)
2155 /* Check whether it's cheaper to implement a left shift by a constant
2156 bit count as a sequence of additions.  */
2157 if (code == LSHIFT_EXPR
2158 && GET_CODE (op1) == CONST_INT
2160 && INTVAL (op1) < GET_MODE_BITSIZE (mode)
2161 && INTVAL (op1) < MAX_BITS_PER_WORD
2162 && shift_cost[speed][mode][INTVAL (op1)] > INTVAL (op1) * add_cost[speed][mode]
2163 && shift_cost[speed][mode][INTVAL (op1)] != MAX_COST)
2166 for (i = 0; i < INTVAL (op1); i++)
2168 temp = force_reg (mode, shifted);
2169 shifted = expand_binop (mode, add_optab, temp, temp, NULL_RTX,
2170 unsignedp, OPTAB_LIB_WIDEN);
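      /* E.g. (editorial): for op1 == 2 this emits two doublings,
	 t = x + x; t = t + t;  which is profitable whenever two
	 additions are cheaper than shift_cost[speed][mode][2].  */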
2175 for (attempt = 0; temp == 0 && attempt < 3; attempt++)
2177 enum optab_methods methods;
2180 methods = OPTAB_DIRECT;
2181 else if (attempt == 1)
2182 methods = OPTAB_WIDEN;
2184 methods = OPTAB_LIB_WIDEN;
2188 /* Widening does not work for rotation. */
2189 if (methods == OPTAB_WIDEN)
2191 else if (methods == OPTAB_LIB_WIDEN)
2193 /* If we have been unable to open-code this by a rotation,
2194 do it as the IOR of two shifts. I.e., to rotate A
2195 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
2196 where C is the bitsize of A.
2198 It is theoretically possible that the target machine might
2199 not be able to perform either shift and hence we would
2200 be making two libcalls rather than just the one for the
2201 shift (similarly if IOR could not be done). We will allow
2202 this extremely unlikely lossage to avoid complicating the
2205 rtx subtarget = target == shifted ? 0 : target;
2206 tree new_amount, other_amount;
2208 tree type = TREE_TYPE (amount);
2209 if (GET_MODE (op1) != TYPE_MODE (type)
2210 && GET_MODE (op1) != VOIDmode)
2211 op1 = convert_to_mode (TYPE_MODE (type), op1, 1);
2212 new_amount = make_tree (type, op1);
2214 = fold_build2 (MINUS_EXPR, type,
2215 build_int_cst (type, GET_MODE_BITSIZE (mode)),
2218 shifted = force_reg (mode, shifted);
2220 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
2221 mode, shifted, new_amount, 0, 1);
2222 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
2223 mode, shifted, other_amount, subtarget, 1);
2224 return expand_binop (mode, ior_optab, temp, temp1, target,
2225 unsignedp, methods);
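	  /* Editorial sketch of this fallback for a 32-bit left rotate
	     by N (the N == 0 case was peeled off via op1 == const0_rtx
	     above):

	       (A << N) | ((unsigned) A >> (32 - N))

	     e.g. A = 0x80000001, N = 1 gives 0x00000003.  */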
2228 temp = expand_binop (mode,
2229 left ? lrotate_optab : rrotate_optab,
2230 shifted, op1, target, unsignedp, methods);
2233 temp = expand_binop (mode,
2234 left ? lshift_optab : rshift_uns_optab,
2235 shifted, op1, target, unsignedp, methods);
2237 /* Do arithmetic shifts.
2238 Also, if we are going to widen the operand, we can just as well
2239 use an arithmetic right-shift instead of a logical one. */
2240 if (temp == 0 && ! rotate
2241 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2243 enum optab_methods methods1 = methods;
2245 /* If trying to widen a log shift to an arithmetic shift,
2246 don't accept an arithmetic shift of the same size. */
2248 methods1 = OPTAB_MUST_WIDEN;
2250 /* Arithmetic shift */
2252 temp = expand_binop (mode,
2253 left ? lshift_optab : rshift_arith_optab,
2254 shifted, op1, target, unsignedp, methods1);
2257 /* We used to try extzv here for logical right shifts, but that was
2258 only useful for one machine, the VAX, and caused poor code
2259 generation there for lshrdi3, so the code was deleted and a
2260 define_expand for lshrsi3 was added to vax.md. */
2280 /* This structure holds the "cost" of a multiply sequence. The
2281 "cost" field holds the total rtx_cost of every operator in the
2282 synthetic multiplication sequence, hence cost(a op b) is defined
2283 as rtx_cost(op) + cost(a) + cost(b), where cost(leaf) is zero.
2284 The "latency" field holds the minimum possible latency of the
2285 synthetic multiply, on a hypothetical infinitely parallel CPU.
2286 This is the critical path, or the maximum height, of the expression
2287 tree which is the sum of rtx_costs on the most expensive path from
2288 any leaf to the root. Hence latency(a op b) is defined as zero for
2289 leaves and rtx_cost(op) + max(latency(a), latency(b)) otherwise. */
2292 short cost; /* Total rtx_cost of the multiplication sequence. */
2293 short latency; /* The latency of the multiplication sequence. */
2296 /* This macro is used to compare a pointer to a mult_cost against a
2297 single integer "rtx_cost" value. This is equivalent to the macro
2298 CHEAPER_MULT_COST(X,Z) where Z = {Y,Y}. */
2299 #define MULT_COST_LESS(X,Y) ((X)->cost < (Y) \
2300 || ((X)->cost == (Y) && (X)->latency < (Y)))
2302 /* This macro is used to compare two pointers to mult_costs against
2303 each other. The macro returns true if X is cheaper than Y.
2304 Currently, the cheaper of two mult_costs is the one with the
2305 lower "cost". If "cost"s are tied, the lower latency is cheaper. */
2306 #define CHEAPER_MULT_COST(X,Y) ((X)->cost < (Y)->cost \
2307 || ((X)->cost == (Y)->cost \
2308 && (X)->latency < (Y)->latency))
2310 /* This structure records a sequence of operations.
2311 `ops' is the number of operations recorded.
2312 `cost' is their total cost.
2313 The operations are stored in `op' and the corresponding
2314 logarithms of the integer coefficients in `log'.
2316 These are the operations:
2317 alg_zero total := 0;
2318 alg_m total := multiplicand;
2319 alg_shift total := total * coeff
2320 alg_add_t_m2 total := total + multiplicand * coeff;
2321 alg_sub_t_m2 total := total - multiplicand * coeff;
2322 alg_add_factor total := total * coeff + total;
2323 alg_sub_factor total := total * coeff - total;
2324 alg_add_t2_m total := total * coeff + multiplicand;
2325 alg_sub_t2_m total := total * coeff - multiplicand;
2327 The first operand must be either alg_zero or alg_m. */
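/* Example (editorial): one possible recording of a multiply by 10:

     alg_m                  total := x
     alg_add_t2_m (log 2)   total := total*4 + x    i.e. 5*x
     alg_shift (log 1)      total := total*2        i.e. 10*x  */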
2331 struct mult_cost cost;
2333 /* The sizes of the OP and LOG fields are not directly related to the
2334 word size, but the worst-case algorithms arise when we have few
2335 consecutive ones or zeros, i.e., a multiplicand like 10101010101...
2336 In that case we will generate shift-by-2, add, shift-by-2, add,...,
2337 in total wordsize operations. */
2338 enum alg_code op[MAX_BITS_PER_WORD];
2339 char log[MAX_BITS_PER_WORD];
2342 /* The entry for our multiplication cache/hash table. */
2343 struct alg_hash_entry {
2344 /* The number we are multiplying by. */
2345 unsigned HOST_WIDE_INT t;
2347 /* The mode in which we are multiplying something by T. */
2348 enum machine_mode mode;
2350 /* The best multiplication algorithm for t. */
2353 /* The cost of multiplication if ALG_CODE is not alg_impossible.
2354 Otherwise, the cost within which multiplication by T is
2356 struct mult_cost cost;
2358 /* Optimized for speed? */
2362 /* The number of cache/hash entries. */
2363 #if HOST_BITS_PER_WIDE_INT == 64
2364 #define NUM_ALG_HASH_ENTRIES 1031
2366 #define NUM_ALG_HASH_ENTRIES 307
2369 /* Each entry of ALG_HASH caches alg_code for some integer. This is
2370 actually a hash table.  If we have a collision, the older
2371 entry is kicked out. */
2372 static struct alg_hash_entry alg_hash[NUM_ALG_HASH_ENTRIES];
2374 /* Indicates the type of fixup needed after a constant multiplication.
2375 BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that
2376 the result should be negated, and ADD_VARIANT means that the
2377 multiplicand should be added to the result. */
2378 enum mult_variant {basic_variant, negate_variant, add_variant};
2380 static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
2381 const struct mult_cost *, enum machine_mode mode);
2382 static bool choose_mult_variant (enum machine_mode, HOST_WIDE_INT,
2383 struct algorithm *, enum mult_variant *, int);
2384 static rtx expand_mult_const (enum machine_mode, rtx, HOST_WIDE_INT, rtx,
2385 const struct algorithm *, enum mult_variant);
2386 static unsigned HOST_WIDE_INT choose_multiplier (unsigned HOST_WIDE_INT, int,
2387 int, rtx *, int *, int *);
2388 static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
2389 static rtx extract_high_half (enum machine_mode, rtx);
2390 static rtx expand_mult_highpart (enum machine_mode, rtx, rtx, rtx, int, int);
2391 static rtx expand_mult_highpart_optab (enum machine_mode, rtx, rtx, rtx,
2393 /* Compute and return the best algorithm for multiplying by T.
2394 The algorithm must cost less than COST_LIMIT.
2395 If retval.cost >= COST_LIMIT, no algorithm was found and all
2396 other fields of the returned struct are undefined.
2397 MODE is the machine mode of the multiplication. */
2400 synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
2401 const struct mult_cost *cost_limit, enum machine_mode mode)
2404 struct algorithm *alg_in, *best_alg;
2405 struct mult_cost best_cost;
2406 struct mult_cost new_limit;
2407 int op_cost, op_latency;
2408 unsigned HOST_WIDE_INT orig_t = t;
2409 unsigned HOST_WIDE_INT q;
2410 int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode));
2412 bool cache_hit = false;
2413 enum alg_code cache_alg = alg_zero;
2414 bool speed = optimize_insn_for_speed_p ();
2416 /* Indicate that no algorithm is yet found. If no algorithm
2417 is found, this value will be returned, indicating failure. */
2418 alg_out->cost.cost = cost_limit->cost + 1;
2419 alg_out->cost.latency = cost_limit->latency + 1;
2421 if (cost_limit->cost < 0
2422 || (cost_limit->cost == 0 && cost_limit->latency <= 0))
2425 /* Restrict the bits of "t" to the multiplication's mode. */
2426 t &= GET_MODE_MASK (mode);
2428 /* t == 1 can be done at zero cost. */
2432 alg_out->cost.cost = 0;
2433 alg_out->cost.latency = 0;
2434 alg_out->op[0] = alg_m;
2438 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2442 if (MULT_COST_LESS (cost_limit, zero_cost[speed]))
2447 alg_out->cost.cost = zero_cost[speed];
2448 alg_out->cost.latency = zero_cost[speed];
2449 alg_out->op[0] = alg_zero;
2454 /* We'll be needing a couple extra algorithm structures now. */
2456 alg_in = XALLOCA (struct algorithm);
2457 best_alg = XALLOCA (struct algorithm);
2458 best_cost = *cost_limit;
2460 /* Compute the hash index. */
2461 hash_index = (t ^ (unsigned int) mode ^ (speed * 256)) % NUM_ALG_HASH_ENTRIES;
2463 /* See if we already know what to do for T. */
2464 if (alg_hash[hash_index].t == t
2465 && alg_hash[hash_index].mode == mode
2467 && alg_hash[hash_index].speed == speed
2468 && alg_hash[hash_index].alg != alg_unknown)
2470 cache_alg = alg_hash[hash_index].alg;
2472 if (cache_alg == alg_impossible)
2474 /* The cache tells us that it's impossible to synthesize
2475 multiplication by T within alg_hash[hash_index].cost. */
2476 if (!CHEAPER_MULT_COST (&alg_hash[hash_index].cost, cost_limit))
2477 /* COST_LIMIT is at least as restrictive as the one
2478 recorded in the hash table, in which case we have no
2479 hope of synthesizing a multiplication. Just
2483 /* If we get here, COST_LIMIT is less restrictive than the
2484 one recorded in the hash table, so we may be able to
2485 synthesize a multiplication. Proceed as if we didn't
2486 have the cache entry. */
2490 if (CHEAPER_MULT_COST (cost_limit, &alg_hash[hash_index].cost))
2491 /* The cached algorithm shows that this multiplication
2492 requires more cost than COST_LIMIT. Just return. This
2493 way, we don't clobber this cache entry with
2494 alg_impossible but retain useful information. */
2506 goto do_alg_addsub_t_m2;
2508 case alg_add_factor:
2509 case alg_sub_factor:
2510 goto do_alg_addsub_factor;
2513 goto do_alg_add_t2_m;
2516 goto do_alg_sub_t2_m;
2524 /* If we have a group of zero bits at the low-order part of T, try
2525 multiplying by the remaining bits and then doing a shift. */
2530 m = floor_log2 (t & -t); /* m = number of low zero bits */
2534 /* The function expand_shift will choose between a shift and
2535 a sequence of additions, so the observed cost is given as
2536 MIN (m * add_cost[speed][mode], shift_cost[speed][mode][m]). */
2537 op_cost = m * add_cost[speed][mode];
2538 if (shift_cost[speed][mode][m] < op_cost)
2539 op_cost = shift_cost[speed][mode][m];
2540 new_limit.cost = best_cost.cost - op_cost;
2541 new_limit.latency = best_cost.latency - op_cost;
2542 synth_mult (alg_in, q, &new_limit, mode);
2544 alg_in->cost.cost += op_cost;
2545 alg_in->cost.latency += op_cost;
2546 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2548 struct algorithm *x;
2549 best_cost = alg_in->cost;
2550 x = alg_in, alg_in = best_alg, best_alg = x;
2551 best_alg->log[best_alg->ops] = m;
2552 best_alg->op[best_alg->ops] = alg_shift;
2559 /* If we have an odd number, add or subtract one. */
2562 unsigned HOST_WIDE_INT w;
2565 for (w = 1; (w & t) != 0; w <<= 1)
2567 /* If T was -1, then W will be zero after the loop. This is another
2568 case where T ends with ...111. Handling this with (T + 1) and
2569 subtracting 1 produces slightly better code and makes algorithm
2570 selection much faster than treating it like the ...0111 case
2574 /* Reject the case where t is 3.
2575 Thus we prefer addition in that case. */
2578 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2580 op_cost = add_cost[speed][mode];
2581 new_limit.cost = best_cost.cost - op_cost;
2582 new_limit.latency = best_cost.latency - op_cost;
2583 synth_mult (alg_in, t + 1, &new_limit, mode);
2585 alg_in->cost.cost += op_cost;
2586 alg_in->cost.latency += op_cost;
2587 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2589 struct algorithm *x;
2590 best_cost = alg_in->cost;
2591 x = alg_in, alg_in = best_alg, best_alg = x;
2592 best_alg->log[best_alg->ops] = 0;
2593 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2598 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2600 op_cost = add_cost[speed][mode];
2601 new_limit.cost = best_cost.cost - op_cost;
2602 new_limit.latency = best_cost.latency - op_cost;
2603 synth_mult (alg_in, t - 1, &new_limit, mode);
2605 alg_in->cost.cost += op_cost;
2606 alg_in->cost.latency += op_cost;
2607 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2609 struct algorithm *x;
2610 best_cost = alg_in->cost;
2611 x = alg_in, alg_in = best_alg, best_alg = x;
2612 best_alg->log[best_alg->ops] = 0;
2613 best_alg->op[best_alg->ops] = alg_add_t_m2;
2617 /* We may be able to calculate a * -7, a * -15, a * -31, etc
2618 quickly with a - a * n for some appropriate constant n. */
2619 m = exact_log2 (-orig_t + 1);
2620 if (m >= 0 && m < maxm)
2622 op_cost = shiftsub1_cost[speed][mode][m];
2623 new_limit.cost = best_cost.cost - op_cost;
2624 new_limit.latency = best_cost.latency - op_cost;
2625 synth_mult (alg_in, (unsigned HOST_WIDE_INT) (-orig_t + 1) >> m, &new_limit, mode);
2627 alg_in->cost.cost += op_cost;
2628 alg_in->cost.latency += op_cost;
2629 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2631 struct algorithm *x;
2632 best_cost = alg_in->cost;
2633 x = alg_in, alg_in = best_alg, best_alg = x;
2634 best_alg->log[best_alg->ops] = m;
2635 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2643 /* Look for factors of t of the form
2644 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2645 If we find such a factor, we can multiply by t using an algorithm that
2646 multiplies by q, shift the result by m and add/subtract it to itself.
2648 We search for large factors first and loop down, even if large factors
2649 are less probable than small; if we find a large factor we will find a
2650 good sequence quickly, and therefore be able to prune (by decreasing
2651 COST_LIMIT) the search. */
2653 do_alg_addsub_factor:
2654 for (m = floor_log2 (t - 1); m >= 2; m--)
2656 unsigned HOST_WIDE_INT d;
2658 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2659 if (t % d == 0 && t > d && m < maxm
2660 && (!cache_hit || cache_alg == alg_add_factor))
2662 /* If the target has a cheap shift-and-add instruction use
2663 that in preference to a shift insn followed by an add insn.
2664 Assume that the shift-and-add is "atomic" with a latency
2665 equal to its cost, otherwise assume that on superscalar
2666 hardware the shift may be executed concurrently with the
2667 earlier steps in the algorithm. */
2668 op_cost = add_cost[speed][mode] + shift_cost[speed][mode][m];
2669 if (shiftadd_cost[speed][mode][m] < op_cost)
2671 op_cost = shiftadd_cost[speed][mode][m];
2672 op_latency = op_cost;
2675 op_latency = add_cost[speed][mode];
2677 new_limit.cost = best_cost.cost - op_cost;
2678 new_limit.latency = best_cost.latency - op_latency;
2679 synth_mult (alg_in, t / d, &new_limit, mode);
2681 alg_in->cost.cost += op_cost;
2682 alg_in->cost.latency += op_latency;
2683 if (alg_in->cost.latency < op_cost)
2684 alg_in->cost.latency = op_cost;
2685 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2687 struct algorithm *x;
2688 best_cost = alg_in->cost;
2689 x = alg_in, alg_in = best_alg, best_alg = x;
2690 best_alg->log[best_alg->ops] = m;
2691 best_alg->op[best_alg->ops] = alg_add_factor;
2693 /* Other factors will have been taken care of in the recursion. */
2697 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2698 if (t % d == 0 && t > d && m < maxm
2699 && (!cache_hit || cache_alg == alg_sub_factor))
2701 /* If the target has a cheap shift-and-subtract insn use
2702 that in preference to a shift insn followed by a sub insn.
2703 Assume that the shift-and-sub is "atomic" with a latency
2704 equal to its cost, otherwise assume that on superscalar
2705 hardware the shift may be executed concurrently with the
2706 earlier steps in the algorithm. */
2707 op_cost = add_cost[speed][mode] + shift_cost[speed][mode][m];
2708 if (shiftsub0_cost[speed][mode][m] < op_cost)
2710 op_cost = shiftsub0_cost[speed][mode][m];
2711 op_latency = op_cost;
2714 op_latency = add_cost[speed][mode];
2716 new_limit.cost = best_cost.cost - op_cost;
2717 new_limit.latency = best_cost.latency - op_latency;
2718 synth_mult (alg_in, t / d, &new_limit, mode);
2720 alg_in->cost.cost += op_cost;
2721 alg_in->cost.latency += op_latency;
2722 if (alg_in->cost.latency < op_cost)
2723 alg_in->cost.latency = op_cost;
2724 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2726 struct algorithm *x;
2727 best_cost = alg_in->cost;
2728 x = alg_in, alg_in = best_alg, best_alg = x;
2729 best_alg->log[best_alg->ops] = m;
2730 best_alg->op[best_alg->ops] = alg_sub_factor;
2738 /* Try shift-and-add (load effective address) instructions,
2739 i.e. do a*3, a*5, a*9. */
2746 if (m >= 0 && m < maxm)
2748 op_cost = shiftadd_cost[speed][mode][m];
2749 new_limit.cost = best_cost.cost - op_cost;
2750 new_limit.latency = best_cost.latency - op_cost;
2751 synth_mult (alg_in, (t - 1) >> m, &new_limit, mode);
2753 alg_in->cost.cost += op_cost;
2754 alg_in->cost.latency += op_cost;
2755 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2757 struct algorithm *x;
2758 best_cost = alg_in->cost;
2759 x = alg_in, alg_in = best_alg, best_alg = x;
2760 best_alg->log[best_alg->ops] = m;
2761 best_alg->op[best_alg->ops] = alg_add_t2_m;
2771 if (m >= 0 && m < maxm)
2773 op_cost = shiftsub0_cost[speed][mode][m];
2774 new_limit.cost = best_cost.cost - op_cost;
2775 new_limit.latency = best_cost.latency - op_cost;
2776 synth_mult (alg_in, (t + 1) >> m, &new_limit, mode);
2778 alg_in->cost.cost += op_cost;
2779 alg_in->cost.latency += op_cost;
2780 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2782 struct algorithm *x;
2783 best_cost = alg_in->cost;
2784 x = alg_in, alg_in = best_alg, best_alg = x;
2785 best_alg->log[best_alg->ops] = m;
2786 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2794 /* If best_cost has not decreased, we have not found any algorithm. */
2795 if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
2797 /* We failed to find an algorithm. Record alg_impossible for
2798 this case (that is, <T, MODE, COST_LIMIT>) so that next time
2799 we are asked to find an algorithm for T within the same or
2800 lower COST_LIMIT, we can immediately return to the
2802 alg_hash[hash_index].t = t;
2803 alg_hash[hash_index].mode = mode;
2804 alg_hash[hash_index].speed = speed;
2805 alg_hash[hash_index].alg = alg_impossible;
2806 alg_hash[hash_index].cost = *cost_limit;
2810 /* Cache the result. */
2813 alg_hash[hash_index].t = t;
2814 alg_hash[hash_index].mode = mode;
2815 alg_hash[hash_index].speed = speed;
2816 alg_hash[hash_index].alg = best_alg->op[best_alg->ops];
2817 alg_hash[hash_index].cost.cost = best_cost.cost;
2818 alg_hash[hash_index].cost.latency = best_cost.latency;
2821 /* If we are getting too long a sequence for `struct algorithm'
2822 to record, make this search fail. */
2823 if (best_alg->ops == MAX_BITS_PER_WORD)
2826 /* Copy the algorithm from temporary space to the space at alg_out.
2827 We avoid using structure assignment because the majority of
2828 best_alg is normally undefined, and this is a critical function. */
2829 alg_out->ops = best_alg->ops + 1;
2830 alg_out->cost = best_cost;
2831 memcpy (alg_out->op, best_alg->op,
2832 alg_out->ops * sizeof *alg_out->op);
2833 memcpy (alg_out->log, best_alg->log,
2834 alg_out->ops * sizeof *alg_out->log);
2837 /* Find the cheapest way of multiplying a value of mode MODE by VAL.
2838 Try three variations:
2840 - a shift/add sequence based on VAL itself
2841 - a shift/add sequence based on -VAL, followed by a negation
2842 - a shift/add sequence based on VAL - 1, followed by an addition.
2844 Return true if the cheapest of these costs less than MULT_COST,
2845 describing the algorithm in *ALG and final fixup in *VARIANT. */
2848 choose_mult_variant (enum machine_mode mode, HOST_WIDE_INT val,
2849 struct algorithm *alg, enum mult_variant *variant,
2852 struct algorithm alg2;
2853 struct mult_cost limit;
2855 bool speed = optimize_insn_for_speed_p ();
2857 /* Fail quickly for impossible bounds. */
2861 /* Ensure that mult_cost provides a reasonable upper bound.
2862 Any constant multiplication can be performed with fewer
2863 than 2 * bits additions. */
2864 op_cost = 2 * GET_MODE_BITSIZE (mode) * add_cost[speed][mode];
2865 if (mult_cost > op_cost)
2866 mult_cost = op_cost;
2868 *variant = basic_variant;
2869 limit.cost = mult_cost;
2870 limit.latency = mult_cost;
2871 synth_mult (alg, val, &limit, mode);
2873 /* This works only if the inverted value actually fits in an
2875 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2877 op_cost = neg_cost[speed][mode];
2878 if (MULT_COST_LESS (&alg->cost, mult_cost))
2880 limit.cost = alg->cost.cost - op_cost;
2881 limit.latency = alg->cost.latency - op_cost;
2885 limit.cost = mult_cost - op_cost;
2886 limit.latency = mult_cost - op_cost;
2889 synth_mult (&alg2, -val, &limit, mode);
2890 alg2.cost.cost += op_cost;
2891 alg2.cost.latency += op_cost;
2892 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2893 *alg = alg2, *variant = negate_variant;
2896 /* This proves very useful for division-by-constant. */
2897 op_cost = add_cost[speed][mode];
2898 if (MULT_COST_LESS (&alg->cost, mult_cost))
2900 limit.cost = alg->cost.cost - op_cost;
2901 limit.latency = alg->cost.latency - op_cost;
2905 limit.cost = mult_cost - op_cost;
2906 limit.latency = mult_cost - op_cost;
2909 synth_mult (&alg2, val - 1, &limit, mode);
2910 alg2.cost.cost += op_cost;
2911 alg2.cost.latency += op_cost;
2912 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2913 *alg = alg2, *variant = add_variant;
2915 return MULT_COST_LESS (&alg->cost, mult_cost);
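/* E.g. (editorial): for VAL == -3 in a 32-bit mode, synthesizing
   -VAL == 3 as x + (x << 1) and negating the result is normally far
   cheaper than synthesizing 0xfffffffd directly, so *VARIANT comes
   back as negate_variant.  */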
2918 /* A subroutine of expand_mult, used for constant multiplications.
2919 Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
2920 convenient. Use the shift/add sequence described by ALG and apply
2921 the final fixup specified by VARIANT. */
2924 expand_mult_const (enum machine_mode mode, rtx op0, HOST_WIDE_INT val,
2925 rtx target, const struct algorithm *alg,
2926 enum mult_variant variant)
2928 HOST_WIDE_INT val_so_far;
2929 rtx insn, accum, tem;
2931 enum machine_mode nmode;
2933 /* Avoid referencing memory over and over and invalid sharing
2935 op0 = force_reg (mode, op0);
2937 /* ACCUM starts out either as OP0 or as a zero, depending on
2938 the first operation. */
2940 if (alg->op[0] == alg_zero)
2942 accum = copy_to_mode_reg (mode, const0_rtx);
2945 else if (alg->op[0] == alg_m)
2947 accum = copy_to_mode_reg (mode, op0);
2953 for (opno = 1; opno < alg->ops; opno++)
2955 int log = alg->log[opno];
2956 rtx shift_subtarget = optimize ? 0 : accum;
2958 = (opno == alg->ops - 1 && target != 0 && variant != add_variant
2961 rtx accum_target = optimize ? 0 : accum;
2963 switch (alg->op[opno])
2966 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2967 build_int_cst (NULL_TREE, log),
2973 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2974 build_int_cst (NULL_TREE, log),
2976 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2977 add_target ? add_target : accum_target);
2978 val_so_far += (HOST_WIDE_INT) 1 << log;
2982 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2983 build_int_cst (NULL_TREE, log),
2985 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2986 add_target ? add_target : accum_target);
2987 val_so_far -= (HOST_WIDE_INT) 1 << log;
2991 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2992 build_int_cst (NULL_TREE, log),
2995 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2996 add_target ? add_target : accum_target);
2997 val_so_far = (val_so_far << log) + 1;
3001 accum = expand_shift (LSHIFT_EXPR, mode, accum,
3002 build_int_cst (NULL_TREE, log),
3003 shift_subtarget, 0);
3004 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
3005 add_target ? add_target : accum_target);
3006 val_so_far = (val_so_far << log) - 1;
3009 case alg_add_factor:
3010 tem = expand_shift (LSHIFT_EXPR, mode, accum,
3011 build_int_cst (NULL_TREE, log),
3013 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
3014 add_target ? add_target : accum_target);
3015 val_so_far += val_so_far << log;
3018 case alg_sub_factor:
3019 tem = expand_shift (LSHIFT_EXPR, mode, accum,
3020 build_int_cst (NULL_TREE, log),
3022 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
3024 ? add_target : (optimize ? 0 : tem)));
3025 val_so_far = (val_so_far << log) - val_so_far;
3032 /* Write a REG_EQUAL note on the last insn so that we can cse
3033 multiplication sequences. Note that if ACCUM is a SUBREG,
3034 we've set the inner register and must properly indicate
3037 tem = op0, nmode = mode;
3038 if (GET_CODE (accum) == SUBREG)
3040 nmode = GET_MODE (SUBREG_REG (accum));
3041 tem = gen_lowpart (nmode, op0);
3044 insn = get_last_insn ();
3045 set_unique_reg_note (insn, REG_EQUAL,
3046 gen_rtx_MULT (nmode, tem,
3047 GEN_INT (val_so_far)));
3050 if (variant == negate_variant)
3052 val_so_far = -val_so_far;
3053 accum = expand_unop (mode, neg_optab, accum, target, 0);
3055 else if (variant == add_variant)
3057 val_so_far = val_so_far + 1;
3058 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
3061 /* Compare only the bits of val and val_so_far that are significant
3062 in the result mode, to avoid sign-/zero-extension confusion. */
3063 val &= GET_MODE_MASK (mode);
3064 val_so_far &= GET_MODE_MASK (mode);
3065 gcc_assert (val == val_so_far);
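  /* Worked trace (editorial) for VAL = 10 with the algorithm
     {alg_m, alg_add_t2_m log 2, alg_shift log 1}:

       accum = x;              val_so_far = 1
       accum = accum*4 + x;    val_so_far = 5
       accum = accum << 1;     val_so_far = 10

     so the assertion above compares 10 against 10.  */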
3070 /* Perform a multiplication and return an rtx for the result.
3071 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3072 TARGET is a suggestion for where to store the result (an rtx).
3074 We check specially for a constant integer as OP1.
3075 If you want this check for OP0 as well, then before calling
3076 you should swap the two operands if OP0 would be constant. */
3079 expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3082 enum mult_variant variant;
3083 struct algorithm algorithm;
3085 bool speed = optimize_insn_for_speed_p ();
3087 /* Handling const0_rtx here allows us to use zero as a rogue value for
3089 if (op1 == const0_rtx)
3091 if (op1 == const1_rtx)
3093 if (op1 == constm1_rtx)
3094 return expand_unop (mode,
3095 GET_MODE_CLASS (mode) == MODE_INT
3096 && !unsignedp && flag_trapv
3097 ? negv_optab : neg_optab,
3100 /* These are the operations that are potentially turned into a sequence
3101 of shifts and additions. */
3102 if (SCALAR_INT_MODE_P (mode)
3103 && (unsignedp || !flag_trapv))
3105 HOST_WIDE_INT coeff = 0;
3106 rtx fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
3108 /* synth_mult does an `unsigned int' multiply. As long as the mode is
3109 less than or equal in size to `unsigned int' this doesn't matter.
3110 If the mode is larger than `unsigned int', then synth_mult works
3111 only if the constant value exactly fits in an `unsigned int' without
3112 any truncation. This means that multiplying by negative values does
3113 not work; results are off by 2^32 on a 32 bit machine. */
3115 if (GET_CODE (op1) == CONST_INT)
3117 /* Attempt to handle multiplication of DImode values by negative
3118 coefficients, by performing the multiplication by a positive
3119 multiplier and then inverting the result. */
3120 if (INTVAL (op1) < 0
3121 && GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
3123 /* It's safe to use -INTVAL (op1) even for INT_MIN, as the
3124 result is interpreted as an unsigned coefficient.
3125 Exclude cost of op0 from max_cost to match the cost
3126 calculation of the synth_mult. */
3127 max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET, speed)
3128 - neg_cost[speed][mode];
3130 && choose_mult_variant (mode, -INTVAL (op1), &algorithm,
3131 &variant, max_cost))
3133 rtx temp = expand_mult_const (mode, op0, -INTVAL (op1),
3134 NULL_RTX, &algorithm,
3136 return expand_unop (mode, neg_optab, temp, target, 0);
3139 else coeff = INTVAL (op1);
3141 else if (GET_CODE (op1) == CONST_DOUBLE)
3143 /* If we are multiplying in DImode, it may still be a win
3144 to try to work with shifts and adds. */
3145 if (CONST_DOUBLE_HIGH (op1) == 0
3146 && CONST_DOUBLE_LOW (op1) > 0)
3147 coeff = CONST_DOUBLE_LOW (op1);
3148 else if (CONST_DOUBLE_LOW (op1) == 0
3149 && EXACT_POWER_OF_2_OR_ZERO_P (CONST_DOUBLE_HIGH (op1)))
3151 int shift = floor_log2 (CONST_DOUBLE_HIGH (op1))
3152 + HOST_BITS_PER_WIDE_INT;
3153 return expand_shift (LSHIFT_EXPR, mode, op0,
3154 build_int_cst (NULL_TREE, shift),
3159 /* We used to test optimize here, on the grounds that it's better to
3160 produce a smaller program when -O is not used. But this causes
3161 such a terrible slowdown sometimes that it seems better to always
3165 /* Special case powers of two. */
3166 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3167 return expand_shift (LSHIFT_EXPR, mode, op0,
3168 build_int_cst (NULL_TREE, floor_log2 (coeff)),
3171 /* Exclude cost of op0 from max_cost to match the cost
3172 calculation of the synth_mult. */
3173 max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET, speed);
3174 if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3176 return expand_mult_const (mode, op0, coeff, target,
3177 &algorithm, variant);
3181 if (GET_CODE (op0) == CONST_DOUBLE)
3188 /* Expand x*2.0 as x+x. */
3189 if (GET_CODE (op1) == CONST_DOUBLE
3190 && SCALAR_FLOAT_MODE_P (mode))
3193 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
3195 if (REAL_VALUES_EQUAL (d, dconst2))
3197 op0 = force_reg (GET_MODE (op0), op0);
3198 return expand_binop (mode, add_optab, op0, op0,
3199 target, unsignedp, OPTAB_LIB_WIDEN);
3203 /* This used to use umul_optab if unsigned, but for non-widening multiply
3204 there is no difference between signed and unsigned. */
3205 op0 = expand_binop (mode,
3207 && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
3208 ? smulv_optab : smul_optab,
3209 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
3214 /* Return the smallest n such that 2**n >= X. */
3217 ceil_log2 (unsigned HOST_WIDE_INT x)
3219 return floor_log2 (x - 1) + 1;
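/* E.g. (editorial): ceil_log2 (1) == 0, ceil_log2 (5) == 3 and
   ceil_log2 (8) == 3, relying on floor_log2 (0) being -1.  */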
3222 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
3223 replace division by D, and put the least significant N bits of the result
3224 in *MULTIPLIER_PTR and return the most significant bit.
3226 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
3227 needed precision is in PRECISION (should be <= N).
3229 PRECISION should be as small as possible so this function can choose
3230 the multiplier more freely.
3232 The rounded-up logarithm of D is placed in *lgup_ptr. A shift count that
3233 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
3235 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
3236 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
3239 unsigned HOST_WIDE_INT
3240 choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
3241 rtx *multiplier_ptr, int *post_shift_ptr, int *lgup_ptr)
3243 HOST_WIDE_INT mhigh_hi, mlow_hi;
3244 unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
3245 int lgup, post_shift;
3247 unsigned HOST_WIDE_INT nl, dummy1;
3248 HOST_WIDE_INT nh, dummy2;
3250 /* lgup = ceil(log2(divisor)); */
3251 lgup = ceil_log2 (d);
3253 gcc_assert (lgup <= n);
3256 pow2 = n + lgup - precision;
3258 /* We could handle this with some effort, but this case is much
3259 better handled directly with a scc insn, so rely on caller using
3261 gcc_assert (pow != 2 * HOST_BITS_PER_WIDE_INT);
3263 /* mlow = 2^(N + lgup)/d */
3264 if (pow >= HOST_BITS_PER_WIDE_INT)
3266 nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
3272 nl = (unsigned HOST_WIDE_INT) 1 << pow;
3274 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
3275 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
3277 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
3278 if (pow2 >= HOST_BITS_PER_WIDE_INT)
3279 nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
3281 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
3282 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
3283 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
3285 gcc_assert (!mhigh_hi || nh - d < d);
3286 gcc_assert (mhigh_hi <= 1 && mlow_hi <= 1);
3287 /* Assert that mlow < mhigh. */
3288 gcc_assert (mlow_hi < mhigh_hi
3289 || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo));
3291 /* If precision == N, then mlow, mhigh exceed 2^N
3292 (but they do not exceed 2^(N+1)). */
3294 /* Reduce to lowest terms. */
3295 for (post_shift = lgup; post_shift > 0; post_shift--)
3297 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
3298 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
3308 *post_shift_ptr = post_shift;
3310 if (n < HOST_BITS_PER_WIDE_INT)
3312 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
3313 *multiplier_ptr = GEN_INT (mhigh_lo & mask);
3314 return mhigh_lo >= mask;
3318 *multiplier_ptr = GEN_INT (mhigh_lo);
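/* Worked example (editorial, assuming 64-bit HOST_WIDE_INT): D = 7
   with N = PRECISION = 32 gives lgup = 3 and
   mhigh = (2^35 + 2^3)/7 = 0x124924925 with post_shift = 3.  The low
   32 bits 0x24924925 are stored through MULTIPLIER_PTR and 1 is
   returned for the 33rd bit, so x/7 == (x * 0x124924925) >> 35 for
   every 32-bit unsigned x.  */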
3323 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
3324 congruent to 1 (mod 2**N). */
3326 static unsigned HOST_WIDE_INT
3327 invert_mod2n (unsigned HOST_WIDE_INT x, int n)
3329 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
3331 /* The algorithm notes that the choice y = x satisfies
3332 x*y == 1 mod 2^3, since x is assumed odd.
3333 Each iteration doubles the number of bits of significance in y. */
3335 unsigned HOST_WIDE_INT mask;
3336 unsigned HOST_WIDE_INT y = x;
3339 mask = (n == HOST_BITS_PER_WIDE_INT
3340 ? ~(unsigned HOST_WIDE_INT) 0
3341 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
3345 y = y * (2 - x*y) & mask; /* Modulo 2^N */
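      /* E.g. (editorial): x = 3, n = 8.  Y starts at 3, which is exact
	 mod 2^3; the first iteration gives y = 3*(2 - 9) & 0xff = 0xeb,
	 exact mod 2^6, and the second gives y = 0xab, with
	 3 * 0xab == 0x201 == 1 (mod 0x100).  */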
3351 /* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
3352 flavor of OP0 and OP1. ADJ_OPERAND is already the high half of the
3353 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
3354 to become unsigned; if UNSIGNEDP is zero, adjust the unsigned product to
3357 The result is put in TARGET if that is convenient.
3359 MODE is the mode of operation. */
3362 expand_mult_highpart_adjust (enum machine_mode mode, rtx adj_operand, rtx op0,
3363 rtx op1, rtx target, int unsignedp)
3366 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
3368 tem = expand_shift (RSHIFT_EXPR, mode, op0,
3369 build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
3371 tem = expand_and (mode, tem, op1, NULL_RTX);
3373 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3376 tem = expand_shift (RSHIFT_EXPR, mode, op1,
3377 build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
3379 tem = expand_and (mode, tem, op0, NULL_RTX);
3380 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3386 /* Subroutine of expand_mult_highpart. Return the MODE high part of OP. */
3389 extract_high_half (enum machine_mode mode, rtx op)
3391 enum machine_mode wider_mode;
3393 if (mode == word_mode)
3394 return gen_highpart (mode, op);
3396 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3398 wider_mode = GET_MODE_WIDER_MODE (mode);
3399 op = expand_shift (RSHIFT_EXPR, wider_mode, op,
3400 build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode)), 0, 1);
3401 return convert_modes (mode, wider_mode, op, 0);
3404 /* Like expand_mult_highpart, but only consider using a multiplication
3405 optab. OP1 is an rtx for the constant operand. */
3408 expand_mult_highpart_optab (enum machine_mode mode, rtx op0, rtx op1,
3409 rtx target, int unsignedp, int max_cost)
3411 rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode);
3412 enum machine_mode wider_mode;
3416 bool speed = optimize_insn_for_speed_p ();
3418 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3420 wider_mode = GET_MODE_WIDER_MODE (mode);
3421 size = GET_MODE_BITSIZE (mode);
3423 /* Firstly, try using a multiplication insn that only generates the needed
3424 high part of the product, and in the sign flavor of unsignedp. */
3425 if (mul_highpart_cost[speed][mode] < max_cost)
3427 moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
3428 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3429 unsignedp, OPTAB_DIRECT);
3434 /* Secondly, same as above, but use sign flavor opposite of unsignedp.
3435 Need to adjust the result after the multiplication. */
3436 if (size - 1 < BITS_PER_WORD
3437 && (mul_highpart_cost[speed][mode] + 2 * shift_cost[speed][mode][size-1]
3438 + 4 * add_cost[speed][mode] < max_cost))
3440 moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
3441 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3442 unsignedp, OPTAB_DIRECT);
3444 /* We used the wrong signedness. Adjust the result. */
3445 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3449 /* Try widening multiplication. */
3450 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
3451 if (optab_handler (moptab, wider_mode)->insn_code != CODE_FOR_nothing
3452 && mul_widen_cost[speed][wider_mode] < max_cost)
3454 tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
3455 unsignedp, OPTAB_WIDEN);
3457 return extract_high_half (mode, tem);
3460 /* Try widening the mode and perform a non-widening multiplication. */
3461 if (optab_handler (smul_optab, wider_mode)->insn_code != CODE_FOR_nothing
3462 && size - 1 < BITS_PER_WORD
3463 && mul_cost[speed][wider_mode] + shift_cost[speed][mode][size-1] < max_cost)
3465 rtx insns, wop0, wop1;
3467 /* We need to widen the operands, for example to ensure the
3468 constant multiplier is correctly sign or zero extended.
3469 Use a sequence to clean-up any instructions emitted by
3470 the conversions if things don't work out. */
3472 wop0 = convert_modes (wider_mode, mode, op0, unsignedp);
3473 wop1 = convert_modes (wider_mode, mode, op1, unsignedp);
3474 tem = expand_binop (wider_mode, smul_optab, wop0, wop1, 0,
3475 unsignedp, OPTAB_WIDEN);
3476 insns = get_insns ();
3482 return extract_high_half (mode, tem);
3486 /* Try widening multiplication of opposite signedness, and adjust. */
3487 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
3488 if (optab_handler (moptab, wider_mode)->insn_code != CODE_FOR_nothing
3489 && size - 1 < BITS_PER_WORD
3490 && (mul_widen_cost[speed][wider_mode] + 2 * shift_cost[speed][mode][size-1]
3491 + 4 * add_cost[speed][mode] < max_cost))
3493 tem = expand_binop (wider_mode, moptab, op0, narrow_op1,
3494 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
3497 tem = extract_high_half (mode, tem);
3498 /* We used the wrong signedness. Adjust the result. */
3499 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3507 /* Emit code to multiply OP0 and OP1 (where OP1 is an integer constant),
3508 putting the high half of the result in TARGET if that is convenient,
3509 and return where the result is.  If the operation cannot be performed,
3512 MODE is the mode of operation and result.
3514 UNSIGNEDP nonzero means unsigned multiply.
3516 MAX_COST is the total allowed cost for the expanded RTL. */
3519 expand_mult_highpart (enum machine_mode mode, rtx op0, rtx op1,
3520 rtx target, int unsignedp, int max_cost)
3522 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
3523 unsigned HOST_WIDE_INT cnst1;
3525 bool sign_adjust = false;
3526 enum mult_variant variant;
3527 struct algorithm alg;
3529 bool speed = optimize_insn_for_speed_p ();
3531 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3532 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
3533 gcc_assert (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT);
3535 cnst1 = INTVAL (op1) & GET_MODE_MASK (mode);
3537 /* We can't optimize modes wider than BITS_PER_WORD.
3538 ??? We might be able to perform double-word arithmetic if
3539 mode == word_mode, however all the cost calculations in
3540 synth_mult etc. assume single-word operations. */
3541 if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
3542 return expand_mult_highpart_optab (mode, op0, op1, target,
3543 unsignedp, max_cost);
3545 extra_cost = shift_cost[speed][mode][GET_MODE_BITSIZE (mode) - 1];
3547 /* Check whether we try to multiply by a negative constant. */
3548 if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
3551 extra_cost += add_cost[speed][mode];
3554 /* See whether shift/add multiplication is cheap enough. */
3555 if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
3556 max_cost - extra_cost))
3558 /* See whether the specialized multiplication optabs are
3559 cheaper than the shift/add version. */
3560 tem = expand_mult_highpart_optab (mode, op0, op1, target, unsignedp,
3561 alg.cost.cost + extra_cost);
3565 tem = convert_to_mode (wider_mode, op0, unsignedp);
3566 tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
3567 tem = extract_high_half (mode, tem);
3569 /* Adjust result for signedness. */
3571 tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);
3575 return expand_mult_highpart_optab (mode, op0, op1, target,
3576 unsignedp, max_cost);
3580 /* Expand signed modulus of OP0 by a power of two D in mode MODE. */
3583 expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
3585 unsigned HOST_WIDE_INT masklow, maskhigh;
3586 rtx result, temp, shift, label;
3589 logd = floor_log2 (d);
3590 result = gen_reg_rtx (mode);
3592 /* Avoid conditional branches when they're expensive. */
3593 if (BRANCH_COST (optimize_insn_for_speed_p (), false) >= 2
3594 && optimize_insn_for_speed_p ())
3596 rtx signmask = emit_store_flag (result, LT, op0, const0_rtx,
3600 signmask = force_reg (mode, signmask);
3601 masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
3602 shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd);
3604 /* Use the rtx_cost of a LSHIFTRT instruction to determine
3605 which instruction sequence to use. If logical right shifts
3606 are expensive then use 2 XORs, 2 SUBs and an AND; otherwise
3607 use a LSHIFTRT, 1 ADD, 1 SUB and an AND. */
3609 temp = gen_rtx_LSHIFTRT (mode, result, shift);
3610 if (optab_handler (lshr_optab, mode)->insn_code == CODE_FOR_nothing
3611 || rtx_cost (temp, SET, optimize_insn_for_speed_p ()) > COSTS_N_INSNS (2))
3613 temp = expand_binop (mode, xor_optab, op0, signmask,
3614 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3615 temp = expand_binop (mode, sub_optab, temp, signmask,
3616 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3617 temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
3618 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3619 temp = expand_binop (mode, xor_optab, temp, signmask,
3620 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3621 temp = expand_binop (mode, sub_optab, temp, signmask,
3622 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3626 signmask = expand_binop (mode, lshr_optab, signmask, shift,
3627 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3628 signmask = force_reg (mode, signmask);
3630 temp = expand_binop (mode, add_optab, op0, signmask,
3631 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3632 temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
3633 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3634 temp = expand_binop (mode, sub_optab, temp, signmask,
3635 NULL_RTX, 1, OPTAB_LIB_WIDEN);
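	  /* Editorial sketch of the two idioms above, for 32-bit X,
	     D = 2^logd and SIGNMASK = -(X < 0):

	       cheap shift:  m = (unsigned) SIGNMASK >> (32 - logd);
			     r = ((X + m) & (D - 1)) - m;
	       expensive:    r = (((X ^ SIGNMASK) - SIGNMASK) & (D - 1)
				  ^ SIGNMASK) - SIGNMASK;

	     e.g. X = -5, D = 4 gives r = -1 either way.  */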
3641 /* Mask contains the mode's signbit and the significant bits of the
3642 modulus. By including the signbit in the operation, many targets
3643 can avoid an explicit compare operation in the following comparison
3646 masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
3647 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3649 masklow |= (HOST_WIDE_INT) -1 << (GET_MODE_BITSIZE (mode) - 1);
3653 maskhigh = (HOST_WIDE_INT) -1
3654 << (GET_MODE_BITSIZE (mode) - HOST_BITS_PER_WIDE_INT - 1);
3656 temp = expand_binop (mode, and_optab, op0,
3657 immed_double_const (masklow, maskhigh, mode),
3658 result, 1, OPTAB_LIB_WIDEN);
3660 emit_move_insn (result, temp);
3662 label = gen_label_rtx ();
3663 do_cmp_and_jump (result, const0_rtx, GE, mode, label);
3665 temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
3666 0, OPTAB_LIB_WIDEN);
3667 masklow = (HOST_WIDE_INT) -1 << logd;
3669 temp = expand_binop (mode, ior_optab, temp,
3670 immed_double_const (masklow, maskhigh, mode),
3671 result, 1, OPTAB_LIB_WIDEN);
3672 temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
3673 0, OPTAB_LIB_WIDEN);
3675 emit_move_insn (result, temp);
3680 /* Expand signed division of OP0 by a power of two D in mode MODE.
3681 This routine is only called for positive values of D. */
3684 expand_sdiv_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
3690 logd = floor_log2 (d);
3691 shift = build_int_cst (NULL_TREE, logd);
3694 && BRANCH_COST (optimize_insn_for_speed_p (),
3697 temp = gen_reg_rtx (mode);
3698 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, 1);
3699 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3700 0, OPTAB_LIB_WIDEN);
3701 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
3704 #ifdef HAVE_conditional_move
3705 if (BRANCH_COST (optimize_insn_for_speed_p (), false)
3710 /* ??? emit_conditional_move forces a stack adjustment via
3711 compare_from_rtx so, if the sequence is discarded, it will
3712 be lost. Do it now instead. */
3713 do_pending_stack_adjust ();
3716 temp2 = copy_to_mode_reg (mode, op0);
3717 temp = expand_binop (mode, add_optab, temp2, GEN_INT (d-1),
3718 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3719 temp = force_reg (mode, temp);
3721 /* Construct "temp2 = (temp2 < 0) ? temp : temp2". */
3722 temp2 = emit_conditional_move (temp2, LT, temp2, const0_rtx,
3723 mode, temp, temp2, mode, 0);
3726 rtx seq = get_insns ();
3729 return expand_shift (RSHIFT_EXPR, mode, temp2, shift, NULL_RTX, 0);
3735 if (BRANCH_COST (optimize_insn_for_speed_p (),
3738 int ushift = GET_MODE_BITSIZE (mode) - logd;
3740 temp = gen_reg_rtx (mode);
3741 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, -1);
3742 if (shift_cost[optimize_insn_for_speed_p ()][mode][ushift] > COSTS_N_INSNS (1))
3743 temp = expand_binop (mode, and_optab, temp, GEN_INT (d - 1),
3744 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3746 temp = expand_shift (RSHIFT_EXPR, mode, temp,
3747 build_int_cst (NULL_TREE, ushift),
3749 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3750 0, OPTAB_LIB_WIDEN);
3751 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
3754 label = gen_label_rtx ();
3755 temp = copy_to_mode_reg (mode, op0);
3756 do_cmp_and_jump (temp, const0_rtx, GE, mode, label);
3757 expand_inc (temp, GEN_INT (d - 1));
3759 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
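/* Editorial note: each variant above computes the truncating division
   x / 2^logd as

     (x + (x < 0 ? d - 1 : 0)) >> logd   (arithmetic shift)

   differing only in how the test x < 0 is turned into straight-line
   code or a branch.  */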
3762 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
3763 if that is convenient, and returning where the result is.
3764 You may request either the quotient or the remainder as the result;
3765 specify REM_FLAG nonzero to get the remainder.
3767 CODE is the expression code for which kind of division this is;
3768 it controls how rounding is done. MODE is the machine mode to use.
3769 UNSIGNEDP nonzero means do unsigned division. */
3771 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
3772 and then correct it by or'ing in missing high bits
3773 if result of ANDI is nonzero.
3774 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
3775 This could optimize to a bfexts instruction.
3776 But C doesn't use these operations, so their optimizations are
3778 /* ??? For modulo, we don't actually need the highpart of the first product,
3779 the low part will do nicely. And for small divisors, the second multiply
3780 can also be a low-part only multiply or even be completely left out.
3781 E.g. to calculate the remainder of a division by 3 with a 32 bit
3782 multiply, multiply with 0x55555556 and extract the upper two bits;
3783 the result is exact for inputs up to 0x1fffffff.
3784 The input range can be reduced by using cross-sum rules.
3785 For odd divisors >= 3, the following table gives right shift counts
3786 so that if a number is shifted by an integer multiple of the given
3787 amount, the remainder stays the same:
3788 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
3789 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
3790 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
3791 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
3792 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
3794 Cross-sum rules for even numbers can be derived by leaving as many bits
3795 to the right alone as the divisor has zeros to the right.
3796 E.g. if x is an unsigned 32 bit number:
3797 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
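/* [Editorial sketch -- not GCC source.]  The divide-by-3 trick in the
   comment above, spelled out: the low product bits carry the fraction
   of x/3, so its top two bits are the remainder.  Valid within the
   comment's stated bound:  */
#if 0
#include <stdint.h>

static uint32_t
urem3 (uint32_t x)   /* exact for x <= 0x1fffffff */
{
  /* 0x55555556 == ceil (2**32 / 3) */
  return (x * 0x55555556u) >> 30;
}
#endif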
3800 rtx
3801 expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
3802 rtx op0, rtx op1, rtx target, int unsignedp)
3803 {
3804 enum machine_mode compute_mode;
3805 rtx tquotient;
3806 rtx quotient = 0, remainder = 0;
3807 rtx last;
3808 int size;
3809 rtx insn, set;
3810 optab optab1, optab2;
3811 int op1_is_constant, op1_is_pow2 = 0;
3812 int max_cost, extra_cost;
3813 static HOST_WIDE_INT last_div_const = 0;
3814 unsigned HOST_WIDE_INT ext_op1;
3815 bool speed = optimize_insn_for_speed_p ();
3817 op1_is_constant = GET_CODE (op1) == CONST_INT;
3818 if (op1_is_constant)
3820 ext_op1 = INTVAL (op1);
3821 if (unsignedp)
3822 ext_op1 &= GET_MODE_MASK (mode);
3823 op1_is_pow2 = ((EXACT_POWER_OF_2_OR_ZERO_P (ext_op1)
3824 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1))));
3825 }
3827 /*
3828 This is the structure of expand_divmod:
3830 First comes code to fix up the operands so we can perform the operations
3831 correctly and efficiently.
3833 Second comes a switch statement with code specific for each rounding mode.
3834 For some special operands this code emits all RTL for the desired
3835 operation, for other cases, it generates only a quotient and stores it in
3836 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
3837 to indicate that it has not done anything.
3839 Last comes code that finishes the operation. If QUOTIENT is set and
3840 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
3841 QUOTIENT is not set, it is computed using trunc rounding.
3843 We try to generate special code for division and remainder when OP1 is a
3844 constant. If |OP1| = 2**n we can use shifts and some other fast
3845 operations. For other values of OP1, we compute a carefully selected
3846 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
3847 by m.
3849 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
3850 half of the product. Different strategies for generating the product are
3851 implemented in expand_mult_highpart.
3853 If what we actually want is the remainder, we generate that by another
3854 by-constant multiplication and a subtraction. */
3856 /* We shouldn't be called with OP1 == const1_rtx, but some of the
3857 code below will malfunction if we are, so check here and handle
3858 the special case if so. */
3859 if (op1 == const1_rtx)
3860 return rem_flag ? const0_rtx : op0;
3862 /* When dividing by -1, we could get an overflow.
3863 negv_optab can handle overflows. */
3864 if (! unsignedp && op1 == constm1_rtx)
3865 {
3866 if (rem_flag)
3867 return const0_rtx;
3868 return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
3869 ? negv_optab : neg_optab, op0, target, 0);
3870 }
3872 if (target
3873 /* Don't use the function value register as a target
3874 since we have to read it as well as write it,
3875 and function-inlining gets confused by this. */
3876 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
3877 /* Don't clobber an operand while doing a multi-step calculation. */
3878 || ((rem_flag || op1_is_constant)
3879 && (reg_mentioned_p (target, op0)
3880 || (MEM_P (op0) && MEM_P (target))))
3881 || reg_mentioned_p (target, op1)
3882 || (MEM_P (op1) && MEM_P (target))))
3883 target = 0;
3885 /* Get the mode in which to perform this computation. Normally it will
3886 be MODE, but sometimes we can't do the desired operation in MODE.
3887 If so, pick a wider mode in which we can do the operation. Convert
3888 to that mode at the start to avoid repeated conversions.
3890 First see what operations we need. These depend on the expression
3891 we are evaluating. (We assume that divxx3 insns exist under the
3892 same conditions that modxx3 insns and that these insns don't normally
3893 fail. If these assumptions are not correct, we may generate less
3894 efficient code in some cases.)
3896 Then see if we find a mode in which we can open-code that operation
3897 (either a division, modulus, or shift). Finally, check for the smallest
3898 mode for which we can do the operation with a library call. */
3900 /* We might want to refine this now that we have division-by-constant
3901 optimization. Since expand_mult_highpart tries so many variants, it is
3902 not straightforward to generalize this. Maybe we should make an array
3903 of possible modes in init_expmed? Save this for GCC 2.7. */
3905 optab1 = ((op1_is_pow2 && op1 != const0_rtx)
3906 ? (unsignedp ? lshr_optab : ashr_optab)
3907 : (unsignedp ? udiv_optab : sdiv_optab));
3908 optab2 = ((op1_is_pow2 && op1 != const0_rtx)
3909 ? optab1
3910 : (unsignedp ? udivmod_optab : sdivmod_optab));
3912 for (compute_mode = mode; compute_mode != VOIDmode;
3913 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3914 if (optab_handler (optab1, compute_mode)->insn_code != CODE_FOR_nothing
3915 || optab_handler (optab2, compute_mode)->insn_code != CODE_FOR_nothing)
3918 if (compute_mode == VOIDmode)
3919 for (compute_mode = mode; compute_mode != VOIDmode;
3920 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3921 if (optab_libfunc (optab1, compute_mode)
3922 || optab_libfunc (optab2, compute_mode))
3925 /* If we still couldn't find a mode, use MODE, but expand_binop will
3926 probably die. */
3927 if (compute_mode == VOIDmode)
3928 compute_mode = mode;
3930 if (target && GET_MODE (target) == compute_mode)
3931 tquotient = target;
3932 else
3933 tquotient = gen_reg_rtx (compute_mode);
3935 size = GET_MODE_BITSIZE (compute_mode);
3936 #if 0
3937 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3938 (mode), and thereby get better code when OP1 is a constant. Do that
3939 later. It will require going over all usages of SIZE below. */
3940 size = GET_MODE_BITSIZE (mode);
3941 #endif
3943 /* Only deduct something for a REM if the last divide done was
3944 for a different constant. Then set the constant of the last
3945 divide. */
3946 max_cost = unsignedp ? udiv_cost[speed][compute_mode] : sdiv_cost[speed][compute_mode];
3947 if (rem_flag && ! (last_div_const != 0 && op1_is_constant
3948 && INTVAL (op1) == last_div_const))
3949 max_cost -= mul_cost[speed][compute_mode] + add_cost[speed][compute_mode];
3951 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
3953 /* Now convert to the best mode to use. */
3954 if (compute_mode != mode)
3955 {
3956 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
3957 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
3959 /* convert_modes may have placed op1 into a register, so we
3960 must recompute the following. */
3961 op1_is_constant = GET_CODE (op1) == CONST_INT;
3962 op1_is_pow2 = (op1_is_constant
3963 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3964 || (! unsignedp
3965 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
3966 }
3968 /* If one of the operands is a volatile MEM, copy it into a register. */
3970 if (MEM_P (op0) && MEM_VOLATILE_P (op0))
3971 op0 = force_reg (compute_mode, op0);
3972 if (MEM_P (op1) && MEM_VOLATILE_P (op1))
3973 op1 = force_reg (compute_mode, op1);
3975 /* If we need the remainder or if OP1 is constant, we need to
3976 put OP0 in a register in case it has any queued subexpressions. */
3977 if (rem_flag || op1_is_constant)
3978 op0 = force_reg (compute_mode, op0);
3980 last = get_last_insn ();
3982 /* Promote floor rounding to trunc rounding for unsigned operations. */
3983 if (unsignedp)
3984 {
3985 if (code == FLOOR_DIV_EXPR)
3986 code = TRUNC_DIV_EXPR;
3987 if (code == FLOOR_MOD_EXPR)
3988 code = TRUNC_MOD_EXPR;
3989 if (code == EXACT_DIV_EXPR && op1_is_pow2)
3990 code = TRUNC_DIV_EXPR;
3991 }
3993 if (op1 != const0_rtx)
3994 switch (code)
3995 {
3996 case TRUNC_MOD_EXPR:
3997 case TRUNC_DIV_EXPR:
3998 if (op1_is_constant)
4002 unsigned HOST_WIDE_INT mh;
4003 int pre_shift, post_shift;
4006 unsigned HOST_WIDE_INT d = (INTVAL (op1)
4007 & GET_MODE_MASK (compute_mode));
4009 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
4011 pre_shift = floor_log2 (d);
4015 = expand_binop (compute_mode, and_optab, op0,
4016 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
4020 return gen_lowpart (mode, remainder);
4022 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4023 build_int_cst (NULL_TREE,
4027 else if (size <= HOST_BITS_PER_WIDE_INT)
4029 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
4031 /* Most significant bit of divisor is set; emit an scc
4032 insn. */
4033 quotient = emit_store_flag_force (tquotient, GEU, op0, op1,
4034 compute_mode, 1, 1);
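/* [Editorial sketch -- not GCC source.]  When the divisor's most
   significant bit is set, the unsigned quotient can only be 0 or 1,
   which is what the single scc above exploits:  */
#if 0
#include <stdint.h>

static uint32_t
udiv_msb_set (uint32_t x, uint32_t d)   /* requires d >= 0x80000000 */
{
  return x >= d;
}
#endif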
4038 /* Find a suitable multiplier and right shift count
4039 instead of multiplying with D. */
4041 mh = choose_multiplier (d, size, size,
4042 &ml, &post_shift, &dummy);
4044 /* If the suggested multiplier is more than SIZE bits,
4045 we can do better for even divisors, using an
4046 initial right shift. */
4047 if (mh != 0 && (d & 1) == 0)
4049 pre_shift = floor_log2 (d & -d);
4050 mh = choose_multiplier (d >> pre_shift, size,
4052 &ml, &post_shift, &dummy);
4062 if (post_shift - 1 >= BITS_PER_WORD)
4063 goto fail1;
4065 extra_cost
4066 = (shift_cost[speed][compute_mode][post_shift - 1]
4067 + shift_cost[speed][compute_mode][1]
4068 + 2 * add_cost[speed][compute_mode]);
4069 t1 = expand_mult_highpart (compute_mode, op0, ml,
4071 max_cost - extra_cost);
4074 t2 = force_operand (gen_rtx_MINUS (compute_mode,
4078 (RSHIFT_EXPR, compute_mode, t2,
4079 build_int_cst (NULL_TREE, 1),
4081 t4 = force_operand (gen_rtx_PLUS (compute_mode,
4084 quotient = expand_shift
4085 (RSHIFT_EXPR, compute_mode, t4,
4086 build_int_cst (NULL_TREE, post_shift - 1),
4093 if (pre_shift >= BITS_PER_WORD
4094 || post_shift >= BITS_PER_WORD)
4095 goto fail1;
4097 t1 = expand_shift
4098 (RSHIFT_EXPR, compute_mode, op0,
4099 build_int_cst (NULL_TREE, pre_shift),
4102 = (shift_cost[speed][compute_mode][pre_shift]
4103 + shift_cost[speed][compute_mode][post_shift]);
4104 t2 = expand_mult_highpart (compute_mode, t1, ml,
4106 max_cost - extra_cost);
4109 quotient = expand_shift
4110 (RSHIFT_EXPR, compute_mode, t2,
4111 build_int_cst (NULL_TREE, post_shift),
4116 else /* Too wide mode to use tricky code */
4117 break;
4119 insn = get_last_insn ();
4120 if (insn != last
4121 && (set = single_set (insn)) != 0
4122 && SET_DEST (set) == quotient)
4123 set_unique_reg_note (insn,
4124 REG_EQUAL,
4125 gen_rtx_UDIV (compute_mode, op0, op1));
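/* [Editorial sketch -- not GCC source.]  The t1..t4 sequence above
   (the mh != 0 case) instantiated for d == 7 in 32 bits; 0x24924925
   and post_shift == 3 are the usual choose_multiplier results for
   this divisor (assumed values, not computed here):  */
#if 0
#include <stdint.h>

static uint32_t
udiv7 (uint32_t x)
{
  uint32_t t1 = (uint32_t) (((uint64_t) x * 0x24924925u) >> 32); /* mulhi */
  uint32_t t2 = x - t1;
  uint32_t t3 = t2 >> 1;
  uint32_t t4 = t3 + t1;
  return t4 >> 2;        /* post_shift - 1 */
}
#endif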
4127 else /* TRUNC_DIV, signed */
4129 unsigned HOST_WIDE_INT ml;
4130 int lgup, post_shift;
4132 HOST_WIDE_INT d = INTVAL (op1);
4133 unsigned HOST_WIDE_INT abs_d;
4135 /* Since d might be INT_MIN, we have to cast to
4136 unsigned HOST_WIDE_INT before negating to avoid
4137 undefined signed overflow. */
4139 ? (unsigned HOST_WIDE_INT) d
4140 : - (unsigned HOST_WIDE_INT) d);
4142 /* n rem d = n rem -d */
4143 if (rem_flag && d < 0)
4146 op1 = gen_int_mode (abs_d, compute_mode);
4152 quotient = expand_unop (compute_mode, neg_optab, op0,
4154 else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
4156 /* This case is not handled correctly below. */
4157 quotient = emit_store_flag (tquotient, EQ, op0, op1,
4158 compute_mode, 1, 1);
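/* [Editorial sketch -- not GCC source.]  For d == -2**(size-1) the
   truncating quotient is nonzero only when op0 equals d, hence the
   single store-flag EQ above:  */
#if 0
#include <stdint.h>

static int32_t
sdiv_by_int32_min (int32_t x)   /* x / INT32_MIN, truncating */
{
  return x == INT32_MIN;
}
#endif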
4162 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
4163 && (rem_flag ? smod_pow2_cheap[speed][compute_mode]
4164 : sdiv_pow2_cheap[speed][compute_mode])
4165 /* We assume that the cheap metric is true if the
4166 optab has an expander for this mode. */
4167 && ((optab_handler ((rem_flag ? smod_optab
4169 compute_mode)->insn_code
4170 != CODE_FOR_nothing)
4171 || (optab_handler(sdivmod_optab,
4173 ->insn_code != CODE_FOR_nothing)))
4175 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
4179 remainder = expand_smod_pow2 (compute_mode, op0, d);
4181 return gen_lowpart (mode, remainder);
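/* [Editorial sketch -- not GCC source.]  One shape expand_smod_pow2
   can emit for a truncating x % 2**k: bias negative dividends by
   d - 1, mask, then remove the bias (assumes arithmetic right shift
   and 1 <= k <= 31):  */
#if 0
#include <stdint.h>

static int32_t
smod_pow2 (int32_t x, int k)
{
  uint32_t mask = ((uint32_t) 1 << k) - 1;
  uint32_t t = (uint32_t) (x >> 31) >> (32 - k);   /* d - 1 if x < 0 */
  return (int32_t) (((uint32_t) x + t) & mask) - (int32_t) t;
}
#endif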
4184 if (sdiv_pow2_cheap[speed][compute_mode]
4185 && ((optab_handler (sdiv_optab, compute_mode)->insn_code
4186 != CODE_FOR_nothing)
4187 || (optab_handler (sdivmod_optab, compute_mode)->insn_code
4188 != CODE_FOR_nothing)))
4189 quotient = expand_divmod (0, TRUNC_DIV_EXPR,
4191 gen_int_mode (abs_d,
4195 quotient = expand_sdiv_pow2 (compute_mode, op0, abs_d);
4197 /* We have computed OP0 / abs(OP1). If OP1 is negative,
4198 negate the quotient. */
4201 insn = get_last_insn ();
4202 if (insn != last
4203 && (set = single_set (insn)) != 0
4204 && SET_DEST (set) == quotient
4205 && abs_d < ((unsigned HOST_WIDE_INT) 1
4206 << (HOST_BITS_PER_WIDE_INT - 1)))
4207 set_unique_reg_note (insn,
4208 REG_EQUAL,
4209 gen_rtx_DIV (compute_mode,
4216 quotient = expand_unop (compute_mode, neg_optab,
4217 quotient, quotient, 0);
4220 else if (size <= HOST_BITS_PER_WIDE_INT)
4222 choose_multiplier (abs_d, size, size - 1,
4223 &mlr, &post_shift, &lgup);
4224 ml = (unsigned HOST_WIDE_INT) INTVAL (mlr);
4225 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
4229 if (post_shift >= BITS_PER_WORD
4230 || size - 1 >= BITS_PER_WORD)
4231 goto fail1;
4233 extra_cost = (shift_cost[speed][compute_mode][post_shift]
4234 + shift_cost[speed][compute_mode][size - 1]
4235 + add_cost[speed][compute_mode]);
4236 t1 = expand_mult_highpart (compute_mode, op0, mlr,
4238 max_cost - extra_cost);
4242 (RSHIFT_EXPR, compute_mode, t1,
4243 build_int_cst (NULL_TREE, post_shift),
4246 (RSHIFT_EXPR, compute_mode, op0,
4247 build_int_cst (NULL_TREE, size - 1),
4251 = force_operand (gen_rtx_MINUS (compute_mode,
4256 = force_operand (gen_rtx_MINUS (compute_mode,
4264 if (post_shift >= BITS_PER_WORD
4265 || size - 1 >= BITS_PER_WORD)
4266 goto fail1;
4268 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
4269 mlr = gen_int_mode (ml, compute_mode);
4270 extra_cost = (shift_cost[speed][compute_mode][post_shift]
4271 + shift_cost[speed][compute_mode][size - 1]
4272 + 2 * add_cost[speed][compute_mode]);
4273 t1 = expand_mult_highpart (compute_mode, op0, mlr,
4275 max_cost - extra_cost);
4278 t2 = force_operand (gen_rtx_PLUS (compute_mode,
4282 (RSHIFT_EXPR, compute_mode, t2,
4283 build_int_cst (NULL_TREE, post_shift),
4286 (RSHIFT_EXPR, compute_mode, op0,
4287 build_int_cst (NULL_TREE, size - 1),
4291 = force_operand (gen_rtx_MINUS (compute_mode,
4296 = force_operand (gen_rtx_MINUS (compute_mode,
4301 else /* Too wide mode to use tricky code */
4302 break;
4304 insn = get_last_insn ();
4305 if (insn != last
4306 && (set = single_set (insn)) != 0
4307 && SET_DEST (set) == quotient)
4308 set_unique_reg_note (insn,
4309 REG_EQUAL,
4310 gen_rtx_DIV (compute_mode, op0, op1));
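/* [Editorial sketch -- not GCC source.]  The signed multiply-highpart
   path above for d == 3: 0x55555556 and post_shift == 0 are the usual
   choose_multiplier results (assumed values).  The sign word is
   subtracted at the end, as in the MINUS steps above:  */
#if 0
#include <stdint.h>

static int32_t
sdiv3 (int32_t x)
{
  int32_t hi = (int32_t) (((int64_t) x * 0x55555556LL) >> 32); /* mulhi */
  return hi - (x >> 31);
}
#endif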
4314 fail1:
4315 delete_insns_since (last);
4316 break;
4318 case FLOOR_DIV_EXPR:
4319 case FLOOR_MOD_EXPR:
4320 /* We will come here only for signed operations. */
4321 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
4323 unsigned HOST_WIDE_INT mh;
4324 int pre_shift, lgup, post_shift;
4325 HOST_WIDE_INT d = INTVAL (op1);
4330 /* We could just as easily deal with negative constants here,
4331 but it does not seem worth the trouble for GCC 2.6. */
4332 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
4334 pre_shift = floor_log2 (d);
4337 remainder = expand_binop (compute_mode, and_optab, op0,
4338 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
4339 remainder, 0, OPTAB_LIB_WIDEN);
4341 return gen_lowpart (mode, remainder);
4343 quotient = expand_shift
4344 (RSHIFT_EXPR, compute_mode, op0,
4345 build_int_cst (NULL_TREE, pre_shift),
4352 mh = choose_multiplier (d, size, size - 1,
4353 &ml, &post_shift, &lgup);
4356 if (post_shift < BITS_PER_WORD
4357 && size - 1 < BITS_PER_WORD)
4360 (RSHIFT_EXPR, compute_mode, op0,
4361 build_int_cst (NULL_TREE, size - 1),
4363 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
4364 NULL_RTX, 0, OPTAB_WIDEN);
4365 extra_cost = (shift_cost[speed][compute_mode][post_shift]
4366 + shift_cost[speed][compute_mode][size - 1]
4367 + 2 * add_cost[speed][compute_mode]);
4368 t3 = expand_mult_highpart (compute_mode, t2, ml,
4370 max_cost - extra_cost);
4374 (RSHIFT_EXPR, compute_mode, t3,
4375 build_int_cst (NULL_TREE, post_shift),
4377 quotient = expand_binop (compute_mode, xor_optab,
4378 t4, t1, tquotient, 0,
4386 rtx nsign, t1, t2, t3, t4;
4387 t1 = force_operand (gen_rtx_PLUS (compute_mode,
4388 op0, constm1_rtx), NULL_RTX);
4389 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
4391 nsign = expand_shift
4392 (RSHIFT_EXPR, compute_mode, t2,
4393 build_int_cst (NULL_TREE, size - 1),
4395 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
4397 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
4402 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
4404 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4413 delete_insns_since (last);
4415 /* Try using an instruction that produces both the quotient and
4416 remainder, using truncation. We can easily compensate the quotient
4417 or remainder to get floor rounding, once we have the remainder.
4418 Notice that we compute also the final remainder value here,
4419 and return the result right away. */
4420 if (target == 0 || GET_MODE (target) != compute_mode)
4421 target = gen_reg_rtx (compute_mode);
4426 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4427 quotient = gen_reg_rtx (compute_mode);
4432 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4433 remainder = gen_reg_rtx (compute_mode);
4436 if (expand_twoval_binop (sdivmod_optab, op0, op1,
4437 quotient, remainder, 0))
4439 /* This could be computed with a branch-less sequence.
4440 Save that for later. */
4442 rtx label = gen_label_rtx ();
4443 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
4444 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4445 NULL_RTX, 0, OPTAB_WIDEN);
4446 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
4447 expand_dec (quotient, const1_rtx);
4448 expand_inc (remainder, op1);
4450 return gen_lowpart (mode, rem_flag ? remainder : quotient);
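/* [Editorial sketch -- not GCC source.]  The divmod-based floor
   rounding above in plain C99: step the quotient down exactly when the
   remainder is nonzero and the operand signs differ:  */
#if 0
#include <stdint.h>

static void
floor_divmod (int32_t x, int32_t y, int32_t *q, int32_t *r)
{
  *q = x / y;                       /* truncating divmod */
  *r = x % y;
  if (*r != 0 && ((x ^ y) < 0))     /* the xor + GE test above */
    {
      *q -= 1;
      *r += y;
    }
}
#endif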
4453 /* No luck with division elimination or divmod. Have to do it
4454 by conditionally adjusting op0 *and* the result. */
4456 rtx label1, label2, label3, label4, label5;
4460 quotient = gen_reg_rtx (compute_mode);
4461 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4462 label1 = gen_label_rtx ();
4463 label2 = gen_label_rtx ();
4464 label3 = gen_label_rtx ();
4465 label4 = gen_label_rtx ();
4466 label5 = gen_label_rtx ();
4467 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4468 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
4469 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4470 quotient, 0, OPTAB_LIB_WIDEN);
4471 if (tem != quotient)
4472 emit_move_insn (quotient, tem);
4473 emit_jump_insn (gen_jump (label5));
4475 emit_label (label1);
4476 expand_inc (adjusted_op0, const1_rtx);
4477 emit_jump_insn (gen_jump (label4));
4479 emit_label (label2);
4480 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
4481 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4482 quotient, 0, OPTAB_LIB_WIDEN);
4483 if (tem != quotient)
4484 emit_move_insn (quotient, tem);
4485 emit_jump_insn (gen_jump (label5));
4487 emit_label (label3);
4488 expand_dec (adjusted_op0, const1_rtx);
4489 emit_label (label4);
4490 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4491 quotient, 0, OPTAB_LIB_WIDEN);
4492 if (tem != quotient)
4493 emit_move_insn (quotient, tem);
4494 expand_dec (quotient, const1_rtx);
4495 emit_label (label5);
4503 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
4506 unsigned HOST_WIDE_INT d = INTVAL (op1);
4507 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4508 build_int_cst (NULL_TREE, floor_log2 (d)),
4510 t2 = expand_binop (compute_mode, and_optab, op0,
4512 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4513 t3 = gen_reg_rtx (compute_mode);
4514 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4515 compute_mode, 1, 1);
4519 lab = gen_label_rtx ();
4520 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4521 expand_inc (t1, const1_rtx);
4526 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4532 /* Try using an instruction that produces both the quotient and
4533 remainder, using truncation. We can easily compensate the
4534 quotient or remainder to get ceiling rounding, once we have the
4535 remainder. Notice that we compute also the final remainder
4536 value here, and return the result right away. */
4537 if (target == 0 || GET_MODE (target) != compute_mode)
4538 target = gen_reg_rtx (compute_mode);
4542 remainder = (REG_P (target)
4543 ? target : gen_reg_rtx (compute_mode));
4544 quotient = gen_reg_rtx (compute_mode);
4548 quotient = (REG_P (target)
4549 ? target : gen_reg_rtx (compute_mode));
4550 remainder = gen_reg_rtx (compute_mode);
4553 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
4556 /* This could be computed with a branch-less sequence.
4557 Save that for later. */
4558 rtx label = gen_label_rtx ();
4559 do_cmp_and_jump (remainder, const0_rtx, EQ,
4560 compute_mode, label);
4561 expand_inc (quotient, const1_rtx);
4562 expand_dec (remainder, op1);
4564 return gen_lowpart (mode, rem_flag ? remainder : quotient);
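/* [Editorial sketch -- not GCC source.]  The unsigned ceiling fixup
   above: any nonzero remainder bumps the quotient; the remainder wraps
   modulo 2**32, matching expand_dec:  */
#if 0
#include <stdint.h>

static void
ceil_udivmod (uint32_t x, uint32_t y, uint32_t *q, uint32_t *r)
{
  *q = x / y;
  *r = x % y;
  if (*r != 0)
    {
      *q += 1;
      *r -= y;
    }
}
#endif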
4567 /* No luck with division elimination or divmod. Have to do it
4568 by conditionally adjusting op0 *and* the result. */
4571 rtx adjusted_op0, tem;
4573 quotient = gen_reg_rtx (compute_mode);
4574 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4575 label1 = gen_label_rtx ();
4576 label2 = gen_label_rtx ();
4577 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
4578 compute_mode, label1);
4579 emit_move_insn (quotient, const0_rtx);
4580 emit_jump_insn (gen_jump (label2));
4582 emit_label (label1);
4583 expand_dec (adjusted_op0, const1_rtx);
4584 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
4585 quotient, 1, OPTAB_LIB_WIDEN);
4586 if (tem != quotient)
4587 emit_move_insn (quotient, tem);
4588 expand_inc (quotient, const1_rtx);
4589 emit_label (label2);
4594 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
4595 && INTVAL (op1) >= 0)
4597 /* This is extremely similar to the code for the unsigned case
4598 above. For 2.7 we should merge these variants, but for
4599 2.6.1 I don't want to touch the code for unsigned since that
4600 gets used in C. The signed case will only be used by other
4601 languages (Ada). */
4604 unsigned HOST_WIDE_INT d = INTVAL (op1);
4605 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4606 build_int_cst (NULL_TREE, floor_log2 (d)),
4608 t2 = expand_binop (compute_mode, and_optab, op0,
4610 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4611 t3 = gen_reg_rtx (compute_mode);
4612 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4613 compute_mode, 1, 1);
4617 lab = gen_label_rtx ();
4618 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4619 expand_inc (t1, const1_rtx);
4624 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4630 /* Try using an instruction that produces both the quotient and
4631 remainder, using truncation. We can easily compensate the
4632 quotient or remainder to get ceiling rounding, once we have the
4633 remainder. Notice that we compute also the final remainder
4634 value here, and return the result right away. */
4635 if (target == 0 || GET_MODE (target) != compute_mode)
4636 target = gen_reg_rtx (compute_mode);
4639 remainder= (REG_P (target)
4640 ? target : gen_reg_rtx (compute_mode));
4641 quotient = gen_reg_rtx (compute_mode);
4645 quotient = (REG_P (target)
4646 ? target : gen_reg_rtx (compute_mode));
4647 remainder = gen_reg_rtx (compute_mode);
4650 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
4653 /* This could be computed with a branch-less sequence.
4654 Save that for later. */
4656 rtx label = gen_label_rtx ();
4657 do_cmp_and_jump (remainder, const0_rtx, EQ,
4658 compute_mode, label);
4659 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4660 NULL_RTX, 0, OPTAB_WIDEN);
4661 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
4662 expand_inc (quotient, const1_rtx);
4663 expand_dec (remainder, op1);
4665 return gen_lowpart (mode, rem_flag ? remainder : quotient);
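/* [Editorial sketch -- not GCC source.]  The signed ceiling fixup
   above: adjust only when the remainder is nonzero and the signs
   agree, i.e. when the exact quotient is positive:  */
#if 0
#include <stdint.h>

static void
ceil_sdivmod (int32_t x, int32_t y, int32_t *q, int32_t *r)
{
  *q = x / y;
  *r = x % y;
  if (*r != 0 && ((x ^ y) >= 0))
    {
      *q += 1;
      *r -= y;
    }
}
#endif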
4668 /* No luck with division elimination or divmod. Have to do it
4669 by conditionally adjusting op0 *and* the result. */
4671 rtx label1, label2, label3, label4, label5;
4675 quotient = gen_reg_rtx (compute_mode);
4676 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4677 label1 = gen_label_rtx ();
4678 label2 = gen_label_rtx ();
4679 label3 = gen_label_rtx ();
4680 label4 = gen_label_rtx ();
4681 label5 = gen_label_rtx ();
4682 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4683 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
4684 compute_mode, label1);
4685 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4686 quotient, 0, OPTAB_LIB_WIDEN);
4687 if (tem != quotient)
4688 emit_move_insn (quotient, tem);
4689 emit_jump_insn (gen_jump (label5));
4691 emit_label (label1);
4692 expand_dec (adjusted_op0, const1_rtx);
4693 emit_jump_insn (gen_jump (label4));
4695 emit_label (label2);
4696 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
4697 compute_mode, label3);
4698 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4699 quotient, 0, OPTAB_LIB_WIDEN);
4700 if (tem != quotient)
4701 emit_move_insn (quotient, tem);
4702 emit_jump_insn (gen_jump (label5));
4704 emit_label (label3);
4705 expand_inc (adjusted_op0, const1_rtx);
4706 emit_label (label4);
4707 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4708 quotient, 0, OPTAB_LIB_WIDEN);
4709 if (tem != quotient)
4710 emit_move_insn (quotient, tem);
4711 expand_inc (quotient, const1_rtx);
4712 emit_label (label5);
4717 case EXACT_DIV_EXPR:
4718 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
4720 HOST_WIDE_INT d = INTVAL (op1);
4721 unsigned HOST_WIDE_INT ml;
4725 pre_shift = floor_log2 (d & -d);
4726 ml = invert_mod2n (d >> pre_shift, size);
4727 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4728 build_int_cst (NULL_TREE, pre_shift),
4729 NULL_RTX, unsignedp);
4730 quotient = expand_mult (compute_mode, t1,
4731 gen_int_mode (ml, compute_mode),
4734 insn = get_last_insn ();
4735 set_unique_reg_note (insn,
4736 REG_EQUAL,
4737 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
4738 compute_mode,
4739 op0, op1));
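/* [Editorial sketch -- not GCC source.]  EXACT_DIV_EXPR above shifts
   out the divisor's trailing zeros and multiplies by the odd part's
   inverse modulo 2**32.  For d == 7 that inverse is 0xB6DB6DB7
   (7 * 0xB6DB6DB7 == 1 mod 2**32; assumed value):  */
#if 0
#include <stdint.h>

static uint32_t
exact_udiv7 (uint32_t x)   /* precondition: x % 7 == 0 */
{
  return x * 0xB6DB6DB7u;
}
#endif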
4743 case ROUND_DIV_EXPR:
4744 case ROUND_MOD_EXPR:
4749 label = gen_label_rtx ();
4750 quotient = gen_reg_rtx (compute_mode);
4751 remainder = gen_reg_rtx (compute_mode);
4752 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
4755 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
4756 quotient, 1, OPTAB_LIB_WIDEN);
4757 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
4758 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4759 remainder, 1, OPTAB_LIB_WIDEN);
4761 tem = plus_constant (op1, -1);
4762 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4763 build_int_cst (NULL_TREE, 1),
4765 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
4766 expand_inc (quotient, const1_rtx);
4767 expand_dec (remainder, op1);
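/* [Editorial sketch -- not GCC source.]  The unsigned round-to-nearest
   fixup above: round up when the remainder exceeds (y - 1) / 2, the
   value the LEU comparison tests against:  */
#if 0
#include <stdint.h>

static void
round_udivmod (uint32_t x, uint32_t y, uint32_t *q, uint32_t *r)
{
  *q = x / y;
  *r = x % y;
  if (*r > (y - 1) / 2)
    {
      *q += 1;
      *r -= y;     /* wraps modulo 2**32, as expand_dec does */
    }
}
#endif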
4772 rtx abs_rem, abs_op1, tem, mask;
4774 label = gen_label_rtx ();
4775 quotient = gen_reg_rtx (compute_mode);
4776 remainder = gen_reg_rtx (compute_mode);
4777 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
4780 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
4781 quotient, 0, OPTAB_LIB_WIDEN);
4782 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
4783 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4784 remainder, 0, OPTAB_LIB_WIDEN);
4786 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
4787 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
4788 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
4789 build_int_cst (NULL_TREE, 1),
4791 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
4792 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4793 NULL_RTX, 0, OPTAB_WIDEN);
4794 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4795 build_int_cst (NULL_TREE, size - 1),
4797 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
4798 NULL_RTX, 0, OPTAB_WIDEN);
4799 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4800 NULL_RTX, 0, OPTAB_WIDEN);
4801 expand_inc (quotient, tem);
4802 tem = expand_binop (compute_mode, xor_optab, mask, op1,
4803 NULL_RTX, 0, OPTAB_WIDEN);
4804 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4805 NULL_RTX, 0, OPTAB_WIDEN);
4806 expand_dec (remainder, tem);
4809 return gen_lowpart (mode, rem_flag ? remainder : quotient);
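/* [Editorial sketch -- not GCC source.]  The signed round-to-nearest
   fixup above, with branches in place of the mask arithmetic: compare
   2*|r| against |y| and move the quotient toward the sign of x/y
   (ties round away from zero):  */
#if 0
#include <stdint.h>

static void
round_sdivmod (int32_t x, int32_t y, int32_t *q, int32_t *r)
{
  uint32_t abs_r, abs_y;
  *q = x / y;
  *r = x % y;
  abs_r = *r < 0 ? - (uint32_t) *r : (uint32_t) *r;
  abs_y = y < 0 ? - (uint32_t) y : (uint32_t) y;
  if (2 * abs_r >= abs_y)
    {
      if ((x ^ y) < 0) { *q -= 1; *r += y; }
      else             { *q += 1; *r -= y; }
    }
}
#endif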
4817 if (target && GET_MODE (target) != compute_mode)
4822 /* Try to produce the remainder without producing the quotient.
4823 If we seem to have a divmod pattern that does not require widening,
4824 don't try widening here. We should really have a WIDEN argument
4825 to expand_twoval_binop, since what we'd really like to do here is
4826 1) try a mod insn in compute_mode
4827 2) try a divmod insn in compute_mode
4828 3) try a div insn in compute_mode and multiply-subtract to get
4829 the remainder
4830 4) try the same things with widening allowed. */
4832 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4835 ((optab_handler (optab2, compute_mode)->insn_code
4836 != CODE_FOR_nothing)
4837 ? OPTAB_DIRECT : OPTAB_WIDEN));
4840 /* No luck there. Can we do remainder and divide at once
4841 without a library call? */
4842 remainder = gen_reg_rtx (compute_mode);
4843 if (! expand_twoval_binop ((unsignedp
4847 NULL_RTX, remainder, unsignedp))
4852 return gen_lowpart (mode, remainder);
4855 /* Produce the quotient. Try a quotient insn, but not a library call.
4856 If we have a divmod in this mode, use it in preference to widening
4857 the div (for this test we assume it will not fail). Note that optab2
4858 is set to the one of the two optabs that the call below will use. */
4860 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
4861 op0, op1, rem_flag ? NULL_RTX : target,
4863 ((optab_handler (optab2, compute_mode)->insn_code
4864 != CODE_FOR_nothing)
4865 ? OPTAB_DIRECT : OPTAB_WIDEN));
4869 /* No luck there. Try a quotient-and-remainder insn,
4870 keeping the quotient alone. */
4871 quotient = gen_reg_rtx (compute_mode);
4872 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
4874 quotient, NULL_RTX, unsignedp))
4878 /* Still no luck. If we are not computing the remainder,
4879 use a library call for the quotient. */
4880 quotient = sign_expand_binop (compute_mode,
4881 udiv_optab, sdiv_optab,
4883 unsignedp, OPTAB_LIB_WIDEN);
4890 if (target && GET_MODE (target) != compute_mode)
4895 /* No divide instruction either. Use library for remainder. */
4896 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4898 unsignedp, OPTAB_LIB_WIDEN);
4899 /* No remainder function. Try a quotient-and-remainder
4900 function, keeping the remainder. */
4903 remainder = gen_reg_rtx (compute_mode);
4904 if (!expand_twoval_binop_libfunc
4905 (unsignedp ? udivmod_optab : sdivmod_optab,
4907 NULL_RTX, remainder,
4908 unsignedp ? UMOD : MOD))
4909 remainder = NULL_RTX;
4914 /* We divided. Now finish doing X - Y * (X / Y). */
4915 remainder = expand_mult (compute_mode, quotient, op1,
4916 NULL_RTX, unsignedp);
4917 remainder = expand_binop (compute_mode, sub_optab, op0,
4918 remainder, target, unsignedp,
4919 OPTAB_LIB_WIDEN);
4920 }
4923 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4924 }
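/* [Editorial sketch -- not GCC source.]  The closing step above in
   miniature: once any quotient exists, the remainder costs one
   multiply and one subtract:  */
#if 0
#include <stdint.h>

static uint32_t
urem_from_quotient (uint32_t x, uint32_t y)
{
  uint32_t q = x / y;   /* stands in for whatever produced QUOTIENT */
  return x - q * y;
}
#endif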
4926 /* Return a tree node with data type TYPE, describing the value of X.
4927 Usually this is a VAR_DECL, if there is no obvious better choice.
4928 X may be an expression, however we only support those expressions
4929 generated by loop.c. */
4931 tree
4932 make_tree (tree type, rtx x)
4933 {
4934 tree t;
4936 switch (GET_CODE (x))
4937 {
4938 case CONST_INT:
4939 {
4940 HOST_WIDE_INT hi = 0;
4942 if (INTVAL (x) < 0
4943 && !(TYPE_UNSIGNED (type)
4944 && (GET_MODE_BITSIZE (TYPE_MODE (type))
4945 < HOST_BITS_PER_WIDE_INT)))
4946 hi = -1;
4948 t = build_int_cst_wide (type, INTVAL (x), hi);
4950 return t;
4951 }
4953 case CONST_DOUBLE:
4954 if (GET_MODE (x) == VOIDmode)
4955 t = build_int_cst_wide (type,
4956 CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
4957 else
4958 {
4959 REAL_VALUE_TYPE d;
4961 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
4962 t = build_real (type, d);
4963 }
4965 return t;
4967 case CONST_VECTOR:
4968 {
4969 int units = CONST_VECTOR_NUNITS (x);
4970 tree itype = TREE_TYPE (type);
4971 tree t = NULL_TREE;
4972 int i;
4975 /* Build a tree with vector elements. */
4976 for (i = units - 1; i >= 0; --i)
4978 rtx elt = CONST_VECTOR_ELT (x, i);
4979 t = tree_cons (NULL_TREE, make_tree (itype, elt), t);
4982 return build_vector (type, t);
4983 }
4985 case PLUS:
4986 return fold_build2 (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4987 make_tree (type, XEXP (x, 1)));
4989 case MINUS:
4990 return fold_build2 (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4991 make_tree (type, XEXP (x, 1)));
4993 case NEG:
4994 return fold_build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0)));
4996 case MULT:
4997 return fold_build2 (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
4998 make_tree (type, XEXP (x, 1)));
5000 case ASHIFT:
5001 return fold_build2 (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
5002 make_tree (type, XEXP (x, 1)));
5004 case LSHIFTRT:
5005 t = unsigned_type_for (type);
5006 return fold_convert (type, build2 (RSHIFT_EXPR, t,
5007 make_tree (t, XEXP (x, 0)),
5008 make_tree (type, XEXP (x, 1))));
5010 case ASHIFTRT:
5011 t = signed_type_for (type);
5012 return fold_convert (type, build2 (RSHIFT_EXPR, t,
5013 make_tree (t, XEXP (x, 0)),
5014 make_tree (type, XEXP (x, 1))));
5016 case DIV:
5017 if (TREE_CODE (type) != REAL_TYPE)
5018 t = signed_type_for (type);
5019 else
5020 t = type;
5022 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
5023 make_tree (t, XEXP (x, 0)),
5024 make_tree (t, XEXP (x, 1))));
5025 case UDIV:
5026 t = unsigned_type_for (type);
5027 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
5028 make_tree (t, XEXP (x, 0)),
5029 make_tree (t, XEXP (x, 1))));
5031 case SIGN_EXTEND:
5032 case ZERO_EXTEND:
5033 t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
5034 GET_CODE (x) == ZERO_EXTEND);
5035 return fold_convert (type, make_tree (t, XEXP (x, 0)));
5037 case CONST:
5038 return make_tree (type, XEXP (x, 0));
5040 case SYMBOL_REF:
5041 t = SYMBOL_REF_DECL (x);
5042 if (t)
5043 return fold_convert (type, build_fold_addr_expr (t));
5044 /* else fall through. */
5046 default:
5047 t = build_decl (VAR_DECL, NULL_TREE, type);
5049 /* If TYPE is a POINTER_TYPE, X might be Pmode with TYPE_MODE being
5050 ptr_mode. So convert. */
5051 if (POINTER_TYPE_P (type))
5052 x = convert_memory_address (TYPE_MODE (type), x);
5054 /* Note that we do *not* use SET_DECL_RTL here, because we do not
5055 want set_decl_rtl to go adjusting REG_ATTRS for this temporary. */
5056 t->decl_with_rtl.rtl = x;
5058 return t;
5059 }
5060 }
5062 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
5063 and returning TARGET.
5065 If TARGET is 0, a pseudo-register or constant is returned. */
5068 expand_and (enum machine_mode mode, rtx op0, rtx op1, rtx target)
5072 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
5073 tem = simplify_binary_operation (AND, mode, op0, op1);
5074 else
5075 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
5077 if (target == 0)
5078 target = tem;
5079 else if (tem != target)
5080 emit_move_insn (target, tem);
5081 return target;
5082 }
5084 /* Helper function for emit_store_flag. */
5085 static rtx
5086 emit_store_flag_1 (rtx target, rtx subtarget, enum machine_mode mode,
5087 int normalizep)
5088 {
5089 rtx op0;
5090 enum machine_mode target_mode = GET_MODE (target);
5092 /* If we are converting to a wider mode, first convert to
5093 TARGET_MODE, then normalize. This produces better combining
5094 opportunities on machines that have a SIGN_EXTRACT when we are
5095 testing a single bit. This mostly benefits the 68k.
5097 If STORE_FLAG_VALUE does not have the sign bit set when
5098 interpreted in MODE, we can do this conversion as unsigned, which
5099 is usually more efficient. */
5100 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
5101 {
5102 convert_move (target, subtarget,
5103 (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
5104 && 0 == (STORE_FLAG_VALUE
5105 & ((HOST_WIDE_INT) 1
5106 << (GET_MODE_BITSIZE (mode) -1))));
5107 op0 = target;
5108 mode = target_mode;
5109 }
5110 else
5111 op0 = subtarget;
5113 /* If we want to keep subexpressions around, don't reuse our last
5114 target. */
5115 if (optimize)
5116 subtarget = 0;
5118 /* Now normalize to the proper value in MODE. Sometimes we don't
5119 have to do anything. */
5120 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
5121 ;
5122 /* STORE_FLAG_VALUE might be the most negative number, so write
5123 the comparison this way to avoid a compiler-time warning. */
5124 else if (- normalizep == STORE_FLAG_VALUE)
5125 op0 = expand_unop (mode, neg_optab, op0, subtarget, 0);
5127 /* We don't want to use STORE_FLAG_VALUE < 0 below since this makes
5128 it hard to use a value of just the sign bit due to ANSI integer
5129 constant typing rules. */
5130 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5131 && (STORE_FLAG_VALUE
5132 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))))
5133 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
5134 size_int (GET_MODE_BITSIZE (mode) - 1), subtarget,
5135 normalizep == 1);
5136 else
5137 {
5138 gcc_assert (STORE_FLAG_VALUE & 1);
5140 op0 = expand_and (mode, op0, const1_rtx, subtarget);
5141 if (normalizep == -1)
5142 op0 = expand_unop (mode, neg_optab, op0, op0, 0);
5143 }
5145 /* If we were converting to a smaller mode, do the conversion now. */
5146 if (target_mode != mode)
5147 {
5148 convert_move (target, op0, 0);
5149 return target;
5150 }
5151 else
5152 return op0;
5153 }
5155 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
5156 and storing in TARGET. Normally return TARGET.
5157 Return 0 if that cannot be done.
5159 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
5160 it is VOIDmode, they cannot both be CONST_INT.
5162 UNSIGNEDP is for the case where we have to widen the operands
5163 to perform the operation. It says to use zero-extension.
5165 NORMALIZEP is 1 if we should convert the result to be either zero
5166 or one. NORMALIZEP is -1 if we should convert the result to be
5167 either zero or -1. If NORMALIZEP is zero, the result will be left
5168 "raw" out of the scc insn. */
5170 rtx
5171 emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
5172 enum machine_mode mode, int unsignedp, int normalizep)
5173 {
5174 rtx subtarget;
5175 enum insn_code icode;
5176 enum machine_mode compare_mode;
5177 enum machine_mode target_mode = GET_MODE (target);
5178 rtx tem;
5179 rtx last = get_last_insn ();
5180 rtx pattern, comparison;
5182 if (unsignedp)
5183 code = unsigned_condition (code);
5185 /* If one operand is constant, make it the second one. Only do this
5186 if the other operand is not constant as well. */
5188 if (swap_commutative_operands_p (op0, op1))
5189 {
5190 tem = op0;
5191 op0 = op1;
5192 op1 = tem;
5193 code = swap_condition (code);
5194 }
5196 if (mode == VOIDmode)
5197 mode = GET_MODE (op0);
5199 /* For some comparisons with 1 and -1, we can convert this to
5200 comparisons with zero. This will often produce more opportunities for
5201 store-flag insns. */
5203 switch (code)
5204 {
5205 case LT:
5206 if (op1 == const1_rtx)
5207 op1 = const0_rtx, code = LE;
5208 break;
5209 case LE:
5210 if (op1 == constm1_rtx)
5211 op1 = const0_rtx, code = LT;
5212 break;
5213 case GE:
5214 if (op1 == const1_rtx)
5215 op1 = const0_rtx, code = GT;
5216 break;
5217 case GT:
5218 if (op1 == constm1_rtx)
5219 op1 = const0_rtx, code = GE;
5220 break;
5221 case GEU:
5222 if (op1 == const1_rtx)
5223 op1 = const0_rtx, code = NE;
5224 break;
5225 case LTU:
5226 if (op1 == const1_rtx)
5227 op1 = const0_rtx, code = EQ;
5228 break;
5229 default:
5230 break;
5231 }
5233 /* If we are comparing a double-word integer with zero or -1, we can
5234 convert the comparison into one involving a single word. */
5235 if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
5236 && GET_MODE_CLASS (mode) == MODE_INT
5237 && (!MEM_P (op0) || ! MEM_VOLATILE_P (op0)))
5238 {
5239 if ((code == EQ || code == NE)
5240 && (op1 == const0_rtx || op1 == constm1_rtx))
5242 rtx op00, op01, op0both;
5244 /* Do a logical OR or AND of the two words and compare the
5245 result. */
5246 op00 = simplify_gen_subreg (word_mode, op0, mode, 0);
5247 op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD);
5248 op0both = expand_binop (word_mode,
5249 op1 == const0_rtx ? ior_optab : and_optab,
5250 op00, op01, NULL_RTX, unsignedp,
5251 OPTAB_DIRECT);
5253 if (op0both != 0)
5254 return emit_store_flag (target, code, op0both, op1, word_mode,
5255 unsignedp, normalizep);
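/* [Editorial sketch -- not GCC source.]  The double-word reduction
   above on a 32-bit-word machine: OR the halves to compare against
   zero, AND them to compare against -1:  */
#if 0
#include <stdint.h>

static int
dw_eq_zero (uint32_t lo, uint32_t hi)       /* 64-bit x == 0 */
{
  return (lo | hi) == 0;
}

static int
dw_eq_minus_one (uint32_t lo, uint32_t hi)  /* 64-bit x == -1 */
{
  return (lo & hi) == 0xFFFFFFFFu;
}
#endif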
5257 else if ((code == LT || code == GE) && op1 == const0_rtx)
5261 /* If testing the sign bit, can just test on high word. */
5262 op0h = simplify_gen_subreg (word_mode, op0, mode,
5263 subreg_highpart_offset (word_mode,
5264 mode));
5265 return emit_store_flag (target, code, op0h, op1, word_mode,
5266 unsignedp, normalizep);
5270 /* If this is A < 0 or A >= 0, we can do this by taking the ones
5271 complement of A (for GE) and shifting the sign bit to the low bit. */
5272 if (op1 == const0_rtx && (code == LT || code == GE)
5273 && GET_MODE_CLASS (mode) == MODE_INT
5274 && (normalizep || STORE_FLAG_VALUE == 1
5275 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5276 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
5277 == ((unsigned HOST_WIDE_INT) 1
5278 << (GET_MODE_BITSIZE (mode) - 1))))))
5282 /* If the result is to be wider than OP0, it is best to convert it
5283 first. If it is to be narrower, it is *incorrect* to convert it
5284 first. */
5285 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
5287 op0 = convert_modes (target_mode, mode, op0, 0);
5288 mode = target_mode;
5289 }
5291 if (target_mode != mode)
5292 subtarget = 0;
5294 if (code == GE)
5295 op0 = expand_unop (mode, one_cmpl_optab, op0,
5296 ((STORE_FLAG_VALUE == 1 || normalizep)
5297 ? 0 : subtarget), 0);
5299 if (STORE_FLAG_VALUE == 1 || normalizep)
5300 /* If we are supposed to produce a 0/1 value, we want to do
5301 a logical shift from the sign bit to the low-order bit; for
5302 a -1/0 value, we do an arithmetic shift. */
5303 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
5304 size_int (GET_MODE_BITSIZE (mode) - 1),
5305 subtarget, normalizep != -1);
5307 if (mode != target_mode)
5308 op0 = convert_modes (target_mode, mode, op0, 0);
5310 return op0;
5311 }
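/* [Editorial sketch -- not GCC source.]  The A < 0 / A >= 0 store-flag
   above without any comparison: shift the sign bit (complemented for
   GE) down to bit zero with a logical shift:  */
#if 0
#include <stdint.h>

static int
slt0 (int32_t x)   /* x < 0 */
{
  return (uint32_t) x >> 31;
}

static int
sge0 (int32_t x)   /* x >= 0 */
{
  return (~(uint32_t) x) >> 31;
}
#endif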
5313 icode = setcc_gen_code[(int) code];
5315 if (icode != CODE_FOR_nothing)
5317 insn_operand_predicate_fn pred;
5319 /* We think we may be able to do this with a scc insn. Emit the
5320 comparison and then the scc insn. */
5322 do_pending_stack_adjust ();
5323 last = get_last_insn ();
5325 comparison
5326 = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX);
5327 if (CONSTANT_P (comparison))
5328 {
5329 switch (GET_CODE (comparison))
5330 {
5331 case CONST_INT:
5332 if (comparison == const0_rtx)
5333 return const0_rtx;
5334 break;
5336 #ifdef FLOAT_STORE_FLAG_VALUE
5337 case CONST_DOUBLE:
5338 if (comparison == CONST0_RTX (GET_MODE (comparison)))
5339 return const0_rtx;
5340 break;
5341 #endif
5342 default:
5343 gcc_unreachable ();
5344 }
5346 if (normalizep == 1)
5347 return const1_rtx;
5348 if (normalizep == -1)
5349 return constm1_rtx;
5350 return const_true_rtx;
5351 }
5353 /* The code of COMPARISON may not match CODE if compare_from_rtx
5354 decided to swap its operands and reverse the original code.
5356 We know that compare_from_rtx returns either a CONST_INT or
5357 a new comparison code, so it is safe to just extract the
5358 code from COMPARISON. */
5359 code = GET_CODE (comparison);
5361 /* Get a reference to the target in the proper mode for this insn. */
5362 compare_mode = insn_data[(int) icode].operand[0].mode;
5364 pred = insn_data[(int) icode].operand[0].predicate;
5365 if (optimize || ! (*pred) (subtarget, compare_mode))
5366 subtarget = gen_reg_rtx (compare_mode);
5368 pattern = GEN_FCN (icode) (subtarget);
5369 if (pattern)
5370 {
5371 emit_insn (pattern);
5372 return emit_store_flag_1 (target, subtarget, compare_mode,
5373 normalizep);
5374 }
5375 }
5378 /* We don't have an scc insn, so try a cstore insn. */
5380 for (compare_mode = mode; compare_mode != VOIDmode;
5381 compare_mode = GET_MODE_WIDER_MODE (compare_mode))
5383 icode = optab_handler (cstore_optab, compare_mode)->insn_code;
5384 if (icode != CODE_FOR_nothing)
5388 if (icode != CODE_FOR_nothing)
5390 enum machine_mode result_mode
5391 = insn_data[(int) icode].operand[0].mode;
5392 rtx cstore_op0 = op0;
5393 rtx cstore_op1 = op1;
5395 do_pending_stack_adjust ();
5396 last = get_last_insn ();
5398 if (compare_mode != mode)
5399 {
5400 cstore_op0 = convert_modes (compare_mode, mode, cstore_op0,
5401 unsignedp);
5402 cstore_op1 = convert_modes (compare_mode, mode, cstore_op1,
5403 unsignedp);
5404 }
5406 if (!insn_data[(int) icode].operand[2].predicate (cstore_op0,
5407 compare_mode))
5408 cstore_op0 = copy_to_mode_reg (compare_mode, cstore_op0);
5410 if (!insn_data[(int) icode].operand[3].predicate (cstore_op1,
5411 compare_mode))
5412 cstore_op1 = copy_to_mode_reg (compare_mode, cstore_op1);
5414 comparison = gen_rtx_fmt_ee (code, result_mode, cstore_op0,
5415 cstore_op1);
5418 if (optimize || !(insn_data[(int) icode].operand[0].predicate
5419 (subtarget, result_mode)))
5420 subtarget = gen_reg_rtx (result_mode);
5422 pattern = GEN_FCN (icode) (subtarget, comparison, cstore_op0,
5423 cstore_op1);
5424 if (pattern)
5425 {
5427 emit_insn (pattern);
5428 return emit_store_flag_1 (target, subtarget, result_mode,
5429 normalizep);
5430 }
5431 }
5434 delete_insns_since (last);
5436 /* If optimizing, use different pseudo registers for each insn, instead
5437 of reusing the same pseudo. This leads to better CSE, but slows
5438 down the compiler, since there are more pseudos */
5439 subtarget = (!optimize
5440 && (target_mode == mode)) ? target : NULL_RTX;
5442 /* If we reached here, we can't do this with a scc insn. However, there
5443 are some comparisons that can be done directly. For example, if
5444 this is an equality comparison of integers, we can try to exclusive-or
5445 (or subtract) the two operands and use a recursive call to try the
5446 comparison with zero. Don't do any of these cases if branches are
5449 if (BRANCH_COST (optimize_insn_for_speed_p (),
5450 false) > 1
5451 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
5452 && op1 != const0_rtx)
5454 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
5458 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
5461 tem = emit_store_flag (target, code, tem, const0_rtx,
5462 mode, unsignedp, normalizep);
5464 delete_insns_since (last);
5468 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
5469 the constant zero. Reject all other comparisons at this point. Only
5470 do LE and GT if branches are expensive since they are expensive on
5471 2-operand machines. */
5473 if (BRANCH_COST (optimize_insn_for_speed_p (),
5474 false) == 0
5475 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
5476 || (code != EQ && code != NE
5477 && (BRANCH_COST (optimize_insn_for_speed_p (),
5478 false) <= 1 || (code != LE && code != GT))))
5481 /* See what we need to return. We can only return a 1, -1, or the
5482 sign bit. */
5484 if (normalizep == 0)
5486 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
5487 normalizep = STORE_FLAG_VALUE;
5489 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5490 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
5491 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
5497 /* Try to put the result of the comparison in the sign bit. Assume we can't
5498 do the necessary operation below. */
5502 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
5503 the sign bit set. */
5507 /* This is destructive, so SUBTARGET can't be OP0. */
5508 if (rtx_equal_p (subtarget, op0))
5511 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
5514 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
5518 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
5519 number of bits in the mode of OP0, minus one. */
5523 if (rtx_equal_p (subtarget, op0))
5526 tem = expand_shift (RSHIFT_EXPR, mode, op0,
5527 size_int (GET_MODE_BITSIZE (mode) - 1),
5529 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
5533 if (code == EQ || code == NE)
5535 /* For EQ or NE, one way to do the comparison is to apply an operation
5536 that converts the operand into a positive number if it is nonzero
5537 or zero if it was originally zero. Then, for EQ, we subtract 1 and
5538 for NE we negate. This puts the result in the sign bit. Then we
5539 normalize with a shift, if needed.
5541 Two operations that can do the above actions are ABS and FFS, so try
5542 them. If that doesn't work, and MODE is smaller than a full word,
5543 we can use zero-extension to the wider mode (an unsigned conversion)
5544 as the operation. */
5546 /* Note that ABS doesn't yield a positive number for INT_MIN, but
5547 that is compensated by the subsequent overflow when subtracting
5548 one / negating. */
5550 if (optab_handler (abs_optab, mode)->insn_code != CODE_FOR_nothing)
5551 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
5552 else if (optab_handler (ffs_optab, mode)->insn_code != CODE_FOR_nothing)
5553 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
5554 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5556 tem = convert_modes (word_mode, mode, op0, 1);
5563 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
5566 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
5569 /* If we couldn't do it that way, for NE we can "or" the two's complement
5570 of the value with itself. For EQ, we take the one's complement of
5571 that "or", which is an extra insn, so we only handle EQ if branches
5576 || BRANCH_COST (optimize_insn_for_speed_p (),
5579 if (rtx_equal_p (subtarget, op0))
5582 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
5583 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
5586 if (tem && code == EQ)
5587 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
5591 if (tem && normalizep)
5592 tem = expand_shift (RSHIFT_EXPR, mode, tem,
5593 size_int (GET_MODE_BITSIZE (mode) - 1),
5594 subtarget, normalizep == 1);
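/* [Editorial sketch -- not GCC source.]  The "or with the two's
   complement" trick above: x | -x has its sign bit set exactly when
   x is nonzero; the one's complement gives EQ:  */
#if 0
#include <stdint.h>

static int
ne0 (int32_t x)   /* x != 0 */
{
  uint32_t u = (uint32_t) x;
  return (u | (0u - u)) >> 31;
}

static int
eq0 (int32_t x)   /* x == 0 */
{
  uint32_t u = (uint32_t) x;
  return (~(u | (0u - u))) >> 31;
}
#endif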
5598 if (GET_MODE (tem) != target_mode)
5600 convert_move (target, tem, 0);
5603 else if (!subtarget)
5605 emit_move_insn (target, tem);
5610 delete_insns_since (last);
5615 /* Like emit_store_flag, but always succeeds. */
5617 rtx
5618 emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
5619 enum machine_mode mode, int unsignedp, int normalizep)
5620 {
5621 rtx tem, label;
5623 /* First see if emit_store_flag can do the job. */
5624 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
5625 if (tem != 0)
5626 return tem;
5628 if (normalizep == 0)
5629 normalizep = 1;
5631 /* If this failed, we have to do this with set/compare/jump/set code. */
5633 if (!REG_P (target)
5634 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
5635 target = gen_reg_rtx (GET_MODE (target));
5637 emit_move_insn (target, const1_rtx);
5638 label = gen_label_rtx ();
5639 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
5640 NULL_RTX, label);
5642 emit_move_insn (target, const0_rtx);
5643 emit_label (label);
5645 return target;
5646 }
5648 /* Perform possibly multi-word comparison and conditional jump to LABEL
5649 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE. This is
5650 now a thin wrapper around do_compare_rtx_and_jump. */
5652 static void
5653 do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, enum machine_mode mode,
5654 rtx label)
5655 {
5656 int unsignedp = (op == LTU || op == LEU || op == GTU || op == GEU);
5657 do_compare_rtx_and_jump (arg1, arg2, op, unsignedp, mode,
5658 NULL_RTX, NULL_RTX, label);
5659 }