1 /* Medium-level subroutines: convert bit-field store and extract
2 and shifts, multiplies and divides to rtl instructions.
3 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
4 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
5 Free Software Foundation, Inc.
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.

14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.

19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
26 #include "coretypes.h"
28 #include "diagnostic-core.h"
34 #include "insn-config.h"
38 #include "langhooks.h"
42 static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
43 unsigned HOST_WIDE_INT,
44 unsigned HOST_WIDE_INT, rtx);
45 static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
46 unsigned HOST_WIDE_INT, rtx);
47 static rtx extract_fixed_bit_field (enum machine_mode, rtx,
48 unsigned HOST_WIDE_INT,
49 unsigned HOST_WIDE_INT,
50 unsigned HOST_WIDE_INT, rtx, int);
51 static rtx mask_rtx (enum machine_mode, int, int, int);
52 static rtx lshift_value (enum machine_mode, rtx, int, int);
53 static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
54 unsigned HOST_WIDE_INT, int);
55 static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx);
56 static rtx expand_smod_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
57 static rtx expand_sdiv_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
59 /* Test whether a value is zero or a power of two. */
60 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
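/* Illustrative arithmetic: for x == 8 (binary 1000), 8 & 7 == 0, so the
   macro is true; for x == 6 (binary 110), 6 & 5 == 4, so it is false.
   It is also true for x == 0, hence the OR_ZERO in the name.  */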
62 /* Nonzero means divides or modulus operations are relatively cheap for
63 powers of two, so don't use branches; emit the operation instead.
64 Usually, this will mean that the MD file will emit non-branch
67 static bool sdiv_pow2_cheap[2][NUM_MACHINE_MODES];
68 static bool smod_pow2_cheap[2][NUM_MACHINE_MODES];
70 #ifndef SLOW_UNALIGNED_ACCESS
71 #define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
74 /* For compilers that support multiple targets with different word sizes,
75 MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD. An example
76 is the H8/300(H) compiler. */
78 #ifndef MAX_BITS_PER_WORD
79 #define MAX_BITS_PER_WORD BITS_PER_WORD
82 /* Reduce conditional compilation elsewhere. */
85 #define CODE_FOR_insv CODE_FOR_nothing
86 #define gen_insv(a,b,c,d) NULL_RTX
90 #define CODE_FOR_extv CODE_FOR_nothing
91 #define gen_extv(a,b,c,d) NULL_RTX
95 #define CODE_FOR_extzv CODE_FOR_nothing
96 #define gen_extzv(a,b,c,d) NULL_RTX
99 /* Cost of various pieces of RTL. Note that some of these are indexed by
100 shift count and some by mode. */
101 static int zero_cost[2];
102 static int add_cost[2][NUM_MACHINE_MODES];
103 static int neg_cost[2][NUM_MACHINE_MODES];
104 static int shift_cost[2][NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
105 static int shiftadd_cost[2][NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
106 static int shiftsub0_cost[2][NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
107 static int shiftsub1_cost[2][NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
108 static int mul_cost[2][NUM_MACHINE_MODES];
109 static int sdiv_cost[2][NUM_MACHINE_MODES];
110 static int udiv_cost[2][NUM_MACHINE_MODES];
111 static int mul_widen_cost[2][NUM_MACHINE_MODES];
112 static int mul_highpart_cost[2][NUM_MACHINE_MODES];
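/* All of the cost arrays above are indexed first by speed (0 when
   optimizing for size, 1 when optimizing for speed) and then by machine
   mode; the shift tables take a third index for the shift count.  For
   example, add_cost[1][SImode] is the cost of an SImode addition when
   optimizing for speed.  */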
119 struct rtx_def reg; rtunion reg_fld[2];
120 struct rtx_def plus; rtunion plus_fld1;
122 struct rtx_def mult; rtunion mult_fld1;
123 struct rtx_def sdiv; rtunion sdiv_fld1;
124 struct rtx_def udiv; rtunion udiv_fld1;
126 struct rtx_def sdiv_32; rtunion sdiv_32_fld1;
127 struct rtx_def smod_32; rtunion smod_32_fld1;
128 struct rtx_def wide_mult; rtunion wide_mult_fld1;
129 struct rtx_def wide_lshr; rtunion wide_lshr_fld1;
130 struct rtx_def wide_trunc;
131 struct rtx_def shift; rtunion shift_fld1;
132 struct rtx_def shift_mult; rtunion shift_mult_fld1;
133 struct rtx_def shift_add; rtunion shift_add_fld1;
134 struct rtx_def shift_sub0; rtunion shift_sub0_fld1;
135 struct rtx_def shift_sub1; rtunion shift_sub1_fld1;
138 rtx pow2[MAX_BITS_PER_WORD];
139 rtx cint[MAX_BITS_PER_WORD];
141 enum machine_mode mode, wider_mode;
145 for (m = 1; m < MAX_BITS_PER_WORD; m++)
147 pow2[m] = GEN_INT ((HOST_WIDE_INT) 1 << m);
148 cint[m] = GEN_INT (m);
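/* Illustrative values: for m == 3 the loop above makes pow2[3] the RTL
   constant 8 and cint[3] the RTL constant 3; both are reused when the
   shift-cost tables are filled in below.  */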
150 memset (&all, 0, sizeof all);
152 PUT_CODE (&all.reg, REG);
153 /* Avoid using hard regs in ways which may be unsupported. */
154 SET_REGNO (&all.reg, LAST_VIRTUAL_REGISTER + 1);
156 PUT_CODE (&all.plus, PLUS);
157 XEXP (&all.plus, 0) = &all.reg;
158 XEXP (&all.plus, 1) = &all.reg;
160 PUT_CODE (&all.neg, NEG);
161 XEXP (&all.neg, 0) = &all.reg;
163 PUT_CODE (&all.mult, MULT);
164 XEXP (&all.mult, 0) = &all.reg;
165 XEXP (&all.mult, 1) = &all.reg;
167 PUT_CODE (&all.sdiv, DIV);
168 XEXP (&all.sdiv, 0) = &all.reg;
169 XEXP (&all.sdiv, 1) = &all.reg;
171 PUT_CODE (&all.udiv, UDIV);
172 XEXP (&all.udiv, 0) = &all.reg;
173 XEXP (&all.udiv, 1) = &all.reg;
175 PUT_CODE (&all.sdiv_32, DIV);
176 XEXP (&all.sdiv_32, 0) = &all.reg;
177 XEXP (&all.sdiv_32, 1) = 32 < MAX_BITS_PER_WORD ? cint[32] : GEN_INT (32);
179 PUT_CODE (&all.smod_32, MOD);
180 XEXP (&all.smod_32, 0) = &all.reg;
181 XEXP (&all.smod_32, 1) = XEXP (&all.sdiv_32, 1);
183 PUT_CODE (&all.zext, ZERO_EXTEND);
184 XEXP (&all.zext, 0) = &all.reg;
186 PUT_CODE (&all.wide_mult, MULT);
187 XEXP (&all.wide_mult, 0) = &all.zext;
188 XEXP (&all.wide_mult, 1) = &all.zext;
190 PUT_CODE (&all.wide_lshr, LSHIFTRT);
191 XEXP (&all.wide_lshr, 0) = &all.wide_mult;
193 PUT_CODE (&all.wide_trunc, TRUNCATE);
194 XEXP (&all.wide_trunc, 0) = &all.wide_lshr;
196 PUT_CODE (&all.shift, ASHIFT);
197 XEXP (&all.shift, 0) = &all.reg;
199 PUT_CODE (&all.shift_mult, MULT);
200 XEXP (&all.shift_mult, 0) = &all.reg;
202 PUT_CODE (&all.shift_add, PLUS);
203 XEXP (&all.shift_add, 0) = &all.shift_mult;
204 XEXP (&all.shift_add, 1) = &all.reg;
206 PUT_CODE (&all.shift_sub0, MINUS);
207 XEXP (&all.shift_sub0, 0) = &all.shift_mult;
208 XEXP (&all.shift_sub0, 1) = &all.reg;
210 PUT_CODE (&all.shift_sub1, MINUS);
211 XEXP (&all.shift_sub1, 0) = &all.reg;
212 XEXP (&all.shift_sub1, 1) = &all.shift_mult;
214 for (speed = 0; speed < 2; speed++)
216 crtl->maybe_hot_insn_p = speed;
217 zero_cost[speed] = rtx_cost (const0_rtx, SET, speed);
219 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
221 mode = GET_MODE_WIDER_MODE (mode))
223 PUT_MODE (&all.reg, mode);
224 PUT_MODE (&all.plus, mode);
225 PUT_MODE (&all.neg, mode);
226 PUT_MODE (&all.mult, mode);
227 PUT_MODE (&all.sdiv, mode);
228 PUT_MODE (&all.udiv, mode);
229 PUT_MODE (&all.sdiv_32, mode);
230 PUT_MODE (&all.smod_32, mode);
231 PUT_MODE (&all.wide_trunc, mode);
232 PUT_MODE (&all.shift, mode);
233 PUT_MODE (&all.shift_mult, mode);
234 PUT_MODE (&all.shift_add, mode);
235 PUT_MODE (&all.shift_sub0, mode);
236 PUT_MODE (&all.shift_sub1, mode);
238 add_cost[speed][mode] = rtx_cost (&all.plus, SET, speed);
239 neg_cost[speed][mode] = rtx_cost (&all.neg, SET, speed);
240 mul_cost[speed][mode] = rtx_cost (&all.mult, SET, speed);
241 sdiv_cost[speed][mode] = rtx_cost (&all.sdiv, SET, speed);
242 udiv_cost[speed][mode] = rtx_cost (&all.udiv, SET, speed);
244 sdiv_pow2_cheap[speed][mode] = (rtx_cost (&all.sdiv_32, SET, speed)
245 <= 2 * add_cost[speed][mode]);
246 smod_pow2_cheap[speed][mode] = (rtx_cost (&all.smod_32, SET, speed)
247 <= 4 * add_cost[speed][mode]);
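/* The heuristic above deems a signed division by a power of two "cheap"
   when its RTL cost is at most two additions (four for modulus).  For
   example, if an add costs 4 units, an sdiv_32 cost of 8 or less sets
   sdiv_pow2_cheap, and the operation is then emitted directly rather
   than as a branchy shift sequence.  */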
249 wider_mode = GET_MODE_WIDER_MODE (mode);
250 if (wider_mode != VOIDmode)
252 PUT_MODE (&all.zext, wider_mode);
253 PUT_MODE (&all.wide_mult, wider_mode);
254 PUT_MODE (&all.wide_lshr, wider_mode);
255 XEXP (&all.wide_lshr, 1) = GEN_INT (GET_MODE_BITSIZE (mode));
257 mul_widen_cost[speed][wider_mode]
258 = rtx_cost (&all.wide_mult, SET, speed);
259 mul_highpart_cost[speed][mode]
260 = rtx_cost (&all.wide_trunc, SET, speed);
263 shift_cost[speed][mode][0] = 0;
264 shiftadd_cost[speed][mode][0] = shiftsub0_cost[speed][mode][0]
265 = shiftsub1_cost[speed][mode][0] = add_cost[speed][mode];
267 n = MIN (MAX_BITS_PER_WORD, GET_MODE_BITSIZE (mode));
268 for (m = 1; m < n; m++)
270 XEXP (&all.shift, 1) = cint[m];
271 XEXP (&all.shift_mult, 1) = pow2[m];
273 shift_cost[speed][mode][m] = rtx_cost (&all.shift, SET, speed);
274 shiftadd_cost[speed][mode][m] = rtx_cost (&all.shift_add, SET, speed);
275 shiftsub0_cost[speed][mode][m] = rtx_cost (&all.shift_sub0, SET, speed);
276 shiftsub1_cost[speed][mode][m] = rtx_cost (&all.shift_sub1, SET, speed);
280 default_rtl_profile ();
283 /* Return an rtx representing minus the value of X.
284 MODE is the intended mode of the result,
285 useful if X is a CONST_INT. */
288 negate_rtx (enum machine_mode mode, rtx x)
290 rtx result = simplify_unary_operation (NEG, mode, x, mode);
293 result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);
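/* Usage sketch: negate_rtx (SImode, const1_rtx) folds to the constant -1
   through simplify_unary_operation above; only non-constant operands
   reach expand_unop and emit a real NEG instruction.  */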
298 /* Report on the availability of insv/extv/extzv and the desired mode
299 of each of their operands. Returns MAX_MACHINE_MODE if HAVE_foo
300 is false; else the mode of the specified operand. If OPNO is -1,
301 all the caller cares about is whether the insn is available. */
303 mode_for_extraction (enum extraction_pattern pattern, int opno)
305 const struct insn_data_d *data;
312 data = &insn_data[CODE_FOR_insv];
315 return MAX_MACHINE_MODE;
320 data = &insn_data[CODE_FOR_extv];
323 return MAX_MACHINE_MODE;
328 data = &insn_data[CODE_FOR_extzv];
331 return MAX_MACHINE_MODE;
340 /* Everyone who uses this function used to follow it with
341 if (result == VOIDmode) result = word_mode; */
342 if (data->operand[opno].mode == VOIDmode)
344 return data->operand[opno].mode;
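/* Typical use: mode_for_extraction (EP_insv, 3) returns the mode the
   insv pattern wants for the value operand, as done in store_bit_field_1
   below; calling with OPNO of -1 merely probes availability.  */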
347 /* Return true if X, of mode MODE, matches the predicate for operand
348 OPNO of instruction ICODE. Allow volatile memories, regardless of
349 the ambient volatile_ok setting. */
352 check_predicate_volatile_ok (enum insn_code icode, int opno,
353 rtx x, enum machine_mode mode)
355 bool save_volatile_ok, result;
357 save_volatile_ok = volatile_ok;
358 result = insn_data[(int) icode].operand[opno].predicate (x, mode);
359 volatile_ok = save_volatile_ok;
363 /* A subroutine of store_bit_field, with the same arguments. Return true
364 if the operation could be implemented.
366 If FALLBACK_P is true, fall back to store_fixed_bit_field if we have
367 no other way of implementing the operation. If FALLBACK_P is false,
368 return false instead. */
371 store_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
372 unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
373 rtx value, bool fallback_p)
376 = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
377 unsigned HOST_WIDE_INT offset, bitpos;
382 enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);
384 while (GET_CODE (op0) == SUBREG)
386 /* The following line once was done only if WORDS_BIG_ENDIAN,
387 but I think that is a mistake. WORDS_BIG_ENDIAN is
388 meaningful at a much higher level; when structures are copied
389 between memory and regs, the higher-numbered regs
390 always get higher addresses. */
391 int inner_mode_size = GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)));
392 int outer_mode_size = GET_MODE_SIZE (GET_MODE (op0));
396 /* Paradoxical subregs need special handling on big endian machines. */
397 if (SUBREG_BYTE (op0) == 0 && inner_mode_size < outer_mode_size)
399 int difference = inner_mode_size - outer_mode_size;
401 if (WORDS_BIG_ENDIAN)
402 byte_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
403 if (BYTES_BIG_ENDIAN)
404 byte_offset += difference % UNITS_PER_WORD;
407 byte_offset = SUBREG_BYTE (op0);
409 bitnum += byte_offset * BITS_PER_UNIT;
410 op0 = SUBREG_REG (op0);
413 /* No action is needed if the target is a register and if the field
414 lies completely outside that register. This can occur if the source
415 code contains an out-of-bounds access to a small array. */
416 if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
419 /* Use vec_set patterns for inserting parts of vectors whenever
421 if (VECTOR_MODE_P (GET_MODE (op0))
423 && optab_handler (vec_set_optab, GET_MODE (op0)) != CODE_FOR_nothing
424 && fieldmode == GET_MODE_INNER (GET_MODE (op0))
425 && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
426 && !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
428 enum machine_mode outermode = GET_MODE (op0);
429 enum machine_mode innermode = GET_MODE_INNER (outermode);
430 int icode = (int) optab_handler (vec_set_optab, outermode);
431 int pos = bitnum / GET_MODE_BITSIZE (innermode);
432 rtx rtxpos = GEN_INT (pos);
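/* For example, inserting a 32-bit element at bitnum 64 of a V4SImode
   vector gives pos == 2, i.e. the third element.  */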
436 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
437 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
438 enum machine_mode mode2 = insn_data[icode].operand[2].mode;
442 if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
443 src = copy_to_mode_reg (mode1, src);
445 if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
446 rtxpos = copy_to_mode_reg (mode1, rtxpos);
448 /* We could handle this, but we should always be called with a pseudo
449 for our targets and all insns should take them as outputs. */
450 gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
451 && (*insn_data[icode].operand[1].predicate) (src, mode1)
452 && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));
453 pat = GEN_FCN (icode) (dest, src, rtxpos);
464 /* If the target is a register, overwriting the entire object, or storing
465 a full-word or multi-word field can be done with just a SUBREG.
467 If the target is memory, storing any naturally aligned field can be
468 done with a simple store. For targets that support fast unaligned
469 memory, any naturally sized, unit aligned field can be done directly. */
471 offset = bitnum / unit;
472 bitpos = bitnum % unit;
473 byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
474 + (offset * UNITS_PER_WORD);
477 && bitsize == GET_MODE_BITSIZE (fieldmode)
479 ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
480 || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
481 && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
482 : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
483 || (offset * BITS_PER_UNIT % bitsize == 0
484 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
487 op0 = adjust_address (op0, fieldmode, offset);
488 else if (GET_MODE (op0) != fieldmode)
489 op0 = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0),
491 emit_move_insn (op0, value);
495 /* Make sure we are playing with integral modes. Pun with subregs
496 if we aren't. This must come after the entire register case above,
497 since that case is valid for any mode. The following cases are only
498 valid for integral modes. */
500 enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
501 if (imode != GET_MODE (op0))
504 op0 = adjust_address (op0, imode, 0);
507 gcc_assert (imode != BLKmode);
508 op0 = gen_lowpart (imode, op0);
513 /* We may be accessing data outside the field, which means
514 we can alias adjacent data. */
517 op0 = shallow_copy_rtx (op0);
518 set_mem_alias_set (op0, 0);
519 set_mem_expr (op0, 0);
522 /* If OP0 is a register, BITPOS must count within a word.
523 But as we have it, it counts within whatever size OP0 now has.
524 On a bigendian machine, these are not the same, so convert. */
527 && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
528 bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
530 /* Storing an lsb-aligned field in a register
531 can be done with a movestrict instruction. */
534 && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
535 && bitsize == GET_MODE_BITSIZE (fieldmode)
536 && optab_handler (movstrict_optab, fieldmode) != CODE_FOR_nothing)
538 int icode = optab_handler (movstrict_optab, fieldmode);
540 rtx start = get_last_insn ();
543 /* Get appropriate low part of the value being stored. */
544 if (CONST_INT_P (value) || REG_P (value))
545 value = gen_lowpart (fieldmode, value);
546 else if (!(GET_CODE (value) == SYMBOL_REF
547 || GET_CODE (value) == LABEL_REF
548 || GET_CODE (value) == CONST))
549 value = convert_to_mode (fieldmode, value, 0);
551 if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
552 value = copy_to_mode_reg (fieldmode, value);
554 if (GET_CODE (op0) == SUBREG)
556 /* Else we've got some float mode source being extracted into
557 a different float mode destination -- this combination of
558 subregs results in Severe Tire Damage. */
559 gcc_assert (GET_MODE (SUBREG_REG (op0)) == fieldmode
560 || GET_MODE_CLASS (fieldmode) == MODE_INT
561 || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
562 arg0 = SUBREG_REG (op0);
565 insn = (GEN_FCN (icode)
566 (gen_rtx_SUBREG (fieldmode, arg0,
567 (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
568 + (offset * UNITS_PER_WORD)),
575 delete_insns_since (start);
578 /* Handle fields bigger than a word. */
580 if (bitsize > BITS_PER_WORD)
582 /* Here we transfer the words of the field
583 in the order least significant first.
584 This is because the most significant word is the one which may
585 be less than full.
586 However, only do that if the value is not BLKmode. */
588 unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
589 unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
593 /* This is the mode we must force value to, so that there will be enough
594 subwords to extract. Note that fieldmode will often (always?) be
595 VOIDmode, because that is what store_field uses to indicate that this
596 is a bit field, but passing VOIDmode to operand_subword_force
598 fieldmode = GET_MODE (value);
599 if (fieldmode == VOIDmode)
600 fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);
602 last = get_last_insn ();
603 for (i = 0; i < nwords; i++)
605 /* If I is 0, use the low-order word in both field and target;
606 if I is 1, use the next to lowest word; and so on. */
607 unsigned int wordnum = (backwards ? nwords - i - 1 : i);
608 unsigned int bit_offset = (backwards
609 ? MAX ((int) bitsize - ((int) i + 1)
612 : (int) i * BITS_PER_WORD);
613 rtx value_word = operand_subword_force (value, wordnum, fieldmode);
615 if (!store_bit_field_1 (op0, MIN (BITS_PER_WORD,
616 bitsize - i * BITS_PER_WORD),
617 bitnum + bit_offset, word_mode,
618 value_word, fallback_p))
620 delete_insns_since (last);
627 /* From here on we can assume that the field to be stored in is
628 a full-word (whatever type that is), since it is shorter than a word. */
630 /* OFFSET is the number of words or bytes (UNIT says which)
631 from STR_RTX to the first word or byte containing part of the field. */
636 || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
640 /* Since this is a destination (lvalue), we can't copy
641 it to a pseudo. We can remove a SUBREG that does not
642 change the size of the operand. Such a SUBREG may
643 have been added above. */
644 gcc_assert (GET_CODE (op0) == SUBREG
645 && (GET_MODE_SIZE (GET_MODE (op0))
646 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))));
647 op0 = SUBREG_REG (op0);
649 op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
650 op0, (offset * UNITS_PER_WORD));
655 /* If VALUE has a floating-point or complex mode, access it as an
656 integer of the corresponding size. This can occur on a machine
657 with 64 bit registers that uses SFmode for float. It can also
658 occur for unaligned float or complex fields. */
660 if (GET_MODE (value) != VOIDmode
661 && GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
662 && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
664 value = gen_reg_rtx (int_mode_for_mode (GET_MODE (value)));
665 emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
668 /* Now OFFSET is nonzero only if OP0 is memory
669 and is therefore always measured in bytes. */
672 && GET_MODE (value) != BLKmode
674 && GET_MODE_BITSIZE (op_mode) >= bitsize
675 && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
676 && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode)))
677 && insn_data[CODE_FOR_insv].operand[1].predicate (GEN_INT (bitsize),
679 && check_predicate_volatile_ok (CODE_FOR_insv, 0, op0, VOIDmode))
681 int xbitpos = bitpos;
684 rtx last = get_last_insn ();
686 bool copy_back = false;
688 /* Add OFFSET into OP0's address. */
690 xop0 = adjust_address (xop0, byte_mode, offset);
692 /* If xop0 is a register, we need it in OP_MODE
693 to make it acceptable to the format of insv. */
694 if (GET_CODE (xop0) == SUBREG)
695 /* We can't just change the mode, because this might clobber op0,
696 and we will need the original value of op0 if insv fails. */
697 xop0 = gen_rtx_SUBREG (op_mode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
698 if (REG_P (xop0) && GET_MODE (xop0) != op_mode)
699 xop0 = gen_lowpart_SUBREG (op_mode, xop0);
701 /* If the destination is a paradoxical subreg such that we need a
702 truncate to the inner mode, perform the insertion on a temporary and
703 truncate the result to the original destination. Note that we can't
704 just truncate the paradoxical subreg as (truncate:N (subreg:W (reg:N
705 X) 0)) is (reg:N X). */
706 if (GET_CODE (xop0) == SUBREG
707 && REG_P (SUBREG_REG (xop0))
708 && (!TRULY_NOOP_TRUNCATION
709 (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (xop0))),
710 GET_MODE_BITSIZE (op_mode))))
712 rtx tem = gen_reg_rtx (op_mode);
713 emit_move_insn (tem, xop0);
718 /* On big-endian machines, we count bits from the most significant.
719 If the bit field insn does not, we must invert. */
721 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
722 xbitpos = unit - bitsize - xbitpos;
724 /* We have been counting XBITPOS within UNIT.
725 Count instead within the size of the register. */
726 if (BITS_BIG_ENDIAN && !MEM_P (xop0))
727 xbitpos += GET_MODE_BITSIZE (op_mode) - unit;
729 unit = GET_MODE_BITSIZE (op_mode);
731 /* Convert VALUE to op_mode (which insv insn wants) in VALUE1. */
733 if (GET_MODE (value) != op_mode)
735 if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
737 /* Optimization: Don't bother really extending VALUE
738 if it has all the bits we will actually use. However,
739 if we must narrow it, be sure we do it correctly. */
741 if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (op_mode))
745 tmp = simplify_subreg (op_mode, value1, GET_MODE (value), 0);
747 tmp = simplify_gen_subreg (op_mode,
748 force_reg (GET_MODE (value),
750 GET_MODE (value), 0);
754 value1 = gen_lowpart (op_mode, value1);
756 else if (CONST_INT_P (value))
757 value1 = gen_int_mode (INTVAL (value), op_mode);
759 /* Parse phase is supposed to make VALUE's data type
760 match that of the component reference, which is a type
761 at least as wide as the field; so VALUE should have
762 a mode that corresponds to that type. */
763 gcc_assert (CONSTANT_P (value));
766 /* If this machine's insv insists on a register,
767 get VALUE1 into a register. */
768 if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
770 value1 = force_reg (op_mode, value1);
772 pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
778 convert_move (op0, xop0, true);
781 delete_insns_since (last);
784 /* If OP0 is a memory, try copying it to a register and seeing if a
785 cheap register alternative is available. */
786 if (HAVE_insv && MEM_P (op0))
788 enum machine_mode bestmode;
790 /* Get the mode to use for inserting into this field. If OP0 is
791 BLKmode, get the smallest mode consistent with the alignment. If
792 OP0 is a non-BLKmode object that is no wider than OP_MODE, use its
793 mode. Otherwise, use the smallest mode containing the field. */
795 if (GET_MODE (op0) == BLKmode
796 || (op_mode != MAX_MACHINE_MODE
797 && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (op_mode)))
798 bestmode = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0),
799 (op_mode == MAX_MACHINE_MODE
800 ? VOIDmode : op_mode),
801 MEM_VOLATILE_P (op0));
803 bestmode = GET_MODE (op0);
805 if (bestmode != VOIDmode
806 && GET_MODE_SIZE (bestmode) >= GET_MODE_SIZE (fieldmode)
807 && !(SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
808 && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
810 rtx last, tempreg, xop0;
811 unsigned HOST_WIDE_INT xoffset, xbitpos;
813 last = get_last_insn ();
815 /* Adjust address to point to the containing unit of
816 that mode. Compute the offset as a multiple of this unit,
817 counting in bytes. */
818 unit = GET_MODE_BITSIZE (bestmode);
819 xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
820 xbitpos = bitnum % unit;
821 xop0 = adjust_address (op0, bestmode, xoffset);
823 /* Fetch that unit, store the bitfield in it, then store
825 tempreg = copy_to_reg (xop0);
826 if (store_bit_field_1 (tempreg, bitsize, xbitpos,
827 fieldmode, orig_value, false))
829 emit_move_insn (xop0, tempreg);
832 delete_insns_since (last);
839 store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
843 /* Generate code to store value from rtx VALUE
844 into a bit-field within structure STR_RTX
845 containing BITSIZE bits starting at bit BITNUM.
846 FIELDMODE is the machine-mode of the FIELD_DECL node for this field. */
849 store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
850 unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
853 if (!store_bit_field_1 (str_rtx, bitsize, bitnum, fieldmode, value, true))
857 /* Use shifts and boolean operations to store VALUE
858 into a bit field of width BITSIZE
859 in a memory location specified by OP0, offset by OFFSET bytes.
860 (OFFSET must be 0 if OP0 is a register.)
861 The field starts at position BITPOS within the byte.
862 (If OP0 is a register, it may be a full word or a narrower mode,
863 but BITPOS still counts within a full word,
864 which is significant on bigendian machines.) */
867 store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT offset,
868 unsigned HOST_WIDE_INT bitsize,
869 unsigned HOST_WIDE_INT bitpos, rtx value)
871 enum machine_mode mode;
872 unsigned int total_bits = BITS_PER_WORD;
877 /* There is a case not handled here:
878 a structure with a known alignment of just a halfword
879 and a field split across two aligned halfwords within the structure.
880 Or likewise a structure with a known alignment of just a byte
881 and a field split across two bytes.
882 Such cases are not supposed to be able to occur. */
884 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
886 gcc_assert (!offset);
887 /* Special treatment for a bit field split across two registers. */
888 if (bitsize + bitpos > BITS_PER_WORD)
890 store_split_bit_field (op0, bitsize, bitpos, value);
896 /* Get the proper mode to use for this field. We want a mode that
897 includes the entire field. If such a mode would be larger than
898 a word, we won't be doing the extraction the normal way.
899 We don't want a mode bigger than the destination. */
901 mode = GET_MODE (op0);
902 if (GET_MODE_BITSIZE (mode) == 0
903 || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
906 if (MEM_VOLATILE_P (op0)
907 && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
908 && flag_strict_volatile_bitfields > 0)
909 mode = GET_MODE (op0);
911 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
912 MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));
914 if (mode == VOIDmode)
916 /* The only way this should occur is if the field spans word
917 boundaries. */
918 store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
923 total_bits = GET_MODE_BITSIZE (mode);
925 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
926 be in the range 0 to total_bits-1, and put any excess bytes in
928 if (bitpos >= total_bits)
930 offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
931 bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
935 /* Get ref to an aligned byte, halfword, or word containing the field.
936 Adjust BITPOS to be position within a word,
937 and OFFSET to be the offset of that word.
938 Then alter OP0 to refer to that word. */
939 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
940 offset -= (offset % (total_bits / BITS_PER_UNIT));
941 op0 = adjust_address (op0, mode, offset);
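/* Worked example of the realignment above, assuming a 32-bit MODE:
   BITPOS 37 with OFFSET 0 becomes OFFSET 4 (bytes) and BITPOS 5, and
   OP0 is rebased on the aligned word at that byte offset.  */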
944 mode = GET_MODE (op0);
946 /* Now MODE is either some integral mode for a MEM as OP0,
947 or is a full-word for a REG as OP0. TOTAL_BITS corresponds.
948 The bit field is contained entirely within OP0.
949 BITPOS is the starting bit number within OP0.
950 (OP0's mode may actually be narrower than MODE.) */
952 if (BYTES_BIG_ENDIAN)
953 /* BITPOS is the distance between our msb
954 and that of the containing datum.
955 Convert it to the distance from the lsb. */
956 bitpos = total_bits - bitsize - bitpos;
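/* E.g. with total_bits == 32, bitsize == 8 and big-endian bitpos == 0
   (the field starts at the msb), the lsb-relative position is 24.  */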
958 /* Now BITPOS is always the distance between our lsb
961 /* Shift VALUE left by BITPOS bits. If VALUE is not constant,
962 we must first convert its mode to MODE. */
964 if (CONST_INT_P (value))
966 HOST_WIDE_INT v = INTVAL (value);
968 if (bitsize < HOST_BITS_PER_WIDE_INT)
969 v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;
973 else if ((bitsize < HOST_BITS_PER_WIDE_INT
974 && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
975 || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
978 value = lshift_value (mode, value, bitpos, bitsize);
982 int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
983 && bitpos + bitsize != GET_MODE_BITSIZE (mode));
985 if (GET_MODE (value) != mode)
986 value = convert_to_mode (mode, value, 1);
989 value = expand_binop (mode, and_optab, value,
990 mask_rtx (mode, 0, bitsize, 0),
991 NULL_RTX, 1, OPTAB_LIB_WIDEN);
993 value = expand_shift (LSHIFT_EXPR, mode, value,
994 build_int_cst (NULL_TREE, bitpos), NULL_RTX, 1);
997 /* Now clear the chosen bits in OP0,
998 except that if VALUE is -1 we need not bother. */
999 /* We keep the intermediates in registers to allow CSE to combine
1000 consecutive bitfield assignments. */
1002 temp = force_reg (mode, op0);
1006 temp = expand_binop (mode, and_optab, temp,
1007 mask_rtx (mode, bitpos, bitsize, 1),
1008 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1009 temp = force_reg (mode, temp);
1012 /* Now logical-or VALUE into OP0, unless it is zero. */
1016 temp = expand_binop (mode, ior_optab, temp, value,
1017 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1018 temp = force_reg (mode, temp);
1023 op0 = copy_rtx (op0);
1024 emit_move_insn (op0, temp);
1028 /* Store a bit field that is split across multiple accessible memory objects.
1030 OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
1031 BITSIZE is the field width; BITPOS the position of its first bit
1033 VALUE is the value to store.
1035 This does not yet handle fields wider than BITS_PER_WORD. */
1038 store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
1039 unsigned HOST_WIDE_INT bitpos, rtx value)
1042 unsigned int bitsdone = 0;
1044 /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
1046 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
1047 unit = BITS_PER_WORD;
1049 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
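/* Illustrative split, assuming 32-bit words, little-endian order and a
   register OP0: a 20-bit field at BITPOS 20 is handled by the loop
   below as a 12-bit piece at bit 20 of word 0 followed by an 8-bit
   piece at bit 0 of word 1.  */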
1051 /* If VALUE is a constant other than a CONST_INT, get it into a register in
1052 WORD_MODE. If we can do this using gen_lowpart_common, do so. Note
1053 that VALUE might be a floating-point constant. */
1054 if (CONSTANT_P (value) && !CONST_INT_P (value))
1056 rtx word = gen_lowpart_common (word_mode, value);
1058 if (word && (value != word))
1061 value = gen_lowpart_common (word_mode,
1062 force_reg (GET_MODE (value) != VOIDmode
1064 : word_mode, value));
1067 while (bitsdone < bitsize)
1069 unsigned HOST_WIDE_INT thissize;
1071 unsigned HOST_WIDE_INT thispos;
1072 unsigned HOST_WIDE_INT offset;
1074 offset = (bitpos + bitsdone) / unit;
1075 thispos = (bitpos + bitsdone) % unit;
1077 /* THISSIZE must not overrun a word boundary. Otherwise,
1078 store_fixed_bit_field will call us again, and we will mutually
1079 recurse forever. */
1080 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1081 thissize = MIN (thissize, unit - thispos);
1083 if (BYTES_BIG_ENDIAN)
1087 /* We must do an endian conversion exactly the same way as it is
1088 done in extract_bit_field, so that the two calls to
1089 extract_fixed_bit_field will have comparable arguments. */
1090 if (!MEM_P (value) || GET_MODE (value) == BLKmode)
1091 total_bits = BITS_PER_WORD;
1093 total_bits = GET_MODE_BITSIZE (GET_MODE (value));
1095 /* Fetch successively less significant portions. */
1096 if (CONST_INT_P (value))
1097 part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
1098 >> (bitsize - bitsdone - thissize))
1099 & (((HOST_WIDE_INT) 1 << thissize) - 1));
1101 /* The args are chosen so that the last part includes the
1102 lsb. Give extract_bit_field the value it needs (with
1103 endianness compensation) to fetch the piece we want. */
1104 part = extract_fixed_bit_field (word_mode, value, 0, thissize,
1105 total_bits - bitsize + bitsdone,
1110 /* Fetch successively more significant portions. */
1111 if (CONST_INT_P (value))
1112 part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
1114 & (((HOST_WIDE_INT) 1 << thissize) - 1));
1116 part = extract_fixed_bit_field (word_mode, value, 0, thissize,
1117 bitsdone, NULL_RTX, 1);
1120 /* If OP0 is a register, then handle OFFSET here.
1122 When handling multiword bitfields, extract_bit_field may pass
1123 down a word_mode SUBREG of a larger REG for a bitfield that actually
1124 crosses a word boundary. Thus, for a SUBREG, we must find
1125 the current word starting from the base register. */
1126 if (GET_CODE (op0) == SUBREG)
1128 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1129 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1130 GET_MODE (SUBREG_REG (op0)));
1133 else if (REG_P (op0))
1135 word = operand_subword_force (op0, offset, GET_MODE (op0));
1141 /* OFFSET is in UNITs, and UNIT is in bits.
1142 store_fixed_bit_field wants offset in bytes. */
1143 store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
1145 bitsdone += thissize;
1149 /* A subroutine of extract_bit_field_1 that converts return value X
1150 to either MODE or TMODE. MODE, TMODE and UNSIGNEDP are arguments
1151 to extract_bit_field. */
1154 convert_extracted_bit_field (rtx x, enum machine_mode mode,
1155 enum machine_mode tmode, bool unsignedp)
1157 if (GET_MODE (x) == tmode || GET_MODE (x) == mode)
1160 /* If the x mode is not a scalar integral, first convert to the
1161 integer mode of that size and then access it as a floating-point
1162 value via a SUBREG. */
1163 if (!SCALAR_INT_MODE_P (tmode))
1165 enum machine_mode smode;
1167 smode = mode_for_size (GET_MODE_BITSIZE (tmode), MODE_INT, 0);
1168 x = convert_to_mode (smode, x, unsignedp);
1169 x = force_reg (smode, x);
1170 return gen_lowpart (tmode, x);
1173 return convert_to_mode (tmode, x, unsignedp);
1176 /* A subroutine of extract_bit_field, with the same arguments.
1177 If FALLBACK_P is true, fall back to extract_fixed_bit_field
1178 if we can find no other means of implementing the operation.
1179 If FALLBACK_P is false, return NULL instead. */
1182 extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
1183 unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
1184 enum machine_mode mode, enum machine_mode tmode,
1188 = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
1189 unsigned HOST_WIDE_INT offset, bitpos;
1191 enum machine_mode int_mode;
1192 enum machine_mode ext_mode;
1193 enum machine_mode mode1;
1194 enum insn_code icode;
1197 if (tmode == VOIDmode)
1200 while (GET_CODE (op0) == SUBREG)
1202 bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT;
1203 op0 = SUBREG_REG (op0);
1206 /* If we have an out-of-bounds access to a register, just return an
1207 uninitialized register of the required mode. This can occur if the
1208 source code contains an out-of-bounds access to a small array. */
1209 if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
1210 return gen_reg_rtx (tmode);
1213 && mode == GET_MODE (op0)
1215 && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
1217 /* We're trying to extract a full register from itself. */
1221 /* See if we can get a better vector mode before extracting. */
1222 if (VECTOR_MODE_P (GET_MODE (op0))
1224 && GET_MODE_INNER (GET_MODE (op0)) != tmode)
1226 enum machine_mode new_mode;
1227 int nunits = GET_MODE_NUNITS (GET_MODE (op0));
1229 if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
1230 new_mode = MIN_MODE_VECTOR_FLOAT;
1231 else if (GET_MODE_CLASS (tmode) == MODE_FRACT)
1232 new_mode = MIN_MODE_VECTOR_FRACT;
1233 else if (GET_MODE_CLASS (tmode) == MODE_UFRACT)
1234 new_mode = MIN_MODE_VECTOR_UFRACT;
1235 else if (GET_MODE_CLASS (tmode) == MODE_ACCUM)
1236 new_mode = MIN_MODE_VECTOR_ACCUM;
1237 else if (GET_MODE_CLASS (tmode) == MODE_UACCUM)
1238 new_mode = MIN_MODE_VECTOR_UACCUM;
1240 new_mode = MIN_MODE_VECTOR_INT;
1242 for (; new_mode != VOIDmode ; new_mode = GET_MODE_WIDER_MODE (new_mode))
1243 if (GET_MODE_NUNITS (new_mode) == nunits
1244 && GET_MODE_SIZE (new_mode) == GET_MODE_SIZE (GET_MODE (op0))
1245 && targetm.vector_mode_supported_p (new_mode))
1247 if (new_mode != VOIDmode)
1248 op0 = gen_lowpart (new_mode, op0);
1251 /* Use vec_extract patterns for extracting parts of vectors whenever
1253 if (VECTOR_MODE_P (GET_MODE (op0))
1255 && optab_handler (vec_extract_optab, GET_MODE (op0)) != CODE_FOR_nothing
1256 && ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
1257 == bitnum / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
1259 enum machine_mode outermode = GET_MODE (op0);
1260 enum machine_mode innermode = GET_MODE_INNER (outermode);
1261 int icode = (int) optab_handler (vec_extract_optab, outermode);
1262 unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode);
1263 rtx rtxpos = GEN_INT (pos);
1265 rtx dest = NULL, pat, seq;
1266 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
1267 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
1268 enum machine_mode mode2 = insn_data[icode].operand[2].mode;
1270 if (innermode == tmode || innermode == mode)
1274 dest = gen_reg_rtx (innermode);
1278 if (! (*insn_data[icode].operand[0].predicate) (dest, mode0))
1279 dest = copy_to_mode_reg (mode0, dest);
1281 if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
1282 src = copy_to_mode_reg (mode1, src);
1284 if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
1285 rtxpos = copy_to_mode_reg (mode1, rtxpos);
1287 /* We could handle this, but we should always be called with a pseudo
1288 for our targets and all insns should take them as outputs. */
1289 gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
1290 && (*insn_data[icode].operand[1].predicate) (src, mode1)
1291 && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));
1293 pat = GEN_FCN (icode) (dest, src, rtxpos);
1301 return gen_lowpart (tmode, dest);
1306 /* Make sure we are playing with integral modes. Pun with subregs
1309 enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
1310 if (imode != GET_MODE (op0))
1313 op0 = adjust_address (op0, imode, 0);
1314 else if (imode != BLKmode)
1316 op0 = gen_lowpart (imode, op0);
1318 /* If we got a SUBREG, force it into a register since we
1319 aren't going to be able to do another SUBREG on it. */
1320 if (GET_CODE (op0) == SUBREG)
1321 op0 = force_reg (imode, op0);
1323 else if (REG_P (op0))
1326 imode = smallest_mode_for_size (GET_MODE_BITSIZE (GET_MODE (op0)),
1328 reg = gen_reg_rtx (imode);
1329 subreg = gen_lowpart_SUBREG (GET_MODE (op0), reg);
1330 emit_move_insn (subreg, op0);
1332 bitnum += SUBREG_BYTE (subreg) * BITS_PER_UNIT;
1336 rtx mem = assign_stack_temp (GET_MODE (op0),
1337 GET_MODE_SIZE (GET_MODE (op0)), 0);
1338 emit_move_insn (mem, op0);
1339 op0 = adjust_address (mem, BLKmode, 0);
1344 /* We may be accessing data outside the field, which means
1345 we can alias adjacent data. */
1348 op0 = shallow_copy_rtx (op0);
1349 set_mem_alias_set (op0, 0);
1350 set_mem_expr (op0, 0);
1353 /* Extraction of a full-word or multi-word value from a structure
1354 in a register or aligned memory can be done with just a SUBREG.
1355 A subword value in the least significant part of a register
1356 can also be extracted with a SUBREG. For this, we need the
1357 byte offset of the value in op0. */
1359 bitpos = bitnum % unit;
1360 offset = bitnum / unit;
1361 byte_offset = bitpos / BITS_PER_UNIT + offset * UNITS_PER_WORD;
1363 /* If OP0 is a register, BITPOS must count within a word.
1364 But as we have it, it counts within whatever size OP0 now has.
1365 On a bigendian machine, these are not the same, so convert. */
1366 if (BYTES_BIG_ENDIAN
1368 && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
1369 bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
1371 /* ??? We currently assume TARGET is at least as big as BITSIZE.
1372 If that's wrong, the solution is to test for it and set TARGET to 0
1375 /* Only scalar integer modes can be converted via subregs. There is an
1376 additional problem for FP modes here in that they can have a precision
1377 which is different from the size. mode_for_size uses precision, but
1378 we want a mode based on the size, so we must avoid calling it for FP
1380 mode1 = (SCALAR_INT_MODE_P (tmode)
1381 ? mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0)
1384 /* If the bitfield is volatile, we need to make sure the access
1385 remains on a type-aligned boundary. */
1386 if (GET_CODE (op0) == MEM
1387 && MEM_VOLATILE_P (op0)
1388 && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
1389 && flag_strict_volatile_bitfields > 0)
1390 goto no_subreg_mode_swap;
1392 if (((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
1393 && bitpos % BITS_PER_WORD == 0)
1394 || (mode1 != BLKmode
1395 /* ??? The big endian test here is wrong. This is correct
1396 if the value is in a register, and if mode_for_size is not
1397 the same mode as op0. This causes us to get unnecessarily
1398 inefficient code from the Thumb port when -mbig-endian. */
1399 && (BYTES_BIG_ENDIAN
1400 ? bitpos + bitsize == BITS_PER_WORD
1403 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode1),
1404 GET_MODE_BITSIZE (GET_MODE (op0)))
1405 && GET_MODE_SIZE (mode1) != 0
1406 && byte_offset % GET_MODE_SIZE (mode1) == 0)
1408 && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
1409 || (offset * BITS_PER_UNIT % bitsize == 0
1410 && MEM_ALIGN (op0) % bitsize == 0)))))
1413 op0 = adjust_address (op0, mode1, offset);
1414 else if (mode1 != GET_MODE (op0))
1416 rtx sub = simplify_gen_subreg (mode1, op0, GET_MODE (op0),
1419 goto no_subreg_mode_swap;
1423 return convert_to_mode (tmode, op0, unsignedp);
1426 no_subreg_mode_swap:
1428 /* Handle fields bigger than a word. */
1430 if (bitsize > BITS_PER_WORD)
1432 /* Here we transfer the words of the field
1433 in the order least significant first.
1434 This is because the most significant word is the one which may
1435 be less than full. */
1437 unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
1440 if (target == 0 || !REG_P (target))
1441 target = gen_reg_rtx (mode);
1443 /* Indicate for flow that the entire target reg is being set. */
1444 emit_clobber (target);
1446 for (i = 0; i < nwords; i++)
1448 /* If I is 0, use the low-order word in both field and target;
1449 if I is 1, use the next to lowest word; and so on. */
1450 /* Word number in TARGET to use. */
1451 unsigned int wordnum
1453 ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
1455 /* Offset from start of field in OP0. */
1456 unsigned int bit_offset = (WORDS_BIG_ENDIAN
1457 ? MAX (0, ((int) bitsize - ((int) i + 1)
1458 * (int) BITS_PER_WORD))
1459 : (int) i * BITS_PER_WORD);
1460 rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
1462 = extract_bit_field (op0, MIN (BITS_PER_WORD,
1463 bitsize - i * BITS_PER_WORD),
1464 bitnum + bit_offset, 1, target_part, mode,
1467 gcc_assert (target_part);
1469 if (result_part != target_part)
1470 emit_move_insn (target_part, result_part);
1475 /* Unless we've filled TARGET, the upper regs in a multi-reg value
1476 need to be zero'd out. */
1477 if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
1479 unsigned int i, total_words;
1481 total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
1482 for (i = nwords; i < total_words; i++)
1484 (operand_subword (target,
1485 WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
1492 /* Signed bit field: sign-extend with two arithmetic shifts. */
1493 target = expand_shift (LSHIFT_EXPR, mode, target,
1494 build_int_cst (NULL_TREE,
1495 GET_MODE_BITSIZE (mode) - bitsize),
1497 return expand_shift (RSHIFT_EXPR, mode, target,
1498 build_int_cst (NULL_TREE,
1499 GET_MODE_BITSIZE (mode) - bitsize),
1503 /* From here on we know the desired field is smaller than a word. */
1505 /* Check if there is a correspondingly-sized integer field, so we can
1506 safely extract it as one size of integer, if necessary; then
1507 truncate or extend to the size that is wanted; then use SUBREGs or
1508 convert_to_mode to get one of the modes we really wanted. */
1510 int_mode = int_mode_for_mode (tmode);
1511 if (int_mode == BLKmode)
1512 int_mode = int_mode_for_mode (mode);
1513 /* Should probably push op0 out to memory and then do a load. */
1514 gcc_assert (int_mode != BLKmode);
1516 /* OFFSET is the number of words or bytes (UNIT says which)
1517 from STR_RTX to the first word or byte containing part of the field. */
1521 || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
1524 op0 = copy_to_reg (op0);
1525 op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
1526 op0, (offset * UNITS_PER_WORD));
1531 /* Now OFFSET is nonzero only for memory operands. */
1532 ext_mode = mode_for_extraction (unsignedp ? EP_extzv : EP_extv, 0);
1533 icode = unsignedp ? CODE_FOR_extzv : CODE_FOR_extv;
1534 if (ext_mode != MAX_MACHINE_MODE
1536 && GET_MODE_BITSIZE (ext_mode) >= bitsize
1537 /* If op0 is a register, we need it in EXT_MODE to make it
1538 acceptable to the format of ext(z)v. */
1539 && !(GET_CODE (op0) == SUBREG && GET_MODE (op0) != ext_mode)
1540 && !((REG_P (op0) || GET_CODE (op0) == SUBREG)
1541 && (bitsize + bitpos > GET_MODE_BITSIZE (ext_mode)))
1542 && check_predicate_volatile_ok (icode, 1, op0, GET_MODE (op0)))
1544 unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
1545 rtx bitsize_rtx, bitpos_rtx;
1546 rtx last = get_last_insn ();
1548 rtx xtarget = target;
1549 rtx xspec_target = target;
1550 rtx xspec_target_subreg = 0;
1553 /* If op0 is a register, we need it in EXT_MODE to make it
1554 acceptable to the format of ext(z)v. */
1555 if (REG_P (xop0) && GET_MODE (xop0) != ext_mode)
1556 xop0 = gen_lowpart_SUBREG (ext_mode, xop0);
1558 /* Get ref to first byte containing part of the field. */
1559 xop0 = adjust_address (xop0, byte_mode, xoffset);
1561 /* On big-endian machines, we count bits from the most significant.
1562 If the bit field insn does not, we must invert. */
1563 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
1564 xbitpos = unit - bitsize - xbitpos;
1566 /* Now convert from counting within UNIT to counting in EXT_MODE. */
1567 if (BITS_BIG_ENDIAN && !MEM_P (xop0))
1568 xbitpos += GET_MODE_BITSIZE (ext_mode) - unit;
1570 unit = GET_MODE_BITSIZE (ext_mode);
1573 xtarget = xspec_target = gen_reg_rtx (tmode);
1575 if (GET_MODE (xtarget) != ext_mode)
1577 /* Don't use LHS paradoxical subreg if explicit truncation is needed
1578 between the mode of the extraction (word_mode) and the target
1579 mode. Instead, create a temporary and use convert_move to set
1582 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (GET_MODE (xtarget)),
1583 GET_MODE_BITSIZE (ext_mode)))
1585 xtarget = gen_lowpart (ext_mode, xtarget);
1586 if (GET_MODE_SIZE (ext_mode)
1587 > GET_MODE_SIZE (GET_MODE (xspec_target)))
1588 xspec_target_subreg = xtarget;
1591 xtarget = gen_reg_rtx (ext_mode);
1594 /* If this machine's ext(z)v insists on a register target,
1595 make sure we have one. */
1596 if (!insn_data[(int) icode].operand[0].predicate (xtarget, ext_mode))
1597 xtarget = gen_reg_rtx (ext_mode);
1599 bitsize_rtx = GEN_INT (bitsize);
1600 bitpos_rtx = GEN_INT (xbitpos);
1603 ? gen_extzv (xtarget, xop0, bitsize_rtx, bitpos_rtx)
1604 : gen_extv (xtarget, xop0, bitsize_rtx, bitpos_rtx));
1608 if (xtarget == xspec_target)
1610 if (xtarget == xspec_target_subreg)
1611 return xspec_target;
1612 return convert_extracted_bit_field (xtarget, mode, tmode, unsignedp);
1614 delete_insns_since (last);
1617 /* If OP0 is a memory, try copying it to a register and seeing if a
1618 cheap register alternative is available. */
1619 if (ext_mode != MAX_MACHINE_MODE && MEM_P (op0))
1621 enum machine_mode bestmode;
1623 /* Get the mode to use for inserting into this field. If
1624 OP0 is BLKmode, get the smallest mode consistent with the
1625 alignment. If OP0 is a non-BLKmode object that is no
1626 wider than EXT_MODE, use its mode. Otherwise, use the
1627 smallest mode containing the field. */
1629 if (GET_MODE (op0) == BLKmode
1630 || (ext_mode != MAX_MACHINE_MODE
1631 && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (ext_mode)))
1632 bestmode = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0),
1633 (ext_mode == MAX_MACHINE_MODE
1634 ? VOIDmode : ext_mode),
1635 MEM_VOLATILE_P (op0));
1637 bestmode = GET_MODE (op0);
1639 if (bestmode != VOIDmode
1640 && !(SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
1641 && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
1643 unsigned HOST_WIDE_INT xoffset, xbitpos;
1645 /* Compute the offset as a multiple of this unit,
1646 counting in bytes. */
1647 unit = GET_MODE_BITSIZE (bestmode);
1648 xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
1649 xbitpos = bitnum % unit;
1651 /* Make sure the register is big enough for the whole field. */
1652 if (xoffset * BITS_PER_UNIT + unit
1653 >= offset * BITS_PER_UNIT + bitsize)
1655 rtx last, result, xop0;
1657 last = get_last_insn ();
1659 /* Fetch it to a register in that size. */
1660 xop0 = adjust_address (op0, bestmode, xoffset);
1661 xop0 = force_reg (bestmode, xop0);
1662 result = extract_bit_field_1 (xop0, bitsize, xbitpos,
1664 mode, tmode, false);
1668 delete_insns_since (last);
1676 target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
1677 bitpos, target, unsignedp);
1678 return convert_extracted_bit_field (target, mode, tmode, unsignedp);
1681 /* Generate code to extract a byte-field from STR_RTX
1682 containing BITSIZE bits, starting at BITNUM,
1683 and put it in TARGET if possible (if TARGET is nonzero).
1684 Regardless of TARGET, we return the rtx for where the value is placed.
1686 STR_RTX is the structure containing the byte (a REG or MEM).
1687 UNSIGNEDP is nonzero if this is an unsigned bit field.
1688 MODE is the natural mode of the field value once extracted.
1689 TMODE is the mode the caller would like the value to have;
1690 but the value may be returned with type MODE instead.
1692 If a TARGET is specified and we can store in it at no extra cost,
1693 we do so, and return TARGET.
1694 Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
1695 if they are equally easy. */
1698 extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
1699 unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
1700 enum machine_mode mode, enum machine_mode tmode)
1702 return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp,
1703 target, mode, tmode, true);
1706 /* Extract a bit field using shifts and boolean operations;
1707 returns an rtx to represent the value.
1708 OP0 addresses a register (word) or memory (byte).
1709 BITPOS says which bit within the word or byte the bit field starts in.
1710 OFFSET says how many bytes farther the bit field starts;
1711 it is 0 if OP0 is a register.
1712 BITSIZE says how many bits long the bit field is.
1713 (If OP0 is a register, it may be narrower than a full word,
1714 but BITPOS still counts within a full word,
1715 which is significant on bigendian machines.)
1717 UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
1718 If TARGET is nonzero, attempts to store the value there
1719 and return TARGET, but this is not guaranteed.
1720 If TARGET is not used, create a pseudo-reg of mode TMODE for the value. */
1723 extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
1724 unsigned HOST_WIDE_INT offset,
1725 unsigned HOST_WIDE_INT bitsize,
1726 unsigned HOST_WIDE_INT bitpos, rtx target,
1729 unsigned int total_bits = BITS_PER_WORD;
1730 enum machine_mode mode;
1732 if (GET_CODE (op0) == SUBREG || REG_P (op0))
1734 /* Special treatment for a bit field split across two registers. */
1735 if (bitsize + bitpos > BITS_PER_WORD)
1736 return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
1740 /* Get the proper mode to use for this field. We want a mode that
1741 includes the entire field. If such a mode would be larger than
1742 a word, we won't be doing the extraction the normal way. */
1744 if (MEM_VOLATILE_P (op0)
1745 && flag_strict_volatile_bitfields > 0)
1747 if (GET_MODE_BITSIZE (GET_MODE (op0)) > 0)
1748 mode = GET_MODE (op0);
1749 else if (target && GET_MODE_BITSIZE (GET_MODE (target)) > 0)
1750 mode = GET_MODE (target);
1755 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
1756 MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));
1758 if (mode == VOIDmode)
1759 /* The only way this should occur is if the field spans word
1760 boundaries. */
1761 return extract_split_bit_field (op0, bitsize,
1762 bitpos + offset * BITS_PER_UNIT,
1765 total_bits = GET_MODE_BITSIZE (mode);
1767 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
1768 be in the range 0 to total_bits-1, and put any excess bytes in
1770 if (bitpos >= total_bits)
1772 offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
1773 bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
1777 /* If we're accessing a volatile MEM, we can't do the next
1778 alignment step if it results in a multi-word access where we
1779 otherwise wouldn't have one. So, check for that case
1782 && MEM_VOLATILE_P (op0)
1783 && flag_strict_volatile_bitfields > 0
1784 && bitpos + bitsize <= total_bits
1785 && bitpos + bitsize + (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT > total_bits)
1787 if (STRICT_ALIGNMENT)
1789 static bool informed_about_misalignment = false;
1792 if (bitsize == total_bits)
1793 warned = warning_at (input_location, OPT_fstrict_volatile_bitfields,
1794 "mis-aligned access used for structure member");
1796 warned = warning_at (input_location, OPT_fstrict_volatile_bitfields,
1797 "mis-aligned access used for structure bitfield");
1799 if (! informed_about_misalignment && warned)
1801 informed_about_misalignment = true;
1802 inform (input_location,
1803 "When a volatile object spans multiple type-sized locations,"
1804 " the compiler must choose between using a single mis-aligned access to"
1805 " preserve the volatility, or using multiple aligned accesses to avoid"
1806 " runtime faults. This code may fail at runtime if the hardware does"
1807 " not allow this access.");
1814 /* Get ref to an aligned byte, halfword, or word containing the field.
1815 Adjust BITPOS to be position within a word,
1816 and OFFSET to be the offset of that word.
1817 Then alter OP0 to refer to that word. */
1818 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
1819 offset -= (offset % (total_bits / BITS_PER_UNIT));
1822 op0 = adjust_address (op0, mode, offset);
1825 mode = GET_MODE (op0);
1827 if (BYTES_BIG_ENDIAN)
1828 /* BITPOS is the distance between our msb and that of OP0.
1829 Convert it to the distance from the lsb. */
1830 bitpos = total_bits - bitsize - bitpos;
1832 /* Now BITPOS is always the distance between the field's lsb and that of OP0.
1833 We have reduced the big-endian case to the little-endian case. */
1839 /* If the field does not already start at the lsb,
1840 shift it so it does. */
1841 tree amount = build_int_cst (NULL_TREE, bitpos);
1842 /* Maybe propagate the target for the shift. */
1843 /* But not if we will return it--could confuse integrate.c. */
1844 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
1845 if (tmode != mode) subtarget = 0;
1846 op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1848 /* Convert the value to the desired mode. */
1850 op0 = convert_to_mode (tmode, op0, 1);
1852 /* Unless the msb of the field used to be the msb when we shifted,
1853 mask out the upper bits. */
1855 if (GET_MODE_BITSIZE (mode) != bitpos + bitsize)
1856 return expand_binop (GET_MODE (op0), and_optab, op0,
1857 mask_rtx (GET_MODE (op0), 0, bitsize, 0),
1858 target, 1, OPTAB_LIB_WIDEN);
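/* Unsigned example: in SImode with BITPOS 8 and BITSIZE 8, the field is
   shifted right by 8 and then masked with mask_rtx (SImode, 0, 8, 0),
   i.e. 0xff.  The masking is skipped when the field's msb was already
   the msb of the word, since the shift leaves no stray upper bits.  */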
1862 /* To extract a signed bit-field, first shift its msb to the msb of the word,
1863 then arithmetic-shift its lsb to the lsb of the word. */
1864 op0 = force_reg (mode, op0);
1868 /* Find the narrowest integer mode that contains the field. */
1870 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1871 mode = GET_MODE_WIDER_MODE (mode))
1872 if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
1874 op0 = convert_to_mode (mode, op0, 0);
1875 break;
1878 if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
1880 tree amount
1881 = build_int_cst (NULL_TREE,
1882 GET_MODE_BITSIZE (mode) - (bitsize + bitpos));
1883 /* Maybe propagate the target for the shift. */
1884 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
1885 op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1888 return expand_shift (RSHIFT_EXPR, mode, op0,
1889 build_int_cst (NULL_TREE,
1890 GET_MODE_BITSIZE (mode) - bitsize),
1891 target, 0);
1894 /* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
1895 of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
1896 complement of that if COMPLEMENT. The mask is truncated if
1897 necessary to the width of mode MODE. The mask is zero-extended if
1898 BITSIZE+BITPOS is too small for MODE. */
1900 static rtx
1901 mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement)
1903 double_int mask;
1905 mask = double_int_mask (bitsize);
1906 mask = double_int_lshift (mask, bitpos, HOST_BITS_PER_DOUBLE_INT, false);
1908 if (complement)
1909 mask = double_int_not (mask);
1911 return immed_double_int_const (mask, mode);
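/* Illustrative example (ours, not from the original source): for a
   16-bit mode, mask_rtx (HImode, 3, 4, 0) yields 0x0078 (four ones
   shifted left by three) and mask_rtx (HImode, 3, 4, 1) yields the
   complement 0xff87.  A plain C sketch of the same computation,
   assuming a 16-bit width:

     unsigned short mask = ((1u << 4) - 1) << 3;      // 0x0078
     unsigned short cmask = (unsigned short) ~mask;   // 0xff87
*/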
1914 /* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
1915 VALUE truncated to BITSIZE bits and then shifted left BITPOS bits. */
1917 static rtx
1918 lshift_value (enum machine_mode mode, rtx value, int bitpos, int bitsize)
1920 double_int val;
1922 val = double_int_zext (uhwi_to_double_int (INTVAL (value)), bitsize);
1923 val = double_int_lshift (val, bitpos, HOST_BITS_PER_DOUBLE_INT, false);
1925 return immed_double_int_const (val, mode);
1928 /* Extract a bit field that is split across two words
1929 and return an RTX for the result.
1931 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1932 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
1933 UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend. */
1935 static rtx
1936 extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
1937 unsigned HOST_WIDE_INT bitpos, int unsignedp)
1939 unsigned int unit;
1940 unsigned int bitsdone = 0;
1941 rtx result = NULL_RTX;
1942 int first = 1;
1944 /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
1945 much at a time.  */
1946 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
1947 unit = BITS_PER_WORD;
1948 else
1949 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
1951 while (bitsdone < bitsize)
1953 unsigned HOST_WIDE_INT thissize;
1954 rtx part, word;
1955 unsigned HOST_WIDE_INT thispos;
1956 unsigned HOST_WIDE_INT offset;
1958 offset = (bitpos + bitsdone) / unit;
1959 thispos = (bitpos + bitsdone) % unit;
1961 /* THISSIZE must not overrun a word boundary. Otherwise,
1962 extract_fixed_bit_field will call us again, and we will mutually
1963 recurse forever.  */
1964 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1965 thissize = MIN (thissize, unit - thispos);
1967 /* If OP0 is a register, then handle OFFSET here.
1969 When handling multiword bitfields, extract_bit_field may pass
1970 down a word_mode SUBREG of a larger REG for a bitfield that actually
1971 crosses a word boundary. Thus, for a SUBREG, we must find
1972 the current word starting from the base register. */
1973 if (GET_CODE (op0) == SUBREG)
1975 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1976 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1977 GET_MODE (SUBREG_REG (op0)));
1980 else if (REG_P (op0))
1982 word = operand_subword_force (op0, offset, GET_MODE (op0));
1984 else
1985 word = op0;
1988 /* Extract the parts in bit-counting order,
1989 whose meaning is determined by BYTES_PER_UNIT.
1990 OFFSET is in UNITs, and UNIT is in bits.
1991 extract_fixed_bit_field wants offset in bytes. */
1992 part = extract_fixed_bit_field (word_mode, word,
1993 offset * unit / BITS_PER_UNIT,
1994 thissize, thispos, 0, 1);
1995 bitsdone += thissize;
1997 /* Shift this part into place for the result. */
1998 if (BYTES_BIG_ENDIAN)
2000 if (bitsize != bitsdone)
2001 part = expand_shift (LSHIFT_EXPR, word_mode, part,
2002 build_int_cst (NULL_TREE, bitsize - bitsdone),
2003 0, 1);
2005 else
2007 if (bitsdone != thissize)
2008 part = expand_shift (LSHIFT_EXPR, word_mode, part,
2009 build_int_cst (NULL_TREE,
2010 bitsdone - thissize), 0, 1);
2012 if (first)
2013 result = part;
2014 else
2016 /* Combine the parts with bitwise or.  This works
2017 because we extracted each part as an unsigned bit field. */
2018 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
2019 OPTAB_LIB_WIDEN);
2022 first = 0;
2024 /* Unsigned bit field: we are done.  */
2025 if (unsignedp)
2026 return result;
2027 /* Signed bit field: sign-extend with two arithmetic shifts. */
2028 result = expand_shift (LSHIFT_EXPR, word_mode, result,
2029 build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
2030 NULL_RTX, 0);
2031 return expand_shift (RSHIFT_EXPR, word_mode, result,
2032 build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
2033 NULL_RTX, 0);
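/* A minimal C sketch (ours) of the sign-extension idiom used above:
   once the BITSIZE low-order bits hold the field, a left shift followed
   by an arithmetic right shift of BITS_PER_WORD - BITSIZE replicates
   the sign bit.  For a 32-bit word and a 5-bit field, assuming the
   implementation's right shift of a negative int is arithmetic:

     int sign_extend_5 (int field)
     {
       return (field << 27) >> 27;
     }
*/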
2036 /* Try to read the low bits of SRC as an rvalue of mode MODE, preserving
2037 the bit pattern. SRC_MODE is the mode of SRC; if this is smaller than
2038 MODE, fill the upper bits with zeros. Fail if the layout of either
2039 mode is unknown (as for CC modes) or if the extraction would involve
2040 unprofitable mode punning.  Return the value on success, otherwise
2041 return null.
2043 This is different from gen_lowpart* in these respects:
2045 - the returned value must always be considered an rvalue
2047 - when MODE is wider than SRC_MODE, the extraction involves
2048 a zero extension
2050 - when MODE is smaller than SRC_MODE, the extraction involves
2051 a truncation (and is thus subject to TRULY_NOOP_TRUNCATION).
2053 In other words, this routine performs a computation, whereas the
2054 gen_lowpart* routines are conceptually lvalue or rvalue subreg
2055 operations.  */
2058 extract_low_bits (enum machine_mode mode, enum machine_mode src_mode, rtx src)
2060 enum machine_mode int_mode, src_int_mode;
2062 if (mode == src_mode)
2063 return src;
2065 if (CONSTANT_P (src))
2067 /* simplify_gen_subreg can't be used here, as if simplify_subreg
2068 fails, it will happily create (subreg (symbol_ref)) or similar
2069 invalid SUBREGs.  */
2070 unsigned int byte = subreg_lowpart_offset (mode, src_mode);
2071 rtx ret = simplify_subreg (mode, src, src_mode, byte);
2072 if (ret)
2073 return ret;
2075 if (GET_MODE (src) == VOIDmode
2076 || !validate_subreg (mode, src_mode, src, byte))
2077 return NULL_RTX;
2079 src = force_reg (GET_MODE (src), src);
2080 return gen_rtx_SUBREG (mode, src, byte);
2083 if (GET_MODE_CLASS (mode) == MODE_CC || GET_MODE_CLASS (src_mode) == MODE_CC)
2084 return NULL_RTX;
2086 if (GET_MODE_BITSIZE (mode) == GET_MODE_BITSIZE (src_mode)
2087 && MODES_TIEABLE_P (mode, src_mode))
2089 rtx x = gen_lowpart_common (mode, src);
2090 if (x)
2091 return x;
2094 src_int_mode = int_mode_for_mode (src_mode);
2095 int_mode = int_mode_for_mode (mode);
2096 if (src_int_mode == BLKmode || int_mode == BLKmode)
2097 return NULL_RTX;
2099 if (!MODES_TIEABLE_P (src_int_mode, src_mode))
2100 return NULL_RTX;
2101 if (!MODES_TIEABLE_P (int_mode, mode))
2102 return NULL_RTX;
2104 src = gen_lowpart (src_int_mode, src);
2105 src = convert_modes (int_mode, src_int_mode, src, true);
2106 src = gen_lowpart (mode, src);
2107 return src;
2110 /* Add INC into TARGET. */
2113 expand_inc (rtx target, rtx inc)
2115 rtx value = expand_binop (GET_MODE (target), add_optab,
2116 target, inc,
2117 target, 0, OPTAB_LIB_WIDEN);
2118 if (value != target)
2119 emit_move_insn (target, value);
2122 /* Subtract DEC from TARGET. */
2125 expand_dec (rtx target, rtx dec)
2127 rtx value = expand_binop (GET_MODE (target), sub_optab,
2128 target, dec,
2129 target, 0, OPTAB_LIB_WIDEN);
2130 if (value != target)
2131 emit_move_insn (target, value);
2134 /* Output a shift instruction for expression code CODE,
2135 with SHIFTED being the rtx for the value to shift,
2136 and AMOUNT the tree for the amount to shift by.
2137 Store the result in the rtx TARGET, if that is convenient.
2138 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2139 Return the rtx for where the value is. */
2142 expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
2143 tree amount, rtx target, int unsignedp)
2145 rtx op1, temp = 0;
2146 int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
2147 int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
2148 optab lshift_optab = ashl_optab;
2149 optab rshift_arith_optab = ashr_optab;
2150 optab rshift_uns_optab = lshr_optab;
2151 optab lrotate_optab = rotl_optab;
2152 optab rrotate_optab = rotr_optab;
2153 enum machine_mode op1_mode;
2154 int attempt;
2155 bool speed = optimize_insn_for_speed_p ();
2157 op1 = expand_normal (amount);
2158 op1_mode = GET_MODE (op1);
2160 /* Determine whether the shift/rotate amount is a vector, or scalar. If the
2161 shift amount is a vector, use the vector/vector shift patterns. */
2162 if (VECTOR_MODE_P (mode) && VECTOR_MODE_P (op1_mode))
2164 lshift_optab = vashl_optab;
2165 rshift_arith_optab = vashr_optab;
2166 rshift_uns_optab = vlshr_optab;
2167 lrotate_optab = vrotl_optab;
2168 rrotate_optab = vrotr_optab;
2171 /* Previously detected shift-counts computed by NEGATE_EXPR
2172 and shifted in the other direction; but that does not work
2173 on all machines.  */
2175 if (SHIFT_COUNT_TRUNCATED)
2177 if (CONST_INT_P (op1)
2178 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
2179 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
2180 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
2181 % GET_MODE_BITSIZE (mode));
2182 else if (GET_CODE (op1) == SUBREG
2183 && subreg_lowpart_p (op1)
2184 && INTEGRAL_MODE_P (GET_MODE (SUBREG_REG (op1))))
2185 op1 = SUBREG_REG (op1);
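/* Illustration (ours): on a SHIFT_COUNT_TRUNCATED target, shifting a
   32-bit value by 37 is congruent to shifting by 5, so the constant is
   reduced up front, mirroring the hardware's own masking:

     unsigned count = 37 % 32;   // == 5
*/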
2188 if (op1 == const0_rtx)
2189 return shifted;
2191 /* Check whether it's cheaper to implement a left shift by a constant
2192 bit count by a sequence of additions. */
2193 if (code == LSHIFT_EXPR
2194 && CONST_INT_P (op1)
2195 && INTVAL (op1) > 0
2196 && INTVAL (op1) < GET_MODE_BITSIZE (mode)
2197 && INTVAL (op1) < MAX_BITS_PER_WORD
2198 && shift_cost[speed][mode][INTVAL (op1)] > INTVAL (op1) * add_cost[speed][mode]
2199 && shift_cost[speed][mode][INTVAL (op1)] != MAX_COST)
2201 int i;
2202 for (i = 0; i < INTVAL (op1); i++)
2204 temp = force_reg (mode, shifted);
2205 shifted = expand_binop (mode, add_optab, temp, temp, NULL_RTX,
2206 unsignedp, OPTAB_LIB_WIDEN);
2208 return shifted;
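/* Worked example (ours): when the target's shift is costlier than two
   additions, x << 2 is expanded by the loop above as repeated doubling:

     int t = x + x;   // x << 1
     t = t + t;       // x << 2
*/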
2211 for (attempt = 0; temp == 0 && attempt < 3; attempt++)
2213 enum optab_methods methods;
2215 if (attempt == 0)
2216 methods = OPTAB_DIRECT;
2217 else if (attempt == 1)
2218 methods = OPTAB_WIDEN;
2220 methods = OPTAB_LIB_WIDEN;
2224 /* Widening does not work for rotation. */
2225 if (methods == OPTAB_WIDEN)
2226 continue;
2227 else if (methods == OPTAB_LIB_WIDEN)
2229 /* If we have been unable to open-code this by a rotation,
2230 do it as the IOR of two shifts. I.e., to rotate A
2231 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
2232 where C is the bitsize of A.
2234 It is theoretically possible that the target machine might
2235 not be able to perform either shift and hence we would
2236 be making two libcalls rather than just the one for the
2237 shift (similarly if IOR could not be done). We will allow
2238 this extremely unlikely lossage to avoid complicating the
2239 code below.  */
2241 rtx subtarget = target == shifted ? 0 : target;
2242 tree new_amount, other_amount;
2243 rtx temp1;
2244 tree type = TREE_TYPE (amount);
2245 if (GET_MODE (op1) != TYPE_MODE (type)
2246 && GET_MODE (op1) != VOIDmode)
2247 op1 = convert_to_mode (TYPE_MODE (type), op1, 1);
2248 new_amount = make_tree (type, op1);
2249 other_amount
2250 = fold_build2 (MINUS_EXPR, type,
2251 build_int_cst (type, GET_MODE_BITSIZE (mode)),
2252 new_amount);
2254 shifted = force_reg (mode, shifted);
2256 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
2257 mode, shifted, new_amount, 0, 1);
2258 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
2259 mode, shifted, other_amount, subtarget, 1);
2260 return expand_binop (mode, ior_optab, temp, temp1, target,
2261 unsignedp, methods);
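/* A plain C sketch (ours) of the IOR-of-two-shifts fallback above for
   a 32-bit left rotation, where C from the comment is 32 and
   0 < n < 32:

     unsigned rotl32 (unsigned a, unsigned n)
     {
       return (a << n) | (a >> (32 - n));
     }
*/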
2264 temp = expand_binop (mode,
2265 left ? lrotate_optab : rrotate_optab,
2266 shifted, op1, target, unsignedp, methods);
2268 else if (unsignedp)
2269 temp = expand_binop (mode,
2270 left ? lshift_optab : rshift_uns_optab,
2271 shifted, op1, target, unsignedp, methods);
2273 /* Do arithmetic shifts.
2274 Also, if we are going to widen the operand, we can just as well
2275 use an arithmetic right-shift instead of a logical one. */
2276 if (temp == 0 && ! rotate
2277 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2279 enum optab_methods methods1 = methods;
2281 /* If trying to widen a log shift to an arithmetic shift,
2282 don't accept an arithmetic shift of the same size. */
2283 if (unsignedp)
2284 methods1 = OPTAB_MUST_WIDEN;
2286 /* Arithmetic shift */
2288 temp = expand_binop (mode,
2289 left ? lshift_optab : rshift_arith_optab,
2290 shifted, op1, target, unsignedp, methods1);
2293 /* We used to try extzv here for logical right shifts, but that was
2294 only useful for one machine, the VAX, and caused poor code
2295 generation there for lshrdi3, so the code was deleted and a
2296 define_expand for lshrsi3 was added to vax.md.  */
2298 gcc_assert (temp);
2299 return temp;
2316 /* This structure holds the "cost" of a multiply sequence. The
2317 "cost" field holds the total rtx_cost of every operator in the
2318 synthetic multiplication sequence, hence cost(a op b) is defined
2319 as rtx_cost(op) + cost(a) + cost(b), where cost(leaf) is zero.
2320 The "latency" field holds the minimum possible latency of the
2321 synthetic multiply, on a hypothetical infinitely parallel CPU.
2322 This is the critical path, or the maximum height, of the expression
2323 tree which is the sum of rtx_costs on the most expensive path from
2324 any leaf to the root. Hence latency(a op b) is defined as zero for
2325 leaves and rtx_cost(op) + max(latency(a), latency(b)) otherwise. */
2326 struct mult_cost {
2328 short cost;	/* Total rtx_cost of the multiplication sequence.  */
2329 short latency;	/* The latency of the multiplication sequence.  */
2330 };
2332 /* This macro is used to compare a pointer to a mult_cost against a
2333 single integer "rtx_cost" value. This is equivalent to the macro
2334 CHEAPER_MULT_COST(X,Z) where Z = {Y,Y}. */
2335 #define MULT_COST_LESS(X,Y) ((X)->cost < (Y) \
2336 || ((X)->cost == (Y) && (X)->latency < (Y)))
2338 /* This macro is used to compare two pointers to mult_costs against
2339 each other. The macro returns true if X is cheaper than Y.
2340 Currently, the cheaper of two mult_costs is the one with the
2341 lower "cost". If "cost"s are tied, the lower latency is cheaper. */
2342 #define CHEAPER_MULT_COST(X,Y) ((X)->cost < (Y)->cost \
2343 || ((X)->cost == (Y)->cost \
2344 && (X)->latency < (Y)->latency))
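/* Example (ours): with X = {cost 4, latency 6} and Y = {cost 4,
   latency 5}, CHEAPER_MULT_COST (&Y, &X) is true: the costs tie, so
   the lower latency wins.  MULT_COST_LESS (&X, 5) is also true,
   since 4 < 5.  */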
2346 /* This structure records a sequence of operations.
2347 `ops' is the number of operations recorded.
2348 `cost' is their total cost.
2349 The operations are stored in `op' and the corresponding
2350 logarithms of the integer coefficients in `log'.
2352 These are the operations:
2353 alg_zero total := 0;
2354 alg_m total := multiplicand;
2355 alg_shift total := total * coeff;
2356 alg_add_t_m2 total := total + multiplicand * coeff;
2357 alg_sub_t_m2 total := total - multiplicand * coeff;
2358 alg_add_factor total := total * coeff + total;
2359 alg_sub_factor total := total * coeff - total;
2360 alg_add_t2_m total := total * coeff + multiplicand;
2361 alg_sub_t2_m total := total * coeff - multiplicand;
2363 The first operand must be either alg_zero or alg_m. */
2365 struct algorithm
2367 struct mult_cost cost;
2368 short ops;
2369 /* The size of the OP and LOG fields are not directly related to the
2370 word size, but the worst-case algorithms will be if we have few
2371 consecutive ones or zeros, i.e., a multiplicand like 10101010101...
2372 In that case we will generate shift-by-2, add, shift-by-2, add,...,
2373 in total wordsize operations. */
2374 enum alg_code op[MAX_BITS_PER_WORD];
2375 char log[MAX_BITS_PER_WORD];
2376 };
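/* Worked example (ours): a multiplication by 10 can be recorded as
   alg_m (total = x), alg_add_t2_m with log 2 (total = 4*total + x
   = 5x), then alg_shift with log 1 (total = 10x).  In plain C:

     int mul10 (int x)
     {
       int total = x;             // alg_m
       total = (total << 2) + x;  // alg_add_t2_m, log == 2
       return total << 1;         // alg_shift, log == 1; 10 * x
     }
*/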
2378 /* The entry for our multiplication cache/hash table. */
2379 struct alg_hash_entry {
2380 /* The number we are multiplying by. */
2381 unsigned HOST_WIDE_INT t;
2383 /* The mode in which we are multiplying something by T. */
2384 enum machine_mode mode;
2386 /* The best multiplication algorithm for t.  */
2387 enum alg_code alg;
2389 /* The cost of multiplication if ALG_CODE is not alg_impossible.
2390 Otherwise, the cost within which multiplication by T is
2391 impossible.  */
2392 struct mult_cost cost;
2394 /* Optimized for speed?  */
2395 bool speed;
2396 };
2398 /* The number of cache/hash entries. */
2399 #if HOST_BITS_PER_WIDE_INT == 64
2400 #define NUM_ALG_HASH_ENTRIES 1031
2402 #define NUM_ALG_HASH_ENTRIES 307
2405 /* Each entry of ALG_HASH caches alg_code for some integer. This is
2406 actually a hash table.  If we have a collision, the older
2407 entry is kicked out. */
2408 static struct alg_hash_entry alg_hash[NUM_ALG_HASH_ENTRIES];
2410 /* Indicates the type of fixup needed after a constant multiplication.
2411 BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that
2412 the result should be negated, and ADD_VARIANT means that the
2413 multiplicand should be added to the result. */
2414 enum mult_variant {basic_variant, negate_variant, add_variant};
2416 static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
2417 const struct mult_cost *, enum machine_mode mode);
2418 static bool choose_mult_variant (enum machine_mode, HOST_WIDE_INT,
2419 struct algorithm *, enum mult_variant *, int);
2420 static rtx expand_mult_const (enum machine_mode, rtx, HOST_WIDE_INT, rtx,
2421 const struct algorithm *, enum mult_variant);
2422 static unsigned HOST_WIDE_INT choose_multiplier (unsigned HOST_WIDE_INT, int,
2423 int, rtx *, int *, int *);
2424 static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
2425 static rtx extract_high_half (enum machine_mode, rtx);
2426 static rtx expand_mult_highpart (enum machine_mode, rtx, rtx, rtx, int, int);
2427 static rtx expand_mult_highpart_optab (enum machine_mode, rtx, rtx, rtx,
2428 int, int);
2429 /* Compute and return the best algorithm for multiplying by T.
2430 The algorithm must cost less than COST_LIMIT.
2431 If retval.cost >= COST_LIMIT, no algorithm was found and all
2432 other fields of the returned struct are undefined.
2433 MODE is the machine mode of the multiplication. */
2435 static void
2436 synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
2437 const struct mult_cost *cost_limit, enum machine_mode mode)
2439 int m;
2440 struct algorithm *alg_in, *best_alg;
2441 struct mult_cost best_cost;
2442 struct mult_cost new_limit;
2443 int op_cost, op_latency;
2444 unsigned HOST_WIDE_INT orig_t = t;
2445 unsigned HOST_WIDE_INT q;
2446 int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode));
2447 int hash_index;
2448 bool cache_hit = false;
2449 enum alg_code cache_alg = alg_zero;
2450 bool speed = optimize_insn_for_speed_p ();
2452 /* Indicate that no algorithm is yet found. If no algorithm
2453 is found, this value will be returned and indicate failure. */
2454 alg_out->cost.cost = cost_limit->cost + 1;
2455 alg_out->cost.latency = cost_limit->latency + 1;
2457 if (cost_limit->cost < 0
2458 || (cost_limit->cost == 0 && cost_limit->latency <= 0))
2459 return;
2461 /* Restrict the bits of "t" to the multiplication's mode. */
2462 t &= GET_MODE_MASK (mode);
2464 /* t == 1 can be done at zero cost.  */
2465 if (t == 1)
2467 alg_out->ops = 1;
2468 alg_out->cost.cost = 0;
2469 alg_out->cost.latency = 0;
2470 alg_out->op[0] = alg_m;
2471 return;
2474 /* t == 0 sometimes has a cost.  If it does and it exceeds our limit,
2475 fail now.  */
2476 if (t == 0)
2478 if (MULT_COST_LESS (cost_limit, zero_cost[speed]))
2479 return;
2480 else
2482 alg_out->ops = 1;
2483 alg_out->cost.cost = zero_cost[speed];
2484 alg_out->cost.latency = zero_cost[speed];
2485 alg_out->op[0] = alg_zero;
2486 return;
2490 /* We'll be needing a couple extra algorithm structures now. */
2492 alg_in = XALLOCA (struct algorithm);
2493 best_alg = XALLOCA (struct algorithm);
2494 best_cost = *cost_limit;
2496 /* Compute the hash index. */
2497 hash_index = (t ^ (unsigned int) mode ^ (speed * 256)) % NUM_ALG_HASH_ENTRIES;
2499 /* See if we already know what to do for T. */
2500 if (alg_hash[hash_index].t == t
2501 && alg_hash[hash_index].mode == mode
2503 && alg_hash[hash_index].speed == speed
2504 && alg_hash[hash_index].alg != alg_unknown)
2506 cache_alg = alg_hash[hash_index].alg;
2508 if (cache_alg == alg_impossible)
2510 /* The cache tells us that it's impossible to synthesize
2511 multiplication by T within alg_hash[hash_index].cost. */
2512 if (!CHEAPER_MULT_COST (&alg_hash[hash_index].cost, cost_limit))
2513 /* COST_LIMIT is at least as restrictive as the one
2514 recorded in the hash table, in which case we have no
2515 hope of synthesizing a multiplication.  Just
2516 return.  */
2517 return;
2519 /* If we get here, COST_LIMIT is less restrictive than the
2520 one recorded in the hash table, so we may be able to
2521 synthesize a multiplication. Proceed as if we didn't
2522 have the cache entry. */
2526 if (CHEAPER_MULT_COST (cost_limit, &alg_hash[hash_index].cost))
2527 /* The cached algorithm shows that this multiplication
2528 requires more cost than COST_LIMIT. Just return. This
2529 way, we don't clobber this cache entry with
2530 alg_impossible but retain useful information.  */
2531 return;
2533 cache_hit = true;
2535 switch (cache_alg)
2537 case alg_shift: goto do_alg_shift;
2540 case alg_add_t_m2:
2541 case alg_sub_t_m2:
2542 goto do_alg_addsub_t_m2;
2544 case alg_add_factor:
2545 case alg_sub_factor:
2546 goto do_alg_addsub_factor;
2548 case alg_add_t2_m: goto do_alg_add_t2_m;
2551 case alg_sub_t2_m: goto do_alg_sub_t2_m;
2554 default: gcc_unreachable ();
2560 /* If we have a group of zero bits at the low-order part of T, try
2561 multiplying by the remaining bits and then doing a shift. */
2565 do_alg_shift:
2566 m = floor_log2 (t & -t);	/* m = number of low zero bits */
2567 if (m < maxm)
2569 q = t >> m;
2570 /* The function expand_shift will choose between a shift and
2571 a sequence of additions, so the observed cost is given as
2572 MIN (m * add_cost[speed][mode], shift_cost[speed][mode][m]). */
2573 op_cost = m * add_cost[speed][mode];
2574 if (shift_cost[speed][mode][m] < op_cost)
2575 op_cost = shift_cost[speed][mode][m];
2576 new_limit.cost = best_cost.cost - op_cost;
2577 new_limit.latency = best_cost.latency - op_cost;
2578 synth_mult (alg_in, q, &new_limit, mode);
2580 alg_in->cost.cost += op_cost;
2581 alg_in->cost.latency += op_cost;
2582 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2584 struct algorithm *x;
2585 best_cost = alg_in->cost;
2586 x = alg_in, alg_in = best_alg, best_alg = x;
2587 best_alg->log[best_alg->ops] = m;
2588 best_alg->op[best_alg->ops] = alg_shift;
2591 /* See if treating ORIG_T as a signed number yields a better
2592 sequence. Try this sequence only for a negative ORIG_T
2593 as it would be useless for a non-negative ORIG_T. */
2594 if ((HOST_WIDE_INT) orig_t < 0)
2596 /* Shift ORIG_T as follows because a right shift of a
2597 negative-valued signed type is implementation
2598 defined.  */
2599 q = ~(~orig_t >> m);
2600 /* The function expand_shift will choose between a shift
2601 and a sequence of additions, so the observed cost is
2602 given as MIN (m * add_cost[speed][mode],
2603 shift_cost[speed][mode][m]). */
2604 op_cost = m * add_cost[speed][mode];
2605 if (shift_cost[speed][mode][m] < op_cost)
2606 op_cost = shift_cost[speed][mode][m];
2607 new_limit.cost = best_cost.cost - op_cost;
2608 new_limit.latency = best_cost.latency - op_cost;
2609 synth_mult (alg_in, q, &new_limit, mode);
2611 alg_in->cost.cost += op_cost;
2612 alg_in->cost.latency += op_cost;
2613 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2615 struct algorithm *x;
2616 best_cost = alg_in->cost;
2617 x = alg_in, alg_in = best_alg, best_alg = x;
2618 best_alg->log[best_alg->ops] = m;
2619 best_alg->op[best_alg->ops] = alg_shift;
2623 if (cache_hit)
2624 goto done;
2627 /* If we have an odd number, add or subtract one. */
2628 if ((t & 1) != 0)
2630 unsigned HOST_WIDE_INT w;
2632 do_alg_addsub_t_m2:
2633 for (w = 1; (w & t) != 0; w <<= 1)
2635 /* If T was -1, then W will be zero after the loop. This is another
2636 case where T ends with ...111. Handling this with (T + 1) and
2637 subtracting 1 produces slightly better code and results in algorithm
2638 selection much faster than treating it like the ...0111 case below.  */
2640 if (w == 0
2641 || (w > 2
2642 /* Reject the case where t is 3.
2643 Thus we prefer addition in that case.  */
2644 && t != 3))
2646 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2648 op_cost = add_cost[speed][mode];
2649 new_limit.cost = best_cost.cost - op_cost;
2650 new_limit.latency = best_cost.latency - op_cost;
2651 synth_mult (alg_in, t + 1, &new_limit, mode);
2653 alg_in->cost.cost += op_cost;
2654 alg_in->cost.latency += op_cost;
2655 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2657 struct algorithm *x;
2658 best_cost = alg_in->cost;
2659 x = alg_in, alg_in = best_alg, best_alg = x;
2660 best_alg->log[best_alg->ops] = 0;
2661 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2664 else
2666 /* T ends with ...01 or ...011.  Multiply by (T - 1) and add 1.  */
2668 op_cost = add_cost[speed][mode];
2669 new_limit.cost = best_cost.cost - op_cost;
2670 new_limit.latency = best_cost.latency - op_cost;
2671 synth_mult (alg_in, t - 1, &new_limit, mode);
2673 alg_in->cost.cost += op_cost;
2674 alg_in->cost.latency += op_cost;
2675 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2677 struct algorithm *x;
2678 best_cost = alg_in->cost;
2679 x = alg_in, alg_in = best_alg, best_alg = x;
2680 best_alg->log[best_alg->ops] = 0;
2681 best_alg->op[best_alg->ops] = alg_add_t_m2;
2685 /* We may be able to calculate a * -7, a * -15, a * -31, etc
2686 quickly with a - a * n for some appropriate constant n. */
2687 m = exact_log2 (-orig_t + 1);
2688 if (m >= 0 && m < maxm)
2690 op_cost = shiftsub1_cost[speed][mode][m];
2691 new_limit.cost = best_cost.cost - op_cost;
2692 new_limit.latency = best_cost.latency - op_cost;
2693 synth_mult (alg_in, (unsigned HOST_WIDE_INT) (-orig_t + 1) >> m, &new_limit, mode);
2695 alg_in->cost.cost += op_cost;
2696 alg_in->cost.latency += op_cost;
2697 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2699 struct algorithm *x;
2700 best_cost = alg_in->cost;
2701 x = alg_in, alg_in = best_alg, best_alg = x;
2702 best_alg->log[best_alg->ops] = m;
2703 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2707 if (cache_hit)
2708 goto done;
2711 /* Look for factors of t of the form
2712 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2713 If we find such a factor, we can multiply by t using an algorithm that
2714 multiplies by q, shift the result by m and add/subtract it to itself.
2716 We search for large factors first and loop down, even if large factors
2717 are less probable than small; if we find a large factor we will find a
2718 good sequence quickly, and therefore be able to prune (by decreasing
2719 COST_LIMIT) the search. */
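/* Worked example (ours): t = 45 factors as 5 * (2*2*2 + 1), so
   synth_mult can recurse on q = 5 and finish with one shift-and-add
   (alg_add_factor, m == 3):

     int mul45 (int x)
     {
       int q = (x << 2) + x;   // 5x, from the recursive call
       return (q << 3) + q;    // 45x
     }
*/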
2721 do_alg_addsub_factor:
2722 for (m = floor_log2 (t - 1); m >= 2; m--)
2724 unsigned HOST_WIDE_INT d;
2726 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2727 if (t % d == 0 && t > d && m < maxm
2728 && (!cache_hit || cache_alg == alg_add_factor))
2730 /* If the target has a cheap shift-and-add instruction use
2731 that in preference to a shift insn followed by an add insn.
2732 Assume that the shift-and-add is "atomic" with a latency
2733 equal to its cost, otherwise assume that on superscalar
2734 hardware the shift may be executed concurrently with the
2735 earlier steps in the algorithm. */
2736 op_cost = add_cost[speed][mode] + shift_cost[speed][mode][m];
2737 if (shiftadd_cost[speed][mode][m] < op_cost)
2739 op_cost = shiftadd_cost[speed][mode][m];
2740 op_latency = op_cost;
2742 else
2743 op_latency = add_cost[speed][mode];
2745 new_limit.cost = best_cost.cost - op_cost;
2746 new_limit.latency = best_cost.latency - op_latency;
2747 synth_mult (alg_in, t / d, &new_limit, mode);
2749 alg_in->cost.cost += op_cost;
2750 alg_in->cost.latency += op_latency;
2751 if (alg_in->cost.latency < op_cost)
2752 alg_in->cost.latency = op_cost;
2753 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2755 struct algorithm *x;
2756 best_cost = alg_in->cost;
2757 x = alg_in, alg_in = best_alg, best_alg = x;
2758 best_alg->log[best_alg->ops] = m;
2759 best_alg->op[best_alg->ops] = alg_add_factor;
2761 /* Other factors will have been taken care of in the recursion.  */
2762 break;
2765 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2766 if (t % d == 0 && t > d && m < maxm
2767 && (!cache_hit || cache_alg == alg_sub_factor))
2769 /* If the target has a cheap shift-and-subtract insn use
2770 that in preference to a shift insn followed by a sub insn.
2771 Assume that the shift-and-sub is "atomic" with a latency
2772 equal to its cost, otherwise assume that on superscalar
2773 hardware the shift may be executed concurrently with the
2774 earlier steps in the algorithm. */
2775 op_cost = add_cost[speed][mode] + shift_cost[speed][mode][m];
2776 if (shiftsub0_cost[speed][mode][m] < op_cost)
2778 op_cost = shiftsub0_cost[speed][mode][m];
2779 op_latency = op_cost;
2781 else
2782 op_latency = add_cost[speed][mode];
2784 new_limit.cost = best_cost.cost - op_cost;
2785 new_limit.latency = best_cost.latency - op_latency;
2786 synth_mult (alg_in, t / d, &new_limit, mode);
2788 alg_in->cost.cost += op_cost;
2789 alg_in->cost.latency += op_latency;
2790 if (alg_in->cost.latency < op_cost)
2791 alg_in->cost.latency = op_cost;
2792 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2794 struct algorithm *x;
2795 best_cost = alg_in->cost;
2796 x = alg_in, alg_in = best_alg, best_alg = x;
2797 best_alg->log[best_alg->ops] = m;
2798 best_alg->op[best_alg->ops] = alg_sub_factor;
2800 break;
2803 if (cache_hit)
2804 goto done;
2806 /* Try shift-and-add (load effective address) instructions,
2807 i.e. do a*3, a*5, a*9. */
2810 do_alg_add_t2_m:
2811 q = t - 1; q = q & -q; m = exact_log2 (q);
2814 if (m >= 0 && m < maxm)
2816 op_cost = shiftadd_cost[speed][mode][m];
2817 new_limit.cost = best_cost.cost - op_cost;
2818 new_limit.latency = best_cost.latency - op_cost;
2819 synth_mult (alg_in, (t - 1) >> m, &new_limit, mode);
2821 alg_in->cost.cost += op_cost;
2822 alg_in->cost.latency += op_cost;
2823 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2825 struct algorithm *x;
2826 best_cost = alg_in->cost;
2827 x = alg_in, alg_in = best_alg, best_alg = x;
2828 best_alg->log[best_alg->ops] = m;
2829 best_alg->op[best_alg->ops] = alg_add_t2_m;
2832 if (cache_hit)
2833 goto done;
2835 do_alg_sub_t2_m:
2836 q = t + 1; q = q & -q; m = exact_log2 (q);
2839 if (m >= 0 && m < maxm)
2841 op_cost = shiftsub0_cost[speed][mode][m];
2842 new_limit.cost = best_cost.cost - op_cost;
2843 new_limit.latency = best_cost.latency - op_cost;
2844 synth_mult (alg_in, (t + 1) >> m, &new_limit, mode);
2846 alg_in->cost.cost += op_cost;
2847 alg_in->cost.latency += op_cost;
2848 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2850 struct algorithm *x;
2851 best_cost = alg_in->cost;
2852 x = alg_in, alg_in = best_alg, best_alg = x;
2853 best_alg->log[best_alg->ops] = m;
2854 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2859 done:
2862 /* If best_cost has not decreased, we have not found any algorithm. */
2863 if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
2865 /* We failed to find an algorithm. Record alg_impossible for
2866 this case (that is, <T, MODE, COST_LIMIT>) so that next time
2867 we are asked to find an algorithm for T within the same or
2868 lower COST_LIMIT, we can immediately return to the
2869 caller.  */
2870 alg_hash[hash_index].t = t;
2871 alg_hash[hash_index].mode = mode;
2872 alg_hash[hash_index].speed = speed;
2873 alg_hash[hash_index].alg = alg_impossible;
2874 alg_hash[hash_index].cost = *cost_limit;
2875 return;
2878 /* Cache the result.  */
2879 if (!cache_hit)
2881 alg_hash[hash_index].t = t;
2882 alg_hash[hash_index].mode = mode;
2883 alg_hash[hash_index].speed = speed;
2884 alg_hash[hash_index].alg = best_alg->op[best_alg->ops];
2885 alg_hash[hash_index].cost.cost = best_cost.cost;
2886 alg_hash[hash_index].cost.latency = best_cost.latency;
2889 /* If we are getting a too long sequence for `struct algorithm'
2890 to record, make this search fail. */
2891 if (best_alg->ops == MAX_BITS_PER_WORD)
2892 return;
2894 /* Copy the algorithm from temporary space to the space at alg_out.
2895 We avoid using structure assignment because the majority of
2896 best_alg is normally undefined, and this is a critical function. */
2897 alg_out->ops = best_alg->ops + 1;
2898 alg_out->cost = best_cost;
2899 memcpy (alg_out->op, best_alg->op,
2900 alg_out->ops * sizeof *alg_out->op);
2901 memcpy (alg_out->log, best_alg->log,
2902 alg_out->ops * sizeof *alg_out->log);
2905 /* Find the cheapest way of multiplying a value of mode MODE by VAL.
2906 Try three variations:
2908 - a shift/add sequence based on VAL itself
2909 - a shift/add sequence based on -VAL, followed by a negation
2910 - a shift/add sequence based on VAL - 1, followed by an addition.
2912 Return true if the cheapest of these costs less than MULT_COST,
2913 describing the algorithm in *ALG and final fixup in *VARIANT. */
2915 static bool
2916 choose_mult_variant (enum machine_mode mode, HOST_WIDE_INT val,
2917 struct algorithm *alg, enum mult_variant *variant,
2918 int mult_cost)
2920 struct algorithm alg2;
2921 struct mult_cost limit;
2922 int op_cost;
2923 bool speed = optimize_insn_for_speed_p ();
2925 /* Fail quickly for impossible bounds.  */
2926 if (mult_cost < 0)
2927 return false;
2929 /* Ensure that mult_cost provides a reasonable upper bound.
2930 Any constant multiplication can be performed with less
2931 than 2 * bits additions. */
2932 op_cost = 2 * GET_MODE_BITSIZE (mode) * add_cost[speed][mode];
2933 if (mult_cost > op_cost)
2934 mult_cost = op_cost;
2936 *variant = basic_variant;
2937 limit.cost = mult_cost;
2938 limit.latency = mult_cost;
2939 synth_mult (alg, val, &limit, mode);
2941 /* This works only if the inverted value actually fits in an
2942 `HOST_WIDE_INT'.  */
2943 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2945 op_cost = neg_cost[speed][mode];
2946 if (MULT_COST_LESS (&alg->cost, mult_cost))
2948 limit.cost = alg->cost.cost - op_cost;
2949 limit.latency = alg->cost.latency - op_cost;
2952 else
2953 limit.cost = mult_cost - op_cost;
2954 limit.latency = mult_cost - op_cost;
2957 synth_mult (&alg2, -val, &limit, mode);
2958 alg2.cost.cost += op_cost;
2959 alg2.cost.latency += op_cost;
2960 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2961 *alg = alg2, *variant = negate_variant;
2964 /* This proves very useful for division-by-constant. */
2965 op_cost = add_cost[speed][mode];
2966 if (MULT_COST_LESS (&alg->cost, mult_cost))
2968 limit.cost = alg->cost.cost - op_cost;
2969 limit.latency = alg->cost.latency - op_cost;
2972 else
2973 limit.cost = mult_cost - op_cost;
2974 limit.latency = mult_cost - op_cost;
2977 synth_mult (&alg2, val - 1, &limit, mode);
2978 alg2.cost.cost += op_cost;
2979 alg2.cost.latency += op_cost;
2980 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2981 *alg = alg2, *variant = add_variant;
2983 return MULT_COST_LESS (&alg->cost, mult_cost);
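/* Examples (ours) of the three variants: val = 10 typically stays
   basic_variant; val = -5 may synthesize 5 and negate the result
   (negate_variant); val = 9 may synthesize val - 1 == 8 and add the
   multiplicand back (add_variant):

     int mul9_add_variant (int x)
     {
       return (x << 3) + x;
     }
*/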
2986 /* A subroutine of expand_mult, used for constant multiplications.
2987 Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
2988 convenient. Use the shift/add sequence described by ALG and apply
2989 the final fixup specified by VARIANT. */
2992 expand_mult_const (enum machine_mode mode, rtx op0, HOST_WIDE_INT val,
2993 rtx target, const struct algorithm *alg,
2994 enum mult_variant variant)
2996 HOST_WIDE_INT val_so_far;
2997 rtx insn, accum, tem;
2998 int opno;
2999 enum machine_mode nmode;
3001 /* Avoid referencing memory over and over and invalid sharing
3002 on SUBREGs.  */
3003 op0 = force_reg (mode, op0);
3005 /* ACCUM starts out either as OP0 or as a zero, depending on
3006 the first operation. */
3008 if (alg->op[0] == alg_zero)
3010 accum = copy_to_mode_reg (mode, const0_rtx);
3011 val_so_far = 0;
3013 else if (alg->op[0] == alg_m)
3015 accum = copy_to_mode_reg (mode, op0);
3016 val_so_far = 1;
3018 else
3019 gcc_unreachable ();
3021 for (opno = 1; opno < alg->ops; opno++)
3023 int log = alg->log[opno];
3024 rtx shift_subtarget = optimize ? 0 : accum;
3025 rtx add_target
3026 = (opno == alg->ops - 1 && target != 0 && variant != add_variant
3027 ? target : 0);
3029 rtx accum_target = optimize ? 0 : accum;
3031 switch (alg->op[opno])
3034 accum = expand_shift (LSHIFT_EXPR, mode, accum,
3035 build_int_cst (NULL_TREE, log),
3036 NULL_RTX, 0);
3037 break;
3040 case alg_add_t_m2:
3041 tem = expand_shift (LSHIFT_EXPR, mode, op0,
3042 build_int_cst (NULL_TREE, log),
3043 NULL_RTX, 0);
3044 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
3045 add_target ? add_target : accum_target);
3046 val_so_far += (HOST_WIDE_INT) 1 << log;
3047 break;
3049 case alg_sub_t_m2:
3050 tem = expand_shift (LSHIFT_EXPR, mode, op0,
3051 build_int_cst (NULL_TREE, log),
3052 NULL_RTX, 0);
3053 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
3054 add_target ? add_target : accum_target);
3055 val_so_far -= (HOST_WIDE_INT) 1 << log;
3056 break;
3058 case alg_add_t2_m:
3059 accum = expand_shift (LSHIFT_EXPR, mode, accum,
3060 build_int_cst (NULL_TREE, log),
3061 shift_subtarget, 0);
3063 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
3064 add_target ? add_target : accum_target);
3065 val_so_far = (val_so_far << log) + 1;
3066 break;
3068 case alg_sub_t2_m:
3069 accum = expand_shift (LSHIFT_EXPR, mode, accum,
3070 build_int_cst (NULL_TREE, log),
3071 shift_subtarget, 0);
3072 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
3073 add_target ? add_target : accum_target);
3074 val_so_far = (val_so_far << log) - 1;
3075 break;
3077 case alg_add_factor:
3078 tem = expand_shift (LSHIFT_EXPR, mode, accum,
3079 build_int_cst (NULL_TREE, log),
3080 NULL_RTX, 0);
3081 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
3082 add_target ? add_target : accum_target);
3083 val_so_far += val_so_far << log;
3084 break;
3086 case alg_sub_factor:
3087 tem = expand_shift (LSHIFT_EXPR, mode, accum,
3088 build_int_cst (NULL_TREE, log),
3089 NULL_RTX, 0);
3090 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
3091 (add_target
3092 ? add_target : (optimize ? 0 : tem)));
3093 val_so_far = (val_so_far << log) - val_so_far;
3094 break;
3096 default:
3097 gcc_unreachable ();
3100 /* Write a REG_EQUAL note on the last insn so that we can cse
3101 multiplication sequences. Note that if ACCUM is a SUBREG,
3102 we've set the inner register and must properly indicate
3103 that.  */
3105 tem = op0, nmode = mode;
3106 if (GET_CODE (accum) == SUBREG)
3108 nmode = GET_MODE (SUBREG_REG (accum));
3109 tem = gen_lowpart (nmode, op0);
3112 insn = get_last_insn ();
3113 set_unique_reg_note (insn, REG_EQUAL,
3114 gen_rtx_MULT (nmode, tem,
3115 GEN_INT (val_so_far)));
3118 if (variant == negate_variant)
3120 val_so_far = -val_so_far;
3121 accum = expand_unop (mode, neg_optab, accum, target, 0);
3123 else if (variant == add_variant)
3125 val_so_far = val_so_far + 1;
3126 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
3129 /* Compare only the bits of val and val_so_far that are significant
3130 in the result mode, to avoid sign-/zero-extension confusion. */
3131 val &= GET_MODE_MASK (mode);
3132 val_so_far &= GET_MODE_MASK (mode);
3133 gcc_assert (val == val_so_far);
3135 return accum;
3138 /* Perform a multiplication and return an rtx for the result.
3139 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3140 TARGET is a suggestion for where to store the result (an rtx).
3142 We check specially for a constant integer as OP1.
3143 If you want this check for OP0 as well, then before calling
3144 you should swap the two operands if OP0 would be constant. */
3146 rtx
3147 expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3148 int unsignedp)
3150 enum mult_variant variant;
3151 struct algorithm algorithm;
3153 bool speed = optimize_insn_for_speed_p ();
3155 /* Handling const0_rtx here allows us to use zero as a rogue value for
3156 coeff below.  */
3157 if (op1 == const0_rtx)
3158 return const0_rtx;
3159 if (op1 == const1_rtx)
3160 return op0;
3161 if (op1 == constm1_rtx)
3162 return expand_unop (mode,
3163 GET_MODE_CLASS (mode) == MODE_INT
3164 && !unsignedp && flag_trapv
3165 ? negv_optab : neg_optab,
3166 op0, target, 0);
3168 /* These are the operations that are potentially turned into a sequence
3169 of shifts and additions. */
3170 if (SCALAR_INT_MODE_P (mode)
3171 && (unsignedp || !flag_trapv))
3173 HOST_WIDE_INT coeff = 0;
3174 rtx fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
3176 /* synth_mult does an `unsigned int' multiply. As long as the mode is
3177 less than or equal in size to `unsigned int' this doesn't matter.
3178 If the mode is larger than `unsigned int', then synth_mult works
3179 only if the constant value exactly fits in an `unsigned int' without
3180 any truncation. This means that multiplying by negative values does
3181 not work; results are off by 2^32 on a 32 bit machine. */
3183 if (CONST_INT_P (op1))
3185 /* Attempt to handle multiplication of DImode values by negative
3186 coefficients, by performing the multiplication by a positive
3187 multiplier and then inverting the result. */
3188 if (INTVAL (op1) < 0
3189 && GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
3191 /* It's safe to use -INTVAL (op1) even for INT_MIN, as the
3192 result is interpreted as an unsigned coefficient.
3193 Exclude cost of op0 from max_cost to match the cost
3194 calculation of the synth_mult. */
3195 max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET, speed)
3196 - neg_cost[speed][mode];
3197 if (max_cost > 0
3198 && choose_mult_variant (mode, -INTVAL (op1), &algorithm,
3199 &variant, max_cost))
3201 rtx temp = expand_mult_const (mode, op0, -INTVAL (op1),
3202 NULL_RTX, &algorithm,
3203 variant);
3204 return expand_unop (mode, neg_optab, temp, target, 0);
3207 else coeff = INTVAL (op1);
3209 else if (GET_CODE (op1) == CONST_DOUBLE)
3211 /* If we are multiplying in DImode, it may still be a win
3212 to try to work with shifts and adds. */
3213 if (CONST_DOUBLE_HIGH (op1) == 0
3214 && CONST_DOUBLE_LOW (op1) > 0)
3215 coeff = CONST_DOUBLE_LOW (op1);
3216 else if (CONST_DOUBLE_LOW (op1) == 0
3217 && EXACT_POWER_OF_2_OR_ZERO_P (CONST_DOUBLE_HIGH (op1)))
3219 int shift = floor_log2 (CONST_DOUBLE_HIGH (op1))
3220 + HOST_BITS_PER_WIDE_INT;
3221 return expand_shift (LSHIFT_EXPR, mode, op0,
3222 build_int_cst (NULL_TREE, shift),
3223 target, unsignedp);
3227 /* We used to test optimize here, on the grounds that it's better to
3228 produce a smaller program when -O is not used. But this causes
3229 such a terrible slowdown sometimes that it seems better to always
3230 use synth_mult.  */
3232 if (coeff != 0)
3233 /* Special case powers of two.  */
3234 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3235 return expand_shift (LSHIFT_EXPR, mode, op0,
3236 build_int_cst (NULL_TREE, floor_log2 (coeff)),
3237 target, unsignedp);
3239 /* Exclude cost of op0 from max_cost to match the cost
3240 calculation of the synth_mult. */
3241 max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET, speed);
3242 if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3243 max_cost))
3244 return expand_mult_const (mode, op0, coeff, target,
3245 &algorithm, variant);
3249 if (GET_CODE (op0) == CONST_DOUBLE)
3251 rtx temp = op0;
3252 op0 = op1;
3253 op1 = temp;
3256 /* Expand x*2.0 as x+x. */
3257 if (GET_CODE (op1) == CONST_DOUBLE
3258 && SCALAR_FLOAT_MODE_P (mode))
3260 REAL_VALUE_TYPE d;
3261 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
3263 if (REAL_VALUES_EQUAL (d, dconst2))
3265 op0 = force_reg (GET_MODE (op0), op0);
3266 return expand_binop (mode, add_optab, op0, op0,
3267 target, unsignedp, OPTAB_LIB_WIDEN);
3271 /* This used to use umul_optab if unsigned, but for non-widening multiply
3272 there is no difference between signed and unsigned. */
3273 op0 = expand_binop (mode,
3274 ! unsignedp
3275 && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
3276 ? smulv_optab : smul_optab,
3277 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
3278 gcc_assert (op0);
3279 return op0;
3282 /* Perform a widening multiplication and return an rtx for the result.
3283 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3284 TARGET is a suggestion for where to store the result (an rtx).
3285 THIS_OPTAB is the optab we should use, it must be either umul_widen_optab
3286 or smul_widen_optab.
3288 We check specially for a constant integer as OP1, comparing the
3289 cost of a widening multiply against the cost of a sequence of shifts
3290 and adds.  */
3292 rtx
3293 expand_widening_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3294 int unsignedp, optab this_optab)
3296 bool speed = optimize_insn_for_speed_p ();
3298 if (CONST_INT_P (op1)
3299 && (INTVAL (op1) >= 0
3300 || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT))
3302 HOST_WIDE_INT coeff = INTVAL (op1);
3303 int max_cost;
3304 enum mult_variant variant;
3305 struct algorithm algorithm;
3307 /* Special case powers of two. */
3308 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3310 op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
3311 return expand_shift (LSHIFT_EXPR, mode, op0,
3312 build_int_cst (NULL_TREE, floor_log2 (coeff)),
3313 target, unsignedp);
3316 /* Exclude cost of op0 from max_cost to match the cost
3317 calculation of the synth_mult. */
3318 max_cost = mul_widen_cost[speed][mode];
3319 if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3320 max_cost))
3322 op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
3323 return expand_mult_const (mode, op0, coeff, target,
3324 &algorithm, variant);
3327 return expand_binop (mode, this_optab, op0, op1, target,
3328 unsignedp, OPTAB_LIB_WIDEN);
3331 /* Return the smallest n such that 2**n >= X. */
3333 int
3334 ceil_log2 (unsigned HOST_WIDE_INT x)
3336 return floor_log2 (x - 1) + 1;
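/* Examples (ours): ceil_log2 (1) == 0, ceil_log2 (4) == 2, and
   ceil_log2 (5) == 3.  The x - 1 matters: four is already a power of
   two, so no rounding up is needed there.  */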
3339 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
3340 replace division by D, and put the least significant N bits of the result
3341 in *MULTIPLIER_PTR and return the most significant bit.
3343 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
3344 needed precision is in PRECISION (should be <= N).
3346 PRECISION should be as small as possible so this function can choose
3347 multiplier more freely.
3349 The rounded-up logarithm of D is placed in *lgup_ptr. A shift count that
3350 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
3352 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
3353 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
3356 unsigned HOST_WIDE_INT
3357 choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
3358 rtx *multiplier_ptr, int *post_shift_ptr, int *lgup_ptr)
3360 HOST_WIDE_INT mhigh_hi, mlow_hi;
3361 unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
3362 int lgup, post_shift;
3363 int pow, pow2;
3364 unsigned HOST_WIDE_INT nl, dummy1;
3365 HOST_WIDE_INT nh, dummy2;
3367 /* lgup = ceil(log2(divisor)); */
3368 lgup = ceil_log2 (d);
3370 gcc_assert (lgup <= n);
3372 pow = n + lgup;
3373 pow2 = n + lgup - precision;
3375 /* We could handle this with some effort, but this case is much
3376 better handled directly with a scc insn, so rely on caller using
3377 that.  */
3378 gcc_assert (pow != 2 * HOST_BITS_PER_WIDE_INT);
3380 /* mlow = 2^(N + lgup)/d */
3381 if (pow >= HOST_BITS_PER_WIDE_INT)
3383 nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
3384 nl = 0;
3386 else
3388 nh = 0;
3389 nl = (unsigned HOST_WIDE_INT) 1 << pow;
3391 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
3392 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
3394 /* mhigh = (2^(N + lgup) + 2^N + lgup - precision)/d */
3395 if (pow2 >= HOST_BITS_PER_WIDE_INT)
3396 nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
3397 else
3398 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
3399 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
3400 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
3402 gcc_assert (!mhigh_hi || nh - d < d);
3403 gcc_assert (mhigh_hi <= 1 && mlow_hi <= 1);
3404 /* Assert that mlow < mhigh. */
3405 gcc_assert (mlow_hi < mhigh_hi
3406 || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo));
3408 /* If precision == N, then mlow, mhigh exceed 2^N
3409 (but they do not exceed 2^(N+1)). */
3411 /* Reduce to lowest terms. */
3412 for (post_shift = lgup; post_shift > 0; post_shift--)
3414 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
3415 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
3416 if (ml_lo >= mh_lo)
3417 break;
3419 mlow_hi = 0;
3420 mlow_lo = ml_lo;
3421 mhigh_hi = 0;
3422 mhigh_lo = mh_lo;
3425 *post_shift_ptr = post_shift;
3426 *lgup_ptr = lgup;
3427 if (n < HOST_BITS_PER_WIDE_INT)
3429 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
3430 *multiplier_ptr = GEN_INT (mhigh_lo & mask);
3431 return mhigh_lo >= mask;
3433 else
3435 *multiplier_ptr = GEN_INT (mhigh_lo);
3436 return mhigh_hi;
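/* Worked example (ours): for 32-bit unsigned division by d == 3 with
   n == precision == 32, the routine yields multiplier 0xAAAAAAAB and
   *POST_SHIFT_PTR == 1, so x / 3 == (x * 0xAAAAAAAB) >> 33.  A C
   sketch of the resulting code, assuming a 64-bit intermediate:

     unsigned div3 (unsigned x)
     {
       return (unsigned) ((x * 0xAAAAAAABULL) >> 33);
     }
*/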
3440 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
3441 congruent to 1 (mod 2**N). */
3443 static unsigned HOST_WIDE_INT
3444 invert_mod2n (unsigned HOST_WIDE_INT x, int n)
3446 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
3448 /* The algorithm notes that the choice y = x satisfies
3449 x*y == 1 mod 2^3, since x is assumed odd.
3450 Each iteration doubles the number of bits of significance in y. */
3452 unsigned HOST_WIDE_INT mask;
3453 unsigned HOST_WIDE_INT y = x;
3454 int nbit = 3;
3456 mask = (n == HOST_BITS_PER_WIDE_INT
3457 ? ~(unsigned HOST_WIDE_INT) 0
3458 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
3460 while (nbit < n)
3462 y = y * (2 - x*y) & mask;		/* Modulo 2^N */
3463 nbit *= 2;
3465 return y;
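/* Worked example (ours) for x == 3, n == 8: y starts at 3, a correct
   inverse mod 2^3; one step gives y == 235, valid mod 2^6; a second
   gives y == 171, valid mod 2^8 since 3 * 171 == 513 == 2 * 256 + 1.
   A standalone C sketch of the same loop:

     unsigned inv_mod256 (unsigned x)   // x must be odd
     {
       unsigned y = x;
       int nbit = 3;
       while (nbit < 8)
         {
           y = y * (2 - x * y) & 0xff;
           nbit *= 2;
         }
       return y;
     }
*/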
3468 /* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
3469 flavor of OP0 and OP1. ADJ_OPERAND is already the high half of the
3470 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
3471 to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
3472 become signed.
3474 The result is put in TARGET if that is convenient.
3476 MODE is the mode of operation. */
3479 expand_mult_highpart_adjust (enum machine_mode mode, rtx adj_operand, rtx op0,
3480 rtx op1, rtx target, int unsignedp)
3482 rtx tem;
3483 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
3485 tem = expand_shift (RSHIFT_EXPR, mode, op0,
3486 build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
3487 NULL_RTX, 0);
3488 tem = expand_and (mode, tem, op1, NULL_RTX);
3489 adj_operand
3490 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3491 adj_operand);
3493 tem = expand_shift (RSHIFT_EXPR, mode, op1,
3494 build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
3495 NULL_RTX, 0);
3496 tem = expand_and (mode, tem, op0, NULL_RTX);
3497 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3498 target);
3500 return target;
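/* The identity behind the adjustment (our note): for N-bit operands,
   highpart_signed (a, b) == highpart_unsigned (a, b)
                             - (a < 0 ? b : 0) - (b < 0 ? a : 0)
   modulo 2^N, and reading the equation the other way converts a signed
   high part to an unsigned one by adding the two correction terms,
   which is why ADJ_CODE is PLUS when UNSIGNEDP is set.  */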
3503 /* Subroutine of expand_mult_highpart. Return the MODE high part of OP. */
3506 extract_high_half (enum machine_mode mode, rtx op)
3508 enum machine_mode wider_mode;
3510 if (mode == word_mode)
3511 return gen_highpart (mode, op);
3513 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3515 wider_mode = GET_MODE_WIDER_MODE (mode);
3516 op = expand_shift (RSHIFT_EXPR, wider_mode, op,
3517 build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode)), 0, 1);
3518 return convert_modes (mode, wider_mode, op, 0);
3521 /* Like expand_mult_highpart, but only consider using a multiplication
3522 optab. OP1 is an rtx for the constant operand. */
3525 expand_mult_highpart_optab (enum machine_mode mode, rtx op0, rtx op1,
3526 rtx target, int unsignedp, int max_cost)
3528 rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode);
3529 enum machine_mode wider_mode;
3530 optab moptab;
3531 rtx tem;
3532 int size;
3533 bool speed = optimize_insn_for_speed_p ();
3535 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3537 wider_mode = GET_MODE_WIDER_MODE (mode);
3538 size = GET_MODE_BITSIZE (mode);
3540 /* Firstly, try using a multiplication insn that only generates the needed
3541 high part of the product, and in the sign flavor of unsignedp. */
3542 if (mul_highpart_cost[speed][mode] < max_cost)
3544 moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
3545 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3546 unsignedp, OPTAB_DIRECT);
3547 if (tem)
3548 return tem;
3551 /* Secondly, same as above, but use sign flavor opposite of unsignedp.
3552 Need to adjust the result after the multiplication. */
3553 if (size - 1 < BITS_PER_WORD
3554 && (mul_highpart_cost[speed][mode] + 2 * shift_cost[speed][mode][size-1]
3555 + 4 * add_cost[speed][mode] < max_cost))
3557 moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
3558 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3559 unsignedp, OPTAB_DIRECT);
3560 if (tem)
3561 /* We used the wrong signedness. Adjust the result. */
3562 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3563 tem, unsignedp);
3566 /* Try widening multiplication. */
3567 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
3568 if (optab_handler (moptab, wider_mode) != CODE_FOR_nothing
3569 && mul_widen_cost[speed][wider_mode] < max_cost)
3571 tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
3572 unsignedp, OPTAB_WIDEN);
3573 if (tem)
3574 return extract_high_half (mode, tem);
3577 /* Try widening the mode and perform a non-widening multiplication. */
3578 if (optab_handler (smul_optab, wider_mode) != CODE_FOR_nothing
3579 && size - 1 < BITS_PER_WORD
3580 && mul_cost[speed][wider_mode] + shift_cost[speed][mode][size-1] < max_cost)
3582 rtx insns, wop0, wop1;
3584 /* We need to widen the operands, for example to ensure the
3585 constant multiplier is correctly sign or zero extended.
3586 Use a sequence to clean-up any instructions emitted by
3587 the conversions if things don't work out. */
3588 start_sequence ();
3589 wop0 = convert_modes (wider_mode, mode, op0, unsignedp);
3590 wop1 = convert_modes (wider_mode, mode, op1, unsignedp);
3591 tem = expand_binop (wider_mode, smul_optab, wop0, wop1, 0,
3592 unsignedp, OPTAB_WIDEN);
3593 insns = get_insns ();
3594 end_sequence ();
3596 if (tem)
3598 emit_insn (insns);
3599 return extract_high_half (mode, tem);
3603 /* Try widening multiplication of opposite signedness, and adjust. */
3604 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
3605 if (optab_handler (moptab, wider_mode) != CODE_FOR_nothing
3606 && size - 1 < BITS_PER_WORD
3607 && (mul_widen_cost[speed][wider_mode] + 2 * shift_cost[speed][mode][size-1]
3608 + 4 * add_cost[speed][mode] < max_cost))
3610 tem = expand_binop (wider_mode, moptab, op0, narrow_op1,
3611 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
3612 if (tem != 0)
3614 tem = extract_high_half (mode, tem);
3615 /* We used the wrong signedness. Adjust the result. */
3616 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3617 target, unsignedp);
3621 return 0;
3624 /* Emit code to multiply OP0 and OP1 (where OP1 is an integer constant),
3625 putting the high half of the result in TARGET if that is convenient,
3626 and return where the result is.  If the operation can not be performed,
3627 0 is returned.
3629 MODE is the mode of operation and result.
3631 UNSIGNEDP nonzero means unsigned multiply.
3633 MAX_COST is the total allowed cost for the expanded RTL. */
3636 expand_mult_highpart (enum machine_mode mode, rtx op0, rtx op1,
3637 rtx target, int unsignedp, int max_cost)
3639 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
3640 unsigned HOST_WIDE_INT cnst1;
3641 int extra_cost;
3642 bool sign_adjust = false;
3643 enum mult_variant variant;
3644 struct algorithm alg;
3645 rtx tem;
3646 bool speed = optimize_insn_for_speed_p ();
3648 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3649 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT.  */
3650 gcc_assert (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT);
3652 cnst1 = INTVAL (op1) & GET_MODE_MASK (mode);
3654 /* We can't optimize modes wider than BITS_PER_WORD.
3655 ??? We might be able to perform double-word arithmetic if
3656 mode == word_mode, however all the cost calculations in
3657 synth_mult etc. assume single-word operations. */
3658 if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
3659 return expand_mult_highpart_optab (mode, op0, op1, target,
3660 unsignedp, max_cost);
3662 extra_cost = shift_cost[speed][mode][GET_MODE_BITSIZE (mode) - 1];
3664 /* Check whether we try to multiply by a negative constant. */
3665 if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
3667 sign_adjust = true;
3668 extra_cost += add_cost[speed][mode];
3671 /* See whether shift/add multiplication is cheap enough. */
3672 if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
3673 max_cost - extra_cost))
3675 /* See whether the specialized multiplication optabs are
3676 cheaper than the shift/add version. */
3677 tem = expand_mult_highpart_optab (mode, op0, op1, target, unsignedp,
3678 alg.cost.cost + extra_cost);
3679 if (tem)
3680 return tem;
3682 tem = convert_to_mode (wider_mode, op0, unsignedp);
3683 tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
3684 tem = extract_high_half (mode, tem);
3686 /* Adjust result for signedness. */
3687 if (sign_adjust)
3688 tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);
3689 return tem;
3692 return expand_mult_highpart_optab (mode, op0, op1, target,
3693 unsignedp, max_cost);
3697 /* Expand signed modulus of OP0 by a power of two D in mode MODE. */
3700 expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
3702 unsigned HOST_WIDE_INT masklow, maskhigh;
3703 rtx result, temp, shift, label;
3704 int logd;
3706 logd = floor_log2 (d);
3707 result = gen_reg_rtx (mode);
3709 /* Avoid conditional branches when they're expensive. */
3710 if (BRANCH_COST (optimize_insn_for_speed_p (), false) >= 2
3711 && optimize_insn_for_speed_p ())
3713 rtx signmask = emit_store_flag (result, LT, op0, const0_rtx,
3714 mode, 0, -1);
3715 if (signmask)
3717 signmask = force_reg (mode, signmask);
3718 masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
3719 shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd);
3721 /* Use the rtx_cost of a LSHIFTRT instruction to determine
3722 which instruction sequence to use. If logical right shifts
3723 are expensive then use 2 XORs, 2 SUBs and an AND, otherwise
3724 use a LSHIFTRT, 1 ADD, 1 SUB and an AND. */
3726 temp = gen_rtx_LSHIFTRT (mode, result, shift);
3727 if (optab_handler (lshr_optab, mode) == CODE_FOR_nothing
3728 || rtx_cost (temp, SET, optimize_insn_for_speed_p ()) > COSTS_N_INSNS (2))
3730 temp = expand_binop (mode, xor_optab, op0, signmask,
3731 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3732 temp = expand_binop (mode, sub_optab, temp, signmask,
3733 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3734 temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
3735 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3736 temp = expand_binop (mode, xor_optab, temp, signmask,
3737 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3738 temp = expand_binop (mode, sub_optab, temp, signmask,
3739 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3741 else
3743 signmask = expand_binop (mode, lshr_optab, signmask, shift,
3744 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3745 signmask = force_reg (mode, signmask);
3747 temp = expand_binop (mode, add_optab, op0, signmask,
3748 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3749 temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
3750 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3751 temp = expand_binop (mode, sub_optab, temp, signmask,
3752 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3754 return temp;
3758 /* Mask contains the mode's signbit and the significant bits of the
3759 modulus. By including the signbit in the operation, many targets
3760 can avoid an explicit compare operation in the following comparison
3761 against zero.  */
3763 masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
3764 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3766 masklow |= (HOST_WIDE_INT) -1 << (GET_MODE_BITSIZE (mode) - 1);
3767 maskhigh = -1;
3769 else
3770 maskhigh = (HOST_WIDE_INT) -1
3771 << (GET_MODE_BITSIZE (mode) - HOST_BITS_PER_WIDE_INT - 1);
3773 temp = expand_binop (mode, and_optab, op0,
3774 immed_double_const (masklow, maskhigh, mode),
3775 result, 1, OPTAB_LIB_WIDEN);
3776 if (temp != result)
3777 emit_move_insn (result, temp);
3779 label = gen_label_rtx ();
3780 do_cmp_and_jump (result, const0_rtx, GE, mode, label);
3782 temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
3783 0, OPTAB_LIB_WIDEN);
3784 masklow = (HOST_WIDE_INT) -1 << logd;
3785 maskhigh = -1;
3786 temp = expand_binop (mode, ior_optab, temp,
3787 immed_double_const (masklow, maskhigh, mode),
3788 result, 1, OPTAB_LIB_WIDEN);
3789 temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
3790 0, OPTAB_LIB_WIDEN);
3792 emit_move_insn (result, temp);
3797 /* Expand signed division of OP0 by a power of two D in mode MODE.
3798 This routine is only called for positive values of D. */
3801 expand_sdiv_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
3807 logd = floor_log2 (d);
3808 shift = build_int_cst (NULL_TREE, logd);
3811 && BRANCH_COST (optimize_insn_for_speed_p (),
3814 temp = gen_reg_rtx (mode);
3815 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, 1);
3816 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3817 0, OPTAB_LIB_WIDEN);
3818 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
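/* The store-flag result is 0 or 1, which is the right bias only for
   d == 2, the case handled here: e.g. op0 = -7 gives
   (-7 + 1) >> 1 = -3, matching truncating division.  */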
3821 #ifdef HAVE_conditional_move
3822 if (BRANCH_COST (optimize_insn_for_speed_p (), false)
3827 /* ??? emit_conditional_move forces a stack adjustment via
3828 compare_from_rtx, so if the sequence is discarded, it will
3829 be lost. Do it now instead. */
3830 do_pending_stack_adjust ();
3833 temp2 = copy_to_mode_reg (mode, op0);
3834 temp = expand_binop (mode, add_optab, temp2, GEN_INT (d-1),
3835 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3836 temp = force_reg (mode, temp);
3838 /* Construct "temp2 = (temp2 < 0) ? temp : temp2". */
3839 temp2 = emit_conditional_move (temp2, LT, temp2, const0_rtx,
3840 mode, temp, temp2, mode, 0);
3843 rtx seq = get_insns ();
3846 return expand_shift (RSHIFT_EXPR, mode, temp2, shift, NULL_RTX, 0);
3852 if (BRANCH_COST (optimize_insn_for_speed_p (),
3855 int ushift = GET_MODE_BITSIZE (mode) - logd;
3857 temp = gen_reg_rtx (mode);
3858 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, -1);
3859 if (shift_cost[optimize_insn_for_speed_p ()][mode][ushift] > COSTS_N_INSNS (1))
3860 temp = expand_binop (mode, and_optab, temp, GEN_INT (d - 1),
3861 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3863 temp = expand_shift (RSHIFT_EXPR, mode, temp,
3864 build_int_cst (NULL_TREE, ushift),
3866 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3867 0, OPTAB_LIB_WIDEN);
3868 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
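/* E.g. d = 4, op0 = -7: temp is -1, masked (or logically shifted) down
   to the bias d - 1 = 3, and (-7 + 3) >> 2 = -1 = trunc (-7 / 4).  */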
3871 label = gen_label_rtx ();
3872 temp = copy_to_mode_reg (mode, op0);
3873 do_cmp_and_jump (temp, const0_rtx, GE, mode, label);
3874 expand_inc (temp, GEN_INT (d - 1));
3876 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
3879 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
3880 if that is convenient, and returning where the result is.
3881 You may request either the quotient or the remainder as the result;
3882 specify REM_FLAG nonzero to get the remainder.
3884 CODE is the expression code for which kind of division this is;
3885 it controls how rounding is done. MODE is the machine mode to use.
3886 UNSIGNEDP nonzero means do unsigned division. */
3888 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
3889 and then correct it by or'ing in missing high bits
3890 if result of ANDI is nonzero.
3891 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
3892 This could optimize to a bfexts instruction.
3893 But C doesn't use these operations, so their optimizations are left for later. */
3895 /* ??? For modulo, we don't actually need the highpart of the first product,
3896 the low part will do nicely. And for small divisors, the second multiply
3897 can also be a low-part only multiply or even be completely left out.
3898 E.g. to calculate the remainder of a division by 3 with a 32 bit
3899 multiply, multiply with 0x55555556 and extract the upper two bits;
3900 the result is exact for inputs up to 0x1fffffff.
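For illustration, x = 5: the low part of 5 * 0x55555556 is
0xAAAAAAAE, whose upper two bits are binary 10 = 2 = 5 mod 3.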
3901 The input range can be reduced by using cross-sum rules.
3902 For odd divisors >= 3, the following table gives right shift counts
3903 so that if a number is shifted by an integer multiple of the given
3904 amount, the remainder stays the same:
3905 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
3906 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
3907 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
3908 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
3909 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
3911 Cross-sum rules for even numbers can be derived by leaving as many bits
3912 to the right alone as the divisor has zeros to the right.
3913 E.g. if x is an unsigned 32 bit number:
3914 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
3918 expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
3919 rtx op0, rtx op1, rtx target, int unsignedp)
3921 enum machine_mode compute_mode;
3923 rtx quotient = 0, remainder = 0;
3927 optab optab1, optab2;
3928 int op1_is_constant, op1_is_pow2 = 0;
3929 int max_cost, extra_cost;
3930 static HOST_WIDE_INT last_div_const = 0;
3931 static HOST_WIDE_INT ext_op1;
3932 bool speed = optimize_insn_for_speed_p ();
3934 op1_is_constant = CONST_INT_P (op1);
3935 if (op1_is_constant)
3937 ext_op1 = INTVAL (op1);
3939 ext_op1 &= GET_MODE_MASK (mode);
3940 op1_is_pow2 = ((EXACT_POWER_OF_2_OR_ZERO_P (ext_op1)
3941 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1))));
3945 /* This is the structure of expand_divmod:
3947 First comes code to fix up the operands so we can perform the operations
3948 correctly and efficiently.
3950 Second comes a switch statement with code specific for each rounding mode.
3951 For some special operands this code emits all RTL for the desired
3952 operation, for other cases, it generates only a quotient and stores it in
3953 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
3954 to indicate that it has not done anything.
3956 Last comes code that finishes the operation. If QUOTIENT is set and
3957 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
3958 QUOTIENT is not set, it is computed using trunc rounding.
3960 We try to generate special code for division and remainder when OP1 is a
3961 constant. If |OP1| = 2**n we can use shifts and some other fast
3962 operations. For other values of OP1, we compute a carefully selected
3963 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0 by m.
3966 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
3967 half of the product. Different strategies for generating the product are
3968 implemented in expand_mult_highpart.
3970 If what we actually want is the remainder, we generate that by another
3971 by-constant multiplication and a subtraction. */
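/* For instance, for unsigned SImode division by 5 a suitable
   approximation is m = 0xCCCCCCCD = ceil (2**34 / 5), giving
   OP0 / 5 == (OP0 * m) >> 34, i.e. the high half of the 64-bit
   product shifted right by 2.  */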
3973 /* We shouldn't be called with OP1 == const1_rtx, but some of the
3974 code below will malfunction if we are, so check here and handle
3975 the special case if so. */
3976 if (op1 == const1_rtx)
3977 return rem_flag ? const0_rtx : op0;
3979 /* When dividing by -1, we could get an overflow.
3980 negv_optab can handle overflows. */
3981 if (! unsignedp && op1 == constm1_rtx)
3985 return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
3986 ? negv_optab : neg_optab, op0, target, 0);
3990 /* Don't use the function value register as a target
3991 since we have to read it as well as write it,
3992 and function-inlining gets confused by this. */
3993 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
3994 /* Don't clobber an operand while doing a multi-step calculation. */
3995 || ((rem_flag || op1_is_constant)
3996 && (reg_mentioned_p (target, op0)
3997 || (MEM_P (op0) && MEM_P (target))))
3998 || reg_mentioned_p (target, op1)
3999 || (MEM_P (op1) && MEM_P (target))))
4002 /* Get the mode in which to perform this computation. Normally it will
4003 be MODE, but sometimes we can't do the desired operation in MODE.
4004 If so, pick a wider mode in which we can do the operation. Convert
4005 to that mode at the start to avoid repeated conversions.
4007 First see what operations we need. These depend on the expression
4008 we are evaluating. (We assume that divxx3 insns exist under the
4009 same conditions as modxx3 insns, and that these insns don't normally
4010 fail. If these assumptions are not correct, we may generate less
4011 efficient code in some cases.)
4013 Then see if we find a mode in which we can open-code that operation
4014 (either a division, modulus, or shift). Finally, check for the smallest
4015 mode for which we can do the operation with a library call. */
4017 /* We might want to refine this now that we have division-by-constant
4018 optimization. Since expand_mult_highpart tries so many variants, it is
4019 not straightforward to generalize this. Maybe we should make an array
4020 of possible modes in init_expmed? Save this for GCC 2.7. */
4022 optab1 = ((op1_is_pow2 && op1 != const0_rtx)
4023 ? (unsignedp ? lshr_optab : ashr_optab)
4024 : (unsignedp ? udiv_optab : sdiv_optab));
4025 optab2 = ((op1_is_pow2 && op1 != const0_rtx)
4027 : (unsignedp ? udivmod_optab : sdivmod_optab));
4029 for (compute_mode = mode; compute_mode != VOIDmode;
4030 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
4031 if (optab_handler (optab1, compute_mode) != CODE_FOR_nothing
4032 || optab_handler (optab2, compute_mode) != CODE_FOR_nothing)
4035 if (compute_mode == VOIDmode)
4036 for (compute_mode = mode; compute_mode != VOIDmode;
4037 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
4038 if (optab_libfunc (optab1, compute_mode)
4039 || optab_libfunc (optab2, compute_mode))
4042 /* If we still couldn't find a mode, use MODE, but expand_binop will probably die anyway. */
4044 if (compute_mode == VOIDmode)
4045 compute_mode = mode;
4047 if (target && GET_MODE (target) == compute_mode)
4050 tquotient = gen_reg_rtx (compute_mode);
4052 size = GET_MODE_BITSIZE (compute_mode);
4053 #if 0
4054 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
4055 (mode), and thereby get better code when OP1 is a constant. Do that
4056 later. It will require going over all usages of SIZE below. */
4057 size = GET_MODE_BITSIZE (mode);
4058 #endif
4060 /* Only deduct something for a REM if the last divide done was
4061 for a different constant. Then set the constant of the last divide. */
4063 max_cost = unsignedp ? udiv_cost[speed][compute_mode] : sdiv_cost[speed][compute_mode];
4064 if (rem_flag && ! (last_div_const != 0 && op1_is_constant
4065 && INTVAL (op1) == last_div_const))
4066 max_cost -= mul_cost[speed][compute_mode] + add_cost[speed][compute_mode];
4068 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
4070 /* Now convert to the best mode to use. */
4071 if (compute_mode != mode)
4073 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
4074 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
4076 /* convert_modes may have placed op1 into a register, so we
4077 must recompute the following. */
4078 op1_is_constant = CONST_INT_P (op1);
4079 op1_is_pow2 = (op1_is_constant
4080 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
4082 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1)))))) ;
4085 /* If one of the operands is a volatile MEM, copy it into a register. */
4087 if (MEM_P (op0) && MEM_VOLATILE_P (op0))
4088 op0 = force_reg (compute_mode, op0);
4089 if (MEM_P (op1) && MEM_VOLATILE_P (op1))
4090 op1 = force_reg (compute_mode, op1);
4092 /* If we need the remainder or if OP1 is constant, we need to
4093 put OP0 in a register in case it has any queued subexpressions. */
4094 if (rem_flag || op1_is_constant)
4095 op0 = force_reg (compute_mode, op0);
4097 last = get_last_insn ();
4099 /* Promote floor rounding to trunc rounding for unsigned operations. */
4102 if (code == FLOOR_DIV_EXPR)
4103 code = TRUNC_DIV_EXPR;
4104 if (code == FLOOR_MOD_EXPR)
4105 code = TRUNC_MOD_EXPR;
4106 if (code == EXACT_DIV_EXPR && op1_is_pow2)
4107 code = TRUNC_DIV_EXPR;
4110 if (op1 != const0_rtx)
4113 case TRUNC_MOD_EXPR:
4114 case TRUNC_DIV_EXPR:
4115 if (op1_is_constant)
4119 unsigned HOST_WIDE_INT mh;
4120 int pre_shift, post_shift;
4123 unsigned HOST_WIDE_INT d = (INTVAL (op1)
4124 & GET_MODE_MASK (compute_mode));
4126 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
4128 pre_shift = floor_log2 (d);
4132 = expand_binop (compute_mode, and_optab, op0,
4133 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
4137 return gen_lowpart (mode, remainder);
4139 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4140 build_int_cst (NULL_TREE,
4144 else if (size <= HOST_BITS_PER_WIDE_INT)
4146 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
4148 /* Most significant bit of divisor is set; emit an scc insn. */
4150 quotient = emit_store_flag_force (tquotient, GEU, op0, op1,
4151 compute_mode, 1, 1);
4155 /* Find a suitable multiplier and right shift count
4156 instead of multiplying with D. */
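/* E.g. for unsigned SImode division by 7 the chosen multiplier is the
   33-bit value 0x124924925 with a post-shift of 3; its low 32 bits
   0x24924925 come back in ML with MH nonzero, selecting the add/shift
   fixup below: 100 / 7 becomes t1 = highpart (100 * 0x24924925) = 14,
   then (14 + ((100 - 14) >> 1)) >> 2 = 14.  */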
4158 mh = choose_multiplier (d, size, size,
4159 &ml, &post_shift, &dummy);
4161 /* If the suggested multiplier is more than SIZE bits,
4162 we can do better for even divisors, using an
4163 initial right shift. */
4164 if (mh != 0 && (d & 1) == 0)
4166 pre_shift = floor_log2 (d & -d);
4167 mh = choose_multiplier (d >> pre_shift, size,
4169 &ml, &post_shift, &dummy);
4179 if (post_shift - 1 >= BITS_PER_WORD)
4183 = (shift_cost[speed][compute_mode][post_shift - 1]
4184 + shift_cost[speed][compute_mode][1]
4185 + 2 * add_cost[speed][compute_mode]);
4186 t1 = expand_mult_highpart (compute_mode, op0, ml,
4188 max_cost - extra_cost);
4191 t2 = force_operand (gen_rtx_MINUS (compute_mode,
4195 (RSHIFT_EXPR, compute_mode, t2,
4196 build_int_cst (NULL_TREE, 1),
4198 t4 = force_operand (gen_rtx_PLUS (compute_mode,
4201 quotient = expand_shift
4202 (RSHIFT_EXPR, compute_mode, t4,
4203 build_int_cst (NULL_TREE, post_shift - 1),
4210 if (pre_shift >= BITS_PER_WORD
4211 || post_shift >= BITS_PER_WORD)
4215 (RSHIFT_EXPR, compute_mode, op0,
4216 build_int_cst (NULL_TREE, pre_shift),
4219 = (shift_cost[speed][compute_mode][pre_shift]
4220 + shift_cost[speed][compute_mode][post_shift]);
4221 t2 = expand_mult_highpart (compute_mode, t1, ml,
4223 max_cost - extra_cost);
4226 quotient = expand_shift
4227 (RSHIFT_EXPR, compute_mode, t2,
4228 build_int_cst (NULL_TREE, post_shift),
4233 else /* Too wide mode to use tricky code */
4236 insn = get_last_insn ();
4238 && (set = single_set (insn)) != 0
4239 && SET_DEST (set) == quotient)
4240 set_unique_reg_note (insn,
4242 gen_rtx_UDIV (compute_mode, op0, op1));
4244 else /* TRUNC_DIV, signed */
4246 unsigned HOST_WIDE_INT ml;
4247 int lgup, post_shift;
4249 HOST_WIDE_INT d = INTVAL (op1);
4250 unsigned HOST_WIDE_INT abs_d;
4252 /* Since d might be INT_MIN, we have to cast to
4253 unsigned HOST_WIDE_INT before negating to avoid
4254 undefined signed overflow. */
4256 ? (unsigned HOST_WIDE_INT) d
4257 : - (unsigned HOST_WIDE_INT) d);
4259 /* n rem d = n rem -d */
4260 if (rem_flag && d < 0)
4263 op1 = gen_int_mode (abs_d, compute_mode);
4269 quotient = expand_unop (compute_mode, neg_optab, op0,
4271 else if (HOST_BITS_PER_WIDE_INT >= size
4272 && abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
4274 /* This case is not handled correctly below. */
4275 quotient = emit_store_flag (tquotient, EQ, op0, op1,
4276 compute_mode, 1, 1);
4280 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
4281 && (rem_flag ? smod_pow2_cheap[speed][compute_mode]
4282 : sdiv_pow2_cheap[speed][compute_mode])
4283 /* We assume that the cheap metric is true if the
4284 optab has an expander for this mode. */
4285 && ((optab_handler ((rem_flag ? smod_optab
4288 != CODE_FOR_nothing)
4289 || (optab_handler (sdivmod_optab,
4291 != CODE_FOR_nothing)))
4293 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
4297 remainder = expand_smod_pow2 (compute_mode, op0, d);
4299 return gen_lowpart (mode, remainder);
4302 if (sdiv_pow2_cheap[speed][compute_mode]
4303 && ((optab_handler (sdiv_optab, compute_mode)
4304 != CODE_FOR_nothing)
4305 || (optab_handler (sdivmod_optab, compute_mode)
4306 != CODE_FOR_nothing)))
4307 quotient = expand_divmod (0, TRUNC_DIV_EXPR,
4309 gen_int_mode (abs_d,
4313 quotient = expand_sdiv_pow2 (compute_mode, op0, abs_d);
4315 /* We have computed OP0 / abs(OP1). If OP1 is negative,
4316 negate the quotient. */
4319 insn = get_last_insn ();
4321 && (set = single_set (insn)) != 0
4322 && SET_DEST (set) == quotient
4323 && abs_d < ((unsigned HOST_WIDE_INT) 1
4324 << (HOST_BITS_PER_WIDE_INT - 1)))
4325 set_unique_reg_note (insn,
4327 gen_rtx_DIV (compute_mode,
4334 quotient = expand_unop (compute_mode, neg_optab,
4335 quotient, quotient, 0);
4338 else if (size <= HOST_BITS_PER_WIDE_INT)
4340 choose_multiplier (abs_d, size, size - 1,
4341 &mlr, &post_shift, &lgup);
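/* E.g. for d = 7 this yields the multiplier 0x92492493 with a
   post-shift of 2; since that value has bit 31 set, the second
   sequence below applies: -100 / 7 becomes
   t1 = highpart (-100 * (0x92492493 - 2**32)) = 42, t2 = 42 + -100 = -58,
   t3 = -58 >> 2 = -15, t4 = -100 >> 31 = -1, and t3 - t4 = -14.  */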
4342 ml = (unsigned HOST_WIDE_INT) INTVAL (mlr);
4343 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
4347 if (post_shift >= BITS_PER_WORD
4348 || size - 1 >= BITS_PER_WORD)
4351 extra_cost = (shift_cost[speed][compute_mode][post_shift]
4352 + shift_cost[speed][compute_mode][size - 1]
4353 + add_cost[speed][compute_mode]);
4354 t1 = expand_mult_highpart (compute_mode, op0, mlr,
4356 max_cost - extra_cost);
4360 (RSHIFT_EXPR, compute_mode, t1,
4361 build_int_cst (NULL_TREE, post_shift),
4364 (RSHIFT_EXPR, compute_mode, op0,
4365 build_int_cst (NULL_TREE, size - 1),
4369 = force_operand (gen_rtx_MINUS (compute_mode,
4374 = force_operand (gen_rtx_MINUS (compute_mode,
4382 if (post_shift >= BITS_PER_WORD
4383 || size - 1 >= BITS_PER_WORD)
4386 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
4387 mlr = gen_int_mode (ml, compute_mode);
4388 extra_cost = (shift_cost[speed][compute_mode][post_shift]
4389 + shift_cost[speed][compute_mode][size - 1]
4390 + 2 * add_cost[speed][compute_mode]);
4391 t1 = expand_mult_highpart (compute_mode, op0, mlr,
4393 max_cost - extra_cost);
4396 t2 = force_operand (gen_rtx_PLUS (compute_mode,
4400 (RSHIFT_EXPR, compute_mode, t2,
4401 build_int_cst (NULL_TREE, post_shift),
4404 (RSHIFT_EXPR, compute_mode, op0,
4405 build_int_cst (NULL_TREE, size - 1),
4409 = force_operand (gen_rtx_MINUS (compute_mode,
4414 = force_operand (gen_rtx_MINUS (compute_mode,
4419 else /* Too wide mode to use tricky code */
4422 insn = get_last_insn ();
4424 && (set = single_set (insn)) != 0
4425 && SET_DEST (set) == quotient)
4426 set_unique_reg_note (insn,
4428 gen_rtx_DIV (compute_mode, op0, op1));
4433 delete_insns_since (last);
4436 case FLOOR_DIV_EXPR:
4437 case FLOOR_MOD_EXPR:
4438 /* We will come here only for signed operations. */
4439 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
4441 unsigned HOST_WIDE_INT mh;
4442 int pre_shift, lgup, post_shift;
4443 HOST_WIDE_INT d = INTVAL (op1);
4448 /* We could just as easily deal with negative constants here,
4449 but it does not seem worth the trouble for GCC 2.6. */
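/* For positive d = 2**k the arithmetic right shift below is already
   floor division, e.g. -7 >> 2 == -2 == floor (-7 / 4).  */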
4450 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
4452 pre_shift = floor_log2 (d);
4455 remainder = expand_binop (compute_mode, and_optab, op0,
4456 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
4457 remainder, 0, OPTAB_LIB_WIDEN);
4459 return gen_lowpart (mode, remainder);
4461 quotient = expand_shift
4462 (RSHIFT_EXPR, compute_mode, op0,
4463 build_int_cst (NULL_TREE, pre_shift),
4470 mh = choose_multiplier (d, size, size - 1,
4471 &ml, &post_shift, &lgup);
4474 if (post_shift < BITS_PER_WORD
4475 && size - 1 < BITS_PER_WORD)
4478 (RSHIFT_EXPR, compute_mode, op0,
4479 build_int_cst (NULL_TREE, size - 1),
4481 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
4482 NULL_RTX, 0, OPTAB_WIDEN);
4483 extra_cost = (shift_cost[speed][compute_mode][post_shift]
4484 + shift_cost[speed][compute_mode][size - 1]
4485 + 2 * add_cost[speed][compute_mode]);
4486 t3 = expand_mult_highpart (compute_mode, t2, ml,
4488 max_cost - extra_cost);
4492 (RSHIFT_EXPR, compute_mode, t3,
4493 build_int_cst (NULL_TREE, post_shift),
4495 quotient = expand_binop (compute_mode, xor_optab,
4496 t4, t1, tquotient, 0,
4504 rtx nsign, t1, t2, t3, t4;
4505 t1 = force_operand (gen_rtx_PLUS (compute_mode,
4506 op0, constm1_rtx), NULL_RTX);
4507 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
4509 nsign = expand_shift
4510 (RSHIFT_EXPR, compute_mode, t2,
4511 build_int_cst (NULL_TREE, size - 1),
4513 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
4515 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
4520 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
4522 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4531 delete_insns_since (last);
4533 /* Try using an instruction that produces both the quotient and
4534 remainder, using truncation. We can easily compensate the quotient
4535 or remainder to get floor rounding, once we have the remainder.
4536 Notice that we also compute the final remainder value here,
4537 and return the result right away. */
4538 if (target == 0 || GET_MODE (target) != compute_mode)
4539 target = gen_reg_rtx (compute_mode);
4544 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4545 quotient = gen_reg_rtx (compute_mode);
4550 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4551 remainder = gen_reg_rtx (compute_mode);
4554 if (expand_twoval_binop (sdivmod_optab, op0, op1,
4555 quotient, remainder, 0))
4557 /* This could be computed with a branch-less sequence.
4558 Save that for later. */
4560 rtx label = gen_label_rtx ();
4561 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
4562 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4563 NULL_RTX, 0, OPTAB_WIDEN);
4564 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
4565 expand_dec (quotient, const1_rtx);
4566 expand_inc (remainder, op1);
4568 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4571 /* No luck with division elimination or divmod. Have to do it
4572 by conditionally adjusting op0 *and* the result. */
4574 rtx label1, label2, label3, label4, label5;
4578 quotient = gen_reg_rtx (compute_mode);
4579 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4580 label1 = gen_label_rtx ();
4581 label2 = gen_label_rtx ();
4582 label3 = gen_label_rtx ();
4583 label4 = gen_label_rtx ();
4584 label5 = gen_label_rtx ();
4585 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4586 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
4587 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4588 quotient, 0, OPTAB_LIB_WIDEN);
4589 if (tem != quotient)
4590 emit_move_insn (quotient, tem);
4591 emit_jump_insn (gen_jump (label5));
4593 emit_label (label1);
4594 expand_inc (adjusted_op0, const1_rtx);
4595 emit_jump_insn (gen_jump (label4));
4597 emit_label (label2);
4598 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
4599 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4600 quotient, 0, OPTAB_LIB_WIDEN);
4601 if (tem != quotient)
4602 emit_move_insn (quotient, tem);
4603 emit_jump_insn (gen_jump (label5));
4605 emit_label (label3);
4606 expand_dec (adjusted_op0, const1_rtx);
4607 emit_label (label4);
4608 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4609 quotient, 0, OPTAB_LIB_WIDEN);
4610 if (tem != quotient)
4611 emit_move_insn (quotient, tem);
4612 expand_dec (quotient, const1_rtx);
4613 emit_label (label5);
4621 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
4624 unsigned HOST_WIDE_INT d = INTVAL (op1);
4625 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4626 build_int_cst (NULL_TREE, floor_log2 (d)),
4628 t2 = expand_binop (compute_mode, and_optab, op0,
4630 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4631 t3 = gen_reg_rtx (compute_mode);
4632 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4633 compute_mode, 1, 1);
4637 lab = gen_label_rtx ();
4638 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4639 expand_inc (t1, const1_rtx);
4644 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4650 /* Try using an instruction that produces both the quotient and
4651 remainder, using truncation. We can easily compensate the
4652 quotient or remainder to get ceiling rounding, once we have the
4653 remainder. Notice that we also compute the final remainder
4654 value here, and return the result right away. */
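/* The unsigned ceiling fixup below is quotient += 1 and
   remainder -= op1 whenever the truncating remainder is nonzero;
   e.g. 13 / 4 gives 3 rem 1, adjusted to 4 rem -3.  */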
4655 if (target == 0 || GET_MODE (target) != compute_mode)
4656 target = gen_reg_rtx (compute_mode);
4660 remainder = (REG_P (target)
4661 ? target : gen_reg_rtx (compute_mode));
4662 quotient = gen_reg_rtx (compute_mode);
4666 quotient = (REG_P (target)
4667 ? target : gen_reg_rtx (compute_mode));
4668 remainder = gen_reg_rtx (compute_mode);
4671 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
4674 /* This could be computed with a branch-less sequence.
4675 Save that for later. */
4676 rtx label = gen_label_rtx ();
4677 do_cmp_and_jump (remainder, const0_rtx, EQ,
4678 compute_mode, label);
4679 expand_inc (quotient, const1_rtx);
4680 expand_dec (remainder, op1);
4682 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4685 /* No luck with division elimination or divmod. Have to do it
4686 by conditionally adjusting op0 *and* the result. */
4689 rtx adjusted_op0, tem;
4691 quotient = gen_reg_rtx (compute_mode);
4692 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4693 label1 = gen_label_rtx ();
4694 label2 = gen_label_rtx ();
4695 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
4696 compute_mode, label1);
4697 emit_move_insn (quotient, const0_rtx);
4698 emit_jump_insn (gen_jump (label2));
4700 emit_label (label1);
4701 expand_dec (adjusted_op0, const1_rtx);
4702 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
4703 quotient, 1, OPTAB_LIB_WIDEN);
4704 if (tem != quotient)
4705 emit_move_insn (quotient, tem);
4706 expand_inc (quotient, const1_rtx);
4707 emit_label (label2);
4712 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
4713 && INTVAL (op1) >= 0)
4715 /* This is extremely similar to the code for the unsigned case
4716 above. For 2.7 we should merge these variants, but for
4717 2.6.1 I don't want to touch the code for unsigned since that
4718 gets used in C. The signed case will only be used by other languages (Ada). */
4722 unsigned HOST_WIDE_INT d = INTVAL (op1);
4723 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4724 build_int_cst (NULL_TREE, floor_log2 (d)),
4726 t2 = expand_binop (compute_mode, and_optab, op0,
4728 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4729 t3 = gen_reg_rtx (compute_mode);
4730 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4731 compute_mode, 1, 1);
4735 lab = gen_label_rtx ();
4736 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4737 expand_inc (t1, const1_rtx);
4742 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4748 /* Try using an instruction that produces both the quotient and
4749 remainder, using truncation. We can easily compensate the
4750 quotient or remainder to get ceiling rounding, once we have the
4751 remainder. Notice that we also compute the final remainder
4752 value here, and return the result right away. */
4753 if (target == 0 || GET_MODE (target) != compute_mode)
4754 target = gen_reg_rtx (compute_mode);
4757 remainder= (REG_P (target)
4758 ? target : gen_reg_rtx (compute_mode));
4759 quotient = gen_reg_rtx (compute_mode);
4763 quotient = (REG_P (target)
4764 ? target : gen_reg_rtx (compute_mode));
4765 remainder = gen_reg_rtx (compute_mode);
4768 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
4771 /* This could be computed with a branch-less sequence.
4772 Save that for later. */
4774 rtx label = gen_label_rtx ();
4775 do_cmp_and_jump (remainder, const0_rtx, EQ,
4776 compute_mode, label);
4777 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4778 NULL_RTX, 0, OPTAB_WIDEN);
4779 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
4780 expand_inc (quotient, const1_rtx);
4781 expand_dec (remainder, op1);
4783 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4786 /* No luck with division elimination or divmod. Have to do it
4787 by conditionally adjusting op0 *and* the result. */
4789 rtx label1, label2, label3, label4, label5;
4793 quotient = gen_reg_rtx (compute_mode);
4794 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4795 label1 = gen_label_rtx ();
4796 label2 = gen_label_rtx ();
4797 label3 = gen_label_rtx ();
4798 label4 = gen_label_rtx ();
4799 label5 = gen_label_rtx ();
4800 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4801 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
4802 compute_mode, label1);
4803 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4804 quotient, 0, OPTAB_LIB_WIDEN);
4805 if (tem != quotient)
4806 emit_move_insn (quotient, tem);
4807 emit_jump_insn (gen_jump (label5));
4809 emit_label (label1);
4810 expand_dec (adjusted_op0, const1_rtx);
4811 emit_jump_insn (gen_jump (label4));
4813 emit_label (label2);
4814 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
4815 compute_mode, label3);
4816 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4817 quotient, 0, OPTAB_LIB_WIDEN);
4818 if (tem != quotient)
4819 emit_move_insn (quotient, tem);
4820 emit_jump_insn (gen_jump (label5));
4822 emit_label (label3);
4823 expand_inc (adjusted_op0, const1_rtx);
4824 emit_label (label4);
4825 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4826 quotient, 0, OPTAB_LIB_WIDEN);
4827 if (tem != quotient)
4828 emit_move_insn (quotient, tem);
4829 expand_inc (quotient, const1_rtx);
4830 emit_label (label5);
4835 case EXACT_DIV_EXPR:
4836 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
4838 HOST_WIDE_INT d = INTVAL (op1);
4839 unsigned HOST_WIDE_INT ml;
4843 pre_shift = floor_log2 (d & -d);
4844 ml = invert_mod2n (d >> pre_shift, size);
4845 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4846 build_int_cst (NULL_TREE, pre_shift),
4847 NULL_RTX, unsignedp);
4848 quotient = expand_mult (compute_mode, t1,
4849 gen_int_mode (ml, compute_mode),
4852 insn = get_last_insn ();
4853 set_unique_reg_note (insn,
4855 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
4861 case ROUND_DIV_EXPR:
4862 case ROUND_MOD_EXPR:
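/* Round to nearest: compute the truncating quotient and remainder,
   then, when the remainder is more than half the divisor in magnitude,
   adjust the quotient toward the nearest multiple; e.g. unsigned
   7 / 4 gives 1 rem 3, adjusted below to 2 rem -1.  */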
4867 label = gen_label_rtx ();
4868 quotient = gen_reg_rtx (compute_mode);
4869 remainder = gen_reg_rtx (compute_mode);
4870 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
4873 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
4874 quotient, 1, OPTAB_LIB_WIDEN);
4875 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
4876 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4877 remainder, 1, OPTAB_LIB_WIDEN);
4879 tem = plus_constant (op1, -1);
4880 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4881 build_int_cst (NULL_TREE, 1),
4883 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
4884 expand_inc (quotient, const1_rtx);
4885 expand_dec (remainder, op1);
4890 rtx abs_rem, abs_op1, tem, mask;
4892 label = gen_label_rtx ();
4893 quotient = gen_reg_rtx (compute_mode);
4894 remainder = gen_reg_rtx (compute_mode);
4895 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
4898 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
4899 quotient, 0, OPTAB_LIB_WIDEN);
4900 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
4901 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4902 remainder, 0, OPTAB_LIB_WIDEN);
4904 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
4905 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
4906 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
4907 build_int_cst (NULL_TREE, 1),
4909 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
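/* Below, mask = (op0 ^ op1) >> (size - 1) is -1 exactly when the
   operands have opposite signs, and (x ^ mask) - mask conditionally
   negates x; so the quotient is adjusted by +1 or -1 and the
   remainder by -op1 or +op1 as appropriate.  */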
4910 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4911 NULL_RTX, 0, OPTAB_WIDEN);
4912 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4913 build_int_cst (NULL_TREE, size - 1),
4915 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
4916 NULL_RTX, 0, OPTAB_WIDEN);
4917 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4918 NULL_RTX, 0, OPTAB_WIDEN);
4919 expand_inc (quotient, tem);
4920 tem = expand_binop (compute_mode, xor_optab, mask, op1,
4921 NULL_RTX, 0, OPTAB_WIDEN);
4922 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4923 NULL_RTX, 0, OPTAB_WIDEN);
4924 expand_dec (remainder, tem);
4927 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4935 if (target && GET_MODE (target) != compute_mode)
4940 /* Try to produce the remainder without producing the quotient.
4941 If we seem to have a divmod pattern that does not require widening,
4942 don't try widening here. We should really have a WIDEN argument
4943 to expand_twoval_binop, since what we'd really like to do here is
4944 1) try a mod insn in compute_mode
4945 2) try a divmod insn in compute_mode
4946 3) try a div insn in compute_mode and multiply-subtract to get the remainder
4948 4) try the same things with widening allowed. */
4950 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4953 ((optab_handler (optab2, compute_mode)
4954 != CODE_FOR_nothing)
4955 ? OPTAB_DIRECT : OPTAB_WIDEN));
4958 /* No luck there. Can we do remainder and divide at once
4959 without a library call? */
4960 remainder = gen_reg_rtx (compute_mode);
4961 if (! expand_twoval_binop ((unsignedp
4965 NULL_RTX, remainder, unsignedp))
4970 return gen_lowpart (mode, remainder);
4973 /* Produce the quotient. Try a quotient insn, but not a library call.
4974 If we have a divmod in this mode, use it in preference to widening
4975 the div (for this test we assume it will not fail). Note that optab2
4976 is set to whichever of the two optabs the call below will use. */
4978 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
4979 op0, op1, rem_flag ? NULL_RTX : target,
4981 ((optab_handler (optab2, compute_mode)
4982 != CODE_FOR_nothing)
4983 ? OPTAB_DIRECT : OPTAB_WIDEN));
4987 /* No luck there. Try a quotient-and-remainder insn,
4988 keeping the quotient alone. */
4989 quotient = gen_reg_rtx (compute_mode);
4990 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
4992 quotient, NULL_RTX, unsignedp))
4996 /* Still no luck. If we are not computing the remainder,
4997 use a library call for the quotient. */
4998 quotient = sign_expand_binop (compute_mode,
4999 udiv_optab, sdiv_optab,
5001 unsignedp, OPTAB_LIB_WIDEN);
5008 if (target && GET_MODE (target) != compute_mode)
5013 /* No divide instruction either. Use library for remainder. */
5014 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
5016 unsignedp, OPTAB_LIB_WIDEN);
5017 /* No remainder function. Try a quotient-and-remainder
5018 function, keeping the remainder. */
5021 remainder = gen_reg_rtx (compute_mode);
5022 if (!expand_twoval_binop_libfunc
5023 (unsignedp ? udivmod_optab : sdivmod_optab,
5025 NULL_RTX, remainder,
5026 unsignedp ? UMOD : MOD))
5027 remainder = NULL_RTX;
5032 /* We divided. Now finish doing X - Y * (X / Y). */
5033 remainder = expand_mult (compute_mode, quotient, op1,
5034 NULL_RTX, unsignedp);
5035 remainder = expand_binop (compute_mode, sub_optab, op0,
5036 remainder, target, unsignedp,
5041 return gen_lowpart (mode, rem_flag ? remainder : quotient);
5044 /* Return a tree node with data type TYPE, describing the value of X.
5045 Usually this is a VAR_DECL, if there is no obvious better choice.
5046 X may be an expression; however, we only support those expressions
5047 generated by loop.c. */
5050 make_tree (tree type, rtx x)
5054 switch (GET_CODE (x))
5058 HOST_WIDE_INT hi = 0;
5061 && !(TYPE_UNSIGNED (type)
5062 && (GET_MODE_BITSIZE (TYPE_MODE (type))
5063 < HOST_BITS_PER_WIDE_INT)))
5066 t = build_int_cst_wide (type, INTVAL (x), hi);
5072 if (GET_MODE (x) == VOIDmode)
5073 t = build_int_cst_wide (type,
5074 CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
5079 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
5080 t = build_real (type, d);
5087 int units = CONST_VECTOR_NUNITS (x);
5088 tree itype = TREE_TYPE (type);
5093 /* Build a tree with vector elements. */
5094 for (i = units - 1; i >= 0; --i)
5096 rtx elt = CONST_VECTOR_ELT (x, i);
5097 t = tree_cons (NULL_TREE, make_tree (itype, elt), t);
5100 return build_vector (type, t);
5104 return fold_build2 (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
5105 make_tree (type, XEXP (x, 1)));
5108 return fold_build2 (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
5109 make_tree (type, XEXP (x, 1)));
5112 return fold_build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0)));
5115 return fold_build2 (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
5116 make_tree (type, XEXP (x, 1)));
5119 return fold_build2 (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
5120 make_tree (type, XEXP (x, 1)));
5123 t = unsigned_type_for (type);
5124 return fold_convert (type, build2 (RSHIFT_EXPR, t,
5125 make_tree (t, XEXP (x, 0)),
5126 make_tree (type, XEXP (x, 1))));
5129 t = signed_type_for (type);
5130 return fold_convert (type, build2 (RSHIFT_EXPR, t,
5131 make_tree (t, XEXP (x, 0)),
5132 make_tree (type, XEXP (x, 1))));
5135 if (TREE_CODE (type) != REAL_TYPE)
5136 t = signed_type_for (type);
5140 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
5141 make_tree (t, XEXP (x, 0)),
5142 make_tree (t, XEXP (x, 1))));
5144 t = unsigned_type_for (type);
5145 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
5146 make_tree (t, XEXP (x, 0)),
5147 make_tree (t, XEXP (x, 1))));
5151 t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
5152 GET_CODE (x) == ZERO_EXTEND);
5153 return fold_convert (type, make_tree (t, XEXP (x, 0)));
5156 return make_tree (type, XEXP (x, 0));
5159 t = SYMBOL_REF_DECL (x);
5161 return fold_convert (type, build_fold_addr_expr (t));
5162 /* else fall through. */
5165 t = build_decl (RTL_LOCATION (x), VAR_DECL, NULL_TREE, type);
5167 /* If TYPE is a POINTER_TYPE, we might need to convert X from
5168 address mode to pointer mode. */
5169 if (POINTER_TYPE_P (type))
5170 x = convert_memory_address_addr_space
5171 (TYPE_MODE (type), x, TYPE_ADDR_SPACE (TREE_TYPE (type)));
5173 /* Note that we do *not* use SET_DECL_RTL here, because we do not
5174 want set_decl_rtl to go adjusting REG_ATTRS for this temporary. */
5175 t->decl_with_rtl.rtl = x;
5181 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
5182 and returning TARGET.
5184 If TARGET is 0, a pseudo-register or constant is returned. */
5187 expand_and (enum machine_mode mode, rtx op0, rtx op1, rtx target)
5191 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
5192 tem = simplify_binary_operation (AND, mode, op0, op1);
5194 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
5198 else if (tem != target)
5199 emit_move_insn (target, tem);
5203 /* Helper function for emit_store_flag. */
5205 emit_cstore (rtx target, enum insn_code icode, enum rtx_code code,
5206 enum machine_mode mode, enum machine_mode compare_mode,
5207 int unsignedp, rtx x, rtx y, int normalizep,
5208 enum machine_mode target_mode)
5210 rtx op0, last, comparison, subtarget, pattern;
5211 enum machine_mode result_mode = insn_data[(int) icode].operand[0].mode;
5213 last = get_last_insn ();
5214 x = prepare_operand (icode, x, 2, mode, compare_mode, unsignedp);
5215 y = prepare_operand (icode, y, 3, mode, compare_mode, unsignedp);
5216 comparison = gen_rtx_fmt_ee (code, result_mode, x, y);
5218 || !insn_data[icode].operand[2].predicate
5219 (x, insn_data[icode].operand[2].mode)
5220 || !insn_data[icode].operand[3].predicate
5221 (y, insn_data[icode].operand[3].mode)
5222 || !insn_data[icode].operand[1].predicate (comparison, VOIDmode))
5224 delete_insns_since (last);
5228 if (target_mode == VOIDmode)
5229 target_mode = result_mode;
5231 target = gen_reg_rtx (target_mode);
5234 || !(insn_data[(int) icode].operand[0].predicate (target, result_mode)))
5235 subtarget = gen_reg_rtx (result_mode);
5239 pattern = GEN_FCN (icode) (subtarget, comparison, x, y);
5242 emit_insn (pattern);
5244 /* If we are converting to a wider mode, first convert to
5245 TARGET_MODE, then normalize. This produces better combining
5246 opportunities on machines that have a SIGN_EXTRACT when we are
5247 testing a single bit. This mostly benefits the 68k.
5249 If STORE_FLAG_VALUE does not have the sign bit set when
5250 interpreted in MODE, we can do this conversion as unsigned, which
5251 is usually more efficient. */
5252 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (result_mode))
5254 convert_move (target, subtarget,
5255 (GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT)
5256 && 0 == (STORE_FLAG_VALUE
5257 & ((HOST_WIDE_INT) 1
5258 << (GET_MODE_BITSIZE (result_mode) -1))));
5260 result_mode = target_mode;
5265 /* If we want to keep subexpressions around, don't reuse our last target. */
5270 /* Now normalize to the proper value in MODE. Sometimes we don't
5271 have to do anything. */
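/* E.g. if the scc produced STORE_FLAG_VALUE == 1 and NORMALIZEP is -1,
   the negation below converts 0/1 into 0/-1; if STORE_FLAG_VALUE is
   the sign bit, a right shift by the mode width minus one produces
   the 0/1 or 0/-1 result directly.  */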
5272 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
5274 /* STORE_FLAG_VALUE might be the most negative number, so write
5275 the comparison this way to avoid a compile-time warning. */
5276 else if (- normalizep == STORE_FLAG_VALUE)
5277 op0 = expand_unop (result_mode, neg_optab, op0, subtarget, 0);
5279 /* We don't want to use STORE_FLAG_VALUE < 0 below since this makes
5280 it hard to use a value of just the sign bit due to ANSI integer
5281 constant typing rules. */
5282 else if (GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT
5283 && (STORE_FLAG_VALUE
5284 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (result_mode) - 1))))
5285 op0 = expand_shift (RSHIFT_EXPR, result_mode, op0,
5286 size_int (GET_MODE_BITSIZE (result_mode) - 1), subtarget,
5290 gcc_assert (STORE_FLAG_VALUE & 1);
5292 op0 = expand_and (result_mode, op0, const1_rtx, subtarget);
5293 if (normalizep == -1)
5294 op0 = expand_unop (result_mode, neg_optab, op0, op0, 0);
5297 /* If we were converting to a smaller mode, do the conversion now. */
5298 if (target_mode != result_mode)
5300 convert_move (target, op0, 0);
5308 /* A subroutine of emit_store_flag only including "tricks" that do not
5309 need a recursive call. These are kept separate to avoid infinite loops. */
5313 emit_store_flag_1 (rtx target, enum rtx_code code, rtx op0, rtx op1,
5314 enum machine_mode mode, int unsignedp, int normalizep,
5315 enum machine_mode target_mode)
5318 enum insn_code icode;
5319 enum machine_mode compare_mode;
5320 enum mode_class mclass;
5321 enum rtx_code scode;
5325 code = unsigned_condition (code);
5326 scode = swap_condition (code);
5328 /* If one operand is constant, make it the second one. Only do this
5329 if the other operand is not constant as well. */
5331 if (swap_commutative_operands_p (op0, op1))
5336 code = swap_condition (code);
5339 if (mode == VOIDmode)
5340 mode = GET_MODE (op0);
5342 /* For some comparisons with 1 and -1, we can convert this to
5343 comparisons with zero. This will often produce more opportunities for
5344 store-flag insns. */
5349 if (op1 == const1_rtx)
5350 op1 = const0_rtx, code = LE;
5353 if (op1 == constm1_rtx)
5354 op1 = const0_rtx, code = LT;
5357 if (op1 == const1_rtx)
5358 op1 = const0_rtx, code = GT;
5361 if (op1 == constm1_rtx)
5362 op1 = const0_rtx, code = GE;
5365 if (op1 == const1_rtx)
5366 op1 = const0_rtx, code = NE;
5369 if (op1 == const1_rtx)
5370 op1 = const0_rtx, code = EQ;
5376 /* If we are comparing a double-word integer with zero or -1, we can
5377 convert the comparison into one involving a single word. */
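/* E.g. a DImode value is zero exactly when the IOR of its two
   word-sized halves is zero, and is -1 exactly when their AND is -1,
   so EQ/NE against those constants needs only one word_mode scc.  */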
5378 if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
5379 && GET_MODE_CLASS (mode) == MODE_INT
5380 && (!MEM_P (op0) || ! MEM_VOLATILE_P (op0)))
5382 if ((code == EQ || code == NE)
5383 && (op1 == const0_rtx || op1 == constm1_rtx))
5387 /* Do a logical OR or AND of the two words and compare the result. */
5389 op00 = simplify_gen_subreg (word_mode, op0, mode, 0);
5390 op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD);
5391 tem = expand_binop (word_mode,
5392 op1 == const0_rtx ? ior_optab : and_optab,
5393 op00, op01, NULL_RTX, unsignedp,
5397 tem = emit_store_flag (NULL_RTX, code, tem, op1, word_mode,
5398 unsignedp, normalizep);
5400 else if ((code == LT || code == GE) && op1 == const0_rtx)
5404 /* If testing the sign bit, can just test on high word. */
5405 op0h = simplify_gen_subreg (word_mode, op0, mode,
5406 subreg_highpart_offset (word_mode,
5408 tem = emit_store_flag (NULL_RTX, code, op0h, op1, word_mode,
5409 unsignedp, normalizep);
5416 if (target_mode == VOIDmode || GET_MODE (tem) == target_mode)
5419 target = gen_reg_rtx (target_mode);
5421 convert_move (target, tem,
5422 0 == ((normalizep ? normalizep : STORE_FLAG_VALUE)
5423 & ((HOST_WIDE_INT) 1
5424 << (GET_MODE_BITSIZE (word_mode) -1))));
5429 /* If this is A < 0 or A >= 0, we can do this by taking the ones
5430 complement of A (for GE) and shifting the sign bit to the low bit. */
5431 if (op1 == const0_rtx && (code == LT || code == GE)
5432 && GET_MODE_CLASS (mode) == MODE_INT
5433 && (normalizep || STORE_FLAG_VALUE == 1
5434 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5435 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
5436 == ((unsigned HOST_WIDE_INT) 1
5437 << (GET_MODE_BITSIZE (mode) - 1))))))
5444 /* If the result is to be wider than OP0, it is best to convert it
5445 first. If it is to be narrower, it is *incorrect* to convert it first. */
5447 else if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
5449 op0 = convert_modes (target_mode, mode, op0, 0);
5453 if (target_mode != mode)
5457 op0 = expand_unop (mode, one_cmpl_optab, op0,
5458 ((STORE_FLAG_VALUE == 1 || normalizep)
5459 ? 0 : subtarget), 0);
5461 if (STORE_FLAG_VALUE == 1 || normalizep)
5462 /* If we are supposed to produce a 0/1 value, we want to do
5463 a logical shift from the sign bit to the low-order bit; for
5464 a -1/0 value, we do an arithmetic shift. */
5465 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
5466 size_int (GET_MODE_BITSIZE (mode) - 1),
5467 subtarget, normalizep != -1);
5469 if (mode != target_mode)
5470 op0 = convert_modes (target_mode, mode, op0, 0);
5475 mclass = GET_MODE_CLASS (mode);
5476 for (compare_mode = mode; compare_mode != VOIDmode;
5477 compare_mode = GET_MODE_WIDER_MODE (compare_mode))
5479 enum machine_mode optab_mode = mclass == MODE_CC ? CCmode : compare_mode;
5480 icode = optab_handler (cstore_optab, optab_mode);
5481 if (icode != CODE_FOR_nothing)
5483 do_pending_stack_adjust ();
5484 tem = emit_cstore (target, icode, code, mode, compare_mode,
5485 unsignedp, op0, op1, normalizep, target_mode);
5489 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5491 tem = emit_cstore (target, icode, scode, mode, compare_mode,
5492 unsignedp, op1, op0, normalizep, target_mode);
5503 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
5504 and storing in TARGET. Normally return TARGET.
5505 Return 0 if that cannot be done.
5507 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
5508 it is VOIDmode, they cannot both be CONST_INT.
5510 UNSIGNEDP is for the case where we have to widen the operands
5511 to perform the operation. It says to use zero-extension.
5513 NORMALIZEP is 1 if we should convert the result to be either zero
5514 or one. NORMALIZEP is -1 if we should convert the result to be
5515 either zero or -1. If NORMALIZEP is zero, the result will be left
5516 "raw" out of the scc insn. */
5519 emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
5520 enum machine_mode mode, int unsignedp, int normalizep)
5522 enum machine_mode target_mode = target ? GET_MODE (target) : VOIDmode;
5523 enum rtx_code rcode;
5525 rtx tem, last, trueval;
5527 tem = emit_store_flag_1 (target, code, op0, op1, mode, unsignedp, normalizep,
5532 /* If we reached here, we can't do this with a scc insn; however, there
5533 are some comparisons that can be done in other ways. Don't do any
5534 of these cases if branches are very cheap. */
5535 if (BRANCH_COST (optimize_insn_for_speed_p (), false) == 0)
5538 /* See what we need to return. We can only return a 1, -1, or the sign bit. */
5541 if (normalizep == 0)
5543 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
5544 normalizep = STORE_FLAG_VALUE;
5546 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5547 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
5548 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
5554 last = get_last_insn ();
5556 /* If optimizing, use different pseudo registers for each insn, instead
5557 of reusing the same pseudo. This leads to better CSE, but slows
5558 down the compiler, since there are more pseudos. */
5559 subtarget = (!optimize
5560 && (target_mode == mode)) ? target : NULL_RTX;
5561 trueval = GEN_INT (normalizep ? normalizep : STORE_FLAG_VALUE);
5563 /* For floating-point comparisons, try the reverse comparison or try
5564 changing the "orderedness" of the comparison. */
5565 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5567 enum rtx_code first_code;
5570 rcode = reverse_condition_maybe_unordered (code);
5571 if (can_compare_p (rcode, mode, ccp_store_flag)
5572 && (code == ORDERED || code == UNORDERED
5573 || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
5574 || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
5576 int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
5577 || (STORE_FLAG_VALUE == -1 && normalizep == 1));
5579 /* For the reverse comparison, use either an addition or a XOR. */
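/* E.g. with STORE_FLAG_VALUE == 1 and NORMALIZEP == -1, the reversed
   comparison yields 0/1 and adding -1 maps that to -1/0, the desired
   result for the original comparison; in the XOR variant the reversed
   0/TRUEVAL result is simply flipped by XORing with TRUEVAL.  */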
5581 && rtx_cost (GEN_INT (normalizep), PLUS,
5582 optimize_insn_for_speed_p ()) == 0)
5584 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5585 STORE_FLAG_VALUE, target_mode);
5587 return expand_binop (target_mode, add_optab, tem,
5588 GEN_INT (normalizep),
5589 target, 0, OPTAB_WIDEN);
5592 && rtx_cost (trueval, XOR,
5593 optimize_insn_for_speed_p ()) == 0)
5595 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5596 normalizep, target_mode);
5598 return expand_binop (target_mode, xor_optab, tem, trueval,
5599 target, INTVAL (trueval) >= 0, OPTAB_WIDEN);
5603 delete_insns_since (last);
5605 /* Cannot split ORDERED and UNORDERED, only try the above trick. */
5606 if (code == ORDERED || code == UNORDERED)
5609 and_them = split_comparison (code, mode, &first_code, &code);
5611 /* If there are no NaNs, the first comparison should always fall through.
5612 Effectively change the comparison to the other one. */
5613 if (!HONOR_NANS (mode))
5615 gcc_assert (first_code == (and_them ? ORDERED : UNORDERED));
5616 return emit_store_flag_1 (target, code, op0, op1, mode, 0, normalizep,
5620 #ifdef HAVE_conditional_move
5621 /* Try using a setcc instruction for ORDERED/UNORDERED, followed by a
5622 conditional move. */
5623 tem = emit_store_flag_1 (subtarget, first_code, op0, op1, mode, 0,
5624 normalizep, target_mode);
5629 tem = emit_conditional_move (target, code, op0, op1, mode,
5630 tem, const0_rtx, GET_MODE (tem), 0);
5632 tem = emit_conditional_move (target, code, op0, op1, mode,
5633 trueval, tem, GET_MODE (tem), 0);
5636 delete_insns_since (last);
5643 /* The remaining tricks only apply to integer comparisons. */
5645 if (GET_MODE_CLASS (mode) != MODE_INT)
5648 /* If this is an equality comparison of integers, we can try to exclusive-or
5649 (or subtract) the two operands and use a recursive call to try the
5650 comparison with zero. Don't do any of these cases if branches are very cheap. */
5653 if ((code == EQ || code == NE) && op1 != const0_rtx)
5655 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
5659 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
5662 tem = emit_store_flag (target, code, tem, const0_rtx,
5663 mode, unsignedp, normalizep);
5667 delete_insns_since (last);
5670 /* For integer comparisons, try the reverse comparison. However, for
5671 small X, and if we would have to extend anyway, implementing "X != 0"
5672 as "-(int)X >> 31" is still cheaper than inverting "(int)X == 0". */
5673 rcode = reverse_condition (code);
5674 if (can_compare_p (rcode, mode, ccp_store_flag)
5675 && ! (optab_handler (cstore_optab, mode) == CODE_FOR_nothing
5677 && GET_MODE_SIZE (mode) < UNITS_PER_WORD
5678 && op1 == const0_rtx))
5680 int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
5681 || (STORE_FLAG_VALUE == -1 && normalizep == 1));
5683 /* Again, for the reverse comparison, use either an addition or a XOR. */
5685 && rtx_cost (GEN_INT (normalizep), PLUS,
5686 optimize_insn_for_speed_p ()) == 0)
5688 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5689 STORE_FLAG_VALUE, target_mode);
5691 tem = expand_binop (target_mode, add_optab, tem,
5692 GEN_INT (normalizep), target, 0, OPTAB_WIDEN);
5695 && rtx_cost (trueval, XOR,
5696 optimize_insn_for_speed_p ()) == 0)
5698 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5699 normalizep, target_mode);
5701 tem = expand_binop (target_mode, xor_optab, tem, trueval, target,
5702 INTVAL (trueval) >= 0, OPTAB_WIDEN);
5707 delete_insns_since (last);
5710 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
5711 the constant zero. Reject all other comparisons at this point. Only
5712 do LE and GT if branches are expensive since they are expensive on
5713 2-operand machines. */
5715 if (op1 != const0_rtx
5716 || (code != EQ && code != NE
5717 && (BRANCH_COST (optimize_insn_for_speed_p (),
5718 false) <= 1 || (code != LE && code != GT))))
5721 /* Try to put the result of the comparison in the sign bit. Assume we can't
5722 do the necessary operation below. */
5726 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
5727 the sign bit set. */
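/* E.g. A == 0 gives 0 | -1 == -1 (sign bit set, so A <= 0 holds),
   while A == 3 gives 3 | 2 == 3 (sign bit clear).  */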
5731 /* This is destructive, so SUBTARGET can't be OP0. */
5732 if (rtx_equal_p (subtarget, op0))
5735 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
5738 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
5742 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
5743 number of bits in the mode of OP0, minus one. */
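/* E.g. in SImode, A == 5 gives (5 >> 31) - 5 == -5 (sign bit set, so
   A > 0 holds), while A == -5 gives -1 - -5 == 4 (sign bit clear).  */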
5747 if (rtx_equal_p (subtarget, op0))
5750 tem = expand_shift (RSHIFT_EXPR, mode, op0,
5751 size_int (GET_MODE_BITSIZE (mode) - 1),
5753 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
5757 if (code == EQ || code == NE)
5759 /* For EQ or NE, one way to do the comparison is to apply an operation
5760 that converts the operand into a positive number if it is nonzero
5761 or zero if it was originally zero. Then, for EQ, we subtract 1 and
5762 for NE we negate. This puts the result in the sign bit. Then we
5763 normalize with a shift, if needed.
5765 Two operations that can do the above actions are ABS and FFS, so try
5766 them. If that doesn't work, and MODE is smaller than a full word,
5767 we can use zero-extension to the wider mode (an unsigned conversion)
5768 as the operation. */
5770 /* Note that ABS doesn't yield a positive number for INT_MIN, but
5771 that is compensated by the subsequent overflow when subtracting one below. */
5774 if (optab_handler (abs_optab, mode) != CODE_FOR_nothing)
5775 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
5776 else if (optab_handler (ffs_optab, mode) != CODE_FOR_nothing)
5777 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
5778 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5780 tem = convert_modes (word_mode, mode, op0, 1);
5787 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
5790 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
5793 /* If we couldn't do it that way, for NE we can "or" the two's complement
5794 of the value with itself. For EQ, we take the one's complement of
5795 that "or", which is an extra insn, so we only handle EQ if branches are expensive. */
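/* E.g. -3 | 3 == -1 has the sign bit set, while -0 | 0 == 0 does not,
   so the sign bit of (-A | A) is exactly A != 0.  */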
5800 || BRANCH_COST (optimize_insn_for_speed_p (),
5803 if (rtx_equal_p (subtarget, op0))
5806 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
5807 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
5810 if (tem && code == EQ)
5811 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
5815 if (tem && normalizep)
5816 tem = expand_shift (RSHIFT_EXPR, mode, tem,
5817 size_int (GET_MODE_BITSIZE (mode) - 1),
5818 subtarget, normalizep == 1);
5824 else if (GET_MODE (tem) != target_mode)
5826 convert_move (target, tem, 0);
5829 else if (!subtarget)
5831 emit_move_insn (target, tem);
5836 delete_insns_since (last);
5841 /* Like emit_store_flag, but always succeeds. */
5844 emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
5845 enum machine_mode mode, int unsignedp, int normalizep)
5848 rtx trueval, falseval;
5850 /* First see if emit_store_flag can do the job. */
5851 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
5856 target = gen_reg_rtx (word_mode);
5858 /* If this failed, we have to do this with set/compare/jump/set code.
5859 For foo != 0, if foo is in OP0, just replace it with 1 if nonzero. */
5860 trueval = normalizep ? GEN_INT (normalizep) : const1_rtx;
5862 && GET_MODE_CLASS (mode) == MODE_INT
5865 && op1 == const0_rtx)
5867 label = gen_label_rtx ();
5868 do_compare_rtx_and_jump (target, const0_rtx, EQ, unsignedp,
5869 mode, NULL_RTX, NULL_RTX, label, -1);
5870 emit_move_insn (target, trueval);
5876 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
5877 target = gen_reg_rtx (GET_MODE (target));
5879 /* Jump in the right direction if the target cannot implement CODE
5880 but can jump on its reverse condition. */
5881 falseval = const0_rtx;
5882 if (! can_compare_p (code, mode, ccp_jump)
5883 && (! FLOAT_MODE_P (mode)
5884 || code == ORDERED || code == UNORDERED
5885 || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
5886 || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
5888 enum rtx_code rcode;
5889 if (FLOAT_MODE_P (mode))
5890 rcode = reverse_condition_maybe_unordered (code);
5892 rcode = reverse_condition (code);
5894 /* Canonicalize to UNORDERED for the libcall. */
5895 if (can_compare_p (rcode, mode, ccp_jump)
5896 || (code == ORDERED && ! can_compare_p (ORDERED, mode, ccp_jump)))
5899 trueval = const0_rtx;
5904 emit_move_insn (target, trueval);
5905 label = gen_label_rtx ();
5906 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
5907 NULL_RTX, label, -1);
5909 emit_move_insn (target, falseval);
5915 /* Perform possibly multi-word comparison and conditional jump to LABEL
5916 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE. This is
5917 now a thin wrapper around do_compare_rtx_and_jump. */
5920 do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, enum machine_mode mode,
5923 int unsignedp = (op == LTU || op == LEU || op == GTU || op == GEU);
5924 do_compare_rtx_and_jump (arg1, arg2, op, unsignedp, mode,
5925 NULL_RTX, NULL_RTX, label, -1);