1 /* Medium-level subroutines: convert bit-field store and extract
2 and shifts, multiplies and divides to rtl instructions.
3 Copyright (C) 1987, 88, 89, 92-97, 1998 Free Software Foundation, Inc.
5 This file is part of GNU CC.
7 GNU CC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
12 GNU CC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GNU CC; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
28 #include "insn-flags.h"
29 #include "insn-codes.h"
30 #include "insn-config.h"
35 static void store_fixed_bit_field PROTO((rtx, int, int, int, rtx, int));
36 static void store_split_bit_field PROTO((rtx, int, int, rtx, int));
37 static rtx extract_fixed_bit_field PROTO((enum machine_mode, rtx, int,
38 int, int, rtx, int, int));
static rtx mask_rtx PROTO((enum machine_mode, int,
			   int, int));
static rtx lshift_value PROTO((enum machine_mode, rtx,
			       int, int));
43 static rtx extract_split_bit_field PROTO((rtx, int, int, int, int));
44 static void do_cmp_and_jump PROTO((rtx, rtx, enum rtx_code,
45 enum machine_mode, rtx));
47 #define CEIL(x,y) (((x) + (y) - 1) / (y))
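/* For instance (illustrative values, not from this file): CEIL (13, 4)
   is 4 while CEIL (12, 4) is 3; this is how the code below computes how
   many units of size Y are needed to cover X bits.  */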
49 /* Non-zero means divides or modulus operations are relatively cheap for
50 powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   insns.  */
54 static int sdiv_pow2_cheap, smod_pow2_cheap;
56 #ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS STRICT_ALIGNMENT
#endif
60 /* For compilers that support multiple targets with different word sizes,
61 MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD. An example
62 is the H8/300(H) compiler. */
64 #ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif
/* Cost of various pieces of RTL.  Note that some of these are indexed by
   shift count and some by mode.  */
70 static int add_cost, negate_cost, zero_cost;
71 static int shift_cost[MAX_BITS_PER_WORD];
72 static int shiftadd_cost[MAX_BITS_PER_WORD];
73 static int shiftsub_cost[MAX_BITS_PER_WORD];
74 static int mul_cost[NUM_MACHINE_MODES];
75 static int div_cost[NUM_MACHINE_MODES];
76 static int mul_widen_cost[NUM_MACHINE_MODES];
77 static int mul_highpart_cost[NUM_MACHINE_MODES];
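/* Illustrative use of these tables (a sketch, not code from this file):
   pricing a multiply by 10 decomposed as ((x << 2) + x) << 1 amounts to
   comparing shiftadd_cost[2] + shift_cost[1] against mul_cost[(int) mode];
   synth_mult below performs exactly this kind of comparison.  */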
83 /* This is "some random pseudo register" for purposes of calling recog
84 to see what insns exist. */
85 rtx reg = gen_rtx_REG (word_mode, 10000);
86 rtx shift_insn, shiftadd_insn, shiftsub_insn;
  enum machine_mode mode, wider_mode;
  int dummy;
  int m;

  start_sequence ();
93 /* Since we are on the permanent obstack, we must be sure we save this
     spot AFTER we call start_sequence, since it will reuse the rtl it
     makes now.  */
96 free_point = (char *) oballoc (0);
  reg = gen_rtx_REG (word_mode, 10000);
100 zero_cost = rtx_cost (const0_rtx, 0);
101 add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);
  shift_insn = emit_insn (gen_rtx_SET (VOIDmode, reg,
				       gen_rtx_ASHIFT (word_mode, reg,
						       const0_rtx)));

  shiftadd_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
			      gen_rtx_PLUS (word_mode,
					    gen_rtx_MULT (word_mode,
							  reg, const0_rtx),
					    reg)));

  shiftsub_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
			      gen_rtx_MINUS (word_mode,
					     gen_rtx_MULT (word_mode,
							   reg, const0_rtx),
					     reg)));
  shift_cost[0] = 0;
  shiftadd_cost[0] = shiftsub_cost[0] = add_cost;
  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      shift_cost[m] = shiftadd_cost[m] = shiftsub_cost[m] = 32000;
130 XEXP (SET_SRC (PATTERN (shift_insn)), 1) = GEN_INT (m);
131 if (recog (PATTERN (shift_insn), shift_insn, &dummy) >= 0)
132 shift_cost[m] = rtx_cost (SET_SRC (PATTERN (shift_insn)), SET);
134 XEXP (XEXP (SET_SRC (PATTERN (shiftadd_insn)), 0), 1)
135 = GEN_INT ((HOST_WIDE_INT) 1 << m);
136 if (recog (PATTERN (shiftadd_insn), shiftadd_insn, &dummy) >= 0)
137 shiftadd_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftadd_insn)), SET);
139 XEXP (XEXP (SET_SRC (PATTERN (shiftsub_insn)), 0), 1)
140 = GEN_INT ((HOST_WIDE_INT) 1 << m);
141 if (recog (PATTERN (shiftsub_insn), shiftsub_insn, &dummy) >= 0)
	shiftsub_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftsub_insn)), SET);
    }
145 negate_cost = rtx_cost (gen_rtx_NEG (word_mode, reg), SET);
  sdiv_pow2_cheap
    = (rtx_cost (gen_rtx_DIV (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);
  smod_pow2_cheap
    = (rtx_cost (gen_rtx_MOD (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);
  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
158 reg = gen_rtx_REG (mode, 10000);
159 div_cost[(int) mode] = rtx_cost (gen_rtx_UDIV (mode, reg, reg), SET);
160 mul_cost[(int) mode] = rtx_cost (gen_rtx_MULT (mode, reg, reg), SET);
161 wider_mode = GET_MODE_WIDER_MODE (mode);
      wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
	{
	  mul_widen_cost[(int) wider_mode]
	    = rtx_cost (gen_rtx_MULT (wider_mode,
				      gen_rtx_ZERO_EXTEND (wider_mode, reg),
				      gen_rtx_ZERO_EXTEND (wider_mode, reg)),
			SET);
	  mul_highpart_cost[(int) mode]
	    = rtx_cost (gen_rtx_TRUNCATE
			(mode,
			 gen_rtx_LSHIFTRT
			 (wider_mode,
			  gen_rtx_MULT (wider_mode,
					gen_rtx_ZERO_EXTEND (wider_mode, reg),
					gen_rtx_ZERO_EXTEND (wider_mode, reg)),
			  GEN_INT (GET_MODE_BITSIZE (mode)))),
			SET);
	}
    }
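  /* The rtx priced for mul_highpart_cost has this shape (illustrative,
     for a 32-bit MODE whose WIDER_MODE is 64 bits):

	 (truncate:SI
	   (lshiftrt:DI (mult:DI (zero_extend:DI (reg:SI))
				 (zero_extend:DI (reg:SI)))
			(const_int 32)))

     i.e. the high half of a widening multiply.  */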
  /* Free the objects we just allocated.  */
  end_sequence ();
  obfree (free_point);
}
187 /* Return an rtx representing minus the value of X.
188 MODE is the intended mode of the result,
189 useful if X is a CONST_INT. */
rtx
negate_rtx (mode, x)
     enum machine_mode mode;
     rtx x;
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}
204 /* Generate code to store value from rtx VALUE
205 into a bit-field within structure STR_RTX
206 containing BITSIZE bits starting at bit BITNUM.
207 FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
208 ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
209 TOTAL_SIZE is the size of the structure in bytes, or -1 if varying. */
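/* For example (illustrative only): given

     struct s { unsigned int a : 3; unsigned int b : 7; };

   a store to B typically reaches here with BITSIZE == 7 and BITNUM == 3
   on a little-endian target; big-endian bit numbering is compensated
   for below.  */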
211 /* ??? Note that there are two different ideas here for how
212 to determine the size to count bits within, for a register.
   One is BITS_PER_WORD, and the other is the size of operand 3
   of the insv pattern.

   If operand 3 of the insv pattern is VOIDmode, then we will use BITS_PER_WORD;
   else, we use the mode of operand 3.  */
rtx
store_bit_field (str_rtx, bitsize, bitnum, fieldmode, value, align, total_size)
     rtx str_rtx, value;
     register int bitsize, bitnum;
     enum machine_mode fieldmode;
     int align, total_size;
{
229 int unit = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
230 register int offset = bitnum / unit;
231 register int bitpos = bitnum % unit;
  register rtx op0 = str_rtx;
  int insv_bitsize;

  if (insn_operand_mode[(int) CODE_FOR_insv][3] == VOIDmode)
    insv_bitsize = GET_MODE_BITSIZE (word_mode);
  else
    insv_bitsize = GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_insv][3]);
  if (GET_CODE (str_rtx) == MEM && ! MEM_IN_STRUCT_P (str_rtx))
    abort ();
245 /* Discount the part of the structure before the desired byte.
246 We need to know how many bytes are safe to reference after it. */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
		   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));
  while (GET_CODE (op0) == SUBREG)
    {
253 /* The following line once was done only if WORDS_BIG_ENDIAN,
254 but I think that is a mistake. WORDS_BIG_ENDIAN is
255 meaningful at a much higher level; when structures are copied
256 between memory and regs, the higher-numbered regs
257 always get higher addresses. */
258 offset += SUBREG_WORD (op0);
259 /* We used to adjust BITPOS here, but now we do the whole adjustment
260 right after the loop. */
      op0 = SUBREG_REG (op0);
    }
  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
267 enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
	if (GET_CODE (op0) == MEM)
	  op0 = change_address (op0, imode, NULL_RTX);
	else if (imode != BLKmode)
	  op0 = gen_lowpart (imode, op0);
	else
	  abort ();
      }
  }
279 /* If OP0 is a register, BITPOS must count within a word.
280 But as we have it, it counts within whatever size OP0 now has.
281 On a bigendian machine, these are not the same, so convert. */
283 && GET_CODE (op0) != MEM
284 && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
285 bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
287 value = protect_from_queue (value, 0);
  if (flag_force_mem)
    value = force_not_mem (value);
292 /* Note that the adjustment of BITPOS above has no effect on whether
293 BITPOS is 0 in a REG bigger than a word. */
294 if (GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
295 && (GET_CODE (op0) != MEM
296 || ! SLOW_UNALIGNED_ACCESS
297 || (offset * BITS_PER_UNIT % bitsize == 0
298 && align % GET_MODE_SIZE (fieldmode) == 0))
299 && bitpos == 0 && bitsize == GET_MODE_BITSIZE (fieldmode))
    {
      /* Storing in a full-word or multi-word field in a register
	 can be done with just SUBREG.  */
      if (GET_MODE (op0) != fieldmode)
	{
	  if (GET_CODE (op0) == SUBREG)
	    {
307 if (GET_MODE (SUBREG_REG (op0)) == fieldmode
308 || GET_MODE_CLASS (fieldmode) == MODE_INT
309 || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
310 op0 = SUBREG_REG (op0);
	      else
		/* Else we've got some float mode source being extracted into
		   a different float mode destination -- this combination of
		   subregs results in Severe Tire Damage.  */
		abort ();
	    }
317 if (GET_CODE (op0) == REG)
318 op0 = gen_rtx_SUBREG (fieldmode, op0, offset);
	  else
	    op0 = change_address (op0, fieldmode,
				  plus_constant (XEXP (op0, 0), offset));
	}
      emit_move_insn (op0, value);
      return value;
    }
327 /* Storing an lsb-aligned field in a register
328 can be done with a movestrict instruction. */
330 if (GET_CODE (op0) != MEM
331 && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
332 && bitsize == GET_MODE_BITSIZE (fieldmode)
333 && (GET_MODE (op0) == fieldmode
334 || (movstrict_optab->handlers[(int) fieldmode].insn_code
335 != CODE_FOR_nothing)))
    {
      /* Get appropriate low part of the value being stored.  */
338 if (GET_CODE (value) == CONST_INT || GET_CODE (value) == REG)
339 value = gen_lowpart (fieldmode, value);
340 else if (!(GET_CODE (value) == SYMBOL_REF
341 || GET_CODE (value) == LABEL_REF
342 || GET_CODE (value) == CONST))
343 value = convert_to_mode (fieldmode, value, 0);
345 if (GET_MODE (op0) == fieldmode)
346 emit_move_insn (op0, value);
349 int icode = movstrict_optab->handlers[(int) fieldmode].insn_code;
350 if(! (*insn_operand_predicate[icode][1]) (value, fieldmode))
351 value = copy_to_mode_reg (fieldmode, value);
352 emit_insn (GEN_FCN (icode)
		     (gen_rtx_SUBREG (fieldmode, op0, offset), value));
	}
      return value;
    }
358 /* Handle fields bigger than a word. */
360 if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
363 in the order least significant first.
	 This is because the most significant word is the one which may
	 be less than full.
	 However, only do that if the value is not BLKmode.  */
368 int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      int i;
373 /* This is the mode we must force value to, so that there will be enough
374 subwords to extract. Note that fieldmode will often (always?) be
375 VOIDmode, because that is what store_field uses to indicate that this
376 is a bit field, but passing VOIDmode to operand_subword_force will
377 result in an abort. */
378 fieldmode = mode_for_size (nwords * BITS_PER_WORD, MODE_INT, 0);
      for (i = 0; i < nwords; i++)
	{
382 /* If I is 0, use the low-order word in both field and target;
383 if I is 1, use the next to lowest word; and so on. */
384 int wordnum = (backwards ? nwords - i - 1 : i);
385 int bit_offset = (backwards
386 ? MAX (bitsize - (i + 1) * BITS_PER_WORD, 0)
387 : i * BITS_PER_WORD);
388 store_bit_field (op0, MIN (BITS_PER_WORD,
389 bitsize - i * BITS_PER_WORD),
390 bitnum + bit_offset, word_mode,
			   operand_subword_force (value, wordnum,
						  (GET_MODE (value) == VOIDmode
						   ? word_mode
						   : GET_MODE (value))),
			   align, total_size);
	}
      return value;
    }
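  /* Illustrative trace (little-endian word order): storing a 40-bit
     field on a 32-bit target takes two iterations; i == 0 stores bits
     0-31 (word 0 of VALUE) at BITNUM, i == 1 stores the remaining 8
     bits (from word 1) at BITNUM + 32.  */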
  /* From here on we can assume that the field to be stored in fits
     within a single word, since wider fields were handled above.  */
403 /* OFFSET is the number of words or bytes (UNIT says which)
404 from STR_RTX to the first word or byte containing part of the field. */
  if (GET_CODE (op0) == REG)
    {
      if (offset != 0
	  || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
	op0 = gen_rtx_SUBREG (TYPE_MODE (type_for_size (BITS_PER_WORD, 0)),
			      op0, offset);
      offset = 0;
    }
  else
    op0 = protect_from_queue (op0, 1);
419 /* If VALUE is a floating-point mode, access it as an integer of the
420 corresponding size. This can occur on a machine with 64 bit registers
421 that uses SFmode for float. This can also occur for unaligned float
423 if (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT)
425 if (GET_CODE (value) != REG)
426 value = copy_to_reg (value);
      value = gen_rtx_SUBREG (word_mode, value, 0);
    }
430 /* Now OFFSET is nonzero only if OP0 is memory
431 and is therefore always measured in bytes. */
435 && GET_MODE (value) != BLKmode
436 && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
437 /* Ensure insv's size is wide enough for this field. */
438 && (insv_bitsize >= bitsize)
439 && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
440 && (bitsize + bitpos > insv_bitsize)))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      enum machine_mode maxmode;
      int save_volatile_ok = volatile_ok;

      volatile_ok = 1;
450 maxmode = insn_operand_mode[(int) CODE_FOR_insv][3];
      if (maxmode == VOIDmode)
	maxmode = word_mode;
456 /* If this machine's insv can only insert into a register, copy OP0
457 into a register and save it back later. */
458 /* This used to check flag_force_mem, but that was a serious
459 de-optimization now that flag_force_mem is enabled by -O2. */
      if (GET_CODE (op0) == MEM
	  && ! ((*insn_operand_predicate[(int) CODE_FOR_insv][0])
		(op0, VOIDmode)))
	{
465 enum machine_mode bestmode;
467 /* Get the mode to use for inserting into this field. If OP0 is
468 BLKmode, get the smallest mode consistent with the alignment. If
469 OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
470 mode. Otherwise, use the smallest mode containing the field. */
472 if (GET_MODE (op0) == BLKmode
473 || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
	    bestmode
	      = get_best_mode (bitsize, bitnum, align * BITS_PER_UNIT, maxmode,
			       MEM_VOLATILE_P (op0));
	  else
	    bestmode = GET_MODE (op0);
480 if (bestmode == VOIDmode
481 || (SLOW_UNALIGNED_ACCESS && GET_MODE_SIZE (bestmode) > align))
484 /* Adjust address to point to the containing unit of that mode. */
485 unit = GET_MODE_BITSIZE (bestmode);
486 /* Compute offset as multiple of this unit, counting in bytes. */
487 offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
488 bitpos = bitnum % unit;
489 op0 = change_address (op0, bestmode,
490 plus_constant (XEXP (op0, 0), offset));
492 /* Fetch that unit, store the bitfield in it, then store the unit. */
493 tempreg = copy_to_reg (op0);
	  store_bit_field (tempreg, bitsize, bitpos, fieldmode, value,
			   align, total_size);
	  emit_move_insn (op0, tempreg);
	  return value;
	}
      volatile_ok = save_volatile_ok;
501 /* Add OFFSET into OP0's address. */
502 if (GET_CODE (xop0) == MEM)
503 xop0 = change_address (xop0, byte_mode,
504 plus_constant (XEXP (xop0, 0), offset));
506 /* If xop0 is a register, we need it in MAXMODE
507 to make it acceptable to the format of insv. */
508 if (GET_CODE (xop0) == SUBREG)
509 /* We can't just change the mode, because this might clobber op0,
510 and we will need the original value of op0 if insv fails. */
511 xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_WORD (xop0));
512 if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
513 xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);
515 /* On big-endian machines, we count bits from the most significant.
516 If the bit field insn does not, we must invert. */
518 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
519 xbitpos = unit - bitsize - xbitpos;
521 /* We have been counting XBITPOS within UNIT.
522 Count instead within the size of the register. */
523 if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
524 xbitpos += GET_MODE_BITSIZE (maxmode) - unit;
526 unit = GET_MODE_BITSIZE (maxmode);
528 /* Convert VALUE to maxmode (which insv insn wants) in VALUE1. */
      value1 = value;
      if (GET_MODE (value) != maxmode)
	{
	  if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
	    {
534 /* Optimization: Don't bother really extending VALUE
535 if it has all the bits we will actually use. However,
536 if we must narrow it, be sure we do it correctly. */
	      if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
		{
540 /* Avoid making subreg of a subreg, or of a mem. */
541 if (GET_CODE (value1) != REG)
542 value1 = copy_to_reg (value1);
		  value1 = gen_rtx_SUBREG (maxmode, value1, 0);
		}
	      else
		value1 = gen_lowpart (maxmode, value1);
	    }
548 else if (!CONSTANT_P (value))
549 /* Parse phase is supposed to make VALUE's data type
550 match that of the component reference, which is a type
551 at least as wide as the field; so VALUE should have
	       a mode that corresponds to that type.  */
	    abort ();
	}
556 /* If this machine's insv insists on a register,
557 get VALUE1 into a register. */
      if (! ((*insn_operand_predicate[(int) CODE_FOR_insv][3])
	     (value1, maxmode)))
	value1 = force_reg (maxmode, value1);
      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
	emit_insn (pat);
      else
	{
	  delete_insns_since (last);
	  store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
	}
    }
  else
574 /* Insv is not available; store using shifts and boolean ops. */
    store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);

  return value;
}
579 /* Use shifts and boolean operations to store VALUE
580 into a bit field of width BITSIZE
581 in a memory location specified by OP0 except offset by OFFSET bytes.
582 (OFFSET must be 0 if OP0 is a register.)
583 The field starts at position BITPOS within the byte.
584 (If OP0 is a register, it may be a full word or a narrower mode,
585 but BITPOS still counts within a full word,
586 which is significant on bigendian machines.)
587 STRUCT_ALIGN is the alignment the structure is known to have (in bytes).
589 Note that protect_from_queue has already been done on OP0 and VALUE. */
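/* A concrete (illustrative) picture of what follows: storing the
   constant 5 into a 3-bit field at BITPOS 4 of a 32-bit word reduces to

     op0 = (op0 & ~(7 << 4)) | (5 << 4);

   mask_rtx builds the ~(7 << 4) constant and lshift_value the 5 << 4.  */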
static void
store_fixed_bit_field (op0, offset, bitsize, bitpos, value, struct_align)
     register rtx op0;
     register int offset, bitsize, bitpos;
     rtx value;
     int struct_align;
{
  register enum machine_mode mode;
  int total_bits = BITS_PER_WORD;
  rtx subtarget, temp;
  int all_zero = 0;
  int all_one = 0;
604 if (! SLOW_UNALIGNED_ACCESS)
605 struct_align = BIGGEST_ALIGNMENT / BITS_PER_UNIT;
607 /* There is a case not handled here:
608 a structure with a known alignment of just a halfword
609 and a field split across two aligned halfwords within the structure.
610 Or likewise a structure with a known alignment of just a byte
611 and a field split across two bytes.
612 Such cases are not supposed to be able to occur. */
  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
	{
	  store_split_bit_field (op0, bitsize, bitpos,
				 value, BITS_PER_WORD);
	  return;
	}
    }
  else
    {
628 /* Get the proper mode to use for this field. We want a mode that
629 includes the entire field. If such a mode would be larger than
630 a word, we won't be doing the extraction the normal way. */
632 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
633 struct_align * BITS_PER_UNIT, word_mode,
634 GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));
      if (mode == VOIDmode)
	{
	  /* The only way this should occur is if the field spans word
	     boundaries.  */
640 store_split_bit_field (op0,
641 bitsize, bitpos + offset * BITS_PER_UNIT,
				 value, struct_align);
	  return;
	}
646 total_bits = GET_MODE_BITSIZE (mode);
648 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
	 be in the range 0 to total_bits-1, and put any excess bytes in
	 OFFSET.  */
      if (bitpos >= total_bits)
	{
	  offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
	  bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
		     * BITS_PER_UNIT);
	}
658 /* Get ref to an aligned byte, halfword, or word containing the field.
659 Adjust BITPOS to be position within a word,
660 and OFFSET to be the offset of that word.
661 Then alter OP0 to refer to that word. */
662 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
663 offset -= (offset % (total_bits / BITS_PER_UNIT));
664 op0 = change_address (op0, mode,
			    plus_constant (XEXP (op0, 0), offset));
    }
668 mode = GET_MODE (op0);
670 /* Now MODE is either some integral mode for a MEM as OP0,
671 or is a full-word for a REG as OP0. TOTAL_BITS corresponds.
672 The bit field is contained entirely within OP0.
673 BITPOS is the starting bit number within OP0.
674 (OP0's mode may actually be narrower than MODE.) */
676 if (BYTES_BIG_ENDIAN)
677 /* BITPOS is the distance between our msb
678 and that of the containing datum.
679 Convert it to the distance from the lsb. */
680 bitpos = total_bits - bitsize - bitpos;
  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */
685 /* Shift VALUE left by BITPOS bits. If VALUE is not constant,
686 we must first convert its mode to MODE. */
  if (GET_CODE (value) == CONST_INT)
    {
      register HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
	v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
	all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
		&& v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
	       || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
	all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
707 && bitpos + bitsize != GET_MODE_BITSIZE (mode));
      if (GET_MODE (value) != mode)
	{
711 if ((GET_CODE (value) == REG || GET_CODE (value) == SUBREG)
712 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
713 value = gen_lowpart (mode, value);
	  else
	    value = convert_to_mode (mode, value, 1);
	}
      if (must_and)
	value = expand_binop (mode, and_optab, value,
720 mask_rtx (mode, 0, bitsize, 0),
721 NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
	value = expand_shift (LSHIFT_EXPR, mode, value,
			      build_int_2 (bitpos, 0), NULL_RTX, 1);
    }
727 /* Now clear the chosen bits in OP0,
728 except that if VALUE is -1 we need not bother. */
730 subtarget = (GET_CODE (op0) == REG || ! flag_force_mem) ? op0 : 0;
  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, op0,
			   mask_rtx (mode, bitpos, bitsize, 1),
			   subtarget, 1, OPTAB_LIB_WIDEN);
      subtarget = temp;
    }
  else
    temp = op0;
742 /* Now logical-or VALUE into OP0, unless it is zero. */
  if (! all_zero)
    {
      temp = expand_binop (mode, ior_optab, temp, value,
			   subtarget, 1, OPTAB_LIB_WIDEN);
      subtarget = temp;
    }
  if (op0 != temp)
    emit_move_insn (op0, temp);
}
751 /* Store a bit field that is split across multiple accessible memory objects.
753 OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
754 BITSIZE is the field width; BITPOS the position of its first bit
756 VALUE is the value to store.
757 ALIGN is the known alignment of OP0, measured in bytes.
758 This is also the size of the memory objects to be used.
760 This does not yet handle fields wider than BITS_PER_WORD. */
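/* Illustrative example: with UNIT == 8 (a byte-aligned MEM), a 10-bit
   field starting at BITPOS 6 is stored as two pieces: the 2 bits that
   fit in the first byte, then the remaining 8 bits in the next byte.  */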
static void
store_split_bit_field (op0, bitsize, bitpos, value, align)
     rtx op0;
     int bitsize, bitpos;
     rtx value;
     int align;
{
  int unit;
  int bitsdone = 0;
  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
774 if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
775 unit = BITS_PER_WORD;
777 unit = MIN (align * BITS_PER_UNIT, BITS_PER_WORD);
779 /* If VALUE is a constant other than a CONST_INT, get it into a register in
780 WORD_MODE. If we can do this using gen_lowpart_common, do so. Note
781 that VALUE might be a floating-point constant. */
782 if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
784 rtx word = gen_lowpart_common (word_mode, value);
      if (word && (value != word))
	value = word;
      else
	value = gen_lowpart_common (word_mode,
				    force_reg (GET_MODE (value) != VOIDmode
					       ? GET_MODE (value)
					       : word_mode, value));
794 else if (GET_CODE (value) == ADDRESSOF)
795 value = copy_to_reg (value);
  while (bitsdone < bitsize)
    {
      int thissize;
      rtx part, word;
      int thispos;
      int offset;
804 offset = (bitpos + bitsdone) / unit;
805 thispos = (bitpos + bitsdone) % unit;
807 /* THISSIZE must not overrun a word boundary. Otherwise,
	 store_fixed_bit_field will call us again, and we will mutually
	 recurse forever.  */
810 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
811 thissize = MIN (thissize, unit - thispos);
      if (BYTES_BIG_ENDIAN)
	{
	  int total_bits;
817 /* We must do an endian conversion exactly the same way as it is
818 done in extract_bit_field, so that the two calls to
819 extract_fixed_bit_field will have comparable arguments. */
820 if (GET_CODE (value) != MEM || GET_MODE (value) == BLKmode)
821 total_bits = BITS_PER_WORD;
	  else
	    total_bits = GET_MODE_BITSIZE (GET_MODE (value));
825 /* Fetch successively less significant portions. */
826 if (GET_CODE (value) == CONST_INT)
827 part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
828 >> (bitsize - bitsdone - thissize))
829 & (((HOST_WIDE_INT) 1 << thissize) - 1));
	  else
	    /* The args are chosen so that the last part includes the
832 lsb. Give extract_bit_field the value it needs (with
833 endianness compensation) to fetch the piece we want.
835 ??? We have no idea what the alignment of VALUE is, so
836 we have to use a guess. */
	    part
	      = extract_fixed_bit_field
839 (word_mode, value, 0, thissize,
840 total_bits - bitsize + bitsdone, NULL_RTX, 1,
		 GET_MODE (value) == VOIDmode
		 ? UNITS_PER_WORD
		 : (GET_MODE (value) == BLKmode
		    ? 1
		    : GET_MODE_ALIGNMENT (GET_MODE (value)) / BITS_PER_UNIT));
	}
      else
	{
849 /* Fetch successively more significant portions. */
	  if (GET_CODE (value) == CONST_INT)
	    part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
			     >> bitsdone)
			    & (((HOST_WIDE_INT) 1 << thissize) - 1));
	  else
	    part
	      = extract_fixed_bit_field
857 (word_mode, value, 0, thissize, bitsdone, NULL_RTX, 1,
	       GET_MODE (value) == VOIDmode
	       ? UNITS_PER_WORD
	       : (GET_MODE (value) == BLKmode
		  ? 1
		  : GET_MODE_ALIGNMENT (GET_MODE (value)) / BITS_PER_UNIT));
	}
865 /* If OP0 is a register, then handle OFFSET here.
867 When handling multiword bitfields, extract_bit_field may pass
868 down a word_mode SUBREG of a larger REG for a bitfield that actually
869 crosses a word boundary. Thus, for a SUBREG, we must find
870 the current word starting from the base register. */
      if (GET_CODE (op0) == SUBREG)
	{
873 word = operand_subword_force (SUBREG_REG (op0),
874 SUBREG_WORD (op0) + offset,
					GET_MODE (SUBREG_REG (op0)));
	  offset = 0;
	}
878 else if (GET_CODE (op0) == REG)
	{
	  word = operand_subword_force (op0, offset, GET_MODE (op0));
	  offset = 0;
	}
      else
	word = op0;
886 /* OFFSET is in UNITs, and UNIT is in bits.
887 store_fixed_bit_field wants offset in bytes. */
888 store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT,
889 thissize, thispos, part, align);
      bitsdone += thissize;
    }
}
/* Generate code to extract a bit-field from STR_RTX
895 containing BITSIZE bits, starting at BITNUM,
896 and put it in TARGET if possible (if TARGET is nonzero).
897 Regardless of TARGET, we return the rtx for where the value is placed.
900 STR_RTX is the structure containing the byte (a REG or MEM).
901 UNSIGNEDP is nonzero if this is an unsigned bit field.
902 MODE is the natural mode of the field value once extracted.
903 TMODE is the mode the caller would like the value to have;
904 but the value may be returned with type MODE instead.
906 ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
   TOTAL_SIZE is the size in bytes of the containing structure,
   or -1 if varying.
910 If a TARGET is specified and we can store in it at no extra cost,
911 we do so, and return TARGET.
912 Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
913 if they are equally easy. */
rtx
extract_bit_field (str_rtx, bitsize, bitnum, unsignedp,
		   target, mode, tmode, align, total_size)
     rtx str_rtx, target;
     register int bitsize;
     int bitnum, unsignedp, align, total_size;
     enum machine_mode mode, tmode;
{
927 int unit = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
928 register int offset = bitnum / unit;
929 register int bitpos = bitnum % unit;
930 register rtx op0 = str_rtx;
931 rtx spec_target = target;
  rtx spec_target_subreg = 0;
  int extv_bitsize, extzv_bitsize;
941 if (insn_operand_mode[(int) CODE_FOR_extv][0] == VOIDmode)
942 extv_bitsize = GET_MODE_BITSIZE (word_mode);
  else
    extv_bitsize = GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extv][0]);
948 if (insn_operand_mode[(int) CODE_FOR_extzv][0] == VOIDmode)
949 extzv_bitsize = GET_MODE_BITSIZE (word_mode);
  else
    extzv_bitsize
      = GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extzv][0]);
955 /* Discount the part of the structure before the desired byte.
956 We need to know how many bytes are safe to reference after it. */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
959 * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));
  if (tmode == VOIDmode)
    tmode = mode;
  while (GET_CODE (op0) == SUBREG)
    {
965 int outer_size = GET_MODE_BITSIZE (GET_MODE (op0));
966 int inner_size = GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)));
968 offset += SUBREG_WORD (op0);
970 inner_size = MIN (inner_size, BITS_PER_WORD);
      if (BYTES_BIG_ENDIAN && (outer_size < inner_size))
	{
974 bitpos += inner_size - outer_size;
	  if (bitpos > unit)
	    {
	      offset += (bitpos / unit);
	      bitpos %= unit;
	    }
	}
      op0 = SUBREG_REG (op0);
    }
  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
988 enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
	if (GET_CODE (op0) == MEM)
	  op0 = change_address (op0, imode, NULL_RTX);
	else if (imode != BLKmode)
	  op0 = gen_lowpart (imode, op0);
	else
	  abort ();
      }
  }
1000 /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */
1004 /* If OP0 is a register, BITPOS must count within a word.
1005 But as we have it, it counts within whatever size OP0 now has.
1006 On a bigendian machine, these are not the same, so convert. */
1007 if (BYTES_BIG_ENDIAN
1008 && GET_CODE (op0) != MEM
1009 && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
1010 bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
1012 /* Extracting a full-word or multi-word value
1013 from a structure in a register or aligned memory.
1014 This can be done with just SUBREG.
1015 So too extracting a subword value in
1016 the least significant part of the register. */
1018 if (((GET_CODE (op0) != MEM
1019 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1020 GET_MODE_BITSIZE (GET_MODE (op0))))
1021 || (GET_CODE (op0) == MEM
1022 && (! SLOW_UNALIGNED_ACCESS
1023 || (offset * BITS_PER_UNIT % bitsize == 0
1024 && align * BITS_PER_UNIT % bitsize == 0))))
1025 && ((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
1026 && bitpos % BITS_PER_WORD == 0)
1027 || (mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0) != BLKmode
1028 /* ??? The big endian test here is wrong. This is correct
1029 if the value is in a register, and if mode_for_size is not
1030 the same mode as op0. This causes us to get unnecessarily
1031 inefficient code from the Thumb port when -mbig-endian. */
1032 && (BYTES_BIG_ENDIAN
		  ? bitpos + bitsize == BITS_PER_WORD
		  : bitpos == 0))))
    {
1036 enum machine_mode mode1
1037 = mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0);
      if (mode1 != GET_MODE (op0))
	{
	  if (GET_CODE (op0) == SUBREG)
	    {
1043 if (GET_MODE (SUBREG_REG (op0)) == mode1
1044 || GET_MODE_CLASS (mode1) == MODE_INT
1045 || GET_MODE_CLASS (mode1) == MODE_PARTIAL_INT)
1046 op0 = SUBREG_REG (op0);
	      else
		/* Else we've got some float mode source being extracted into
		   a different float mode destination -- this combination of
		   subregs results in Severe Tire Damage.  */
		abort ();
	    }
1053 if (GET_CODE (op0) == REG)
1054 op0 = gen_rtx_SUBREG (mode1, op0, offset);
	  else
	    op0 = change_address (op0, mode1,
				  plus_constant (XEXP (op0, 0), offset));
	}
      if (mode1 != mode)
	return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }
1064 /* Handle fields bigger than a word. */
1066 if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
1069 in the order least significant first.
1070 This is because the most significant word is the one which may
1071 be less than full. */
      int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      int i;
1076 if (target == 0 || GET_CODE (target) != REG)
1077 target = gen_reg_rtx (mode);
1079 /* Indicate for flow that the entire target reg is being set. */
1080 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
      for (i = 0; i < nwords; i++)
	{
1084 /* If I is 0, use the low-order word in both field and target;
1085 if I is 1, use the next to lowest word; and so on. */
1086 /* Word number in TARGET to use. */
1087 int wordnum = (WORDS_BIG_ENDIAN
1088 ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
1090 /* Offset from start of field in OP0. */
1091 int bit_offset = (WORDS_BIG_ENDIAN
1092 ? MAX (0, bitsize - (i + 1) * BITS_PER_WORD)
1093 : i * BITS_PER_WORD);
1094 rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
	  rtx result_part
	    = extract_bit_field (op0, MIN (BITS_PER_WORD,
1097 bitsize - i * BITS_PER_WORD),
1098 bitnum + bit_offset,
				 1, target_part, mode, word_mode,
				 align, total_size);
	  if (target_part == 0)
	    abort ();
1105 if (result_part != target_part)
1106 emit_move_insn (target_part, result_part);
1111 /* Unless we've filled TARGET, the upper regs in a multi-reg value
1112 need to be zero'd out. */
1113 if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
	{
	  int total_words;

	  total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
	  for (i = nwords; i < total_words; i++)
	    {
1120 int wordnum = WORDS_BIG_ENDIAN ? total_words - i - 1 : i;
1121 rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
	      emit_move_insn (target_part, const0_rtx);
	    }
	}
      if (unsignedp)
	return target;
      /* Signed bit field: sign-extend with two arithmetic shifts.  */
1129 target = expand_shift (LSHIFT_EXPR, mode, target,
			     build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
			     NULL_RTX, 0);
1132 return expand_shift (RSHIFT_EXPR, mode, target,
			   build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
			   NULL_RTX, 0);
    }
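  /* Worked example (illustrative): for a 5-bit signed field in a 32-bit
     mode, the two shifts above are by 27; a field value of 0x1f becomes
     0xf8000000 after the left shift and -1 after the arithmetic right
     shift, which is the desired sign extension.  */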
1137 /* From here on we know the desired field is smaller than a word
1138 so we can assume it is an integer. So we can safely extract it as one
1139 size of integer, if necessary, and then truncate or extend
1140 to the size that is wanted. */
1142 /* OFFSET is the number of words or bytes (UNIT says which)
1143 from STR_RTX to the first word or byte containing part of the field. */
  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
	  || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
	op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
			      op0, offset);
      offset = 0;
    }
  else
    op0 = protect_from_queue (str_rtx, 1);
1158 /* Now OFFSET is nonzero only for memory operands. */
1164 && (extzv_bitsize >= bitsize)
1165 && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
1166 && (bitsize + bitpos > extzv_bitsize)))
	{
	  int xbitpos = bitpos, xoffset = offset;
1169 rtx bitsize_rtx, bitpos_rtx;
1170 rtx last = get_last_insn ();
1172 rtx xtarget = target;
1173 rtx xspec_target = spec_target;
1174 rtx xspec_target_subreg = spec_target_subreg;
1176 enum machine_mode maxmode;
1178 maxmode = insn_operand_mode[(int) CODE_FOR_extzv][0];
1179 if (maxmode == VOIDmode)
1180 maxmode = word_mode;
	  if (GET_CODE (xop0) == MEM)
	    {
	      int save_volatile_ok = volatile_ok;

	      volatile_ok = 1;

	      /* Is the memory operand acceptable?  */
1188 if (! ((*insn_operand_predicate[(int) CODE_FOR_extzv][1])
		     (xop0, GET_MODE (xop0))))
		{
1191 /* No, load into a reg and extract from there. */
1192 enum machine_mode bestmode;
1194 /* Get the mode to use for inserting into this field. If
1195 OP0 is BLKmode, get the smallest mode consistent with the
1196 alignment. If OP0 is a non-BLKmode object that is no
1197 wider than MAXMODE, use its mode. Otherwise, use the
1198 smallest mode containing the field. */
1200 if (GET_MODE (xop0) == BLKmode
1201 || (GET_MODE_SIZE (GET_MODE (op0))
1202 > GET_MODE_SIZE (maxmode)))
1203 bestmode = get_best_mode (bitsize, bitnum,
1204 align * BITS_PER_UNIT, maxmode,
1205 MEM_VOLATILE_P (xop0));
		  else
		    bestmode = GET_MODE (xop0);
1209 if (bestmode == VOIDmode
1210 || (SLOW_UNALIGNED_ACCESS && GET_MODE_SIZE (bestmode) > align))
1213 /* Compute offset as multiple of this unit,
1214 counting in bytes. */
1215 unit = GET_MODE_BITSIZE (bestmode);
1216 xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
1217 xbitpos = bitnum % unit;
1218 xop0 = change_address (xop0, bestmode,
					 plus_constant (XEXP (xop0, 0),
							xoffset));
1221 /* Fetch it to a register in that size. */
1222 xop0 = force_reg (bestmode, xop0);
		  /* XBITPOS counts within UNIT, which is what is expected.  */
		}
	      else
1227 /* Get ref to first byte containing part of the field. */
1228 xop0 = change_address (xop0, byte_mode,
1229 plus_constant (XEXP (xop0, 0), xoffset));
	      volatile_ok = save_volatile_ok;
	    }
	  /* If op0 is a register, we need it in MAXMODE (which is usually
	     SImode) to make it acceptable to the format of extzv.  */
1236 if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
1238 if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
1239 xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);
1241 /* On big-endian machines, we count bits from the most significant.
1242 If the bit field insn does not, we must invert. */
1243 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
1244 xbitpos = unit - bitsize - xbitpos;
1246 /* Now convert from counting within UNIT to counting in MAXMODE. */
1247 if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
1248 xbitpos += GET_MODE_BITSIZE (maxmode) - unit;
1250 unit = GET_MODE_BITSIZE (maxmode);
	  if (xtarget == 0
	      || (flag_force_mem && GET_CODE (xtarget) == MEM))
1254 xtarget = xspec_target = gen_reg_rtx (tmode);
	  if (GET_MODE (xtarget) != maxmode)
	    {
	      if (GET_CODE (xtarget) == REG)
		{
1260 int wider = (GET_MODE_SIZE (maxmode)
1261 > GET_MODE_SIZE (GET_MODE (xtarget)));
1262 xtarget = gen_lowpart (maxmode, xtarget);
		  if (wider)
		    xspec_target_subreg = xtarget;
		}
	      else
		xtarget = gen_reg_rtx (maxmode);
	    }
1270 /* If this machine's extzv insists on a register target,
1271 make sure we have one. */
1272 if (! ((*insn_operand_predicate[(int) CODE_FOR_extzv][0])
1273 (xtarget, maxmode)))
1274 xtarget = gen_reg_rtx (maxmode);
1276 bitsize_rtx = GEN_INT (bitsize);
1277 bitpos_rtx = GEN_INT (xbitpos);
1279 pat = gen_extzv (protect_from_queue (xtarget, 1),
			   xop0, bitsize_rtx, bitpos_rtx);
	  if (pat)
	    {
	      emit_insn (pat);
	      target = xtarget;
	      spec_target = xspec_target;
	      spec_target_subreg = xspec_target_subreg;
	    }
	  else
	    {
	      delete_insns_since (last);
1291 target = extract_fixed_bit_field (tmode, op0, offset, bitsize,
						bitpos, target, 1, align);
	    }
	}
      else
	target = extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
					  target, 1, align);
    }
  else
    {
1305 && (extv_bitsize >= bitsize)
1306 && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
1307 && (bitsize + bitpos > extv_bitsize)))
	{
	  int xbitpos = bitpos, xoffset = offset;
1310 rtx bitsize_rtx, bitpos_rtx;
1311 rtx last = get_last_insn ();
1312 rtx xop0 = op0, xtarget = target;
1313 rtx xspec_target = spec_target;
1314 rtx xspec_target_subreg = spec_target_subreg;
1316 enum machine_mode maxmode;
1318 maxmode = insn_operand_mode[(int) CODE_FOR_extv][0];
1319 if (maxmode == VOIDmode)
1320 maxmode = word_mode;
	  if (GET_CODE (xop0) == MEM)
	    {
1324 /* Is the memory operand acceptable? */
1325 if (! ((*insn_operand_predicate[(int) CODE_FOR_extv][1])
		     (xop0, GET_MODE (xop0))))
		{
1328 /* No, load into a reg and extract from there. */
1329 enum machine_mode bestmode;
1331 /* Get the mode to use for inserting into this field. If
1332 OP0 is BLKmode, get the smallest mode consistent with the
1333 alignment. If OP0 is a non-BLKmode object that is no
1334 wider than MAXMODE, use its mode. Otherwise, use the
1335 smallest mode containing the field. */
1337 if (GET_MODE (xop0) == BLKmode
1338 || (GET_MODE_SIZE (GET_MODE (op0))
1339 > GET_MODE_SIZE (maxmode)))
1340 bestmode = get_best_mode (bitsize, bitnum,
1341 align * BITS_PER_UNIT, maxmode,
1342 MEM_VOLATILE_P (xop0));
		  else
		    bestmode = GET_MODE (xop0);
1346 if (bestmode == VOIDmode
1347 || (SLOW_UNALIGNED_ACCESS && GET_MODE_SIZE (bestmode) > align))
1350 /* Compute offset as multiple of this unit,
1351 counting in bytes. */
1352 unit = GET_MODE_BITSIZE (bestmode);
1353 xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
1354 xbitpos = bitnum % unit;
1355 xop0 = change_address (xop0, bestmode,
					 plus_constant (XEXP (xop0, 0),
							xoffset));
1358 /* Fetch it to a register in that size. */
1359 xop0 = force_reg (bestmode, xop0);
		  /* XBITPOS counts within UNIT, which is what is expected.  */
		}
	      else
1364 /* Get ref to first byte containing part of the field. */
1365 xop0 = change_address (xop0, byte_mode,
				       plus_constant (XEXP (xop0, 0), xoffset));
	    }
1369 /* If op0 is a register, we need it in MAXMODE (which is usually
1370 SImode) to make it acceptable to the format of extv. */
1371 if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
1373 if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
1374 xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);
1376 /* On big-endian machines, we count bits from the most significant.
1377 If the bit field insn does not, we must invert. */
1378 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
1379 xbitpos = unit - bitsize - xbitpos;
1381 /* XBITPOS counts within a size of UNIT.
1382 Adjust to count within a size of MAXMODE. */
1383 if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
1384 xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);
1386 unit = GET_MODE_BITSIZE (maxmode);
	  if (xtarget == 0
	      || (flag_force_mem && GET_CODE (xtarget) == MEM))
1390 xtarget = xspec_target = gen_reg_rtx (tmode);
	  if (GET_MODE (xtarget) != maxmode)
	    {
	      if (GET_CODE (xtarget) == REG)
		{
1396 int wider = (GET_MODE_SIZE (maxmode)
1397 > GET_MODE_SIZE (GET_MODE (xtarget)));
1398 xtarget = gen_lowpart (maxmode, xtarget);
		  if (wider)
		    xspec_target_subreg = xtarget;
		}
	      else
		xtarget = gen_reg_rtx (maxmode);
	    }
1406 /* If this machine's extv insists on a register target,
1407 make sure we have one. */
1408 if (! ((*insn_operand_predicate[(int) CODE_FOR_extv][0])
1409 (xtarget, maxmode)))
1410 xtarget = gen_reg_rtx (maxmode);
1412 bitsize_rtx = GEN_INT (bitsize);
1413 bitpos_rtx = GEN_INT (xbitpos);
1415 pat = gen_extv (protect_from_queue (xtarget, 1),
			  xop0, bitsize_rtx, bitpos_rtx);
	  if (pat)
	    {
	      emit_insn (pat);
	      target = xtarget;
	      spec_target = xspec_target;
	      spec_target_subreg = xspec_target_subreg;
	    }
	  else
	    {
	      delete_insns_since (last);
1427 target = extract_fixed_bit_field (tmode, op0, offset, bitsize,
						bitpos, target, 0, align);
	    }
	}
      else
	target = extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
					  target, 0, align);
    }
  if (target == spec_target)
    return target;
  if (target == spec_target_subreg)
    return spec_target;
1441 if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
1443 /* If the target mode is floating-point, first convert to the
1444 integer mode of that size and then access it as a floating-point
1445 value via a SUBREG. */
1446 if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
	  target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
						   MODE_INT, 0),
				    target, unsignedp);
1451 if (GET_CODE (target) != REG)
1452 target = copy_to_reg (target);
1453 return gen_rtx_SUBREG (tmode, target, 0);
      return convert_to_mode (tmode, target, unsignedp);
    }
  return target;
}
/* Extract a bit field using shifts and boolean operations.
1462 Returns an rtx to represent the value.
1463 OP0 addresses a register (word) or memory (byte).
1464 BITPOS says which bit within the word or byte the bit field starts in.
1465 OFFSET says how many bytes farther the bit field starts;
1466 it is 0 if OP0 is a register.
1467 BITSIZE says how many bits long the bit field is.
1468 (If OP0 is a register, it may be narrower than a full word,
1469 but BITPOS still counts within a full word,
1470 which is significant on bigendian machines.)
1472 UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
1473 If TARGET is nonzero, attempts to store the value there
1474 and return TARGET, but this is not guaranteed.
1475 If TARGET is not used, create a pseudo-reg of mode TMODE for the value.
1477 ALIGN is the alignment that STR_RTX is known to have, measured in bytes. */
static rtx
extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
			 target, unsignedp, align)
1482 enum machine_mode tmode;
1483 register rtx op0, target;
     register int offset, bitsize, bitpos;
     int unsignedp;
     int align;
{
  int total_bits = BITS_PER_WORD;
1489 enum machine_mode mode;
1491 if (GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
    {
      /* Special treatment for a bit field split across two registers.  */
1494 if (bitsize + bitpos > BITS_PER_WORD)
	return extract_split_bit_field (op0, bitsize, bitpos,
					unsignedp, align);
    }
  else
    {
1500 /* Get the proper mode to use for this field. We want a mode that
1501 includes the entire field. If such a mode would be larger than
1502 a word, we won't be doing the extraction the normal way. */
1504 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
1505 align * BITS_PER_UNIT, word_mode,
1506 GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));
      if (mode == VOIDmode)
	/* The only way this should occur is if the field spans word
	   boundaries.  */
	return extract_split_bit_field (op0, bitsize,
					bitpos + offset * BITS_PER_UNIT,
					unsignedp, align);
1515 total_bits = GET_MODE_BITSIZE (mode);
1517 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
1518 be in the range 0 to total_bits-1, and put any excess bytes in
      if (bitpos >= total_bits)
	{
	  offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
	  bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
		     * BITS_PER_UNIT);
	}
1527 /* Get ref to an aligned byte, halfword, or word containing the field.
1528 Adjust BITPOS to be position within a word,
1529 and OFFSET to be the offset of that word.
1530 Then alter OP0 to refer to that word. */
1531 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
1532 offset -= (offset % (total_bits / BITS_PER_UNIT));
1533 op0 = change_address (op0, mode,
			    plus_constant (XEXP (op0, 0), offset));
    }
1537 mode = GET_MODE (op0);
  if (BYTES_BIG_ENDIAN)
    {
1541 /* BITPOS is the distance between our msb and that of OP0.
1542 Convert it to the distance from the lsb. */
      bitpos = total_bits - bitsize - bitpos;
    }
1547 /* Now BITPOS is always the distance between the field's lsb and that of OP0.
1548 We have reduced the big-endian case to the little-endian case. */
  if (unsignedp)
    {
      if (bitpos)
	{
	  /* If the field does not already start at the lsb,
	     shift it so it does.  */
1556 tree amount = build_int_2 (bitpos, 0);
1557 /* Maybe propagate the target for the shift. */
1558 /* But not if we will return it--could confuse integrate.c. */
1559 rtx subtarget = (target != 0 && GET_CODE (target) == REG
1560 && !REG_FUNCTION_VALUE_P (target)
1562 if (tmode != mode) subtarget = 0;
	  op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
	}
1565 /* Convert the value to the desired mode. */
      if (mode != tmode)
	op0 = convert_to_mode (tmode, op0, 1);
1569 /* Unless the msb of the field used to be the msb when we shifted,
1570 mask out the upper bits. */
1572 if (GET_MODE_BITSIZE (mode) != bitpos + bitsize
1574 #ifdef SLOW_ZERO_EXTEND
1575 /* Always generate an `and' if
1576 we just zero-extended op0 and SLOW_ZERO_EXTEND, since it
	     will combine fruitfully with the zero-extend.  */
	  || tmode != mode
#endif
	  )
	return expand_binop (GET_MODE (op0), and_optab, op0,
1583 mask_rtx (GET_MODE (op0), 0, bitsize, 0),
			     target, 1, OPTAB_LIB_WIDEN);

      return op0;
    }
1588 /* To extract a signed bit-field, first shift its msb to the msb of the word,
1589 then arithmetic-shift its lsb to the lsb of the word. */
1590 op0 = force_reg (mode, op0);
1594 /* Find the narrowest integer mode that contains the field. */
1596 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1597 mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
      {
	op0 = convert_to_mode (mode, op0, 0);
	break;
      }
  if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
    {
      tree amount
	= build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
1607 /* Maybe propagate the target for the shift. */
1608 /* But not if we will return the result--could confuse integrate.c. */
1609 rtx subtarget = (target != 0 && GET_CODE (target) == REG
1610 && ! REG_FUNCTION_VALUE_P (target)
      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }
1615 return expand_shift (RSHIFT_EXPR, mode, op0,
		       build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
		       target, 0);
}
1620 /* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
1621 of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
1622 complement of that if COMPLEMENT. The mask is truncated if
1623 necessary to the width of mode MODE. The mask is zero-extended if
1624 BITSIZE+BITPOS is too small for MODE. */
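/* For instance (illustrative): mask_rtx (SImode, 4, 3, 0) yields
   (const_int 0x70), and mask_rtx (SImode, 4, 3, 1) yields the
   complement (const_int 0xffffff8f), truncated to 32 bits.  */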
static rtx
mask_rtx (mode, bitpos, bitsize, complement)
1628 enum machine_mode mode;
     int bitpos, bitsize, complement;
{
  HOST_WIDE_INT masklow, maskhigh;
1633 if (bitpos < HOST_BITS_PER_WIDE_INT)
    masklow = (HOST_WIDE_INT) -1 << bitpos;
  else
    masklow = 0;
1638 if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
1639 masklow &= ((unsigned HOST_WIDE_INT) -1
1640 >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
  if (bitpos <= HOST_BITS_PER_WIDE_INT)
    maskhigh = -1;
  else
    maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);
1647 if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
1648 maskhigh &= ((unsigned HOST_WIDE_INT) -1
1649 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
  else
    maskhigh = 0;

  if (complement)
    {
      maskhigh = ~maskhigh;
      masklow = ~masklow;
    }
  return immed_double_const (masklow, maskhigh, mode);
}
1662 /* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
1663 VALUE truncated to BITSIZE bits and then shifted left BITPOS bits. */
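/* E.g. (illustrative): lshift_value (SImode, GEN_INT (-1), 4, 3)
   truncates -1 to the 3-bit value 7, then shifts it left 4 bits,
   producing (const_int 0x70).  */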
static rtx
lshift_value (mode, value, bitpos, bitsize)
     enum machine_mode mode;
     rtx value;
     int bitpos, bitsize;
{
1671 unsigned HOST_WIDE_INT v = INTVAL (value);
1672 HOST_WIDE_INT low, high;
1674 if (bitsize < HOST_BITS_PER_WIDE_INT)
1675 v &= ~((HOST_WIDE_INT) -1 << bitsize);
  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      low = v << bitpos;
      high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
    }
  else
    {
      low = 0;
      high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
    }
  return immed_double_const (low, high, mode);
}
1691 /* Extract a bit field that is split across two words
1692 and return an RTX for the result.
1694 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1695 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
1696 UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend.
1698 ALIGN is the known alignment of OP0, measured in bytes.
1699 This is also the size of the memory objects to be used. */
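/* Illustrative example (little-endian bit numbering, UNIT == 8): a
   10-bit field at BITPOS 6 is fetched as a 2-bit piece and then an
   8-bit piece, which the loop below recombines as

     result = piece0 | (piece1 << 2);  */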
static rtx
extract_split_bit_field (op0, bitsize, bitpos, unsignedp, align)
     rtx op0;
     int bitsize, bitpos, unsignedp, align;
{
  int unit;
  int bitsdone = 0;
  rtx result = NULL_RTX;
  int first = 1;
  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
1713 if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
1714 unit = BITS_PER_WORD;
1716 unit = MIN (align * BITS_PER_UNIT, BITS_PER_WORD);
  while (bitsdone < bitsize)
    {
      int thissize;
      rtx part, word;
      int thispos;
      int offset;
1725 offset = (bitpos + bitsdone) / unit;
1726 thispos = (bitpos + bitsdone) % unit;
1728 /* THISSIZE must not overrun a word boundary. Otherwise,
	 extract_fixed_bit_field will call us again, and we will mutually
	 recurse forever.  */
1731 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1732 thissize = MIN (thissize, unit - thispos);
1734 /* If OP0 is a register, then handle OFFSET here.
1736 When handling multiword bitfields, extract_bit_field may pass
1737 down a word_mode SUBREG of a larger REG for a bitfield that actually
1738 crosses a word boundary. Thus, for a SUBREG, we must find
1739 the current word starting from the base register. */
      if (GET_CODE (op0) == SUBREG)
	{
1742 word = operand_subword_force (SUBREG_REG (op0),
1743 SUBREG_WORD (op0) + offset,
					GET_MODE (SUBREG_REG (op0)));
	  offset = 0;
	}
1747 else if (GET_CODE (op0) == REG)
	{
	  word = operand_subword_force (op0, offset, GET_MODE (op0));
	  offset = 0;
	}
      else
	word = op0;
1755 /* Extract the parts in bit-counting order,
	 whose meaning is determined by BYTES_BIG_ENDIAN.
1757 OFFSET is in UNITs, and UNIT is in bits.
1758 extract_fixed_bit_field wants offset in bytes. */
1759 part = extract_fixed_bit_field (word_mode, word,
1760 offset * unit / BITS_PER_UNIT,
1761 thissize, thispos, 0, 1, align);
1762 bitsdone += thissize;
1764 /* Shift this part into place for the result. */
      if (BYTES_BIG_ENDIAN)
	{
1767 if (bitsize != bitsdone)
1768 part = expand_shift (LSHIFT_EXPR, word_mode, part,
				 build_int_2 (bitsize - bitsdone, 0), 0, 1);
	}
      else
	{
1773 if (bitsdone != thissize)
1774 part = expand_shift (LSHIFT_EXPR, word_mode, part,
				 build_int_2 (bitsdone - thissize, 0), 0, 1);
	}
      if (first)
	result = part;
      else
	/* Combine the parts with bitwise or.  This works
	   because we extracted each part as an unsigned bit field.  */
	result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
			       OPTAB_LIB_WIDEN);

      first = 0;
    }
  /* Unsigned bit field: we are done.  */
  if (unsignedp)
    return result;

  /* Signed bit field: sign-extend with two arithmetic shifts.  */
1793 result = expand_shift (LSHIFT_EXPR, word_mode, result,
			 build_int_2 (BITS_PER_WORD - bitsize, 0),
			 NULL_RTX, 0);
1796 return expand_shift (RSHIFT_EXPR, word_mode, result,
		       build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
}
1800 /* Add INC into TARGET. */
void
expand_inc (target, inc)
     rtx target, inc;
{
  rtx value = expand_binop (GET_MODE (target), add_optab,
			    target, inc,
			    target, 0, OPTAB_LIB_WIDEN);
1809 if (value != target)
    emit_move_insn (target, value);
}
1813 /* Subtract DEC from TARGET. */
void
expand_dec (target, dec)
     rtx target, dec;
{
  rtx value = expand_binop (GET_MODE (target), sub_optab,
			    target, dec,
			    target, 0, OPTAB_LIB_WIDEN);
1822 if (value != target)
    emit_move_insn (target, value);
}
1826 /* Output a shift instruction for expression code CODE,
1827 with SHIFTED being the rtx for the value to shift,
1828 and AMOUNT the tree for the amount to shift by.
1829 Store the result in the rtx TARGET, if that is convenient.
1830 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
1831 Return the rtx for where the value is. */
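/* A typical (illustrative) use: expand_shift (LSHIFT_EXPR, SImode, x,
   build_int_2 (3, 0), NULL_RTX, 1) emits RTL computing x << 3 as a
   logical shift and returns the rtx holding the result.  */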
rtx
expand_shift (code, mode, shifted, amount, target, unsignedp)
     enum tree_code code;
     register enum machine_mode mode;
     rtx shifted;
     tree amount;
     register rtx target;
     int unsignedp;
{
  register rtx op1, temp = 0;
1843 register int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
  register int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
  int try;
1847 /* Previously detected shift-counts computed by NEGATE_EXPR
     and shifted in the other direction; but that does not work
     on all machines.  */

  op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);
1853 #ifdef SHIFT_COUNT_TRUNCATED
1854 if (SHIFT_COUNT_TRUNCATED)
1856 if (GET_CODE (op1) == CONST_INT
1857 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
1858 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
1859 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
1860 % GET_MODE_BITSIZE (mode));
1861 else if (GET_CODE (op1) == SUBREG
1862 && SUBREG_WORD (op1) == 0)
1863 op1 = SUBREG_REG (op1);
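      /* E.g. (illustrative): on a target where SHIFT_COUNT_TRUNCATED is
	 nonzero, an SImode shift by the constant 33 is reduced above to
	 a shift by 33 % 32 == 1.  */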
  if (op1 == const0_rtx)
    return shifted;
  for (try = 0; temp == 0 && try < 3; try++)
    {
      enum optab_methods methods;
      if (try == 0)
	methods = OPTAB_DIRECT;
      else if (try == 1)
	methods = OPTAB_WIDEN;
      else
	methods = OPTAB_LIB_WIDEN;
      if (rotate)
	{
	  /* Widening does not work for rotation.  */
	  if (methods == OPTAB_WIDEN)
	    continue;
	  else if (methods == OPTAB_LIB_WIDEN)
	    {
1888 /* If we have been unable to open-code this by a rotation,
1889 do it as the IOR of two shifts. I.e., to rotate A
1890 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
1891 where C is the bitsize of A.
1893 It is theoretically possible that the target machine might
1894 not be able to perform either shift and hence we would
1895 be making two libcalls rather than just the one for the
1896 shift (similarly if IOR could not be done). We will allow
		 this extremely unlikely lossage to avoid complicating the
		 code below.  */

	      rtx subtarget = target == shifted ? 0 : target;
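	      /* Illustrative check of the identity: rotating an 8-bit
		 value A left by 3 gives (A << 3) | ((unsigned) A >> 5);
		 for A == 0xb1 that is 0x88 | 0x05 == 0x8d.  */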
1902 tree type = TREE_TYPE (amount);
1903 tree new_amount = make_tree (type, op1);
	      tree other_amount
		= fold (build (MINUS_EXPR, type,
			       convert (type,
					build_int_2 (GET_MODE_BITSIZE (mode),
						     0)),
			       amount));
1911 shifted = force_reg (mode, shifted);
1913 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
1914 mode, shifted, new_amount, subtarget, 1);
1915 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
1916 mode, shifted, other_amount, 0, 1);
1917 return expand_binop (mode, ior_optab, temp, temp1, target,
1918 unsignedp, methods);
	    }

	  temp = expand_binop (mode,
			       left ? rotl_optab : rotr_optab,
			       shifted, op1, target, unsignedp, methods);
1925 /* If we don't have the rotate, but we are rotating by a constant
1926 that is in range, try a rotate in the opposite direction. */
1928 if (temp == 0 && GET_CODE (op1) == CONST_INT
1929 && INTVAL (op1) > 0 && INTVAL (op1) < GET_MODE_BITSIZE (mode))
1930 temp = expand_binop (mode,
1931 left ? rotr_optab : rotl_optab,
				 GEN_INT (GET_MODE_BITSIZE (mode)
					  - INTVAL (op1)),
				 target, unsignedp, methods);
	}
      else if (unsignedp)
	temp = expand_binop (mode,
1939 left ? ashl_optab : lshr_optab,
1940 shifted, op1, target, unsignedp, methods);
1942 /* Do arithmetic shifts.
1943 Also, if we are going to widen the operand, we can just as well
1944 use an arithmetic right-shift instead of a logical one. */
1945 if (temp == 0 && ! rotate
1946 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
	{
	  enum optab_methods methods1 = methods;
1950 /* If trying to widen a log shift to an arithmetic shift,
1951 don't accept an arithmetic shift of the same size. */
	  if (unsignedp)
	    methods1 = OPTAB_MUST_WIDEN;
1955 /* Arithmetic shift */
1957 temp = expand_binop (mode,
1958 left ? ashl_optab : ashr_optab,
			       shifted, op1, target, unsignedp, methods1);
	}
    }
1962 /* We used to try extzv here for logical right shifts, but that was
1963 only useful for one machine, the VAX, and caused poor code
1964 generation there for lshrdi3, so the code was deleted and a
     define_expand for lshrsi3 was added to vax.md.  */

  if (temp == 0)
    abort ();
  return temp;
}
1973 enum alg_code { alg_zero, alg_m, alg_shift,
1974 alg_add_t_m2, alg_sub_t_m2,
1975 alg_add_factor, alg_sub_factor,
		alg_add_t2_m, alg_sub_t2_m };
1979 /* This structure records a sequence of operations.
1980 `ops' is the number of operations recorded.
1981 `cost' is their total cost.
1982 The operations are stored in `op' and the corresponding
1983 logarithms of the integer coefficients in `log'.
1985 These are the operations:
1986 alg_zero total := 0;
1987 alg_m total := multiplicand;
alg_shift total := total * coeff;
1989 alg_add_t_m2 total := total + multiplicand * coeff;
1990 alg_sub_t_m2 total := total - multiplicand * coeff;
1991 alg_add_factor total := total * coeff + total;
1992 alg_sub_factor total := total * coeff - total;
1993 alg_add_t2_m total := total * coeff + multiplicand;
1994 alg_sub_t2_m total := total * coeff - multiplicand;
1996 The first operand must be either alg_zero or alg_m. */
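/* For example, a multiplication by 10 might be recorded, depending on
the target's costs, as ops == 3 with
op = { alg_m, alg_add_t2_m, alg_shift }, log = { 0, 2, 1 },
i.e. total := x; total := total*4 + x; total := total*2,
computing ((x << 2) + x) << 1 == x * 10. */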
/* The sizes of the OP and LOG fields are not directly related to the
word size, but the worst case arises when the multiplicand has few
consecutive ones or zeros, i.e., a multiplicand like 10101010101...
In that case we will generate shift-by-2, add, shift-by-2, add, ...,
in total wordsize operations. */
2007 enum alg_code op[MAX_BITS_PER_WORD];
2008 char log[MAX_BITS_PER_WORD];
2011 static void synth_mult PROTO((struct algorithm *,
2012 unsigned HOST_WIDE_INT,
2014 static unsigned HOST_WIDE_INT choose_multiplier PROTO((unsigned HOST_WIDE_INT,
2016 unsigned HOST_WIDE_INT *,
2018 static unsigned HOST_WIDE_INT invert_mod2n PROTO((unsigned HOST_WIDE_INT,
2020 /* Compute and return the best algorithm for multiplying by T.
The algorithm must cost less than COST_LIMIT.
If retval.cost >= COST_LIMIT, no algorithm was found and all
other fields of the returned struct are undefined. */
2026 synth_mult (alg_out, t, cost_limit)
2027 struct algorithm *alg_out;
2028 unsigned HOST_WIDE_INT t;
2032 struct algorithm *alg_in, *best_alg;
2034 unsigned HOST_WIDE_INT q;
/* Indicate that no algorithm is yet found. If no algorithm
is found, this value will be returned and will indicate failure. */
2038 alg_out->cost = cost_limit;
2040 if (cost_limit <= 0)
/* t == 1 can be done at zero cost. */
2048 alg_out->op[0] = alg_m;
/* t == 0 sometimes has a cost. If it does and it exceeds our limit, fail now. */
2056 if (zero_cost >= cost_limit)
2061 alg_out->cost = zero_cost;
2062 alg_out->op[0] = alg_zero;
/* We'll be needing a couple of extra algorithm structures now. */
2069 alg_in = (struct algorithm *)alloca (sizeof (struct algorithm));
2070 best_alg = (struct algorithm *)alloca (sizeof (struct algorithm));
2072 /* If we have a group of zero bits at the low-order part of T, try
2073 multiplying by the remaining bits and then doing a shift. */
2077 m = floor_log2 (t & -t); /* m = number of low zero bits */
2079 cost = shift_cost[m];
2080 synth_mult (alg_in, q, cost_limit - cost);
2082 cost += alg_in->cost;
2083 if (cost < cost_limit)
2085 struct algorithm *x;
2086 x = alg_in, alg_in = best_alg, best_alg = x;
2087 best_alg->log[best_alg->ops] = m;
2088 best_alg->op[best_alg->ops] = alg_shift;
2093 /* If we have an odd number, add or subtract one. */
2096 unsigned HOST_WIDE_INT w;
2098 for (w = 1; (w & t) != 0; w <<= 1)
/* If T was -1, then W will be zero after the loop. This is another
case where T ends with ...111. Handling this by multiplying by (T + 1)
and subtracting 1 produces slightly better code and makes algorithm
selection much faster than treating it like the ...0111 case
below. */
/* Reject the case where t is 3,
since we prefer addition in that case. */
2111 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2114 synth_mult (alg_in, t + 1, cost_limit - cost);
2116 cost += alg_in->cost;
2117 if (cost < cost_limit)
2119 struct algorithm *x;
2120 x = alg_in, alg_in = best_alg, best_alg = x;
2121 best_alg->log[best_alg->ops] = 0;
2122 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2128 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2131 synth_mult (alg_in, t - 1, cost_limit - cost);
2133 cost += alg_in->cost;
2134 if (cost < cost_limit)
2136 struct algorithm *x;
2137 x = alg_in, alg_in = best_alg, best_alg = x;
2138 best_alg->log[best_alg->ops] = 0;
2139 best_alg->op[best_alg->ops] = alg_add_t_m2;
/* Look for factors of t of the form
t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
If we find such a factor, we can multiply by t using an algorithm that
multiplies by q, shifts the result left by m, and adds or subtracts it
from itself.

We search for large factors first and loop down, even if large factors
are less probable than small ones; if we find a large factor we will
find a good sequence quickly, and therefore be able to prune (by
decreasing COST_LIMIT) the search. */
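/* For example, t = 45 factors as 5 * (2**3 + 1): the recursive call
synthesizes x*5 as (x << 2) + x, and the alg_add_factor step with
m == 3 then computes x*45 as ((x*5) << 3) + x*5. */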
2155 for (m = floor_log2 (t - 1); m >= 2; m--)
2157 unsigned HOST_WIDE_INT d;
2159 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2160 if (t % d == 0 && t > d)
2162 cost = MIN (shiftadd_cost[m], add_cost + shift_cost[m]);
2163 synth_mult (alg_in, t / d, cost_limit - cost);
2165 cost += alg_in->cost;
2166 if (cost < cost_limit)
2168 struct algorithm *x;
2169 x = alg_in, alg_in = best_alg, best_alg = x;
2170 best_alg->log[best_alg->ops] = m;
2171 best_alg->op[best_alg->ops] = alg_add_factor;
2174 /* Other factors will have been taken care of in the recursion. */
2178 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2179 if (t % d == 0 && t > d)
2181 cost = MIN (shiftsub_cost[m], add_cost + shift_cost[m]);
2182 synth_mult (alg_in, t / d, cost_limit - cost);
2184 cost += alg_in->cost;
2185 if (cost < cost_limit)
2187 struct algorithm *x;
2188 x = alg_in, alg_in = best_alg, best_alg = x;
2189 best_alg->log[best_alg->ops] = m;
2190 best_alg->op[best_alg->ops] = alg_sub_factor;
2197 /* Try shift-and-add (load effective address) instructions,
2198 i.e. do a*3, a*5, a*9. */
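/* For example, for t = 9 we have (t - 1) >> 3 == 1, so the sequence is
total := x (alg_m) followed by alg_add_t2_m with log == 3, i.e.
(x << 3) + x. */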
2206 cost = shiftadd_cost[m];
2207 synth_mult (alg_in, (t - 1) >> m, cost_limit - cost);
2209 cost += alg_in->cost;
2210 if (cost < cost_limit)
2212 struct algorithm *x;
2213 x = alg_in, alg_in = best_alg, best_alg = x;
2214 best_alg->log[best_alg->ops] = m;
2215 best_alg->op[best_alg->ops] = alg_add_t2_m;
2225 cost = shiftsub_cost[m];
2226 synth_mult (alg_in, (t + 1) >> m, cost_limit - cost);
2228 cost += alg_in->cost;
2229 if (cost < cost_limit)
2231 struct algorithm *x;
2232 x = alg_in, alg_in = best_alg, best_alg = x;
2233 best_alg->log[best_alg->ops] = m;
2234 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2240 /* If cost_limit has not decreased since we stored it in alg_out->cost,
2241 we have not found any algorithm. */
2242 if (cost_limit == alg_out->cost)
/* If the sequence we found is too long for `struct algorithm'
to record, make this search fail. */
2247 if (best_alg->ops == MAX_BITS_PER_WORD)
2250 /* Copy the algorithm from temporary space to the space at alg_out.
2251 We avoid using structure assignment because the majority of
2252 best_alg is normally undefined, and this is a critical function. */
2253 alg_out->ops = best_alg->ops + 1;
2254 alg_out->cost = cost_limit;
2255 bcopy ((char *) best_alg->op, (char *) alg_out->op,
2256 alg_out->ops * sizeof *alg_out->op);
2257 bcopy ((char *) best_alg->log, (char *) alg_out->log,
2258 alg_out->ops * sizeof *alg_out->log);
2261 /* Perform a multiplication and return an rtx for the result.
MODE is the mode of the value; OP0 and OP1 are what to multiply (rtx's);
2263 TARGET is a suggestion for where to store the result (an rtx).
2265 We check specially for a constant integer as OP1.
2266 If you want this check for OP0 as well, then before calling
2267 you should swap the two operands if OP0 would be constant. */
2270 expand_mult (mode, op0, op1, target, unsignedp)
2271 enum machine_mode mode;
2272 register rtx op0, op1, target;
2275 rtx const_op1 = op1;
2277 /* synth_mult does an `unsigned int' multiply. As long as the mode is
2278 less than or equal in size to `unsigned int' this doesn't matter.
2279 If the mode is larger than `unsigned int', then synth_mult works only
2280 if the constant value exactly fits in an `unsigned int' without any
2281 truncation. This means that multiplying by negative values does
not work; results are off by 2^32 on a 32-bit machine. */
2284 /* If we are multiplying in DImode, it may still be a win
2285 to try to work with shifts and adds. */
2286 if (GET_CODE (op1) == CONST_DOUBLE
2287 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
2288 && HOST_BITS_PER_INT >= BITS_PER_WORD
2289 && CONST_DOUBLE_HIGH (op1) == 0)
2290 const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
2291 else if (HOST_BITS_PER_INT < GET_MODE_BITSIZE (mode)
2292 && GET_CODE (op1) == CONST_INT
2293 && INTVAL (op1) < 0)
2296 /* We used to test optimize here, on the grounds that it's better to
2297 produce a smaller program when -O is not used.
2298 But this causes such a terrible slowdown sometimes
2299 that it seems better to use synth_mult always. */
2301 if (const_op1 && GET_CODE (const_op1) == CONST_INT)
2303 struct algorithm alg;
2304 struct algorithm alg2;
2305 HOST_WIDE_INT val = INTVAL (op1);
2306 HOST_WIDE_INT val_so_far;
2309 enum {basic_variant, negate_variant, add_variant} variant = basic_variant;
2311 /* Try to do the computation three ways: multiply by the negative of OP1
and then negate, do the multiplication directly, or do the multiplication by OP1 - 1 and then add OP1. */
2315 mult_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET);
2316 mult_cost = MIN (12 * add_cost, mult_cost);
2318 synth_mult (&alg, val, mult_cost);
/* This works only if the inverted value actually fits in an `unsigned int'. */
2322 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2324 synth_mult (&alg2, - val,
2325 (alg.cost < mult_cost ? alg.cost : mult_cost) - negate_cost);
2326 if (alg2.cost + negate_cost < alg.cost)
2327 alg = alg2, variant = negate_variant;
2330 /* This proves very useful for division-by-constant. */
2331 synth_mult (&alg2, val - 1,
2332 (alg.cost < mult_cost ? alg.cost : mult_cost) - add_cost);
2333 if (alg2.cost + add_cost < alg.cost)
2334 alg = alg2, variant = add_variant;
2336 if (alg.cost < mult_cost)
2338 /* We found something cheaper than a multiply insn. */
2342 op0 = protect_from_queue (op0, 0);
2344 /* Avoid referencing memory over and over.
2345 For speed, but also for correctness when mem is volatile. */
2346 if (GET_CODE (op0) == MEM)
2347 op0 = force_reg (mode, op0);
2349 /* ACCUM starts out either as OP0 or as a zero, depending on
2350 the first operation. */
2352 if (alg.op[0] == alg_zero)
2354 accum = copy_to_mode_reg (mode, const0_rtx);
2357 else if (alg.op[0] == alg_m)
2359 accum = copy_to_mode_reg (mode, op0);
2365 for (opno = 1; opno < alg.ops; opno++)
2367 int log = alg.log[opno];
2368 int preserve = preserve_subexpressions_p ();
2369 rtx shift_subtarget = preserve ? 0 : accum;
2371 = (opno == alg.ops - 1 && target != 0 && variant != add_variant
2374 rtx accum_target = preserve ? 0 : accum;
2376 switch (alg.op[opno])
2379 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2380 build_int_2 (log, 0), NULL_RTX, 0);
2385 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2386 build_int_2 (log, 0), NULL_RTX, 0);
2387 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2388 add_target ? add_target : accum_target);
2389 val_so_far += (HOST_WIDE_INT) 1 << log;
2393 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2394 build_int_2 (log, 0), NULL_RTX, 0);
2395 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2396 add_target ? add_target : accum_target);
2397 val_so_far -= (HOST_WIDE_INT) 1 << log;
2401 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2402 build_int_2 (log, 0), shift_subtarget,
2404 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2405 add_target ? add_target : accum_target);
2406 val_so_far = (val_so_far << log) + 1;
2410 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2411 build_int_2 (log, 0), shift_subtarget,
2413 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2414 add_target ? add_target : accum_target);
2415 val_so_far = (val_so_far << log) - 1;
2418 case alg_add_factor:
2419 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2420 build_int_2 (log, 0), NULL_RTX, 0);
2421 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2422 add_target ? add_target : accum_target);
2423 val_so_far += val_so_far << log;
2426 case alg_sub_factor:
2427 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2428 build_int_2 (log, 0), NULL_RTX, 0);
2429 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
2430 (add_target ? add_target
2431 : preserve ? 0 : tem));
2432 val_so_far = (val_so_far << log) - val_so_far;
2439 /* Write a REG_EQUAL note on the last insn so that we can cse
2440 multiplication sequences. */
2442 insn = get_last_insn ();
2444 = gen_rtx_EXPR_LIST (REG_EQUAL,
2445 gen_rtx_MULT (mode, op0, GEN_INT (val_so_far)),
2449 if (variant == negate_variant)
2451 val_so_far = - val_so_far;
2452 accum = expand_unop (mode, neg_optab, accum, target, 0);
2454 else if (variant == add_variant)
2456 val_so_far = val_so_far + 1;
2457 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
2460 if (val != val_so_far)
2467 /* This used to use umul_optab if unsigned, but for non-widening multiply
2468 there is no difference between signed and unsigned. */
2469 op0 = expand_binop (mode, smul_optab,
2470 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
2476 /* Return the smallest n such that 2**n >= X. */
2480 unsigned HOST_WIDE_INT x;
2482 return floor_log2 (x - 1) + 1;
2485 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
2486 replace division by D, and put the least significant N bits of the result
2487 in *MULTIPLIER_PTR and return the most significant bit.
2489 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
2490 needed precision is in PRECISION (should be <= N).
PRECISION should be as small as possible so this function can choose
the multiplier more freely.

The rounded-up logarithm of D is placed in *LGUP_PTR. A shift count that
is to be used for a final right shift is placed in *POST_SHIFT_PTR.
2498 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
2499 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
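/* An illustrative example, assuming N == 32 and PRECISION == 32: for
d == 7 we get lgup == 3, mlow == 2^35/7 == 4908534052 and
mhigh == (2^35 + 2^3)/7 == 4908534053. Halving both gives equal
values, so no reduction happens: *POST_SHIFT_PTR == 3,
*MULTIPLIER_PTR holds the low 32 bits 0x24924925, and the return
value is 1 since the full multiplier needs N + 1 bits. */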
2502 unsigned HOST_WIDE_INT
2503 choose_multiplier (d, n, precision, multiplier_ptr, post_shift_ptr, lgup_ptr)
2504 unsigned HOST_WIDE_INT d;
2507 unsigned HOST_WIDE_INT *multiplier_ptr;
2508 int *post_shift_ptr;
2511 unsigned HOST_WIDE_INT mhigh_hi, mhigh_lo;
2512 unsigned HOST_WIDE_INT mlow_hi, mlow_lo;
2513 int lgup, post_shift;
2515 unsigned HOST_WIDE_INT nh, nl, dummy1, dummy2;
2517 /* lgup = ceil(log2(divisor)); */
2518 lgup = ceil_log2 (d);
pow = n + lgup;
pow2 = n + lgup - precision;
2526 if (pow == 2 * HOST_BITS_PER_WIDE_INT)
2528 /* We could handle this with some effort, but this case is much better
2529 handled directly with a scc insn, so rely on caller using that. */
2533 /* mlow = 2^(N + lgup)/d */
2534 if (pow >= HOST_BITS_PER_WIDE_INT)
2536 nh = (unsigned HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
2542 nl = (unsigned HOST_WIDE_INT) 1 << pow;
2544 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2545 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
/* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
2548 if (pow2 >= HOST_BITS_PER_WIDE_INT)
2549 nh |= (unsigned HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
2551 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
2552 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2553 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
2555 if (mhigh_hi && nh - d >= d)
2557 if (mhigh_hi > 1 || mlow_hi > 1)
2559 /* assert that mlow < mhigh. */
2560 if (! (mlow_hi < mhigh_hi || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo)))
2563 /* If precision == N, then mlow, mhigh exceed 2^N
2564 (but they do not exceed 2^(N+1)). */
2566 /* Reduce to lowest terms */
2567 for (post_shift = lgup; post_shift > 0; post_shift--)
2569 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
2570 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
2580 *post_shift_ptr = post_shift;
2582 if (n < HOST_BITS_PER_WIDE_INT)
2584 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
2585 *multiplier_ptr = mhigh_lo & mask;
2586 return mhigh_lo >= mask;
2590 *multiplier_ptr = mhigh_lo;
/* Compute the inverse of X mod 2**N, i.e., find Y such that X * Y is
congruent to 1 (mod 2**N). */
2598 static unsigned HOST_WIDE_INT
2600 unsigned HOST_WIDE_INT x;
2603 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
2605 /* The algorithm notes that the choice y = x satisfies
2606 x*y == 1 mod 2^3, since x is assumed odd.
2607 Each iteration doubles the number of bits of significance in y. */
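/* For example, with x == 3 and n == 8: y starts as 3, which is correct
mod 2^3 (3*3 == 9 == 1 mod 8), and the iteration below converges to
y == 171, for which 3*171 == 513 == 2*256 + 1 == 1 (mod 2^8). */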
2609 unsigned HOST_WIDE_INT mask;
2610 unsigned HOST_WIDE_INT y = x;
2613 mask = (n == HOST_BITS_PER_WIDE_INT
2614 ? ~(unsigned HOST_WIDE_INT) 0
2615 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
2619 y = y * (2 - x*y) & mask; /* Modulo 2^N */
/* Emit code to adjust ADJ_OPERAND after a multiplication of the wrong
signedness flavor of OP0 and OP1. ADJ_OPERAND is already the high half of
the product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
to become unsigned; if UNSIGNEDP is zero, adjust the unsigned product to
become signed.
2631 The result is put in TARGET if that is convenient.
2633 MODE is the mode of operation. */
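/* The identity used: reading all values in MODE, the unsigned high part
equals the signed high part plus (OP0 < 0 ? OP1 : 0) plus
(OP1 < 0 ? OP0 : 0). The arithmetic right shift by size - 1 below
yields all ones exactly when an operand is negative, so ANDing it with
the other operand selects the term to add or subtract. */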
2636 expand_mult_highpart_adjust (mode, adj_operand, op0, op1, target, unsignedp)
2637 enum machine_mode mode;
2638 register rtx adj_operand, op0, op1, target;
2642 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
2644 tem = expand_shift (RSHIFT_EXPR, mode, op0,
2645 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2647 tem = expand_and (tem, op1, NULL_RTX);
2649 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2652 tem = expand_shift (RSHIFT_EXPR, mode, op1,
2653 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2655 tem = expand_and (tem, op0, NULL_RTX);
2656 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2662 /* Emit code to multiply OP0 and CNST1, putting the high half of the result
2663 in TARGET if that is convenient, and return where the result is. If the
operation cannot be performed, 0 is returned.
2666 MODE is the mode of operation and result.
2668 UNSIGNEDP nonzero means unsigned multiply.
2670 MAX_COST is the total allowed cost for the expanded RTL. */
2673 expand_mult_highpart (mode, op0, cnst1, target, unsignedp, max_cost)
2674 enum machine_mode mode;
2675 register rtx op0, target;
2676 unsigned HOST_WIDE_INT cnst1;
2680 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
2681 optab mul_highpart_optab;
2684 int size = GET_MODE_BITSIZE (mode);
/* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
2688 if (size > HOST_BITS_PER_WIDE_INT)
2691 op1 = GEN_INT (cnst1);
2693 if (GET_MODE_BITSIZE (wider_mode) <= HOST_BITS_PER_INT)
2697 = immed_double_const (cnst1,
2700 : -(cnst1 >> (HOST_BITS_PER_WIDE_INT - 1))),
2703 /* expand_mult handles constant multiplication of word_mode
2704 or narrower. It does a poor job for large modes. */
2705 if (size < BITS_PER_WORD
2706 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2708 /* We have to do this, since expand_binop doesn't do conversion for
2709 multiply. Maybe change expand_binop to handle widening multiply? */
2710 op0 = convert_to_mode (wider_mode, op0, unsignedp);
2712 tem = expand_mult (wider_mode, op0, wide_op1, NULL_RTX, unsignedp);
2713 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2714 build_int_2 (size, 0), NULL_RTX, 1);
2715 return convert_modes (mode, wider_mode, tem, unsignedp);
2719 target = gen_reg_rtx (mode);
2721 /* Firstly, try using a multiplication insn that only generates the needed
2722 high part of the product, and in the sign flavor of unsignedp. */
2723 if (mul_highpart_cost[(int) mode] < max_cost)
2725 mul_highpart_optab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
2726 target = expand_binop (mode, mul_highpart_optab,
2727 op0, wide_op1, target, unsignedp, OPTAB_DIRECT);
2732 /* Secondly, same as above, but use sign flavor opposite of unsignedp.
2733 Need to adjust the result after the multiplication. */
2734 if (mul_highpart_cost[(int) mode] + 2 * shift_cost[size-1] + 4 * add_cost < max_cost)
2736 mul_highpart_optab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
2737 target = expand_binop (mode, mul_highpart_optab,
2738 op0, wide_op1, target, unsignedp, OPTAB_DIRECT);
2740 /* We used the wrong signedness. Adjust the result. */
2741 return expand_mult_highpart_adjust (mode, target, op0,
2742 op1, target, unsignedp);
2745 /* Try widening multiplication. */
2746 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
2747 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2748 && mul_widen_cost[(int) wider_mode] < max_cost)
2750 op1 = force_reg (mode, op1);
/* Try widening the mode and performing a non-widening multiplication. */
2755 moptab = smul_optab;
2756 if (smul_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2757 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2763 /* Try widening multiplication of opposite signedness, and adjust. */
2764 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
2765 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2766 && (mul_widen_cost[(int) wider_mode]
2767 + 2 * shift_cost[size-1] + 4 * add_cost < max_cost))
2769 rtx regop1 = force_reg (mode, op1);
2770 tem = expand_binop (wider_mode, moptab, op0, regop1,
2771 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
2774 /* Extract the high half of the just generated product. */
2775 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2776 build_int_2 (size, 0), NULL_RTX, 1);
2777 tem = convert_modes (mode, wider_mode, tem, unsignedp);
2778 /* We used the wrong signedness. Adjust the result. */
2779 return expand_mult_highpart_adjust (mode, tem, op0, op1,
/* Pass NULL_RTX as target since TARGET has the wrong mode. */
2788 tem = expand_binop (wider_mode, moptab, op0, op1,
2789 NULL_RTX, unsignedp, OPTAB_WIDEN);
2793 /* Extract the high half of the just generated product. */
2794 if (mode == word_mode)
2796 return gen_highpart (mode, tem);
2800 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2801 build_int_2 (size, 0), NULL_RTX, 1);
2802 return convert_modes (mode, wider_mode, tem, unsignedp);
2806 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
2807 if that is convenient, and returning where the result is.
2808 You may request either the quotient or the remainder as the result;
2809 specify REM_FLAG nonzero to get the remainder.
2811 CODE is the expression code for which kind of division this is;
2812 it controls how rounding is done. MODE is the machine mode to use.
2813 UNSIGNEDP nonzero means do unsigned division. */
2815 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
2816 and then correct it by or'ing in missing high bits
2817 if result of ANDI is nonzero.
2818 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
2819 This could optimize to a bfexts instruction.
But C doesn't use these operations, so their optimizations are left for later. */
2823 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
2826 expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
2828 enum tree_code code;
2829 enum machine_mode mode;
2830 register rtx op0, op1, target;
2833 enum machine_mode compute_mode;
2834 register rtx tquotient;
2835 rtx quotient = 0, remainder = 0;
2839 optab optab1, optab2;
2840 int op1_is_constant, op1_is_pow2;
2841 int max_cost, extra_cost;
2842 static HOST_WIDE_INT last_div_const = 0;
2844 op1_is_constant = GET_CODE (op1) == CONST_INT;
2845 op1_is_pow2 = (op1_is_constant
2846 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
2847 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
2850 This is the structure of expand_divmod:
2852 First comes code to fix up the operands so we can perform the operations
2853 correctly and efficiently.
2855 Second comes a switch statement with code specific for each rounding mode.
2856 For some special operands this code emits all RTL for the desired
2857 operation, for other cases, it generates only a quotient and stores it in
2858 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
2859 to indicate that it has not done anything.
2861 Last comes code that finishes the operation. If QUOTIENT is set and
2862 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
2863 QUOTIENT is not set, it is computed using trunc rounding.
2865 We try to generate special code for division and remainder when OP1 is a
2866 constant. If |OP1| = 2**n we can use shifts and some other fast
2867 operations. For other values of OP1, we compute a carefully selected
fixed-point approximation m = 1/OP1, and generate code that multiplies OP0 by m.
2871 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
2872 half of the product. Different strategies for generating the product are
2873 implemented in expand_mult_highpart.
2875 If what we actually want is the remainder, we generate that by another
2876 by-constant multiplication and a subtraction. */
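/* For instance, on a typical 32-bit target an unsigned x / 7 uses the
multiplier 0x24924925 with post-shift 3 (see choose_multiplier); the
full multiplier needs 33 bits, so the emitted code is roughly
t1 = high half of x * 0x24924925
q = (((x - t1) >> 1) + t1) >> 2
where the add-and-halve step accounts for the multiplier's 33rd bit. */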
2878 /* We shouldn't be called with OP1 == const1_rtx, but some of the
2879 code below will malfunction if we are, so check here and handle
2880 the special case if so. */
2881 if (op1 == const1_rtx)
2882 return rem_flag ? const0_rtx : op0;
2885 /* Don't use the function value register as a target
2886 since we have to read it as well as write it,
2887 and function-inlining gets confused by this. */
2888 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
2889 /* Don't clobber an operand while doing a multi-step calculation. */
2890 || ((rem_flag || op1_is_constant)
2891 && (reg_mentioned_p (target, op0)
2892 || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
2893 || reg_mentioned_p (target, op1)
2894 || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM)))
2897 /* Get the mode in which to perform this computation. Normally it will
2898 be MODE, but sometimes we can't do the desired operation in MODE.
2899 If so, pick a wider mode in which we can do the operation. Convert
2900 to that mode at the start to avoid repeated conversions.
2902 First see what operations we need. These depend on the expression
2903 we are evaluating. (We assume that divxx3 insns exist under the
same conditions as modxx3 insns do, and that these insns don't normally
2905 fail. If these assumptions are not correct, we may generate less
2906 efficient code in some cases.)
2908 Then see if we find a mode in which we can open-code that operation
2909 (either a division, modulus, or shift). Finally, check for the smallest
2910 mode for which we can do the operation with a library call. */
2912 /* We might want to refine this now that we have division-by-constant
2913 optimization. Since expand_mult_highpart tries so many variants, it is
2914 not straightforward to generalize this. Maybe we should make an array
2915 of possible modes in init_expmed? Save this for GCC 2.7. */
2917 optab1 = (op1_is_pow2 ? (unsignedp ? lshr_optab : ashr_optab)
2918 : (unsignedp ? udiv_optab : sdiv_optab));
2919 optab2 = (op1_is_pow2 ? optab1 : (unsignedp ? udivmod_optab : sdivmod_optab));
2921 for (compute_mode = mode; compute_mode != VOIDmode;
2922 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
2923 if (optab1->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing
2924 || optab2->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing)
2927 if (compute_mode == VOIDmode)
2928 for (compute_mode = mode; compute_mode != VOIDmode;
2929 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
2930 if (optab1->handlers[(int) compute_mode].libfunc
2931 || optab2->handlers[(int) compute_mode].libfunc)
/* If we still couldn't find a mode, use MODE, but we'll probably abort in expand_binop. */
2936 if (compute_mode == VOIDmode)
2937 compute_mode = mode;
2939 if (target && GET_MODE (target) == compute_mode)
2942 tquotient = gen_reg_rtx (compute_mode);
2944 size = GET_MODE_BITSIZE (compute_mode);
2946 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
2947 (mode), and thereby get better code when OP1 is a constant. Do that
2948 later. It will require going over all usages of SIZE below. */
2949 size = GET_MODE_BITSIZE (mode);
2952 /* Only deduct something for a REM if the last divide done was
for a different constant. Then set the constant of the last divide. */
2955 max_cost = div_cost[(int) compute_mode]
2956 - (rem_flag && ! (last_div_const != 0 && op1_is_constant
2957 && INTVAL (op1) == last_div_const)
2958 ? mul_cost[(int) compute_mode] + add_cost : 0);
2960 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
2962 /* Now convert to the best mode to use. */
2963 if (compute_mode != mode)
2965 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
2966 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
2968 /* convert_modes may have placed op1 into a register, so we
2969 must recompute the following. */
2970 op1_is_constant = GET_CODE (op1) == CONST_INT;
2971 op1_is_pow2 = (op1_is_constant
2972 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
2974 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1)))))) ;
2977 /* If one of the operands is a volatile MEM, copy it into a register. */
2979 if (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
2980 op0 = force_reg (compute_mode, op0);
2981 if (GET_CODE (op1) == MEM && MEM_VOLATILE_P (op1))
2982 op1 = force_reg (compute_mode, op1);
2984 /* If we need the remainder or if OP1 is constant, we need to
2985 put OP0 in a register in case it has any queued subexpressions. */
2986 if (rem_flag || op1_is_constant)
2987 op0 = force_reg (compute_mode, op0);
2989 last = get_last_insn ();
2991 /* Promote floor rounding to trunc rounding for unsigned operations. */
2994 if (code == FLOOR_DIV_EXPR)
2995 code = TRUNC_DIV_EXPR;
2996 if (code == FLOOR_MOD_EXPR)
2997 code = TRUNC_MOD_EXPR;
2998 if (code == EXACT_DIV_EXPR && op1_is_pow2)
2999 code = TRUNC_DIV_EXPR;
3002 if (op1 != const0_rtx)
3005 case TRUNC_MOD_EXPR:
3006 case TRUNC_DIV_EXPR:
3007 if (op1_is_constant)
3011 unsigned HOST_WIDE_INT mh, ml;
3012 int pre_shift, post_shift;
3014 unsigned HOST_WIDE_INT d = INTVAL (op1);
3016 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3018 pre_shift = floor_log2 (d);
3022 = expand_binop (compute_mode, and_optab, op0,
3023 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3027 return gen_lowpart (mode, remainder);
3029 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3030 build_int_2 (pre_shift, 0),
3033 else if (size <= HOST_BITS_PER_WIDE_INT)
3035 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
/* Most significant bit of divisor is set; emit an scc insn. */
3039 quotient = emit_store_flag (tquotient, GEU, op0, op1,
3040 compute_mode, 1, 1);
3046 /* Find a suitable multiplier and right shift count
3047 instead of multiplying with D. */
3049 mh = choose_multiplier (d, size, size,
3050 &ml, &post_shift, &dummy);
3052 /* If the suggested multiplier is more than SIZE bits,
3053 we can do better for even divisors, using an
3054 initial right shift. */
3055 if (mh != 0 && (d & 1) == 0)
3057 pre_shift = floor_log2 (d & -d);
3058 mh = choose_multiplier (d >> pre_shift, size,
3060 &ml, &post_shift, &dummy);
3071 extra_cost = (shift_cost[post_shift - 1]
3072 + shift_cost[1] + 2 * add_cost);
3073 t1 = expand_mult_highpart (compute_mode, op0, ml,
3075 max_cost - extra_cost);
3078 t2 = force_operand (gen_rtx_MINUS (compute_mode,
3081 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3082 build_int_2 (1, 0), NULL_RTX,1);
3083 t4 = force_operand (gen_rtx_PLUS (compute_mode,
3087 = expand_shift (RSHIFT_EXPR, compute_mode, t4,
3088 build_int_2 (post_shift - 1, 0),
3095 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3096 build_int_2 (pre_shift, 0),
3098 extra_cost = (shift_cost[pre_shift]
3099 + shift_cost[post_shift]);
3100 t2 = expand_mult_highpart (compute_mode, t1, ml,
3102 max_cost - extra_cost);
3106 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3107 build_int_2 (post_shift, 0),
3112 else /* Too wide mode to use tricky code */
3115 insn = get_last_insn ();
3117 && (set = single_set (insn)) != 0
3118 && SET_DEST (set) == quotient)
3120 = gen_rtx_EXPR_LIST (REG_EQUAL,
3121 gen_rtx_UDIV (compute_mode, op0, op1),
3124 else /* TRUNC_DIV, signed */
3126 unsigned HOST_WIDE_INT ml;
3127 int lgup, post_shift;
3128 HOST_WIDE_INT d = INTVAL (op1);
3129 unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;
3131 /* n rem d = n rem -d */
3132 if (rem_flag && d < 0)
3135 op1 = GEN_INT (abs_d);
3141 quotient = expand_unop (compute_mode, neg_optab, op0,
3143 else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
3145 /* This case is not handled correctly below. */
3146 quotient = emit_store_flag (tquotient, EQ, op0, op1,
3147 compute_mode, 1, 1);
3151 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
3152 && (rem_flag ? smod_pow2_cheap : sdiv_pow2_cheap))
3154 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
3156 lgup = floor_log2 (abs_d);
3157 if (abs_d != 2 && BRANCH_COST < 3)
3159 rtx label = gen_label_rtx ();
3162 t1 = copy_to_mode_reg (compute_mode, op0);
3163 do_cmp_and_jump (t1, const0_rtx, GE,
3164 compute_mode, label);
3165 expand_inc (t1, GEN_INT (abs_d - 1));
3167 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3168 build_int_2 (lgup, 0),
3174 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3175 build_int_2 (size - 1, 0),
3177 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3178 build_int_2 (size - lgup, 0),
3180 t3 = force_operand (gen_rtx_PLUS (compute_mode,
3183 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3184 build_int_2 (lgup, 0),
/* We have computed OP0 / abs(OP1). If OP1 is negative, negate the quotient. */
3192 insn = get_last_insn ();
3194 && (set = single_set (insn)) != 0
3195 && SET_DEST (set) == quotient)
3197 = gen_rtx_EXPR_LIST (REG_EQUAL,
3198 gen_rtx_DIV (compute_mode,
3203 quotient = expand_unop (compute_mode, neg_optab,
3204 quotient, quotient, 0);
3207 else if (size <= HOST_BITS_PER_WIDE_INT)
3209 choose_multiplier (abs_d, size, size - 1,
3210 &ml, &post_shift, &lgup);
3211 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
3215 extra_cost = (shift_cost[post_shift]
3216 + shift_cost[size - 1] + add_cost);
3217 t1 = expand_mult_highpart (compute_mode, op0, ml,
3219 max_cost - extra_cost);
3222 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3223 build_int_2 (post_shift, 0), NULL_RTX, 0);
3224 t3 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3225 build_int_2 (size - 1, 0), NULL_RTX, 0);
3227 quotient = force_operand (gen_rtx_MINUS (compute_mode, t3, t2),
3230 quotient = force_operand (gen_rtx_MINUS (compute_mode, t2, t3),
3237 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
3238 extra_cost = (shift_cost[post_shift]
3239 + shift_cost[size - 1] + 2 * add_cost);
3240 t1 = expand_mult_highpart (compute_mode, op0, ml,
3242 max_cost - extra_cost);
3245 t2 = force_operand (gen_rtx_PLUS (compute_mode, t1, op0),
3247 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3248 build_int_2 (post_shift, 0), NULL_RTX, 0);
3249 t4 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3250 build_int_2 (size - 1, 0), NULL_RTX, 0);
3252 quotient = force_operand (gen_rtx_MINUS (compute_mode, t4, t3),
3255 quotient = force_operand (gen_rtx_MINUS (compute_mode, t3, t4),
3259 else /* Too wide mode to use tricky code */
3262 insn = get_last_insn ();
3264 && (set = single_set (insn)) != 0
3265 && SET_DEST (set) == quotient)
3267 = gen_rtx_EXPR_LIST (REG_EQUAL,
3268 gen_rtx_DIV (compute_mode, op0, op1),
3274 delete_insns_since (last);
3277 case FLOOR_DIV_EXPR:
3278 case FLOOR_MOD_EXPR:
3279 /* We will come here only for signed operations. */
3280 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3282 unsigned HOST_WIDE_INT mh, ml;
3283 int pre_shift, lgup, post_shift;
3284 HOST_WIDE_INT d = INTVAL (op1);
3288 /* We could just as easily deal with negative constants here,
3289 but it does not seem worth the trouble for GCC 2.6. */
3290 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3292 pre_shift = floor_log2 (d);
3295 remainder = expand_binop (compute_mode, and_optab, op0,
3296 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3297 remainder, 0, OPTAB_LIB_WIDEN);
3299 return gen_lowpart (mode, remainder);
3301 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3302 build_int_2 (pre_shift, 0),
3309 mh = choose_multiplier (d, size, size - 1,
3310 &ml, &post_shift, &lgup);
3314 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3315 build_int_2 (size - 1, 0), NULL_RTX, 0);
3316 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
3317 NULL_RTX, 0, OPTAB_WIDEN);
3318 extra_cost = (shift_cost[post_shift]
3319 + shift_cost[size - 1] + 2 * add_cost);
3320 t3 = expand_mult_highpart (compute_mode, t2, ml,
3322 max_cost - extra_cost);
3325 t4 = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3326 build_int_2 (post_shift, 0),
3328 quotient = expand_binop (compute_mode, xor_optab,
3329 t4, t1, tquotient, 0,
3336 rtx nsign, t1, t2, t3, t4;
3337 t1 = force_operand (gen_rtx_PLUS (compute_mode,
3338 op0, constm1_rtx), NULL_RTX);
3339 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
3341 nsign = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3342 build_int_2 (size - 1, 0), NULL_RTX, 0);
3343 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
3345 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
3350 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
3352 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3361 delete_insns_since (last);
3363 /* Try using an instruction that produces both the quotient and
3364 remainder, using truncation. We can easily compensate the quotient
3365 or remainder to get floor rounding, once we have the remainder.
Note that we also compute the final remainder value here,
and return the result right away. */
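/* The compensation rests on the fact that the trunc quotient Q and
remainder R satisfy floor(OP0/OP1) == Q - 1 and the floor-mode
remainder == R + OP1 whenever R is nonzero and OP0, OP1 have opposite
signs; otherwise the trunc results are already correct. */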
3368 if (target == 0 || GET_MODE (target) != compute_mode)
3369 target = gen_reg_rtx (compute_mode);
3374 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3375 quotient = gen_reg_rtx (compute_mode);
3380 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3381 remainder = gen_reg_rtx (compute_mode);
3384 if (expand_twoval_binop (sdivmod_optab, op0, op1,
3385 quotient, remainder, 0))
3387 /* This could be computed with a branch-less sequence.
3388 Save that for later. */
3390 rtx label = gen_label_rtx ();
3391 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
3392 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3393 NULL_RTX, 0, OPTAB_WIDEN);
3394 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
3395 expand_dec (quotient, const1_rtx);
3396 expand_inc (remainder, op1);
3398 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3401 /* No luck with division elimination or divmod. Have to do it
3402 by conditionally adjusting op0 *and* the result. */
3404 rtx label1, label2, label3, label4, label5;
3408 quotient = gen_reg_rtx (compute_mode);
3409 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3410 label1 = gen_label_rtx ();
3411 label2 = gen_label_rtx ();
3412 label3 = gen_label_rtx ();
3413 label4 = gen_label_rtx ();
3414 label5 = gen_label_rtx ();
3415 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3416 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
3417 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3418 quotient, 0, OPTAB_LIB_WIDEN);
3419 if (tem != quotient)
3420 emit_move_insn (quotient, tem);
3421 emit_jump_insn (gen_jump (label5));
3423 emit_label (label1);
3424 expand_inc (adjusted_op0, const1_rtx);
3425 emit_jump_insn (gen_jump (label4));
3427 emit_label (label2);
3428 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
3429 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3430 quotient, 0, OPTAB_LIB_WIDEN);
3431 if (tem != quotient)
3432 emit_move_insn (quotient, tem);
3433 emit_jump_insn (gen_jump (label5));
3435 emit_label (label3);
3436 expand_dec (adjusted_op0, const1_rtx);
3437 emit_label (label4);
3438 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3439 quotient, 0, OPTAB_LIB_WIDEN);
3440 if (tem != quotient)
3441 emit_move_insn (quotient, tem);
3442 expand_dec (quotient, const1_rtx);
3443 emit_label (label5);
3451 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
3454 unsigned HOST_WIDE_INT d = INTVAL (op1);
3455 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3456 build_int_2 (floor_log2 (d), 0),
3458 t2 = expand_binop (compute_mode, and_optab, op0,
3460 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3461 t3 = gen_reg_rtx (compute_mode);
3462 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3463 compute_mode, 1, 1);
3467 lab = gen_label_rtx ();
3468 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3469 expand_inc (t1, const1_rtx);
3474 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3480 /* Try using an instruction that produces both the quotient and
3481 remainder, using truncation. We can easily compensate the
3482 quotient or remainder to get ceiling rounding, once we have the
remainder. Note that we also compute the final remainder
value here, and return the result right away. */
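/* For unsigned ceiling rounding the compensation is simply: if the
trunc remainder R is nonzero, the quotient becomes Q + 1 and the
remainder becomes R - OP1. */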
3485 if (target == 0 || GET_MODE (target) != compute_mode)
3486 target = gen_reg_rtx (compute_mode);
3490 remainder = (GET_CODE (target) == REG
3491 ? target : gen_reg_rtx (compute_mode));
3492 quotient = gen_reg_rtx (compute_mode);
3496 quotient = (GET_CODE (target) == REG
3497 ? target : gen_reg_rtx (compute_mode));
3498 remainder = gen_reg_rtx (compute_mode);
3501 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
3504 /* This could be computed with a branch-less sequence.
3505 Save that for later. */
3506 rtx label = gen_label_rtx ();
3507 do_cmp_and_jump (remainder, const0_rtx, EQ,
3508 compute_mode, label);
3509 expand_inc (quotient, const1_rtx);
3510 expand_dec (remainder, op1);
3512 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3515 /* No luck with division elimination or divmod. Have to do it
3516 by conditionally adjusting op0 *and* the result. */
3519 rtx adjusted_op0, tem;
3521 quotient = gen_reg_rtx (compute_mode);
3522 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3523 label1 = gen_label_rtx ();
3524 label2 = gen_label_rtx ();
3525 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
3526 compute_mode, label1);
3527 emit_move_insn (quotient, const0_rtx);
3528 emit_jump_insn (gen_jump (label2));
3530 emit_label (label1);
3531 expand_dec (adjusted_op0, const1_rtx);
3532 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
3533 quotient, 1, OPTAB_LIB_WIDEN);
3534 if (tem != quotient)
3535 emit_move_insn (quotient, tem);
3536 expand_inc (quotient, const1_rtx);
3537 emit_label (label2);
3542 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3543 && INTVAL (op1) >= 0)
3545 /* This is extremely similar to the code for the unsigned case
3546 above. For 2.7 we should merge these variants, but for
2.6.1 I don't want to touch the code for unsigned since that
gets used in C. The signed case will only be used by other
languages (Ada). */
3552 unsigned HOST_WIDE_INT d = INTVAL (op1);
3553 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3554 build_int_2 (floor_log2 (d), 0),
3556 t2 = expand_binop (compute_mode, and_optab, op0,
3558 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3559 t3 = gen_reg_rtx (compute_mode);
3560 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3561 compute_mode, 1, 1);
3565 lab = gen_label_rtx ();
3566 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3567 expand_inc (t1, const1_rtx);
3572 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3578 /* Try using an instruction that produces both the quotient and
3579 remainder, using truncation. We can easily compensate the
3580 quotient or remainder to get ceiling rounding, once we have the
remainder. Note that we also compute the final remainder
value here, and return the result right away. */
3583 if (target == 0 || GET_MODE (target) != compute_mode)
3584 target = gen_reg_rtx (compute_mode);
remainder = (GET_CODE (target) == REG
3588 ? target : gen_reg_rtx (compute_mode));
3589 quotient = gen_reg_rtx (compute_mode);
3593 quotient = (GET_CODE (target) == REG
3594 ? target : gen_reg_rtx (compute_mode));
3595 remainder = gen_reg_rtx (compute_mode);
3598 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
3601 /* This could be computed with a branch-less sequence.
3602 Save that for later. */
3604 rtx label = gen_label_rtx ();
3605 do_cmp_and_jump (remainder, const0_rtx, EQ,
3606 compute_mode, label);
3607 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3608 NULL_RTX, 0, OPTAB_WIDEN);
3609 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
3610 expand_inc (quotient, const1_rtx);
3611 expand_dec (remainder, op1);
3613 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3616 /* No luck with division elimination or divmod. Have to do it
3617 by conditionally adjusting op0 *and* the result. */
3619 rtx label1, label2, label3, label4, label5;
3623 quotient = gen_reg_rtx (compute_mode);
3624 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3625 label1 = gen_label_rtx ();
3626 label2 = gen_label_rtx ();
3627 label3 = gen_label_rtx ();
3628 label4 = gen_label_rtx ();
3629 label5 = gen_label_rtx ();
3630 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3631 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
3632 compute_mode, label1);
3633 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3634 quotient, 0, OPTAB_LIB_WIDEN);
3635 if (tem != quotient)
3636 emit_move_insn (quotient, tem);
3637 emit_jump_insn (gen_jump (label5));
3639 emit_label (label1);
3640 expand_dec (adjusted_op0, const1_rtx);
3641 emit_jump_insn (gen_jump (label4));
3643 emit_label (label2);
3644 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
3645 compute_mode, label3);
3646 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3647 quotient, 0, OPTAB_LIB_WIDEN);
3648 if (tem != quotient)
3649 emit_move_insn (quotient, tem);
3650 emit_jump_insn (gen_jump (label5));
3652 emit_label (label3);
3653 expand_inc (adjusted_op0, const1_rtx);
3654 emit_label (label4);
3655 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3656 quotient, 0, OPTAB_LIB_WIDEN);
3657 if (tem != quotient)
3658 emit_move_insn (quotient, tem);
3659 expand_inc (quotient, const1_rtx);
3660 emit_label (label5);
3665 case EXACT_DIV_EXPR:
3666 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3668 HOST_WIDE_INT d = INTVAL (op1);
3669 unsigned HOST_WIDE_INT ml;
3673 post_shift = floor_log2 (d & -d);
3674 ml = invert_mod2n (d >> post_shift, size);
3675 t1 = expand_mult (compute_mode, op0, GEN_INT (ml), NULL_RTX,
3677 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3678 build_int_2 (post_shift, 0),
3679 NULL_RTX, unsignedp);
3681 insn = get_last_insn ();
3683 = gen_rtx_EXPR_LIST (REG_EQUAL,
3684 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
3691 case ROUND_DIV_EXPR:
3692 case ROUND_MOD_EXPR:
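/* Round to nearest: compute the trunc quotient and remainder, then
increase the quotient's magnitude by one (and correct the remainder
by OP1) when twice the remainder's magnitude is at least |OP1|. */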
3697 label = gen_label_rtx ();
3698 quotient = gen_reg_rtx (compute_mode);
3699 remainder = gen_reg_rtx (compute_mode);
3700 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
3703 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
3704 quotient, 1, OPTAB_LIB_WIDEN);
3705 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
3706 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3707 remainder, 1, OPTAB_LIB_WIDEN);
3709 tem = plus_constant (op1, -1);
3710 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3711 build_int_2 (1, 0), NULL_RTX, 1);
3712 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
3713 expand_inc (quotient, const1_rtx);
3714 expand_dec (remainder, op1);
3719 rtx abs_rem, abs_op1, tem, mask;
3721 label = gen_label_rtx ();
3722 quotient = gen_reg_rtx (compute_mode);
3723 remainder = gen_reg_rtx (compute_mode);
3724 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
3727 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
3728 quotient, 0, OPTAB_LIB_WIDEN);
3729 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
3730 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3731 remainder, 0, OPTAB_LIB_WIDEN);
3733 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 0, 0);
3734 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 0, 0);
3735 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
3736 build_int_2 (1, 0), NULL_RTX, 1);
3737 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
3738 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3739 NULL_RTX, 0, OPTAB_WIDEN);
3740 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3741 build_int_2 (size - 1, 0), NULL_RTX, 0);
3742 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
3743 NULL_RTX, 0, OPTAB_WIDEN);
3744 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3745 NULL_RTX, 0, OPTAB_WIDEN);
3746 expand_inc (quotient, tem);
3747 tem = expand_binop (compute_mode, xor_optab, mask, op1,
3748 NULL_RTX, 0, OPTAB_WIDEN);
3749 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3750 NULL_RTX, 0, OPTAB_WIDEN);
3751 expand_dec (remainder, tem);
3754 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3762 if (target && GET_MODE (target) != compute_mode)
/* Try to produce the remainder without producing the quotient.
If we seem to have a divmod pattern that does not require widening,
don't try widening here. We should really have a WIDEN argument
to expand_twoval_binop, since what we'd really like to do here is
1) try a mod insn in compute_mode
2) try a divmod insn in compute_mode
3) try a div insn in compute_mode and multiply-subtract to get
the remainder
4) try the same things with widening allowed. */
3777 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
3780 ((optab2->handlers[(int) compute_mode].insn_code
3781 != CODE_FOR_nothing)
3782 ? OPTAB_DIRECT : OPTAB_WIDEN));
3785 /* No luck there. Can we do remainder and divide at once
3786 without a library call? */
3787 remainder = gen_reg_rtx (compute_mode);
3788 if (! expand_twoval_binop ((unsignedp
3792 NULL_RTX, remainder, unsignedp))
3797 return gen_lowpart (mode, remainder);
3800 /* Produce the quotient. Try a quotient insn, but not a library call.
3801 If we have a divmod in this mode, use it in preference to widening
3802 the div (for this test we assume it will not fail). Note that optab2
is set to one of the two optabs that the call below will use. */
3805 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
3806 op0, op1, rem_flag ? NULL_RTX : target,
3808 ((optab2->handlers[(int) compute_mode].insn_code
3809 != CODE_FOR_nothing)
3810 ? OPTAB_DIRECT : OPTAB_WIDEN));
3814 /* No luck there. Try a quotient-and-remainder insn,
3815 keeping the quotient alone. */
3816 quotient = gen_reg_rtx (compute_mode);
3817 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
3819 quotient, NULL_RTX, unsignedp))
3823 /* Still no luck. If we are not computing the remainder,
3824 use a library call for the quotient. */
3825 quotient = sign_expand_binop (compute_mode,
3826 udiv_optab, sdiv_optab,
3828 unsignedp, OPTAB_LIB_WIDEN);
3835 if (target && GET_MODE (target) != compute_mode)
3839 /* No divide instruction either. Use library for remainder. */
3840 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
3842 unsignedp, OPTAB_LIB_WIDEN);
3845 /* We divided. Now finish doing X - Y * (X / Y). */
3846 remainder = expand_mult (compute_mode, quotient, op1,
3847 NULL_RTX, unsignedp);
3848 remainder = expand_binop (compute_mode, sub_optab, op0,
3849 remainder, target, unsignedp,
3854 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3857 /* Return a tree node with data type TYPE, describing the value of X.
3858 Usually this is an RTL_EXPR, if there is no obvious better choice.
X may be an expression; however, we only support those expressions
3860 generated by loop.c. */
3869 switch (GET_CODE (x))
3872 t = build_int_2 (INTVAL (x),
3873 (TREE_UNSIGNED (type)
3874 && (GET_MODE_BITSIZE (TYPE_MODE (type)) < HOST_BITS_PER_WIDE_INT))
3875 || INTVAL (x) >= 0 ? 0 : -1);
3876 TREE_TYPE (t) = type;
3880 if (GET_MODE (x) == VOIDmode)
3882 t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
3883 TREE_TYPE (t) = type;
3889 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
3890 t = build_real (type, d);
3896 return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
3897 make_tree (type, XEXP (x, 1))));
3900 return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
3901 make_tree (type, XEXP (x, 1))));
3904 return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0))));
3907 return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
3908 make_tree (type, XEXP (x, 1))));
3911 return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
3912 make_tree (type, XEXP (x, 1))));
3915 return fold (convert (type,
3916 build (RSHIFT_EXPR, unsigned_type (type),
3917 make_tree (unsigned_type (type),
3919 make_tree (type, XEXP (x, 1)))));
3922 return fold (convert (type,
3923 build (RSHIFT_EXPR, signed_type (type),
3924 make_tree (signed_type (type), XEXP (x, 0)),
3925 make_tree (type, XEXP (x, 1)))));
3928 if (TREE_CODE (type) != REAL_TYPE)
3929 t = signed_type (type);
3933 return fold (convert (type,
3934 build (TRUNC_DIV_EXPR, t,
3935 make_tree (t, XEXP (x, 0)),
3936 make_tree (t, XEXP (x, 1)))));
3938 t = unsigned_type (type);
3939 return fold (convert (type,
3940 build (TRUNC_DIV_EXPR, t,
3941 make_tree (t, XEXP (x, 0)),
3942 make_tree (t, XEXP (x, 1)))));
3944 t = make_node (RTL_EXPR);
3945 TREE_TYPE (t) = type;
3946 RTL_EXPR_RTL (t) = x;
3947 /* There are no insns to be output
when this rtl_expr is used. */
3949 RTL_EXPR_SEQUENCE (t) = 0;
3954 /* Return an rtx representing the value of X * MULT + ADD.
3955 TARGET is a suggestion for where to store the result (an rtx).
3956 MODE is the machine mode for the computation.
X and MULT must have mode MODE. ADD may have a different mode;
so may X (it defaults to MODE).
3959 UNSIGNEDP is non-zero to do unsigned multiplication.
3960 This may emit insns. */
3963 expand_mult_add (x, target, mult, add, mode, unsignedp)
3964 rtx x, target, mult, add;
3965 enum machine_mode mode;
3968 tree type = type_for_mode (mode, unsignedp);
3969 tree add_type = (GET_MODE (add) == VOIDmode
3970 ? type : type_for_mode (GET_MODE (add), unsignedp));
3971 tree result = fold (build (PLUS_EXPR, type,
3972 fold (build (MULT_EXPR, type,
3973 make_tree (type, x),
3974 make_tree (type, mult))),
3975 make_tree (add_type, add)));
3977 return expand_expr (result, target, VOIDmode, 0);
3980 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
3981 and returning TARGET.
3983 If TARGET is 0, a pseudo-register or constant is returned. */
3986 expand_and (op0, op1, target)
3987 rtx op0, op1, target;
3989 enum machine_mode mode = VOIDmode;
3992 if (GET_MODE (op0) != VOIDmode)
3993 mode = GET_MODE (op0);
3994 else if (GET_MODE (op1) != VOIDmode)
3995 mode = GET_MODE (op1);
3997 if (mode != VOIDmode)
3998 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
3999 else if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT)
4000 tem = GEN_INT (INTVAL (op0) & INTVAL (op1));
4006 else if (tem != target)
4007 emit_move_insn (target, tem);
4011 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
4012 and storing in TARGET. Normally return TARGET.
4013 Return 0 if that cannot be done.
4015 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
4016 it is VOIDmode, they cannot both be CONST_INT.
4018 UNSIGNEDP is for the case where we have to widen the operands
4019 to perform the operation. It says to use zero-extension.
NORMALIZEP is 1 if we should convert the result to be either zero
or one. NORMALIZEP is -1 if we should convert the result to be
4023 either zero or -1. If NORMALIZEP is zero, the result will be left
4024 "raw" out of the scc insn. */
4027 emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep)
4031 enum machine_mode mode;
4036 enum insn_code icode;
4037 enum machine_mode compare_mode;
4038 enum machine_mode target_mode = GET_MODE (target);
4040 rtx last = get_last_insn ();
4041 rtx pattern, comparison;
4043 /* If one operand is constant, make it the second one. Only do this
4044 if the other operand is not constant as well. */
4046 if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
4047 || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
4052 code = swap_condition (code);
4055 if (mode == VOIDmode)
4056 mode = GET_MODE (op0);
  /* For some comparisons with 1 and -1, we can convert this to
     comparisons with zero.  This will often produce more opportunities for
     store-flag insns.  */

  switch (code)
    {
    case LT:
      if (op1 == const1_rtx)
	op1 = const0_rtx, code = LE;
      break;
    case LE:
      if (op1 == constm1_rtx)
	op1 = const0_rtx, code = LT;
      break;
    case GE:
      if (op1 == const1_rtx)
	op1 = const0_rtx, code = GT;
      break;
    case GT:
      if (op1 == constm1_rtx)
	op1 = const0_rtx, code = GE;
      break;
    case GEU:
      if (op1 == const1_rtx)
	op1 = const0_rtx, code = NE;
      break;
    case LTU:
      if (op1 == const1_rtx)
	op1 = const0_rtx, code = EQ;
      break;
    default:
      break;
    }
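
  /* Each rewrite above is an exact identity; for example, A < 1 is
     A <= 0 and A > -1 is A >= 0 for signed A, while for unsigned A,
     A >= 1 is A != 0 and A < 1 is A == 0.  */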
  /* From now on, we won't change CODE, so set ICODE now.  */
  icode = setcc_gen_code[(int) code];

  /* If this is A < 0 or A >= 0, we can do this by taking the ones
     complement of A (for GE) and shifting the sign bit to the low bit.  */
  if (op1 == const0_rtx && (code == LT || code == GE)
      && GET_MODE_CLASS (mode) == MODE_INT
      && (normalizep || STORE_FLAG_VALUE == 1
	  || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
		  == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
    {
      subtarget = target;

      /* If the result is to be wider than OP0, it is best to convert it
	 first.  If it is to be narrower, it is *incorrect* to convert it
	 first.  */
      if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
	{
	  op0 = protect_from_queue (op0, 0);
	  op0 = convert_modes (target_mode, mode, op0, 0);
	  mode = target_mode;
	}

      if (target_mode != mode)
	subtarget = 0;

      if (code == GE)
	op0 = expand_unop (mode, one_cmpl_optab, op0,
			   ((STORE_FLAG_VALUE == 1 || normalizep)
			    ? 0 : subtarget), 0);

      if (STORE_FLAG_VALUE == 1 || normalizep)
	/* If we are supposed to produce a 0/1 value, we want to do
	   a logical shift from the sign bit to the low-order bit; for
	   a -1/0 value, we do an arithmetic shift.  */
	op0 = expand_shift (RSHIFT_EXPR, mode, op0,
			    size_int (GET_MODE_BITSIZE (mode) - 1),
			    subtarget, normalizep != -1);

      if (mode != target_mode)
	op0 = convert_modes (target_mode, mode, op0, 0);

      return op0;
    }
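
  /* Worked example for the shift above (illustrative only): in QImode,
     A = -5 is 0xFB; a logical right shift by 7 gives 0x01 (the 0/1 form
     of A < 0) and an arithmetic shift gives 0xFF, i.e. -1 (the -1/0
     form).  For A >= 0 the same shifts are applied to ~A, which is
     0x04, giving 0.  */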
  if (icode != CODE_FOR_nothing)
    {
      /* We think we may be able to do this with a scc insn.  Emit the
	 comparison and then the scc insn.

	 compare_from_rtx may call emit_queue, which would be deleted below
	 if the scc insn fails.  So call it ourselves before setting LAST.  */
      emit_queue ();
      last = get_last_insn ();

      comparison
	= compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX, 0);
      if (GET_CODE (comparison) == CONST_INT)
	return (comparison == const0_rtx ? const0_rtx
		: normalizep == 1 ? const1_rtx
		: normalizep == -1 ? constm1_rtx
		: const_true_rtx);

      /* If the code of COMPARISON doesn't match CODE, something is
	 wrong; we can no longer be sure that we have the operation.
	 We could handle this case, but it should not happen.  */
      if (GET_CODE (comparison) != code)
	abort ();

      /* Get a reference to the target in the proper mode for this insn.  */
      compare_mode = insn_operand_mode[(int) icode][0];
      subtarget = target;
      if (preserve_subexpressions_p ()
	  || ! (*insn_operand_predicate[(int) icode][0]) (subtarget, compare_mode))
	subtarget = gen_reg_rtx (compare_mode);

      pattern = GEN_FCN (icode) (subtarget);
      if (pattern)
	{
	  emit_insn (pattern);

	  /* If we are converting to a wider mode, first convert to
	     TARGET_MODE, then normalize.  This produces better combining
	     opportunities on machines that have a SIGN_EXTRACT when we are
	     testing a single bit.  This mostly benefits the 68k.

	     If STORE_FLAG_VALUE does not have the sign bit set when
	     interpreted in COMPARE_MODE, we can do this conversion as
	     unsigned, which is usually more efficient.  */
	  if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
	    {
	      convert_move (target, subtarget,
			    (GET_MODE_BITSIZE (compare_mode)
			     <= HOST_BITS_PER_WIDE_INT)
			    && 0 == (STORE_FLAG_VALUE
				     & ((HOST_WIDE_INT) 1
					<< (GET_MODE_BITSIZE (compare_mode) - 1))));
	      op0 = target;
	      compare_mode = target_mode;
	    }
	  else
	    op0 = subtarget;

	  /* If we want to keep subexpressions around, don't reuse our
	     last target.  */
	  if (preserve_subexpressions_p ())
	    subtarget = 0;

	  /* Now normalize to the proper value in COMPARE_MODE.  Sometimes
	     we don't have to do anything.  */
	  if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
	    ;
	  else if (normalizep == - STORE_FLAG_VALUE)
	    op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);

	  /* We don't want to use STORE_FLAG_VALUE < 0 below since this
	     makes it hard to use a value of just the sign bit due to
	     ANSI integer constant typing rules.  */
	  else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
		   && (STORE_FLAG_VALUE
		       & ((HOST_WIDE_INT) 1
			  << (GET_MODE_BITSIZE (compare_mode) - 1))))
	    op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
				size_int (GET_MODE_BITSIZE (compare_mode) - 1),
				subtarget, normalizep == 1);
	  else if (STORE_FLAG_VALUE & 1)
	    {
	      op0 = expand_and (op0, const1_rtx, subtarget);
	      if (normalizep == -1)
		op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
	    }
	  else
	    abort ();

	  /* If we were converting to a smaller mode, do the
	     conversion now.  */
	  if (target_mode != compare_mode)
	    {
	      convert_move (target, op0, 0);
	      return target;
	    }
	  else
	    return op0;
	}
      delete_insns_since (last);
    }
  /* If expensive optimizations, use different pseudo registers for each
     insn, instead of reusing the same pseudo.  This leads to better CSE,
     but slows down the compiler, since there are more pseudos.  */
  subtarget = (!flag_expensive_optimizations
	       && (target_mode == mode)) ? target : NULL_RTX;
  /* If we reached here, we can't do this with a scc insn.  However, there
     are some comparisons that can be done directly.  For example, if
     this is an equality comparison of integers, we can try to exclusive-or
     (or subtract) the two operands and use a recursive call to try the
     comparison with zero.  Don't do any of these cases if branches are
     very cheap.  */

  if (BRANCH_COST > 0
      && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
      && op1 != const0_rtx)
    {
      tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
			  OPTAB_WIDEN);

      if (tem == 0)
	tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
			    OPTAB_WIDEN);
      if (tem != 0)
	tem = emit_store_flag (target, code, tem, const0_rtx,
			       mode, unsignedp, normalizep);
      if (tem == 0)
	delete_insns_since (last);
      return tem;
    }
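
  /* Illustrative only: for A == B the code above computes A ^ B (or
     A - B when no xor pattern exists) and recursively emits a store
     flag for (A ^ B) == 0, a comparison against zero that many more
     targets handle directly.  */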
  /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
     the constant zero.  Reject all other comparisons at this point.  Only
     do LE and GT if branches are expensive since they are expensive on
     2-operand machines.  */

  if (BRANCH_COST == 0
      || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
      || (code != EQ && code != NE
	  && (BRANCH_COST <= 1 || (code != LE && code != GT))))
    return 0;
  /* See what we need to return.  We can only return a 1, -1, or the
     sign bit.  */

  if (normalizep == 0)
    {
      if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	normalizep = STORE_FLAG_VALUE;

      else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	       && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
		   == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
	;
      else
	return 0;
    }
  /* Try to put the result of the comparison in the sign bit.  Assume we can't
     do the necessary operation below.  */

  tem = 0;

  /* To see if A <= 0, compute (A | (A - 1)).  A <= 0 iff that result has
     the sign bit set.  */

  if (code == LE)
    {
      /* This is destructive, so SUBTARGET can't be OP0.  */
      if (rtx_equal_p (subtarget, op0))
	subtarget = 0;

      tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
			  OPTAB_WIDEN);
      if (tem)
	tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
			    OPTAB_WIDEN);
    }
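
  /* Worked example (illustrative only), in QImode: A = 3 gives
     3 | 2 = 0x03 with the sign bit clear, so A <= 0 is false; A = 0
     gives 0 | 0xFF = 0xFF and A = -3 gives 0xFD | 0xFC = 0xFD, both
     with the sign bit set, so A <= 0 is true.  */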
  /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
     number of bits in the mode of OP0, minus one.  */

  if (code == GT)
    {
      if (rtx_equal_p (subtarget, op0))
	subtarget = 0;

      tem = expand_shift (RSHIFT_EXPR, mode, op0,
			  size_int (GET_MODE_BITSIZE (mode) - 1),
			  subtarget, 0);
      tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
			  OPTAB_WIDEN);
    }
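
  /* Worked example (illustrative only), in SImode: A = 5 gives
     (5 >> 31) - 5 = -5 with the sign bit set, so A > 0 is true;
     A = -5 gives (-1) - (-5) = 4 and A = 0 gives 0 - 0 = 0, sign bit
     clear in both, so A > 0 is false.  */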
  if (code == EQ || code == NE)
    {
      /* For EQ or NE, one way to do the comparison is to apply an operation
	 that converts the operand into a positive number if it is non-zero
	 or zero if it was originally zero.  Then, for EQ, we subtract 1 and
	 for NE we negate.  This puts the result in the sign bit.  Then we
	 normalize with a shift, if needed.

	 Two operations that can do the above actions are ABS and FFS, so try
	 them.  If that doesn't work, and MODE is smaller than a full word,
	 we can use zero-extension to the wider mode (an unsigned conversion)
	 as the operation.  */

      if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
	tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
      else if (ffs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
	tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
      else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
	{
	  op0 = protect_from_queue (op0, 0);
	  tem = convert_modes (word_mode, mode, op0, 1);
	  mode = word_mode;
	}

      if (tem != 0)
	{
	  if (code == EQ)
	    tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
				0, OPTAB_WIDEN);
	  else
	    tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
	}
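
      /* Illustrative only: with A = 7, ABS gives 7; negating for NE
	 gives -7 (sign bit set, so A != 0 is true), and subtracting 1
	 for EQ gives 6 (sign bit clear, so A == 0 is false).  With
	 A = 0 the results are 0 and -1, reversing both answers.  */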
      /* If we couldn't do it that way, for NE we can "or" the two's complement
	 of the value with itself.  For EQ, we take the one's complement of
	 that "or", which is an extra insn, so we only handle EQ if branches
	 are expensive.  */

      if (tem == 0 && (code == NE || BRANCH_COST > 1))
	{
	  if (rtx_equal_p (subtarget, op0))
	    subtarget = 0;

	  tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
	  tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
			      OPTAB_WIDEN);

	  if (tem && code == EQ)
	    tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
	}
    }
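
  /* Illustrative only: (-A | A) has the sign bit set exactly when A is
     non-zero; e.g. in QImode, A = 5 gives 0xFB | 0x05 = 0xFF, while
     A = 0 gives 0 | 0 = 0.  */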
  if (tem && normalizep)
    tem = expand_shift (RSHIFT_EXPR, mode, tem,
			size_int (GET_MODE_BITSIZE (mode) - 1),
			subtarget, normalizep == 1);

  if (tem)
    {
      if (GET_MODE (tem) != target_mode)
	{
	  convert_move (target, tem, 0);
	  tem = target;
	}
      else if (!subtarget)
	{
	  emit_move_insn (target, tem);
	  tem = target;
	}
    }
  else
    delete_insns_since (last);

  return tem;
}
/* Like emit_store_flag, but always succeeds.  */

rtx
emit_store_flag_force (target, code, op0, op1, mode, unsignedp, normalizep)
     rtx target;
     enum rtx_code code;
     rtx op0, op1;
     enum machine_mode mode;
     int unsignedp;
     int normalizep;
{
  rtx tem, label;

  /* First see if emit_store_flag can do the job.  */
  tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
  if (tem != 0)
    return tem;

  if (normalizep == 0)
    normalizep = 1;

  /* If this failed, we have to do this with set/compare/jump/set code.  */

  if (GET_CODE (target) != REG
      || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
    target = gen_reg_rtx (GET_MODE (target));

  emit_move_insn (target, const1_rtx);
  tem = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX, 0);
  if (GET_CODE (tem) == CONST_INT)
    return tem;

  label = gen_label_rtx ();
  if (bcc_gen_fctn[(int) code] == 0)
    abort ();

  emit_jump_insn ((*bcc_gen_fctn[(int) code]) (label));
  emit_move_insn (target, const0_rtx);
  emit_label (label);

  return target;
}
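
/* The fallback above thus emits the equivalent of (illustrative only):

       (set TARGET (const_int 1))
       ... compare OP0 with OP1 ...
       (conditional jump on CODE to LABEL)
       (set TARGET (const_int 0))
     LABEL:

   leaving TARGET holding 1 exactly when the condition holds.  */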
/* Perform a possibly multi-word comparison and conditional jump to LABEL
   if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE.

   The algorithm is based on the code in expr.c:do_jump.

   Note that this does not perform a general comparison.  Only variants
   generated within expmed.c are correctly handled; others abort (but could
   be handled if needed).  */
static void
do_cmp_and_jump (arg1, arg2, op, mode, label)
     rtx arg1, arg2, label;
     enum rtx_code op;
     enum machine_mode mode;
{
  /* If this mode is an integer too wide to compare properly,
     compare word by word.  Rely on cse to optimize constant cases.  */

  if (GET_MODE_CLASS (mode) == MODE_INT && !can_compare_p (mode))
    {
      rtx label2 = gen_label_rtx ();

      switch (op)
	{
	case LTU:
	  do_jump_by_parts_greater_rtx (mode, 1, arg2, arg1, label2, label);
	  break;
	case LEU:
	  do_jump_by_parts_greater_rtx (mode, 1, arg1, arg2, label, label2);
	  break;
	case LT:
	  do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label2, label);
	  break;
	case GT:
	  do_jump_by_parts_greater_rtx (mode, 0, arg1, arg2, label2, label);
	  break;
	case GE:
	  do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label, label2);
	  break;
	case EQ:
	  /* do_jump_by_parts_equality_rtx compares with zero.  Luckily
	     that's the only equality operation we do.  */
	  if (arg2 != const0_rtx || mode != GET_MODE (arg1))
	    abort ();
	  do_jump_by_parts_equality_rtx (arg1, label2, label);
	  break;
	case NE:
	  if (arg2 != const0_rtx || mode != GET_MODE (arg1))
	    abort ();
	  do_jump_by_parts_equality_rtx (arg1, label, label2);
	  break;
	default:
	  abort ();
	}

      emit_label (label2);
    }
  else
    {
      emit_cmp_insn (arg1, arg2, op, NULL_RTX, mode, 0, 0);
      if (bcc_gen_fctn[(int) op] == 0)
	abort ();
      emit_jump_insn ((*bcc_gen_fctn[(int) op]) (label));
    }
}
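
/* Illustrative only: on a 32-bit target, a DImode LEU test of
   ARG1 <= ARG2 has no single compare insn, so it is handled above by
   jumping to LABEL2 (the fall-through) when ARG1 > ARG2 and to LABEL
   otherwise, with do_jump_by_parts_greater_rtx comparing the operands
   a word at a time.  */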