/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 1988, 1989, 1992 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */
#include "config.h"
#include "rtl.h"
#include "tree.h"
#include "flags.h"
#include "insn-flags.h"
#include "insn-codes.h"
#include "insn-config.h"
#include "expr.h"
#include "real.h"
#include "recog.h"
static rtx extract_split_bit_field ();
static rtx extract_fixed_bit_field ();
static void store_split_bit_field ();
static void store_fixed_bit_field ();
static rtx mask_rtx ();
static rtx lshift_value ();

#define CEIL(x,y) (((x) + (y) - 1) / (y))
/* Non-zero means multiply instructions are cheaper than shifts.  */
int mult_is_very_cheap;

/* Non-zero means divides or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   sequences.  */

static int sdiv_pow2_cheap, smod_pow2_cheap;

/* Cost of various pieces of RTL.  */
static int add_cost, shift_cost, mult_cost, negate_cost, lea_cost;

/* Max scale factor for scaled address in lea instruction.  */
static int lea_max_mul;
void
init_expmed ()
{
  char *free_point = (char *) oballoc (1);
  /* This is "some random pseudo register" for purposes of calling recog
     to see what insns exist.  */
  rtx reg = gen_rtx (REG, word_mode, FIRST_PSEUDO_REGISTER);
  rtx pow2 = GEN_INT (32);
  rtx lea;
  int i, dummy;

  add_cost = rtx_cost (gen_rtx (PLUS, word_mode, reg, reg), SET);
  shift_cost = rtx_cost (gen_rtx (LSHIFT, word_mode, reg,
				  /* Using a constant gives better
				     estimate of typical costs.
				     1 or 2 might have quirks.  */
				  GEN_INT (3)), SET);
  mult_cost = rtx_cost (gen_rtx (MULT, word_mode, reg, reg), SET);
  negate_cost = rtx_cost (gen_rtx (NEG, word_mode, reg), SET);

  /* 999999 is chosen to avoid any plausible faster special case.  */
  mult_is_very_cheap
    = (rtx_cost (gen_rtx (MULT, word_mode, reg, GEN_INT (999999)), SET)
       < rtx_cost (gen_rtx (LSHIFT, word_mode, reg, GEN_INT (7)), SET));

  sdiv_pow2_cheap
    = rtx_cost (gen_rtx (DIV, word_mode, reg, pow2), SET) <= 2 * add_cost;
  smod_pow2_cheap
    = rtx_cost (gen_rtx (MOD, word_mode, reg, pow2), SET) <= 2 * add_cost;

  /* Probe for the largest scale factor `lea' supports, and its cost,
     by asking recog about increasing powers of two.  */
  for (i = 2;; i <<= 1)
    {
      lea = gen_rtx (SET, VOIDmode, reg,
		     gen_rtx (PLUS, word_mode,
			      gen_rtx (MULT, word_mode, reg, GEN_INT (i)),
			      reg));
      /* Using 0 as second argument is not quite right,
	 but what else is there to do?  */
      if (recog (lea, 0, &dummy) < 0)
	break;
      lea_max_mul = i;
      lea_cost = rtx_cost (SET_SRC (lea), SET);
    }

  /* Free the objects we just allocated.  */
  obfree (free_point);
}
/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (mode, x)
     enum machine_mode mode;
     rtx x;
{
  if (GET_CODE (x) == CONST_INT)
    {
      HOST_WIDE_INT val = - INTVAL (x);
      if (GET_MODE_BITSIZE (mode) < HOST_BITS_PER_WIDE_INT)
	{
	  /* Sign extend the value from the bits that are significant.  */
	  if (val & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
	    val |= (HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (mode);
	  else
	    val &= ((HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (mode)) - 1;
	}
      return GEN_INT (val);
    }
  else
    return expand_unop (GET_MODE (x), neg_optab, x, NULL_RTX, 0);
}
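
/* A minimal host-side sketch of the CONST_INT arithmetic above, assuming a
   32-bit-or-wider long and an 8-bit (QImode-like) target mode.  This is an
   illustration only and is compiled out.  */
#if 0
#include <stdio.h>

int
main ()
{
  long val = - (-128L);		/* negate the CONST_INT -128 */
  int bitsize = 8;		/* mode narrower than the host word */

  /* Sign extend from the significant bits, as negate_rtx does.  */
  if (val & (1L << (bitsize - 1)))
    val |= -1L << bitsize;
  else
    val &= (1L << bitsize) - 1;

  printf ("%ld\n", val);	/* prints -128: negation wraps in QImode */
  return 0;
}
#endif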
/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
   ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
   TOTAL_SIZE is the size of the structure in bytes, or -1 if varying.  */

/* ??? Note that there are two different ideas here for how
   to determine the size to count bits within, for a register.
   One is BITS_PER_WORD, and the other is the size of operand 3
   of the insv pattern.  (The latter assumes that an n-bit machine
   will be able to insert bit fields up to n bits wide.)
   It isn't certain that either of these is right.
   extract_bit_field has the same quandary.  */
rtx
store_bit_field (str_rtx, bitsize, bitnum, fieldmode, value, align, total_size)
     rtx str_rtx;
     register int bitsize;
     int bitnum;
     enum machine_mode fieldmode;
     rtx value;
     int align;
     int total_size;
{
  int unit = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  register int offset = bitnum / unit;
  register int bitpos = bitnum % unit;
  register rtx op0 = str_rtx;

  if (GET_CODE (str_rtx) == MEM && ! MEM_IN_STRUCT_P (str_rtx))
    abort ();

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
		   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
	 but I think that is a mistake.  WORDS_BIG_ENDIAN is
	 meaningful at a much higher level; when structures are copied
	 between memory and regs, the higher-numbered regs
	 always get higher addresses.  */
      offset += SUBREG_WORD (op0);
      /* We used to adjust BITPOS here, but now we do the whole adjustment
	 right after the loop.  */
      op0 = SUBREG_REG (op0);
    }

#if BYTES_BIG_ENDIAN
  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (GET_CODE (op0) != MEM && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
#endif

  value = protect_from_queue (value, 0);

  if (flag_force_mem)
    value = force_not_mem (value);

  /* Note that the adjustment of BITPOS above has no effect on whether
     BITPOS is 0 in a REG bigger than a word.  */
  if (GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
      && (! STRICT_ALIGNMENT || GET_CODE (op0) != MEM)
      && bitpos == 0 && bitsize == GET_MODE_BITSIZE (fieldmode))
    {
      /* Storing in a full-word or multi-word field in a register
	 can be done with just SUBREG.  */
      if (GET_MODE (op0) != fieldmode)
	{
	  if (GET_CODE (op0) == REG)
	    op0 = gen_rtx (SUBREG, fieldmode, op0, offset);
	  else
	    op0 = change_address (op0, fieldmode,
				  plus_constant (XEXP (op0, 0), offset));
	}
      emit_move_insn (op0, value);
      return value;
    }

  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (GET_CODE (op0) != MEM
#if BYTES_BIG_ENDIAN
      && bitpos + bitsize == unit
#else
      && bitpos == 0
#endif
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (GET_MODE (op0) == fieldmode
	  || (movstrict_optab->handlers[(int) fieldmode].insn_code
	      != CODE_FOR_nothing)))
    {
      /* Get appropriate low part of the value being stored.  */
      if (GET_CODE (value) == CONST_INT || GET_CODE (value) == REG)
	value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
		 || GET_CODE (value) == LABEL_REF
		 || GET_CODE (value) == CONST))
	value = convert_to_mode (fieldmode, value, 0);

      if (GET_MODE (op0) == fieldmode)
	emit_move_insn (op0, value);
      else
	{
	  int icode = movstrict_optab->handlers[(int) fieldmode].insn_code;
	  if (! (*insn_operand_predicate[icode][1]) (value, fieldmode))
	    value = copy_to_mode_reg (fieldmode, value);
	  emit_insn (GEN_FCN (icode)
		     (gen_rtx (SUBREG, fieldmode, op0, offset), value));
	}
      return value;
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
	 in the order least significant first.
	 This is because the most significant word is the one which may
	 be less than full.  */

      int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      int i;

      /* This is the mode we must force value to, so that there will be enough
	 subwords to extract.  Note that fieldmode will often (always?) be
	 VOIDmode, because that is what store_field uses to indicate that this
	 is a bit field, but passing VOIDmode to operand_subword_force will
	 result in an abort.  */
      fieldmode = mode_for_size (nwords * BITS_PER_WORD, MODE_INT, 0);

      for (i = 0; i < nwords; i++)
	{
	  /* If I is 0, use the low-order word in both field and target;
	     if I is 1, use the next to lowest word; and so on.  */
	  int wordnum = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
	  int bit_offset = (WORDS_BIG_ENDIAN
			    ? MAX (bitsize - (i + 1) * BITS_PER_WORD, 0)
			    : i * BITS_PER_WORD);
	  store_bit_field (op0, MIN (BITS_PER_WORD,
				     bitsize - i * BITS_PER_WORD),
			   bitnum + bit_offset, word_mode,
			   operand_subword_force (value, wordnum, fieldmode),
			   align, total_size);
	}
      return value;
    }

  /* From here on we can assume that the field to be stored in is
     a full-word (whatever type that is), since it is shorter than a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) == REG)
    {
      if (offset != 0
	  || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
	op0 = gen_rtx (SUBREG, TYPE_MODE (type_for_size (BITS_PER_WORD, 0)),
		       op0, offset);
      offset = 0;
    }
  else
    op0 = protect_from_queue (op0, 1);

  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

#ifdef HAVE_insv
  if (HAVE_insv
      && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
      /* Ensure insv's size is wide enough for this field.  */
      && (GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_insv][3])
	  >= bitsize))
    {
      int xbitpos = bitpos;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      enum machine_mode maxmode
	= insn_operand_mode[(int) CODE_FOR_insv][3];
      rtx value1;
      int save_volatile_ok = volatile_ok;
      volatile_ok = 1;

      /* If this machine's insv can only insert into a register, or if we
	 are to force MEMs into a register, copy OP0 into a register and
	 save it back later.  */
      if (GET_CODE (op0) == MEM
	  && (flag_force_mem
	      || ! ((*insn_operand_predicate[(int) CODE_FOR_insv][0])
		    (op0, VOIDmode))))
	{
	  rtx tempreg;
	  enum machine_mode bestmode;

	  /* Get the mode to use for inserting into this field.  If OP0 is
	     BLKmode, get the smallest mode consistent with the alignment. If
	     OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
	     mode. Otherwise, use the smallest mode containing the field.  */

	  if (GET_MODE (op0) == BLKmode
	      || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
	    bestmode
	      = get_best_mode (bitsize, bitnum, align * BITS_PER_UNIT, maxmode,
			       MEM_VOLATILE_P (op0));
	  else
	    bestmode = GET_MODE (op0);

	  if (bestmode == VOIDmode)
	    goto insv_loses;

	  /* Adjust address to point to the containing unit of that mode.  */
	  unit = GET_MODE_BITSIZE (bestmode);
	  /* Compute offset as multiple of this unit, counting in bytes.  */
	  offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
	  bitpos = bitnum % unit;
	  op0 = change_address (op0, bestmode,
				plus_constant (XEXP (op0, 0), offset));

	  /* Fetch that unit, store the bitfield in it, then store the unit.  */
	  tempreg = copy_to_reg (op0);
	  store_bit_field (tempreg, bitsize, bitpos, fieldmode, value,
			   align, total_size);
	  emit_move_insn (op0, tempreg);
	  return value;
	}
      volatile_ok = save_volatile_ok;

      /* Add OFFSET into OP0's address.  */
      if (GET_CODE (xop0) == MEM)
	xop0 = change_address (xop0, byte_mode,
			       plus_constant (XEXP (xop0, 0), offset));

      /* If xop0 is a register, we need it in MAXMODE
	 to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
	PUT_MODE (xop0, maxmode);
      if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
	xop0 = gen_rtx (SUBREG, maxmode, xop0, 0);

      /* On big-endian machines, we count bits from the most significant.
	 If the bit field insn does not, we must invert.  */

#if BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN
      xbitpos = unit - bitsize - xbitpos;
#endif
      /* We have been counting XBITPOS within UNIT.
	 Count instead within the size of the register.  */
#if BITS_BIG_ENDIAN
      if (GET_CODE (xop0) != MEM)
	xbitpos += GET_MODE_BITSIZE (maxmode) - unit;
#endif
      unit = GET_MODE_BITSIZE (maxmode);

      /* Convert VALUE to maxmode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != maxmode)
	{
	  if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
	    {
	      /* Optimization: Don't bother really extending VALUE
		 if it has all the bits we will actually use.  However,
		 if we must narrow it, be sure we do it correctly.  */

	      if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
		{
		  /* Avoid making subreg of a subreg, or of a mem.  */
		  if (GET_CODE (value1) != REG)
		    value1 = copy_to_reg (value1);
		  value1 = gen_rtx (SUBREG, maxmode, value1, 0);
		}
	      else
		value1 = gen_lowpart (maxmode, value1);
	    }
	  else if (!CONSTANT_P (value))
	    /* Parse phase is supposed to make VALUE's data type
	       match that of the component reference, which is a type
	       at least as wide as the field; so VALUE should have
	       a mode that corresponds to that type.  */
	    abort ();
	}

      /* If this machine's insv insists on a register,
	 get VALUE1 into a register.  */
      if (! ((*insn_operand_predicate[(int) CODE_FOR_insv][3])
	     (value1, maxmode)))
	value1 = force_reg (maxmode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
	emit_insn (pat);
      else
	{
	  delete_insns_since (last);
	  store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
	}
    }
  else
  insv_loses:
    /* Insv is not available; store using shifts and boolean ops.  */
    store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
#else
  store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
#endif
  return value;
}
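
/* Illustrative use (a sketch, not called anywhere; the pseudo-register name
   is made up for the example): store a constant into the 6-bit field at bit
   10, letting store_bit_field choose between insv and the shift/mask
   fallback.  */
#if 0
  rtx some_reg = gen_reg_rtx (SImode);
  store_bit_field (some_reg, 6, 10, VOIDmode, GEN_INT (21),
		   UNITS_PER_WORD, -1);
#endif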
/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0 except offset by OFFSET bytes.
     (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
     (If OP0 is a register, it may be a full word or a narrower mode,
      but BITPOS still counts within a full word,
      which is significant on bigendian machines.)
   STRUCT_ALIGN is the alignment the structure is known to have (in bytes).

   Note that protect_from_queue has already been done on OP0 and VALUE.  */
static void
store_fixed_bit_field (op0, offset, bitsize, bitpos, value, struct_align)
     register rtx op0;
     register int offset, bitsize, bitpos;
     rtx value;
     int struct_align;
{
  register enum machine_mode mode;
  int total_bits = BITS_PER_WORD;
  rtx subtarget, temp;
  int all_zero = 0;
  int all_one = 0;

  /* Add OFFSET to OP0's address (if it is in memory)
     and if a single byte contains the whole bit field
     change OP0 to a byte.  */

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    {
      if (offset != 0)
	abort ();
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
	{
	  store_split_bit_field (op0, bitsize, bitpos, value, BITS_PER_WORD);
	  return;
	}
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
	 includes the entire field.  If such a mode would be larger than
	 a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
			    struct_align * BITS_PER_UNIT, word_mode,
			    GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
	{
	  /* The only way this should occur is if the field spans word
	     boundaries.  */
	  store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
				 value, struct_align);
	  return;
	}

      total_bits = GET_MODE_BITSIZE (mode);

      /* Get ref to an aligned byte, halfword, or word containing the field.
	 Adjust BITPOS to be position within a word,
	 and OFFSET to be the offset of that word.
	 Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = change_address (op0, mode,
			    plus_constant (XEXP (op0, 0), offset));
    }

  mode = GET_MODE (op0);

  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

#if BYTES_BIG_ENDIAN
  /* BITPOS is the distance between our msb
     and that of the containing datum.
     Convert it to the distance from the lsb.  */

  bitpos = total_bits - bitsize - bitpos;
#endif
  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (GET_CODE (value) == CONST_INT)
    {
      register HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
	v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
	all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
		&& v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
	       || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
	all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
		      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
	{
	  /* If VALUE is a floating-point mode, access it as an integer
	     of the corresponding size, then convert it.  This can occur on
	     a machine with 64 bit registers that uses SFmode for float.  */
	  if (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT)
	    {
	      if (GET_CODE (value) != REG)
		value = copy_to_reg (value);
	      value
		= gen_rtx (SUBREG, word_mode, value, 0);
	    }

	  if ((GET_CODE (value) == REG || GET_CODE (value) == SUBREG)
	      && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
	    value = gen_lowpart (mode, value);
	  else
	    value = convert_to_mode (mode, value, 1);
	}

      if (must_and)
	value = expand_binop (mode, and_optab, value,
			      mask_rtx (mode, 0, bitsize, 0),
			      NULL_RTX, 1, OPTAB_LIB_WIDEN);

      value = expand_shift (LSHIFT_EXPR, mode, value,
			    build_int_2 (bitpos, 0), NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */

  subtarget = (GET_CODE (op0) == REG || ! flag_force_mem) ? op0 : 0;

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, op0,
			   mask_rtx (mode, bitpos, bitsize, 1),
			   subtarget, 1, OPTAB_LIB_WIDEN);
      subtarget = temp;
    }
  else
    temp = op0;

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    temp = expand_binop (mode, ior_optab, temp, value,
			 subtarget, 1, OPTAB_LIB_WIDEN);
  if (op0 != temp)
    emit_move_insn (op0, temp);
}
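
/* A host-arithmetic sketch of the read-modify-write sequence above,
   assuming a 32-bit word and 0 < BITSIZE < 32.  Illustration only,
   compiled out.  */
#if 0
unsigned int
store_field_demo (unsigned int word, unsigned int v,
		  int bitpos, int bitsize)
{
  unsigned int mask = ((1u << bitsize) - 1) << bitpos;

  /* Clear the chosen bits, then OR in the shifted, truncated value.  */
  return (word & ~mask) | ((v << bitpos) & mask);
}
#endif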
/* Store a bit field that is split across two words.

   OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.  */
static void
store_split_bit_field (op0, bitsize, bitpos, value, align)
     rtx op0;
     int bitsize, bitpos;
     rtx value;
     int align;
{
  /* BITSIZE_1 is size of the part in the first word.  */
  int bitsize_1 = BITS_PER_WORD - bitpos % BITS_PER_WORD;
  /* BITSIZE_2 is size of the rest (in the following word).  */
  int bitsize_2 = bitsize - bitsize_1;
  rtx part1, part2;
  int unit = GET_CODE (op0) == MEM ? BITS_PER_UNIT : BITS_PER_WORD;
  int offset = bitpos / unit;
  rtx word;

  /* The field must span exactly one word boundary.  */
  if (bitpos / BITS_PER_WORD != (bitpos + bitsize - 1) / BITS_PER_WORD - 1)
    abort ();

  if (GET_MODE (value) != VOIDmode)
    value = convert_to_mode (word_mode, value, 1);
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    value = copy_to_reg (value);

  /* Split the value into two parts:
     PART1 gets that which goes in the first word; PART2 the other.  */
#if BYTES_BIG_ENDIAN
  /* PART1 gets the more significant part.  */
  if (GET_CODE (value) == CONST_INT)
    {
      part1 = GEN_INT ((unsigned HOST_WIDE_INT) (INTVAL (value)) >> bitsize_2);
      part2 = GEN_INT ((unsigned HOST_WIDE_INT) (INTVAL (value))
		       & (((HOST_WIDE_INT) 1 << bitsize_2) - 1));
    }
  else
    {
      part1 = extract_fixed_bit_field (word_mode, value, 0, bitsize_1,
				       BITS_PER_WORD - bitsize, NULL_RTX, 1,
				       BITS_PER_WORD);
      part2 = extract_fixed_bit_field (word_mode, value, 0, bitsize_2,
				       BITS_PER_WORD - bitsize_2, NULL_RTX, 1,
				       BITS_PER_WORD);
    }
#else
  /* PART1 gets the less significant part.  */
  if (GET_CODE (value) == CONST_INT)
    {
      part1 = GEN_INT ((unsigned HOST_WIDE_INT) (INTVAL (value))
		       & (((HOST_WIDE_INT) 1 << bitsize_1) - 1));
      part2 = GEN_INT ((unsigned HOST_WIDE_INT) (INTVAL (value)) >> bitsize_1);
    }
  else
    {
      part1 = extract_fixed_bit_field (word_mode, value, 0, bitsize_1, 0,
				       NULL_RTX, 1, BITS_PER_WORD);
      part2 = extract_fixed_bit_field (word_mode, value, 0, bitsize_2,
				       bitsize_1, NULL_RTX, 1, BITS_PER_WORD);
    }
#endif

  /* Store PART1 into the first word.  If OP0 is a MEM, pass OP0 and the
     offset computed above.  Otherwise, get the proper word and pass an
     offset of zero.  */
  word = (GET_CODE (op0) == MEM ? op0
	  : operand_subword (op0, offset, 1, GET_MODE (op0)));
  if (word == 0)
    abort ();

  store_fixed_bit_field (word, GET_CODE (op0) == MEM ? offset : 0,
			 bitsize_1, bitpos % unit, part1, align);

  /* Offset op0 by 1 word to get to the following one.  */
  if (GET_CODE (op0) == SUBREG)
    word = operand_subword (SUBREG_REG (op0), SUBREG_WORD (op0) + offset + 1,
			    1, VOIDmode);
  else if (GET_CODE (op0) == MEM)
    word = op0;
  else
    word = operand_subword (op0, offset + 1, 1, GET_MODE (op0));

  if (word == 0)
    abort ();

  /* Store PART2 into the second word.  */
  store_fixed_bit_field (word,
			 (GET_CODE (op0) == MEM
			  ? CEIL (offset + 1, UNITS_PER_WORD) * UNITS_PER_WORD
			  : 0),
			 bitsize_2, 0, part2, align);
}
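
/* Host-arithmetic sketch of the little-endian split above: the low
   BITSIZE_1 bits land at the top of word 0, the remaining BITSIZE_2 bits
   at the bottom of word 1.  Assumes 32-bit words, 0 < BITPOS < 32 and
   BITPOS + BITSIZE > 32.  Illustration only, compiled out.  */
#if 0
void
split_store_demo (unsigned int *words, unsigned int v,
		  int bitpos, int bitsize)
{
  int bitsize_1 = 32 - bitpos;		/* part in the first word */
  int bitsize_2 = bitsize - bitsize_1;	/* part in the second word */
  unsigned int part1 = v & ((1u << bitsize_1) - 1);
  unsigned int part2 = v >> bitsize_1;

  words[0] = (words[0] & ((1u << bitpos) - 1)) | (part1 << bitpos);
  words[1] = (words[1] & (~0u << bitsize_2)) | part2;
}
#endif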
/* Generate code to extract a byte-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.
   It may be a QUEUED.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
   TOTAL_SIZE is the size in bytes of the containing structure,
   or -1 if varying.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */
rtx
extract_bit_field (str_rtx, bitsize, bitnum, unsignedp,
		   target, mode, tmode, align, total_size)
     rtx str_rtx;
     register int bitsize;
     int bitnum;
     int unsignedp;
     rtx target;
     enum machine_mode mode, tmode;
     int align;
     int total_size;
{
  int unit = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  register int offset = bitnum / unit;
  register int bitpos = bitnum % unit;
  register rtx op0 = str_rtx;
  rtx spec_target = target;
  rtx spec_target_subreg = 0;

  if (GET_CODE (str_rtx) == MEM && ! MEM_IN_STRUCT_P (str_rtx))
    abort ();

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
		   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  if (tmode == VOIDmode)
    tmode = mode;

  while (GET_CODE (op0) == SUBREG)
    {
      offset += SUBREG_WORD (op0);
      op0 = SUBREG_REG (op0);
    }

#if BYTES_BIG_ENDIAN
  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (GET_CODE (op0) != MEM && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
#endif

  /* Extracting a full-word or multi-word value
     from a structure in a register.
     This can be done with just SUBREG.
     So too extracting a subword value in
     the least significant part of the register.  */

  if (GET_CODE (op0) == REG
      && ((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
	   && bitpos % BITS_PER_WORD == 0)
	  || (mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0) != BLKmode
#if BYTES_BIG_ENDIAN
	      && bitpos + bitsize == BITS_PER_WORD
#else
	      && bitpos == 0
#endif
	      )))
    {
      enum machine_mode mode1
	= mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0);

      if (mode1 != GET_MODE (op0))
	op0 = gen_rtx (SUBREG, mode1, op0, offset);

      if (mode1 != mode)
	return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
	 in the order least significant first.
	 This is because the most significant word is the one which may
	 be less than full.  */

      int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      int i;

      if (target == 0 || GET_CODE (target) != REG)
	target = gen_reg_rtx (mode);

      for (i = 0; i < nwords; i++)
	{
	  /* If I is 0, use the low-order word in both field and target;
	     if I is 1, use the next to lowest word; and so on.  */
	  int wordnum = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
	  int bit_offset = (WORDS_BIG_ENDIAN
			    ? MAX (0, bitsize - (i + 1) * BITS_PER_WORD)
			    : i * BITS_PER_WORD);
	  rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
	  rtx result_part
	    = extract_bit_field (op0, MIN (BITS_PER_WORD,
					   bitsize - i * BITS_PER_WORD),
				 bitnum + bit_offset,
				 1, target_part, mode, word_mode,
				 align, total_size);

	  if (target_part == 0)
	    abort ();

	  if (result_part != target_part)
	    emit_move_insn (target_part, result_part);
	}
      return target;
    }

  /* From here on we know the desired field is smaller than a word
     so we can assume it is an integer.  So we can safely extract it as one
     size of integer, if necessary, and then truncate or extend
     to the size that is wanted.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) == REG)
    {
      if (offset != 0
	  || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
	op0 = gen_rtx (SUBREG, TYPE_MODE (type_for_size (BITS_PER_WORD, 0)),
		       op0, offset);
      offset = 0;
    }
  else
    op0 = protect_from_queue (str_rtx, 1);

  /* Now OFFSET is nonzero only for memory operands.  */

  if (unsignedp)
    {
#ifdef HAVE_extzv
      if (HAVE_extzv
	  && (GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extzv][0])
	      >= bitsize))
	{
	  int xbitpos = bitpos, xoffset = offset;
	  rtx bitsize_rtx, bitpos_rtx;
	  rtx last = get_last_insn ();
	  rtx xop0 = op0;
	  rtx xtarget = target;
	  rtx xspec_target = spec_target;
	  rtx xspec_target_subreg = spec_target_subreg;
	  rtx pat;
	  enum machine_mode maxmode
	    = insn_operand_mode[(int) CODE_FOR_extzv][0];

	  if (GET_CODE (xop0) == MEM)
	    {
	      int save_volatile_ok = volatile_ok;
	      volatile_ok = 1;

	      /* Is the memory operand acceptable?  */
	      if (flag_force_mem
		  || ! ((*insn_operand_predicate[(int) CODE_FOR_extzv][1])
			(xop0, GET_MODE (xop0))))
		{
		  /* No, load into a reg and extract from there.  */
		  enum machine_mode bestmode;

		  /* Get the mode to use for inserting into this field.  If
		     OP0 is BLKmode, get the smallest mode consistent with the
		     alignment. If OP0 is a non-BLKmode object that is no
		     wider than MAXMODE, use its mode. Otherwise, use the
		     smallest mode containing the field.  */

		  if (GET_MODE (xop0) == BLKmode
		      || (GET_MODE_SIZE (GET_MODE (op0))
			  > GET_MODE_SIZE (maxmode)))
		    bestmode = get_best_mode (bitsize, bitnum,
					      align * BITS_PER_UNIT, maxmode,
					      MEM_VOLATILE_P (xop0));
		  else
		    bestmode = GET_MODE (xop0);

		  if (bestmode == VOIDmode)
		    goto extzv_loses;

		  /* Compute offset as multiple of this unit,
		     counting in bytes.  */
		  unit = GET_MODE_BITSIZE (bestmode);
		  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
		  xbitpos = bitnum % unit;
		  xop0 = change_address (xop0, bestmode,
					 plus_constant (XEXP (xop0, 0),
							xoffset));
		  /* Fetch it to a register in that size.  */
		  xop0 = force_reg (bestmode, xop0);

		  /* XBITPOS counts within UNIT, which is what is expected.  */
		}
	      else
		/* Get ref to first byte containing part of the field.  */
		xop0 = change_address (xop0, byte_mode,
				       plus_constant (XEXP (xop0, 0), xoffset));

	      volatile_ok = save_volatile_ok;
	    }

	  /* If op0 is a register, we need it in MAXMODE (which is usually
	     SImode) to make it acceptable to the format of extzv.  */
	  if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
	    abort ();
	  if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
	    xop0 = gen_rtx (SUBREG, maxmode, xop0, 0);

	  /* On big-endian machines, we count bits from the most significant.
	     If the bit field insn does not, we must invert.  */
#if BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN
	  xbitpos = unit - bitsize - xbitpos;
#endif
	  /* Now convert from counting within UNIT to counting in MAXMODE.  */
#if BITS_BIG_ENDIAN
	  if (GET_CODE (xop0) != MEM)
	    xbitpos += GET_MODE_BITSIZE (maxmode) - unit;
#endif
	  unit = GET_MODE_BITSIZE (maxmode);

	  if (xtarget == 0
	      || (flag_force_mem && GET_CODE (xtarget) == MEM))
	    xtarget = xspec_target = gen_reg_rtx (tmode);

	  if (GET_MODE (xtarget) != maxmode)
	    {
	      if (GET_CODE (xtarget) == REG)
		{
		  int wider = (GET_MODE_SIZE (maxmode)
			       > GET_MODE_SIZE (GET_MODE (xtarget)));
		  xtarget = gen_lowpart (maxmode, xtarget);
		  if (wider)
		    xspec_target_subreg = xtarget;
		}
	      else
		xtarget = gen_reg_rtx (maxmode);
	    }

	  /* If this machine's extzv insists on a register target,
	     make sure we have one.  */
	  if (! ((*insn_operand_predicate[(int) CODE_FOR_extzv][0])
		 (xtarget, maxmode)))
	    xtarget = gen_reg_rtx (maxmode);

	  bitsize_rtx = GEN_INT (bitsize);
	  bitpos_rtx = GEN_INT (xbitpos);

	  pat = gen_extzv (protect_from_queue (xtarget, 1),
			   xop0, bitsize_rtx, bitpos_rtx);
	  if (pat)
	    {
	      emit_insn (pat);
	      target = xtarget;
	      spec_target = xspec_target;
	      spec_target_subreg = xspec_target_subreg;
	    }
	  else
	    {
	      delete_insns_since (last);
	      target = extract_fixed_bit_field (tmode, op0, offset, bitsize,
						bitpos, target, 1, align);
	    }
	}
      else
      extzv_loses:
	target = extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
					  target, 1, align);
#else
      target = extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
					target, 1, align);
#endif
    }
  else
    {
#ifdef HAVE_extv
      if (HAVE_extv
	  && (GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extv][0])
	      >= bitsize))
	{
	  int xbitpos = bitpos, xoffset = offset;
	  rtx bitsize_rtx, bitpos_rtx;
	  rtx last = get_last_insn ();
	  rtx xop0 = op0, xtarget = target;
	  rtx xspec_target = spec_target;
	  rtx xspec_target_subreg = spec_target_subreg;
	  rtx pat;
	  enum machine_mode maxmode
	    = insn_operand_mode[(int) CODE_FOR_extv][0];

	  if (GET_CODE (xop0) == MEM)
	    {
	      /* Is the memory operand acceptable?  */
	      if (! ((*insn_operand_predicate[(int) CODE_FOR_extv][1])
		     (xop0, GET_MODE (xop0))))
		{
		  /* No, load into a reg and extract from there.  */
		  enum machine_mode bestmode;

		  /* Get the mode to use for inserting into this field.  If
		     OP0 is BLKmode, get the smallest mode consistent with the
		     alignment. If OP0 is a non-BLKmode object that is no
		     wider than MAXMODE, use its mode. Otherwise, use the
		     smallest mode containing the field.  */

		  if (GET_MODE (xop0) == BLKmode
		      || (GET_MODE_SIZE (GET_MODE (op0))
			  > GET_MODE_SIZE (maxmode)))
		    bestmode = get_best_mode (bitsize, bitnum,
					      align * BITS_PER_UNIT, maxmode,
					      MEM_VOLATILE_P (xop0));
		  else
		    bestmode = GET_MODE (xop0);

		  if (bestmode == VOIDmode)
		    goto extv_loses;

		  /* Compute offset as multiple of this unit,
		     counting in bytes.  */
		  unit = GET_MODE_BITSIZE (bestmode);
		  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
		  xbitpos = bitnum % unit;
		  xop0 = change_address (xop0, bestmode,
					 plus_constant (XEXP (xop0, 0),
							xoffset));
		  /* Fetch it to a register in that size.  */
		  xop0 = force_reg (bestmode, xop0);

		  /* XBITPOS counts within UNIT, which is what is expected.  */
		}
	      else
		/* Get ref to first byte containing part of the field.  */
		xop0 = change_address (xop0, byte_mode,
				       plus_constant (XEXP (xop0, 0), xoffset));
	    }

	  /* If op0 is a register, we need it in MAXMODE (which is usually
	     SImode) to make it acceptable to the format of extv.  */
	  if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
	    abort ();
	  if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
	    xop0 = gen_rtx (SUBREG, maxmode, xop0, 0);

	  /* On big-endian machines, we count bits from the most significant.
	     If the bit field insn does not, we must invert.  */
#if BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN
	  xbitpos = unit - bitsize - xbitpos;
#endif
	  /* XBITPOS counts within a size of UNIT.
	     Adjust to count within a size of MAXMODE.  */
#if BITS_BIG_ENDIAN
	  if (GET_CODE (xop0) != MEM)
	    xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);
#endif
	  unit = GET_MODE_BITSIZE (maxmode);

	  if (xtarget == 0
	      || (flag_force_mem && GET_CODE (xtarget) == MEM))
	    xtarget = xspec_target = gen_reg_rtx (tmode);

	  if (GET_MODE (xtarget) != maxmode)
	    {
	      if (GET_CODE (xtarget) == REG)
		{
		  int wider = (GET_MODE_SIZE (maxmode)
			       > GET_MODE_SIZE (GET_MODE (xtarget)));
		  xtarget = gen_lowpart (maxmode, xtarget);
		  if (wider)
		    xspec_target_subreg = xtarget;
		}
	      else
		xtarget = gen_reg_rtx (maxmode);
	    }

	  /* If this machine's extv insists on a register target,
	     make sure we have one.  */
	  if (! ((*insn_operand_predicate[(int) CODE_FOR_extv][0])
		 (xtarget, maxmode)))
	    xtarget = gen_reg_rtx (maxmode);

	  bitsize_rtx = GEN_INT (bitsize);
	  bitpos_rtx = GEN_INT (xbitpos);

	  pat = gen_extv (protect_from_queue (xtarget, 1),
			  xop0, bitsize_rtx, bitpos_rtx);
	  if (pat)
	    {
	      emit_insn (pat);
	      target = xtarget;
	      spec_target = xspec_target;
	      spec_target_subreg = xspec_target_subreg;
	    }
	  else
	    {
	      delete_insns_since (last);
	      target = extract_fixed_bit_field (tmode, op0, offset, bitsize,
						bitpos, target, 0, align);
	    }
	}
      else
      extv_loses:
	target = extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
					  target, 0, align);
#else
      target = extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
					target, 0, align);
#endif
    }

  if (target == spec_target)
    return target;
  if (target == spec_target_subreg)
    return spec_target;
  if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
    {
      /* If the target mode is floating-point, first convert to the
	 integer mode of that size and then access it as a floating-point
	 value via a SUBREG.  */
      if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
	{
	  target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
						   MODE_INT, 0),
				    target, unsignedp);
	  if (GET_CODE (target) != REG)
	    target = copy_to_reg (target);
	  return gen_rtx (SUBREG, tmode, target, 0);
	}
      else
	return convert_to_mode (tmode, target, unsignedp);
    }
  return target;
}
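
/* Illustrative use (a sketch, not called anywhere; the pseudo-register name
   is made up for the example): fetch a 6-bit field at bit 10, zero-extended
   into SImode.  */
#if 0
  rtx field = extract_bit_field (some_reg, 6, 10, 1,
				 NULL_RTX, SImode, SImode,
				 UNITS_PER_WORD, -1);
#endif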
/* Extract a bit field using shifts and boolean operations.
   Returns an rtx to represent the value.
   OP0 addresses a register (word) or memory (byte).
   BITPOS says which bit within the word or byte the bit field starts in.
   OFFSET says how many bytes farther the bit field starts;
    it is 0 if OP0 is a register.
   BITSIZE says how many bits long the bit field is.
    (If OP0 is a register, it may be narrower than a full word,
     but BITPOS still counts within a full word,
     which is significant on bigendian machines.)

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.

   ALIGN is the alignment that STR_RTX is known to have, measured in bytes.  */
static rtx
extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
			 target, unsignedp, align)
     enum machine_mode tmode;
     register rtx op0, target;
     register int offset, bitsize, bitpos;
     int unsignedp;
     int align;
{
  int total_bits = BITS_PER_WORD;
  enum machine_mode mode;

  if (GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
	return extract_split_bit_field (op0, bitsize, bitpos,
					unsignedp, align);
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
	 includes the entire field.  If such a mode would be larger than
	 a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
			    align * BITS_PER_UNIT, word_mode,
			    GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
	/* The only way this should occur is if the field spans word
	   boundaries.  */
	return extract_split_bit_field (op0, bitsize,
					bitpos + offset * BITS_PER_UNIT,
					unsignedp, align);

      total_bits = GET_MODE_BITSIZE (mode);

      /* Get ref to an aligned byte, halfword, or word containing the field.
	 Adjust BITPOS to be position within a word,
	 and OFFSET to be the offset of that word.
	 Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = change_address (op0, mode,
			    plus_constant (XEXP (op0, 0), offset));
    }

  mode = GET_MODE (op0);

#if BYTES_BIG_ENDIAN
  /* BITPOS is the distance between our msb and that of OP0.
     Convert it to the distance from the lsb.  */

  bitpos = total_bits - bitsize - bitpos;
#endif
  /* Now BITPOS is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */

  if (unsignedp)
    {
      if (bitpos)
	{
	  /* If the field does not already start at the lsb,
	     shift it so it does.  */
	  tree amount = build_int_2 (bitpos, 0);
	  /* Maybe propagate the target for the shift.  */
	  /* But not if we will return it--could confuse integrate.c.  */
	  rtx subtarget = (target != 0 && GET_CODE (target) == REG
			   && !REG_FUNCTION_VALUE_P (target)
			   ? target : 0);
	  if (tmode != mode) subtarget = 0;
	  op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
	}
      /* Convert the value to the desired mode.  */
      if (mode != tmode)
	op0 = convert_to_mode (tmode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
	 mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitpos + bitsize
#ifdef SLOW_ZERO_EXTEND
	  /* Always generate an `and' if
	     we just zero-extended op0 and SLOW_ZERO_EXTEND, since it
	     will combine fruitfully with the zero-extend.  */
	  || tmode != mode
#endif
	  )
	return expand_binop (GET_MODE (op0), and_optab, op0,
			     mask_rtx (GET_MODE (op0), 0, bitsize, 0),
			     target, 1, OPTAB_LIB_WIDEN);
      return op0;
    }

  /* To extract a signed bit-field, first shift its msb to the msb of the word,
     then arithmetic-shift its lsb to the lsb of the word.  */
  op0 = force_reg (mode, op0);
  if (mode != tmode)
    target = 0;

  /* Find the narrowest integer mode that contains the field.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
      {
	op0 = convert_to_mode (mode, op0, 0);
	break;
      }

  if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
    {
      tree amount = build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
      /* Maybe propagate the target for the shift.  */
      /* But not if we will return the result--could confuse integrate.c.  */
      rtx subtarget = (target != 0 && GET_CODE (target) == REG
		       && ! REG_FUNCTION_VALUE_P (target)
		       ? target : 0);
      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }

  return expand_shift (RSHIFT_EXPR, mode, op0,
		       build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
		       target, 0);
}
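
/* Host-arithmetic sketch of the signed extraction above: left-shift the
   field's msb to the word's msb, then arithmetic right-shift its lsb to the
   word's lsb.  Assumes a 32-bit int with an arithmetic >> on signed values;
   illustration only, compiled out.  */
#if 0
int
extract_signed_demo (int word, int bitpos, int bitsize)
{
  word <<= 32 - (bitpos + bitsize);	/* msb of field to msb of word */
  return word >> (32 - bitsize);	/* lsb of field to lsb of word,
					   sign-extending on the way down */
}
#endif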
/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
   of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
   complement of that if COMPLEMENT.  The mask is truncated if
   necessary to the width of mode MODE.  */
static rtx
mask_rtx (mode, bitpos, bitsize, complement)
     enum machine_mode mode;
     int bitpos, bitsize, complement;
{
  HOST_WIDE_INT masklow, maskhigh;

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    masklow = (HOST_WIDE_INT) -1 << bitpos;
  else
    masklow = 0;

  if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
    masklow &= ((unsigned HOST_WIDE_INT) -1
		>> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));

  if (bitpos <= HOST_BITS_PER_WIDE_INT)
    maskhigh = -1;
  else
    maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);

  if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
    maskhigh &= ((unsigned HOST_WIDE_INT) -1
		 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
  else
    maskhigh = 0;

  if (complement)
    {
      maskhigh = ~maskhigh;
      masklow = ~masklow;
    }

  return immed_double_const (masklow, maskhigh, mode);
}
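
/* Worked example, assuming a 32-bit HOST_WIDE_INT: BITSIZE == 5 and
   BITPOS == 3 give masklow == 0xf8 (five ones shifted up by three) and
   maskhigh == 0; the complemented form used by store_fixed_bit_field is
   ~0xf8, which clears exactly the field's bits.  The one-word case reduces
   to the expression below (valid for bitsize >= 1, bitpos + bitsize <= 32);
   illustration only, compiled out.  */
#if 0
unsigned int
mask_demo (int bitpos, int bitsize)
{
  return (~0u << bitpos) & (~0u >> (32 - bitpos - bitsize));
}
#endif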
/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
   VALUE truncated to BITSIZE bits and then shifted left BITPOS bits.  */

static rtx
lshift_value (mode, value, bitpos, bitsize)
     enum machine_mode mode;
     rtx value;
     int bitpos, bitsize;
{
  unsigned HOST_WIDE_INT v = INTVAL (value);
  HOST_WIDE_INT low, high;

  if (bitsize < HOST_BITS_PER_WIDE_INT)
    v &= ~((HOST_WIDE_INT) -1 << bitsize);

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      low = v << bitpos;
      high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
    }
  else
    {
      low = 0;
      high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
    }

  return immed_double_const (low, high, mode);
}
/* Extract a bit field that is split across two words
   and return an RTX for the result.

   OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
   BITSIZE is the field width; BITPOS, position of its first bit, in the word.
   UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend.  */

static rtx
extract_split_bit_field (op0, bitsize, bitpos, unsignedp, align)
     rtx op0;
     int bitsize, bitpos, unsignedp, align;
{
  /* BITSIZE_1 is size of the part in the first word.  */
  int bitsize_1 = BITS_PER_WORD - bitpos % BITS_PER_WORD;
  /* BITSIZE_2 is size of the rest (in the following word).  */
  int bitsize_2 = bitsize - bitsize_1;
  rtx part1, part2, result;
  int unit = GET_CODE (op0) == MEM ? BITS_PER_UNIT : BITS_PER_WORD;
  int offset = bitpos / unit;
  rtx word;

  /* The field must span exactly one word boundary.  */
  if (bitpos / BITS_PER_WORD != (bitpos + bitsize - 1) / BITS_PER_WORD - 1)
    abort ();

  /* Get the part of the bit field from the first word.  If OP0 is a MEM,
     pass OP0 and the offset computed above.  Otherwise, get the proper
     word and pass an offset of zero.  */
  word = (GET_CODE (op0) == MEM ? op0
	  : operand_subword_force (op0, offset, GET_MODE (op0)));
  part1 = extract_fixed_bit_field (word_mode, word,
				   GET_CODE (op0) == MEM ? offset : 0,
				   bitsize_1, bitpos % unit, NULL_RTX,
				   1, align);

  /* Offset op0 by 1 word to get to the following one.  */
  if (GET_CODE (op0) == SUBREG)
    word = operand_subword_force (SUBREG_REG (op0),
				  SUBREG_WORD (op0) + offset + 1, VOIDmode);
  else if (GET_CODE (op0) == MEM)
    word = op0;
  else
    word = operand_subword_force (op0, offset + 1, GET_MODE (op0));

  /* Get the part of the bit field from the second word.  */
  part2 = extract_fixed_bit_field (word_mode, word,
				   (GET_CODE (op0) == MEM
				    ? CEIL (offset + 1, UNITS_PER_WORD) * UNITS_PER_WORD
				    : 0),
				   bitsize_2, 0, NULL_RTX, 1, align);

  /* Shift the more significant part up to fit above the other part.  */
#if BYTES_BIG_ENDIAN
  part1 = expand_shift (LSHIFT_EXPR, word_mode, part1,
			build_int_2 (bitsize_2, 0), 0, 1);
#else
  part2 = expand_shift (LSHIFT_EXPR, word_mode, part2,
			build_int_2 (bitsize_1, 0), 0, 1);
#endif

  /* Combine the two parts with bitwise or.  This works
     because we extracted both parts as unsigned bit fields.  */
  result = expand_binop (word_mode, ior_optab, part1, part2, NULL_RTX, 1,
			 OPTAB_LIB_WIDEN);

  /* Unsigned bit field: we are done.  */
  if (unsignedp)
    return result;
  /* Signed bit field: sign-extend with two arithmetic shifts.  */
  result = expand_shift (LSHIFT_EXPR, word_mode, result,
			 build_int_2 (BITS_PER_WORD - bitsize, 0),
			 NULL_RTX, 0);
  return expand_shift (RSHIFT_EXPR, word_mode, result,
		       build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
}
/* Add INC into TARGET.  */

void
expand_inc (target, inc)
     rtx target, inc;
{
  rtx value = expand_binop (GET_MODE (target), add_optab,
			    target, inc,
			    target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}

/* Subtract DEC from TARGET.  */

void
expand_dec (target, dec)
     rtx target, dec;
{
  rtx value = expand_binop (GET_MODE (target), sub_optab,
			    target, dec,
			    target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}
/* Output a shift instruction for expression code CODE,
   with SHIFTED being the rtx for the value to shift,
   and AMOUNT the tree for the amount to shift by.
   Store the result in the rtx TARGET, if that is convenient.
   If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
   Return the rtx for where the value is.  */

rtx
expand_shift (code, mode, shifted, amount, target, unsignedp)
     enum tree_code code;
     register enum machine_mode mode;
     rtx shifted;
     tree amount;
     register rtx target;
     int unsignedp;
{
  register rtx op1, temp = 0;
  register int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
  register int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
  int try;

  /* Previously detected shift-counts computed by NEGATE_EXPR
     and shifted in the other direction; but that does not work
     on all machines.  */

  op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);

  if (op1 == const0_rtx)
    return shifted;

  for (try = 0; temp == 0 && try < 3; try++)
    {
      enum optab_methods methods;

      if (try == 0)
	methods = OPTAB_DIRECT;
      else if (try == 1)
	methods = OPTAB_WIDEN;
      else
	methods = OPTAB_LIB_WIDEN;

      if (rotate)
	{
	  /* Widening does not work for rotation.  */
	  if (methods == OPTAB_WIDEN)
	    continue;
	  else if (methods == OPTAB_LIB_WIDEN)
	    methods = OPTAB_LIB;

	  temp = expand_binop (mode,
			       left ? rotl_optab : rotr_optab,
			       shifted, op1, target, unsignedp, methods);
	}
      else if (unsignedp)
	{
	  temp = expand_binop (mode,
			       left ? lshl_optab : lshr_optab,
			       shifted, op1, target, unsignedp, methods);
	  if (temp == 0 && left)
	    temp = expand_binop (mode, ashl_optab,
				 shifted, op1, target, unsignedp, methods);
	}

      /* Do arithmetic shifts.
	 Also, if we are going to widen the operand, we can just as well
	 use an arithmetic right-shift instead of a logical one.  */
      if (temp == 0 && ! rotate
	  && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
	{
	  enum optab_methods methods1 = methods;

	  /* If trying to widen a log shift to an arithmetic shift,
	     don't accept an arithmetic shift of the same size.  */
	  if (unsignedp)
	    methods1 = OPTAB_MUST_WIDEN;

	  /* Arithmetic shift */

	  temp = expand_binop (mode,
			       left ? ashl_optab : ashr_optab,
			       shifted, op1, target, unsignedp, methods1);
	}

#ifdef HAVE_extzv
      /* We can do a logical (unsigned) right shift with a bit-field
	 extract insn.  But first check if one of the above methods worked.  */
      if (temp != 0)
	return temp;

      if (unsignedp && code == RSHIFT_EXPR && ! BITS_BIG_ENDIAN && HAVE_extzv)
	{
	  enum machine_mode output_mode
	    = insn_operand_mode[(int) CODE_FOR_extzv][0];

	  if ((methods == OPTAB_DIRECT && mode == output_mode)
	      || (methods == OPTAB_WIDEN
		  && GET_MODE_SIZE (mode) < GET_MODE_SIZE (output_mode)))
	    {
	      rtx shifted1 = convert_to_mode (output_mode,
					      protect_from_queue (shifted, 0),
					      1);
	      enum machine_mode length_mode
		= insn_operand_mode[(int) CODE_FOR_extzv][2];
	      enum machine_mode pos_mode
		= insn_operand_mode[(int) CODE_FOR_extzv][3];
	      rtx last = get_last_insn ();
	      rtx width;
	      rtx xop1 = op1;
	      rtx target1 = 0;
	      rtx pat;

	      if (target != 0)
		target1 = protect_from_queue (target, 1);

	      /* We define extract insns as having OUTPUT_MODE in a register
		 and the mode of operand 1 in memory.  Since we want
		 OUTPUT_MODE, we will always force the operand into a
		 register.  At some point we might want to support MEM
		 directly.  */
	      shifted1 = force_reg (output_mode, shifted1);

	      /* If we don't have or cannot use a suggested target,
		 make a place for the result, in the proper mode.  */
	      if (methods == OPTAB_WIDEN || target1 == 0
		  || ! ((*insn_operand_predicate[(int) CODE_FOR_extzv][0])
			(target1, output_mode)))
		target1 = gen_reg_rtx (output_mode);

	      xop1 = protect_from_queue (xop1, 0);
	      xop1 = convert_to_mode (pos_mode, xop1,
				      TREE_UNSIGNED (TREE_TYPE (amount)));

	      /* If this machine's extzv insists on a register for
		 operand 3 (position), arrange for that.  */
	      if (! ((*insn_operand_predicate[(int) CODE_FOR_extzv][3])
		     (xop1, pos_mode)))
		xop1 = force_reg (pos_mode, xop1);

	      /* WIDTH gets the width of the bit field to extract:
		 wordsize minus # bits to shift by.  */
	      if (GET_CODE (xop1) == CONST_INT)
		width = GEN_INT (GET_MODE_BITSIZE (mode) - INTVAL (op1));
	      else
		{
		  /* Now get the width in the proper mode.  */
		  op1 = protect_from_queue (op1, 0);
		  width = convert_to_mode (length_mode, op1,
					   TREE_UNSIGNED (TREE_TYPE (amount)));

		  width = expand_binop (length_mode, sub_optab,
					GEN_INT (GET_MODE_BITSIZE (mode)),
					width, NULL_RTX, 0, OPTAB_LIB_WIDEN);
		}

	      /* If this machine's extzv insists on a register for
		 operand 2 (length), arrange for that.  */
	      if (! ((*insn_operand_predicate[(int) CODE_FOR_extzv][2])
		     (width, length_mode)))
		width = force_reg (length_mode, width);

	      /* Now extract with WIDTH, omitting OP1 least sig bits.  */
	      pat = gen_extzv (target1, shifted1, width, xop1);
	      if (pat)
		{
		  emit_insn (pat);
		  temp = convert_to_mode (mode, target1, 1);
		}
	      else
		delete_insns_since (last);
	    }

	  /* Can also do logical shift with signed bit-field extract
	     followed by inserting the bit-field at a different position.
	     That strategy is not yet implemented.  */
	}
#endif /* HAVE_extzv */
    }

  if (temp == 0)
    abort ();
  return temp;
}
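
/* Host-arithmetic sketch of the trick above: a logical right shift by N is
   the same as extracting the field of width (wordsize - N) that starts at
   bit N.  Assumes 32-bit words and 0 < N < 32; illustration only,
   compiled out.  */
#if 0
unsigned int
shift_as_extract_demo (unsigned int x, int n)
{
  int width = 32 - n;			/* width of the field to extract */
  return (x >> n) & ((1u << width) - 1);	/* equals x >> n */
}
#endif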
enum alg_code { alg_add, alg_subtract, alg_compound };

/* This structure records a sequence of operations.
   `ops' is the number of operations recorded.
   `cost' is their total cost.
   The operations are stored in `op' and the corresponding
   integer coefficients in `coeff'.
   These are the operations:
   alg_add       Add to the total the multiplicand times the coefficient.
   alg_subtract  Subtract the multiplicand times the coefficient.
   alg_compound  This coefficient plus or minus the following one
		 is multiplied into the total.  The following operation
		 is alg_add or alg_subtract to indicate whether to add
		 or subtract the two coefficients.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif

struct algorithm
{
  int cost;
  unsigned int ops;
  enum alg_code op[MAX_BITS_PER_WORD];
  unsigned HOST_WIDE_INT coeff[MAX_BITS_PER_WORD];
};
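
/* A compiled-out illustration of the two main tricks synth_mult looks for:
   factors of the form 2**m +- 1, and runs of one-bits handled by
   subtraction.  */
#if 0
unsigned int
mult_by_10 (unsigned int x)	/* 10 = 2 * (2**2 + 1) */
{
  return ((x << 2) + x) << 1;
}

unsigned int
mult_by_14 (unsigned int x)	/* 14 = 2**4 - 2: many bits in a row */
{
  return (x << 4) - (x << 1);
}
#endif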
/* Compute and return the best algorithm for multiplying by T.
   Assume that add insns cost ADD_COST and shifts cost SHIFT_COST.
   Return cost -1 if would cost more than MAX_COST.  */

static struct algorithm
synth_mult (t, add_cost, shift_cost, max_cost)
     unsigned HOST_WIDE_INT t;
     int add_cost, shift_cost;
     int max_cost;
{
  int m;
  struct algorithm *best_alg
    = (struct algorithm *)alloca (sizeof (struct algorithm));
  struct algorithm *alg_in
    = (struct algorithm *)alloca (sizeof (struct algorithm));
  unsigned HOST_WIDE_INT q, w;
  int cost;

  /* No matter what happens, we want to return a valid algorithm.  */
  best_alg->cost = max_cost;
  best_alg->ops = 0;

  /* Is t an exponent of 2, so we can just do a shift?  */

  if ((t & -t) == t)
    {
      if (max_cost >= shift_cost)
	{
	  best_alg->cost = shift_cost;
	  best_alg->ops = 1;
	  best_alg->op[0] = alg_add;
	  best_alg->coeff[0] = t;
	}
      else
	best_alg->cost = -1;

      return *best_alg;
    }

  /* If MAX_COST just permits as little as an addition (or less), we won't
     succeed in synthesizing an algorithm for t.  Return immediately with
     an indication of failure.  */
  if (max_cost <= add_cost)
    {
      best_alg->cost = -1;
      return *best_alg;
    }

  /* Look for factors of t of the form
     t = q(2**m +- 1), 2 <= m <= floor(log2(t)) - 1.
     If we find such a factor, we can multiply by t using an algorithm that
     multiplies by q, shift the result by m and add/subtract it to itself.  */

  for (m = floor_log2 (t) - 1; m >= 2; m--)
    {
      HOST_WIDE_INT m_exp_2 = (HOST_WIDE_INT) 1 << m;
      HOST_WIDE_INT d;

      d = m_exp_2 + 1;
      if (t % d == 0)
	{
	  HOST_WIDE_INT q = t / d;

	  cost = add_cost + shift_cost * 2;

	  *alg_in = synth_mult (q, add_cost, shift_cost,
				MIN (max_cost, best_alg->cost) - cost);

	  if (alg_in->cost >= 0)
	    {
	      cost += alg_in->cost;

	      if (cost < best_alg->cost)
		{
		  struct algorithm *x;
		  x = alg_in, alg_in = best_alg, best_alg = x;
		  best_alg->coeff[best_alg->ops] = m_exp_2;
		  best_alg->op[best_alg->ops++] = alg_compound;
		  best_alg->coeff[best_alg->ops] = 1;
		  best_alg->op[best_alg->ops++] = alg_add;
		  best_alg->cost = cost;
		}
	    }
	}

      d = m_exp_2 - 1;
      if (t % d == 0)
	{
	  HOST_WIDE_INT q = t / d;

	  cost = add_cost + shift_cost * 2;

	  *alg_in = synth_mult (q, add_cost, shift_cost,
				MIN (max_cost, best_alg->cost) - cost);

	  if (alg_in->cost >= 0)
	    {
	      cost += alg_in->cost;

	      if (cost < best_alg->cost)
		{
		  struct algorithm *x;
		  x = alg_in, alg_in = best_alg, best_alg = x;
		  best_alg->coeff[best_alg->ops] = m_exp_2;
		  best_alg->op[best_alg->ops++] = alg_compound;
		  best_alg->coeff[best_alg->ops] = 1;
		  best_alg->op[best_alg->ops++] = alg_subtract;
		  best_alg->cost = cost;
		}
	    }
	}
    }

  /* Try load effective address instructions, i.e. do a*3, a*5, a*9.  */

  q = t & -t;			/* get out lsb */
  w = (t - q) & -(t - q);	/* get out next lsb */

  if (w / q <= lea_max_mul)
    {
      cost = lea_cost + (q != 1 ? shift_cost : 0);

      *alg_in = synth_mult (t - q - w, add_cost, shift_cost,
			    MIN (max_cost, best_alg->cost) - cost);

      if (alg_in->cost >= 0)
	{
	  cost += alg_in->cost;

	  /* Use <= to prefer this method to the factoring method
	     when the cost appears the same, because this method
	     uses fewer temporary registers.  */
	  if (cost <= best_alg->cost)
	    {
	      struct algorithm *x;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->coeff[best_alg->ops] = w;
	      best_alg->op[best_alg->ops++] = alg_add;
	      best_alg->coeff[best_alg->ops] = q;
	      best_alg->op[best_alg->ops++] = alg_add;
	      best_alg->cost = cost;
	    }
	}
    }

  /* Now, use the good old method to add or subtract at the leftmost
     1-bit.  */

  q = t & -t;			/* get out lsb */
  for (w = q; (w & t) != 0; w <<= 1)
    ;
  if ((w > q << 1)
      /* Reject the case where t has only two bits.
	 Thus we prefer addition in that case.  */
      && !(t < w && w == q << 2))
    {
      /* There are many bits in a row.  Make 'em by subtraction.  */

      cost = add_cost;
      if (q != 1)
	cost += shift_cost;

      *alg_in = synth_mult (t + q, add_cost, shift_cost,
			    MIN (max_cost, best_alg->cost) - cost);

      if (alg_in->cost >= 0)
	{
	  cost += alg_in->cost;

	  /* Use <= to prefer this method to the factoring method
	     when the cost appears the same, because this method
	     uses fewer temporary registers.  */
	  if (cost <= best_alg->cost)
	    {
	      struct algorithm *x;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->coeff[best_alg->ops] = q;
	      best_alg->op[best_alg->ops++] = alg_subtract;
	      best_alg->cost = cost;
	    }
	}
    }
  else
    {
      /* There's only one bit at the left.  Make it by addition.  */

      cost = add_cost;
      if (q != 1)
	cost += shift_cost;

      *alg_in = synth_mult (t - q, add_cost, shift_cost,
			    MIN (max_cost, best_alg->cost) - cost);

      if (alg_in->cost >= 0)
	{
	  cost += alg_in->cost;

	  if (cost <= best_alg->cost)
	    {
	      struct algorithm *x;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->coeff[best_alg->ops] = q;
	      best_alg->op[best_alg->ops++] = alg_add;
	      best_alg->cost = cost;
	    }
	}
    }

  if (best_alg->cost >= max_cost)
    best_alg->cost = -1;
  return *best_alg;
}
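
/* A much-simplified, compiled-out illustration of the search: synthesize
   x*t with only the "leftmost bits" rules (add a lone high bit, subtract
   across a run of ones), ignoring costs, factoring, and lea.  Assumes t + q
   does not overflow.  */
#if 0
unsigned int
mult_demo (unsigned int x, unsigned int t)
{
  unsigned int q, w;

  if (t == 0)
    return 0;
  if ((t & -t) == t)			/* power of two: just shift */
    {
      int m = 0;
      while ((t >>= 1) != 0)
	m++;
      return x << m;
    }
  q = t & -t;				/* lowest set bit */
  for (w = q; (w & t) != 0; w <<= 1)	/* find top of the run of ones */
    ;
  if (w > q << 1)			/* run of two or more ones */
    return mult_demo (x, t + q) - mult_demo (x, q);
  else					/* lone bit: peel it off */
    return mult_demo (x, t - q) + mult_demo (x, q);
}
#endif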
1943 /* Perform a multiplication and return an rtx for the result.
1944 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
1945 TARGET is a suggestion for where to store the result (an rtx).
1947 We check specially for a constant integer as OP1.
1948 If you want this check for OP0 as well, then before calling
1949 you should swap the two operands if OP0 would be constant. */
1952 expand_mult (mode, op0, op1, target, unsignedp)
1953 enum machine_mode mode;
1954 register rtx op0, op1, target;
1957 rtx const_op1 = op1;
1959 /* If we are multiplying in DImode, it may still be a win
1960 to try to work with shifts and adds. */
1961 if (GET_CODE (op1) == CONST_DOUBLE
1962 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
1963 && HOST_BITS_PER_INT <= BITS_PER_WORD)
1965 if ((CONST_DOUBLE_HIGH (op1) == 0 && CONST_DOUBLE_LOW (op1) >= 0)
1966 || (CONST_DOUBLE_HIGH (op1) == -1 && CONST_DOUBLE_LOW (op1) < 0))
1967 const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
1970 /* We used to test optimize here, on the grounds that it's better to
1971 produce a smaller program when -O is not used.
1972 But this causes such a terrible slowdown sometimes
1973 that it seems better to use synth_mult always. */
1974 if (GET_CODE (const_op1) == CONST_INT && ! mult_is_very_cheap)
1976 struct algorithm alg;
1977 struct algorithm neg_alg;
1979 HOST_WIDE_INT absval = INTVAL (op1);
1982 /* Try to do the computation two ways: multiply by the negative of OP1
1983 and then negate, or do the multiplication directly. The latter is
1984 usually faster for positive numbers and the former for negative
1985 numbers, but the opposite can be faster if the original value
1986 has a factor of 2**m +/- 1, while the negated value does not or
1989 alg = synth_mult (absval, add_cost, shift_cost, mult_cost);
1990 neg_alg = synth_mult (- absval, add_cost, shift_cost,
1991 (alg.cost >= 0 ? alg.cost : mult_cost)
1994 if (neg_alg.cost >= 0 && neg_alg.cost + negate_cost < alg.cost)
1995 alg = neg_alg, negate = 1, absval = - absval;
1999 /* If we found something, it must be cheaper than multiply.
2003 int factors_seen = 0;
2005 op0 = protect_from_queue (op0, 0);
2007 /* Avoid referencing memory over and over.
2008 For speed, but also for correctness when mem is volatile. */
2009 if (GET_CODE (op0) == MEM)
2010 op0 = force_reg (mode, op0);
2013 accum = copy_to_mode_reg (mode, op0);
2016 /* 1 if this is the last in a series of adds and subtracts. */
2017 int last = (1 == alg.ops || alg.op[1] == alg_compound);
2018 int log = floor_log2 (alg.coeff[0]);
2019 if (! factors_seen && ! last)
2020 log -= floor_log2 (alg.coeff[1]);
2022 if (alg.op[0] != alg_add)
2024 accum = expand_shift (LSHIFT_EXPR, mode, op0,
2025 build_int_2 (log, 0), NULL_RTX, 0);
2028 while (++opno < alg.ops)
2030 int log = floor_log2 (alg.coeff[opno]);
2031 /* 1 if this is the last in a series of adds and subtracts. */
2032 int last = (opno + 1 == alg.ops
2033 || alg.op[opno + 1] == alg_compound);
2035 /* If we have not yet seen any separate factors (alg_compound)
2036 then turn op0<<a1 + op0<<a2 + op0<<a3... into
2037 (op0<<(a1-a2) + op0)<<(a2-a3) + op0... */
2038 switch (alg.op[opno])
2043 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2044 build_int_2 (log, 0), NULL_RTX, 0);
2045 accum = force_operand (gen_rtx (PLUS, mode, accum, tem),
2051 log -= floor_log2 (alg.coeff[opno + 1]);
2052 accum = force_operand (gen_rtx (PLUS, mode, accum, op0),
2054 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2055 build_int_2 (log, 0), accum, 0);
2062 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2063 build_int_2 (log, 0), NULL_RTX, 0);
2064 accum = force_operand (gen_rtx (MINUS, mode, accum, tem),
2070 log -= floor_log2 (alg.coeff[opno + 1]);
2071 accum = force_operand (gen_rtx (MINUS, mode, accum, op0),
2073 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2074 build_int_2 (log, 0), accum, 0);
2081 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2082 build_int_2 (log, 0), NULL_RTX, 0);
2084 log = floor_log2 (alg.coeff[opno + 1]);
2085 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2086 build_int_2 (log, 0), NULL_RTX, 0);
2088 if (alg.op[opno] == alg_add)
2089 accum = force_operand (gen_rtx (PLUS, mode, tem, accum),
2092 accum = force_operand (gen_rtx (MINUS, mode, tem, accum),
2097 /* Write a REG_EQUAL note on the last insn so that we can cse
2098 multiplication sequences. We need not do this if we were
2099 multiplying by a power of two, since only one insn would have
2100 been generated.
2102 ??? We could also write REG_EQUAL notes on the last insn of
2103 each sequence that uses a single temporary, but it is not
2104 clear how to calculate the partial product so far.
2106 Torbjorn: Can you do this? */
2108 if (exact_log2 (absval) < 0)
2110 last = get_last_insn ();
2112 = gen_rtx (EXPR_LIST, REG_EQUAL,
2113 gen_rtx (MULT, mode, op0,
2114 negate ? GEN_INT (absval) : op1),
2118 return (negate ? expand_unop (mode, neg_optab, accum, target, 0)
2123 /* This used to use umul_optab if unsigned,
2124 but I think that for non-widening multiply there is no difference
2125 between signed and unsigned. */
2126 op0 = expand_binop (mode, smul_optab,
2127 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
2133 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
2134 if that is convenient, and returning where the result is.
2135 You may request either the quotient or the remainder as the result;
2136 specify REM_FLAG nonzero to get the remainder.
2138 CODE is the expression code for which kind of division this is;
2139 it controls how rounding is done. MODE is the machine mode to use.
2140 UNSIGNEDP nonzero means do unsigned division. */
2142 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
2143 and then correct it by or'ing in missing high bits
2144 if result of ANDI is nonzero.
2145 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
2146 This could optimize to a bfexts instruction.
2147 But C doesn't use these operations, so their optimizations are
2148 not worth bothering with. */
2151 expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
2153 enum tree_code code;
2154 enum machine_mode mode;
2155 register rtx op0, op1, target;
2158 register rtx result = 0;
2159 enum machine_mode compute_mode;
2162 int can_clobber_op0;
2163 int mod_insn_no_good = 0;
2164 rtx adjusted_op0 = op0;
2165 optab optab1, optab2;
2167 /* We shouldn't be called with op1 == const1_rtx, but some of the
2168 code below will malfunction if we are, so check here and handle
2169 the special case if so. */
2170 if (op1 == const1_rtx)
2171 return rem_flag ? const0_rtx : op0;
2173 /* Don't use the function value register as a target
2174 since we have to read it as well as write it,
2175 and function-inlining gets confused by this. */
2176 if (target && REG_P (target) && REG_FUNCTION_VALUE_P (target))
2179 /* Don't clobber an operand while doing a multi-step calculation. */
2181 if ((rem_flag && (reg_mentioned_p (target, op0)
2182 || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
2183 || reg_mentioned_p (target, op1)
2184 || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM))
2187 can_clobber_op0 = (GET_CODE (op0) == REG && op0 == target);
2189 if (GET_CODE (op1) == CONST_INT)
2190 log = exact_log2 (INTVAL (op1));
2192 /* If log is >= 0, we are dividing by 2**log, and will do it by shifting,
2193 which is really floor-division. Otherwise we will really do a divide,
2194 and we assume that is trunc-division.
2196 We must correct the dividend by adding or subtracting something
2197 based on the divisor, in order to do the kind of rounding specified
2198 by CODE. The correction depends on what kind of rounding is actually
2199 available, and that depends on whether we will shift or divide.
2201 In many of these cases it is possible to perform the operation by a
2202 clever series of logical operations (shifts and/or exclusive-ors).
2203 Although avoiding the jump has the advantage that it extends the basic
2204 block and allows further optimization, the branch-free code is normally
2205 at least one instruction longer in the (most common) case where the
2206 dividend is non-negative. Performance measurements of the two
2207 alternatives show that the branch-free code is slightly faster on the
2208 IBM ROMP but slower on CISC processors (significantly slower on the
2209 VAX). Accordingly, the jump code has been retained.
2211 On machines where the jump code is slower, the cost of a DIV or MOD
2212 operation can be set small (less than twice that of an addition); in
2213 that case, we pretend that we don't have a power of two and perform
2214 a normal division or modulus operation. */
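/* For a concrete example of the rounding kinds, take -7 divided by 4:
   TRUNC gives quotient -1, remainder -3; FLOOR gives quotient -2,
   remainder 1; CEIL gives quotient -1, remainder -3; and ROUND gives
   quotient -2, remainder 1, since -7/4 == -1.75 rounds to -2. */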
2216 if ((code == TRUNC_MOD_EXPR || code == TRUNC_DIV_EXPR)
2218 && (rem_flag ? smod_pow2_cheap : sdiv_pow2_cheap))
2221 /* Get the mode in which to perform this computation. Normally it will
2222 be MODE, but sometimes we can't do the desired operation in MODE.
2223 If so, pick a wider mode in which we can do the operation. Convert
2224 to that mode at the start to avoid repeated conversions.
2226 First see what operations we need. These depend on the expression
2227 we are evaluating. (We assume that divxx3 insns exist under the
2228 same conditions as modxx3 insns do, and that these insns don't normally
2229 fail. If these assumptions are not correct, we may generate less
2230 efficient code in some cases.)
2232 Then see if we find a mode in which we can open-code that operation
2233 (either a division, modulus, or shift). Finally, check for the smallest
2234 mode for which we can do the operation with a library call. */
2236 optab1 = (log >= 0 ? (unsignedp ? lshr_optab : ashr_optab)
2237 : (unsignedp ? udiv_optab : sdiv_optab));
2238 optab2 = (log >= 0 ? optab1 : (unsignedp ? udivmod_optab : sdivmod_optab));
2240 for (compute_mode = mode; compute_mode != VOIDmode;
2241 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
2242 if (optab1->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing
2243 || optab2->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing)
2246 if (compute_mode == VOIDmode)
2247 for (compute_mode = mode; compute_mode != VOIDmode;
2248 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
2249 if (optab1->handlers[(int) compute_mode].libfunc
2250 || optab2->handlers[(int) compute_mode].libfunc)
2253 /* If we still couldn't find a mode, use MODE; we'll probably abort in
2254 expand_binop. */
2255 if (compute_mode == VOIDmode)
2256 compute_mode = mode;
2258 size = GET_MODE_BITSIZE (compute_mode);
2260 /* Now convert to the best mode to use. Show we made a copy of OP0
2261 and hence we can clobber it (we cannot use a SUBREG to widen
2262 something). */
2263 if (compute_mode != mode)
2265 adjusted_op0 = op0 = convert_to_mode (compute_mode, op0, unsignedp);
2266 can_clobber_op0 = 1;
2267 op1 = convert_to_mode (compute_mode, op1, unsignedp);
2270 /* If we are computing the remainder and one of the operands is a volatile
2271 MEM, copy it into a register. */
2273 if (rem_flag && GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
2274 adjusted_op0 = op0 = force_reg (compute_mode, op0), can_clobber_op0 = 1;
2275 if (rem_flag && GET_CODE (op1) == MEM && MEM_VOLATILE_P (op1))
2276 op1 = force_reg (compute_mode, op1);
2278 /* If we are computing the remainder, op0 will be needed later to calculate
2279 X - Y * (X / Y), therefore cannot be clobbered. */
2280 if (rem_flag)
2281 can_clobber_op0 = 0;
2283 if (target == 0 || GET_MODE (target) != compute_mode)
2284 target = gen_reg_rtx (compute_mode);
2288 case TRUNC_MOD_EXPR:
2289 case TRUNC_DIV_EXPR:
2290 if (log >= 0 && ! unsignedp)
2292 if (! can_clobber_op0)
2294 adjusted_op0 = copy_to_suggested_reg (adjusted_op0, target,
2296 /* Copy op0 to a reg, since emit_cmp_insn will call emit_queue
2297 which will screw up mem refs for autoincrements. */
2298 op0 = force_reg (compute_mode, op0);
2300 /* Here we need to add OP1-1 if OP0 is negative, 0 otherwise.
2301 This can be computed without jumps by arithmetically shifting
2302 OP0 right LOG-1 places and then shifting right logically
2303 SIZE-LOG bits. The resulting value is unconditionally added
2304 to OP0. */
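/* For example, with SIZE == 32 and LOG == 3 (division by 8),
   shifting OP0 right arithmetically 2 places and then logically 29
   places yields 7 when OP0 is negative and 0 otherwise -- exactly
   the OP1-1 bias that turns the flooring shift into truncating
   division. */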
2305 if (log == 1 || BRANCH_COST >= 3)
2307 rtx temp = gen_reg_rtx (compute_mode);
2308 temp = copy_to_suggested_reg (adjusted_op0, temp, compute_mode);
2309 temp = expand_shift (RSHIFT_EXPR, compute_mode, temp,
2310 build_int_2 (log - 1, 0), NULL_RTX, 0);
2311 temp = expand_shift (RSHIFT_EXPR, compute_mode, temp,
2312 build_int_2 (size - log, 0),
2314 expand_inc (adjusted_op0, temp);
2318 rtx label = gen_label_rtx ();
2319 emit_cmp_insn (adjusted_op0, const0_rtx, GE,
2320 NULL_RTX, compute_mode, 0, 0);
2321 emit_jump_insn (gen_bge (label));
2322 expand_inc (adjusted_op0, plus_constant (op1, -1));
2325 mod_insn_no_good = 1;
2329 case FLOOR_DIV_EXPR:
2330 case FLOOR_MOD_EXPR:
2331 if (log < 0 && ! unsignedp)
2333 rtx label = gen_label_rtx ();
2334 if (! can_clobber_op0)
2336 adjusted_op0 = copy_to_suggested_reg (adjusted_op0, target,
2338 /* Copy op0 to a reg, since emit_cmp_insn will call emit_queue
2339 which will screw up mem refs for autoincrements. */
2340 op0 = force_reg (compute_mode, op0);
2342 emit_cmp_insn (adjusted_op0, const0_rtx, GE,
2343 NULL_RTX, compute_mode, 0, 0);
2344 emit_jump_insn (gen_bge (label));
2345 expand_dec (adjusted_op0, op1);
2346 expand_inc (adjusted_op0, const1_rtx);
2348 mod_insn_no_good = 1;
2354 if (! can_clobber_op0)
2356 adjusted_op0 = copy_to_suggested_reg (adjusted_op0, target,
2358 /* Copy op0 to a reg, since emit_cmp_insn will call emit_queue
2359 which will screw up mem refs for autoincrements. */
2360 op0 = force_reg (compute_mode, op0);
2367 label = gen_label_rtx ();
2368 emit_cmp_insn (adjusted_op0, const0_rtx, LE,
2369 NULL_RTX, compute_mode, 0, 0);
2370 emit_jump_insn (gen_ble (label));
2372 expand_inc (adjusted_op0, op1);
2373 expand_dec (adjusted_op0, const1_rtx);
2379 adjusted_op0 = expand_binop (compute_mode, add_optab,
2380 adjusted_op0, plus_constant (op1, -1),
2381 NULL_RTX, 0, OPTAB_LIB_WIDEN);
2383 mod_insn_no_good = 1;
2386 case ROUND_DIV_EXPR:
2387 case ROUND_MOD_EXPR:
2388 if (! can_clobber_op0)
2390 adjusted_op0 = copy_to_suggested_reg (adjusted_op0, target,
2392 /* Copy op0 to a reg, since emit_cmp_insn will call emit_queue
2393 which will screw up mem refs for autoincrements. */
2394 op0 = force_reg (compute_mode, op0);
2398 op1 = expand_shift (RSHIFT_EXPR, compute_mode, op1,
2399 integer_one_node, NULL_RTX, 0);
2402 if (BRANCH_COST >= 2)
2404 /* Negate OP1 if OP0 < 0. Do this by computing a temporary
2405 that has all bits equal to the sign bit and exclusive
2406 or-ing it with OP1. */
2407 rtx temp = gen_reg_rtx (compute_mode);
2408 temp = copy_to_suggested_reg (adjusted_op0, temp, compute_mode);
2409 temp = expand_shift (RSHIFT_EXPR, compute_mode, temp,
2410 build_int_2 (size - 1, 0),
2412 op1 = expand_binop (compute_mode, xor_optab, op1, temp, op1,
2413 unsignedp, OPTAB_LIB_WIDEN);
2417 rtx label = gen_label_rtx ();
2418 emit_cmp_insn (adjusted_op0, const0_rtx, GE, NULL_RTX,
2419 compute_mode, 0, 0);
2420 emit_jump_insn (gen_bge (label));
2421 expand_unop (compute_mode, neg_optab, op1, op1, 0);
2425 expand_inc (adjusted_op0, op1);
2429 op1 = GEN_INT (((HOST_WIDE_INT) 1 << log) / 2);
2430 expand_inc (adjusted_op0, op1);
2432 mod_insn_no_good = 1;
2436 if (rem_flag && !mod_insn_no_good)
2438 /* Try to produce the remainder directly. */
2440 result = expand_binop (compute_mode, and_optab, adjusted_op0,
2441 GEN_INT (((HOST_WIDE_INT) 1 << log) - 1),
2442 target, 1, OPTAB_LIB_WIDEN);
2445 /* See if we can do remainder without a library call. */
2446 result = sign_expand_binop (mode, umod_optab, smod_optab,
2447 adjusted_op0, op1, target,
2448 unsignedp, OPTAB_WIDEN);
2451 /* No luck there. Can we do remainder and divide at once
2452 without a library call? */
2453 result = gen_reg_rtx (compute_mode);
2454 if (! expand_twoval_binop (unsignedp
2455 ? udivmod_optab : sdivmod_optab,
2457 NULL_RTX, result, unsignedp))
2464 return gen_lowpart (mode, result);
2466 /* Produce the quotient. */
2468 result = expand_shift (RSHIFT_EXPR, compute_mode, adjusted_op0,
2469 build_int_2 (log, 0), target, unsignedp);
2470 else if (rem_flag && !mod_insn_no_good)
2471 /* If producing quotient in order to subtract for remainder,
2472 and a remainder subroutine would be ok,
2473 don't use a divide subroutine. */
2474 result = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
2475 adjusted_op0, op1, NULL_RTX, unsignedp,
2479 /* Try a quotient insn, but not a library call. */
2480 result = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
2482 rem_flag ? NULL_RTX : target,
2483 unsignedp, OPTAB_WIDEN);
2486 /* No luck there. Try a quotient-and-remainder insn,
2487 keeping the quotient alone. */
2488 result = gen_reg_rtx (mode);
2489 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
2491 result, NULL_RTX, unsignedp))
2495 /* If still no luck, use a library call. */
2497 result = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
2499 rem_flag ? NULL_RTX : target,
2500 unsignedp, OPTAB_LIB_WIDEN);
2503 /* If we really want the remainder, get it by subtraction. */
2507 /* No divide instruction either. Use library for remainder. */
2508 result = sign_expand_binop (compute_mode, umod_optab, smod_optab,
2510 unsignedp, OPTAB_LIB_WIDEN);
2513 /* We divided. Now finish doing X - Y * (X / Y). */
2514 result = expand_mult (compute_mode, result, op1, target, unsignedp);
2515 if (! result) abort ();
2516 result = expand_binop (compute_mode, sub_optab, op0,
2517 result, target, unsignedp, OPTAB_LIB_WIDEN);
2524 return gen_lowpart (mode, result);
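/* A minimal standalone sketch of the branch-free correction used
   above for signed truncating division by 2**LOG, assuming 32-bit
   int and an arithmetic right shift of signed values:  */
#if 0
static int
example_trunc_div_by_8 (x)
     int x;
{
  /* BIAS is 7 when X is negative, else 0: shift arithmetically
     LOG-1 == 2 places, then logically SIZE-LOG == 29 places.  */
  unsigned bias = ((unsigned) (x >> 2)) >> 29;

  /* With the bias added, the flooring shift truncates.  */
  return (x + (int) bias) >> 3;
}
#endif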
2527 /* Return a tree node with data type TYPE, describing the value of X.
2528 Usually this is an RTL_EXPR, if there is no obvious better choice.
2529 X may be an expression; however, we only support those expressions
2530 generated by loop.c. */
2539 switch (GET_CODE (x))
2542 t = build_int_2 (INTVAL (x),
2543 ! TREE_UNSIGNED (type) && INTVAL (x) >= 0 ? 0 : -1);
2544 TREE_TYPE (t) = type;
2548 if (GET_MODE (x) == VOIDmode)
2550 t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
2551 TREE_TYPE (t) = type;
2557 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
2558 t = build_real (type, d);
2564 return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
2565 make_tree (type, XEXP (x, 1))));
2568 return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
2569 make_tree (type, XEXP (x, 1))));
2572 return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0))));
2575 return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
2576 make_tree (type, XEXP (x, 1))));
2579 return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
2580 make_tree (type, XEXP (x, 1))));
2583 return fold (convert (type,
2584 build (RSHIFT_EXPR, unsigned_type (type),
2585 make_tree (unsigned_type (type),
2587 make_tree (type, XEXP (x, 1)))));
2590 return fold (convert (type,
2591 build (RSHIFT_EXPR, signed_type (type),
2592 make_tree (signed_type (type), XEXP (x, 0)),
2593 make_tree (type, XEXP (x, 1)))));
2596 if (TREE_CODE (type) != REAL_TYPE)
2597 t = signed_type (type);
2601 return fold (convert (type,
2602 build (TRUNC_DIV_EXPR, t,
2603 make_tree (t, XEXP (x, 0)),
2604 make_tree (t, XEXP (x, 1)))));
2606 t = unsigned_type (type);
2607 return fold (convert (type,
2608 build (TRUNC_DIV_EXPR, t,
2609 make_tree (t, XEXP (x, 0)),
2610 make_tree (t, XEXP (x, 1)))));
2612 t = make_node (RTL_EXPR);
2613 TREE_TYPE (t) = type;
2614 RTL_EXPR_RTL (t) = x;
2615 /* There are no insns to be output
2616 when this rtl_expr is used. */
2617 RTL_EXPR_SEQUENCE (t) = 0;
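/* A hedged usage sketch; REG here is a hypothetical pseudo register.
   Given an address-like rtx of the kind loop.c builds:  */
#if 0
  tree t = make_tree (type_for_mode (SImode, 0),
		      gen_rtx (PLUS, SImode,
			       gen_rtx (MULT, SImode, reg, GEN_INT (4)),
			       GEN_INT (8)));
  /* T now represents reg*4 + 8 and can be re-expanded with
     expand_expr (t, NULL_RTX, SImode, 0).  */
#endif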
2622 /* Return an rtx representing the value of X * MULT + ADD.
2623 TARGET is a suggestion for where to store the result (an rtx).
2624 MODE is the machine mode for the computation.
2625 X and MULT must have mode MODE. ADD may have a different mode
2626 (if ADD's mode is VOIDmode, it is taken to have mode MODE).
2627 UNSIGNEDP is non-zero to do unsigned multiplication.
2628 This may emit insns. */
2631 expand_mult_add (x, target, mult, add, mode, unsignedp)
2632 rtx x, target, mult, add;
2633 enum machine_mode mode;
2636 tree type = type_for_mode (mode, unsignedp);
2637 tree add_type = (GET_MODE (add) == VOIDmode
2638 ? type : type_for_mode (GET_MODE (add), unsignedp));
2639 tree result = fold (build (PLUS_EXPR, type,
2640 fold (build (MULT_EXPR, type,
2641 make_tree (type, x),
2642 make_tree (type, mult))),
2643 make_tree (add_type, add)));
2645 return expand_expr (result, target, VOIDmode, 0);
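/* A hedged usage sketch; IV is a hypothetical pseudo register
   holding an induction variable.  This computes iv*4 + 8:  */
#if 0
  rtx val = expand_mult_add (iv, NULL_RTX, GEN_INT (4), GEN_INT (8),
			     SImode, 0);
#endif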
2648 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
2649 and returning TARGET.
2651 If TARGET is 0, a pseudo-register or constant is returned. */
2654 expand_and (op0, op1, target)
2655 rtx op0, op1, target;
2657 enum machine_mode mode = VOIDmode;
2660 if (GET_MODE (op0) != VOIDmode)
2661 mode = GET_MODE (op0);
2662 else if (GET_MODE (op1) != VOIDmode)
2663 mode = GET_MODE (op1);
2665 if (mode != VOIDmode)
2666 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
2667 else if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT)
2668 tem = GEN_INT (INTVAL (op0) & INTVAL (op1));
2674 else if (tem != target)
2675 emit_move_insn (target, tem);
2679 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
2680 and storing in TARGET. Normally return TARGET.
2681 Return 0 if that cannot be done.
2683 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
2684 it is VOIDmode, they cannot both be CONST_INT.
2686 UNSIGNEDP is for the case where we have to widen the operands
2687 to perform the operation. It says to use zero-extension.
2689 NORMALIZEP is 1 if we should convert the result to be either zero
2690 or one. NORMALIZEP is -1 if we should convert the result to be
2691 either zero or -1. If NORMALIZEP is zero, the result will be left
2692 "raw" out of the scc insn. */
2695 emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep)
2699 enum machine_mode mode;
2704 enum insn_code icode;
2705 enum machine_mode compare_mode;
2706 enum machine_mode target_mode = GET_MODE (target);
2709 rtx pattern, comparison;
2711 if (mode == VOIDmode)
2712 mode = GET_MODE (op0);
2714 /* For some comparisons with 1 and -1, we can convert this to
2715 comparisons with zero. This will often produce more opportunities for
2716 store-flag insns. */
2721 if (op1 == const1_rtx)
2722 op1 = const0_rtx, code = LE;
2725 if (op1 == constm1_rtx)
2726 op1 = const0_rtx, code = LT;
2729 if (op1 == const1_rtx)
2730 op1 = const0_rtx, code = GT;
2733 if (op1 == constm1_rtx)
2734 op1 = const0_rtx, code = GE;
2737 if (op1 == const1_rtx)
2738 op1 = const0_rtx, code = NE;
2741 if (op1 == const1_rtx)
2742 op1 = const0_rtx, code = EQ;
2746 /* From now on, we won't change CODE, so set ICODE now. */
2747 icode = setcc_gen_code[(int) code];
2749 /* If this is A < 0 or A >= 0, we can do this by taking the one's
2750 complement of A (for GE) and shifting the sign bit to the low bit. */
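/* In a 32-bit mode this amounts to (unsigned) A >> 31 for A < 0 and
   (unsigned) ~A >> 31 for A >= 0, giving the 0/1 form; an arithmetic
   shift gives the 0/-1 form instead. */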
2751 if (op1 == const0_rtx && (code == LT || code == GE)
2752 && GET_MODE_CLASS (mode) == MODE_INT
2753 && (normalizep || STORE_FLAG_VALUE == 1
2754 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2755 && (STORE_FLAG_VALUE
2756 == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
2758 rtx subtarget = target;
2760 /* If the result is to be wider than OP0, it is best to convert it
2761 first. If it is to be narrower, it is *incorrect* to convert it
2762 first. */
2763 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
2765 op0 = protect_from_queue (op0, 0);
2766 op0 = convert_to_mode (target_mode, op0, 0);
2770 if (target_mode != mode)
2774 op0 = expand_unop (mode, one_cmpl_optab, op0, subtarget, 0);
2776 if (normalizep || STORE_FLAG_VALUE == 1)
2777 /* If we are supposed to produce a 0/1 value, we want to do
2778 a logical shift from the sign bit to the low-order bit; for
2779 a -1/0 value, we do an arithmetic shift. */
2780 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
2781 size_int (GET_MODE_BITSIZE (mode) - 1),
2782 subtarget, normalizep != -1);
2784 if (mode != target_mode)
2785 op0 = convert_to_mode (target_mode, op0, 0);
2790 if (icode != CODE_FOR_nothing)
2792 /* We think we may be able to do this with a scc insn. Emit the
2793 comparison and then the scc insn.
2795 compare_from_rtx may call emit_queue, which would be deleted below
2796 if the scc insn fails. So call it ourselves before setting LAST. */
2799 last = get_last_insn ();
2802 = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX, 0);
2803 if (GET_CODE (comparison) == CONST_INT)
2804 return (comparison == const0_rtx ? const0_rtx
2805 : normalizep == 1 ? const1_rtx
2806 : normalizep == -1 ? constm1_rtx
2809 /* Get a reference to the target in the proper mode for this insn. */
2810 compare_mode = insn_operand_mode[(int) icode][0];
2812 if (preserve_subexpressions_p ()
2813 || ! (*insn_operand_predicate[(int) icode][0]) (subtarget, compare_mode))
2814 subtarget = gen_reg_rtx (compare_mode);
2816 pattern = GEN_FCN (icode) (subtarget);
2819 emit_insn (pattern);
2821 /* If we are converting to a wider mode, first convert to
2822 TARGET_MODE, then normalize. This produces better combining
2823 opportunities on machines that have a SIGN_EXTRACT when we are
2824 testing a single bit. This mostly benefits the 68k.
2826 If STORE_FLAG_VALUE does not have the sign bit set when
2827 interpreted in COMPARE_MODE, we can do this conversion as
2828 unsigned, which is usually more efficient. */
2829 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
2831 convert_move (target, subtarget,
2832 (GET_MODE_BITSIZE (compare_mode)
2833 <= HOST_BITS_PER_WIDE_INT)
2834 && 0 == (STORE_FLAG_VALUE
2835 & ((HOST_WIDE_INT) 1
2836 << (GET_MODE_BITSIZE (compare_mode) -1))));
2838 compare_mode = target_mode;
2843 /* If we want to keep subexpressions around, don't reuse our
2844 last target. */
2846 if (preserve_subexpressions_p ())
2847 subtarget = 0;
2849 /* Now normalize to the proper value in COMPARE_MODE. Sometimes
2850 we don't have to do anything. */
2851 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
2853 else if (normalizep == - STORE_FLAG_VALUE)
2854 op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
2856 /* We don't want to use STORE_FLAG_VALUE < 0 below since this
2857 makes it hard to use a value of just the sign bit due to
2858 ANSI integer constant typing rules. */
2859 else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
2860 && (STORE_FLAG_VALUE
2861 & ((HOST_WIDE_INT) 1
2862 << (GET_MODE_BITSIZE (compare_mode) - 1))))
2863 op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
2864 size_int (GET_MODE_BITSIZE (compare_mode) - 1),
2865 subtarget, normalizep == 1);
2866 else if (STORE_FLAG_VALUE & 1)
2868 op0 = expand_and (op0, const1_rtx, subtarget);
2869 if (normalizep == -1)
2870 op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
2875 /* If we were converting to a smaller mode, do the
2876 conversion now. */
2877 if (target_mode != compare_mode)
2879 convert_move (target, op0, 0);
2888 delete_insns_since (last);
2890 subtarget = target_mode == mode ? target : 0;
2892 /* If we reached here, we can't do this with a scc insn. However, there
2893 are some comparisons that can be done directly. For example, if
2894 this is an equality comparison of integers, we can try to exclusive-or
2895 (or subtract) the two operands and use a recursive call to try the
2896 comparison with zero. Don't do any of these cases if branches are
2897 very cheap. */
2899 if (BRANCH_COST > 0
2900 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
2901 && op1 != const0_rtx)
2903 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
2907 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
2910 tem = emit_store_flag (target, code, tem, const0_rtx,
2911 mode, unsignedp, normalizep);
2913 delete_insns_since (last);
2917 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
2918 the constant zero. Reject all other comparisons at this point. Only
2919 do LE and GT if branches are expensive since they are expensive on
2920 2-operand machines. */
2922 if (BRANCH_COST == 0
2923 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
2924 || (code != EQ && code != NE
2925 && (BRANCH_COST <= 1 || (code != LE && code != GT))))
2928 /* See what we need to return. We can only return a 1, -1, or the
2929 sign bit. */
2931 if (normalizep == 0)
2933 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
2934 normalizep = STORE_FLAG_VALUE;
2936 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2937 && (STORE_FLAG_VALUE
2938 == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
2944 /* Try to put the result of the comparison in the sign bit. Assume we can't
2945 do the necessary operation below. */
2949 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
2950 the sign bit set. */
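/* E.g. A == 0 gives 0 | -1 == -1 (sign bit set, so A <= 0 is true);
   A == 5 gives 5 | 4 == 5 (clear, false); A == -3 gives
   -3 | -4 == -3 (set, true). */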
2954 /* This is destructive, so SUBTARGET can't be OP0. */
2955 if (rtx_equal_p (subtarget, op0))
2958 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
2961 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
2965 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
2966 number of bits in the mode of OP0, minus one. */
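/* E.g. in a 32-bit mode, A == 5 gives (5 >> 31) - 5 == -5 (sign bit
   set, so A > 0 is true); A == 0 gives 0 (clear, false); A == -3
   gives -1 - -3 == 2 (clear, false). */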
2970 if (rtx_equal_p (subtarget, op0))
2973 tem = expand_shift (RSHIFT_EXPR, mode, op0,
2974 size_int (GET_MODE_BITSIZE (mode) - 1),
2976 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
2980 if (code == EQ || code == NE)
2982 /* For EQ or NE, one way to do the comparison is to apply an operation
2983 that converts the operand into a positive number if it is non-zero
2984 or zero if it was originally zero. Then, for EQ, we subtract 1 and
2985 for NE we negate. This puts the result in the sign bit. Then we
2986 normalize with a shift, if needed.
2988 Two operations that can do the above actions are ABS and FFS, so try
2989 them. If that doesn't work, and MODE is smaller than a full word,
2990 we can use zero-extension to the wider mode (an unsigned conversion)
2991 as the operation. */
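/* E.g. with ABS: for X != 0, abs (X) - 1 is non-negative (sign bit
   clear) while abs (0) - 1 == -1 (sign bit set), which is the EQ
   result; and - abs (X) is negative exactly when X != 0, which is
   the NE result.  FFS works the same way, since its result is
   positive iff its operand is non-zero. */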
2993 if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2994 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
2995 else if (ffs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2996 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
2997 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
3000 op0 = protect_from_queue (op0, 0);
3001 tem = convert_to_mode (mode, op0, 1);
3007 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
3010 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
3013 /* If we couldn't do it that way, for NE we can "or" the two's complement
3014 of the value with itself. For EQ, we take the one's complement of
3015 that "or", which is an extra insn, so we only handle EQ if branches
3016 are expensive. */
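/* E.g. X == 0 gives 0 | 0 == 0 (sign bit clear); for any X != 0, at
   least one of X and -X is negative, so (-X) | X has the sign bit
   set. */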
3018 if (tem == 0 && (code == NE || BRANCH_COST > 1))
3020 if (rtx_equal_p (subtarget, op0))
3023 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
3024 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
3027 if (tem && code == EQ)
3028 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
3032 if (tem && normalizep)
3033 tem = expand_shift (RSHIFT_EXPR, mode, tem,
3034 size_int (GET_MODE_BITSIZE (mode) - 1),
3035 tem, normalizep == 1);
3037 if (tem && GET_MODE (tem) != target_mode)
3039 convert_move (target, tem, 0);
3044 delete_insns_since (last);
3048 emit_jump_insn ((*bcc_gen_fctn[(int) code]) (label));
3049 emit_move_insn (target, const1_rtx);