1 /* Medium-level subroutines: convert bit-field store and extract
2 and shifts, multiplies and divides to rtl instructions.
3 Copyright (C) 1987, 1988, 1989, 1992 Free Software Foundation, Inc.
5 This file is part of GNU CC.
7 GNU CC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
12 GNU CC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GNU CC; see the file COPYING. If not, write to
19 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
26 #include "insn-flags.h"
27 #include "insn-codes.h"
28 #include "insn-config.h"
/* Forward declarations (pre-ANSI K&R style: no prototypes, argument
   types are checked nowhere — callers must match the definitions below). */
33 static rtx extract_split_bit_field ();
34 static rtx extract_fixed_bit_field ();
35 static void store_split_bit_field ();
36 static void store_fixed_bit_field ();
37 static rtx mask_rtx ();
38 static rtx lshift_value ();
/* Integer division rounding up: CEIL(x,y) == ceil(x/y) for x,y > 0.  */
40 #define CEIL(x,y) (((x) + (y) - 1) / (y))
42 /* Non-zero means multiply instructions are cheaper than shifts. */
43 int mult_is_very_cheap;
45 /* Non-zero means divides or modulus operations are relatively cheap for
46 powers of two, so don't use branches; emit the operation instead.
47 Usually, this will mean that the MD file will emit non-branch
50 static int sdiv_pow2_cheap, smod_pow2_cheap;
52 /* Cost of various pieces of RTL.  Filled in by init_expmed below;
   measured via rtx_cost on representative expressions. */
53 static int add_cost, shift_cost, mult_cost, negate_cost, lea_cost;
55 /* Max scale factor for scaled address in lea instruction. */
56 static int lea_max_mul;
/* NOTE(review): this extract has dropped several source lines here (the
   embedded original line numbers jump); the enclosing function header —
   presumably init_expmed () — is not visible.  Code kept byte-identical.
   This routine probes the target's cost model by building throwaway RTL
   on the obstack, asking rtx_cost / recog about it, then freeing it. */
61 char *free_point = (char *) oballoc (1);
62 /* This is "some random pseudo register" for purposes of calling recog
63 to see what insns exist. */
64 rtx reg = gen_rtx (REG, word_mode, FIRST_PSEUDO_REGISTER);
65 rtx pow2 = GEN_INT (32);
70 add_cost = rtx_cost (gen_rtx (PLUS, word_mode, reg, reg), SET);
71 shift_cost = rtx_cost (gen_rtx (LSHIFT, word_mode, reg,
72 /* Using a constant gives better
73 estimate of typical costs.
74 1 or 2 might have quirks. */
76 mult_cost = rtx_cost (gen_rtx (MULT, word_mode, reg, reg), SET);
77 negate_cost = rtx_cost (gen_rtx (NEG, word_mode, reg), SET);
79 /* 999999 is chosen to avoid any plausible faster special case. */
/* Presumably assigns mult_is_very_cheap (assignment line elided in extract). */
81 = (rtx_cost (gen_rtx (MULT, word_mode, reg, GEN_INT (999999)), SET)
82 < rtx_cost (gen_rtx (LSHIFT, word_mode, reg, GEN_INT (7)), SET));
/* Power-of-two divide/mod counted "cheap" if no costlier than two adds. */
85 = rtx_cost (gen_rtx (DIV, word_mode, reg, pow2), SET) <= 2 * add_cost;
87 = rtx_cost (gen_rtx (MOD, word_mode, reg, pow2), SET) <= 2 * add_cost;
/* Probe which scaled-address (lea) multipliers the target recognizes;
   loop header over i appears to be elided in this extract. */
92 lea = gen_rtx (SET, VOIDmode, reg,
93 gen_rtx (PLUS, word_mode,
94 gen_rtx (MULT, word_mode, reg, GEN_INT (i)),
96 /* Using 0 as second argument is not quite right,
97 but what else is there to do? */
98 if (recog (lea, 0, &dummy) < 0)
101 lea_cost = rtx_cost (SET_SRC (lea), SET);
104 /* Free the objects we just allocated. */
104 /* Free the objects we just allocated. */
108 /* Return an rtx representing minus the value of X.
109 MODE is the intended mode of the result,
110 useful if X is a CONST_INT. */
/* NOTE(review): the function signature line (negate_rtx (mode, x)?) is
   missing from this extract; only the declarations/body below survive. */
114 enum machine_mode mode;
117 if (GET_CODE (x) == CONST_INT)
119 HOST_WIDE_INT val = - INTVAL (x);
120 if (GET_MODE_BITSIZE (mode) < HOST_BITS_PER_WIDE_INT)
122 /* Sign extend the value from the bits that are significant. */
123 if (val & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
124 val |= (HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (mode);
126 val &= ((HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (mode)) - 1;
128 return GEN_INT (val);
/* Non-constant case: emit a NEG via the optab machinery. */
131 return expand_unop (GET_MODE (x), neg_optab, x, NULL_RTX, 0);
134 /* Generate code to store value from rtx VALUE
135 into a bit-field within structure STR_RTX
136 containing BITSIZE bits starting at bit BITNUM.
137 FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
138 ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
139 TOTAL_SIZE is the size of the structure in bytes, or -1 if varying. */
141 /* ??? Note that there are two different ideas here for how
142 to determine the size to count bits within, for a register.
143 One is BITS_PER_WORD, and the other is the size of operand 3
144 of the insv pattern. (The latter assumes that an n-bit machine
145 will be able to insert bit fields up to n bits wide.)
146 It isn't certain that either of these is right.
147 extract_bit_field has the same quandary. */
/* NOTE(review): many interior lines of this function are missing from the
   extract (embedded line numbers jump, e.g. some parameter declarations,
   braces and conditions are absent).  Code kept byte-identical.
   Strategy visible below: (1) whole-word store via SUBREG, (2) lsb-aligned
   store via movstrict, (3) multi-word store word-by-word recursively,
   (4) the machine's insv pattern if it exists, (5) fallback to
   store_fixed_bit_field using shifts and masks.  */
150 store_bit_field (str_rtx, bitsize, bitnum, fieldmode, value, align, total_size)
152 register int bitsize;
154 enum machine_mode fieldmode;
159 int unit = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
160 register int offset = bitnum / unit;
161 register int bitpos = bitnum % unit;
162 register rtx op0 = str_rtx;
164 if (GET_CODE (str_rtx) == MEM && ! MEM_IN_STRUCT_P (str_rtx))
167 /* Discount the part of the structure before the desired byte.
168 We need to know how many bytes are safe to reference after it. */
170 total_size -= (bitpos / BIGGEST_ALIGNMENT
171 * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));
/* Strip SUBREGs, folding their word offsets into OFFSET. */
173 while (GET_CODE (op0) == SUBREG)
175 /* The following line once was done only if WORDS_BIG_ENDIAN,
176 but I think that is a mistake. WORDS_BIG_ENDIAN is
177 meaningful at a much higher level; when structures are copied
178 between memory and regs, the higher-numbered regs
179 always get higher addresses. */
180 offset += SUBREG_WORD (op0);
181 /* We used to adjust BITPOS here, but now we do the whole adjustment
182 right after the loop. */
183 op0 = SUBREG_REG (op0);
187 /* If OP0 is a register, BITPOS must count within a word.
188 But as we have it, it counts within whatever size OP0 now has.
189 On a bigendian machine, these are not the same, so convert. */
190 if (GET_CODE (op0) != MEM && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
191 bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
194 value = protect_from_queue (value, 0);
197 value = force_not_mem (value);
199 /* Note that the adjustment of BITPOS above has no effect on whether
200 BITPOS is 0 in a REG bigger than a word. */
201 if (GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
202 && (! STRICT_ALIGNMENT || GET_CODE (op0) != MEM)
203 && bitpos == 0 && bitsize == GET_MODE_BITSIZE (fieldmode))
205 /* Storing in a full-word or multi-word field in a register
206 can be done with just SUBREG. */
207 if (GET_MODE (op0) != fieldmode)
208 if (GET_CODE (op0) == REG)
209 op0 = gen_rtx (SUBREG, fieldmode, op0, offset);
211 op0 = change_address (op0, fieldmode,
212 plus_constant (XEXP (op0, 0), offset));
213 emit_move_insn (op0, value);
217 /* Storing an lsb-aligned field in a register
218 can be done with a movestrict instruction. */
220 if (GET_CODE (op0) != MEM
222 && bitpos + bitsize == unit
226 && bitsize == GET_MODE_BITSIZE (fieldmode)
227 && (GET_MODE (op0) == fieldmode
228 || (movstrict_optab->handlers[(int) fieldmode].insn_code
229 != CODE_FOR_nothing)))
231 /* Get appropriate low part of the value being stored. */
232 if (GET_CODE (value) == CONST_INT || GET_CODE (value) == REG)
233 value = gen_lowpart (fieldmode, value);
234 else if (!(GET_CODE (value) == SYMBOL_REF
235 || GET_CODE (value) == LABEL_REF
236 || GET_CODE (value) == CONST))
237 value = convert_to_mode (fieldmode, value, 0);
239 if (GET_MODE (op0) == fieldmode)
240 emit_move_insn (op0, value);
243 int icode = movstrict_optab->handlers[(int) fieldmode].insn_code;
244 if(! (*insn_operand_predicate[icode][1]) (value, fieldmode))
245 value = copy_to_mode_reg (fieldmode, value);
246 emit_insn (GEN_FCN (icode)
247 (gen_rtx (SUBREG, fieldmode, op0, offset), value));
252 /* Handle fields bigger than a word. */
254 if (bitsize > BITS_PER_WORD)
256 /* Here we transfer the words of the field
257 in the order least significant first.
258 This is because the most significant word is the one which may
259 be less than full. */
261 int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
264 /* This is the mode we must force value to, so that there will be enough
265 subwords to extract. Note that fieldmode will often (always?) be
266 VOIDmode, because that is what store_field uses to indicate that this
267 is a bit field, but passing VOIDmode to operand_subword_force will
268 result in an abort. */
269 fieldmode = mode_for_size (nwords * BITS_PER_WORD, MODE_INT, 0);
271 for (i = 0; i < nwords; i++)
273 /* If I is 0, use the low-order word in both field and target;
274 if I is 1, use the next to lowest word; and so on. */
275 int wordnum = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
276 int bit_offset = (WORDS_BIG_ENDIAN
277 ? MAX (bitsize - (i + 1) * BITS_PER_WORD, 0)
278 : i * BITS_PER_WORD);
/* Recursive word-at-a-time store; trailing arguments elided in extract. */
279 store_bit_field (op0, MIN (BITS_PER_WORD,
280 bitsize - i * BITS_PER_WORD),
281 bitnum + bit_offset, word_mode,
282 operand_subword_force (value, wordnum, fieldmode),
288 /* From here on we can assume that the field to be stored in is
289 a full-word (whatever type that is), since it is shorter than a word. */
291 /* OFFSET is the number of words or bytes (UNIT says which)
292 from STR_RTX to the first word or byte containing part of the field. */
294 if (GET_CODE (op0) == REG)
297 || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
298 op0 = gen_rtx (SUBREG, TYPE_MODE (type_for_size (BITS_PER_WORD, 0)),
304 op0 = protect_from_queue (op0, 1);
307 /* Now OFFSET is nonzero only if OP0 is memory
308 and is therefore always measured in bytes. */
/* Condition head (HAVE_insv guard?) elided in extract; these are the
   remaining conjuncts of the insv-applicability test. */
312 && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
313 /* Ensure insv's size is wide enough for this field. */
314 && (GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_insv][3])
317 int xbitpos = bitpos;
320 rtx last = get_last_insn ();
322 enum machine_mode maxmode
323 = insn_operand_mode[(int) CODE_FOR_insv][3];
325 int save_volatile_ok = volatile_ok;
328 /* If this machine's insv can only insert into a register, or if we
329 are to force MEMs into a register, copy OP0 into a register and
330 save it back later. */
331 if (GET_CODE (op0) == MEM
333 || ! ((*insn_operand_predicate[(int) CODE_FOR_insv][0])
337 enum machine_mode bestmode;
339 /* Get the mode to use for inserting into this field. If OP0 is
340 BLKmode, get the smallest mode consistent with the alignment. If
341 OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
342 mode. Otherwise, use the smallest mode containing the field. */
344 if (GET_MODE (op0) == BLKmode
345 || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
347 = get_best_mode (bitsize, bitnum, align * BITS_PER_UNIT, maxmode,
348 MEM_VOLATILE_P (op0));
350 bestmode = GET_MODE (op0);
352 if (bestmode == VOIDmode)
355 /* Adjust address to point to the containing unit of that mode. */
356 unit = GET_MODE_BITSIZE (bestmode);
357 /* Compute offset as multiple of this unit, counting in bytes. */
358 offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
359 bitpos = bitnum % unit;
360 op0 = change_address (op0, bestmode,
361 plus_constant (XEXP (op0, 0), offset));
363 /* Fetch that unit, store the bitfield in it, then store the unit. */
364 tempreg = copy_to_reg (op0);
365 store_bit_field (tempreg, bitsize, bitpos, fieldmode, value,
367 emit_move_insn (op0, tempreg);
370 volatile_ok = save_volatile_ok;
372 /* Add OFFSET into OP0's address. */
373 if (GET_CODE (xop0) == MEM)
374 xop0 = change_address (xop0, byte_mode,
375 plus_constant (XEXP (xop0, 0), offset));
377 /* If xop0 is a register, we need it in MAXMODE
378 to make it acceptable to the format of insv. */
379 if (GET_CODE (xop0) == SUBREG)
380 PUT_MODE (xop0, maxmode);
381 if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
382 xop0 = gen_rtx (SUBREG, maxmode, xop0, 0);
384 /* On big-endian machines, we count bits from the most significant.
385 If the bit field insn does not, we must invert. */
387 #if BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN
388 xbitpos = unit - bitsize - xbitpos;
390 /* We have been counting XBITPOS within UNIT.
391 Count instead within the size of the register. */
393 if (GET_CODE (xop0) != MEM)
394 xbitpos += GET_MODE_BITSIZE (maxmode) - unit;
396 unit = GET_MODE_BITSIZE (maxmode);
398 /* Convert VALUE to maxmode (which insv insn wants) in VALUE1. */
400 if (GET_MODE (value) != maxmode)
402 if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
404 /* Optimization: Don't bother really extending VALUE
405 if it has all the bits we will actually use. */
407 /* Avoid making subreg of a subreg, or of a mem. */
408 if (GET_CODE (value1) != REG)
409 value1 = copy_to_reg (value1);
410 value1 = gen_rtx (SUBREG, maxmode, value1, 0);
412 else if (!CONSTANT_P (value))
413 /* Parse phase is supposed to make VALUE's data type
414 match that of the component reference, which is a type
415 at least as wide as the field; so VALUE should have
416 a mode that corresponds to that type. */
420 /* If this machine's insv insists on a register,
421 get VALUE1 into a register. */
422 if (! ((*insn_operand_predicate[(int) CODE_FOR_insv][3])
424 value1 = force_reg (maxmode, value1);
426 pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
/* If the insv pattern failed (pat == 0?  condition elided in extract),
   roll back the trial insns and fall through to the generic path. */
431 delete_insns_since (last);
432 store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
438 /* Insv is not available; store using shifts and boolean ops. */
439 store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
443 /* Use shifts and boolean operations to store VALUE
444 into a bit field of width BITSIZE
445 in a memory location specified by OP0 except offset by OFFSET bytes.
446 (OFFSET must be 0 if OP0 is a register.)
447 The field starts at position BITPOS within the byte.
448 (If OP0 is a register, it may be a full word or a narrower mode,
449 but BITPOS still counts within a full word,
450 which is significant on bigendian machines.)
451 STRUCT_ALIGN is the alignment the structure is known to have (in bytes).
453 Note that protect_from_queue has already been done on OP0 and VALUE. */
/* NOTE(review): interior lines are missing from this extract (embedded
   line numbers jump); code kept byte-identical.  Generic bit-field store:
   pick a mode covering the field, mask VALUE into position, AND out the
   old bits of OP0, OR in the new ones, and write the result back. */
456 store_fixed_bit_field (op0, offset, bitsize, bitpos, value, struct_align)
458 register int offset, bitsize, bitpos;
462 register enum machine_mode mode;
463 int total_bits = BITS_PER_WORD;
468 /* Add OFFSET to OP0's address (if it is in memory)
469 and if a single byte contains the whole bit field
470 change OP0 to a byte. */
472 /* There is a case not handled here:
473 a structure with a known alignment of just a halfword
474 and a field split across two aligned halfwords within the structure.
475 Or likewise a structure with a known alignment of just a byte
476 and a field split across two bytes.
477 Such cases are not supposed to be able to occur. */
479 if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
483 /* Special treatment for a bit field split across two registers. */
484 if (bitsize + bitpos > BITS_PER_WORD)
486 store_split_bit_field (op0, bitsize, bitpos, value, BITS_PER_WORD);
492 /* Get the proper mode to use for this field. We want a mode that
493 includes the entire field. If such a mode would be larger than
494 a word, we won't be doing the extraction the normal way. */
496 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
497 struct_align * BITS_PER_UNIT, word_mode,
498 GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));
500 if (mode == VOIDmode)
502 /* The only way this should occur is if the field spans word
504 store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
505 value, struct_align);
509 total_bits = GET_MODE_BITSIZE (mode);
511 /* Get ref to an aligned byte, halfword, or word containing the field.
512 Adjust BITPOS to be position within a word,
513 and OFFSET to be the offset of that word.
514 Then alter OP0 to refer to that word. */
515 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
516 offset -= (offset % (total_bits / BITS_PER_UNIT));
517 op0 = change_address (op0, mode,
518 plus_constant (XEXP (op0, 0), offset));
521 mode = GET_MODE (op0);
523 /* Now MODE is either some integral mode for a MEM as OP0,
524 or is a full-word for a REG as OP0. TOTAL_BITS corresponds.
525 The bit field is contained entirely within OP0.
526 BITPOS is the starting bit number within OP0.
527 (OP0's mode may actually be narrower than MODE.) */
/* Big-endian-bit-numbering guard presumably elided here (#if?). */
530 /* BITPOS is the distance between our msb
531 and that of the containing datum.
532 Convert it to the distance from the lsb. */
534 bitpos = total_bits - bitsize - bitpos;
536 /* Now BITPOS is always the distance between our lsb
539 /* Shift VALUE left by BITPOS bits. If VALUE is not constant,
540 we must first convert its mode to MODE. */
542 if (GET_CODE (value) == CONST_INT)
544 register HOST_WIDE_INT v = INTVAL (value);
546 if (bitsize < HOST_BITS_PER_WIDE_INT)
547 v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;
/* The v == 0 special case branch appears to be elided in this extract. */
551 else if ((bitsize < HOST_BITS_PER_WIDE_INT
552 && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
553 || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
556 value = lshift_value (mode, value, bitpos, bitsize);
560 int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
561 && bitpos + bitsize != GET_MODE_BITSIZE (mode));
563 if (GET_MODE (value) != mode)
565 /* If VALUE is a floating-point mode, access it as an integer
566 of the corresponding size, then convert it. This can occur on
567 a machine with 64 bit registers that uses SFmode for float. */
568 if (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT)
570 if (GET_CODE (value) != REG)
571 value = copy_to_reg (value);
573 = gen_rtx (SUBREG, word_mode, value, 0);
576 if ((GET_CODE (value) == REG || GET_CODE (value) == SUBREG)
577 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
578 value = gen_lowpart (mode, value);
580 value = convert_to_mode (mode, value, 1);
/* Mask to the field width (only when must_and — guard elided). */
584 value = expand_binop (mode, and_optab, value,
585 mask_rtx (mode, 0, bitsize, 0),
586 NULL_RTX, 1, OPTAB_LIB_WIDEN);
588 value = expand_shift (LSHIFT_EXPR, mode, value,
589 build_int_2 (bitpos, 0), NULL_RTX, 1);
592 /* Now clear the chosen bits in OP0,
593 except that if VALUE is -1 we need not bother. */
595 subtarget = (GET_CODE (op0) == REG || ! flag_force_mem) ? op0 : 0;
599 temp = expand_binop (mode, and_optab, op0,
600 mask_rtx (mode, bitpos, bitsize, 1),
601 subtarget, 1, OPTAB_LIB_WIDEN);
607 /* Now logical-or VALUE into OP0, unless it is zero. */
610 temp = expand_binop (mode, ior_optab, temp, value,
611 subtarget, 1, OPTAB_LIB_WIDEN);
613 emit_move_insn (op0, temp);
616 /* Store a bit field that is split across two words.
618 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
619 BITSIZE is the field width; BITPOS the position of its first bit
621 VALUE is the value to store. */
/* NOTE(review): interior lines missing from this extract (parameter
   declarations, some else/brace lines); code kept byte-identical.
   Stores a field straddling a word boundary by splitting VALUE into
   PART1/PART2 and delegating each to store_fixed_bit_field. */
624 store_split_bit_field (op0, bitsize, bitpos, value, align)
630 /* BITSIZE_1 is size of the part in the first word. */
631 int bitsize_1 = BITS_PER_WORD - bitpos % BITS_PER_WORD;
632 /* BITSIZE_2 is size of the rest (in the following word). */
633 int bitsize_2 = bitsize - bitsize_1;
635 int unit = GET_CODE (op0) == MEM ? BITS_PER_UNIT : BITS_PER_WORD;
636 int offset = bitpos / unit;
639 /* The field must span exactly one word boundary. */
640 if (bitpos / BITS_PER_WORD != (bitpos + bitsize - 1) / BITS_PER_WORD - 1)
643 if (GET_MODE (value) != VOIDmode)
644 value = convert_to_mode (word_mode, value, 1);
645 if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
646 value = copy_to_reg (value);
648 /* Split the value into two parts:
649 PART1 gets that which goes in the first word; PART2 the other. */
/* Big-endian branch (guard elided): the first word holds the
   more-significant bits. */
651 /* PART1 gets the more significant part. */
652 if (GET_CODE (value) == CONST_INT)
654 part1 = GEN_INT ((unsigned HOST_WIDE_INT) (INTVAL (value)) >> bitsize_2);
655 part2 = GEN_INT ((unsigned HOST_WIDE_INT) (INTVAL (value))
656 & (((HOST_WIDE_INT) 1 << bitsize_2) - 1));
660 part1 = extract_fixed_bit_field (word_mode, value, 0, bitsize_1,
661 BITS_PER_WORD - bitsize, NULL_RTX, 1,
663 part2 = extract_fixed_bit_field (word_mode, value, 0, bitsize_2,
664 BITS_PER_WORD - bitsize_2, NULL_RTX, 1,
/* Little-endian branch: the first word holds the less-significant bits. */
668 /* PART1 gets the less significant part. */
669 if (GET_CODE (value) == CONST_INT)
671 part1 = GEN_INT ((unsigned HOST_WIDE_INT) (INTVAL (value))
672 & (((HOST_WIDE_INT) 1 << bitsize_1) - 1));
673 part2 = GEN_INT ((unsigned HOST_WIDE_INT) (INTVAL (value)) >> bitsize_1);
677 part1 = extract_fixed_bit_field (word_mode, value, 0, bitsize_1, 0,
678 NULL_RTX, 1, BITS_PER_WORD);
679 part2 = extract_fixed_bit_field (word_mode, value, 0, bitsize_2,
680 bitsize_1, NULL_RTX, 1, BITS_PER_WORD);
684 /* Store PART1 into the first word. If OP0 is a MEM, pass OP0 and the
685 offset computed above. Otherwise, get the proper word and pass an
687 word = (GET_CODE (op0) == MEM ? op0
688 : operand_subword (op0, offset, 1, GET_MODE (op0)));
692 store_fixed_bit_field (word, GET_CODE (op0) == MEM ? offset : 0,
693 bitsize_1, bitpos % unit, part1, align);
695 /* Offset op0 by 1 word to get to the following one. */
696 if (GET_CODE (op0) == SUBREG)
697 word = operand_subword (SUBREG_REG (op0), SUBREG_WORD (op0) + offset + 1,
699 else if (GET_CODE (op0) == MEM)
702 word = operand_subword (op0, offset + 1, 1, GET_MODE (op0));
707 /* Store PART2 into the second word. */
708 store_fixed_bit_field (word,
709 (GET_CODE (op0) == MEM
710 ? CEIL (offset + 1, UNITS_PER_WORD) * UNITS_PER_WORD
712 bitsize_2, 0, part2, align);
715 /* Generate code to extract a byte-field from STR_RTX
716 containing BITSIZE bits, starting at BITNUM,
717 and put it in TARGET if possible (if TARGET is nonzero).
718 Regardless of TARGET, we return the rtx for where the value is placed.
721 STR_RTX is the structure containing the byte (a REG or MEM).
722 UNSIGNEDP is nonzero if this is an unsigned bit field.
723 MODE is the natural mode of the field value once extracted.
724 TMODE is the mode the caller would like the value to have;
725 but the value may be returned with type MODE instead.
727 ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
728 TOTAL_SIZE is the size in bytes of the containing structure,
731 If a TARGET is specified and we can store in it at no extra cost,
732 we do so, and return TARGET.
733 Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
734 if they are equally easy. */
/* NOTE(review): many interior lines of this function are missing from the
   extract (guards such as HAVE_extzv/HAVE_extv and several braces are
   absent).  Code kept byte-identical.  Mirror of store_bit_field:
   (1) SUBREG extraction for whole/lsb-aligned words, (2) multi-word
   extraction word-by-word, (3) the machine's extzv (unsigned) or extv
   (signed) pattern, (4) fallback to extract_fixed_bit_field. */
737 extract_bit_field (str_rtx, bitsize, bitnum, unsignedp,
738 target, mode, tmode, align, total_size)
740 register int bitsize;
744 enum machine_mode mode, tmode;
748 int unit = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
749 register int offset = bitnum / unit;
750 register int bitpos = bitnum % unit;
751 register rtx op0 = str_rtx;
752 rtx spec_target = target;
753 rtx spec_target_subreg = 0;
755 if (GET_CODE (str_rtx) == MEM && ! MEM_IN_STRUCT_P (str_rtx))
758 /* Discount the part of the structure before the desired byte.
759 We need to know how many bytes are safe to reference after it. */
761 total_size -= (bitpos / BIGGEST_ALIGNMENT
762 * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));
764 if (tmode == VOIDmode)
766 while (GET_CODE (op0) == SUBREG)
768 offset += SUBREG_WORD (op0);
769 op0 = SUBREG_REG (op0);
773 /* If OP0 is a register, BITPOS must count within a word.
774 But as we have it, it counts within whatever size OP0 now has.
775 On a bigendian machine, these are not the same, so convert. */
776 if (GET_CODE (op0) != MEM && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
777 bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
780 /* Extracting a full-word or multi-word value
781 from a structure in a register.
782 This can be done with just SUBREG.
783 So too extracting a subword value in
784 the least significant part of the register. */
786 if (GET_CODE (op0) == REG
787 && ((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
788 && bitpos % BITS_PER_WORD == 0)
789 || (mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0) != BLKmode
791 && bitpos + bitsize == BITS_PER_WORD
797 enum machine_mode mode1
798 = mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0);
800 if (mode1 != GET_MODE (op0))
801 op0 = gen_rtx (SUBREG, mode1, op0, offset);
804 return convert_to_mode (tmode, op0, unsignedp);
808 /* Handle fields bigger than a word. */
810 if (bitsize > BITS_PER_WORD)
812 /* Here we transfer the words of the field
813 in the order least significant first.
814 This is because the most significant word is the one which may
815 be less than full. */
817 int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
820 if (target == 0 || GET_CODE (target) != REG)
821 target = gen_reg_rtx (mode);
823 for (i = 0; i < nwords; i++)
825 /* If I is 0, use the low-order word in both field and target;
826 if I is 1, use the next to lowest word; and so on. */
827 int wordnum = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
828 int bit_offset = (WORDS_BIG_ENDIAN
829 ? MAX (0, bitsize - (i + 1) * BITS_PER_WORD)
830 : i * BITS_PER_WORD);
831 rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
/* Recursive per-word extraction; declaration line elided in extract. */
833 = extract_bit_field (op0, MIN (BITS_PER_WORD,
834 bitsize - i * BITS_PER_WORD),
836 1, target_part, mode, word_mode,
839 if (target_part == 0)
842 if (result_part != target_part)
843 emit_move_insn (target_part, result_part);
849 /* From here on we know the desired field is smaller than a word
850 so we can assume it is an integer. So we can safely extract it as one
851 size of integer, if necessary, and then truncate or extend
852 to the size that is wanted. */
854 /* OFFSET is the number of words or bytes (UNIT says which)
855 from STR_RTX to the first word or byte containing part of the field. */
857 if (GET_CODE (op0) == REG)
860 || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
861 op0 = gen_rtx (SUBREG, TYPE_MODE (type_for_size (BITS_PER_WORD, 0)),
867 op0 = protect_from_queue (str_rtx, 1);
870 /* Now OFFSET is nonzero only for memory operands. */
/* Unsigned path: extzv-applicability test (leading conjuncts and the
   HAVE_extzv guard appear to be elided in this extract). */
876 && (GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extzv][0])
879 int xbitpos = bitpos, xoffset = offset;
880 rtx bitsize_rtx, bitpos_rtx;
881 rtx last = get_last_insn();
883 rtx xtarget = target;
884 rtx xspec_target = spec_target;
885 rtx xspec_target_subreg = spec_target_subreg;
887 enum machine_mode maxmode
888 = insn_operand_mode[(int) CODE_FOR_extzv][0];
890 if (GET_CODE (xop0) == MEM)
892 int save_volatile_ok = volatile_ok;
895 /* Is the memory operand acceptable? */
897 || ! ((*insn_operand_predicate[(int) CODE_FOR_extzv][1])
898 (xop0, GET_MODE (xop0))))
900 /* No, load into a reg and extract from there. */
901 enum machine_mode bestmode;
903 /* Get the mode to use for inserting into this field. If
904 OP0 is BLKmode, get the smallest mode consistent with the
905 alignment. If OP0 is a non-BLKmode object that is no
906 wider than MAXMODE, use its mode. Otherwise, use the
907 smallest mode containing the field. */
909 if (GET_MODE (xop0) == BLKmode
910 || (GET_MODE_SIZE (GET_MODE (op0))
911 > GET_MODE_SIZE (maxmode)))
912 bestmode = get_best_mode (bitsize, bitnum,
913 align * BITS_PER_UNIT, maxmode,
914 MEM_VOLATILE_P (xop0));
916 bestmode = GET_MODE (xop0);
918 if (bestmode == VOIDmode)
921 /* Compute offset as multiple of this unit,
922 counting in bytes. */
923 unit = GET_MODE_BITSIZE (bestmode);
924 xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
925 xbitpos = bitnum % unit;
926 xop0 = change_address (xop0, bestmode,
927 plus_constant (XEXP (xop0, 0),
929 /* Fetch it to a register in that size. */
930 xop0 = force_reg (bestmode, xop0);
932 /* XBITPOS counts within UNIT, which is what is expected. */
935 /* Get ref to first byte containing part of the field. */
936 xop0 = change_address (xop0, byte_mode,
937 plus_constant (XEXP (xop0, 0), xoffset));
939 volatile_ok = save_volatile_ok;
942 /* If op0 is a register, we need it in MAXMODE (which is usually
943 SImode). to make it acceptable to the format of extzv. */
944 if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
946 if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
947 xop0 = gen_rtx (SUBREG, maxmode, xop0, 0);
949 /* On big-endian machines, we count bits from the most significant.
950 If the bit field insn does not, we must invert. */
951 #if BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN
952 xbitpos = unit - bitsize - xbitpos;
954 /* Now convert from counting within UNIT to counting in MAXMODE. */
956 if (GET_CODE (xop0) != MEM)
957 xbitpos += GET_MODE_BITSIZE (maxmode) - unit;
959 unit = GET_MODE_BITSIZE (maxmode);
962 || (flag_force_mem && GET_CODE (xtarget) == MEM))
963 xtarget = xspec_target = gen_reg_rtx (tmode);
965 if (GET_MODE (xtarget) != maxmode)
967 if (GET_CODE (xtarget) == REG)
969 int wider = (GET_MODE_SIZE (maxmode)
970 > GET_MODE_SIZE (GET_MODE (xtarget)));
971 xtarget = gen_lowpart (maxmode, xtarget);
973 xspec_target_subreg = xtarget;
976 xtarget = gen_reg_rtx (maxmode);
979 /* If this machine's extzv insists on a register target,
980 make sure we have one. */
981 if (! ((*insn_operand_predicate[(int) CODE_FOR_extzv][0])
983 xtarget = gen_reg_rtx (maxmode);
985 bitsize_rtx = GEN_INT (bitsize);
986 bitpos_rtx = GEN_INT (xbitpos);
988 pat = gen_extzv (protect_from_queue (xtarget, 1),
989 xop0, bitsize_rtx, bitpos_rtx);
992 spec_target = xspec_target;
995 spec_target_subreg = xspec_target_subreg;
/* Pattern failed: roll back and use the shift/mask fallback (unsigned). */
999 delete_insns_since (last);
1000 target = extract_fixed_bit_field (tmode, op0, offset, bitsize,
1001 bitpos, target, 1, align);
1007 target = extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
/* Signed path: extv-applicability test (HAVE_extv guard elided). */
1014 && (GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extv][0])
1017 int xbitpos = bitpos, xoffset = offset;
1018 rtx bitsize_rtx, bitpos_rtx;
1019 rtx last = get_last_insn();
1020 rtx xop0 = op0, xtarget = target;
1021 rtx xspec_target = spec_target;
1022 rtx xspec_target_subreg = spec_target_subreg;
1024 enum machine_mode maxmode
1025 = insn_operand_mode[(int) CODE_FOR_extv][0];
1027 if (GET_CODE (xop0) == MEM)
1029 /* Is the memory operand acceptable? */
1030 if (! ((*insn_operand_predicate[(int) CODE_FOR_extv][1])
1031 (xop0, GET_MODE (xop0))))
1033 /* No, load into a reg and extract from there. */
1034 enum machine_mode bestmode;
1036 /* Get the mode to use for inserting into this field. If
1037 OP0 is BLKmode, get the smallest mode consistent with the
1038 alignment. If OP0 is a non-BLKmode object that is no
1039 wider than MAXMODE, use its mode. Otherwise, use the
1040 smallest mode containing the field. */
1042 if (GET_MODE (xop0) == BLKmode
1043 || (GET_MODE_SIZE (GET_MODE (op0))
1044 > GET_MODE_SIZE (maxmode)))
1045 bestmode = get_best_mode (bitsize, bitnum,
1046 align * BITS_PER_UNIT, maxmode,
1047 MEM_VOLATILE_P (xop0));
1049 bestmode = GET_MODE (xop0);
1051 if (bestmode == VOIDmode)
1054 /* Compute offset as multiple of this unit,
1055 counting in bytes. */
1056 unit = GET_MODE_BITSIZE (bestmode);
1057 xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
1058 xbitpos = bitnum % unit;
1059 xop0 = change_address (xop0, bestmode,
1060 plus_constant (XEXP (xop0, 0),
1062 /* Fetch it to a register in that size. */
1063 xop0 = force_reg (bestmode, xop0);
1065 /* XBITPOS counts within UNIT, which is what is expected. */
1068 /* Get ref to first byte containing part of the field. */
1069 xop0 = change_address (xop0, byte_mode,
1070 plus_constant (XEXP (xop0, 0), xoffset));
1073 /* If op0 is a register, we need it in MAXMODE (which is usually
1074 SImode) to make it acceptable to the format of extv. */
1075 if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
1077 if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
1078 xop0 = gen_rtx (SUBREG, maxmode, xop0, 0);
1080 /* On big-endian machines, we count bits from the most significant.
1081 If the bit field insn does not, we must invert. */
1082 #if BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN
1083 xbitpos = unit - bitsize - xbitpos;
1085 /* XBITPOS counts within a size of UNIT.
1086 Adjust to count within a size of MAXMODE. */
1088 if (GET_CODE (xop0) != MEM)
1089 xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);
1091 unit = GET_MODE_BITSIZE (maxmode);
1094 || (flag_force_mem && GET_CODE (xtarget) == MEM))
1095 xtarget = xspec_target = gen_reg_rtx (tmode);
1097 if (GET_MODE (xtarget) != maxmode)
1099 if (GET_CODE (xtarget) == REG)
1101 int wider = (GET_MODE_SIZE (maxmode)
1102 > GET_MODE_SIZE (GET_MODE (xtarget)));
1103 xtarget = gen_lowpart (maxmode, xtarget);
1105 xspec_target_subreg = xtarget;
1108 xtarget = gen_reg_rtx (maxmode);
1111 /* If this machine's extv insists on a register target,
1112 make sure we have one. */
1113 if (! ((*insn_operand_predicate[(int) CODE_FOR_extv][0])
1114 (xtarget, maxmode)))
1115 xtarget = gen_reg_rtx (maxmode);
1117 bitsize_rtx = GEN_INT (bitsize);
1118 bitpos_rtx = GEN_INT (xbitpos);
1120 pat = gen_extv (protect_from_queue (xtarget, 1),
1121 xop0, bitsize_rtx, bitpos_rtx);
1126 spec_target = xspec_target;
1127 spec_target_subreg = xspec_target_subreg;
/* Pattern failed: roll back and use the shift/mask fallback (signed). */
1131 delete_insns_since (last);
1132 target = extract_fixed_bit_field (tmode, op0, offset, bitsize,
1133 bitpos, target, 0, align);
1139 target = extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
/* Final coercion of the result to the caller's requested mode. */
1142 if (target == spec_target)
1144 if (target == spec_target_subreg)
1146 if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
1148 /* If the target mode is floating-point, first convert to the
1149 integer mode of that size and then access it as a floating-point
1150 value via a SUBREG. */
1151 if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
1153 target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
1156 if (GET_CODE (target) != REG)
1157 target = copy_to_reg (target);
1158 return gen_rtx (SUBREG, tmode, target, 0);
1161 return convert_to_mode (tmode, target, unsignedp);
1166 /* Extract a bit field using shifts and boolean operations.
1167 Returns an rtx to represent the value.
1168 OP0 addresses a register (word) or memory (byte).
1169 BITPOS says which bit within the word or byte the bit field starts in.
1170 OFFSET says how many bytes farther the bit field starts;
1171 it is 0 if OP0 is a register.
1172 BITSIZE says how many bits long the bit field is.
1173 (If OP0 is a register, it may be narrower than a full word,
1174 but BITPOS still counts within a full word,
1175 which is significant on bigendian machines.)
1177 UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
1178 If TARGET is nonzero, attempts to store the value there
1179 and return TARGET, but this is not guaranteed.
1180 If TARGET is not used, create a pseudo-reg of mode TMODE for the value.
1182 ALIGN is the alignment that STR_RTX is known to have, measured in bytes. */
/* Extract a bit field with shifts and masks (the slow path used when no
   extv/extzv insn applies).  NOTE(review): this listing has gaps — the
   `static rtx` return-type line, several braces, and some else-arms are
   missing; comments below describe only the visible logic.  */
1185 extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
1186 target, unsignedp, align)
1187 enum machine_mode tmode;
1188 register rtx op0, target;
1189 register int offset, bitsize, bitpos;
/* NOTE(review): declarations for UNSIGNEDP and ALIGN are not visible here.  */
1193 int total_bits = BITS_PER_WORD;
1194 enum machine_mode mode;
/* Register case: no memory addressing needed; only a cross-word field
   requires the split helper.  */
1196 if (GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
1198 /* Special treatment for a bit field split across two registers. */
1199 if (bitsize + bitpos > BITS_PER_WORD)
1200 return extract_split_bit_field (op0, bitsize, bitpos,
1205 /* Get the proper mode to use for this field. We want a mode that
1206 includes the entire field. If such a mode would be larger than
1207 a word, we won't be doing the extraction the normal way. */
1209 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
1210 align * BITS_PER_UNIT, word_mode,
1211 GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));
1213 if (mode == VOIDmode)
1214 /* The only way this should occur is if the field spans word
1216 return extract_split_bit_field (op0, bitsize,
1217 bitpos + offset * BITS_PER_UNIT,
1220 total_bits = GET_MODE_BITSIZE (mode);
1222 /* Get ref to an aligned byte, halfword, or word containing the field.
1223 Adjust BITPOS to be position within a word,
1224 and OFFSET to be the offset of that word.
1225 Then alter OP0 to refer to that word. */
1226 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
1227 offset -= (offset % (total_bits / BITS_PER_UNIT));
1228 op0 = change_address (op0, mode,
1229 plus_constant (XEXP (op0, 0), offset));
1232 mode = GET_MODE (op0);
1234 #if BYTES_BIG_ENDIAN
1235 /* BITPOS is the distance between our msb and that of OP0.
1236 Convert it to the distance from the lsb. */
1238 bitpos = total_bits - bitsize - bitpos;
1240 /* Now BITPOS is always the distance between the field's lsb and that of OP0.
1241 We have reduced the big-endian case to the little-endian case. */
/* NOTE(review): the `if (unsignedp)` test that separates the unsigned path
   (shift right, then mask) from the signed path below is not visible in
   this listing.  */
1247 /* If the field does not already start at the lsb,
1248 shift it so it does. */
1249 tree amount = build_int_2 (bitpos, 0);
1250 /* Maybe propagate the target for the shift. */
1251 /* But not if we will return it--could confuse integrate.c. */
1252 rtx subtarget = (target != 0 && GET_CODE (target) == REG
1253 && !REG_FUNCTION_VALUE_P (target)
1255 if (tmode != mode) subtarget = 0;
1256 op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1258 /* Convert the value to the desired mode. */
1260 op0 = convert_to_mode (tmode, op0, 1);
1262 /* Unless the msb of the field used to be the msb when we shifted,
1263 mask out the upper bits. */
1265 if (GET_MODE_BITSIZE (mode) != bitpos + bitsize
1267 #ifdef SLOW_ZERO_EXTEND
1268 /* Always generate an `and' if
1269 we just zero-extended op0 and SLOW_ZERO_EXTEND, since it
1270 will combine fruitfully with the zero-extend. */
/* Mask keeps only the low BITSIZE bits of the shifted value.  */
1275 return expand_binop (GET_MODE (op0), and_optab, op0,
1276 mask_rtx (GET_MODE (op0), 0, bitsize, 0),
1277 target, 1, OPTAB_LIB_WIDEN);
1281 /* To extract a signed bit-field, first shift its msb to the msb of the word,
1282 then arithmetic-shift its lsb to the lsb of the word. */
1283 op0 = force_reg (mode, op0);
1287 /* Find the narrowest integer mode that contains the field. */
1289 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1290 mode = GET_MODE_WIDER_MODE (mode))
1291 if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
1293 op0 = convert_to_mode (mode, op0, 0);
/* Left-shift so the field's msb becomes the mode's msb...  */
1297 if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
1299 tree amount = build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
1300 /* Maybe propagate the target for the shift. */
1301 /* But not if we will return the result--could confuse integrate.c. */
1302 rtx subtarget = (target != 0 && GET_CODE (target) == REG
1303 && ! REG_FUNCTION_VALUE_P (target)
1305 op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
/* ...then arithmetic right-shift to sign-extend the field into place.  */
1308 return expand_shift (RSHIFT_EXPR, mode, op0,
1309 build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
1313 /* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
1314 of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
1315 complement of that if COMPLEMENT. The mask is truncated if
1316 necessary to the width of mode MODE. */
/* Build the double-word mask described in the comment above: BITSIZE one
   bits starting BITPOS bits up, optionally complemented, as a CONST_INT or
   CONST_DOUBLE of MODE.  The mask is computed in two host words, MASKLOW
   and MASKHIGH.  NOTE(review): the else-arms that zero/initialize the
   words in the other cases are missing from this listing.  */
1319 mask_rtx (mode, bitpos, bitsize, complement)
1320 enum machine_mode mode;
1321 int bitpos, bitsize, complement;
1323 HOST_WIDE_INT masklow, maskhigh;
/* Low word: ones from bit BITPOS upward...  */
1325 if (bitpos < HOST_BITS_PER_WIDE_INT)
1326 masklow = (HOST_WIDE_INT) -1 << bitpos;
/* ...then clear everything above BITPOS + BITSIZE.  */
1330 if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
1331 masklow &= ((unsigned HOST_WIDE_INT) -1
1332 >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
/* High word: same construction shifted down one host word.  */
1334 if (bitpos <= HOST_BITS_PER_WIDE_INT)
1337 maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);
1339 if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
1340 maskhigh &= ((unsigned HOST_WIDE_INT) -1
1341 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
/* NOTE(review): the matching `masklow = ~masklow' for the COMPLEMENT case
   is presumably on a missing line — only the high-word complement shows.  */
1347 maskhigh = ~maskhigh;
1351 return immed_double_const (masklow, maskhigh, mode);
1354 /* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
1355 VALUE truncated to BITSIZE bits and then shifted left BITPOS bits. */
/* Build INTVAL (VALUE), truncated to BITSIZE bits, shifted left BITPOS
   bits, as an immediate double-word constant of MODE.  NOTE(review): the
   lines assigning LOW in each branch are missing from this listing; only
   the HIGH-word computations are visible.  */
1358 lshift_value (mode, value, bitpos, bitsize)
1359 enum machine_mode mode;
1361 int bitpos, bitsize;
1363 unsigned HOST_WIDE_INT v = INTVAL (value);
1364 HOST_WIDE_INT low, high;
/* Truncate V to its low BITSIZE bits.  */
1366 if (bitsize < HOST_BITS_PER_WIDE_INT)
1367 v &= ~((HOST_WIDE_INT) -1 << bitsize);
/* Shift crosses into the high word when BITPOS > 0.  */
1369 if (bitpos < HOST_BITS_PER_WIDE_INT)
1372 high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
/* Entire value lands in the high word when BITPOS >= host word size.  */
1377 high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
1380 return immed_double_const (low, high, mode);
1383 /* Extract a bit field that is split across two words
1384 and return an RTX for the result.
1386 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1387 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
1388 UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend. */
/* Extract a field that straddles a word boundary: pull each half with
   extract_fixed_bit_field, shift the more-significant half up, OR them
   together, and sign-extend if !UNSIGNEDP.  NOTE(review): this listing is
   missing lines (the OP0/WORD declarations, an abort for the span check,
   the #else arm of the endianness conditional).  */
1391 extract_split_bit_field (op0, bitsize, bitpos, unsignedp, align)
1393 int bitsize, bitpos, unsignedp, align;
1395 /* BITSIZE_1 is size of the part in the first word. */
1396 int bitsize_1 = BITS_PER_WORD - bitpos % BITS_PER_WORD;
1397 /* BITSIZE_2 is size of the rest (in the following word). */
1398 int bitsize_2 = bitsize - bitsize_1;
1399 rtx part1, part2, result;
/* MEM positions count in bytes, register positions in words.  */
1400 int unit = GET_CODE (op0) == MEM ? BITS_PER_UNIT : BITS_PER_WORD;
1401 int offset = bitpos / unit;
1404 /* The field must span exactly one word boundary. */
1405 if (bitpos / BITS_PER_WORD != (bitpos + bitsize - 1) / BITS_PER_WORD - 1)
1408 /* Get the part of the bit field from the first word. If OP0 is a MEM,
1409 pass OP0 and the offset computed above. Otherwise, get the proper
1410 word and pass an offset of zero. */
1411 word = (GET_CODE (op0) == MEM ? op0
1412 : operand_subword_force (op0, offset, GET_MODE (op0)));
1413 part1 = extract_fixed_bit_field (word_mode, word,
1414 GET_CODE (op0) == MEM ? offset : 0,
1415 bitsize_1, bitpos % unit, NULL_RTX,
1418 /* Offset op0 by 1 word to get to the following one. */
1419 if (GET_CODE (op0) == SUBREG)
1420 word = operand_subword_force (SUBREG_REG (op0),
1421 SUBREG_WORD (op0) + offset + 1, VOIDmode);
1422 else if (GET_CODE (op0) == MEM)
1425 word = operand_subword_force (op0, offset + 1, GET_MODE (op0));
1427 /* Get the part of the bit field from the second word. */
1428 part2 = extract_fixed_bit_field (word_mode, word,
1429 (GET_CODE (op0) == MEM
1430 ? CEIL (offset + 1, UNITS_PER_WORD) * UNITS_PER_WORD
1432 bitsize_2, 0, NULL_RTX, 1, align);
1434 /* Shift the more significant part up to fit above the other part. */
1435 #if BYTES_BIG_ENDIAN
1436 part1 = expand_shift (LSHIFT_EXPR, word_mode, part1,
1437 build_int_2 (bitsize_2, 0), 0, 1);
1439 part2 = expand_shift (LSHIFT_EXPR, word_mode, part2,
1440 build_int_2 (bitsize_1, 0), 0, 1);
1443 /* Combine the two parts with bitwise or. This works
1444 because we extracted both parts as unsigned bit fields. */
1445 result = expand_binop (word_mode, ior_optab, part1, part2, NULL_RTX, 1,
1448 /* Unsigned bit field: we are done. */
1451 /* Signed bit field: sign-extend with two arithmetic shifts. */
1452 result = expand_shift (LSHIFT_EXPR, word_mode, result,
1453 build_int_2 (BITS_PER_WORD - bitsize, 0),
1455 return expand_shift (RSHIFT_EXPR, word_mode, result,
1456 build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
1459 /* Add INC into TARGET. */
/* Emit TARGET += INC (widening through the library if necessary), storing
   back only if expand_binop did not already produce the result in TARGET.
   NOTE(review): the parameter declarations and the argument line passing
   INC to expand_binop are missing from this listing.  */
1462 expand_inc (target, inc)
1465 rtx value = expand_binop (GET_MODE (target), add_optab,
1467 target, 0, OPTAB_LIB_WIDEN);
1468 if (value != target)
1469 emit_move_insn (target, value);
1472 /* Subtract DEC from TARGET. */
/* Emit TARGET -= DEC; mirror image of expand_inc above.  NOTE(review):
   the parameter declarations and the argument line passing DEC to
   expand_binop are missing from this listing.  */
1475 expand_dec (target, dec)
1478 rtx value = expand_binop (GET_MODE (target), sub_optab,
1480 target, 0, OPTAB_LIB_WIDEN);
1481 if (value != target)
1482 emit_move_insn (target, value);
1485 /* Output a shift instruction for expression code CODE,
1486 with SHIFTED being the rtx for the value to shift,
1487 and AMOUNT the tree for the amount to shift by.
1488 Store the result in the rtx TARGET, if that is convenient.
1489 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
1490 Return the rtx for where the value is. */
/* Expand a shift or rotate of SHIFTED by AMOUNT (see the comment block
   above for the contract).  Tries up to three optab strategies in order of
   cost (direct, widen, library), then falls back to an extzv bit-field
   extract for logical right shifts.  NOTE(review): the return-type line,
   several braces/else arms, the #ifdef HAVE_extzv guard opening, and the
   emit of PAT are missing from this listing.  */
1493 expand_shift (code, mode, shifted, amount, target, unsignedp)
1494 enum tree_code code;
1495 register enum machine_mode mode;
1498 register rtx target;
1501 register rtx op1, temp = 0;
/* Classify CODE once: direction and shift-vs-rotate.  */
1502 register int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
1503 register int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
1506 /* Previously detected shift-counts computed by NEGATE_EXPR
1507 and shifted in the other direction; but that does not work
1510 op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);
/* Shift by zero is a no-op.  */
1512 if (op1 == const0_rtx)
/* Three attempts, cheapest strategy first.  */
1515 for (try = 0; temp == 0 && try < 3; try++)
1517 enum optab_methods methods;
1520 methods = OPTAB_DIRECT;
1522 methods = OPTAB_WIDEN;
1524 methods = OPTAB_LIB_WIDEN;
1528 /* Widening does not work for rotation. */
1529 if (methods == OPTAB_WIDEN)
1531 else if (methods == OPTAB_LIB_WIDEN)
1532 methods = OPTAB_LIB;
1534 temp = expand_binop (mode,
1535 left ? rotl_optab : rotr_optab,
1536 shifted, op1, target, unsignedp, methods);
/* Logical shift; a left logical shift may also be done as arithmetic.  */
1540 temp = expand_binop (mode,
1541 left ? lshl_optab : lshr_optab,
1542 shifted, op1, target, unsignedp, methods);
1543 if (temp == 0 && left)
1544 temp = expand_binop (mode, ashl_optab,
1545 shifted, op1, target, unsignedp, methods);
1548 /* Do arithmetic shifts.
1549 Also, if we are going to widen the operand, we can just as well
1550 use an arithmetic right-shift instead of a logical one. */
1551 if (temp == 0 && ! rotate
1552 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
1554 enum optab_methods methods1 = methods;
1556 /* If trying to widen a log shift to an arithmetic shift,
1557 don't accept an arithmetic shift of the same size. */
1559 methods1 = OPTAB_MUST_WIDEN;
1561 /* Arithmetic shift */
1563 temp = expand_binop (mode,
1564 left ? ashl_optab : ashr_optab,
1565 shifted, op1, target, unsignedp, methods1);
1569 /* We can do a logical (unsigned) right shift with a bit-field
1570 extract insn. But first check if one of the above methods worked. */
1574 if (unsignedp && code == RSHIFT_EXPR && ! BITS_BIG_ENDIAN && HAVE_extzv)
1576 enum machine_mode output_mode
1577 = insn_operand_mode[(int) CODE_FOR_extzv][0];
/* Only when the extract's output mode fits the request.  */
1579 if ((methods == OPTAB_DIRECT && mode == output_mode)
1580 || (methods == OPTAB_WIDEN
1581 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (output_mode)))
1583 rtx shifted1 = convert_to_mode (output_mode,
1584 protect_from_queue (shifted, 0),
1586 enum machine_mode length_mode
1587 = insn_operand_mode[(int) CODE_FOR_extzv][2];
1588 enum machine_mode pos_mode
1589 = insn_operand_mode[(int) CODE_FOR_extzv][3];
/* Remember where to roll back if extzv generation fails.  */
1591 rtx last = get_last_insn ();
1597 target1 = protect_from_queue (target, 1);
1599 /* We define extract insns as having OUTPUT_MODE in a register
1600 and the mode of operand 1 in memory. Since we want
1601 OUTPUT_MODE, we will always force the operand into a
1602 register. At some point we might want to support MEM
1604 shifted1 = force_reg (output_mode, shifted1);
1606 /* If we don't have or cannot use a suggested target,
1607 make a place for the result, in the proper mode. */
1608 if (methods == OPTAB_WIDEN || target1 == 0
1609 || ! ((*insn_operand_predicate[(int) CODE_FOR_extzv][0])
1610 (target1, output_mode)))
1611 target1 = gen_reg_rtx (output_mode);
1613 xop1 = protect_from_queue (xop1, 0);
1614 xop1 = convert_to_mode (pos_mode, xop1,
1615 TREE_UNSIGNED (TREE_TYPE (amount)));
1617 /* If this machine's extzv insists on a register for
1618 operand 3 (position), arrange for that. */
1619 if (! ((*insn_operand_predicate[(int) CODE_FOR_extzv][3])
1621 xop1 = force_reg (pos_mode, xop1);
1623 /* WIDTH gets the width of the bit field to extract:
1624 wordsize minus # bits to shift by. */
1625 if (GET_CODE (xop1) == CONST_INT)
1626 width = GEN_INT (GET_MODE_BITSIZE (mode) - INTVAL (op1));
1629 /* Now get the width in the proper mode. */
1630 op1 = protect_from_queue (op1, 0);
1631 width = convert_to_mode (length_mode, op1,
1632 TREE_UNSIGNED (TREE_TYPE (amount)));
/* Non-constant count: compute width = bitsize-of-mode - count at runtime.  */
1634 width = expand_binop (length_mode, sub_optab,
1635 GEN_INT (GET_MODE_BITSIZE (mode)),
1636 width, NULL_RTX, 0, OPTAB_LIB_WIDEN);
1639 /* If this machine's extzv insists on a register for
1640 operand 2 (length), arrange for that. */
1641 if (! ((*insn_operand_predicate[(int) CODE_FOR_extzv][2])
1642 (width, length_mode)))
1643 width = force_reg (length_mode, width);
1645 /* Now extract with WIDTH, omitting OP1 least sig bits. */
1646 pat = gen_extzv (target1, shifted1, width, xop1);
1650 temp = convert_to_mode (mode, target1, 1);
/* Pattern generation failed: discard the preparatory insns.  */
1653 delete_insns_since (last);
1656 /* Can also do logical shift with signed bit-field extract
1657 followed by inserting the bit-field at a different position.
1658 That strategy is not yet implemented. */
1660 #endif /* HAVE_extzv */
/* Operation kinds recorded by synth_mult in struct algorithm
   (see the block comment that follows this enum).  */
enum alg_code
{
  alg_add,       /* add multiplicand * coefficient to the total */
  alg_subtract,  /* subtract multiplicand * coefficient from the total */
  alg_compound   /* this coefficient +/- the next one multiplies the total */
};
1670 /* This structure records a sequence of operations.
1671 `ops' is the number of operations recorded.
1672 `cost' is their total cost.
1673 The operations are stored in `op' and the corresponding
1674 integer coefficients in `coeff'.
1675 These are the operations:
1676 alg_add Add to the total the multiplicand times the coefficient.
1677 alg_subtract Subtract the multiplicand times the coefficient.
1678 alg_compound This coefficient plus or minus the following one
1679 is multiplied into the total. The following operation
1680 is alg_add or alg_subtract to indicate whether to add
1681 or subtract the two coefficients. */
1683 #ifndef MAX_BITS_PER_WORD
1684 #define MAX_BITS_PER_WORD BITS_PER_WORD
1691 enum alg_code op[MAX_BITS_PER_WORD];
1692 unsigned int coeff[MAX_BITS_PER_WORD];
1695 /* Compute and return the best algorithm for multiplying by T.
1696 Assume that add insns cost ADD_COST and shifts cost SHIFT_COST.
1697 Return cost -1 if would cost more than MAX_COST. */
/* Recursively search for the cheapest shift/add sequence that multiplies
   by T, given per-operation costs; returns cost -1 on failure (see the
   comment block above).  NOTE(review): this listing has gaps — loop/brace
   lines, the pointer-swap that exchanges BEST_ALG and ALG_IN on
   improvement, and some condition lines are missing.  */
1699 static struct algorithm
1700 synth_mult (t, add_cost, shift_cost, max_cost)
1701 unsigned HOST_WIDE_INT t;
1702 int add_cost, shift_cost;
/* Scratch results live on the stack; the winner is copied out at return.  */
1706 struct algorithm *best_alg
1707 = (struct algorithm *)alloca (sizeof (struct algorithm));
1708 struct algorithm *alg_in
1709 = (struct algorithm *)alloca (sizeof (struct algorithm));
1712 /* No matter what happens, we want to return a valid algorithm. */
1713 best_alg->cost = max_cost;
1716 /* Is t an exponent of 2, so we can just do a shift? */
1722 if (max_cost >= shift_cost)
1724 best_alg->cost = shift_cost;
1726 best_alg->op[0] = alg_add;
1727 best_alg->coeff[0] = t;
/* Even the single shift is too expensive: report failure.  */
1730 best_alg->cost = -1;
1743 /* If MAX_COST just permits as little as an addition (or less), we won't
1744 succeed in synthesizing an algorithm for t. Return immediately with
1745 an indication of failure. */
1746 if (max_cost <= add_cost)
1748 best_alg->cost = -1;
1752 /* Look for factors of t of the form
1753 t = q(2**m +- 1), 2 <= m <= floor(log2(t)) - 1.
1754 If we find such a factor, we can multiply by t using an algorithm that
1755 multiplies by q, shift the result by m and add/subtract it to itself. */
1757 for (m = floor_log2 (t) - 1; m >= 2; m--)
1759 HOST_WIDE_INT m_exp_2 = (HOST_WIDE_INT) 1 << m;
/* Factor 2**m + 1: recurse on the quotient, budgeted by what is left.  */
1765 HOST_WIDE_INT q = t / d;
1767 cost = add_cost + shift_cost * 2;
1769 *alg_in = synth_mult (q, add_cost, shift_cost,
1770 MIN (max_cost, best_alg->cost) - cost);
1772 if (alg_in->cost >= 0)
1774 cost += alg_in->cost;
1776 if (cost < best_alg->cost)
1778 struct algorithm *x;
/* Record the compound step: (result << m) + result.  */
1782 best_alg->coeff[best_alg->ops] = m_exp_2;
1783 best_alg->op[best_alg->ops++] = alg_compound;
1784 best_alg->coeff[best_alg->ops] = 1;
1785 best_alg->op[best_alg->ops++] = alg_add;
1786 best_alg->cost = cost;
/* Factor 2**m - 1: same scheme, combining with a subtract.  */
1794 HOST_WIDE_INT q = t / d;
1796 cost = add_cost + shift_cost * 2;
1798 *alg_in = synth_mult (q, add_cost, shift_cost,
1799 MIN (max_cost, best_alg->cost) - cost);
1801 if (alg_in->cost >= 0)
1803 cost += alg_in->cost;
1805 if (cost < best_alg->cost)
1807 struct algorithm *x;
1811 best_alg->coeff[best_alg->ops] = m_exp_2;
1812 best_alg->op[best_alg->ops++] = alg_compound;
1813 best_alg->coeff[best_alg->ops] = 1;
1814 best_alg->op[best_alg->ops++] = alg_subtract;
1815 best_alg->cost = cost;
1821 /* Try load effective address instructions, i.e. do a*3, a*5, a*9. */
1827 q = t & -t; /* get out lsb */
1828 w = (t - q) & -(t - q); /* get out next lsb */
/* Only if the two low set bits are within the lea scale range.  */
1830 if (w / q <= lea_max_mul)
1832 cost = lea_cost + (q != 1 ? shift_cost : 0);
1834 *alg_in = synth_mult (t - q - w, add_cost, shift_cost,
1835 MIN (max_cost, best_alg->cost) - cost);
1837 if (alg_in->cost >= 0)
1839 cost += alg_in->cost;
1841 /* Use <= to prefer this method to the factoring method
1842 when the cost appears the same, because this method
1843 uses fewer temporary registers. */
1844 if (cost <= best_alg->cost)
1846 struct algorithm *x;
1850 best_alg->coeff[best_alg->ops] = w;
1851 best_alg->op[best_alg->ops++] = alg_add;
1852 best_alg->coeff[best_alg->ops] = q;
1853 best_alg->op[best_alg->ops++] = alg_add;
1854 best_alg->cost = cost;
1860 /* Now, use the good old method to add or subtract at the leftmost
1867 q = t & -t; /* get out lsb */
/* W scans the run of consecutive one bits starting at the lsb.  */
1868 for (w = q; (w & t) != 0; w <<= 1)
1871 /* Reject the case where t has only two bits.
1872 Thus we prefer addition in that case. */
1873 && !(t < w && w == q << 2))
1875 /* There are many bits in a row. Make 'em by subtraction. */
1881 *alg_in = synth_mult (t + q, add_cost, shift_cost,
1882 MIN (max_cost, best_alg->cost) - cost);
1884 if (alg_in->cost >= 0)
1886 cost += alg_in->cost;
1888 /* Use <= to prefer this method to the factoring method
1889 when the cost appears the same, because this method
1890 uses fewer temporary registers. */
1891 if (cost <= best_alg->cost)
1893 struct algorithm *x;
1897 best_alg->coeff[best_alg->ops] = q;
1898 best_alg->op[best_alg->ops++] = alg_subtract;
1899 best_alg->cost = cost;
1905 /* There's only one bit at the left. Make it by addition. */
1911 *alg_in = synth_mult (t - q, add_cost, shift_cost,
1912 MIN (max_cost, best_alg->cost) - cost);
1914 if (alg_in->cost >= 0)
1916 cost += alg_in->cost;
1918 if (cost <= best_alg->cost)
1920 struct algorithm *x;
1924 best_alg->coeff[best_alg->ops] = q;
1925 best_alg->op[best_alg->ops++] = alg_add;
1926 best_alg->cost = cost;
/* Nothing beat the budget: signal failure to the caller.  */
1932 if (best_alg->cost >= max_cost)
1933 best_alg->cost = -1;
1937 /* Perform a multiplication and return an rtx for the result.
1938 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
1939 TARGET is a suggestion for where to store the result (an rtx).
1941 We check specially for a constant integer as OP1.
1942 If you want this check for OP0 as well, then before calling
1943 you should swap the two operands if OP0 would be constant. */
/* Expand MODE multiplication of OP0 by OP1 (see the comment block above).
   For a constant OP1, tries a synth_mult shift/add sequence (for OP1 and
   for -OP1 followed by negation) before falling back to a real multiply.
   NOTE(review): this listing has gaps — the return-type line, braces,
   switch/case labels, and some loop headers are missing.  */
1946 expand_mult (mode, op0, op1, target, unsignedp)
1947 enum machine_mode mode;
1948 register rtx op0, op1, target;
1951 rtx const_op1 = op1;
1953 /* If we are multiplying in DImode, it may still be a win
1954 to try to work with shifts and adds. */
1955 if (GET_CODE (op1) == CONST_DOUBLE
1956 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
1957 && HOST_BITS_PER_INT <= BITS_PER_WORD)
/* A CONST_DOUBLE whose value fits a host word is demoted to CONST_INT.  */
1959 if ((CONST_DOUBLE_HIGH (op1) == 0 && CONST_DOUBLE_LOW (op1) >= 0)
1960 || (CONST_DOUBLE_HIGH (op1) == -1 && CONST_DOUBLE_LOW (op1) < 0))
1961 const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
1964 /* We used to test optimize here, on the grounds that it's better to
1965 produce a smaller program when -O is not used.
1966 But this causes such a terrible slowdown sometimes
1967 that it seems better to use synth_mult always. */
1968 if (GET_CODE (const_op1) == CONST_INT && ! mult_is_very_cheap)
1970 struct algorithm alg;
1971 struct algorithm neg_alg;
1973 HOST_WIDE_INT absval = INTVAL (op1);
1976 /* Try to do the computation two ways: multiply by the negative of OP1
1977 and then negate, or do the multiplication directly. The latter is
1978 usually faster for positive numbers and the former for negative
1979 numbers, but the opposite can be faster if the original value
1980 has a factor of 2**m +/- 1, while the negated value does not or
1983 alg = synth_mult (absval, add_cost, shift_cost, mult_cost);
1984 neg_alg = synth_mult (- absval, add_cost, shift_cost,
1985 mult_cost - negate_cost);
/* Pick the negated plan when it is strictly cheaper overall.  */
1987 if (neg_alg.cost >= 0 && neg_alg.cost + negate_cost < alg.cost)
1988 alg = neg_alg, negate = 1, absval = - absval;
1992 /* If we found something, it must be cheaper than multiply.
1996 int factors_seen = 0;
1998 op0 = protect_from_queue (op0, 0);
2000 /* Avoid referencing memory over and over.
2001 For speed, but also for correctness when mem is volatile. */
2002 if (GET_CODE (op0) == MEM)
2003 op0 = force_reg (mode, op0);
/* First step seeds ACCUM; NOTE(review): the branch choosing between the
   copy and the shift below is partly missing from this listing.  */
2006 accum = copy_to_mode_reg (mode, op0);
2009 /* 1 if this is the last in a series of adds and subtracts. */
2010 int last = (1 == alg.ops || alg.op[1] == alg_compound);
2011 int log = floor_log2 (alg.coeff[0]);
2012 if (! factors_seen && ! last)
2013 log -= floor_log2 (alg.coeff[1]);
2015 if (alg.op[0] != alg_add)
2017 accum = expand_shift (LSHIFT_EXPR, mode, op0,
2018 build_int_2 (log, 0), NULL_RTX, 0);
/* Replay the remaining recorded operations in order.  */
2021 while (++opno < alg.ops)
2023 int log = floor_log2 (alg.coeff[opno]);
2024 /* 1 if this is the last in a series of adds and subtracts. */
2025 int last = (opno + 1 == alg.ops
2026 || alg.op[opno + 1] == alg_compound);
2028 /* If we have not yet seen any separate factors (alg_compound)
2029 then turn op0<<a1 + op0<<a2 + op0<<a3... into
2030 (op0<<(a1-a2) + op0)<<(a2-a3) + op0... */
2031 switch (alg.op[opno])
/* alg_add arm: ACCUM += OP0 << log (or the factored form below).  */
2036 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2037 build_int_2 (log, 0), NULL_RTX, 0);
2038 accum = force_operand (gen_rtx (PLUS, mode, accum, tem),
2044 log -= floor_log2 (alg.coeff[opno + 1]);
2045 accum = force_operand (gen_rtx (PLUS, mode, accum, op0),
2047 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2048 build_int_2 (log, 0), accum, 0);
/* alg_subtract arm: ACCUM -= OP0 << log (or the factored form).  */
2055 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2056 build_int_2 (log, 0), NULL_RTX, 0);
2057 accum = force_operand (gen_rtx (MINUS, mode, accum, tem),
2063 log -= floor_log2 (alg.coeff[opno + 1]);
2064 accum = force_operand (gen_rtx (MINUS, mode, accum, op0),
2066 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2067 build_int_2 (log, 0), accum, 0);
/* alg_compound arm: combine ACCUM << log with ACCUM << next-log.  */
2074 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2075 build_int_2 (log, 0), NULL_RTX, 0);
2077 log = floor_log2 (alg.coeff[opno + 1]);
2078 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2079 build_int_2 (log, 0), NULL_RTX, 0);
2081 if (alg.op[opno] == alg_add)
2082 accum = force_operand (gen_rtx (PLUS, mode, tem, accum),
2085 accum = force_operand (gen_rtx (MINUS, mode, tem, accum),
2090 /* Write a REG_EQUAL note on the last insn so that we can cse
2091 multiplication sequences. We need not do this if we were
2092 multiplying by a power of two, since only one insn would have
2095 ??? We could also write REG_EQUAL notes on the last insn of
2096 each sequence that uses a single temporary, but it is not
2097 clear how to calculate the partial product so far.
2099 Torbjorn: Can you do this? */
2101 if (exact_log2 (absval) < 0)
2103 last = get_last_insn ()
2105 = gen_rtx (EXPR_LIST, REG_EQUAL,
2106 gen_rtx (MULT, mode, op0,
2107 negate ? GEN_INT (absval) : op1),
/* If we planned on -OP1, emit the final negation now.  */
2111 return (negate ? expand_unop (mode, neg_optab, accum, target, 0)
2116 /* This used to use umul_optab if unsigned,
2117 but I think that for non-widening multiply there is no difference
2118 between signed and unsigned. */
2119 op0 = expand_binop (mode, smul_optab,
2120 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
2126 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
2127 if that is convenient, and returning where the result is.
2128 You may request either the quotient or the remainder as the result;
2129 specify REM_FLAG nonzero to get the remainder.
2131 CODE is the expression code for which kind of division this is;
2132 it controls how rounding is done. MODE is the machine mode to use.
2133 UNSIGNEDP nonzero means do unsigned division. */
2135 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
2136 and then correct it by or'ing in missing high bits
2137 if result of ANDI is nonzero.
2138 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
2139 This could optimize to a bfexts instruction.
2140 But C doesn't use these operations, so their optimizations are
2144 expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
2146 enum tree_code code;
2147 enum machine_mode mode;
2148 register rtx op0, op1, target;
2151 register rtx result = 0;
2152 enum machine_mode compute_mode;
2154 int can_clobber_op0;
2155 int mod_insn_no_good = 0;
2156 rtx adjusted_op0 = op0;
2157 optab optab1, optab2;
2159 /* We shouldn't be called with op1 == const1_rtx, but some of the
2160 code below will malfunction if we are, so check here and handle
2161 the special case if so. */
2162 if (op1 == const1_rtx)
2163 return rem_flag ? const0_rtx : op0;
2165 /* Don't use the function value register as a target
2166 since we have to read it as well as write it,
2167 and function-inlining gets confused by this. */
2168 if (target && REG_P (target) && REG_FUNCTION_VALUE_P (target))
2171 /* Don't clobber an operand while doing a multi-step calculation. */
2173 if ((rem_flag && (reg_mentioned_p (target, op0)
2174 || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
2175 || reg_mentioned_p (target, op1)
2176 || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM))
2179 can_clobber_op0 = (GET_CODE (op0) == REG && op0 == target);
2181 if (GET_CODE (op1) == CONST_INT)
2182 log = exact_log2 (INTVAL (op1));
2184 /* If log is >= 0, we are dividing by 2**log, and will do it by shifting,
2185 which is really floor-division. Otherwise we will really do a divide,
2186 and we assume that is trunc-division.
2188 We must correct the dividend by adding or subtracting something
2189 based on the divisor, in order to do the kind of rounding specified
2190 by CODE. The correction depends on what kind of rounding is actually
2191 available, and that depends on whether we will shift or divide.
2193 In many of these cases it is possible to perform the operation by a
2194 clever series of logical operations (shifts and/or exclusive-ors).
2195 Although avoiding the jump has the advantage that it extends the basic
2196 block and allows further optimization, the branch-free code is normally
2197 at least one instruction longer in the (most common) case where the
2198 dividend is non-negative. Performance measurements of the two
2199 alternatives show that the branch-free code is slightly faster on the
2200 IBM ROMP but slower on CISC processors (significantly slower on the
2201 VAX). Accordingly, the jump code has been retained.
2203 On machines where the jump code is slower, the cost of a DIV or MOD
2204 operation can be set small (less than twice that of an addition); in
2205 that case, we pretend that we don't have a power of two and perform
2206 a normal division or modulus operation. */
2208 if ((code == TRUNC_MOD_EXPR || code == TRUNC_DIV_EXPR)
2210 && (rem_flag ? smod_pow2_cheap : sdiv_pow2_cheap))
2213 /* Get the mode in which to perform this computation. Normally it will
2214 be MODE, but sometimes we can't do the desired operation in MODE.
2215 If so, pick a wider mode in which we can do the operation. Convert
2216 to that mode at the start to avoid repeated conversions.
2218 First see what operations we need. These depend on the expression
2219 we are evaluating. (We assume that divxx3 insns exist under the
2220 same conditions that modxx3 insns and that these insns don't normally
2221 fail. If these assumptions are not correct, we may generate less
2222 efficient code in some cases.)
2224 Then see if we find a mode in which we can open-code that operation
2225 (either a division, modulus, or shift). Finally, check for the smallest
2226 mode for which we can do the operation with a library call. */
2228 optab1 = (log >= 0 ? (unsignedp ? lshr_optab : ashr_optab)
2229 : (unsignedp ? udiv_optab : sdiv_optab));
2230 optab2 = (log >= 0 ? optab1 : (unsignedp ? udivmod_optab : sdivmod_optab));
2232 for (compute_mode = mode; compute_mode != VOIDmode;
2233 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
2234 if (optab1->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing
2235 || optab2->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing)
2238 if (compute_mode == VOIDmode)
2239 for (compute_mode = mode; compute_mode != VOIDmode;
2240 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
2241 if (optab1->handlers[(int) compute_mode].libfunc
2242 || optab2->handlers[(int) compute_mode].libfunc)
2245 /* If we still couldn't find a mode, use MODE; we'll probably abort in
2247 if (compute_mode == VOIDmode)
2248 compute_mode = mode;
2250 /* Now convert to the best mode to use. Show we made a copy of OP0
2251 and hence we can clobber it (we cannot use a SUBREG to widen
2253 if (compute_mode != mode)
2255 adjusted_op0 = op0 = convert_to_mode (compute_mode, op0, unsignedp);
2256 can_clobber_op0 = 1;
2257 op1 = convert_to_mode (compute_mode, op1, unsignedp);
2260 /* If we are computing the remainder and one of the operands is a volatile
2261 MEM, copy it into a register. */
2263 if (rem_flag && GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
2264 adjusted_op0 = op0 = force_reg (compute_mode, op0), can_clobber_op0 = 1;
2265 if (rem_flag && GET_CODE (op1) == MEM && MEM_VOLATILE_P (op1))
2266 op1 = force_reg (compute_mode, op1);
2268 /* If we are computing the remainder, op0 will be needed later to calculate
2269 X - Y * (X / Y), therefore cannot be clobbered. */
2271 can_clobber_op0 = 0;
2273 if (target == 0 || GET_MODE (target) != compute_mode)
2274 target = gen_reg_rtx (compute_mode);
2278 case TRUNC_MOD_EXPR:
2279 case TRUNC_DIV_EXPR:
2280 if (log >= 0 && ! unsignedp)
2282 if (! can_clobber_op0)
2284 adjusted_op0 = copy_to_suggested_reg (adjusted_op0, target,
2286 /* Copy op0 to a reg, since emit_cmp_insn will call emit_queue
2287 which will screw up mem refs for autoincrements. */
2288 op0 = force_reg (compute_mode, op0);
2290 /* Here we need to add OP1-1 if OP0 is negative, 0 otherwise.
2291 This can be computed without jumps by arithmetically shifting
2292 OP0 right LOG-1 places and then shifting right logically
2293 SIZE-LOG bits. The resulting value is unconditionally added
2295 if (log == 1 || BRANCH_COST >= 3)
2297 rtx temp = gen_reg_rtx (compute_mode);
2298 temp = copy_to_suggested_reg (adjusted_op0, temp, compute_mode);
2299 temp = expand_shift (RSHIFT_EXPR, compute_mode, temp,
2300 build_int_2 (log - 1, 0), NULL_RTX, 0);
2301 temp = expand_shift (RSHIFT_EXPR, compute_mode, temp,
2302 build_int_2 (GET_MODE_BITSIZE (mode) - log,
2305 expand_inc (adjusted_op0, temp);
2309 rtx label = gen_label_rtx ();
2310 emit_cmp_insn (adjusted_op0, const0_rtx, GE,
2311 NULL_RTX, compute_mode, 0, 0);
2312 emit_jump_insn (gen_bge (label));
2313 expand_inc (adjusted_op0, plus_constant (op1, -1));
2316 mod_insn_no_good = 1;
2320 case FLOOR_DIV_EXPR:
2321 case FLOOR_MOD_EXPR:
2322 if (log < 0 && ! unsignedp)
2324 rtx label = gen_label_rtx ();
2325 if (! can_clobber_op0)
2327 adjusted_op0 = copy_to_suggested_reg (adjusted_op0, target,
2329 /* Copy op0 to a reg, since emit_cmp_insn will call emit_queue
2330 which will screw up mem refs for autoincrements. */
2331 op0 = force_reg (compute_mode, op0);
2333 emit_cmp_insn (adjusted_op0, const0_rtx, GE,
2334 NULL_RTX, compute_mode, 0, 0);
2335 emit_jump_insn (gen_bge (label));
2336 expand_dec (adjusted_op0, op1);
2337 expand_inc (adjusted_op0, const1_rtx);
2339 mod_insn_no_good = 1;
2345 if (! can_clobber_op0)
2347 adjusted_op0 = copy_to_suggested_reg (adjusted_op0, target,
2349 /* Copy op0 to a reg, since emit_cmp_insn will call emit_queue
2350 which will screw up mem refs for autoincrements. */
2351 op0 = force_reg (compute_mode, op0);
2358 label = gen_label_rtx ();
2359 emit_cmp_insn (adjusted_op0, const0_rtx, LE,
2360 NULL_RTX, compute_mode, 0, 0);
2361 emit_jump_insn (gen_ble (label));
2363 expand_inc (adjusted_op0, op1);
2364 expand_dec (adjusted_op0, const1_rtx);
2370 adjusted_op0 = expand_binop (compute_mode, add_optab,
2371 adjusted_op0, plus_constant (op1, -1),
2372 NULL_RTX, 0, OPTAB_LIB_WIDEN);
2374 mod_insn_no_good = 1;
2377 case ROUND_DIV_EXPR:
2378 case ROUND_MOD_EXPR:
2379 if (! can_clobber_op0)
2381 adjusted_op0 = copy_to_suggested_reg (adjusted_op0, target,
2383 /* Copy op0 to a reg, since emit_cmp_insn will call emit_queue
2384 which will screw up mem refs for autoincrements. */
2385 op0 = force_reg (compute_mode, op0);
2389 op1 = expand_shift (RSHIFT_EXPR, compute_mode, op1,
2390 integer_one_node, NULL_RTX, 0);
2393 if (BRANCH_COST >= 2)
2395 /* Negate OP1 if OP0 < 0. Do this by computing a temporary
2396 that has all bits equal to the sign bit and exclusive
2397 or-ing it with OP1. */
2398 rtx temp = gen_reg_rtx (compute_mode);
2399 temp = copy_to_suggested_reg (adjusted_op0, temp, compute_mode);
2400 temp = expand_shift (RSHIFT_EXPR, compute_mode, temp,
2401 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2403 op1 = expand_binop (compute_mode, xor_optab, op1, temp, op1,
2404 unsignedp, OPTAB_LIB_WIDEN);
2408 rtx label = gen_label_rtx ();
2409 emit_cmp_insn (adjusted_op0, const0_rtx, GE, NULL_RTX,
2410 compute_mode, 0, 0);
2411 emit_jump_insn (gen_bge (label));
2412 expand_unop (compute_mode, neg_optab, op1, op1, 0);
2416 expand_inc (adjusted_op0, op1);
2420 op1 = GEN_INT (((HOST_WIDE_INT) 1 << log) / 2);
2421 expand_inc (adjusted_op0, op1);
2423 mod_insn_no_good = 1;
2427 if (rem_flag && !mod_insn_no_good)
2429 /* Try to produce the remainder directly */
2431 result = expand_binop (compute_mode, and_optab, adjusted_op0,
2432 GEN_INT (((HOST_WIDE_INT) 1 << log) - 1),
2433 target, 1, OPTAB_LIB_WIDEN);
2436 /* See if we can do remainder without a library call. */
2437 result = sign_expand_binop (mode, umod_optab, smod_optab,
2438 adjusted_op0, op1, target,
2439 unsignedp, OPTAB_WIDEN);
2442 /* No luck there. Can we do remainder and divide at once
2443 without a library call? */
2444 result = gen_reg_rtx (compute_mode);
2445 if (! expand_twoval_binop (unsignedp
2446 ? udivmod_optab : sdivmod_optab,
2448 NULL_RTX, result, unsignedp))
2455 return gen_lowpart (mode, result);
2457 /* Produce the quotient. */
2459 result = expand_shift (RSHIFT_EXPR, compute_mode, adjusted_op0,
2460 build_int_2 (log, 0), target, unsignedp);
2461 else if (rem_flag && !mod_insn_no_good)
2462 /* If producing quotient in order to subtract for remainder,
2463 and a remainder subroutine would be ok,
2464 don't use a divide subroutine. */
2465 result = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
2466 adjusted_op0, op1, NULL_RTX, unsignedp,
2470 /* Try a quotient insn, but not a library call. */
2471 result = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
2473 rem_flag ? NULL_RTX : target,
2474 unsignedp, OPTAB_WIDEN);
2477 /* No luck there. Try a quotient-and-remainder insn,
2478 keeping the quotient alone. */
2479 result = gen_reg_rtx (mode);
2480 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
2482 result, NULL_RTX, unsignedp))
2486 /* If still no luck, use a library call. */
2488 result = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
2490 rem_flag ? NULL_RTX : target,
2491 unsignedp, OPTAB_LIB_WIDEN);
2494 /* If we really want the remainder, get it by subtraction. */
2498 /* No divide instruction either. Use library for remainder. */
2499 result = sign_expand_binop (compute_mode, umod_optab, smod_optab,
2501 unsignedp, OPTAB_LIB_WIDEN);
2504 /* We divided. Now finish doing X - Y * (X / Y). */
2505 result = expand_mult (compute_mode, result, op1, target, unsignedp);
2506 if (! result) abort ();
2507 result = expand_binop (compute_mode, sub_optab, op0,
2508 result, target, unsignedp, OPTAB_LIB_WIDEN);
2515 return gen_lowpart (mode, result);
2518 /* Return a tree node with data type TYPE, describing the value of X.
2519 Usually this is an RTL_EXPR, if there is no obvious better choice.
2520 X may be an expression, however we only support those expressions
2521 generated by loop.c. */
/* NOTE(review): this excerpt is fragmentary -- the function signature,
   the switch case labels, and several interior lines are not present
   here, so the comments below describe only the visible statements and
   hedge where the elided context matters.  */
2530 switch (GET_CODE (x))
/* Presumably the CONST_INT arm (case label not visible): build an
   INTEGER_CST from INTVAL, sign-extending the high word (-1) when the
   type is signed and the value is negative -- TODO confirm against the
   elided label.  */
2533 t = build_int_2 (INTVAL (x),
2534 ! TREE_UNSIGNED (type) && INTVAL (x) >= 0 ? 0 : -1);
2535 TREE_TYPE (t) = type;
/* Presumably the CONST_DOUBLE arm: VOIDmode marks a wide integer
   constant (rebuild from its low/high halves), otherwise treat it as a
   real constant.  */
2539 if (GET_MODE (x) == VOIDmode)
2541 t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
2542 TREE_TYPE (t) = type;
2548 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
2549 t = build_real (type, d);
/* Arithmetic rtx codes are rebuilt as the corresponding folded tree
   expressions, recursing on the operands via make_tree.  */
2555 return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
2556 make_tree (type, XEXP (x, 1))));
2559 return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
2560 make_tree (type, XEXP (x, 1))));
2563 return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0))));
2566 return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
2567 make_tree (type, XEXP (x, 1))));
2570 return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
2571 make_tree (type, XEXP (x, 1))));
/* Logical right shift: perform the RSHIFT_EXPR in the unsigned variant
   of TYPE so the tree semantics match LSHIFTRT, then convert back.
   (The first-operand make_tree call is partly elided here.)  */
2574 return fold (convert (type,
2575 build (RSHIFT_EXPR, unsigned_type (type),
2576 make_tree (unsigned_type (type),
2578 make_tree (type, XEXP (x, 1)))));
/* Arithmetic right shift: force the signed variant of TYPE for the
   same reason.  */
2581 return fold (convert (type,
2582 build (RSHIFT_EXPR, signed_type (type),
2583 make_tree (signed_type (type), XEXP (x, 0)),
2584 make_tree (type, XEXP (x, 1)))));
/* Presumably the signed-division arm: for non-real types, compute the
   TRUNC_DIV_EXPR in the signed variant T of TYPE -- the assignment of
   T for the REAL_TYPE path is elided; verify in the full source.  */
2587 if (TREE_CODE (type) != REAL_TYPE)
2588 t = signed_type (type);
2592 return fold (convert (type,
2593 build (TRUNC_DIV_EXPR, t,
2594 make_tree (t, XEXP (x, 0)),
2595 make_tree (t, XEXP (x, 1)))));
/* Presumably the UDIV arm: same shape, but in the unsigned type.  */
2597 t = unsigned_type (type);
2598 return fold (convert (type,
2599 build (TRUNC_DIV_EXPR, t,
2600 make_tree (t, XEXP (x, 0)),
2601 make_tree (t, XEXP (x, 1)))));
/* Default: no better tree form -- wrap X in an RTL_EXPR of TYPE with
   an empty insn sequence.  */
2603 t = make_node (RTL_EXPR);
2604 TREE_TYPE (t) = type;
2605 RTL_EXPR_RTL (t) = x;
2606 /* There are no insns to be output
2607 when this rtl_expr is used. */
2608 RTL_EXPR_SEQUENCE (t) = 0;
2613 /* Return an rtx representing the value of X * MULT + ADD.
2614 TARGET is a suggestion for where to store the result (an rtx).
2615 MODE is the machine mode for the computation.
2616 X and MULT must have mode MODE. ADD may have a different mode.
2617 So can X (defaults to same as MODE).
2618 UNSIGNEDP is non-zero to do unsigned multiplication.
2619 This may emit insns. */
2622 expand_mult_add (x, target, mult, add, mode, unsignedp)
2623 rtx x, target, mult, add;
2624 enum machine_mode mode;
/* NOTE(review): the return-type line, the `int unsignedp;' parameter
   declaration, and the function braces are elided from this excerpt.  */
/* Tree type corresponding to MODE with the requested signedness.  */
2627 tree type = type_for_mode (mode, unsignedp);
/* ADD may carry its own mode; VOIDmode (a constant) falls back to the
   main type.  */
2628 tree add_type = (GET_MODE (add) == VOIDmode
2629 ? type : type_for_mode (GET_MODE (add), unsignedp));
/* Build (X * MULT) + ADD as folded tree expressions so constant
   arithmetic is simplified before expansion.  */
2630 tree result = fold (build (PLUS_EXPR, type,
2631 fold (build (MULT_EXPR, type,
2632 make_tree (type, x),
2633 make_tree (type, mult))),
2634 make_tree (add_type, add)));
/* Expand the folded tree back to rtl, suggesting TARGET.  */
2636 return expand_expr (result, target, VOIDmode, 0);
2639 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
2640 and returning TARGET.
2642 If TARGET is 0, a pseudo-register or constant is returned. */
2645 expand_and (op0, op1, target)
2646 rtx op0, op1, target;
/* NOTE(review): the return-type line, braces, the declaration of TEM,
   and the branch between the constant-fold case and the final
   `else if' (orig. lines 2660-2664) are elided from this excerpt.  */
2648 enum machine_mode mode = VOIDmode;
/* Take the machine mode from whichever operand has one; if neither
   does, both should be CONST_INTs (handled below).  */
2651 if (GET_MODE (op0) != VOIDmode)
2652 mode = GET_MODE (op0);
2653 else if (GET_MODE (op1) != VOIDmode)
2654 mode = GET_MODE (op1);
/* With a known mode, emit a real AND (library/widening fallbacks
   allowed); otherwise fold two integer constants at compile time.  */
2656 if (mode != VOIDmode)
2657 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
2658 else if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT)
2659 tem = GEN_INT (INTVAL (op0) & INTVAL (op1));
/* If the result landed somewhere other than TARGET, copy it there --
   the preceding branch of this conditional is not visible here.  */
2665 else if (tem != target)
2666 emit_move_insn (target, tem);
2670 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
2671 and storing in TARGET. Normally return TARGET.
2672 Return 0 if that cannot be done.
2674 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
2675 it is VOIDmode, they cannot both be CONST_INT.
2677 UNSIGNEDP is for the case where we have to widen the operands
2678 to perform the operation. It says to use zero-extension.
2680 NORMALIZEP is 1 if we should convert the result to be either zero
2681 or one one. Normalize is -1 if we should convert the result to be
2682 either zero or -1. If NORMALIZEP is zero, the result will be left
2683 "raw" out of the scc insn. */
2686 emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep)
2690 enum machine_mode mode;
2695 enum insn_code icode;
2696 enum machine_mode compare_mode;
2697 enum machine_mode target_mode = GET_MODE (target);
2700 rtx pattern, comparison;
2702 if (mode == VOIDmode)
2703 mode = GET_MODE (op0);
2705 /* For some comparisons with 1 and -1, we can convert this to
2706 comparisons with zero. This will often produce more opportunities for
2707 store-flag insns. */
2712 if (op1 == const1_rtx)
2713 op1 = const0_rtx, code = LE;
2716 if (op1 == constm1_rtx)
2717 op1 = const0_rtx, code = LT;
2720 if (op1 == const1_rtx)
2721 op1 = const0_rtx, code = GT;
2724 if (op1 == constm1_rtx)
2725 op1 = const0_rtx, code = GE;
2728 if (op1 == const1_rtx)
2729 op1 = const0_rtx, code = NE;
2732 if (op1 == const1_rtx)
2733 op1 = const0_rtx, code = EQ;
2737 /* From now on, we won't change CODE, so set ICODE now. */
2738 icode = setcc_gen_code[(int) code];
2740 /* If this is A < 0 or A >= 0, we can do this by taking the ones
2741 complement of A (for GE) and shifting the sign bit to the low bit. */
2742 if (op1 == const0_rtx && (code == LT || code == GE)
2743 && GET_MODE_CLASS (mode) == MODE_INT
2744 && (normalizep || STORE_FLAG_VALUE == 1
2745 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2746 && (STORE_FLAG_VALUE
2747 == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
2749 rtx subtarget = target;
2751 /* If the result is to be wider than OP0, it is best to convert it
2752 first. If it is to be narrower, it is *incorrect* to convert it
2754 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
2756 op0 = protect_from_queue (op0, 0);
2757 op0 = convert_to_mode (target_mode, op0, 0);
2761 if (target_mode != mode)
2765 op0 = expand_unop (mode, one_cmpl_optab, op0, subtarget, 0);
2767 if (normalizep || STORE_FLAG_VALUE == 1)
2768 /* If we are supposed to produce a 0/1 value, we want to do
2769 a logical shift from the sign bit to the low-order bit; for
2770 a -1/0 value, we do an arithmetic shift. */
2771 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
2772 size_int (GET_MODE_BITSIZE (mode) - 1),
2773 subtarget, normalizep != -1);
2775 if (mode != target_mode)
2776 op0 = convert_to_mode (target_mode, op0, 0);
2781 if (icode != CODE_FOR_nothing)
2783 /* We think we may be able to do this with a scc insn. Emit the
2784 comparison and then the scc insn.
2786 compare_from_rtx may call emit_queue, which would be deleted below
2787 if the scc insn fails. So call it ourselves before setting LAST. */
2790 last = get_last_insn ();
2793 = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX, 0);
2794 if (GET_CODE (comparison) == CONST_INT)
2795 return (comparison == const0_rtx ? const0_rtx
2796 : normalizep == 1 ? const1_rtx
2797 : normalizep == -1 ? constm1_rtx
2800 /* Get a reference to the target in the proper mode for this insn. */
2801 compare_mode = insn_operand_mode[(int) icode][0];
2803 if (preserve_subexpressions_p ()
2804 || ! (*insn_operand_predicate[(int) icode][0]) (subtarget, compare_mode))
2805 subtarget = gen_reg_rtx (compare_mode);
2807 pattern = GEN_FCN (icode) (subtarget);
2810 emit_insn (pattern);
2812 /* If we are converting to a wider mode, first convert to
2813 TARGET_MODE, then normalize. This produces better combining
2814 opportunities on machines that have a SIGN_EXTRACT when we are
2815 testing a single bit. This mostly benefits the 68k.
2817 If STORE_FLAG_VALUE does not have the sign bit set when
2818 interpreted in COMPARE_MODE, we can do this conversion as
2819 unsigned, which is usually more efficient. */
2820 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
2822 convert_move (target, subtarget,
2823 (GET_MODE_BITSIZE (compare_mode)
2824 <= HOST_BITS_PER_WIDE_INT)
2825 && 0 == (STORE_FLAG_VALUE
2826 & ((HOST_WIDE_INT) 1
2827 << (GET_MODE_BITSIZE (compare_mode) -1))));
2829 compare_mode = target_mode;
2834 /* If we want to keep subexpressions around, don't reuse our
2837 if (preserve_subexpressions_p ())
2840 /* Now normalize to the proper value in COMPARE_MODE. Sometimes
2841 we don't have to do anything. */
2842 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
2844 else if (normalizep == - STORE_FLAG_VALUE)
2845 op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
2847 /* We don't want to use STORE_FLAG_VALUE < 0 below since this
2848 makes it hard to use a value of just the sign bit due to
2849 ANSI integer constant typing rules. */
2850 else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
2851 && (STORE_FLAG_VALUE
2852 & ((HOST_WIDE_INT) 1
2853 << (GET_MODE_BITSIZE (compare_mode) - 1))))
2854 op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
2855 size_int (GET_MODE_BITSIZE (compare_mode) - 1),
2856 subtarget, normalizep == 1);
2857 else if (STORE_FLAG_VALUE & 1)
2859 op0 = expand_and (op0, const1_rtx, subtarget);
2860 if (normalizep == -1)
2861 op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
2866 /* If we were converting to a smaller mode, do the
2868 if (target_mode != compare_mode)
2870 convert_move (target, op0);
2879 delete_insns_since (last);
2881 subtarget = target_mode == mode ? target : 0;
2883 /* If we reached here, we can't do this with a scc insn. However, there
2884 are some comparisons that can be done directly. For example, if
2885 this is an equality comparison of integers, we can try to exclusive-or
2886 (or subtract) the two operands and use a recursive call to try the
2887 comparison with zero. Don't do any of these cases if branches are
2890 if (BRANCH_COST >= 0
2891 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
2892 && op1 != const0_rtx)
2894 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
2898 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
2901 tem = emit_store_flag (target, code, tem, const0_rtx,
2902 mode, unsignedp, normalizep);
2904 delete_insns_since (last);
2908 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
2909 the constant zero. Reject all other comparisons at this point. Only
2910 do LE and GT if branches are expensive since they are expensive on
2911 2-operand machines. */
2913 if (BRANCH_COST == 0
2914 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
2915 || (code != EQ && code != NE
2916 && (BRANCH_COST <= 1 || (code != LE && code != GT))))
2919 /* See what we need to return. We can only return a 1, -1, or the
2922 if (normalizep == 0)
2924 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
2925 normalizep = STORE_FLAG_VALUE;
2927 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2928 && (STORE_FLAG_VALUE
2929 == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
2935 /* Try to put the result of the comparison in the sign bit. Assume we can't
2936 do the necessary operation below. */
2940 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
2941 the sign bit set. */
2945 /* This is destructive, so SUBTARGET can't be OP0. */
2946 if (rtx_equal_p (subtarget, op0))
2949 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
2952 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
2956 /* To see if A > 0, compute (((signed) A) << BITS) - A, where BITS is the
2957 number of bits in the mode of OP0, minus one. */
2961 if (rtx_equal_p (subtarget, op0))
2964 tem = expand_shift (RSHIFT_EXPR, mode, op0,
2965 size_int (GET_MODE_BITSIZE (mode) - 1),
2967 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
2971 if (code == EQ || code == NE)
2973 /* For EQ or NE, one way to do the comparison is to apply an operation
2974 that converts the operand into a positive number if it is non-zero
2975 or zero if it was originally zero. Then, for EQ, we subtract 1 and
2976 for NE we negate. This puts the result in the sign bit. Then we
2977 normalize with a shift, if needed.
2979 Two operations that can do the above actions are ABS and FFS, so try
2980 them. If that doesn't work, and MODE is smaller than a full word,
2981 we can use zero-extension to the wider mode (an unsigned conversion)
2982 as the operation. */
2984 if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2985 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
2986 else if (ffs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2987 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
2988 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
2991 op0 = protect_from_queue (op0, 0);
2992 tem = convert_to_mode (mode, op0, 1);
2998 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
3001 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
3004 /* If we couldn't do it that way, for NE we can "or" the two's complement
3005 of the value with itself. For EQ, we take the one's complement of
3006 that "or", which is an extra insn, so we only handle EQ if branches
3009 if (tem == 0 && (code == NE || BRANCH_COST > 1))
3011 if (rtx_equal_p (subtarget, op0))
3014 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
3015 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
3018 if (tem && code == EQ)
3019 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
3023 if (tem && normalizep)
3024 tem = expand_shift (RSHIFT_EXPR, mode, tem,
3025 size_int (GET_MODE_BITSIZE (mode) - 1),
3026 tem, normalizep == 1);
3028 if (tem && GET_MODE (tem) != target_mode)
3030 convert_move (target, tem, 0);
3035 delete_insns_since (last);
3039 emit_jump_insn ((*bcc_gen_fctn[(int) code]) (label));
3040 emit_move_insn (target, const1_rtx);