1 /* Medium-level subroutines: convert bit-field store and extract
2 and shifts, multiplies and divides to rtl instructions.
3 Copyright (C) 1987, 1988, 1989, 1992 Free Software Foundation, Inc.
5 This file is part of GNU CC.
7 GNU CC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
12 GNU CC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GNU CC; see the file COPYING. If not, write to
19 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
26 #include "insn-flags.h"
27 #include "insn-codes.h"
28 #include "insn-config.h"
/* Forward declarations for static helpers defined later in this file
   (old-style C: parameter lists are left unspecified here).  */
33 static rtx extract_split_bit_field ();
34 static rtx extract_fixed_bit_field ();
35 static void store_split_bit_field ();
36 static void store_fixed_bit_field ();
37 static rtx mask_rtx ();
38 static rtx lshift_value ();
/* Integer ceiling of X divided by Y.
   NOTE(review): X and Y are each evaluated more than once,
   so callers must not pass expressions with side effects.  */
40 #define CEIL(x,y) (((x) + (y) - 1) / (y))
42 /* Non-zero means multiply instructions are cheaper than shifts. */
43 int mult_is_very_cheap;
45 /* Non-zero means divides or modulus operations are relatively cheap for
46 powers of two, so don't use branches; emit the operation instead.
47 Usually, this will mean that the MD file will emit non-branch
50 static int sdiv_pow2_cheap, smod_pow2_cheap;
52 /* Cost of various pieces of RTL.
   NOTE(review): these appear to be filled in by the cost-probing code
   below, which measures rtx_cost of representative expressions.  */
53 static int add_cost, shift_cost, mult_cost, negate_cost, lea_cost;
55 /* Max scale factor for scaled address in lea instruction. */
56 static int lea_max_mul;
/* NOTE(review): this region appears to be the interior of the cost
   initialization routine -- its header and some statements are not
   visible in this chunk of the file.  It builds throwaway RTL on the
   obstack, measures rtx_cost of representative operations to fill in
   the cost globals declared above, and probes recog for an lea-style
   scaled-address pattern.  */
/* Remember the obstack high-water mark so everything allocated below
   can be freed again at the end.  */
61 char *free_point = (char *) oballoc (1);
62 /* This is "some random pseudo register" for purposes of calling recog
63 to see what insns exist. */
64 rtx reg = gen_rtx (REG, word_mode, FIRST_PSEUDO_REGISTER);
65 rtx pow2 = gen_rtx (CONST_INT, VOIDmode, 32);
69 add_cost = rtx_cost (gen_rtx (PLUS, word_mode, reg, reg));
70 shift_cost = rtx_cost (gen_rtx (LSHIFT, word_mode, reg,
71 /* Using a constant gives better
72 estimate of typical costs.
73 1 or 2 might have quirks. */
74 gen_rtx (CONST_INT, VOIDmode, 3)));
75 mult_cost = rtx_cost (gen_rtx (MULT, word_mode, reg, reg));
76 negate_cost = rtx_cost (gen_rtx (NEG, word_mode, reg));
78 /* 999999 is chosen to avoid any plausible faster special case. */
80 = (rtx_cost (gen_rtx (MULT, word_mode, reg,
81 gen_rtx (CONST_INT, VOIDmode, 999999)))
82 < rtx_cost (gen_rtx (LSHIFT, word_mode, reg,
83 gen_rtx (CONST_INT, VOIDmode, 7))));
/* Division/modulus by a power of two (POW2 is 32 here) is considered
   "cheap" when it costs no more than two additions.  */
86 = rtx_cost (gen_rtx (DIV, word_mode, reg, pow2)) <= 2 * add_cost;
88 = rtx_cost (gen_rtx (MOD, word_mode, reg, pow2)) <= 2 * add_cost;
/* Probe for a (set reg (plus (mult reg i) ...)) pattern to learn the
   largest scale factor the target's lea-style addressing supports.  */
93 lea = gen_rtx (SET, VOIDmode, reg,
94 gen_rtx (PLUS, word_mode,
95 gen_rtx (MULT, word_mode, reg,
96 gen_rtx (CONST_INT, VOIDmode, i)),
98 /* Using 0 as second argument is not quite right,
99 but what else is there to do? */
100 if (recog (lea, 0, &dummy) < 0)
103 lea_cost = rtx_cost (SET_SRC (lea));
106 /* Free the objects we just allocated. */
110 /* Return an rtx representing minus the value of X.
111 MODE is the intended mode of the result,
112 useful if X is a CONST_INT. */
/* NOTE(review): the function's name line is not visible in this chunk;
   only its parameter declarations and (partial) body are.  */
116 enum machine_mode mode;
/* Constant case: negate at compile time and truncate/sign-extend the
   host int to MODE's width so the CONST_INT is canonical.  */
119 if (GET_CODE (x) == CONST_INT)
121 int val = - INTVAL (x);
122 if (GET_MODE_BITSIZE (mode) < HOST_BITS_PER_INT)
124 /* Sign extend the value from the bits that are significant. */
125 if (val & (1 << (GET_MODE_BITSIZE (mode) - 1)))
126 val |= (-1) << GET_MODE_BITSIZE (mode);
128 val &= (1 << GET_MODE_BITSIZE (mode)) - 1;
130 return gen_rtx (CONST_INT, VOIDmode, val);
/* Non-constant case: emit a NEG via the generic unary expander.  */
133 return expand_unop (GET_MODE (x), neg_optab, x, 0, 0);
136 /* Generate code to store value from rtx VALUE
137 into a bit-field within structure STR_RTX
138 containing BITSIZE bits starting at bit BITNUM.
139 FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
140 ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
141 TOTAL_SIZE is the size of the structure in bytes, or -1 if varying. */
143 /* ??? Note that there are two different ideas here for how
144 to determine the size to count bits within, for a register.
145 One is BITS_PER_WORD, and the other is the size of operand 3
146 of the insv pattern. (The latter assumes that an n-bit machine
147 will be able to insert bit fields up to n bits wide.)
148 It isn't certain that either of these is right.
149 extract_bit_field has the same quandary. */
/* NOTE(review): this chunk of the file is internally truncated -- some
   lines of this function are missing (parameter declarations, braces,
   parts of conditions).  The code below is annotated as-is; do not
   treat it as a complete definition.  */
152 store_bit_field (str_rtx, bitsize, bitnum, fieldmode, value, align, total_size)
154 register int bitsize;
156 enum machine_mode fieldmode;
/* UNIT is the granularity BITNUM is measured in: bits-per-byte for
   memory operands, bits-per-word for registers.  */
161 int unit = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
162 register int offset = bitnum / unit;
163 register int bitpos = bitnum % unit;
164 register rtx op0 = str_rtx;
166 if (GET_CODE (str_rtx) == MEM && ! MEM_IN_STRUCT_P (str_rtx))
169 /* Discount the part of the structure before the desired byte.
170 We need to know how many bytes are safe to reference after it. */
172 total_size -= (bitpos / BIGGEST_ALIGNMENT
173 * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));
/* Strip SUBREGs, folding their word offsets into OFFSET.  */
175 while (GET_CODE (op0) == SUBREG)
177 /* The following line once was done only if WORDS_BIG_ENDIAN,
178 but I think that is a mistake. WORDS_BIG_ENDIAN is
179 meaningful at a much higher level; when structures are copied
180 between memory and regs, the higher-numbered regs
181 always get higher addresses. */
182 offset += SUBREG_WORD (op0);
183 /* We used to adjust BITPOS here, but now we do the whole adjustment
184 right after the loop. */
185 op0 = SUBREG_REG (op0);
189 /* If OP0 is a register, BITPOS must count within a word.
190 But as we have it, it counts within whatever size OP0 now has.
191 On a bigendian machine, these are not the same, so convert. */
192 if (GET_CODE (op0) != MEM && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
193 bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
196 value = protect_from_queue (value, 0);
199 value = force_not_mem (value);
201 /* Note that the adjustment of BITPOS above has no effect on whether
202 BITPOS is 0 in a REG bigger than a word. */
203 if (GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD && GET_CODE (op0) != MEM
204 && bitpos == 0 && bitsize == GET_MODE_BITSIZE (fieldmode))
206 /* Storing in a full-word or multi-word field in a register
207 can be done with just SUBREG. */
208 if (GET_MODE (op0) != fieldmode)
209 op0 = gen_rtx (SUBREG, fieldmode, op0, offset);
210 emit_move_insn (op0, value);
214 /* Storing an lsb-aligned field in a register
215 can be done with a movestrict instruction. */
217 if (GET_CODE (op0) != MEM
219 && bitpos + bitsize == unit
223 && bitsize == GET_MODE_BITSIZE (fieldmode)
224 && (GET_MODE (op0) == fieldmode
225 || (movstrict_optab->handlers[(int) fieldmode].insn_code
226 != CODE_FOR_nothing)))
228 /* Get appropriate low part of the value being stored. */
229 if (GET_CODE (value) == CONST_INT || GET_CODE (value) == REG)
230 value = gen_lowpart (fieldmode, value);
231 else if (!(GET_CODE (value) == SYMBOL_REF
232 || GET_CODE (value) == LABEL_REF
233 || GET_CODE (value) == CONST))
234 value = convert_to_mode (fieldmode, value, 0);
236 if (GET_MODE (op0) == fieldmode)
237 emit_move_insn (op0, value);
/* Otherwise emit the target's movstrict pattern, forcing VALUE into
   a register first if the pattern's operand predicate rejects it.  */
240 int icode = movstrict_optab->handlers[(int) fieldmode].insn_code;
241 if(! (*insn_operand_predicate[icode][1]) (value, fieldmode))
242 value = copy_to_mode_reg (fieldmode, value);
243 emit_insn (GEN_FCN (icode)
244 (gen_rtx (SUBREG, fieldmode, op0, offset), value));
249 /* Handle fields bigger than a word. */
251 if (bitsize > BITS_PER_WORD)
253 /* Here we transfer the words of the field
254 in the order least significant first.
255 This is because the most significant word is the one which may
256 be less than full. */
258 int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
261 /* This is the mode we must force value to, so that there will be enough
262 subwords to extract. Note that fieldmode will often (always?) be
263 VOIDmode, because that is what store_field uses to indicate that this
264 is a bit field, but passing VOIDmode to operand_subword_force will
265 result in an abort. */
266 fieldmode = mode_for_size (nwords * BITS_PER_WORD, MODE_INT, 0);
268 for (i = 0; i < nwords; i++)
270 /* If I is 0, use the low-order word in both field and target;
271 if I is 1, use the next to lowest word; and so on. */
272 int wordnum = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
273 int bit_offset = (WORDS_BIG_ENDIAN
274 ? MAX (bitsize - (i + 1) * BITS_PER_WORD, 0)
275 : i * BITS_PER_WORD);
/* Recurse to store each word-sized (or smaller, for the last) piece.  */
276 store_bit_field (op0, MIN (BITS_PER_WORD,
277 bitsize - i * BITS_PER_WORD),
278 bitnum + bit_offset, word_mode,
279 operand_subword_force (value, wordnum, fieldmode),
285 /* From here on we can assume that the field to be stored in is
286 a full-word (whatever type that is), since it is shorter than a word. */
288 /* OFFSET is the number of words or bytes (UNIT says which)
289 from STR_RTX to the first word or byte containing part of the field. */
291 if (GET_CODE (op0) == REG)
294 || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
295 op0 = gen_rtx (SUBREG, TYPE_MODE (type_for_size (BITS_PER_WORD, 0)),
301 op0 = protect_from_queue (op0, 1);
304 /* Now OFFSET is nonzero only if OP0 is memory
305 and is therefore always measured in bytes. */
/* Try the target's insv (insert bit field) pattern, if present and
   wide enough; fall back to shifts/masks on any failure.  */
309 && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
310 /* Ensure insv's size is wide enough for this field. */
311 && (GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_insv][3])
314 int xbitpos = bitpos;
317 rtx last = get_last_insn ();
319 enum machine_mode maxmode
320 = insn_operand_mode[(int) CODE_FOR_insv][3];
322 int save_volatile_ok = volatile_ok;
325 /* If this machine's insv can only insert into a register, or if we
326 are to force MEMs into a register, copy OP0 into a register and
327 save it back later. */
328 if (GET_CODE (op0) == MEM
330 || ! ((*insn_operand_predicate[(int) CODE_FOR_insv][0])
334 enum machine_mode bestmode;
336 /* Get the mode to use for inserting into this field. If OP0 is
337 BLKmode, get the smallest mode consistent with the alignment. If
338 OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
339 mode. Otherwise, use the smallest mode containing the field. */
341 if (GET_MODE (op0) == BLKmode
342 || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
344 = get_best_mode (bitsize, bitnum, align * BITS_PER_UNIT, maxmode,
345 MEM_VOLATILE_P (op0))
347 bestmode = GET_MODE (op0);
349 if (bestmode == VOIDmode)
352 /* Adjust address to point to the containing unit of that mode. */
353 unit = GET_MODE_BITSIZE (bestmode);
354 /* Compute offset as multiple of this unit, counting in bytes. */
355 offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
356 bitpos = bitnum % unit;
357 op0 = change_address (op0, bestmode,
358 plus_constant (XEXP (op0, 0), offset));
360 /* Fetch that unit, store the bitfield in it, then store the unit. */
361 tempreg = copy_to_reg (op0);
362 store_bit_field (tempreg, bitsize, bitpos, fieldmode, value,
364 emit_move_insn (op0, tempreg);
367 volatile_ok = save_volatile_ok;
369 /* Add OFFSET into OP0's address. */
370 if (GET_CODE (xop0) == MEM)
371 xop0 = change_address (xop0, byte_mode,
372 plus_constant (XEXP (xop0, 0), offset));
374 /* If xop0 is a register, we need it in MAXMODE
375 to make it acceptable to the format of insv. */
376 if (GET_CODE (xop0) == SUBREG)
377 PUT_MODE (xop0, maxmode);
378 if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
379 xop0 = gen_rtx (SUBREG, maxmode, xop0, 0);
381 /* On big-endian machines, we count bits from the most significant.
382 If the bit field insn does not, we must invert. */
384 #if BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN
385 xbitpos = unit - bitsize - xbitpos;
387 /* We have been counting XBITPOS within UNIT.
388 Count instead within the size of the register. */
390 if (GET_CODE (xop0) != MEM)
391 xbitpos += GET_MODE_BITSIZE (maxmode) - unit;
393 unit = GET_MODE_BITSIZE (maxmode);
395 /* Convert VALUE to maxmode (which insv insn wants) in VALUE1. */
397 if (GET_MODE (value) != maxmode)
399 if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
401 /* Optimization: Don't bother really extending VALUE
402 if it has all the bits we will actually use. */
404 /* Avoid making subreg of a subreg, or of a mem. */
405 if (GET_CODE (value1) != REG)
406 value1 = copy_to_reg (value1);
407 value1 = gen_rtx (SUBREG, maxmode, value1, 0);
409 else if (!CONSTANT_P (value))
410 /* Parse phase is supposed to make VALUE's data type
411 match that of the component reference, which is a type
412 at least as wide as the field; so VALUE should have
413 a mode that corresponds to that type. */
417 /* If this machine's insv insists on a register,
418 get VALUE1 into a register. */
419 if (! ((*insn_operand_predicate[(int) CODE_FOR_insv][3])
421 value1 = force_reg (maxmode, value1);
423 pat = gen_insv (xop0,
424 gen_rtx (CONST_INT, VOIDmode, bitsize),
425 gen_rtx (CONST_INT, VOIDmode, xbitpos),
/* insv pattern generation failed: discard any insns emitted while
   trying it and fall back to the shift/mask implementation.  */
431 delete_insns_since (last);
432 store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
438 /* Insv is not available; store using shifts and boolean ops. */
439 store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
443 /* Use shifts and boolean operations to store VALUE
444 into a bit field of width BITSIZE
445 in a memory location specified by OP0 except offset by OFFSET bytes.
446 (OFFSET must be 0 if OP0 is a register.)
447 The field starts at position BITPOS within the byte.
448 (If OP0 is a register, it may be a full word or a narrower mode,
449 but BITPOS still counts within a full word,
450 which is significant on bigendian machines.)
451 STRUCT_ALIGN is the alignment the structure is known to have (in bytes).
453 Note that protect_from_queue has already been done on OP0 and VALUE. */
/* NOTE(review): some lines of this function are missing in this chunk
   (declarations, braces, parts of comments); annotated as-is.  */
456 store_fixed_bit_field (op0, offset, bitsize, bitpos, value, struct_align)
458 register int offset, bitsize, bitpos;
462 register enum machine_mode mode;
463 int total_bits = BITS_PER_WORD;
468 /* Add OFFSET to OP0's address (if it is in memory)
469 and if a single byte contains the whole bit field
470 change OP0 to a byte. */
472 /* There is a case not handled here:
473 a structure with a known alignment of just a halfword
474 and a field split across two aligned halfwords within the structure.
475 Or likewise a structure with a known alignment of just a byte
476 and a field split across two bytes.
477 Such cases are not supposed to be able to occur. */
479 if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
483 /* Special treatment for a bit field split across two registers. */
484 if (bitsize + bitpos > BITS_PER_WORD)
486 store_split_bit_field (op0, bitsize, bitpos, value, BITS_PER_WORD);
492 /* Get the proper mode to use for this field. We want a mode that
493 includes the entire field. If such a mode would be larger than
494 a word, we won't be doing the extraction the normal way. */
496 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
497 struct_align * BITS_PER_UNIT, word_mode,
498 GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));
500 if (mode == VOIDmode)
502 /* The only way this should occur is if the field spans word
504 store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
505 value, struct_align);
509 total_bits = GET_MODE_BITSIZE (mode);
511 /* Get ref to an aligned byte, halfword, or word containing the field.
512 Adjust BITPOS to be position within a word,
513 and OFFSET to be the offset of that word.
514 Then alter OP0 to refer to that word. */
515 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
516 offset -= (offset % (total_bits / BITS_PER_UNIT));
517 op0 = change_address (op0, mode,
518 plus_constant (XEXP (op0, 0), offset));
521 mode = GET_MODE (op0);
523 /* Now MODE is either some integral mode for a MEM as OP0,
524 or is a full-word for a REG as OP0. TOTAL_BITS corresponds.
525 The bit field is contained entirely within OP0.
526 BITPOS is the starting bit number within OP0.
527 (OP0's mode may actually be narrower than MODE.) */
530 /* BITPOS is the distance between our msb
531 and that of the containing datum.
532 Convert it to the distance from the lsb. */
534 bitpos = total_bits - bitsize - bitpos;
536 /* Now BITPOS is always the distance between our lsb
539 /* Shift VALUE left by BITPOS bits. If VALUE is not constant,
540 we must first convert its mode to MODE. */
542 if (GET_CODE (value) == CONST_INT)
544 register int v = INTVAL (value);
/* Mask V down to the field width; guard the shift so we never shift
   by the full width of the host int.  */
546 if (bitsize < HOST_BITS_PER_INT)
547 v &= (1 << bitsize) - 1;
/* Recognize an all-ones value, which lets the clear step be skipped.  */
551 else if ((bitsize < HOST_BITS_PER_INT && v == (1 << bitsize) - 1)
552 || (bitsize == HOST_BITS_PER_INT && v == -1))
555 value = lshift_value (mode, value, bitpos, bitsize);
/* Non-constant VALUE: mask it only when stray high bits could land
   inside MODE (i.e. the field does not reach MODE's top bit).  */
559 int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
560 && bitpos + bitsize != GET_MODE_BITSIZE (mode));
562 if (GET_MODE (value) != mode)
564 /* If VALUE is a floating-point mode, access it as an integer
565 of the corresponding size, then convert it. This can occur on
566 a machine with 64 bit registers that uses SFmode for float. */
567 if (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT)
569 if (GET_CODE (value) != REG)
570 value = copy_to_reg (value);
572 = gen_rtx (SUBREG, word_mode, value, 0);
575 if ((GET_CODE (value) == REG || GET_CODE (value) == SUBREG)
576 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
577 value = gen_lowpart (mode, value);
579 value = convert_to_mode (mode, value, 1);
583 value = expand_binop (mode, and_optab, value,
584 mask_rtx (mode, 0, bitsize, 0),
585 0, 1, OPTAB_LIB_WIDEN);
587 value = expand_shift (LSHIFT_EXPR, mode, value,
588 build_int_2 (bitpos, 0), 0, 1);
591 /* Now clear the chosen bits in OP0,
592 except that if VALUE is -1 we need not bother. */
594 subtarget = (GET_CODE (op0) == REG || ! flag_force_mem) ? op0 : 0;
598 temp = expand_binop (mode, and_optab, op0,
599 mask_rtx (mode, bitpos, bitsize, 1),
600 subtarget, 1, OPTAB_LIB_WIDEN);
606 /* Now logical-or VALUE into OP0, unless it is zero. */
609 temp = expand_binop (mode, ior_optab, temp, value,
610 subtarget, 1, OPTAB_LIB_WIDEN);
612 emit_move_insn (op0, temp);
615 /* Store a bit field that is split across two words.
617 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
618 BITSIZE is the field width; BITPOS the position of its first bit
620 VALUE is the value to store. */
/* NOTE(review): some lines of this function are missing in this chunk
   (parameter declarations, braces, abort calls); annotated as-is.  */
623 store_split_bit_field (op0, bitsize, bitpos, value, align)
629 /* BITSIZE_1 is size of the part in the first word. */
630 int bitsize_1 = BITS_PER_WORD - bitpos % BITS_PER_WORD;
631 /* BITSIZE_2 is size of the rest (in the following word). */
632 int bitsize_2 = bitsize - bitsize_1;
634 int unit = GET_CODE (op0) == MEM ? BITS_PER_UNIT : BITS_PER_WORD;
635 int offset = bitpos / unit;
638 /* The field must span exactly one word boundary. */
639 if (bitpos / BITS_PER_WORD != (bitpos + bitsize - 1) / BITS_PER_WORD - 1)
/* Normalize VALUE to word_mode; non-CONST_INT constants go through
   a register so they can be split with extract_fixed_bit_field.  */
642 if (GET_MODE (value) != VOIDmode)
643 value = convert_to_mode (word_mode, value, 1);
644 if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
645 value = copy_to_reg (value);
647 /* Split the value into two parts:
648 PART1 gets that which goes in the first word; PART2 the other. */
650 /* PART1 gets the more significant part. */
651 if (GET_CODE (value) == CONST_INT)
653 part1 = gen_rtx (CONST_INT, VOIDmode,
654 (unsigned) (INTVAL (value)) >> bitsize_2);
655 part2 = gen_rtx (CONST_INT, VOIDmode,
656 (unsigned) (INTVAL (value)) & ((1 << bitsize_2) - 1));
660 part1 = extract_fixed_bit_field (word_mode, value, 0, bitsize_1,
661 BITS_PER_WORD - bitsize, 0, 1,
663 part2 = extract_fixed_bit_field (word_mode, value, 0, bitsize_2,
664 BITS_PER_WORD - bitsize_2, 0, 1,
668 /* PART1 gets the less significant part. */
669 if (GET_CODE (value) == CONST_INT)
671 part1 = gen_rtx (CONST_INT, VOIDmode,
672 (unsigned) (INTVAL (value)) & ((1 << bitsize_1) - 1));
673 part2 = gen_rtx (CONST_INT, VOIDmode,
674 (unsigned) (INTVAL (value)) >> bitsize_1);
678 part1 = extract_fixed_bit_field (word_mode, value, 0, bitsize_1, 0,
679 0, 1, BITS_PER_WORD);
680 part2 = extract_fixed_bit_field (word_mode, value, 0, bitsize_2,
681 bitsize_1, 0, 1, BITS_PER_WORD);
685 /* Store PART1 into the first word. If OP0 is a MEM, pass OP0 and the
686 offset computed above. Otherwise, get the proper word and pass an
688 word = (GET_CODE (op0) == MEM ? op0
689 : operand_subword (op0, offset, 1, GET_MODE (op0)));
693 store_fixed_bit_field (word, GET_CODE (op0) == MEM ? offset : 0,
694 bitsize_1, bitpos % unit, part1, align);
696 /* Offset op0 by 1 word to get to the following one. */
697 if (GET_CODE (op0) == SUBREG)
698 word = operand_subword (SUBREG_REG (op0), SUBREG_WORD (op0) + offset + 1,
700 else if (GET_CODE (op0) == MEM)
703 word = operand_subword (op0, offset + 1, 1, GET_MODE (op0));
708 /* Store PART2 into the second word. */
709 store_fixed_bit_field (word,
710 (GET_CODE (op0) == MEM
711 ? CEIL (offset + 1, UNITS_PER_WORD) * UNITS_PER_WORD
713 bitsize_2, 0, part2, align);
716 /* Generate code to extract a byte-field from STR_RTX
717 containing BITSIZE bits, starting at BITNUM,
718 and put it in TARGET if possible (if TARGET is nonzero).
719 Regardless of TARGET, we return the rtx for where the value is placed.
722 STR_RTX is the structure containing the byte (a REG or MEM).
723 UNSIGNEDP is nonzero if this is an unsigned bit field.
724 MODE is the natural mode of the field value once extracted.
725 TMODE is the mode the caller would like the value to have;
726 but the value may be returned with type MODE instead.
728 ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
729 TOTAL_SIZE is the size in bytes of the containing structure,
732 If a TARGET is specified and we can store in it at no extra cost,
733 we do so, and return TARGET.
734 Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
735 if they are equally easy. */
/* NOTE(review): some lines of this function are missing in this chunk
   (declarations, braces, parts of conditions, #endif markers);
   annotated as-is.  Structurally it mirrors store_bit_field: SUBREG
   stripping, multi-word recursion, then an extzv (unsigned) path and
   an extv (signed) path, each falling back to extract_fixed_bit_field.  */
738 extract_bit_field (str_rtx, bitsize, bitnum, unsignedp,
739 target, mode, tmode, align, total_size)
741 register int bitsize;
745 enum machine_mode mode, tmode;
/* UNIT is the granularity BITNUM is measured in: bits-per-byte for
   memory operands, bits-per-word for registers.  */
749 int unit = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
750 register int offset = bitnum / unit;
751 register int bitpos = bitnum % unit;
752 register rtx op0 = str_rtx;
753 rtx spec_target = target;
754 rtx spec_target_subreg = 0;
756 if (GET_CODE (str_rtx) == MEM && ! MEM_IN_STRUCT_P (str_rtx))
759 /* Discount the part of the structure before the desired byte.
760 We need to know how many bytes are safe to reference after it. */
762 total_size -= (bitpos / BIGGEST_ALIGNMENT
763 * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));
765 if (tmode == VOIDmode)
/* Strip SUBREGs, folding their word offsets into OFFSET.  */
768 while (GET_CODE (op0) == SUBREG)
770 offset += SUBREG_WORD (op0);
771 op0 = SUBREG_REG (op0);
775 /* If OP0 is a register, BITPOS must count within a word.
776 But as we have it, it counts within whatever size OP0 now has.
777 On a bigendian machine, these are not the same, so convert. */
778 if (GET_CODE (op0) != MEM && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
779 bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
782 /* Extracting a full-word or multi-word value
783 from a structure in a register.
784 This can be done with just SUBREG.
785 So too extracting a subword value in
786 the least significant part of the register. */
788 if (GET_CODE (op0) == REG
789 && ((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
790 && bitpos % BITS_PER_WORD == 0)
791 || (mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0) != BLKmode
793 && bitpos + bitsize == BITS_PER_WORD
799 enum machine_mode mode1
800 = mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0);
802 if (mode1 != GET_MODE (op0))
803 op0 = gen_rtx (SUBREG, mode1, op0, offset);
806 return convert_to_mode (tmode, op0, unsignedp);
810 /* Handle fields bigger than a word. */
812 if (bitsize > BITS_PER_WORD)
814 /* Here we transfer the words of the field
815 in the order least significant first.
816 This is because the most significant word is the one which may
817 be less than full. */
819 int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
822 if (target == 0 || GET_CODE (target) != REG)
823 target = gen_reg_rtx (mode);
825 for (i = 0; i < nwords; i++)
827 /* If I is 0, use the low-order word in both field and target;
828 if I is 1, use the next to lowest word; and so on. */
829 int wordnum = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
830 int bit_offset = (WORDS_BIG_ENDIAN
831 ? MAX (0, bitsize - (i + 1) * BITS_PER_WORD)
832 : i * BITS_PER_WORD);
833 rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
/* Recurse to extract each word-sized (or smaller, last) piece.  */
835 = extract_bit_field (op0, MIN (BITS_PER_WORD,
836 bitsize - i * BITS_PER_WORD),
838 1, target_part, mode, word_mode,
841 if (target_part == 0)
844 if (result_part != target_part)
845 emit_move_insn (target_part, result_part);
851 /* From here on we know the desired field is smaller than a word
852 so we can assume it is an integer. So we can safely extract it as one
853 size of integer, if necessary, and then truncate or extend
854 to the size that is wanted. */
856 /* OFFSET is the number of words or bytes (UNIT says which)
857 from STR_RTX to the first word or byte containing part of the field. */
859 if (GET_CODE (op0) == REG)
862 || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
863 op0 = gen_rtx (SUBREG, TYPE_MODE (type_for_size (BITS_PER_WORD, 0)),
869 op0 = protect_from_queue (str_rtx, 1);
872 /* Now OFFSET is nonzero only for memory operands. */
/* Unsigned extraction: try the target's extzv pattern, if present
   and wide enough for this field.  */
878 && (GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extzv][0])
881 int xbitpos = bitpos, xoffset = offset;
882 rtx bitsize_rtx, bitpos_rtx;
883 rtx last = get_last_insn();
885 rtx xtarget = target;
886 rtx xspec_target = spec_target;
887 rtx xspec_target_subreg = spec_target_subreg;
889 enum machine_mode maxmode
890 = insn_operand_mode[(int) CODE_FOR_extzv][0];
892 if (GET_CODE (xop0) == MEM)
894 int save_volatile_ok = volatile_ok;
897 /* Is the memory operand acceptable? */
899 || ! ((*insn_operand_predicate[(int) CODE_FOR_extzv][1])
900 (xop0, GET_MODE (xop0))))
902 /* No, load into a reg and extract from there. */
903 enum machine_mode bestmode;
905 /* Get the mode to use for inserting into this field. If
906 OP0 is BLKmode, get the smallest mode consistent with the
907 alignment. If OP0 is a non-BLKmode object that is no
908 wider than MAXMODE, use its mode. Otherwise, use the
909 smallest mode containing the field. */
911 if (GET_MODE (xop0) == BLKmode
912 || (GET_MODE_SIZE (GET_MODE (op0))
913 > GET_MODE_SIZE (maxmode)))
914 bestmode = get_best_mode (bitsize, bitnum,
915 align * BITS_PER_UNIT, maxmode,
916 MEM_VOLATILE_P (xop0));
918 bestmode = GET_MODE (xop0);
920 if (bestmode == VOIDmode)
923 /* Compute offset as multiple of this unit,
924 counting in bytes. */
925 unit = GET_MODE_BITSIZE (bestmode);
926 xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
927 xbitpos = bitnum % unit;
928 xop0 = change_address (xop0, bestmode,
929 plus_constant (XEXP (xop0, 0),
931 /* Fetch it to a register in that size. */
932 xop0 = force_reg (bestmode, xop0);
934 /* XBITPOS counts within UNIT, which is what is expected. */
937 /* Get ref to first byte containing part of the field. */
938 xop0 = change_address (xop0, byte_mode,
939 plus_constant (XEXP (xop0, 0), xoffset));
941 volatile_ok = save_volatile_ok;
944 /* If op0 is a register, we need it in MAXMODE (which is usually
945 SImode) to make it acceptable to the format of extzv. */
946 if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
948 if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
949 xop0 = gen_rtx (SUBREG, maxmode, xop0, 0);
951 /* On big-endian machines, we count bits from the most significant.
952 If the bit field insn does not, we must invert. */
953 #if BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN
954 xbitpos = unit - bitsize - xbitpos;
956 /* Now convert from counting within UNIT to counting in MAXMODE. */
958 if (GET_CODE (xop0) != MEM)
959 xbitpos += GET_MODE_BITSIZE (maxmode) - unit;
961 unit = GET_MODE_BITSIZE (maxmode);
964 || (flag_force_mem && GET_CODE (xtarget) == MEM))
965 xtarget = xspec_target = gen_reg_rtx (tmode);
967 if (GET_MODE (xtarget) != maxmode)
969 if (GET_CODE (xtarget) == REG)
970 xspec_target_subreg = xtarget = gen_lowpart (maxmode, xtarget);
972 xtarget = gen_reg_rtx (maxmode);
975 /* If this machine's extzv insists on a register target,
976 make sure we have one. */
977 if (! ((*insn_operand_predicate[(int) CODE_FOR_extzv][0])
979 xtarget = gen_reg_rtx (maxmode);
981 bitsize_rtx = gen_rtx (CONST_INT, VOIDmode, bitsize);
982 bitpos_rtx = gen_rtx (CONST_INT, VOIDmode, xbitpos);
984 pat = gen_extzv (protect_from_queue (xtarget, 1),
985 xop0, bitsize_rtx, bitpos_rtx);
/* Pattern succeeded: commit the speculative target bookkeeping.  */
990 spec_target = xspec_target;
991 spec_target_subreg = xspec_target_subreg;
/* Pattern failed: discard emitted insns, fall back to shifts/masks.  */
995 delete_insns_since (last);
996 target = extract_fixed_bit_field (tmode, op0, offset, bitsize,
997 bitpos, target, 1, align);
1003 target = extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
/* Signed extraction: same strategy using the target's extv pattern.  */
1010 && (GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extv][0])
1013 int xbitpos = bitpos, xoffset = offset;
1014 rtx bitsize_rtx, bitpos_rtx;
1015 rtx last = get_last_insn();
1016 rtx xop0 = op0, xtarget = target;
1017 rtx xspec_target = spec_target;
1018 rtx xspec_target_subreg = spec_target_subreg;
1020 enum machine_mode maxmode
1021 = insn_operand_mode[(int) CODE_FOR_extv][0];
1023 if (GET_CODE (xop0) == MEM)
1025 /* Is the memory operand acceptable? */
1026 if (! ((*insn_operand_predicate[(int) CODE_FOR_extv][1])
1027 (xop0, GET_MODE (xop0))))
1029 /* No, load into a reg and extract from there. */
1030 enum machine_mode bestmode;
1032 /* Get the mode to use for inserting into this field. If
1033 OP0 is BLKmode, get the smallest mode consistent with the
1034 alignment. If OP0 is a non-BLKmode object that is no
1035 wider than MAXMODE, use its mode. Otherwise, use the
1036 smallest mode containing the field. */
1038 if (GET_MODE (xop0) == BLKmode
1039 || (GET_MODE_SIZE (GET_MODE (op0))
1040 > GET_MODE_SIZE (maxmode)))
1041 bestmode = get_best_mode (bitsize, bitnum,
1042 align * BITS_PER_UNIT, maxmode,
1043 MEM_VOLATILE_P (xop0));
1045 bestmode = GET_MODE (xop0);
1047 if (bestmode == VOIDmode)
1050 /* Compute offset as multiple of this unit,
1051 counting in bytes. */
1052 unit = GET_MODE_BITSIZE (bestmode);
1053 xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
1054 xbitpos = bitnum % unit;
1055 xop0 = change_address (xop0, bestmode,
1056 plus_constant (XEXP (xop0, 0),
1058 /* Fetch it to a register in that size. */
1059 xop0 = force_reg (bestmode, xop0);
1061 /* XBITPOS counts within UNIT, which is what is expected. */
1064 /* Get ref to first byte containing part of the field. */
1065 xop0 = change_address (xop0, byte_mode,
1066 plus_constant (XEXP (xop0, 0), xoffset));
1069 /* If op0 is a register, we need it in MAXMODE (which is usually
1070 SImode) to make it acceptable to the format of extv. */
1071 if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
1073 if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
1074 xop0 = gen_rtx (SUBREG, maxmode, xop0, 0);
1076 /* On big-endian machines, we count bits from the most significant.
1077 If the bit field insn does not, we must invert. */
1078 #if BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN
1079 xbitpos = unit - bitsize - xbitpos;
1081 /* XBITPOS counts within a size of UNIT.
1082 Adjust to count within a size of MAXMODE. */
1084 if (GET_CODE (xop0) != MEM)
1085 xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);
1087 unit = GET_MODE_BITSIZE (maxmode);
1090 || (flag_force_mem && GET_CODE (xtarget) == MEM))
1091 xtarget = xspec_target = gen_reg_rtx (tmode);
1093 if (GET_MODE (xtarget) != maxmode)
1095 if (GET_CODE (xtarget) == REG)
1096 xspec_target_subreg = xtarget = gen_lowpart (maxmode, xtarget);
1098 xtarget = gen_reg_rtx (maxmode);
1101 /* If this machine's extv insists on a register target,
1102 make sure we have one. */
1103 if (! ((*insn_operand_predicate[(int) CODE_FOR_extv][0])
1104 (xtarget, maxmode)))
1105 xtarget = gen_reg_rtx (maxmode);
1107 bitsize_rtx = gen_rtx (CONST_INT, VOIDmode, bitsize);
1108 bitpos_rtx = gen_rtx (CONST_INT, VOIDmode, xbitpos);
1110 pat = gen_extv (protect_from_queue (xtarget, 1),
1111 xop0, bitsize_rtx, bitpos_rtx);
1116 spec_target = xspec_target;
1117 spec_target_subreg = xspec_target_subreg;
1121 delete_insns_since (last);
1122 target = extract_fixed_bit_field (tmode, op0, offset, bitsize,
1123 bitpos, target, 0, align);
1129 target = extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
/* Final mode fixup: get the extracted value into TMODE (or MODE).  */
1132 if (target == spec_target)
1134 if (target == spec_target_subreg)
1136 if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
1138 /* If the target mode is floating-point, first convert to the
1139 integer mode of that size and then access it as a floating-point
1140 value via a SUBREG. */
1141 if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
1143 target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
1146 if (GET_CODE (target) != REG)
1147 target = copy_to_reg (target);
1148 return gen_rtx (SUBREG, tmode, target, 0);
1151 return convert_to_mode (tmode, target, unsignedp);
1156 /* Extract a bit field using shifts and boolean operations
1157 Returns an rtx to represent the value.
1158 OP0 addresses a register (word) or memory (byte).
1159 BITPOS says which bit within the word or byte the bit field starts in.
1160 OFFSET says how many bytes farther the bit field starts;
1161 it is 0 if OP0 is a register.
1162 BITSIZE says how many bits long the bit field is.
1163 (If OP0 is a register, it may be narrower than a full word,
1164 but BITPOS still counts within a full word,
1165 which is significant on bigendian machines.)
1167 UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
1168 If TARGET is nonzero, attempts to store the value there
1169 and return TARGET, but this is not guaranteed.
1170 If TARGET is not used, create a pseudo-reg of mode TMODE for the value.
1172 ALIGN is the alignment that STR_RTX is known to have, measured in bytes. */
/* NOTE(review): the embedded original line numbers skip in this excerpt
   (e.g. 1179 -> 1183, 1204 -> 1206), so statements, braces and some
   declarations are missing; the fragment below is not compilable as-is.  */
1175 extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
1176 target, unsignedp, align)
1177 enum machine_mode tmode;
1178 register rtx op0, target;
1179 register int offset, bitsize, bitpos;
/* Default span for register operands; replaced by the chosen memory
   mode's width below when OP0 is a MEM.  */
1183 int total_bits = BITS_PER_WORD;
1184 enum machine_mode mode;
1186 if (GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
1188 /* Special treatment for a bit field split across two registers. */
1189 if (bitsize + bitpos > BITS_PER_WORD)
1190 return extract_split_bit_field (op0, bitsize, bitpos,
1195 /* Get the proper mode to use for this field. We want a mode that
1196 includes the entire field. If such a mode would be larger than
1197 a word, we won't be doing the extraction the normal way. */
1199 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
1200 align * BITS_PER_UNIT, word_mode,
1201 GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
1203 if (mode == VOIDmode)
1204 /* The only way this should occur is if the field spans word
1206 return extract_split_bit_field (op0, bitsize,
1207 bitpos + offset * BITS_PER_UNIT,
1210 total_bits = GET_MODE_BITSIZE (mode);
1212 /* Get ref to an aligned byte, halfword, or word containing the field.
1213 Adjust BITPOS to be position within a word,
1214 and OFFSET to be the offset of that word.
1215 Then alter OP0 to refer to that word. */
1216 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
1217 offset -= (offset % (total_bits / BITS_PER_UNIT));
1218 op0 = change_address (op0, mode,
1219 plus_constant (XEXP (op0, 0), offset));
1222 mode = GET_MODE (op0);
1224 #if BYTES_BIG_ENDIAN
1225 /* BITPOS is the distance between our msb and that of OP0.
1226 Convert it to the distance from the lsb. */
1228 bitpos = total_bits - bitsize - bitpos;
1230 /* Now BITPOS is always the distance between the field's lsb and that of OP0.
1231 We have reduced the big-endian case to the little-endian case. */
1237 /* If the field does not already start at the lsb,
1238 shift it so it does. */
1239 tree amount = build_int_2 (bitpos, 0);
1240 /* Maybe propagate the target for the shift. */
1241 /* But not if we will return it--could confuse integrate.c. */
1242 rtx subtarget = (target != 0 && GET_CODE (target) == REG
1243 && !REG_FUNCTION_VALUE_P (target)
1245 if (tmode != mode) subtarget = 0;
1246 op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1248 /* Convert the value to the desired mode. */
1250 op0 = convert_to_mode (tmode, op0, 1);
1252 /* Unless the msb of the field used to be the msb when we shifted,
1253 mask out the upper bits. */
1255 if (GET_MODE_BITSIZE (mode) != bitpos + bitsize
1257 #ifdef SLOW_ZERO_EXTEND
1258 /* Always generate an `and' if
1259 we just zero-extended op0 and SLOW_ZERO_EXTEND, since it
1260 will combine fruitfully with the zero-extend. */
1265 return expand_binop (GET_MODE (op0), and_optab, op0,
1266 mask_rtx (GET_MODE (op0), 0, bitsize, 0),
1267 target, 1, OPTAB_LIB_WIDEN);
/* Signed case: sign extension is done with a left shift that puts the
   field's msb at the word's msb, followed by an arithmetic right shift.  */
1271 /* To extract a signed bit-field, first shift its msb to the msb of the word,
1272 then arithmetic-shift its lsb to the lsb of the word. */
1273 op0 = force_reg (mode, op0);
1277 /* Find the narrowest integer mode that contains the field. */
1279 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1280 mode = GET_MODE_WIDER_MODE (mode))
1281 if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
1283 op0 = convert_to_mode (mode, op0, 0);
1287 if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
1289 tree amount = build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
1290 /* Maybe propagate the target for the shift. */
1291 /* But not if we will return the result--could confuse integrate.c. */
1292 rtx subtarget = (target != 0 && GET_CODE (target) == REG
1293 && ! REG_FUNCTION_VALUE_P (target)
1295 op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1298 return expand_shift (RSHIFT_EXPR, mode, op0,
1299 build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
1303 /* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
1304 of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
1305 complement of that if COMPLEMENT. The mask is truncated if
1306 necessary to the width of mode MODE. */
/* NOTE(review): interior lines are missing from this excerpt (original
   numbering skips, e.g. 1316 -> 1320); the else-arms that zero MASKLOW /
   MASKHIGH are among the absent lines.  */
1309 mask_rtx (mode, bitpos, bitsize, complement)
1310 enum machine_mode mode;
1311 int bitpos, bitsize, complement;
/* Low and high halves of the (up to) double-word mask, each holding
   HOST_BITS_PER_INT bits.  */
1313 int masklow, maskhigh;
1315 if (bitpos < HOST_BITS_PER_INT)
/* NOTE(review): left-shifting -1 relies on two's-complement behavior;
   undefined by modern ISO C (CERT INT34-C) but pervasive in code of
   this vintage — do not "fix" in isolation.  */
1316 masklow = -1 << bitpos;
1320 if (bitpos + bitsize < HOST_BITS_PER_INT)
1321 masklow &= (unsigned) -1 >> (HOST_BITS_PER_INT - bitpos - bitsize);
1323 if (bitpos <= HOST_BITS_PER_INT)
1326 maskhigh = -1 << (bitpos - HOST_BITS_PER_INT);
1328 if (bitpos + bitsize > HOST_BITS_PER_INT)
1329 maskhigh &= (unsigned) -1 >> (2 * HOST_BITS_PER_INT - bitpos - bitsize);
/* The COMPLEMENT case inverts both halves (masklow inversion is in a
   missing line of this excerpt).  */
1335 maskhigh = ~maskhigh;
1339 return immed_double_const (masklow, maskhigh, mode);
1342 /* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
1343 VALUE truncated to BITSIZE bits and then shifted left BITPOS bits. */
/* NOTE(review): lines are missing from this excerpt (e.g. the declaration
   of LOW/HIGH and the assignments to LOW around original lines 1350/1359).  */
1346 lshift_value (mode, value, bitpos, bitsize)
1347 enum machine_mode mode;
1349 int bitpos, bitsize;
/* Work on the host-int image of the CONST_INT operand.  */
1351 unsigned v = INTVAL (value);
1354 if (bitsize < HOST_BITS_PER_INT)
1355 v &= ~(-1 << bitsize);
1357 if (bitpos < HOST_BITS_PER_INT)
/* Bits shifted out of the low host word spill into HIGH.  */
1360 high = (bitpos > 0 ? (v >> (HOST_BITS_PER_INT - bitpos)) : 0);
1365 high = v << (bitpos - HOST_BITS_PER_INT);
1368 return immed_double_const (low, high, mode);
1371 /* Extract a bit field that is split across two words
1372 and return an RTX for the result.
1374 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1375 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
1376 UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend. */
/* NOTE(review): this excerpt is decimated (original numbering skips);
   some declarations (e.g. WORD) and braces are in missing lines.  */
1379 extract_split_bit_field (op0, bitsize, bitpos, unsignedp, align)
1381 int bitsize, bitpos, unsignedp, align;
1383 /* BITSIZE_1 is size of the part in the first word. */
1384 int bitsize_1 = BITS_PER_WORD - bitpos % BITS_PER_WORD;
1385 /* BITSIZE_2 is size of the rest (in the following word). */
1386 int bitsize_2 = bitsize - bitsize_1;
1387 rtx part1, part2, result;
/* MEM is addressed in bytes; REG/SUBREG in words.  */
1388 int unit = GET_CODE (op0) == MEM ? BITS_PER_UNIT : BITS_PER_WORD;
1389 int offset = bitpos / unit;
1392 /* The field must span exactly one word boundary. */
1393 if (bitpos / BITS_PER_WORD != (bitpos + bitsize - 1) / BITS_PER_WORD - 1)
1396 /* Get the part of the bit field from the first word. If OP0 is a MEM,
1397 pass OP0 and the offset computed above. Otherwise, get the proper
1398 word and pass an offset of zero. */
1399 word = (GET_CODE (op0) == MEM ? op0
1400 : operand_subword_force (op0, offset, GET_MODE (op0)));
/* Both parts are fetched zero-extended (unsignedp argument == 1); any
   sign extension is applied once at the end.  */
1401 part1 = extract_fixed_bit_field (word_mode, word,
1402 GET_CODE (op0) == MEM ? offset : 0,
1403 bitsize_1, bitpos % unit, 0, 1, align);
1405 /* Offset op0 by 1 word to get to the following one. */
1406 if (GET_CODE (op0) == SUBREG)
1407 word = operand_subword_force (SUBREG_REG (op0),
1408 SUBREG_WORD (op0) + offset + 1, VOIDmode);
1409 else if (GET_CODE (op0) == MEM)
1412 word = operand_subword_force (op0, offset + 1, GET_MODE (op0));
1414 /* Get the part of the bit field from the second word. */
1415 part2 = extract_fixed_bit_field (word_mode, word,
1416 (GET_CODE (op0) == MEM
1417 ? CEIL (offset + 1, UNITS_PER_WORD) * UNITS_PER_WORD
1419 bitsize_2, 0, 0, 1, align);
1421 /* Shift the more significant part up to fit above the other part. */
1422 #if BYTES_BIG_ENDIAN
1423 part1 = expand_shift (LSHIFT_EXPR, word_mode, part1,
1424 build_int_2 (bitsize_2, 0), 0, 1);
1426 part2 = expand_shift (LSHIFT_EXPR, word_mode, part2,
1427 build_int_2 (bitsize_1, 0), 0, 1);
1430 /* Combine the two parts with bitwise or. This works
1431 because we extracted both parts as unsigned bit fields. */
1432 result = expand_binop (word_mode, ior_optab, part1, part2, 0, 1,
1435 /* Unsigned bit field: we are done. */
1438 /* Signed bit field: sign-extend with two arithmetic shifts. */
1439 result = expand_shift (LSHIFT_EXPR, word_mode, result,
1440 build_int_2 (BITS_PER_WORD - bitsize, 0), 0, 0);
1441 return expand_shift (RSHIFT_EXPR, word_mode, result,
1442 build_int_2 (BITS_PER_WORD - bitsize, 0), 0, 0);
1445 /* Add INC into TARGET. */
/* Emits TARGET = TARGET + INC via expand_binop, moving the result back
   into TARGET if the binop placed it elsewhere.
   NOTE(review): the parameter declarations and the `target, inc,' operand
   line are in lines missing from this excerpt.  */
1448 expand_inc (target, inc)
1451 rtx value = expand_binop (GET_MODE (target), add_optab,
1453 target, 0, OPTAB_LIB_WIDEN);
1454 if (value != target)
1455 emit_move_insn (target, value);
1458 /* Subtract DEC from TARGET. */
/* Emits TARGET = TARGET - DEC; mirror image of expand_inc above.
   NOTE(review): the parameter declarations and the `target, dec,' operand
   line are in lines missing from this excerpt.  */
1461 expand_dec (target, dec)
1464 rtx value = expand_binop (GET_MODE (target), sub_optab,
1466 target, 0, OPTAB_LIB_WIDEN);
1467 if (value != target)
1468 emit_move_insn (target, value);
1471 /* Output a shift instruction for expression code CODE,
1472 with SHIFTED being the rtx for the value to shift,
1473 and AMOUNT the tree for the amount to shift by.
1474 Store the result in the rtx TARGET, if that is convenient.
1475 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
1476 Return the rtx for where the value is. */
/* NOTE(review): this excerpt is decimated (original numbering skips);
   several declarations (e.g. xop1, width, pat, target1) and control-flow
   lines are missing, so the fragment below is not compilable as-is.  */
1479 expand_shift (code, mode, shifted, amount, target, unsignedp)
1480 enum tree_code code;
1481 register enum machine_mode mode;
1484 register rtx target;
1487 register rtx op1, temp = 0;
1488 register int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
1489 register int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
1492 /* Previously detected shift-counts computed by NEGATE_EXPR
1493 and shifted in the other direction; but that does not work
1496 op1 = expand_expr (amount, 0, VOIDmode, 0);
1498 if (op1 == const0_rtx)
/* Strategy loop: try up to three optab methods in order of increasing
   cost — direct insn, widened insn, then library call.  */
1501 for (try = 0; temp == 0 && try < 3; try++)
1503 enum optab_methods methods;
1506 methods = OPTAB_DIRECT;
1508 methods = OPTAB_WIDEN;
1510 methods = OPTAB_LIB_WIDEN;
1514 /* Widening does not work for rotation. */
1515 if (methods == OPTAB_WIDEN)
1517 else if (methods == OPTAB_LIB_WIDEN)
1518 methods = OPTAB_LIB;
1520 temp = expand_binop (mode,
1521 left ? rotl_optab : rotr_optab,
1522 shifted, op1, target, unsignedp, methods);
1526 temp = expand_binop (mode,
1527 left ? lshl_optab : lshr_optab,
1528 shifted, op1, target, unsignedp, methods);
/* For left shifts, logical and arithmetic are the same insn, so an
   ashl pattern can also serve.  */
1529 if (temp == 0 && left)
1530 temp = expand_binop (mode, ashl_optab,
1531 shifted, op1, target, unsignedp, methods);
1534 /* Do arithmetic shifts.
1535 Also, if we are going to widen the operand, we can just as well
1536 use an arithmetic right-shift instead of a logical one. */
1537 if (temp == 0 && ! rotate
1538 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
1540 enum optab_methods methods1 = methods;
1542 /* If trying to widen a log shift to an arithmetic shift,
1543 don't accept an arithmetic shift of the same size. */
1545 methods1 = OPTAB_MUST_WIDEN;
1547 /* Arithmetic shift */
1549 temp = expand_binop (mode,
1550 left ? ashl_optab : ashr_optab,
1551 shifted, op1, target, unsignedp, methods1);
1555 /* We can do a logical (unsigned) right shift with a bit-field
1556 extract insn. But first check if one of the above methods worked. */
1560 if (unsignedp && code == RSHIFT_EXPR && ! BITS_BIG_ENDIAN && HAVE_extzv)
1562 enum machine_mode output_mode
1563 = insn_operand_mode[(int) CODE_FOR_extzv][0];
1565 if ((methods == OPTAB_DIRECT && mode == output_mode)
1566 || (methods == OPTAB_WIDEN
1567 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (output_mode)))
1569 /* Note convert_to_mode does protect_from_queue. */
1570 rtx shifted1 = convert_to_mode (output_mode, shifted, 1);
1571 enum machine_mode length_mode
1572 = insn_operand_mode[(int) CODE_FOR_extzv][2];
1573 enum machine_mode pos_mode
1574 = insn_operand_mode[(int) CODE_FOR_extzv][3];
/* Remember the insn stream position so a failed gen_extzv attempt
   can be rolled back with delete_insns_since below.  */
1576 rtx last = get_last_insn ();
1582 target1 = protect_from_queue (target, 1);
1584 /* We define extract insns as having OUTPUT_MODE in a register
1585 and the mode of operand 1 in memory. Since we want
1586 OUTPUT_MODE, we will always force the operand into a
1587 register. At some point we might want to support MEM
1589 shifted1 = force_reg (output_mode, shifted1);
1591 /* If we don't have or cannot use a suggested target,
1592 make a place for the result, in the proper mode. */
1593 if (methods == OPTAB_WIDEN || target1 == 0
1594 || ! ((*insn_operand_predicate[(int) CODE_FOR_extzv][0])
1595 (target1, output_mode)))
1596 target1 = gen_reg_rtx (output_mode);
1598 xop1 = convert_to_mode (pos_mode, xop1,
1599 TREE_UNSIGNED (TREE_TYPE (amount)));
1601 /* If this machine's extzv insists on a register for
1602 operand 3 (position), arrange for that. */
1603 if (! ((*insn_operand_predicate[(int) CODE_FOR_extzv][3])
1605 xop1 = force_reg (pos_mode, xop1);
1607 /* WIDTH gets the width of the bit field to extract:
1608 wordsize minus # bits to shift by. */
1609 if (GET_CODE (xop1) == CONST_INT)
1610 width = gen_rtx (CONST_INT, VOIDmode,
1611 (GET_MODE_BITSIZE (mode) - INTVAL (op1)));
1614 /* Now get the width in the proper mode. */
1615 width = convert_to_mode (length_mode, op1,
1616 TREE_UNSIGNED (TREE_TYPE (amount)));
/* Non-constant count: compute width = bitsize(mode) - count at
   run time.  */
1618 width = expand_binop (length_mode, sub_optab,
1619 gen_rtx (CONST_INT, VOIDmode,
1620 GET_MODE_BITSIZE (mode)),
1621 width, 0, 0, OPTAB_LIB_WIDEN);
1624 /* If this machine's extzv insists on a register for
1625 operand 2 (length), arrange for that. */
1626 if (! ((*insn_operand_predicate[(int) CODE_FOR_extzv][2])
1627 (width, length_mode)))
1628 width = force_reg (length_mode, width);
1630 /* Now extract with WIDTH, omitting OP1 least sig bits. */
1631 pat = gen_extzv (target1, shifted1, width, xop1);
1635 temp = convert_to_mode (mode, target1, 1);
1638 delete_insns_since (last);
1641 /* Can also do logical shift with signed bit-field extract
1642 followed by inserting the bit-field at a different position.
1643 That strategy is not yet implemented. */
1645 #endif /* HAVE_extzv */
/* Operation kinds recorded by synth_mult when decomposing a constant
   multiplication into shifts and adds.  */
1653 enum alg_code { alg_add, alg_subtract, alg_compound };
1655 /* This structure records a sequence of operations.
1656 `ops' is the number of operations recorded.
1657 `cost' is their total cost.
1658 The operations are stored in `op' and the corresponding
1659 integer coefficients in `coeff'.
1660 These are the operations:
1661 alg_add Add to the total the multiplicand times the coefficient.
1662 alg_subtract Subtract the multiplicand times the coefficient.
1663 alg_compound This coefficient plus or minus the following one
1664 is multiplied into the total. The following operation
1665 is alg_add or alg_subtract to indicate whether to add
1666 or subtract the two coefficients. */
1668 #ifndef MAX_BITS_PER_WORD
1669 #define MAX_BITS_PER_WORD BITS_PER_WORD
/* NOTE(review): the matching #endif, the `struct algorithm {' header and
   its `ops'/`cost' members fall in lines missing from this excerpt
   (original numbering jumps 1669 -> 1676).  */
1676 enum alg_code op[MAX_BITS_PER_WORD];
1677 unsigned int coeff[MAX_BITS_PER_WORD];
1680 /* Compute and return the best algorithm for multiplying by T.
1681 Assume that add insns cost ADD_COST and shifts cost SHIFT_COST.
1682 Return cost -1 if would cost more than MAX_COST. */
/* NOTE(review): recursive cost-bounded search. This excerpt is decimated
   (original numbering skips), so several declarations (t, q, w, m, cost),
   braces and early returns are missing; fragment is not compilable as-is.  */
1684 static struct algorithm
1685 synth_mult (t, add_cost, shift_cost, max_cost)
1687 int add_cost, shift_cost;
/* alloca'd scratch copies: BEST_ALG accumulates the cheapest algorithm
   found so far; ALG_IN receives each recursive candidate.  */
1691 struct algorithm *best_alg = (struct algorithm *)alloca (sizeof (struct algorithm));
1692 struct algorithm *alg_in = (struct algorithm *)alloca (sizeof (struct algorithm));
1695 /* No matter what happens, we want to return a valid algorithm. */
1696 best_alg->cost = max_cost;
1699 /* Is t an exponent of 2, so we can just do a shift? */
1705 if (max_cost >= shift_cost)
1707 best_alg->cost = shift_cost;
1709 best_alg->op[0] = alg_add;
1710 best_alg->coeff[0] = t;
1713 best_alg->cost = -1;
1726 /* If MAX_COST just permits as little as an addition (or less), we won't
1727 succeed in synthesizing an algorithm for t. Return immediately with
1728 an indication of failure. */
1729 if (max_cost <= add_cost)
1731 best_alg->cost = -1;
1735 /* Look for factors of t of the form
1736 t = q(2**m +- 1), 2 <= m <= floor(log2(t)) - 1.
1737 If we find such a factor, we can multiply by t using an algorithm that
1738 multiplies by q, shift the result by m and add/subtract it to itself. */
1740 for (m = floor_log2 (t) - 1; m >= 2; m--)
1742 int m_exp_2 = 1 << m;
/* Cost of this step: one add plus two shifts, on top of the cost of
   the recursive multiply by q.  */
1750 cost = add_cost + shift_cost * 2;
1752 *alg_in = synth_mult (q, add_cost, shift_cost,
1753 MIN (max_cost, best_alg->cost) - cost);
1755 if (alg_in->cost >= 0)
1757 cost += alg_in->cost;
1759 if (cost < best_alg->cost)
1761 struct algorithm *x;
/* Record the factoring step: multiply by (2**m + 1), i.e. an
   alg_compound entry followed by alg_add.  */
1765 best_alg->coeff[best_alg->ops] = m_exp_2;
1766 best_alg->op[best_alg->ops++] = alg_compound;
1767 best_alg->coeff[best_alg->ops] = 1;
1768 best_alg->op[best_alg->ops++] = alg_add;
1769 best_alg->cost = cost;
/* Same search for factors of the form (2**m - 1): alg_compound
   followed by alg_subtract.  */
1779 cost = add_cost + shift_cost * 2;
1781 *alg_in = synth_mult (q, add_cost, shift_cost,
1782 MIN (max_cost, best_alg->cost) - cost);
1784 if (alg_in->cost >= 0)
1786 cost += alg_in->cost;
1788 if (cost < best_alg->cost)
1790 struct algorithm *x;
1794 best_alg->coeff[best_alg->ops] = m_exp_2;
1795 best_alg->op[best_alg->ops++] = alg_compound;
1796 best_alg->coeff[best_alg->ops] = 1;
1797 best_alg->op[best_alg->ops++] = alg_subtract;
1798 best_alg->cost = cost;
1804 /* Try load effective address instructions, i.e. do a*3, a*5, a*9. */
1810 q = t & -t; /* get out lsb */
1811 w = (t - q) & -(t - q); /* get out next lsb */
1813 if (w / q <= lea_max_mul)
1815 cost = lea_cost + (q != 1 ? shift_cost : 0);
1817 *alg_in = synth_mult (t - q - w, add_cost, shift_cost,
1818 MIN (max_cost, best_alg->cost) - cost);
1820 if (alg_in->cost >= 0)
1822 cost += alg_in->cost;
1824 /* Use <= to prefer this method to the factoring method
1825 when the cost appears the same, because this method
1826 uses fewer temporary registers. */
1827 if (cost <= best_alg->cost)
1829 struct algorithm *x;
1833 best_alg->coeff[best_alg->ops] = w;
1834 best_alg->op[best_alg->ops++] = alg_add;
1835 best_alg->coeff[best_alg->ops] = q;
1836 best_alg->op[best_alg->ops++] = alg_add;
1837 best_alg->cost = cost;
1843 /* Now, use the good old method to add or subtract at the leftmost
1850 q = t & -t; /* get out lsb */
/* W scans the contiguous run of set bits starting at the lsb.  */
1851 for (w = q; (w & t) != 0; w <<= 1)
1854 /* Reject the case where t has only two bits.
1855 Thus we prefer addition in that case. */
1856 && !(t < w && w == q << 2))
1858 /* There are many bits in a row. Make 'em by subtraction. */
1864 *alg_in = synth_mult (t + q, add_cost, shift_cost,
1865 MIN (max_cost, best_alg->cost) - cost);
1867 if (alg_in->cost >= 0)
1869 cost += alg_in->cost;
1871 /* Use <= to prefer this method to the factoring method
1872 when the cost appears the same, because this method
1873 uses fewer temporary registers. */
1874 if (cost <= best_alg->cost)
1876 struct algorithm *x;
1880 best_alg->coeff[best_alg->ops] = q;
1881 best_alg->op[best_alg->ops++] = alg_subtract;
1882 best_alg->cost = cost;
1888 /* There's only one bit at the left. Make it by addition. */
1894 *alg_in = synth_mult (t - q, add_cost, shift_cost,
1895 MIN (max_cost, best_alg->cost) - cost);
1897 if (alg_in->cost >= 0)
1899 cost += alg_in->cost;
1901 if (cost <= best_alg->cost)
1903 struct algorithm *x;
1907 best_alg->coeff[best_alg->ops] = q;
1908 best_alg->op[best_alg->ops++] = alg_add;
1909 best_alg->cost = cost;
/* Nothing beat MAX_COST: signal failure with cost == -1 per contract.  */
1915 if (best_alg->cost >= max_cost)
1916 best_alg->cost = -1;
1920 /* Perform a multiplication and return an rtx for the result.
1921 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
1922 TARGET is a suggestion for where to store the result (an rtx).
1924 We check specially for a constant integer as OP1.
1925 If you want this check for OP0 as well, then before calling
1926 you should swap the two operands if OP0 would be constant. */
/* NOTE(review): this excerpt is decimated (original numbering skips);
   declarations (accum, tem, opno, negate), several case labels, braces
   and the final return are in missing lines; not compilable as-is.  */
1929 expand_mult (mode, op0, op1, target, unsignedp)
1930 enum machine_mode mode;
1931 register rtx op0, op1, target;
1934 rtx const_op1 = op1;
1936 /* If we are multiplying in DImode, it may still be a win
1937 to try to work with shifts and adds. */
1938 if (GET_CODE (op1) == CONST_DOUBLE
1939 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
1940 && HOST_BITS_PER_INT <= BITS_PER_WORD)
/* A CONST_DOUBLE whose value fits in a host int is demoted to a
   CONST_INT so the synth_mult path below can handle it.  */
1942 if ((CONST_DOUBLE_HIGH (op1) == 0 && CONST_DOUBLE_LOW (op1) >= 0)
1943 || (CONST_DOUBLE_HIGH (op1) == -1 && CONST_DOUBLE_LOW (op1) < 0))
1944 const_op1 = gen_rtx (CONST_INT, VOIDmode, CONST_DOUBLE_LOW (op1));
1947 if (GET_CODE (const_op1) == CONST_INT && ! mult_is_very_cheap && optimize)
1949 struct algorithm alg;
1950 struct algorithm neg_alg;
1952 int absval = INTVAL (op1);
1955 /* Try to do the computation two ways: multiply by the negative of OP1
1956 and then negate, or do the multiplication directly. The latter is
1957 usually faster for positive numbers and the former for negative
1958 numbers, but the opposite can be faster if the original value
1959 has a factor of 2**m +/- 1, while the negated value does not or
1962 alg = synth_mult (absval, add_cost, shift_cost, mult_cost);
1963 neg_alg = synth_mult (- absval, add_cost, shift_cost,
1964 mult_cost - negate_cost);
1966 if (neg_alg.cost >= 0 && neg_alg.cost + negate_cost < alg.cost)
1967 alg = neg_alg, negate = 1, absval = - absval;
1971 /* If we found something, it must be cheaper than multiply.
1975 int factors_seen = 0;
1977 op0 = protect_from_queue (op0, 0);
1979 /* Avoid referencing memory over and over.
1980 For speed, but also for correctness when mem is volatile. */
1981 if (GET_CODE (op0) == MEM)
1982 op0 = force_reg (mode, op0);
1985 accum = copy_to_mode_reg (mode, op0);
1988 /* 1 if this is the last in a series of adds and subtracts. */
1989 int last = (1 == alg.ops || alg.op[1] == alg_compound);
1990 int log = floor_log2 (alg.coeff[0]);
1991 if (! factors_seen && ! last)
1992 log -= floor_log2 (alg.coeff[1]);
1994 if (alg.op[0] != alg_add)
1996 accum = expand_shift (LSHIFT_EXPR, mode, op0,
1997 build_int_2 (log, 0),
/* Emit one add/subtract/compound step per recorded operation.  */
2001 while (++opno < alg.ops)
2003 int log = floor_log2 (alg.coeff[opno]);
2004 /* 1 if this is the last in a series of adds and subtracts. */
2005 int last = (opno + 1 == alg.ops
2006 || alg.op[opno + 1] == alg_compound);
2008 /* If we have not yet seen any separate factors (alg_compound)
2009 then turn op0<<a1 + op0<<a2 + op0<<a3... into
2010 (op0<<(a1-a2) + op0)<<(a2-a3) + op0... */
2011 switch (alg.op[opno])
2016 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2017 build_int_2 (log, 0), 0, 0);
2018 accum = force_operand (gen_rtx (PLUS, mode, accum, tem),
2024 log -= floor_log2 (alg.coeff[opno + 1]);
2025 accum = force_operand (gen_rtx (PLUS, mode, accum, op0),
2027 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2028 build_int_2 (log, 0), accum, 0);
2035 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2036 build_int_2 (log, 0), 0, 0);
2037 accum = force_operand (gen_rtx (MINUS, mode, accum, tem),
2043 log -= floor_log2 (alg.coeff[opno + 1]);
2044 accum = force_operand (gen_rtx (MINUS, mode, accum, op0),
2046 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2047 build_int_2 (log, 0), accum, 0);
/* alg_compound case: shift the partial product and combine with
   the following add/subtract entry.  */
2054 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2055 build_int_2 (log, 0), 0, 0);
2057 log = floor_log2 (alg.coeff[opno + 1]);
2058 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2059 build_int_2 (log, 0), 0, 0);
2061 if (alg.op[opno] == alg_add)
2062 accum = force_operand (gen_rtx (PLUS, mode, tem, accum),
2065 accum = force_operand (gen_rtx (MINUS, mode, tem, accum),
2070 /* Write a REG_EQUAL note on the last insn so that we can cse
2071 multiplication sequences. We need not do this if we were
2072 multiplying by a power of two, since only one insn would have
2075 ??? We could also write REG_EQUAL notes on the last insn of
2076 each sequence that uses a single temporary, but it is not
2077 clear how to calculate the partial product so far.
2079 Torbjorn: Can you do this? */
2081 if (exact_log2 (absval) < 0)
2083 last = get_last_insn ();
2085 = gen_rtx (EXPR_LIST, REG_EQUAL,
2086 gen_rtx (MULT, mode, op0,
2087 negate ? gen_rtx (CONST_INT,
/* If we synthesized -OP1's algorithm, negate the accumulated result.  */
2093 return (negate ? expand_unop (mode, neg_optab, accum, target, 0)
2098 /* This used to use umul_optab if unsigned,
2099 but I think that for non-widening multiply there is no difference
2100 between signed and unsigned. */
2101 op0 = expand_binop (mode, smul_optab,
2102 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
2108 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
2109 if that is convenient, and returning where the result is.
2110 You may request either the quotient or the remainder as the result;
2111 specify REM_FLAG nonzero to get the remainder.
2113 CODE is the expression code for which kind of division this is;
2114 it controls how rounding is done. MODE is the machine mode to use.
2115 UNSIGNEDP nonzero means do unsigned division. */
2117 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
2118 and then correct it by or'ing in missing high bits
2119 if result of ANDI is nonzero.
2120 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
2121 This could optimize to a bfexts instruction.
2122 But C doesn't use these operations, so their optimizations are
2126 expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
2128 enum tree_code code;
2129 enum machine_mode mode;
2130 register rtx op0, op1, target;
2133 register rtx result = 0;
2134 enum machine_mode compute_mode;
2136 int can_clobber_op0;
2137 int mod_insn_no_good = 0;
2138 rtx adjusted_op0 = op0;
2139 optab optab1, optab2;
2141 /* Don't use the function value register as a target
2142 since we have to read it as well as write it,
2143 and function-inlining gets confused by this. */
2144 if (target && REG_P (target) && REG_FUNCTION_VALUE_P (target))
2147 /* Don't clobber an operand while doing a multi-step calculation. */
2149 if ((rem_flag && (reg_mentioned_p (target, op0)
2150 || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
2151 || reg_mentioned_p (target, op1)
2152 || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM))
2155 can_clobber_op0 = (GET_CODE (op0) == REG && op0 == target);
2157 if (GET_CODE (op1) == CONST_INT)
2158 log = exact_log2 (INTVAL (op1));
2160 /* If log is >= 0, we are dividing by 2**log, and will do it by shifting,
2161 which is really floor-division. Otherwise we will really do a divide,
2162 and we assume that is trunc-division.
2164 We must correct the dividend by adding or subtracting something
2165 based on the divisor, in order to do the kind of rounding specified
2166 by CODE. The correction depends on what kind of rounding is actually
2167 available, and that depends on whether we will shift or divide.
2169 In many of these cases it is possible to perform the operation by a
2170 clever series of logical operations (shifts and/or exclusive-ors).
2171 Although avoiding the jump has the advantage that it extends the basic
2172 block and allows further optimization, the branch-free code is normally
2173 at least one instruction longer in the (most common) case where the
2174 dividend is non-negative. Performance measurements of the two
2175 alternatives show that the branch-free code is slightly faster on the
2176 IBM ROMP but slower on CISC processors (significantly slower on the
2177 VAX). Accordingly, the jump code has been retained.
2179 On machines where the jump code is slower, the cost of a DIV or MOD
2180 operation can be set small (less than twice that of an addition); in
2181 that case, we pretend that we don't have a power of two and perform
2182 a normal division or modulus operation. */
2184 if ((code == TRUNC_MOD_EXPR || code == TRUNC_DIV_EXPR)
2186 && (rem_flag ? smod_pow2_cheap : sdiv_pow2_cheap))
2189 /* Get the mode in which to perform this computation. Normally it will
2190 be MODE, but sometimes we can't do the desired operation in MODE.
2191 If so, pick a wider mode in which we can do the operation. Convert
2192 to that mode at the start to avoid repeated conversions.
2194 First see what operations we need. These depend on the expression
2195 we are evaluating. (We assume that divxx3 insns exist under the
2196 same conditions that modxx3 insns and that these insns don't normally
2197 fail. If these assumptions are not correct, we may generate less
2198 efficient code in some cases.)
2200 Then see if we find a mode in which we can open-code that operation
2201 (either a division, modulus, or shift). Finally, check for the smallest
2202 mode for which we can do the operation with a library call. */
2204 optab1 = (log >= 0 ? (unsignedp ? lshr_optab : ashr_optab)
2205 : (unsignedp ? udiv_optab : sdiv_optab));
2206 optab2 = (log >= 0 ? optab1 : (unsignedp ? udivmod_optab : sdivmod_optab));
2208 for (compute_mode = mode; compute_mode != VOIDmode;
2209 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
2210 if (optab1->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing
2211 || optab2->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing)
2214 if (compute_mode == VOIDmode)
2215 for (compute_mode = mode; compute_mode != VOIDmode;
2216 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
2217 if (optab1->handlers[(int) compute_mode].libfunc
2218 || optab2->handlers[(int) compute_mode].libfunc)
2221 /* If we still couldn't find a mode, use MODE; we'll probably abort in
2223 if (compute_mode == VOIDmode)
2224 compute_mode = mode;
2226 /* Now convert to the best mode to use. Show we made a copy of OP0
2227 and hence we can clobber it (we cannot use a SUBREG to widen
2229 if (compute_mode != mode)
2231 adjusted_op0 = op0 = convert_to_mode (compute_mode, op0, unsignedp);
2232 can_clobber_op0 = 1;
2233 op1 = convert_to_mode (compute_mode, op1, unsignedp);
2236 if (target == 0 || GET_MODE (target) != compute_mode)
2237 target = gen_reg_rtx (compute_mode);
2241 case TRUNC_MOD_EXPR:
2242 case TRUNC_DIV_EXPR:
2243 if (log >= 0 && ! unsignedp)
2245 rtx label = gen_label_rtx ();
2246 if (! can_clobber_op0)
2248 adjusted_op0 = copy_to_suggested_reg (adjusted_op0, target,
2250 /* Copy op0 to a reg, since emit_cmp_insn will call emit_queue
2251 which will screw up mem refs for autoincrements. */
2252 op0 = force_reg (compute_mode, op0);
2254 emit_cmp_insn (adjusted_op0, const0_rtx, GE, 0, compute_mode, 0, 0);
2255 emit_jump_insn (gen_bge (label));
2256 expand_inc (adjusted_op0, plus_constant (op1, -1));
2258 mod_insn_no_good = 1;
2262 case FLOOR_DIV_EXPR:
2263 case FLOOR_MOD_EXPR:
2264 if (log < 0 && ! unsignedp)
2266 rtx label = gen_label_rtx ();
2267 if (! can_clobber_op0)
2269 adjusted_op0 = copy_to_suggested_reg (adjusted_op0, target,
2271 /* Copy op0 to a reg, since emit_cmp_insn will call emit_queue
2272 which will screw up mem refs for autoincrements. */
2273 op0 = force_reg (compute_mode, op0);
2275 emit_cmp_insn (adjusted_op0, const0_rtx, GE, 0, compute_mode, 0, 0);
2276 emit_jump_insn (gen_bge (label));
2277 expand_dec (adjusted_op0, op1);
2278 expand_inc (adjusted_op0, const1_rtx);
2280 mod_insn_no_good = 1;
2286 if (! can_clobber_op0)
2288 adjusted_op0 = copy_to_suggested_reg (adjusted_op0, target,
2290 /* Copy op0 to a reg, since emit_cmp_insn will call emit_queue
2291 which will screw up mem refs for autoincrements. */
2292 op0 = force_reg (compute_mode, op0);
2299 label = gen_label_rtx ();
2300 emit_cmp_insn (adjusted_op0, const0_rtx, LE, 0, compute_mode, 0, 0);
2301 emit_jump_insn (gen_ble (label));
2303 expand_inc (adjusted_op0, op1);
2304 expand_dec (adjusted_op0, const1_rtx);
2310 adjusted_op0 = expand_binop (compute_mode, add_optab,
2311 adjusted_op0, plus_constant (op1, -1),
2312 0, 0, OPTAB_LIB_WIDEN);
2314 mod_insn_no_good = 1;
2317 case ROUND_DIV_EXPR:
2318 case ROUND_MOD_EXPR:
2319 if (! can_clobber_op0)
2321 adjusted_op0 = copy_to_suggested_reg (adjusted_op0, target,
2323 /* Copy op0 to a reg, since emit_cmp_insn will call emit_queue
2324 which will screw up mem refs for autoincrements. */
2325 op0 = force_reg (compute_mode, op0);
2329 op1 = expand_shift (RSHIFT_EXPR, compute_mode, op1,
2330 integer_one_node, 0, 0);
2333 rtx label = gen_label_rtx ();
2334 emit_cmp_insn (adjusted_op0, const0_rtx, GE, 0, compute_mode, 0, 0);
2335 emit_jump_insn (gen_bge (label));
2336 expand_unop (compute_mode, neg_optab, op1, op1, 0);
2339 expand_inc (adjusted_op0, op1);
2343 op1 = gen_rtx (CONST_INT, VOIDmode, (1 << log) / 2);
2344 expand_inc (adjusted_op0, op1);
2346 mod_insn_no_good = 1;
2350 if (rem_flag && !mod_insn_no_good)
2352 /* Try to produce the remainder directly */
2354 result = expand_binop (compute_mode, and_optab, adjusted_op0,
2355 gen_rtx (CONST_INT, VOIDmode,
2357 target, 1, OPTAB_LIB_WIDEN);
2360 /* See if we can do remainder without a library call. */
2361 result = sign_expand_binop (mode, umod_optab, smod_optab,
2362 adjusted_op0, op1, target,
2363 unsignedp, OPTAB_WIDEN);
2366 /* No luck there. Can we do remainder and divide at once
2367 without a library call? */
2368 result = gen_reg_rtx (compute_mode);
2369 if (! expand_twoval_binop (unsignedp
2370 ? udivmod_optab : sdivmod_optab,
2372 0, result, unsignedp))
2379 return gen_lowpart (mode, result);
2381 /* Produce the quotient. */
2383 result = expand_shift (RSHIFT_EXPR, compute_mode, adjusted_op0,
2384 build_int_2 (log, 0), target, unsignedp);
2385 else if (rem_flag && !mod_insn_no_good)
2386 /* If producing quotient in order to subtract for remainder,
2387 and a remainder subroutine would be ok,
2388 don't use a divide subroutine. */
2389 result = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
2390 adjusted_op0, op1, 0, unsignedp, OPTAB_WIDEN);
2393 /* Try a quotient insn, but not a library call. */
2394 result = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
2395 adjusted_op0, op1, rem_flag ? 0 : target,
2396 unsignedp, OPTAB_WIDEN);
2399 /* No luck there. Try a quotient-and-remainder insn,
2400 keeping the quotient alone. */
2401 result = gen_reg_rtx (mode);
2402 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
2404 result, 0, unsignedp))
2408 /* If still no luck, use a library call. */
2410 result = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
2411 adjusted_op0, op1, rem_flag ? 0 : target,
2412 unsignedp, OPTAB_LIB_WIDEN);
2415 /* If we really want the remainder, get it by subtraction. */
2419 /* No divide instruction either. Use library for remainder. */
2420 result = sign_expand_binop (compute_mode, umod_optab, smod_optab,
2422 unsignedp, OPTAB_LIB_WIDEN);
2425 /* We divided. Now finish doing X - Y * (X / Y). */
2426 result = expand_mult (compute_mode, result, op1, target, unsignedp);
2427 if (! result) abort ();
2428 result = expand_binop (compute_mode, sub_optab, op0,
2429 result, target, unsignedp, OPTAB_LIB_WIDEN);
2436 return gen_lowpart (mode, result);
2439 /* Return a tree node with data type TYPE, describing the value of X.
2440 Usually this is an RTL_EXPR, if there is no obvious better choice.
2441 X may be an expression, however we only support those expressions
2442 generated by loop.c. */
2451 switch (GET_CODE (x))
2454 t = build_int_2 (INTVAL (x),
2455 ! TREE_UNSIGNED (type) && INTVAL (x) >= 0 ? 0 : -1);
2456 TREE_TYPE (t) = type;
2460 if (GET_MODE (x) == VOIDmode)
2462 t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
2463 TREE_TYPE (t) = type;
2469 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
2470 t = build_real (type, d);
2476 return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
2477 make_tree (type, XEXP (x, 1))));
2480 return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
2481 make_tree (type, XEXP (x, 1))));
2484 return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0))));
2487 return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
2488 make_tree (type, XEXP (x, 1))));
2491 return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
2492 make_tree (type, XEXP (x, 1))));
2495 return fold (convert (type,
2496 build (RSHIFT_EXPR, unsigned_type (type),
2497 make_tree (unsigned_type (type),
2499 make_tree (type, XEXP (x, 1)))));
2502 return fold (convert (type,
2503 build (RSHIFT_EXPR, signed_type (type),
2504 make_tree (signed_type (type), XEXP (x, 0)),
2505 make_tree (type, XEXP (x, 1)))));
2508 if (TREE_CODE (type) != REAL_TYPE)
2509 t = signed_type (type);
2513 return fold (convert (type,
2514 build (TRUNC_DIV_EXPR, t,
2515 make_tree (t, XEXP (x, 0)),
2516 make_tree (t, XEXP (x, 1)))));
2518 t = unsigned_type (type);
2519 return fold (convert (type,
2520 build (TRUNC_DIV_EXPR, t,
2521 make_tree (t, XEXP (x, 0)),
2522 make_tree (t, XEXP (x, 1)))));
2524 t = make_node (RTL_EXPR);
2525 TREE_TYPE (t) = type;
2526 RTL_EXPR_RTL (t) = x;
2527 /* There are no insns to be output
2528 when this rtl_expr is used. */
2529 RTL_EXPR_SEQUENCE (t) = 0;
2534 /* Return an rtx representing the value of X * MULT + ADD.
2535 TARGET is a suggestion for where to store the result (an rtx).
2536 MODE is the machine mode for the computation.
2537 X and MULT must have mode MODE. ADD may have a different mode.
2538 So can X (defaults to same as MODE).
2539 UNSIGNEDP is non-zero to do unsigned multiplication.
2540 This may emit insns. */
/* NOTE(review): the return type line, the `int unsignedp;` declaration,
   and the function's braces (orig. lines 2541-2547, 2558) are missing
   from this excerpt — this is a K&R-style definition. */
2543 expand_mult_add (x, target, mult, add, mode, unsignedp)
2544 rtx x, target, mult, add;
2545 enum machine_mode mode;
/* Build the computation as a tree (x * mult) + add and let expand_expr
   generate the insns; fold () gets a chance to simplify constant
   operands before any rtl is emitted. */
2548 tree type = type_for_mode (mode, unsignedp);
/* ADD may be a VOIDmode constant; in that case reuse TYPE rather than
   asking for a type of VOIDmode. */
2549 tree add_type = (GET_MODE (add) == VOIDmode
2550 ? type : type_for_mode (GET_MODE (add), unsignedp));
2551 tree result = fold (build (PLUS_EXPR, type,
2552 fold (build (MULT_EXPR, type,
2553 make_tree (type, x),
2554 make_tree (type, mult))),
2555 make_tree (add_type, add)));
2557 return expand_expr (result, target, VOIDmode, 0);
2560 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
2561 and returning TARGET.
2563 If TARGET is 0, a pseudo-register or constant is returned. */
/* NOTE(review): the return type line, the function braces, the
   declaration of `tem`, and the lines handling the remaining cases
   (orig. lines 2564-2571, 2581-2585, 2588-2589) are missing from this
   excerpt. */
2566 expand_and (op0, op1, target)
2567 rtx op0, op1, target;
2569 enum machine_mode mode = VOIDmode;
/* Take the computation mode from whichever operand has one; both may
   be VOIDmode CONST_INTs. */
2572 if (GET_MODE (op0) != VOIDmode)
2573 mode = GET_MODE (op0);
2574 else if (GET_MODE (op1) != VOIDmode)
2575 mode = GET_MODE (op1);
2577 if (mode != VOIDmode)
2578 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
/* Both operands are CONST_INT: fold the AND at compile time instead of
   emitting an insn. */
2579 else if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT)
2580 tem = gen_rtx (CONST_INT, VOIDmode, INTVAL (op0) & INTVAL (op1));
/* If the result did not land in TARGET, copy it there so the caller
   gets the rtx it asked for.  (The `if (target == 0)` branch between
   these lines is missing from this excerpt.) */
2586 else if (tem != target)
2587 emit_move_insn (target, tem);
2591 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
2592 and storing in TARGET. Normally return TARGET.
2593 Return 0 if that cannot be done.
2595 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
2596 it is VOIDmode, they cannot both be CONST_INT.
2598 UNSIGNEDP is for the case where we have to widen the operands
2599 to perform the operation. It says to use zero-extension.
2601 NORMALIZEP is 1 if we should convert the result to be either zero
2602 or one one. Normalize is -1 if we should convert the result to be
2603 either zero or -1. If NORMALIZEP is zero, the result will be left
2604 "raw" out of the scc insn. */
2607 emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep)
2611 enum machine_mode mode;
2616 enum insn_code icode;
2617 enum machine_mode compare_mode;
2618 enum machine_mode target_mode = GET_MODE (target);
2621 rtx pattern, comparison;
2623 if (mode == VOIDmode)
2624 mode = GET_MODE (op0);
2626 /* For some comparisons with 1 and -1, we can convert this to
2627 comparisons with zero. This will often produce more opportunities for
2628 store-flag insns. */
2633 if (op1 == const1_rtx)
2634 op1 = const0_rtx, code = LE;
2637 if (op1 == constm1_rtx)
2638 op1 = const0_rtx, code = LT;
2641 if (op1 == const1_rtx)
2642 op1 = const0_rtx, code = GT;
2645 if (op1 == constm1_rtx)
2646 op1 = const0_rtx, code = GE;
2649 if (op1 == const1_rtx)
2650 op1 = const0_rtx, code = NE;
2653 if (op1 == const1_rtx)
2654 op1 = const0_rtx, code = EQ;
2658 /* From now on, we won't change CODE, so set ICODE now. */
2659 icode = setcc_gen_code[(int) code];
2661 /* If this is A < 0 or A >= 0, we can do this by taking the ones
2662 complement of A (for GE) and shifting the sign bit to the low bit. */
2663 if (op1 == const0_rtx && (code == LT || code == GE)
2664 && GET_MODE_CLASS (mode) == MODE_INT
2665 && (normalizep || STORE_FLAG_VALUE == 1
2666 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_INT
2667 && STORE_FLAG_VALUE == 1 << (GET_MODE_BITSIZE (mode) - 1))))
2669 rtx subtarget = target;
2671 /* If the result is to be wider than OP0, it is best to convert it
2672 first. If it is to be narrower, it is *incorrect* to convert it
2674 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
2676 op0 = convert_to_mode (target_mode, op0, 0);
2680 if (target_mode != mode)
2684 op0 = expand_unop (mode, one_cmpl_optab, op0, subtarget, 0);
2686 if (normalizep || STORE_FLAG_VALUE == 1)
2687 /* If we are supposed to produce a 0/1 value, we want to do
2688 a logical shift from the sign bit to the low-order bit; for
2689 a -1/0 value, we do an arithmetic shift. */
2690 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
2691 size_int (GET_MODE_BITSIZE (mode) - 1),
2692 subtarget, normalizep != -1);
2694 if (mode != target_mode)
2695 op0 = convert_to_mode (target_mode, op0, 0);
2700 if (icode != CODE_FOR_nothing)
2702 /* We think we may be able to do this with a scc insn. Emit the
2703 comparison and then the scc insn.
2705 compare_from_rtx may call emit_queue, which would be deleted below
2706 if the scc insn fails. So call it ourselves before setting LAST. */
2709 last = get_last_insn ();
2711 comparison = compare_from_rtx (op0, op1, code, unsignedp, mode, 0, 0);
2712 if (GET_CODE (comparison) == CONST_INT)
2713 return (comparison == const0_rtx ? const0_rtx
2714 : normalizep == 1 ? const1_rtx
2715 : normalizep == -1 ? constm1_rtx
2718 /* Get a reference to the target in the proper mode for this insn. */
2719 compare_mode = insn_operand_mode[(int) icode][0];
2721 if (preserve_subexpressions_p ()
2722 || ! (*insn_operand_predicate[(int) icode][0]) (subtarget, compare_mode))
2723 subtarget = gen_reg_rtx (compare_mode);
2725 pattern = GEN_FCN (icode) (subtarget);
2728 emit_insn (pattern);
2730 /* If we are converting to a wider mode, first convert to
2731 TARGET_MODE, then normalize. This produces better combining
2732 opportunities on machines that have a SIGN_EXTRACT when we are
2733 testing a single bit. This mostly benefits the 68k.
2735 If STORE_FLAG_VALUE does not have the sign bit set when
2736 interpreted in COMPARE_MODE, we can do this conversion as
2737 unsigned, which is usually more efficient. */
2738 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
2740 convert_move (target, subtarget,
2741 (GET_MODE_BITSIZE (compare_mode)
2742 <= HOST_BITS_PER_INT)
2743 && 0 == (STORE_FLAG_VALUE
2744 & (1 << (GET_MODE_BITSIZE (compare_mode) -1))));
2746 compare_mode = target_mode;
2751 /* Now normalize to the proper value in COMPARE_MODE. Sometimes
2752 we don't have to do anything. */
2753 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
2755 else if (normalizep == - STORE_FLAG_VALUE)
2756 op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
2758 /* We don't want to use STORE_FLAG_VALUE < 0 below since this
2759 makes it hard to use a value of just the sign bit due to
2760 ANSI integer constant typing rules. */
2761 else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_INT
2762 && (STORE_FLAG_VALUE
2763 & (1 << (GET_MODE_BITSIZE (compare_mode) - 1))))
2764 op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
2765 size_int (GET_MODE_BITSIZE (compare_mode) - 1),
2766 subtarget, normalizep == 1);
2767 else if (STORE_FLAG_VALUE & 1)
2769 op0 = expand_and (op0, const1_rtx, subtarget);
2770 if (normalizep == -1)
2771 op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
2776 /* If we were converting to a smaller mode, do the
2778 if (target_mode != compare_mode)
2780 convert_move (target, op0);
2789 delete_insns_since (last);
2791 subtarget = target_mode == mode ? target : 0;
2793 /* If we reached here, we can't do this with a scc insn. However, there
2794 are some comparisons that can be done directly. For example, if
2795 this is an equality comparison of integers, we can try to exclusive-or
2796 (or subtract) the two operands and use a recursive call to try the
2797 comparison with zero. Don't do any of these cases if branches are
2800 if (BRANCH_COST >= 0
2801 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
2802 && op1 != const0_rtx)
2804 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
2808 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
2811 tem = emit_store_flag (target, code, tem, const0_rtx,
2812 mode, unsignedp, normalizep);
2814 delete_insns_since (last);
2818 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
2819 the constant zero. Reject all other comparisons at this point. Only
2820 do LE and GT if branches are expensive since they are expensive on
2821 2-operand machines. */
2823 if (BRANCH_COST == 0
2824 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
2825 || (code != EQ && code != NE
2826 && (BRANCH_COST <= 1 || (code != LE && code != GT))))
2829 /* See what we need to return. We can only return a 1, -1, or the
2832 if (normalizep == 0)
2834 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
2835 normalizep = STORE_FLAG_VALUE;
2837 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_INT
2838 && STORE_FLAG_VALUE == 1 << (GET_MODE_BITSIZE (mode) - 1))
2844 /* Try to put the result of the comparison in the sign bit. Assume we can't
2845 do the necessary operation below. */
2849 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
2850 the sign bit set. */
2854 /* This is destructive, so SUBTARGET can't be OP0. */
2855 if (rtx_equal_p (subtarget, op0))
2858 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
2861 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
2865 /* To see if A > 0, compute (((signed) A) << BITS) - A, where BITS is the
2866 number of bits in the mode of OP0, minus one. */
2870 if (rtx_equal_p (subtarget, op0))
2873 tem = expand_shift (RSHIFT_EXPR, mode, op0,
2874 size_int (GET_MODE_BITSIZE (mode) - 1),
2876 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
2880 if (code == EQ || code == NE)
2882 /* For EQ or NE, one way to do the comparison is to apply an operation
2883 that converts the operand into a positive number if it is non-zero
2884 or zero if it was originally zero. Then, for EQ, we subtract 1 and
2885 for NE we negate. This puts the result in the sign bit. Then we
2886 normalize with a shift, if needed.
2888 Two operations that can do the above actions are ABS and FFS, so try
2889 them. If that doesn't work, and MODE is smaller than a full word,
2890 we can use zero-extension to the wider mode (an unsigned conversion)
2891 as the operation. */
2893 if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2894 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
2895 else if (ffs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2896 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
2897 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
2900 tem = convert_to_mode (mode, op0, 1);
2906 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
2909 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
2912 /* If we couldn't do it that way, for NE we can "or" the two's complement
2913 of the value with itself. For EQ, we take the one's complement of
2914 that "or", which is an extra insn, so we only handle EQ if branches
2917 if (tem == 0 && (code == NE || BRANCH_COST > 1))
2919 if (rtx_equal_p (subtarget, op0))
2922 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
2923 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
2926 if (tem && code == EQ)
2927 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
2931 if (tem && normalizep)
2932 tem = expand_shift (RSHIFT_EXPR, mode, tem,
2933 size_int (GET_MODE_BITSIZE (mode) - 1),
2934 tem, normalizep == 1);
2936 if (tem && GET_MODE (tem) != target_mode)
2938 convert_move (target, tem, 0);
2943 delete_insns_since (last);
2947 emit_jump_insn ((*bcc_gen_fctn[(int) code]) (label));
2948 emit_move_insn (target, const1_rtx);