1 /* Medium-level subroutines: convert bit-field store and extract
2 and shifts, multiplies and divides to rtl instructions.
3 Copyright (C) 1987, 88, 89, 92-97, 1998 Free Software Foundation, Inc.
5 This file is part of GNU CC.
7 GNU CC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
12 GNU CC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GNU CC; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
28 #include "insn-flags.h"
29 #include "insn-codes.h"
30 #include "insn-config.h"
/* Forward declarations for the static helpers defined later in this file.
   PROTO is the pre-ANSI compatibility macro that expands to a prototype
   argument list when the compiler supports prototypes.
   NOTE(review): the mask_rtx and lshift_value prototypes below are cut
   short in this extract -- confirm their full argument lists against the
   complete source.  */
35 static void store_fixed_bit_field PROTO((rtx, int, int, int, rtx, int));
36 static void store_split_bit_field PROTO((rtx, int, int, rtx, int));
37 static rtx extract_fixed_bit_field PROTO((enum machine_mode, rtx, int,
38 int, int, rtx, int, int));
39 static rtx mask_rtx PROTO((enum machine_mode, int,
41 static rtx lshift_value PROTO((enum machine_mode, rtx,
43 static rtx extract_split_bit_field PROTO((rtx, int, int, int, int));
/* CEIL(x, y): divide X by Y, rounding any remainder upward.
   Both operands are assumed non-negative.  */
45 #define CEIL(x,y) (((x) + (y) - 1) / (y))
47 /* Non-zero means divides or modulus operations are relatively cheap for
48 powers of two, so don't use branches; emit the operation instead.
49 Usually, this will mean that the MD file will emit non-branch
   ... (rest of comment missing in this extract).  */
52 static int sdiv_pow2_cheap, smod_pow2_cheap;
54 #ifndef SLOW_UNALIGNED_ACCESS
/* Default: unaligned accesses are slow exactly when the target requires
   strict alignment.  Target headers may override this.
   NOTE(review): the matching #endif is not visible in this extract.  */
55 #define SLOW_UNALIGNED_ACCESS STRICT_ALIGNMENT
58 /* For compilers that support multiple targets with different word sizes,
59 MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD. An example
60 is the H8/300(H) compiler. */
62 #ifndef MAX_BITS_PER_WORD
63 #define MAX_BITS_PER_WORD BITS_PER_WORD
66 /* Cost of various pieces of RTL. Note that some of these are indexed by shift count,
   others by machine mode.  */
/* Costs of an add, a negation, and the constant zero.  */
68 static int add_cost, negate_cost, zero_cost;
/* Cost of a shift by N bits, indexed by the shift count N.  */
69 static int shift_cost[MAX_BITS_PER_WORD];
/* Cost of (X * 2**N) + Y, a shift-and-add, indexed by N.  */
70 static int shiftadd_cost[MAX_BITS_PER_WORD];
/* Cost of (X * 2**N) - Y, a shift-and-subtract, indexed by N.  */
71 static int shiftsub_cost[MAX_BITS_PER_WORD];
/* Costs of multiply, divide, widening multiply and high-part multiply,
   each indexed by machine mode.  */
72 static int mul_cost[NUM_MACHINE_MODES];
73 static int div_cost[NUM_MACHINE_MODES];
74 static int mul_widen_cost[NUM_MACHINE_MODES];
75 static int mul_highpart_cost[NUM_MACHINE_MODES];
/* Interior of the cost-initialization routine.
   NOTE(review): its header is not visible in this extract -- presumably
   init_expmed; confirm against the complete source.  It measures, via
   recog and rtx_cost on scratch insns, how expensive shifts, adds,
   multiplies and divides are on the target, and fills in the cost
   tables declared above.  */
81 /* This is "some random pseudo register" for purposes of calling recog
82 to see what insns exist. */
83 rtx reg = gen_rtx_REG (word_mode, 10000);
84 rtx shift_insn, shiftadd_insn, shiftsub_insn;
87 enum machine_mode mode, wider_mode;
91 /* Since we are on the permanent obstack, we must be sure we save this
92 spot AFTER we call start_sequence, since it will reuse the rtl it
   ... (rest of comment missing in this extract).  */
95 free_point = (char *) oballoc (0);
97 zero_cost = rtx_cost (const0_rtx, 0);
98 add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);
/* Build scratch shift / shift-add / shift-subtract insns; operand 1 of
   each is overwritten with every shift count below and re-recognized.  */
100 shift_insn = emit_insn (gen_rtx_SET (VOIDmode, reg,
101 gen_rtx_ASHIFT (word_mode, reg,
105 = emit_insn (gen_rtx_SET (VOIDmode, reg,
106 gen_rtx_PLUS (word_mode,
107 gen_rtx_MULT (word_mode,
112 = emit_insn (gen_rtx_SET (VOIDmode, reg,
113 gen_rtx_MINUS (word_mode,
114 gen_rtx_MULT (word_mode,
121 shiftadd_cost[0] = shiftsub_cost[0] = add_cost;
/* Probe each shift count M; 32000 stands for "prohibitively expensive"
   when the target has no matching insn.  */
123 for (m = 1; m < BITS_PER_WORD; m++)
125 shift_cost[m] = shiftadd_cost[m] = shiftsub_cost[m] = 32000;
127 XEXP (SET_SRC (PATTERN (shift_insn)), 1) = GEN_INT (m);
128 if (recog (PATTERN (shift_insn), shift_insn, &dummy) >= 0)
129 shift_cost[m] = rtx_cost (SET_SRC (PATTERN (shift_insn)), SET);
131 XEXP (XEXP (SET_SRC (PATTERN (shiftadd_insn)), 0), 1)
132 = GEN_INT ((HOST_WIDE_INT) 1 << m);
133 if (recog (PATTERN (shiftadd_insn), shiftadd_insn, &dummy) >= 0)
134 shiftadd_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftadd_insn)), SET);
136 XEXP (XEXP (SET_SRC (PATTERN (shiftsub_insn)), 0), 1)
137 = GEN_INT ((HOST_WIDE_INT) 1 << m);
138 if (recog (PATTERN (shiftsub_insn), shiftsub_insn, &dummy) >= 0)
139 shiftsub_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftsub_insn)), SET);
142 negate_cost = rtx_cost (gen_rtx_NEG (word_mode, reg), SET);
145 = (rtx_cost (gen_rtx_DIV (word_mode, reg, GEN_INT (32)), SET)
148 = (rtx_cost (gen_rtx_MOD (word_mode, reg, GEN_INT (32)), SET)
/* Walk all integer modes from narrowest upward, recording divide,
   multiply, widening-multiply and high-part-multiply costs per mode.  */
151 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
153 mode = GET_MODE_WIDER_MODE (mode))
155 reg = gen_rtx_REG (mode, 10000);
156 div_cost[(int) mode] = rtx_cost (gen_rtx_UDIV (mode, reg, reg), SET);
157 mul_cost[(int) mode] = rtx_cost (gen_rtx_MULT (mode, reg, reg), SET);
158 wider_mode = GET_MODE_WIDER_MODE (mode);
159 if (wider_mode != VOIDmode)
161 mul_widen_cost[(int) wider_mode]
162 = rtx_cost (gen_rtx_MULT (wider_mode,
163 gen_rtx_ZERO_EXTEND (wider_mode, reg),
164 gen_rtx_ZERO_EXTEND (wider_mode, reg)),
166 mul_highpart_cost[(int) mode]
167 = rtx_cost (gen_rtx_TRUNCATE
171 gen_rtx_MULT (wider_mode,
172 gen_rtx_ZERO_EXTEND (wider_mode, reg),
173 gen_rtx_ZERO_EXTEND (wider_mode, reg)),
174 GEN_INT (GET_MODE_BITSIZE (mode)))),
179 /* Free the objects we just allocated. */
184 /* Return an rtx representing minus the value of X.
185 MODE is the intended mode of the result,
186 useful if X is a CONST_INT. */
/* NOTE(review): the header of negate_rtx is not visible in this extract;
   only a K&R parameter declaration and two body statements appear.  */
190 enum machine_mode mode;
/* Try to fold the negation at compile time first ...  */
193 rtx result = simplify_unary_operation (NEG, mode, x, mode);
/* ... otherwise emit a run-time negation.  */
196 result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);
201 /* Generate code to store value from rtx VALUE
202 into a bit-field within structure STR_RTX
203 containing BITSIZE bits starting at bit BITNUM.
204 FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
205 ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
206 TOTAL_SIZE is the size of the structure in bytes, or -1 if varying. */
208 /* ??? Note that there are two different ideas here for how
209 to determine the size to count bits within, for a register.
210 One is BITS_PER_WORD, and the other is the size of operand 3
211 of the insv pattern. (The latter assumes that an n-bit machine
212 will be able to insert bit fields up to n bits wide.)
213 It isn't certain that either of these is right.
214 extract_bit_field has the same quandary. */
/* Emit rtl to store VALUE into the bit-field of BITSIZE bits at bit
   BITNUM within STR_RTX; see the commentary immediately above this
   function for the full parameter contract.  */
217 store_bit_field (str_rtx, bitsize, bitnum, fieldmode, value, align, total_size)
219 register int bitsize;
221 enum machine_mode fieldmode;
/* UNIT is the unit BITPOS/OFFSET are measured in: bits per byte for a
   MEM, bits per word for a register.  */
226 int unit = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
227 register int offset = bitnum / unit;
228 register int bitpos = bitnum % unit;
229 register rtx op0 = str_rtx;
231 if (GET_CODE (str_rtx) == MEM && ! MEM_IN_STRUCT_P (str_rtx))
234 /* Discount the part of the structure before the desired byte.
235 We need to know how many bytes are safe to reference after it. */
237 total_size -= (bitpos / BIGGEST_ALIGNMENT
238 * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));
/* Strip SUBREGs, folding each one's word offset into OFFSET.  */
240 while (GET_CODE (op0) == SUBREG)
242 /* The following line once was done only if WORDS_BIG_ENDIAN,
243 but I think that is a mistake. WORDS_BIG_ENDIAN is
244 meaningful at a much higher level; when structures are copied
245 between memory and regs, the higher-numbered regs
246 always get higher addresses. */
247 offset += SUBREG_WORD (op0);
248 /* We used to adjust BITPOS here, but now we do the whole adjustment
249 right after the loop. */
250 op0 = SUBREG_REG (op0);
253 /* If OP0 is a register, BITPOS must count within a word.
254 But as we have it, it counts within whatever size OP0 now has.
255 On a bigendian machine, these are not the same, so convert. */
257 && GET_CODE (op0) != MEM
258 && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
259 bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
261 value = protect_from_queue (value, 0);
264 value = force_not_mem (value);
266 /* Note that the adjustment of BITPOS above has no effect on whether
267 BITPOS is 0 in a REG bigger than a word. */
268 if (GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
269 && (GET_CODE (op0) != MEM
270 || ! SLOW_UNALIGNED_ACCESS
271 || (offset * BITS_PER_UNIT % bitsize == 0
272 && align % GET_MODE_SIZE (fieldmode) == 0))
273 && bitpos == 0 && bitsize == GET_MODE_BITSIZE (fieldmode))
275 /* Storing in a full-word or multi-word field in a register
276 can be done with just SUBREG. */
277 if (GET_MODE (op0) != fieldmode)
279 if (GET_CODE (op0) == REG)
280 op0 = gen_rtx_SUBREG (fieldmode, op0, offset);
282 op0 = change_address (op0, fieldmode,
283 plus_constant (XEXP (op0, 0), offset));
285 emit_move_insn (op0, value);
289 /* Storing an lsb-aligned field in a register
290 can be done with a movestrict instruction. */
292 if (GET_CODE (op0) != MEM
293 && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
294 && bitsize == GET_MODE_BITSIZE (fieldmode)
295 && (GET_MODE (op0) == fieldmode
296 || (movstrict_optab->handlers[(int) fieldmode].insn_code
297 != CODE_FOR_nothing)))
299 /* Get appropriate low part of the value being stored. */
300 if (GET_CODE (value) == CONST_INT || GET_CODE (value) == REG)
301 value = gen_lowpart (fieldmode, value);
302 else if (!(GET_CODE (value) == SYMBOL_REF
303 || GET_CODE (value) == LABEL_REF
304 || GET_CODE (value) == CONST))
305 value = convert_to_mode (fieldmode, value, 0);
307 if (GET_MODE (op0) == fieldmode)
308 emit_move_insn (op0, value);
311 int icode = movstrict_optab->handlers[(int) fieldmode].insn_code;
312 if(! (*insn_operand_predicate[icode][1]) (value, fieldmode))
313 value = copy_to_mode_reg (fieldmode, value);
314 emit_insn (GEN_FCN (icode)
315 (gen_rtx_SUBREG (fieldmode, op0, offset), value));
320 /* Handle fields bigger than a word. */
322 if (bitsize > BITS_PER_WORD)
324 /* Here we transfer the words of the field
325 in the order least significant first.
326 This is because the most significant word is the one which may
328 However, only do that if the value is not BLKmode. */
330 int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
332 int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
335 /* This is the mode we must force value to, so that there will be enough
336 subwords to extract. Note that fieldmode will often (always?) be
337 VOIDmode, because that is what store_field uses to indicate that this
338 is a bit field, but passing VOIDmode to operand_subword_force will
339 result in an abort. */
340 fieldmode = mode_for_size (nwords * BITS_PER_WORD, MODE_INT, 0);
342 for (i = 0; i < nwords; i++)
344 /* If I is 0, use the low-order word in both field and target;
345 if I is 1, use the next to lowest word; and so on. */
346 int wordnum = (backwards ? nwords - i - 1 : i);
347 int bit_offset = (backwards
348 ? MAX (bitsize - (i + 1) * BITS_PER_WORD, 0)
349 : i * BITS_PER_WORD);
350 store_bit_field (op0, MIN (BITS_PER_WORD,
351 bitsize - i * BITS_PER_WORD),
352 bitnum + bit_offset, word_mode,
353 operand_subword_force (value, wordnum,
354 (GET_MODE (value) == VOIDmode
356 : GET_MODE (value))),
362 /* From here on we can assume that the field to be stored in is
363 a full-word (whatever type that is), since it is shorter than a word. */
365 /* OFFSET is the number of words or bytes (UNIT says which)
366 from STR_RTX to the first word or byte containing part of the field. */
368 if (GET_CODE (op0) == REG)
371 || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
372 op0 = gen_rtx_SUBREG (TYPE_MODE (type_for_size (BITS_PER_WORD, 0)),
378 op0 = protect_from_queue (op0, 1);
381 /* If VALUE is a floating-point mode, access it as an integer of the
382 corresponding size. This can occur on a machine with 64 bit registers
383 that uses SFmode for float. This can also occur for unaligned float
   ... (rest of comment missing in this extract).  */
385 if (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT)
387 if (GET_CODE (value) != REG)
388 value = copy_to_reg (value);
389 value = gen_rtx_SUBREG (word_mode, value, 0);
392 /* Now OFFSET is nonzero only if OP0 is memory
393 and is therefore always measured in bytes. */
397 && GET_MODE (value) != BLKmode
398 && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
399 /* Ensure insv's size is wide enough for this field. */
400 && (GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_insv][3])
402 && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
404 > GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_insv][3]))))
406 int xbitpos = bitpos;
409 rtx last = get_last_insn ();
411 enum machine_mode maxmode
412 = insn_operand_mode[(int) CODE_FOR_insv][3];
414 int save_volatile_ok = volatile_ok;
417 /* If this machine's insv can only insert into a register, copy OP0
418 into a register and save it back later. */
419 /* This used to check flag_force_mem, but that was a serious
420 de-optimization now that flag_force_mem is enabled by -O2. */
421 if (GET_CODE (op0) == MEM
422 && ! ((*insn_operand_predicate[(int) CODE_FOR_insv][0])
426 enum machine_mode bestmode;
428 /* Get the mode to use for inserting into this field. If OP0 is
429 BLKmode, get the smallest mode consistent with the alignment. If
430 OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
431 mode. Otherwise, use the smallest mode containing the field. */
433 if (GET_MODE (op0) == BLKmode
434 || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
436 = get_best_mode (bitsize, bitnum, align * BITS_PER_UNIT, maxmode,
437 MEM_VOLATILE_P (op0));
439 bestmode = GET_MODE (op0);
441 if (bestmode == VOIDmode
442 || (SLOW_UNALIGNED_ACCESS && GET_MODE_SIZE (bestmode) > align))
445 /* Adjust address to point to the containing unit of that mode. */
446 unit = GET_MODE_BITSIZE (bestmode);
447 /* Compute offset as multiple of this unit, counting in bytes. */
448 offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
449 bitpos = bitnum % unit;
450 op0 = change_address (op0, bestmode,
451 plus_constant (XEXP (op0, 0), offset));
453 /* Fetch that unit, store the bitfield in it, then store the unit. */
454 tempreg = copy_to_reg (op0);
455 store_bit_field (tempreg, bitsize, bitpos, fieldmode, value,
457 emit_move_insn (op0, tempreg);
460 volatile_ok = save_volatile_ok;
462 /* Add OFFSET into OP0's address. */
463 if (GET_CODE (xop0) == MEM)
464 xop0 = change_address (xop0, byte_mode,
465 plus_constant (XEXP (xop0, 0), offset));
467 /* If xop0 is a register, we need it in MAXMODE
468 to make it acceptable to the format of insv. */
469 if (GET_CODE (xop0) == SUBREG)
470 /* We can't just change the mode, because this might clobber op0,
471 and we will need the original value of op0 if insv fails. */
472 xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_WORD (xop0));
473 if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
474 xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);
476 /* On big-endian machines, we count bits from the most significant.
477 If the bit field insn does not, we must invert. */
479 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
480 xbitpos = unit - bitsize - xbitpos;
482 /* We have been counting XBITPOS within UNIT.
483 Count instead within the size of the register. */
484 if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
485 xbitpos += GET_MODE_BITSIZE (maxmode) - unit;
487 unit = GET_MODE_BITSIZE (maxmode);
489 /* Convert VALUE to maxmode (which insv insn wants) in VALUE1. */
491 if (GET_MODE (value) != maxmode)
493 if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
495 /* Optimization: Don't bother really extending VALUE
496 if it has all the bits we will actually use. However,
497 if we must narrow it, be sure we do it correctly. */
499 if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
501 /* Avoid making subreg of a subreg, or of a mem. */
502 if (GET_CODE (value1) != REG)
503 value1 = copy_to_reg (value1);
504 value1 = gen_rtx_SUBREG (maxmode, value1, 0);
507 value1 = gen_lowpart (maxmode, value1);
509 else if (!CONSTANT_P (value))
510 /* Parse phase is supposed to make VALUE's data type
511 match that of the component reference, which is a type
512 at least as wide as the field; so VALUE should have
513 a mode that corresponds to that type. */
517 /* If this machine's insv insists on a register,
518 get VALUE1 into a register. */
519 if (! ((*insn_operand_predicate[(int) CODE_FOR_insv][3])
521 value1 = force_reg (maxmode, value1);
/* All operands are ready: emit the insv pattern itself.  */
523 pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
528 delete_insns_since (last);
529 store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
535 /* Insv is not available; store using shifts and boolean ops. */
536 store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
540 /* Use shifts and boolean operations to store VALUE
541 into a bit field of width BITSIZE
542 in a memory location specified by OP0 except offset by OFFSET bytes.
543 (OFFSET must be 0 if OP0 is a register.)
544 The field starts at position BITPOS within the byte.
545 (If OP0 is a register, it may be a full word or a narrower mode,
546 but BITPOS still counts within a full word,
547 which is significant on bigendian machines.)
548 STRUCT_ALIGN is the alignment the structure is known to have (in bytes).
550 Note that protect_from_queue has already been done on OP0 and VALUE. */
/* Shift-and-mask implementation of a bit-field store; see the
   commentary immediately above this function for the parameter
   contract.  */
553 store_fixed_bit_field (op0, offset, bitsize, bitpos, value, struct_align)
555 register int offset, bitsize, bitpos;
559 register enum machine_mode mode;
560 int total_bits = BITS_PER_WORD;
565 if (! SLOW_UNALIGNED_ACCESS)
566 struct_align = BIGGEST_ALIGNMENT / BITS_PER_UNIT;
568 /* There is a case not handled here:
569 a structure with a known alignment of just a halfword
570 and a field split across two aligned halfwords within the structure.
571 Or likewise a structure with a known alignment of just a byte
572 and a field split across two bytes.
573 Such cases are not supposed to be able to occur. */
575 if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
579 /* Special treatment for a bit field split across two registers. */
580 if (bitsize + bitpos > BITS_PER_WORD)
582 store_split_bit_field (op0, bitsize, bitpos,
583 value, BITS_PER_WORD);
589 /* Get the proper mode to use for this field. We want a mode that
590 includes the entire field. If such a mode would be larger than
591 a word, we won't be doing the extraction the normal way. */
593 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
594 struct_align * BITS_PER_UNIT, word_mode,
595 GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));
597 if (mode == VOIDmode)
599 /* The only way this should occur is if the field spans word
   boundaries.  */
601 store_split_bit_field (op0,
602 bitsize, bitpos + offset * BITS_PER_UNIT,
603 value, struct_align);
607 total_bits = GET_MODE_BITSIZE (mode);
609 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
610 be in the range 0 to total_bits-1, and put any excess bytes in
   OFFSET.  */
612 if (bitpos >= total_bits)
614 offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
615 bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
619 /* Get ref to an aligned byte, halfword, or word containing the field.
620 Adjust BITPOS to be position within a word,
621 and OFFSET to be the offset of that word.
622 Then alter OP0 to refer to that word. */
623 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
624 offset -= (offset % (total_bits / BITS_PER_UNIT));
625 op0 = change_address (op0, mode,
626 plus_constant (XEXP (op0, 0), offset));
629 mode = GET_MODE (op0);
631 /* Now MODE is either some integral mode for a MEM as OP0,
632 or is a full-word for a REG as OP0. TOTAL_BITS corresponds.
633 The bit field is contained entirely within OP0.
634 BITPOS is the starting bit number within OP0.
635 (OP0's mode may actually be narrower than MODE.) */
637 if (BYTES_BIG_ENDIAN)
638 /* BITPOS is the distance between our msb
639 and that of the containing datum.
640 Convert it to the distance from the lsb. */
641 bitpos = total_bits - bitsize - bitpos;
643 /* Now BITPOS is always the distance between our lsb
   and that of OP0.  */
646 /* Shift VALUE left by BITPOS bits. If VALUE is not constant,
647 we must first convert its mode to MODE. */
649 if (GET_CODE (value) == CONST_INT)
651 register HOST_WIDE_INT v = INTVAL (value);
653 if (bitsize < HOST_BITS_PER_WIDE_INT)
654 v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;
658 else if ((bitsize < HOST_BITS_PER_WIDE_INT
659 && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
660 || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
663 value = lshift_value (mode, value, bitpos, bitsize);
667 int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
668 && bitpos + bitsize != GET_MODE_BITSIZE (mode));
670 if (GET_MODE (value) != mode)
672 if ((GET_CODE (value) == REG || GET_CODE (value) == SUBREG)
673 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
674 value = gen_lowpart (mode, value);
676 value = convert_to_mode (mode, value, 1);
680 value = expand_binop (mode, and_optab, value,
681 mask_rtx (mode, 0, bitsize, 0),
682 NULL_RTX, 1, OPTAB_LIB_WIDEN);
684 value = expand_shift (LSHIFT_EXPR, mode, value,
685 build_int_2 (bitpos, 0), NULL_RTX, 1);
688 /* Now clear the chosen bits in OP0,
689 except that if VALUE is -1 we need not bother. */
691 subtarget = (GET_CODE (op0) == REG || ! flag_force_mem) ? op0 : 0;
695 temp = expand_binop (mode, and_optab, op0,
696 mask_rtx (mode, bitpos, bitsize, 1),
697 subtarget, 1, OPTAB_LIB_WIDEN);
703 /* Now logical-or VALUE into OP0, unless it is zero. */
706 temp = expand_binop (mode, ior_optab, temp, value,
707 subtarget, 1, OPTAB_LIB_WIDEN);
709 emit_move_insn (op0, temp);
712 /* Store a bit field that is split across multiple accessible memory objects.
714 OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
715 BITSIZE is the field width; BITPOS the position of its first bit
717 VALUE is the value to store.
718 ALIGN is the known alignment of OP0, measured in bytes.
719 This is also the size of the memory objects to be used.
721 This does not yet handle fields wider than BITS_PER_WORD. */
/* Store a bit field that crosses the boundary between accessible units,
   one aligned chunk at a time; see the commentary immediately above
   this function for the parameter contract.  */
724 store_split_bit_field (op0, bitsize, bitpos, value, align)
733 /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
   much at a time.  */
735 if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
736 unit = BITS_PER_WORD;
738 unit = MIN (align * BITS_PER_UNIT, BITS_PER_WORD);
740 /* If VALUE is a constant other than a CONST_INT, get it into a register in
741 WORD_MODE. If we can do this using gen_lowpart_common, do so. Note
742 that VALUE might be a floating-point constant. */
743 if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
745 rtx word = gen_lowpart_common (word_mode, value);
747 if (word && (value != word))
750 value = gen_lowpart_common (word_mode,
751 force_reg (GET_MODE (value) != VOIDmode
753 : word_mode, value));
755 else if (GET_CODE (value) == ADDRESSOF)
756 value = copy_to_reg (value);
/* Loop until all BITSIZE bits have been stored, one chunk per
   iteration.  */
758 while (bitsdone < bitsize)
765 offset = (bitpos + bitsdone) / unit;
766 thispos = (bitpos + bitsdone) % unit;
768 /* THISSIZE must not overrun a word boundary. Otherwise,
769 store_fixed_bit_field will call us again, and we will mutually
   recurse forever.  */
771 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
772 thissize = MIN (thissize, unit - thispos);
774 if (BYTES_BIG_ENDIAN)
778 /* We must do an endian conversion exactly the same way as it is
779 done in extract_bit_field, so that the two calls to
780 extract_fixed_bit_field will have comparable arguments. */
781 if (GET_CODE (value) != MEM || GET_MODE (value) == BLKmode)
782 total_bits = BITS_PER_WORD;
784 total_bits = GET_MODE_BITSIZE (GET_MODE (value));
786 /* Fetch successively less significant portions. */
787 if (GET_CODE (value) == CONST_INT)
788 part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
789 >> (bitsize - bitsdone - thissize))
790 & (((HOST_WIDE_INT) 1 << thissize) - 1));
792 /* The args are chosen so that the last part includes the
793 lsb. Give extract_bit_field the value it needs (with
794 endianness compensation) to fetch the piece we want.
796 ??? We have no idea what the alignment of VALUE is, so
797 we have to use a guess. */
799 = extract_fixed_bit_field
800 (word_mode, value, 0, thissize,
801 total_bits - bitsize + bitsdone, NULL_RTX, 1,
802 GET_MODE (value) == VOIDmode
804 : (GET_MODE (value) == BLKmode
806 : GET_MODE_ALIGNMENT (GET_MODE (value)) / BITS_PER_UNIT));
810 /* Fetch successively more significant portions. */
811 if (GET_CODE (value) == CONST_INT)
812 part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
814 & (((HOST_WIDE_INT) 1 << thissize) - 1));
817 = extract_fixed_bit_field
818 (word_mode, value, 0, thissize, bitsdone, NULL_RTX, 1,
819 GET_MODE (value) == VOIDmode
821 : (GET_MODE (value) == BLKmode
823 : GET_MODE_ALIGNMENT (GET_MODE (value)) / BITS_PER_UNIT));
826 /* If OP0 is a register, then handle OFFSET here.
828 When handling multiword bitfields, extract_bit_field may pass
829 down a word_mode SUBREG of a larger REG for a bitfield that actually
830 crosses a word boundary. Thus, for a SUBREG, we must find
831 the current word starting from the base register. */
832 if (GET_CODE (op0) == SUBREG)
834 word = operand_subword_force (SUBREG_REG (op0),
835 SUBREG_WORD (op0) + offset,
836 GET_MODE (SUBREG_REG (op0)));
839 else if (GET_CODE (op0) == REG)
841 word = operand_subword_force (op0, offset, GET_MODE (op0));
847 /* OFFSET is in UNITs, and UNIT is in bits.
848 store_fixed_bit_field wants offset in bytes. */
849 store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT,
850 thissize, thispos, part, align);
851 bitsdone += thissize;
855 /* Generate code to extract a byte-field from STR_RTX
856 containing BITSIZE bits, starting at BITNUM,
857 and put it in TARGET if possible (if TARGET is nonzero).
858 Regardless of TARGET, we return the rtx for where the value is placed.
861 STR_RTX is the structure containing the byte (a REG or MEM).
862 UNSIGNEDP is nonzero if this is an unsigned bit field.
863 MODE is the natural mode of the field value once extracted.
864 TMODE is the mode the caller would like the value to have;
865 but the value may be returned with type MODE instead.
867 ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
868 TOTAL_SIZE is the size in bytes of the containing structure,
871 If a TARGET is specified and we can store in it at no extra cost,
872 we do so, and return TARGET.
873 Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
874 if they are equally easy. */
877 extract_bit_field (str_rtx, bitsize, bitnum, unsignedp,
878 target, mode, tmode, align, total_size)
880 register int bitsize;
884 enum machine_mode mode, tmode;
888 int unit = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
889 register int offset = bitnum / unit;
890 register int bitpos = bitnum % unit;
891 register rtx op0 = str_rtx;
892 rtx spec_target = target;
893 rtx spec_target_subreg = 0;
895 /* Discount the part of the structure before the desired byte.
896 We need to know how many bytes are safe to reference after it. */
898 total_size -= (bitpos / BIGGEST_ALIGNMENT
899 * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));
901 if (tmode == VOIDmode)
903 while (GET_CODE (op0) == SUBREG)
905 int outer_size = GET_MODE_BITSIZE (GET_MODE (op0));
906 int inner_size = GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)));
908 offset += SUBREG_WORD (op0);
910 inner_size = MIN (inner_size, BITS_PER_WORD);
912 if (BYTES_BIG_ENDIAN && (outer_size < inner_size))
914 bitpos += inner_size - outer_size;
917 offset += (bitpos / unit);
922 op0 = SUBREG_REG (op0);
925 /* ??? We currently assume TARGET is at least as big as BITSIZE.
926 If that's wrong, the solution is to test for it and set TARGET to 0
929 /* If OP0 is a register, BITPOS must count within a word.
930 But as we have it, it counts within whatever size OP0 now has.
931 On a bigendian machine, these are not the same, so convert. */
933 && GET_CODE (op0) != MEM
934 && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
935 bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
937 /* Extracting a full-word or multi-word value
938 from a structure in a register or aligned memory.
939 This can be done with just SUBREG.
940 So too extracting a subword value in
941 the least significant part of the register. */
943 if (((GET_CODE (op0) == REG
944 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
945 GET_MODE_BITSIZE (GET_MODE (op0))))
946 || (GET_CODE (op0) == MEM
947 && (! SLOW_UNALIGNED_ACCESS
948 || (offset * BITS_PER_UNIT % bitsize == 0
949 && align * BITS_PER_UNIT % bitsize == 0))))
950 && ((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
951 && bitpos % BITS_PER_WORD == 0)
952 || (mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0) != BLKmode
954 ? bitpos + bitsize == BITS_PER_WORD
957 enum machine_mode mode1
958 = mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0);
960 if (mode1 != GET_MODE (op0))
962 if (GET_CODE (op0) == REG)
963 op0 = gen_rtx_SUBREG (mode1, op0, offset);
965 op0 = change_address (op0, mode1,
966 plus_constant (XEXP (op0, 0), offset));
969 return convert_to_mode (tmode, op0, unsignedp);
973 /* Handle fields bigger than a word. */
975 if (bitsize > BITS_PER_WORD)
977 /* Here we transfer the words of the field
978 in the order least significant first.
979 This is because the most significant word is the one which may
980 be less than full. */
982 int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
985 if (target == 0 || GET_CODE (target) != REG)
986 target = gen_reg_rtx (mode);
988 /* Indicate for flow that the entire target reg is being set. */
989 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
991 for (i = 0; i < nwords; i++)
993 /* If I is 0, use the low-order word in both field and target;
994 if I is 1, use the next to lowest word; and so on. */
995 /* Word number in TARGET to use. */
996 int wordnum = (WORDS_BIG_ENDIAN
997 ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
999 /* Offset from start of field in OP0. */
1000 int bit_offset = (WORDS_BIG_ENDIAN
1001 ? MAX (0, bitsize - (i + 1) * BITS_PER_WORD)
1002 : i * BITS_PER_WORD);
1003 rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
1005 = extract_bit_field (op0, MIN (BITS_PER_WORD,
1006 bitsize - i * BITS_PER_WORD),
1007 bitnum + bit_offset,
1008 1, target_part, mode, word_mode,
1011 if (target_part == 0)
1014 if (result_part != target_part)
1015 emit_move_insn (target_part, result_part);
1020 /* Unless we've filled TARGET, the upper regs in a multi-reg value
1021 need to be zero'd out. */
1022 if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
1026 total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
1027 for (i = nwords; i < total_words; i++)
1029 int wordnum = WORDS_BIG_ENDIAN ? total_words - i - 1 : i;
1030 rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
1031 emit_move_insn (target_part, const0_rtx);
1037 /* Signed bit field: sign-extend with two arithmetic shifts. */
1038 target = expand_shift (LSHIFT_EXPR, mode, target,
1039 build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
1041 return expand_shift (RSHIFT_EXPR, mode, target,
1042 build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
1046 /* From here on we know the desired field is smaller than a word
1047 so we can assume it is an integer. So we can safely extract it as one
1048 size of integer, if necessary, and then truncate or extend
1049 to the size that is wanted. */
1051 /* OFFSET is the number of words or bytes (UNIT says which)
1052 from STR_RTX to the first word or byte containing part of the field. */
1054 if (GET_CODE (op0) == REG)
1057 || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
1058 op0 = gen_rtx_SUBREG (TYPE_MODE (type_for_size (BITS_PER_WORD, 0)),
1064 op0 = protect_from_queue (str_rtx, 1);
1067 /* Now OFFSET is nonzero only for memory operands. */
1073 && (GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extzv][0])
1075 && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
1076 && (bitsize + bitpos
1077 > GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extzv][0]))))
1079 int xbitpos = bitpos, xoffset = offset;
1080 rtx bitsize_rtx, bitpos_rtx;
1081 rtx last = get_last_insn ();
1083 rtx xtarget = target;
1084 rtx xspec_target = spec_target;
1085 rtx xspec_target_subreg = spec_target_subreg;
1087 enum machine_mode maxmode
1088 = insn_operand_mode[(int) CODE_FOR_extzv][0];
1090 if (GET_CODE (xop0) == MEM)
1092 int save_volatile_ok = volatile_ok;
1095 /* Is the memory operand acceptable? */
1096 if (! ((*insn_operand_predicate[(int) CODE_FOR_extzv][1])
1097 (xop0, GET_MODE (xop0))))
1099 /* No, load into a reg and extract from there. */
1100 enum machine_mode bestmode;
1102 /* Get the mode to use for inserting into this field. If
1103 OP0 is BLKmode, get the smallest mode consistent with the
1104 alignment. If OP0 is a non-BLKmode object that is no
1105 wider than MAXMODE, use its mode. Otherwise, use the
1106 smallest mode containing the field. */
1108 if (GET_MODE (xop0) == BLKmode
1109 || (GET_MODE_SIZE (GET_MODE (op0))
1110 > GET_MODE_SIZE (maxmode)))
1111 bestmode = get_best_mode (bitsize, bitnum,
1112 align * BITS_PER_UNIT, maxmode,
1113 MEM_VOLATILE_P (xop0));
1115 bestmode = GET_MODE (xop0);
1117 if (bestmode == VOIDmode
1118 || (SLOW_UNALIGNED_ACCESS && GET_MODE_SIZE (bestmode) > align))
1121 /* Compute offset as multiple of this unit,
1122 counting in bytes. */
1123 unit = GET_MODE_BITSIZE (bestmode);
1124 xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
1125 xbitpos = bitnum % unit;
1126 xop0 = change_address (xop0, bestmode,
1127 plus_constant (XEXP (xop0, 0),
1129 /* Fetch it to a register in that size. */
1130 xop0 = force_reg (bestmode, xop0);
1132 /* XBITPOS counts within UNIT, which is what is expected. */
1135 /* Get ref to first byte containing part of the field. */
1136 xop0 = change_address (xop0, byte_mode,
1137 plus_constant (XEXP (xop0, 0), xoffset));
1139 volatile_ok = save_volatile_ok;
1142 /* If op0 is a register, we need it in MAXMODE (which is usually
1143 SImode) to make it acceptable to the format of extzv. */
1144 if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
1146 if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
1147 xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);
1149 /* On big-endian machines, we count bits from the most significant.
1150 If the bit field insn does not, we must invert. */
1151 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
1152 xbitpos = unit - bitsize - xbitpos;
1154 /* Now convert from counting within UNIT to counting in MAXMODE. */
1155 if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
1156 xbitpos += GET_MODE_BITSIZE (maxmode) - unit;
1158 unit = GET_MODE_BITSIZE (maxmode);
1161 || (flag_force_mem && GET_CODE (xtarget) == MEM))
1162 xtarget = xspec_target = gen_reg_rtx (tmode);
1164 if (GET_MODE (xtarget) != maxmode)
1166 if (GET_CODE (xtarget) == REG)
1168 int wider = (GET_MODE_SIZE (maxmode)
1169 > GET_MODE_SIZE (GET_MODE (xtarget)));
1170 xtarget = gen_lowpart (maxmode, xtarget);
1172 xspec_target_subreg = xtarget;
1175 xtarget = gen_reg_rtx (maxmode);
1178 /* If this machine's extzv insists on a register target,
1179 make sure we have one. */
1180 if (! ((*insn_operand_predicate[(int) CODE_FOR_extzv][0])
1181 (xtarget, maxmode)))
1182 xtarget = gen_reg_rtx (maxmode);
1184 bitsize_rtx = GEN_INT (bitsize);
1185 bitpos_rtx = GEN_INT (xbitpos);
1187 pat = gen_extzv (protect_from_queue (xtarget, 1),
1188 xop0, bitsize_rtx, bitpos_rtx);
1193 spec_target = xspec_target;
1194 spec_target_subreg = xspec_target_subreg;
1198 delete_insns_since (last);
1199 target = extract_fixed_bit_field (tmode, op0, offset, bitsize,
1200 bitpos, target, 1, align);
1206 target = extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
1213 && (GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extv][0])
1215 && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
1216 && (bitsize + bitpos
1217 > GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extv][0]))))
1219 int xbitpos = bitpos, xoffset = offset;
1220 rtx bitsize_rtx, bitpos_rtx;
1221 rtx last = get_last_insn ();
1222 rtx xop0 = op0, xtarget = target;
1223 rtx xspec_target = spec_target;
1224 rtx xspec_target_subreg = spec_target_subreg;
1226 enum machine_mode maxmode
1227 = insn_operand_mode[(int) CODE_FOR_extv][0];
1229 if (GET_CODE (xop0) == MEM)
1231 /* Is the memory operand acceptable? */
1232 if (! ((*insn_operand_predicate[(int) CODE_FOR_extv][1])
1233 (xop0, GET_MODE (xop0))))
1235 /* No, load into a reg and extract from there. */
1236 enum machine_mode bestmode;
1238 /* Get the mode to use for inserting into this field. If
1239 OP0 is BLKmode, get the smallest mode consistent with the
1240 alignment. If OP0 is a non-BLKmode object that is no
1241 wider than MAXMODE, use its mode. Otherwise, use the
1242 smallest mode containing the field. */
1244 if (GET_MODE (xop0) == BLKmode
1245 || (GET_MODE_SIZE (GET_MODE (op0))
1246 > GET_MODE_SIZE (maxmode)))
1247 bestmode = get_best_mode (bitsize, bitnum,
1248 align * BITS_PER_UNIT, maxmode,
1249 MEM_VOLATILE_P (xop0));
1251 bestmode = GET_MODE (xop0);
1253 if (bestmode == VOIDmode
1254 || (SLOW_UNALIGNED_ACCESS && GET_MODE_SIZE (bestmode) > align))
1257 /* Compute offset as multiple of this unit,
1258 counting in bytes. */
1259 unit = GET_MODE_BITSIZE (bestmode);
1260 xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
1261 xbitpos = bitnum % unit;
1262 xop0 = change_address (xop0, bestmode,
1263 plus_constant (XEXP (xop0, 0),
1265 /* Fetch it to a register in that size. */
1266 xop0 = force_reg (bestmode, xop0);
1268 /* XBITPOS counts within UNIT, which is what is expected. */
1271 /* Get ref to first byte containing part of the field. */
1272 xop0 = change_address (xop0, byte_mode,
1273 plus_constant (XEXP (xop0, 0), xoffset));
1276 /* If op0 is a register, we need it in MAXMODE (which is usually
1277 SImode) to make it acceptable to the format of extv. */
1278 if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
1280 if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
1281 xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);
1283 /* On big-endian machines, we count bits from the most significant.
1284 If the bit field insn does not, we must invert. */
1285 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
1286 xbitpos = unit - bitsize - xbitpos;
1288 /* XBITPOS counts within a size of UNIT.
1289 Adjust to count within a size of MAXMODE. */
1290 if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
1291 xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);
1293 unit = GET_MODE_BITSIZE (maxmode);
1296 || (flag_force_mem && GET_CODE (xtarget) == MEM))
1297 xtarget = xspec_target = gen_reg_rtx (tmode);
1299 if (GET_MODE (xtarget) != maxmode)
1301 if (GET_CODE (xtarget) == REG)
1303 int wider = (GET_MODE_SIZE (maxmode)
1304 > GET_MODE_SIZE (GET_MODE (xtarget)));
1305 xtarget = gen_lowpart (maxmode, xtarget);
1307 xspec_target_subreg = xtarget;
1310 xtarget = gen_reg_rtx (maxmode);
1313 /* If this machine's extv insists on a register target,
1314 make sure we have one. */
1315 if (! ((*insn_operand_predicate[(int) CODE_FOR_extv][0])
1316 (xtarget, maxmode)))
1317 xtarget = gen_reg_rtx (maxmode);
1319 bitsize_rtx = GEN_INT (bitsize);
1320 bitpos_rtx = GEN_INT (xbitpos);
1322 pat = gen_extv (protect_from_queue (xtarget, 1),
1323 xop0, bitsize_rtx, bitpos_rtx);
1328 spec_target = xspec_target;
1329 spec_target_subreg = xspec_target_subreg;
1333 delete_insns_since (last);
1334 target = extract_fixed_bit_field (tmode, op0, offset, bitsize,
1335 bitpos, target, 0, align);
1341 target = extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
1344 if (target == spec_target)
1346 if (target == spec_target_subreg)
1348 if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
1350 /* If the target mode is floating-point, first convert to the
1351 integer mode of that size and then access it as a floating-point
1352 value via a SUBREG. */
1353 if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
1355 target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
1358 if (GET_CODE (target) != REG)
1359 target = copy_to_reg (target);
1360 return gen_rtx_SUBREG (tmode, target, 0);
1363 return convert_to_mode (tmode, target, unsignedp);
1368 /* Extract a bit field using shifts and boolean operations.
1369 Returns an rtx to represent the value.
1370 OP0 addresses a register (word) or memory (byte).
1371 BITPOS says which bit within the word or byte the bit field starts in.
1372 OFFSET says how many bytes farther the bit field starts;
1373 it is 0 if OP0 is a register.
1374 BITSIZE says how many bits long the bit field is.
1375 (If OP0 is a register, it may be narrower than a full word,
1376 but BITPOS still counts within a full word,
1377 which is significant on bigendian machines.)
1379 UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
1380 If TARGET is nonzero, attempts to store the value there
1381 and return TARGET, but this is not guaranteed.
1382 If TARGET is not used, create a pseudo-reg of mode TMODE for the value.
1384 ALIGN is the alignment that STR_RTX is known to have, measured in bytes. */
1387 extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
1388 target, unsignedp, align)
1389 enum machine_mode tmode;
1390 register rtx op0, target;
1391 register int offset, bitsize, bitpos;
1395 int total_bits = BITS_PER_WORD;
1396 enum machine_mode mode;
1398 if (GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
1400 /* Special treatment for a bit field split across two registers. */
1401 if (bitsize + bitpos > BITS_PER_WORD)
1402 return extract_split_bit_field (op0, bitsize, bitpos,
1407 /* Get the proper mode to use for this field. We want a mode that
1408 includes the entire field. If such a mode would be larger than
1409 a word, we won't be doing the extraction the normal way. */
1411 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
1412 align * BITS_PER_UNIT, word_mode,
1413 GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));
1415 if (mode == VOIDmode)
1416 /* The only way this should occur is if the field spans word
1418 return extract_split_bit_field (op0, bitsize,
1419 bitpos + offset * BITS_PER_UNIT,
1422 total_bits = GET_MODE_BITSIZE (mode);
1424 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
1425 be be in the range 0 to total_bits-1, and put any excess bytes in
1427 if (bitpos >= total_bits)
1429 offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
1430 bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
1434 /* Get ref to an aligned byte, halfword, or word containing the field.
1435 Adjust BITPOS to be position within a word,
1436 and OFFSET to be the offset of that word.
1437 Then alter OP0 to refer to that word. */
1438 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
1439 offset -= (offset % (total_bits / BITS_PER_UNIT));
1440 op0 = change_address (op0, mode,
1441 plus_constant (XEXP (op0, 0), offset));
1444 mode = GET_MODE (op0);
1446 if (BYTES_BIG_ENDIAN)
1448 /* BITPOS is the distance between our msb and that of OP0.
1449 Convert it to the distance from the lsb. */
1451 bitpos = total_bits - bitsize - bitpos;
1454 /* Now BITPOS is always the distance between the field's lsb and that of OP0.
1455 We have reduced the big-endian case to the little-endian case. */
1461 /* If the field does not already start at the lsb,
1462 shift it so it does. */
1463 tree amount = build_int_2 (bitpos, 0);
1464 /* Maybe propagate the target for the shift. */
1465 /* But not if we will return it--could confuse integrate.c. */
1466 rtx subtarget = (target != 0 && GET_CODE (target) == REG
1467 && !REG_FUNCTION_VALUE_P (target)
1469 if (tmode != mode) subtarget = 0;
1470 op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1472 /* Convert the value to the desired mode. */
1474 op0 = convert_to_mode (tmode, op0, 1);
1476 /* Unless the msb of the field used to be the msb when we shifted,
1477 mask out the upper bits. */
1479 if (GET_MODE_BITSIZE (mode) != bitpos + bitsize
1481 #ifdef SLOW_ZERO_EXTEND
1482 /* Always generate an `and' if
1483 we just zero-extended op0 and SLOW_ZERO_EXTEND, since it
1484 will combine fruitfully with the zero-extend. */
1489 return expand_binop (GET_MODE (op0), and_optab, op0,
1490 mask_rtx (GET_MODE (op0), 0, bitsize, 0),
1491 target, 1, OPTAB_LIB_WIDEN);
1495 /* To extract a signed bit-field, first shift its msb to the msb of the word,
1496 then arithmetic-shift its lsb to the lsb of the word. */
1497 op0 = force_reg (mode, op0);
1501 /* Find the narrowest integer mode that contains the field. */
1503 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1504 mode = GET_MODE_WIDER_MODE (mode))
1505 if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
1507 op0 = convert_to_mode (mode, op0, 0);
1511 if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
1513 tree amount = build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
1514 /* Maybe propagate the target for the shift. */
1515 /* But not if we will return the result--could confuse integrate.c. */
1516 rtx subtarget = (target != 0 && GET_CODE (target) == REG
1517 && ! REG_FUNCTION_VALUE_P (target)
1519 op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1522 return expand_shift (RSHIFT_EXPR, mode, op0,
1523 build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
1527 /* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
1528 of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
1529 complement of that if COMPLEMENT. The mask is truncated if
1530 necessary to the width of mode MODE. The mask is zero-extended if
1531 BITSIZE+BITPOS is too small for MODE. */
1534 mask_rtx (mode, bitpos, bitsize, complement)
1535 enum machine_mode mode;
1536 int bitpos, bitsize, complement;
1538 HOST_WIDE_INT masklow, maskhigh;
1540 if (bitpos < HOST_BITS_PER_WIDE_INT)
1541 masklow = (HOST_WIDE_INT) -1 << bitpos;
1545 if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
1546 masklow &= ((unsigned HOST_WIDE_INT) -1
1547 >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
1549 if (bitpos <= HOST_BITS_PER_WIDE_INT)
1552 maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);
1554 if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
1555 maskhigh &= ((unsigned HOST_WIDE_INT) -1
1556 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
1562 maskhigh = ~maskhigh;
1566 return immed_double_const (masklow, maskhigh, mode);
1569 /* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
1570 VALUE truncated to BITSIZE bits and then shifted left BITPOS bits. */
1573 lshift_value (mode, value, bitpos, bitsize)
1574 enum machine_mode mode;
1576 int bitpos, bitsize;
1578 unsigned HOST_WIDE_INT v = INTVAL (value);
1579 HOST_WIDE_INT low, high;
1581 if (bitsize < HOST_BITS_PER_WIDE_INT)
1582 v &= ~((HOST_WIDE_INT) -1 << bitsize);
1584 if (bitpos < HOST_BITS_PER_WIDE_INT)
1587 high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
1592 high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
1595 return immed_double_const (low, high, mode);
1598 /* Extract a bit field that is split across two words
1599 and return an RTX for the result.
1601 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1602 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
1603 UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend.
1605 ALIGN is the known alignment of OP0, measured in bytes.
1606 This is also the size of the memory objects to be used. */
1609 extract_split_bit_field (op0, bitsize, bitpos, unsignedp, align)
1611 int bitsize, bitpos, unsignedp, align;
1618 /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
1620 if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
1621 unit = BITS_PER_WORD;
1623 unit = MIN (align * BITS_PER_UNIT, BITS_PER_WORD);
1625 while (bitsdone < bitsize)
1632 offset = (bitpos + bitsdone) / unit;
1633 thispos = (bitpos + bitsdone) % unit;
1635 /* THISSIZE must not overrun a word boundary. Otherwise,
1636 extract_fixed_bit_field will call us again, and we will mutually
1638 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1639 thissize = MIN (thissize, unit - thispos);
1641 /* If OP0 is a register, then handle OFFSET here.
1643 When handling multiword bitfields, extract_bit_field may pass
1644 down a word_mode SUBREG of a larger REG for a bitfield that actually
1645 crosses a word boundary. Thus, for a SUBREG, we must find
1646 the current word starting from the base register. */
1647 if (GET_CODE (op0) == SUBREG)
1649 word = operand_subword_force (SUBREG_REG (op0),
1650 SUBREG_WORD (op0) + offset,
1651 GET_MODE (SUBREG_REG (op0)));
1654 else if (GET_CODE (op0) == REG)
1656 word = operand_subword_force (op0, offset, GET_MODE (op0));
1662 /* Extract the parts in bit-counting order,
1663 whose meaning is determined by BYTES_PER_UNIT.
1664 OFFSET is in UNITs, and UNIT is in bits.
1665 extract_fixed_bit_field wants offset in bytes. */
1666 part = extract_fixed_bit_field (word_mode, word,
1667 offset * unit / BITS_PER_UNIT,
1668 thissize, thispos, 0, 1, align);
1669 bitsdone += thissize;
1671 /* Shift this part into place for the result. */
1672 if (BYTES_BIG_ENDIAN)
1674 if (bitsize != bitsdone)
1675 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1676 build_int_2 (bitsize - bitsdone, 0), 0, 1);
1680 if (bitsdone != thissize)
1681 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1682 build_int_2 (bitsdone - thissize, 0), 0, 1);
1688 /* Combine the parts with bitwise or. This works
1689 because we extracted each part as an unsigned bit field. */
1690 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
1696 /* Unsigned bit field: we are done. */
1699 /* Signed bit field: sign-extend with two arithmetic shifts. */
1700 result = expand_shift (LSHIFT_EXPR, word_mode, result,
1701 build_int_2 (BITS_PER_WORD - bitsize, 0),
1703 return expand_shift (RSHIFT_EXPR, word_mode, result,
1704 build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
1707 /* Add INC into TARGET. */
1710 expand_inc (target, inc)
1713 rtx value = expand_binop (GET_MODE (target), add_optab,
1715 target, 0, OPTAB_LIB_WIDEN);
1716 if (value != target)
1717 emit_move_insn (target, value);
1720 /* Subtract DEC from TARGET. */
1723 expand_dec (target, dec)
1726 rtx value = expand_binop (GET_MODE (target), sub_optab,
1728 target, 0, OPTAB_LIB_WIDEN);
1729 if (value != target)
1730 emit_move_insn (target, value);
1733 /* Output a shift instruction for expression code CODE,
1734 with SHIFTED being the rtx for the value to shift,
1735 and AMOUNT the tree for the amount to shift by.
1736 Store the result in the rtx TARGET, if that is convenient.
1737 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
1738 Return the rtx for where the value is. */
1741 expand_shift (code, mode, shifted, amount, target, unsignedp)
1742 enum tree_code code;
1743 register enum machine_mode mode;
1746 register rtx target;
1749 register rtx op1, temp = 0;
1750 register int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
1751 register int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
1754 /* Previously detected shift-counts computed by NEGATE_EXPR
1755 and shifted in the other direction; but that does not work
1758 op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);
1760 #ifdef SHIFT_COUNT_TRUNCATED
1761 if (SHIFT_COUNT_TRUNCATED)
1763 if (GET_CODE (op1) == CONST_INT
1764 && (unsigned HOST_WIDE_INT) INTVAL (op1) >= GET_MODE_BITSIZE (mode))
1765 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
1766 % GET_MODE_BITSIZE (mode));
1767 else if (GET_CODE (op1) == SUBREG
1768 && SUBREG_WORD (op1) == 0)
1769 op1 = SUBREG_REG (op1);
1773 if (op1 == const0_rtx)
1776 for (try = 0; temp == 0 && try < 3; try++)
1778 enum optab_methods methods;
1781 methods = OPTAB_DIRECT;
1783 methods = OPTAB_WIDEN;
1785 methods = OPTAB_LIB_WIDEN;
1789 /* Widening does not work for rotation. */
1790 if (methods == OPTAB_WIDEN)
1792 else if (methods == OPTAB_LIB_WIDEN)
1794 /* If we have been unable to open-code this by a rotation,
1795 do it as the IOR of two shifts. I.e., to rotate A
1796 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
1797 where C is the bitsize of A.
1799 It is theoretically possible that the target machine might
1800 not be able to perform either shift and hence we would
1801 be making two libcalls rather than just the one for the
1802 shift (similarly if IOR could not be done). We will allow
1803 this extremely unlikely lossage to avoid complicating the
1806 rtx subtarget = target == shifted ? 0 : target;
1808 tree type = TREE_TYPE (amount);
1809 tree new_amount = make_tree (type, op1);
1811 = fold (build (MINUS_EXPR, type,
1813 build_int_2 (GET_MODE_BITSIZE (mode),
1817 shifted = force_reg (mode, shifted);
1819 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
1820 mode, shifted, new_amount, subtarget, 1);
1821 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
1822 mode, shifted, other_amount, 0, 1);
1823 return expand_binop (mode, ior_optab, temp, temp1, target,
1824 unsignedp, methods);
1827 temp = expand_binop (mode,
1828 left ? rotl_optab : rotr_optab,
1829 shifted, op1, target, unsignedp, methods);
1831 /* If we don't have the rotate, but we are rotating by a constant
1832 that is in range, try a rotate in the opposite direction. */
1834 if (temp == 0 && GET_CODE (op1) == CONST_INT
1835 && INTVAL (op1) > 0 && INTVAL (op1) < GET_MODE_BITSIZE (mode))
1836 temp = expand_binop (mode,
1837 left ? rotr_optab : rotl_optab,
1839 GEN_INT (GET_MODE_BITSIZE (mode)
1841 target, unsignedp, methods);
1844 temp = expand_binop (mode,
1845 left ? ashl_optab : lshr_optab,
1846 shifted, op1, target, unsignedp, methods);
1848 /* Do arithmetic shifts.
1849 Also, if we are going to widen the operand, we can just as well
1850 use an arithmetic right-shift instead of a logical one. */
1851 if (temp == 0 && ! rotate
1852 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
1854 enum optab_methods methods1 = methods;
1856 /* If trying to widen a log shift to an arithmetic shift,
1857 don't accept an arithmetic shift of the same size. */
1859 methods1 = OPTAB_MUST_WIDEN;
1861 /* Arithmetic shift */
1863 temp = expand_binop (mode,
1864 left ? ashl_optab : ashr_optab,
1865 shifted, op1, target, unsignedp, methods1);
1868 /* We used to try extzv here for logical right shifts, but that was
1869 only useful for one machine, the VAX, and caused poor code
1870 generation there for lshrdi3, so the code was deleted and a
1871 define_expand for lshrsi3 was added to vax.md. */
1879 enum alg_code { alg_zero, alg_m, alg_shift,
1880 alg_add_t_m2, alg_sub_t_m2,
1881 alg_add_factor, alg_sub_factor,
1882 alg_add_t2_m, alg_sub_t2_m,
1883 alg_add, alg_subtract, alg_factor, alg_shiftop };
1885 /* This structure records a sequence of operations.
1886 `ops' is the number of operations recorded.
1887 `cost' is their total cost.
1888 The operations are stored in `op' and the corresponding
1889 logarithms of the integer coefficients in `log'.
1891 These are the operations:
1892 alg_zero total := 0;
1893 alg_m total := multiplicand;
1894 alg_shift total := total * coeff
1895 alg_add_t_m2 total := total + multiplicand * coeff;
1896 alg_sub_t_m2 total := total - multiplicand * coeff;
1897 alg_add_factor total := total * coeff + total;
1898 alg_sub_factor total := total * coeff - total;
1899 alg_add_t2_m total := total * coeff + multiplicand;
1900 alg_sub_t2_m total := total * coeff - multiplicand;
1902 The first operand must be either alg_zero or alg_m. */
1908 /* The size of the OP and LOG fields are not directly related to the
1909 word size, but the worst-case algorithms will be if we have few
1910 consecutive ones or zeros, i.e., a multiplicand like 10101010101...
1911 In that case we will generate shift-by-2, add, shift-by-2, add,...,
1912 in total wordsize operations. */
1913 enum alg_code op[MAX_BITS_PER_WORD];
1914 char log[MAX_BITS_PER_WORD];
1917 /* Compute and return the best algorithm for multiplying by T.
1918 The algorithm must cost less than COST_LIMIT.
1919 If retval.cost >= COST_LIMIT, no algorithm was found and all
1920 other fields of the returned struct are undefined. */
1923 synth_mult (alg_out, t, cost_limit)
1924 struct algorithm *alg_out;
1925 unsigned HOST_WIDE_INT t;
1929 struct algorithm *alg_in, *best_alg;
1931 unsigned HOST_WIDE_INT q;
1933 /* Indicate that no algorithm is yet found. If no algorithm
1934 is found, this value will be returned and indicate failure. */
1935 alg_out->cost = cost_limit;
1937 if (cost_limit <= 0)
1940 /* t == 1 can be done in zero cost. */
1945 alg_out->op[0] = alg_m;
1949 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
1953 if (zero_cost >= cost_limit)
1958 alg_out->cost = zero_cost;
1959 alg_out->op[0] = alg_zero;
1964 /* We'll be needing a couple extra algorithm structures now. */
1966 alg_in = (struct algorithm *)alloca (sizeof (struct algorithm));
1967 best_alg = (struct algorithm *)alloca (sizeof (struct algorithm));
1969 /* If we have a group of zero bits at the low-order part of T, try
1970 multiplying by the remaining bits and then doing a shift. */
1974 m = floor_log2 (t & -t); /* m = number of low zero bits */
1976 cost = shift_cost[m];
1977 synth_mult (alg_in, q, cost_limit - cost);
1979 cost += alg_in->cost;
1980 if (cost < cost_limit)
1982 struct algorithm *x;
1983 x = alg_in, alg_in = best_alg, best_alg = x;
1984 best_alg->log[best_alg->ops] = m;
1985 best_alg->op[best_alg->ops] = alg_shift;
1990 /* If we have an odd number, add or subtract one. */
1993 unsigned HOST_WIDE_INT w;
1995 for (w = 1; (w & t) != 0; w <<= 1)
1997 /* If T was -1, then W will be zero after the loop. This is another
1998 case where T ends with ...111. Handling this with (T + 1) and
1999 subtract 1 produces slightly better code and results in algorithm
2000 selection much faster than treating it like the ...0111 case
2004 /* Reject the case where t is 3.
2005 Thus we prefer addition in that case. */
2008 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2011 synth_mult (alg_in, t + 1, cost_limit - cost);
2013 cost += alg_in->cost;
2014 if (cost < cost_limit)
2016 struct algorithm *x;
2017 x = alg_in, alg_in = best_alg, best_alg = x;
2018 best_alg->log[best_alg->ops] = 0;
2019 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2025 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2028 synth_mult (alg_in, t - 1, cost_limit - cost);
2030 cost += alg_in->cost;
2031 if (cost < cost_limit)
2033 struct algorithm *x;
2034 x = alg_in, alg_in = best_alg, best_alg = x;
2035 best_alg->log[best_alg->ops] = 0;
2036 best_alg->op[best_alg->ops] = alg_add_t_m2;
2042 /* Look for factors of t of the form
2043 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2044 If we find such a factor, we can multiply by t using an algorithm that
2045 multiplies by q, shift the result by m and add/subtract it to itself.
2047 We search for large factors first and loop down, even if large factors
2048 are less probable than small; if we find a large factor we will find a
2049 good sequence quickly, and therefore be able to prune (by decreasing
2050 COST_LIMIT) the search. */
2052 for (m = floor_log2 (t - 1); m >= 2; m--)
2054 unsigned HOST_WIDE_INT d;
2056 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2057 if (t % d == 0 && t > d)
2059 cost = MIN (shiftadd_cost[m], add_cost + shift_cost[m]);
2060 synth_mult (alg_in, t / d, cost_limit - cost);
2062 cost += alg_in->cost;
2063 if (cost < cost_limit)
2065 struct algorithm *x;
2066 x = alg_in, alg_in = best_alg, best_alg = x;
2067 best_alg->log[best_alg->ops] = m;
2068 best_alg->op[best_alg->ops] = alg_add_factor;
2071 /* Other factors will have been taken care of in the recursion. */
2075 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2076 if (t % d == 0 && t > d)
2078 cost = MIN (shiftsub_cost[m], add_cost + shift_cost[m]);
2079 synth_mult (alg_in, t / d, cost_limit - cost);
2081 cost += alg_in->cost;
2082 if (cost < cost_limit)
2084 struct algorithm *x;
2085 x = alg_in, alg_in = best_alg, best_alg = x;
2086 best_alg->log[best_alg->ops] = m;
2087 best_alg->op[best_alg->ops] = alg_sub_factor;
2094 /* Try shift-and-add (load effective address) instructions,
2095 i.e. do a*3, a*5, a*9. */
2103 cost = shiftadd_cost[m];
2104 synth_mult (alg_in, (t - 1) >> m, cost_limit - cost);
2106 cost += alg_in->cost;
2107 if (cost < cost_limit)
2109 struct algorithm *x;
2110 x = alg_in, alg_in = best_alg, best_alg = x;
2111 best_alg->log[best_alg->ops] = m;
2112 best_alg->op[best_alg->ops] = alg_add_t2_m;
2122 cost = shiftsub_cost[m];
2123 synth_mult (alg_in, (t + 1) >> m, cost_limit - cost);
2125 cost += alg_in->cost;
2126 if (cost < cost_limit)
2128 struct algorithm *x;
2129 x = alg_in, alg_in = best_alg, best_alg = x;
2130 best_alg->log[best_alg->ops] = m;
2131 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2137 /* If cost_limit has not decreased since we stored it in alg_out->cost,
2138 we have not found any algorithm. */
2139 if (cost_limit == alg_out->cost)
2142 /* If we are getting a too long sequence for `struct algorithm'
2143 to record, make this search fail. */
2144 if (best_alg->ops == MAX_BITS_PER_WORD)
2147 /* Copy the algorithm from temporary space to the space at alg_out.
2148 We avoid using structure assignment because the majority of
2149 best_alg is normally undefined, and this is a critical function. */
2150 alg_out->ops = best_alg->ops + 1;
2151 alg_out->cost = cost_limit;
2152 bcopy ((char *) best_alg->op, (char *) alg_out->op,
2153 alg_out->ops * sizeof *alg_out->op);
2154 bcopy ((char *) best_alg->log, (char *) alg_out->log,
2155 alg_out->ops * sizeof *alg_out->log);
2158 /* Perform a multiplication and return an rtx for the result.
2159 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
2160 TARGET is a suggestion for where to store the result (an rtx).
2162 We check specially for a constant integer as OP1.
2163 If you want this check for OP0 as well, then before calling
2164 you should swap the two operands if OP0 would be constant. */
2167 expand_mult (mode, op0, op1, target, unsignedp)
2168 enum machine_mode mode;
2169 register rtx op0, op1, target;
2172 rtx const_op1 = op1;
2174 /* synth_mult does an `unsigned int' multiply. As long as the mode is
2175 less than or equal in size to `unsigned int' this doesn't matter.
2176 If the mode is larger than `unsigned int', then synth_mult works only
2177 if the constant value exactly fits in an `unsigned int' without any
2178 truncation. This means that multiplying by negative values does
2179 not work; results are off by 2^32 on a 32 bit machine. */
2181 /* If we are multiplying in DImode, it may still be a win
2182 to try to work with shifts and adds. */
2183 if (GET_CODE (op1) == CONST_DOUBLE
2184 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
2185 && HOST_BITS_PER_INT >= BITS_PER_WORD
2186 && CONST_DOUBLE_HIGH (op1) == 0)
2187 const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
2188 else if (HOST_BITS_PER_INT < GET_MODE_BITSIZE (mode)
2189 && GET_CODE (op1) == CONST_INT
2190 && INTVAL (op1) < 0)
2193 /* We used to test optimize here, on the grounds that it's better to
2194 produce a smaller program when -O is not used.
2195 But this causes such a terrible slowdown sometimes
2196 that it seems better to use synth_mult always. */
2198 if (const_op1 && GET_CODE (const_op1) == CONST_INT)
2200 struct algorithm alg;
2201 struct algorithm alg2;
2202 HOST_WIDE_INT val = INTVAL (op1);
2203 HOST_WIDE_INT val_so_far;
2206 enum {basic_variant, negate_variant, add_variant} variant = basic_variant;
2208 /* Try to do the computation three ways: multiply by the negative of OP1
2209 and then negate, do the multiplication directly, or do multiplication
2212 mult_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET);
2213 mult_cost = MIN (12 * add_cost, mult_cost);
2215 synth_mult (&alg, val, mult_cost);
2217 /* This works only if the inverted value actually fits in an
2219 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2221 synth_mult (&alg2, - val,
2222 (alg.cost < mult_cost ? alg.cost : mult_cost) - negate_cost);
2223 if (alg2.cost + negate_cost < alg.cost)
2224 alg = alg2, variant = negate_variant;
2227 /* This proves very useful for division-by-constant. */
2228 synth_mult (&alg2, val - 1,
2229 (alg.cost < mult_cost ? alg.cost : mult_cost) - add_cost);
2230 if (alg2.cost + add_cost < alg.cost)
2231 alg = alg2, variant = add_variant;
2233 if (alg.cost < mult_cost)
2235 /* We found something cheaper than a multiply insn. */
2239 op0 = protect_from_queue (op0, 0);
2241 /* Avoid referencing memory over and over.
2242 For speed, but also for correctness when mem is volatile. */
2243 if (GET_CODE (op0) == MEM)
2244 op0 = force_reg (mode, op0);
2246 /* ACCUM starts out either as OP0 or as a zero, depending on
2247 the first operation. */
2249 if (alg.op[0] == alg_zero)
2251 accum = copy_to_mode_reg (mode, const0_rtx);
2254 else if (alg.op[0] == alg_m)
2256 accum = copy_to_mode_reg (mode, op0);
2262 for (opno = 1; opno < alg.ops; opno++)
2264 int log = alg.log[opno];
2265 int preserve = preserve_subexpressions_p ();
2266 rtx shift_subtarget = preserve ? 0 : accum;
2268 = (opno == alg.ops - 1 && target != 0 && variant != add_variant
2270 rtx accum_target = preserve ? 0 : accum;
2272 switch (alg.op[opno])
2275 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2276 build_int_2 (log, 0), NULL_RTX, 0);
2281 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2282 build_int_2 (log, 0), NULL_RTX, 0);
2283 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2284 add_target ? add_target : accum_target);
2285 val_so_far += (HOST_WIDE_INT) 1 << log;
2289 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2290 build_int_2 (log, 0), NULL_RTX, 0);
2291 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2292 add_target ? add_target : accum_target);
2293 val_so_far -= (HOST_WIDE_INT) 1 << log;
2297 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2298 build_int_2 (log, 0), shift_subtarget,
2300 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2301 add_target ? add_target : accum_target);
2302 val_so_far = (val_so_far << log) + 1;
2306 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2307 build_int_2 (log, 0), shift_subtarget,
2309 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2310 add_target ? add_target : accum_target);
2311 val_so_far = (val_so_far << log) - 1;
2314 case alg_add_factor:
2315 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2316 build_int_2 (log, 0), NULL_RTX, 0);
2317 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2318 add_target ? add_target : accum_target);
2319 val_so_far += val_so_far << log;
2322 case alg_sub_factor:
2323 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2324 build_int_2 (log, 0), NULL_RTX, 0);
2325 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
2326 (add_target ? add_target
2327 : preserve ? 0 : tem));
2328 val_so_far = (val_so_far << log) - val_so_far;
2335 /* Write a REG_EQUAL note on the last insn so that we can cse
2336 multiplication sequences. */
2338 insn = get_last_insn ();
2340 = gen_rtx_EXPR_LIST (REG_EQUAL,
2341 gen_rtx_MULT (mode, op0, GEN_INT (val_so_far)),
2345 if (variant == negate_variant)
2347 val_so_far = - val_so_far;
2348 accum = expand_unop (mode, neg_optab, accum, target, 0);
2350 else if (variant == add_variant)
2352 val_so_far = val_so_far + 1;
2353 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
2356 if (val != val_so_far)
2363 /* This used to use umul_optab if unsigned, but for non-widening multiply
2364 there is no difference between signed and unsigned. */
2365 op0 = expand_binop (mode, smul_optab,
2366 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
2372 /* Return the smallest n such that 2**n >= X. */
2376 unsigned HOST_WIDE_INT x;
2378 return floor_log2 (x - 1) + 1;
2381 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
2382 replace division by D, and put the least significant N bits of the result
2383 in *MULTIPLIER_PTR and return the most significant bit.
2385 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
2386 needed precision is in PRECISION (should be <= N).
2388 PRECISION should be as small as possible so this function can choose
2389 multiplier more freely.
2391 The rounded-up logarithm of D is placed in *lgup_ptr. A shift count that
2392 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
2394 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
2395 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
2398 unsigned HOST_WIDE_INT
2399 choose_multiplier (d, n, precision, multiplier_ptr, post_shift_ptr, lgup_ptr)
2400 unsigned HOST_WIDE_INT d;
2403 unsigned HOST_WIDE_INT *multiplier_ptr;
2404 int *post_shift_ptr;
2407 unsigned HOST_WIDE_INT mhigh_hi, mhigh_lo;
2408 unsigned HOST_WIDE_INT mlow_hi, mlow_lo;
2409 int lgup, post_shift;
2411 unsigned HOST_WIDE_INT nh, nl, dummy1, dummy2;
2413 /* lgup = ceil(log2(divisor)); */
2414 lgup = ceil_log2 (d);
2420 pow2 = n + lgup - precision;
2422 if (pow == 2 * HOST_BITS_PER_WIDE_INT)
2424 /* We could handle this with some effort, but this case is much better
2425 handled directly with a scc insn, so rely on caller using that. */
2429 /* mlow = 2^(N + lgup)/d */
2430 if (pow >= HOST_BITS_PER_WIDE_INT)
2432 nh = (unsigned HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
2438 nl = (unsigned HOST_WIDE_INT) 1 << pow;
2440 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2441 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
2443 /* mhigh = (2^(N + lgup) + 2^N + lgup - precision)/d */
2444 if (pow2 >= HOST_BITS_PER_WIDE_INT)
2445 nh |= (unsigned HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
2447 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
2448 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2449 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
2451 if (mhigh_hi && nh - d >= d)
2453 if (mhigh_hi > 1 || mlow_hi > 1)
2455 /* assert that mlow < mhigh. */
2456 if (! (mlow_hi < mhigh_hi || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo)))
2459 /* If precision == N, then mlow, mhigh exceed 2^N
2460 (but they do not exceed 2^(N+1)). */
2462 /* Reduce to lowest terms */
2463 for (post_shift = lgup; post_shift > 0; post_shift--)
2465 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
2466 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
2476 *post_shift_ptr = post_shift;
2478 if (n < HOST_BITS_PER_WIDE_INT)
2480 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
2481 *multiplier_ptr = mhigh_lo & mask;
2482 return mhigh_lo >= mask;
2486 *multiplier_ptr = mhigh_lo;
2491 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
2492 congruent to 1 (mod 2**N). */
2494 static unsigned HOST_WIDE_INT
2496 unsigned HOST_WIDE_INT x;
2499 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
2501 /* The algorithm notes that the choice y = x satisfies
2502 x*y == 1 mod 2^3, since x is assumed odd.
2503 Each iteration doubles the number of bits of significance in y. */
2505 unsigned HOST_WIDE_INT mask;
2506 unsigned HOST_WIDE_INT y = x;
2509 mask = (n == HOST_BITS_PER_WIDE_INT
2510 ? ~(unsigned HOST_WIDE_INT) 0
2511 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
2515 y = y * (2 - x*y) & mask; /* Modulo 2^N */
2521 /* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
2522 flavor of OP0 and OP1. ADJ_OPERAND is already the high half of the
2523 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
2524 to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
2527 The result is put in TARGET if that is convenient.
2529 MODE is the mode of operation. */
2532 expand_mult_highpart_adjust (mode, adj_operand, op0, op1, target, unsignedp)
2533 enum machine_mode mode;
2534 register rtx adj_operand, op0, op1, target;
2538 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
2540 tem = expand_shift (RSHIFT_EXPR, mode, op0,
2541 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2543 tem = expand_and (tem, op1, NULL_RTX);
2545 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2548 tem = expand_shift (RSHIFT_EXPR, mode, op1,
2549 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2551 tem = expand_and (tem, op0, NULL_RTX);
2552 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2558 /* Emit code to multiply OP0 and CNST1, putting the high half of the result
2559 in TARGET if that is convenient, and return where the result is. If the
2560 operation can not be performed, 0 is returned.
2562 MODE is the mode of operation and result.
2564 UNSIGNEDP nonzero means unsigned multiply.
2566 MAX_COST is the total allowed cost for the expanded RTL. */
2569 expand_mult_highpart (mode, op0, cnst1, target, unsignedp, max_cost)
2570 enum machine_mode mode;
2571 register rtx op0, target;
2572 unsigned HOST_WIDE_INT cnst1;
2576 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
2577 optab mul_highpart_optab;
2580 int size = GET_MODE_BITSIZE (mode);
2583 /* We can't support modes wider than HOST_BITS_PER_INT. */
2584 if (size > HOST_BITS_PER_WIDE_INT)
2587 op1 = GEN_INT (cnst1);
2589 if (GET_MODE_BITSIZE (wider_mode) <= HOST_BITS_PER_INT)
2593 = immed_double_const (cnst1,
2596 : -(cnst1 >> (HOST_BITS_PER_WIDE_INT - 1))),
2599 /* expand_mult handles constant multiplication of word_mode
2600 or narrower. It does a poor job for large modes. */
2601 if (size < BITS_PER_WORD
2602 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2604 /* We have to do this, since expand_binop doesn't do conversion for
2605 multiply. Maybe change expand_binop to handle widening multiply? */
2606 op0 = convert_to_mode (wider_mode, op0, unsignedp);
2608 tem = expand_mult (wider_mode, op0, wide_op1, NULL_RTX, unsignedp);
2609 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2610 build_int_2 (size, 0), NULL_RTX, 1);
2611 return convert_modes (mode, wider_mode, tem, unsignedp);
2615 target = gen_reg_rtx (mode);
2617 /* Firstly, try using a multiplication insn that only generates the needed
2618 high part of the product, and in the sign flavor of unsignedp. */
2619 if (mul_highpart_cost[(int) mode] < max_cost)
2621 mul_highpart_optab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
2622 target = expand_binop (mode, mul_highpart_optab,
2623 op0, wide_op1, target, unsignedp, OPTAB_DIRECT);
2628 /* Secondly, same as above, but use sign flavor opposite of unsignedp.
2629 Need to adjust the result after the multiplication. */
2630 if (mul_highpart_cost[(int) mode] + 2 * shift_cost[size-1] + 4 * add_cost < max_cost)
2632 mul_highpart_optab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
2633 target = expand_binop (mode, mul_highpart_optab,
2634 op0, wide_op1, target, unsignedp, OPTAB_DIRECT);
2636 /* We used the wrong signedness. Adjust the result. */
2637 return expand_mult_highpart_adjust (mode, target, op0,
2638 op1, target, unsignedp);
2641 /* Try widening multiplication. */
2642 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
2643 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2644 && mul_widen_cost[(int) wider_mode] < max_cost)
2646 op1 = force_reg (mode, op1);
2650 /* Try widening the mode and perform a non-widening multiplication. */
2651 moptab = smul_optab;
2652 if (smul_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2653 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2659 /* Try widening multiplication of opposite signedness, and adjust. */
2660 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
2661 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2662 && (mul_widen_cost[(int) wider_mode]
2663 + 2 * shift_cost[size-1] + 4 * add_cost < max_cost))
2665 rtx regop1 = force_reg (mode, op1);
2666 tem = expand_binop (wider_mode, moptab, op0, regop1,
2667 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
2670 /* Extract the high half of the just generated product. */
2671 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2672 build_int_2 (size, 0), NULL_RTX, 1);
2673 tem = convert_modes (mode, wider_mode, tem, unsignedp);
2674 /* We used the wrong signedness. Adjust the result. */
2675 return expand_mult_highpart_adjust (mode, tem, op0, op1,
2683 /* Pass NULL_RTX as target since TARGET has wrong mode. */
2684 tem = expand_binop (wider_mode, moptab, op0, op1,
2685 NULL_RTX, unsignedp, OPTAB_WIDEN);
2689 /* Extract the high half of the just generated product. */
2690 if (mode == word_mode)
2692 return gen_highpart (mode, tem);
2696 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2697 build_int_2 (size, 0), NULL_RTX, 1);
2698 return convert_modes (mode, wider_mode, tem, unsignedp);
2702 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
2703 if that is convenient, and returning where the result is.
2704 You may request either the quotient or the remainder as the result;
2705 specify REM_FLAG nonzero to get the remainder.
2707 CODE is the expression code for which kind of division this is;
2708 it controls how rounding is done. MODE is the machine mode to use.
2709 UNSIGNEDP nonzero means do unsigned division. */
2711 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
2712 and then correct it by or'ing in missing high bits
2713 if result of ANDI is nonzero.
2714 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
2715 This could optimize to a bfexts instruction.
2716 But C doesn't use these operations, so their optimizations are
2719 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
2722 expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
2724 enum tree_code code;
2725 enum machine_mode mode;
2726 register rtx op0, op1, target;
2729 enum machine_mode compute_mode;
2730 register rtx tquotient;
2731 rtx quotient = 0, remainder = 0;
2735 optab optab1, optab2;
2736 int op1_is_constant, op1_is_pow2;
2737 int max_cost, extra_cost;
2739 op1_is_constant = GET_CODE (op1) == CONST_INT;
2740 op1_is_pow2 = (op1_is_constant
2741 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
2742 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
2745 This is the structure of expand_divmod:
2747 First comes code to fix up the operands so we can perform the operations
2748 correctly and efficiently.
2750 Second comes a switch statement with code specific for each rounding mode.
2751 For some special operands this code emits all RTL for the desired
2752 operation, for other cases, it generates only a quotient and stores it in
2753 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
2754 to indicate that it has not done anything.
2756 Last comes code that finishes the operation. If QUOTIENT is set and
2757 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
2758 QUOTIENT is not set, it is computed using trunc rounding.
2760 We try to generate special code for division and remainder when OP1 is a
2761 constant. If |OP1| = 2**n we can use shifts and some other fast
2762 operations. For other values of OP1, we compute a carefully selected
2763 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
2766 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
2767 half of the product. Different strategies for generating the product are
2768 implemented in expand_mult_highpart.
2770 If what we actually want is the remainder, we generate that by another
2771 by-constant multiplication and a subtraction. */
2773 /* We shouldn't be called with OP1 == const1_rtx, but some of the
2774 code below will malfunction if we are, so check here and handle
2775 the special case if so. */
2776 if (op1 == const1_rtx)
2777 return rem_flag ? const0_rtx : op0;
2780 /* Don't use the function value register as a target
2781 since we have to read it as well as write it,
2782 and function-inlining gets confused by this. */
2783 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
2784 /* Don't clobber an operand while doing a multi-step calculation. */
2785 || ((rem_flag || op1_is_constant)
2786 && (reg_mentioned_p (target, op0)
2787 || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
2788 || reg_mentioned_p (target, op1)
2789 || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM)))
2792 /* Get the mode in which to perform this computation. Normally it will
2793 be MODE, but sometimes we can't do the desired operation in MODE.
2794 If so, pick a wider mode in which we can do the operation. Convert
2795 to that mode at the start to avoid repeated conversions.
2797 First see what operations we need. These depend on the expression
2798 we are evaluating. (We assume that divxx3 insns exist under the
2799 same conditions that modxx3 insns and that these insns don't normally
2800 fail. If these assumptions are not correct, we may generate less
2801 efficient code in some cases.)
2803 Then see if we find a mode in which we can open-code that operation
2804 (either a division, modulus, or shift). Finally, check for the smallest
2805 mode for which we can do the operation with a library call. */
2807 /* We might want to refine this now that we have division-by-constant
2808 optimization. Since expand_mult_highpart tries so many variants, it is
2809 not straightforward to generalize this. Maybe we should make an array
2810 of possible modes in init_expmed? Save this for GCC 2.7. */
2812 optab1 = (op1_is_pow2 ? (unsignedp ? lshr_optab : ashr_optab)
2813 : (unsignedp ? udiv_optab : sdiv_optab));
2814 optab2 = (op1_is_pow2 ? optab1 : (unsignedp ? udivmod_optab : sdivmod_optab));
2816 for (compute_mode = mode; compute_mode != VOIDmode;
2817 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
2818 if (optab1->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing
2819 || optab2->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing)
2822 if (compute_mode == VOIDmode)
2823 for (compute_mode = mode; compute_mode != VOIDmode;
2824 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
2825 if (optab1->handlers[(int) compute_mode].libfunc
2826 || optab2->handlers[(int) compute_mode].libfunc)
2829 /* If we still couldn't find a mode, use MODE, but we'll probably abort
2831 if (compute_mode == VOIDmode)
2832 compute_mode = mode;
2834 if (target && GET_MODE (target) == compute_mode)
2837 tquotient = gen_reg_rtx (compute_mode);
2839 size = GET_MODE_BITSIZE (compute_mode);
2841 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
2842 (mode), and thereby get better code when OP1 is a constant. Do that
2843 later. It will require going over all usages of SIZE below. */
2844 size = GET_MODE_BITSIZE (mode);
2847 max_cost = div_cost[(int) compute_mode]
2848 - (rem_flag ? mul_cost[(int) compute_mode] + add_cost : 0);
2850 /* Now convert to the best mode to use. */
2851 if (compute_mode != mode)
2853 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
2854 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
2856 /* convert_modes may have placed op1 into a register, so we
2857 must recompute the following. */
2858 op1_is_constant = GET_CODE (op1) == CONST_INT;
2859 op1_is_pow2 = (op1_is_constant
2860 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
2862 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1)))))) ;
2865 /* If one of the operands is a volatile MEM, copy it into a register. */
2867 if (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
2868 op0 = force_reg (compute_mode, op0);
2869 if (GET_CODE (op1) == MEM && MEM_VOLATILE_P (op1))
2870 op1 = force_reg (compute_mode, op1);
2872 /* If we need the remainder or if OP1 is constant, we need to
2873 put OP0 in a register in case it has any queued subexpressions. */
2874 if (rem_flag || op1_is_constant)
2875 op0 = force_reg (compute_mode, op0);
2877 last = get_last_insn ();
2879 /* Promote floor rounding to trunc rounding for unsigned operations. */
2882 if (code == FLOOR_DIV_EXPR)
2883 code = TRUNC_DIV_EXPR;
2884 if (code == FLOOR_MOD_EXPR)
2885 code = TRUNC_MOD_EXPR;
2886 if (code == EXACT_DIV_EXPR && op1_is_pow2)
2887 code = TRUNC_DIV_EXPR;
2890 if (op1 != const0_rtx)
2893 case TRUNC_MOD_EXPR:
2894 case TRUNC_DIV_EXPR:
2895 if (op1_is_constant)
2899 unsigned HOST_WIDE_INT mh, ml;
2900 int pre_shift, post_shift;
2902 unsigned HOST_WIDE_INT d = INTVAL (op1);
2904 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
2906 pre_shift = floor_log2 (d);
2910 = expand_binop (compute_mode, and_optab, op0,
2911 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
2915 return gen_lowpart (mode, remainder);
2917 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
2918 build_int_2 (pre_shift, 0),
2921 else if (size <= HOST_BITS_PER_WIDE_INT)
2923 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
2925 /* Most significant bit of divisor is set; emit an scc
2927 quotient = emit_store_flag (tquotient, GEU, op0, op1,
2928 compute_mode, 1, 1);
2934 /* Find a suitable multiplier and right shift count
2935 instead of multiplying with D. */
2937 mh = choose_multiplier (d, size, size,
2938 &ml, &post_shift, &dummy);
2940 /* If the suggested multiplier is more than SIZE bits,
2941 we can do better for even divisors, using an
2942 initial right shift. */
2943 if (mh != 0 && (d & 1) == 0)
2945 pre_shift = floor_log2 (d & -d);
2946 mh = choose_multiplier (d >> pre_shift, size,
2948 &ml, &post_shift, &dummy);
2959 extra_cost = (shift_cost[post_shift - 1]
2960 + shift_cost[1] + 2 * add_cost);
2961 t1 = expand_mult_highpart (compute_mode, op0, ml,
2963 max_cost - extra_cost);
2966 t2 = force_operand (gen_rtx_MINUS (compute_mode,
2969 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
2970 build_int_2 (1, 0), NULL_RTX,1);
2971 t4 = force_operand (gen_rtx_PLUS (compute_mode,
2975 = expand_shift (RSHIFT_EXPR, compute_mode, t4,
2976 build_int_2 (post_shift - 1, 0),
2983 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
2984 build_int_2 (pre_shift, 0),
2986 extra_cost = (shift_cost[pre_shift]
2987 + shift_cost[post_shift]);
2988 t2 = expand_mult_highpart (compute_mode, t1, ml,
2990 max_cost - extra_cost);
2994 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
2995 build_int_2 (post_shift, 0),
3000 else /* Too wide mode to use tricky code */
3003 insn = get_last_insn ();
3005 && (set = single_set (insn)) != 0
3006 && SET_DEST (set) == quotient)
3008 = gen_rtx_EXPR_LIST (REG_EQUAL,
3009 gen_rtx_UDIV (compute_mode, op0, op1),
3012 else /* TRUNC_DIV, signed */
3014 unsigned HOST_WIDE_INT ml;
3015 int lgup, post_shift;
3016 HOST_WIDE_INT d = INTVAL (op1);
3017 unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;
3019 /* n rem d = n rem -d */
3020 if (rem_flag && d < 0)
3023 op1 = GEN_INT (abs_d);
3029 quotient = expand_unop (compute_mode, neg_optab, op0,
3031 else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
3033 /* This case is not handled correctly below. */
3034 quotient = emit_store_flag (tquotient, EQ, op0, op1,
3035 compute_mode, 1, 1);
3039 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
3040 && (rem_flag ? smod_pow2_cheap : sdiv_pow2_cheap))
3042 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
3044 lgup = floor_log2 (abs_d);
3045 if (abs_d != 2 && BRANCH_COST < 3)
3047 rtx label = gen_label_rtx ();
3050 t1 = copy_to_mode_reg (compute_mode, op0);
3051 emit_cmp_insn (t1, const0_rtx, GE,
3052 NULL_RTX, compute_mode, 0, 0);
3053 emit_jump_insn (gen_bge (label));
3054 expand_inc (t1, GEN_INT (abs_d - 1));
3056 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3057 build_int_2 (lgup, 0),
3063 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3064 build_int_2 (size - 1, 0),
3066 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3067 build_int_2 (size - lgup, 0),
3069 t3 = force_operand (gen_rtx_PLUS (compute_mode,
3072 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3073 build_int_2 (lgup, 0),
3077 /* We have computed OP0 / abs(OP1). If OP1 is negative, negate
3081 insn = get_last_insn ();
3083 && (set = single_set (insn)) != 0
3084 && SET_DEST (set) == quotient)
3086 = gen_rtx_EXPR_LIST (REG_EQUAL,
3087 gen_rtx_DIV (compute_mode,
3092 quotient = expand_unop (compute_mode, neg_optab,
3093 quotient, quotient, 0);
3096 else if (size <= HOST_BITS_PER_WIDE_INT)
3098 choose_multiplier (abs_d, size, size - 1,
3099 &ml, &post_shift, &lgup);
3100 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
3104 extra_cost = (shift_cost[post_shift]
3105 + shift_cost[size - 1] + add_cost);
3106 t1 = expand_mult_highpart (compute_mode, op0, ml,
3108 max_cost - extra_cost);
3111 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3112 build_int_2 (post_shift, 0), NULL_RTX, 0);
3113 t3 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3114 build_int_2 (size - 1, 0), NULL_RTX, 0);
3116 quotient = force_operand (gen_rtx_MINUS (compute_mode, t3, t2),
3119 quotient = force_operand (gen_rtx_MINUS (compute_mode, t2, t3),
3126 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
3127 extra_cost = (shift_cost[post_shift]
3128 + shift_cost[size - 1] + 2 * add_cost);
3129 t1 = expand_mult_highpart (compute_mode, op0, ml,
3131 max_cost - extra_cost);
3134 t2 = force_operand (gen_rtx_PLUS (compute_mode, t1, op0),
3136 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3137 build_int_2 (post_shift, 0), NULL_RTX, 0);
3138 t4 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3139 build_int_2 (size - 1, 0), NULL_RTX, 0);
3141 quotient = force_operand (gen_rtx_MINUS (compute_mode, t4, t3),
3144 quotient = force_operand (gen_rtx_MINUS (compute_mode, t3, t4),
3148 else /* Too wide mode to use tricky code */
3151 insn = get_last_insn ();
3153 && (set = single_set (insn)) != 0
3154 && SET_DEST (set) == quotient)
3156 = gen_rtx_EXPR_LIST (REG_EQUAL,
3157 gen_rtx_DIV (compute_mode, op0, op1),
3163 delete_insns_since (last);
3166 case FLOOR_DIV_EXPR:
3167 case FLOOR_MOD_EXPR:
3168 /* We will come here only for signed operations. */
3169 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3171 unsigned HOST_WIDE_INT mh, ml;
3172 int pre_shift, lgup, post_shift;
3173 HOST_WIDE_INT d = INTVAL (op1);
3177 /* We could just as easily deal with negative constants here,
3178 but it does not seem worth the trouble for GCC 2.6. */
3179 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3181 pre_shift = floor_log2 (d);
3184 remainder = expand_binop (compute_mode, and_optab, op0,
3185 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3186 remainder, 0, OPTAB_LIB_WIDEN);
3188 return gen_lowpart (mode, remainder);
3190 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3191 build_int_2 (pre_shift, 0),
3198 mh = choose_multiplier (d, size, size - 1,
3199 &ml, &post_shift, &lgup);
3203 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3204 build_int_2 (size - 1, 0), NULL_RTX, 0);
3205 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
3206 NULL_RTX, 0, OPTAB_WIDEN);
3207 extra_cost = (shift_cost[post_shift]
3208 + shift_cost[size - 1] + 2 * add_cost);
3209 t3 = expand_mult_highpart (compute_mode, t2, ml,
3211 max_cost - extra_cost);
3214 t4 = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3215 build_int_2 (post_shift, 0),
3217 quotient = expand_binop (compute_mode, xor_optab,
3218 t4, t1, tquotient, 0,
3225 rtx nsign, t1, t2, t3, t4;
3226 t1 = force_operand (gen_rtx_PLUS (compute_mode,
3227 op0, constm1_rtx), NULL_RTX);
3228 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
3230 nsign = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3231 build_int_2 (size - 1, 0), NULL_RTX, 0);
3232 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
3234 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
3239 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
3241 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3250 delete_insns_since (last);
3252 /* Try using an instruction that produces both the quotient and
3253 remainder, using truncation. We can easily compensate the quotient
3254 or remainder to get floor rounding, once we have the remainder.
3255 Notice that we compute also the final remainder value here,
3256 and return the result right away. */
3257 if (target == 0 || GET_MODE (target) != compute_mode)
3258 target = gen_reg_rtx (compute_mode);
3263 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3264 quotient = gen_reg_rtx (compute_mode);
3269 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3270 remainder = gen_reg_rtx (compute_mode);
3273 if (expand_twoval_binop (sdivmod_optab, op0, op1,
3274 quotient, remainder, 0))
3276 /* This could be computed with a branch-less sequence.
3277 Save that for later. */
3279 rtx label = gen_label_rtx ();
3280 emit_cmp_insn (remainder, const0_rtx, EQ, NULL_RTX,
3281 compute_mode, 0, 0);
3282 emit_jump_insn (gen_beq (label));
3283 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3284 NULL_RTX, 0, OPTAB_WIDEN);
3285 emit_cmp_insn (tem, const0_rtx, GE, NULL_RTX, compute_mode, 0, 0);
3286 emit_jump_insn (gen_bge (label));
3287 expand_dec (quotient, const1_rtx);
3288 expand_inc (remainder, op1);
3290 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3293 /* No luck with division elimination or divmod. Have to do it
3294 by conditionally adjusting op0 *and* the result. */
3296 rtx label1, label2, label3, label4, label5;
3300 quotient = gen_reg_rtx (compute_mode);
3301 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3302 label1 = gen_label_rtx ();
3303 label2 = gen_label_rtx ();
3304 label3 = gen_label_rtx ();
3305 label4 = gen_label_rtx ();
3306 label5 = gen_label_rtx ();
3307 emit_cmp_insn (op1, const0_rtx, LT, NULL_RTX, compute_mode, 0, 0);
3308 emit_jump_insn (gen_blt (label2));
3309 emit_cmp_insn (adjusted_op0, const0_rtx, LT, NULL_RTX,
3310 compute_mode, 0, 0);
3311 emit_jump_insn (gen_blt (label1));
3312 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3313 quotient, 0, OPTAB_LIB_WIDEN);
3314 if (tem != quotient)
3315 emit_move_insn (quotient, tem);
3316 emit_jump_insn (gen_jump (label5));
3318 emit_label (label1);
3319 expand_inc (adjusted_op0, const1_rtx);
3320 emit_jump_insn (gen_jump (label4));
3322 emit_label (label2);
3323 emit_cmp_insn (adjusted_op0, const0_rtx, GT, NULL_RTX,
3324 compute_mode, 0, 0);
3325 emit_jump_insn (gen_bgt (label3));
3326 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3327 quotient, 0, OPTAB_LIB_WIDEN);
3328 if (tem != quotient)
3329 emit_move_insn (quotient, tem);
3330 emit_jump_insn (gen_jump (label5));
3332 emit_label (label3);
3333 expand_dec (adjusted_op0, const1_rtx);
3334 emit_label (label4);
3335 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3336 quotient, 0, OPTAB_LIB_WIDEN);
3337 if (tem != quotient)
3338 emit_move_insn (quotient, tem);
3339 expand_dec (quotient, const1_rtx);
3340 emit_label (label5);
3348 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
3351 unsigned HOST_WIDE_INT d = INTVAL (op1);
3352 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3353 build_int_2 (floor_log2 (d), 0),
3355 t2 = expand_binop (compute_mode, and_optab, op0,
3357 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3358 t3 = gen_reg_rtx (compute_mode);
3359 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3360 compute_mode, 1, 1);
3364 lab = gen_label_rtx ();
3365 emit_cmp_insn (t2, const0_rtx, EQ, NULL_RTX,
3366 compute_mode, 0, 0);
3367 emit_jump_insn (gen_beq (lab));
3368 expand_inc (t1, const1_rtx);
3373 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3379 /* Try using an instruction that produces both the quotient and
3380 remainder, using truncation. We can easily compensate the
3381 quotient or remainder to get ceiling rounding, once we have the
3382 remainder. Notice that we compute also the final remainder
3383 value here, and return the result right away. */
3384 if (target == 0 || GET_MODE (target) != compute_mode)
3385 target = gen_reg_rtx (compute_mode);
3389 remainder = (GET_CODE (target) == REG
3390 ? target : gen_reg_rtx (compute_mode));
3391 quotient = gen_reg_rtx (compute_mode);
3395 quotient = (GET_CODE (target) == REG
3396 ? target : gen_reg_rtx (compute_mode));
3397 remainder = gen_reg_rtx (compute_mode);
3400 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
3403 /* This could be computed with a branch-less sequence.
3404 Save that for later. */
3405 rtx label = gen_label_rtx ();
3406 emit_cmp_insn (remainder, const0_rtx, EQ, NULL_RTX,
3407 compute_mode, 0, 0);
3408 emit_jump_insn (gen_beq (label));
3409 expand_inc (quotient, const1_rtx);
3410 expand_dec (remainder, op1);
3412 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3415 /* No luck with division elimination or divmod. Have to do it
3416 by conditionally adjusting op0 *and* the result. */
3419 rtx adjusted_op0, tem;
3421 quotient = gen_reg_rtx (compute_mode);
3422 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3423 label1 = gen_label_rtx ();
3424 label2 = gen_label_rtx ();
3425 emit_cmp_insn (adjusted_op0, const0_rtx, NE, NULL_RTX,
3426 compute_mode, 0, 0);
3427 emit_jump_insn (gen_bne (label1));
3428 emit_move_insn (quotient, const0_rtx);
3429 emit_jump_insn (gen_jump (label2));
3431 emit_label (label1);
3432 expand_dec (adjusted_op0, const1_rtx);
3433 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
3434 quotient, 1, OPTAB_LIB_WIDEN);
3435 if (tem != quotient)
3436 emit_move_insn (quotient, tem);
3437 expand_inc (quotient, const1_rtx);
3438 emit_label (label2);
3443 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3444 && INTVAL (op1) >= 0)
3446 /* This is extremely similar to the code for the unsigned case
3447 above. For 2.7 we should merge these variants, but for
3448 2.6.1 I don't want to touch the code for unsigned since that
3449 get used in C. The signed case will only be used by other
3453 unsigned HOST_WIDE_INT d = INTVAL (op1);
3454 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3455 build_int_2 (floor_log2 (d), 0),
3457 t2 = expand_binop (compute_mode, and_optab, op0,
3459 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3460 t3 = gen_reg_rtx (compute_mode);
3461 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3462 compute_mode, 1, 1);
3466 lab = gen_label_rtx ();
3467 emit_cmp_insn (t2, const0_rtx, EQ, NULL_RTX,
3468 compute_mode, 0, 0);
3469 emit_jump_insn (gen_beq (lab));
3470 expand_inc (t1, const1_rtx);
3475 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3481 /* Try using an instruction that produces both the quotient and
3482 remainder, using truncation. We can easily compensate the
3483 quotient or remainder to get ceiling rounding, once we have the
3484 remainder. Notice that we compute also the final remainder
3485 value here, and return the result right away. */
3486 if (target == 0 || GET_MODE (target) != compute_mode)
3487 target = gen_reg_rtx (compute_mode);
3490 remainder= (GET_CODE (target) == REG
3491 ? target : gen_reg_rtx (compute_mode));
3492 quotient = gen_reg_rtx (compute_mode);
3496 quotient = (GET_CODE (target) == REG
3497 ? target : gen_reg_rtx (compute_mode));
3498 remainder = gen_reg_rtx (compute_mode);
3501 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
3504 /* This could be computed with a branch-less sequence.
3505 Save that for later. */
3507 rtx label = gen_label_rtx ();
3508 emit_cmp_insn (remainder, const0_rtx, EQ, NULL_RTX,
3509 compute_mode, 0, 0);
3510 emit_jump_insn (gen_beq (label));
3511 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3512 NULL_RTX, 0, OPTAB_WIDEN);
3513 emit_cmp_insn (tem, const0_rtx, LT, NULL_RTX,
3514 compute_mode, 0, 0);
3515 emit_jump_insn (gen_blt (label));
3516 expand_inc (quotient, const1_rtx);
3517 expand_dec (remainder, op1);
3519 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3522 /* No luck with division elimination or divmod. Have to do it
3523 by conditionally adjusting op0 *and* the result. */
3525 rtx label1, label2, label3, label4, label5;
3529 quotient = gen_reg_rtx (compute_mode);
3530 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3531 label1 = gen_label_rtx ();
3532 label2 = gen_label_rtx ();
3533 label3 = gen_label_rtx ();
3534 label4 = gen_label_rtx ();
3535 label5 = gen_label_rtx ();
3536 emit_cmp_insn (op1, const0_rtx, LT, NULL_RTX,
3537 compute_mode, 0, 0);
3538 emit_jump_insn (gen_blt (label2));
3539 emit_cmp_insn (adjusted_op0, const0_rtx, GT, NULL_RTX,
3540 compute_mode, 0, 0);
3541 emit_jump_insn (gen_bgt (label1));
3542 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3543 quotient, 0, OPTAB_LIB_WIDEN);
3544 if (tem != quotient)
3545 emit_move_insn (quotient, tem);
3546 emit_jump_insn (gen_jump (label5));
3548 emit_label (label1);
3549 expand_dec (adjusted_op0, const1_rtx);
3550 emit_jump_insn (gen_jump (label4));
3552 emit_label (label2);
3553 emit_cmp_insn (adjusted_op0, const0_rtx, LT, NULL_RTX,
3554 compute_mode, 0, 0);
3555 emit_jump_insn (gen_blt (label3));
3556 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3557 quotient, 0, OPTAB_LIB_WIDEN);
3558 if (tem != quotient)
3559 emit_move_insn (quotient, tem);
3560 emit_jump_insn (gen_jump (label5));
3562 emit_label (label3);
3563 expand_inc (adjusted_op0, const1_rtx);
3564 emit_label (label4);
3565 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3566 quotient, 0, OPTAB_LIB_WIDEN);
3567 if (tem != quotient)
3568 emit_move_insn (quotient, tem);
3569 expand_inc (quotient, const1_rtx);
3570 emit_label (label5);
3575 case EXACT_DIV_EXPR:
3576 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3578 HOST_WIDE_INT d = INTVAL (op1);
3579 unsigned HOST_WIDE_INT ml;
3583 post_shift = floor_log2 (d & -d);
3584 ml = invert_mod2n (d >> post_shift, size);
3585 t1 = expand_mult (compute_mode, op0, GEN_INT (ml), NULL_RTX,
3587 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3588 build_int_2 (post_shift, 0),
3589 NULL_RTX, unsignedp);
3591 insn = get_last_insn ();
3593 = gen_rtx_EXPR_LIST (REG_EQUAL,
3594 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
3601 case ROUND_DIV_EXPR:
3602 case ROUND_MOD_EXPR:
3607 label = gen_label_rtx ();
3608 quotient = gen_reg_rtx (compute_mode);
3609 remainder = gen_reg_rtx (compute_mode);
3610 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
3613 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
3614 quotient, 1, OPTAB_LIB_WIDEN);
3615 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
3616 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3617 remainder, 1, OPTAB_LIB_WIDEN);
3619 tem = plus_constant (op1, -1);
3620 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3621 build_int_2 (1, 0), NULL_RTX, 1);
3622 emit_cmp_insn (remainder, tem, LEU, NULL_RTX, compute_mode, 0, 0);
3623 emit_jump_insn (gen_bleu (label));
3624 expand_inc (quotient, const1_rtx);
3625 expand_dec (remainder, op1);
3630 rtx abs_rem, abs_op1, tem, mask;
3632 label = gen_label_rtx ();
3633 quotient = gen_reg_rtx (compute_mode);
3634 remainder = gen_reg_rtx (compute_mode);
3635 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
3638 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
3639 quotient, 0, OPTAB_LIB_WIDEN);
3640 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
3641 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3642 remainder, 0, OPTAB_LIB_WIDEN);
3644 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 0, 0);
3645 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 0, 0);
3646 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
3647 build_int_2 (1, 0), NULL_RTX, 1);
3648 emit_cmp_insn (tem, abs_op1, LTU, NULL_RTX, compute_mode, 0, 0);
3649 emit_jump_insn (gen_bltu (label));
3650 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3651 NULL_RTX, 0, OPTAB_WIDEN);
3652 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3653 build_int_2 (size - 1, 0), NULL_RTX, 0);
3654 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
3655 NULL_RTX, 0, OPTAB_WIDEN);
3656 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3657 NULL_RTX, 0, OPTAB_WIDEN);
3658 expand_inc (quotient, tem);
3659 tem = expand_binop (compute_mode, xor_optab, mask, op1,
3660 NULL_RTX, 0, OPTAB_WIDEN);
3661 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3662 NULL_RTX, 0, OPTAB_WIDEN);
3663 expand_dec (remainder, tem);
3666 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3674 if (target && GET_MODE (target) != compute_mode)
3679 /* Try to produce the remainder directly without a library call. */
3680 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
3682 unsignedp, OPTAB_WIDEN);
3685 /* No luck there. Can we do remainder and divide at once
3686 without a library call? */
3687 remainder = gen_reg_rtx (compute_mode);
3688 if (! expand_twoval_binop ((unsignedp
3692 NULL_RTX, remainder, unsignedp))
3697 return gen_lowpart (mode, remainder);
3700 /* Produce the quotient. Try a quotient insn, but not a library call.
3701 If we have a divmod in this mode, use it in preference to widening
3702 the div (for this test we assume it will not fail). Note that optab2
3703 is set to the one of the two optabs that the call below will use. */
3705 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
3706 op0, op1, rem_flag ? NULL_RTX : target,
3708 ((optab2->handlers[(int) compute_mode].insn_code
3709 != CODE_FOR_nothing)
3710 ? OPTAB_DIRECT : OPTAB_WIDEN));
3714 /* No luck there. Try a quotient-and-remainder insn,
3715 keeping the quotient alone. */
3716 quotient = gen_reg_rtx (compute_mode);
3717 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
3719 quotient, NULL_RTX, unsignedp))
3723 /* Still no luck. If we are not computing the remainder,
3724 use a library call for the quotient. */
3725 quotient = sign_expand_binop (compute_mode,
3726 udiv_optab, sdiv_optab,
3728 unsignedp, OPTAB_LIB_WIDEN);
3735 if (target && GET_MODE (target) != compute_mode)
3739 /* No divide instruction either. Use library for remainder. */
3740 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
3742 unsignedp, OPTAB_LIB_WIDEN);
3745 /* We divided. Now finish doing X - Y * (X / Y). */
3746 remainder = expand_mult (compute_mode, quotient, op1,
3747 NULL_RTX, unsignedp);
3748 remainder = expand_binop (compute_mode, sub_optab, op0,
3749 remainder, target, unsignedp,
3754 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3757 /* Return a tree node with data type TYPE, describing the value of X.
3758 Usually this is an RTL_EXPR, if there is no obvious better choice.
3759 X may be an expression, however we only support those expressions
3760 generated by loop.c. */
/* NOTE(review): this file is a sampled extract -- the `make_tree' function
   header (original lines 3761-3768) and the `case' labels of the switch
   below are missing between the visible lines.  Comments only; no code
   tokens were changed.  */
3769 switch (GET_CODE (x))
/* Presumably the CONST_INT case: build an INTEGER_CST, using -1 as the
   high word to sign-extend a negative value of a signed type -- TODO
   confirm against the full source.  */
3772 t = build_int_2 (INTVAL (x),
3773 TREE_UNSIGNED (type) || INTVAL (x) >= 0 ? 0 : -1);
3774 TREE_TYPE (t) = type;
/* CONST_DOUBLE handling: VOIDmode means an integer wider than a word
   (low/high halves), otherwise it is a floating-point constant.  */
3778 if (GET_MODE (x) == VOIDmode)
3780 t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
3781 TREE_TYPE (t) = type;
3787 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
3788 t = build_real (type, d);
/* Arithmetic rtx codes are translated recursively into the matching
   tree codes and folded.  */
3794 return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
3795 make_tree (type, XEXP (x, 1))));
3798 return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
3799 make_tree (type, XEXP (x, 1))));
3802 return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0))));
3805 return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
3806 make_tree (type, XEXP (x, 1))));
3809 return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
3810 make_tree (type, XEXP (x, 1))));
/* Logical right shift: force the operand to the unsigned variant of
   TYPE so RSHIFT_EXPR performs a zero-filling shift.  */
3813 return fold (convert (type,
3814 build (RSHIFT_EXPR, unsigned_type (type),
3815 make_tree (unsigned_type (type),
3817 make_tree (type, XEXP (x, 1)))));
/* Arithmetic right shift: force the operand signed so the shift
   replicates the sign bit.  */
3820 return fold (convert (type,
3821 build (RSHIFT_EXPR, signed_type (type),
3822 make_tree (signed_type (type), XEXP (x, 0)),
3823 make_tree (type, XEXP (x, 1)))));
/* Signed division: use the signed variant of TYPE unless TYPE is a
   REAL_TYPE (then T keeps its earlier value -- the assignment of T for
   the real case is not visible in this extract).  */
3826 if (TREE_CODE (type) != REAL_TYPE)
3827 t = signed_type (type);
3831 return fold (convert (type,
3832 build (TRUNC_DIV_EXPR, t,
3833 make_tree (t, XEXP (x, 0)),
3834 make_tree (t, XEXP (x, 1)))));
/* Unsigned division: same shape, with the unsigned variant of TYPE.  */
3836 t = unsigned_type (type);
3837 return fold (convert (type,
3838 build (TRUNC_DIV_EXPR, t,
3839 make_tree (t, XEXP (x, 0)),
3840 make_tree (t, XEXP (x, 1)))));
/* Fallback (presumably the switch default): wrap the raw rtx in an
   RTL_EXPR node with no insn sequence attached.  */
3842 t = make_node (RTL_EXPR);
3843 TREE_TYPE (t) = type;
3844 RTL_EXPR_RTL (t) = x;
3845 /* There are no insns to be output
3846 when this rtl_expr is used. */
3847 RTL_EXPR_SEQUENCE (t) = 0;
3852 /* Return an rtx representing the value of X * MULT + ADD.
3853 TARGET is a suggestion for where to store the result (an rtx).
3854 MODE is the machine mode for the computation.
3855 X and MULT must have mode MODE. ADD may have a different mode.
3856 So can X (defaults to same as MODE).
3857 UNSIGNEDP is non-zero to do unsigned multiplication.
3858 This may emit insns. */
/* NOTE(review): this extract is missing the return-type line and the
   `int unsignedp;' parameter declaration (original lines 3859-3860 and
   3864-3865) -- confirm against the full source.  */
3861 expand_mult_add (x, target, mult, add, mode, unsignedp)
3862 rtx x, target, mult, add;
3863 enum machine_mode mode;
/* Build the tree (x * mult) + add -- converting each rtx operand back
   into a tree via make_tree -- fold it, then expand the folded tree.
   Going through trees lets fold and expand_expr apply their full set
   of multiply/add strength reductions.  */
3866 tree type = type_for_mode (mode, unsignedp);
3867 tree add_type = (GET_MODE (add) == VOIDmode
3868 ? type : type_for_mode (GET_MODE (add), unsignedp));
3869 tree result = fold (build (PLUS_EXPR, type,
3870 fold (build (MULT_EXPR, type,
3871 make_tree (type, x),
3872 make_tree (type, mult))),
3873 make_tree (add_type, add)));
3875 return expand_expr (result, target, VOIDmode, 0);
3878 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
3879 and returning TARGET.
3881 If TARGET is 0, a pseudo-register or constant is returned. */
3884 expand_and (op0, op1, target)
3885 rtx op0, op1, target;
/* NOTE(review): sampled extract -- the declaration of `tem', the final
   else-arm (original lines 3899-3903, presumably an abort for two
   mode-less non-constant operands) and the trailing return are missing
   between the visible lines.  Comments only; no code tokens changed.  */
3887 enum machine_mode mode = VOIDmode;
/* Pick the mode from whichever operand has one; both being VOIDmode
   means both are bare constants.  */
3890 if (GET_MODE (op0) != VOIDmode)
3891 mode = GET_MODE (op0);
3892 else if (GET_MODE (op1) != VOIDmode)
3893 mode = GET_MODE (op1);
/* With a known mode, emit a real AND; two CONST_INTs are folded at
   compile time instead.  */
3895 if (mode != VOIDmode)
3896 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
3897 else if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT)
3898 tem = GEN_INT (INTVAL (op0) & INTVAL (op1));
/* If the result landed somewhere other than the requested TARGET,
   copy it there so the contract "returns TARGET" holds.  */
3904 else if (tem != target)
3905 emit_move_insn (target, tem);
3909 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
3910 and storing in TARGET. Normally return TARGET.
3911 Return 0 if that cannot be done.
3913 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
3914 it is VOIDmode, they cannot both be CONST_INT.
3916 UNSIGNEDP is for the case where we have to widen the operands
3917 to perform the operation. It says to use zero-extension.
3919 NORMALIZEP is 1 if we should convert the result to be either zero
3920 or one. Normalize is -1 if we should convert the result to be
3921 either zero or -1. If NORMALIZEP is zero, the result will be left
3922 "raw" out of the scc insn. */
/* NOTE(review): sampled extract -- parameter declarations, many braces,
   the `case' labels of the canonicalization switch, and the final
   `return' lines are missing between the visible lines below.  Comments
   only; no code tokens changed.  */
3925 emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep)
3929 enum machine_mode mode;
3934 enum insn_code icode;
3935 enum machine_mode compare_mode;
3936 enum machine_mode target_mode = GET_MODE (target);
/* LAST marks the insn stream position so any partially-emitted attempt
   can be rolled back with delete_insns_since.  */
3938 rtx last = get_last_insn ();
3939 rtx pattern, comparison;
3941 /* If one operand is constant, make it the second one. Only do this
3942 if the other operand is not constant as well. */
3944 if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
3945 || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
3950 code = swap_condition (code);
3953 if (mode == VOIDmode)
3954 mode = GET_MODE (op0);
3956 /* For some comparisons with 1 and -1, we can convert this to
3957 comparisons with zero. This will often produce more opportunities for
3958 store-flag insns. */
/* The guarding switch cases (presumably LT/GE/LE/GT/GEU/LTU and
   friends) are missing from this extract; each visible pair rewrites
   a compare-against-1/-1 into a compare-against-0 with an adjusted
   condition code.  */
3963 if (op1 == const1_rtx)
3964 op1 = const0_rtx, code = LE;
3967 if (op1 == constm1_rtx)
3968 op1 = const0_rtx, code = LT;
3971 if (op1 == const1_rtx)
3972 op1 = const0_rtx, code = GT;
3975 if (op1 == constm1_rtx)
3976 op1 = const0_rtx, code = GE;
3979 if (op1 == const1_rtx)
3980 op1 = const0_rtx, code = NE;
3983 if (op1 == const1_rtx)
3984 op1 = const0_rtx, code = EQ;
3990 /* From now on, we won't change CODE, so set ICODE now. */
3991 icode = setcc_gen_code[(int) code];
3993 /* If this is A < 0 or A >= 0, we can do this by taking the ones
3994 complement of A (for GE) and shifting the sign bit to the low bit. */
3995 if (op1 == const0_rtx && (code == LT || code == GE)
3996 && GET_MODE_CLASS (mode) == MODE_INT
3997 && (normalizep || STORE_FLAG_VALUE == 1
3998 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
3999 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4000 == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
4004 /* If the result is to be wider than OP0, it is best to convert it
4005 first. If it is to be narrower, it is *incorrect* to convert it
4007 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
4009 op0 = protect_from_queue (op0, 0);
4010 op0 = convert_modes (target_mode, mode, op0, 0);
4014 if (target_mode != mode)
/* For GE, complement first so the sign bit encodes the answer.  */
4018 op0 = expand_unop (mode, one_cmpl_optab, op0,
4019 ((STORE_FLAG_VALUE == 1 || normalizep)
4020 ? 0 : subtarget), 0);
4022 if (STORE_FLAG_VALUE == 1 || normalizep)
4023 /* If we are supposed to produce a 0/1 value, we want to do
4024 a logical shift from the sign bit to the low-order bit; for
4025 a -1/0 value, we do an arithmetic shift. */
4026 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
4027 size_int (GET_MODE_BITSIZE (mode) - 1),
4028 subtarget, normalizep != -1)
4030 if (mode != target_mode)
4031 op0 = convert_modes (target_mode, mode, op0, 0);
/* Next strategy: a target scc (set-on-condition-code) insn.  */
4036 if (icode != CODE_FOR_nothing)
4038 /* We think we may be able to do this with a scc insn. Emit the
4039 comparison and then the scc insn.
4041 compare_from_rtx may call emit_queue, which would be deleted below
4042 if the scc insn fails. So call it ourselves before setting LAST. */
4045 last = get_last_insn ();
4048 = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX, 0);
/* A constant comparison folded at compile time: hand back the
   normalized constant directly.  */
4049 if (GET_CODE (comparison) == CONST_INT)
4050 return (comparison == const0_rtx ? const0_rtx
4051 : normalizep == 1 ? const1_rtx
4052 : normalizep == -1 ? constm1_rtx
4055 /* If the code of COMPARISON doesn't match CODE, something is
4056 wrong; we can no longer be sure that we have the operation.
4057 We could handle this case, but it should not happen. */
4059 if (GET_CODE (comparison) != code)
4062 /* Get a reference to the target in the proper mode for this insn. */
4063 compare_mode = insn_operand_mode[(int) icode][0];
4065 if (preserve_subexpressions_p ()
4066 || ! (*insn_operand_predicate[(int) icode][0]) (subtarget, compare_mode))
4067 subtarget = gen_reg_rtx (compare_mode);
4069 pattern = GEN_FCN (icode) (subtarget);
4072 emit_insn (pattern);
4074 /* If we are converting to a wider mode, first convert to
4075 TARGET_MODE, then normalize. This produces better combining
4076 opportunities on machines that have a SIGN_EXTRACT when we are
4077 testing a single bit. This mostly benefits the 68k.
4079 If STORE_FLAG_VALUE does not have the sign bit set when
4080 interpreted in COMPARE_MODE, we can do this conversion as
4081 unsigned, which is usually more efficient. */
4082 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
4084 convert_move (target, subtarget,
4085 (GET_MODE_BITSIZE (compare_mode)
4086 <= HOST_BITS_PER_WIDE_INT)
4087 && 0 == (STORE_FLAG_VALUE
4088 & ((HOST_WIDE_INT) 1
4089 << (GET_MODE_BITSIZE (compare_mode) -1))));
4091 compare_mode = target_mode;
4096 /* If we want to keep subexpressions around, don't reuse our
4099 if (preserve_subexpressions_p ())
4102 /* Now normalize to the proper value in COMPARE_MODE. Sometimes
4103 we don't have to do anything. */
4104 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
4106 else if (normalizep == - STORE_FLAG_VALUE)
4107 op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
4109 /* We don't want to use STORE_FLAG_VALUE < 0 below since this
4110 makes it hard to use a value of just the sign bit due to
4111 ANSI integer constant typing rules. */
4112 else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
4113 && (STORE_FLAG_VALUE
4114 & ((HOST_WIDE_INT) 1
4115 << (GET_MODE_BITSIZE (compare_mode) - 1))))
4116 op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
4117 size_int (GET_MODE_BITSIZE (compare_mode) - 1),
4118 subtarget, normalizep == 1);
/* Last resort for an odd STORE_FLAG_VALUE: mask to the low bit,
   then negate if a 0/-1 result was requested.  */
4119 else if (STORE_FLAG_VALUE & 1)
4121 op0 = expand_and (op0, const1_rtx, subtarget);
4122 if (normalizep == -1)
4123 op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
4128 /* If we were converting to a smaller mode, do the
4130 if (target_mode != compare_mode)
4132 convert_move (target, op0, 0);
/* The scc attempt failed: unwind everything emitted since LAST.  */
4140 delete_insns_since (last);
4142 /* If expensive optimizations, use different pseudo registers for each
4143 insn, instead of reusing the same pseudo. This leads to better CSE,
4144 but slows down the compiler, since there are more pseudos */
4145 subtarget = (!flag_expensive_optimizations
4146 && (target_mode == mode)) ? target : NULL_RTX;
4148 /* If we reached here, we can't do this with a scc insn. However, there
4149 are some comparisons that can be done directly. For example, if
4150 this is an equality comparison of integers, we can try to exclusive-or
4151 (or subtract) the two operands and use a recursive call to try the
4152 comparison with zero. Don't do any of these cases if branches are
4156 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
4157 && op1 != const0_rtx)
4159 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
4163 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
4166 tem = emit_store_flag (target, code, tem, const0_rtx,
4167 mode, unsignedp, normalizep);
4169 delete_insns_since (last);
4173 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
4174 the constant zero. Reject all other comparisons at this point. Only
4175 do LE and GT if branches are expensive since they are expensive on
4176 2-operand machines. */
4178 if (BRANCH_COST == 0
4179 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
4180 || (code != EQ && code != NE
4181 && (BRANCH_COST <= 1 || (code != LE && code != GT))))
4184 /* See what we need to return. We can only return a 1, -1, or the
4187 if (normalizep == 0)
4189 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
4190 normalizep = STORE_FLAG_VALUE;
4192 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4193 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4194 == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
4200 /* Try to put the result of the comparison in the sign bit. Assume we can't
4201 do the necessary operation below. */
4205 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
4206 the sign bit set. */
4210 /* This is destructive, so SUBTARGET can't be OP0. */
4211 if (rtx_equal_p (subtarget, op0))
4214 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
4217 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
4221 /* To see if A > 0, compute (((signed) A) << BITS) - A, where BITS is the
4222 number of bits in the mode of OP0, minus one. */
4226 if (rtx_equal_p (subtarget, op0))
4229 tem = expand_shift (RSHIFT_EXPR, mode, op0,
4230 size_int (GET_MODE_BITSIZE (mode) - 1),
4232 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
4236 if (code == EQ || code == NE)
4238 /* For EQ or NE, one way to do the comparison is to apply an operation
4239 that converts the operand into a positive number if it is non-zero
4240 or zero if it was originally zero. Then, for EQ, we subtract 1 and
4241 for NE we negate. This puts the result in the sign bit. Then we
4242 normalize with a shift, if needed.
4244 Two operations that can do the above actions are ABS and FFS, so try
4245 them. If that doesn't work, and MODE is smaller than a full word,
4246 we can use zero-extension to the wider mode (an unsigned conversion)
4247 as the operation. */
4249 if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4250 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
4251 else if (ffs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4252 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
4253 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
4255 op0 = protect_from_queue (op0, 0);
4256 tem = convert_modes (word_mode, mode, op0, 1);
/* Move the answer into the sign bit: subtract 1 for EQ, negate
   for NE (the selecting condition is missing from this extract).  */
4263 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
4266 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
4269 /* If we couldn't do it that way, for NE we can "or" the two's complement
4270 of the value with itself. For EQ, we take the one's complement of
4271 that "or", which is an extra insn, so we only handle EQ if branches
4274 if (tem == 0 && (code == NE || BRANCH_COST > 1))
4276 if (rtx_equal_p (subtarget, op0))
4279 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
4280 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
4283 if (tem && code == EQ)
4284 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
/* Shift the sign-bit answer down to bit 0 (logical shift for 0/1,
   arithmetic shift for 0/-1).  */
4288 if (tem && normalizep)
4289 tem = expand_shift (RSHIFT_EXPR, mode, tem,
4290 size_int (GET_MODE_BITSIZE (mode) - 1),
4291 subtarget, normalizep == 1);
4295 if (GET_MODE (tem) != target_mode)
4297 convert_move (target, tem, 0);
4300 else if (!subtarget)
4302 emit_move_insn (target, tem);
/* Total failure: discard every insn emitted since LAST (the visible
   extract ends before the function's final return).  */
4307 delete_insns_since (last);
4312 /* Like emit_store_flag, but always succeeds. */
4315 emit_store_flag_force (target, code, op0, op1, mode, unsignedp, normalizep)
4319 enum machine_mode mode;
4325 /* First see if emit_store_flag can do the job. */
4326 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
4330 if (normalizep == 0)
4333 /* If this failed, we have to do this with set/compare/jump/set code. */
4335 if (GET_CODE (target) != REG
4336 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
4337 target = gen_reg_rtx (GET_MODE (target));
4339 emit_move_insn (target, const1_rtx);
4340 tem = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX, 0);
4341 if (GET_CODE (tem) == CONST_INT)
4344 label = gen_label_rtx ();
4345 if (bcc_gen_fctn[(int) code] == 0)
4348 emit_jump_insn ((*bcc_gen_fctn[(int) code]) (label));
4349 emit_move_insn (target, const0_rtx);