/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 88, 89, 92-6, 1997 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
#include "insn-flags.h"
#include "insn-codes.h"
#include "insn-config.h"
static void store_fixed_bit_field PROTO((rtx, int, int, int, rtx, int));
static void store_split_bit_field PROTO((rtx, int, int, rtx, int));
static rtx extract_fixed_bit_field PROTO((enum machine_mode, rtx, int,
                                          int, int, rtx, int, int));
static rtx mask_rtx PROTO((enum machine_mode, int,
                           int, int));
static rtx lshift_value PROTO((enum machine_mode, rtx,
                               int, int));
static rtx extract_split_bit_field PROTO((rtx, int, int, int, int));

#define CEIL(x,y) (((x) + (y) - 1) / (y))
/* Non-zero means divides or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   sequences.  */

static int sdiv_pow2_cheap, smod_pow2_cheap;
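
/* Illustrative note (not from the original source): on a two's complement
   machine, the non-branch sequence for a signed divide by a power of two
   typically has the shape

       x / 8  ==>  (x + ((x >> 31) & 7)) >> 3      (32-bit example)

   arithmetic shift, mask, add, arithmetic shift -- instead of a
   compare-and-branch that adjusts negative dividends.  */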
#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS STRICT_ALIGNMENT
#endif

/* For compilers that support multiple targets with different word sizes,
   MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD.  An example
   is the H8/300(H) compiler.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif
/* Cost of various pieces of RTL.  Note that some of these are indexed by
   shift count and some by mode.  */
static int add_cost, negate_cost, zero_cost;
static int shift_cost[MAX_BITS_PER_WORD];
static int shiftadd_cost[MAX_BITS_PER_WORD];
static int shiftsub_cost[MAX_BITS_PER_WORD];
static int mul_cost[NUM_MACHINE_MODES];
static int div_cost[NUM_MACHINE_MODES];
static int mul_widen_cost[NUM_MACHINE_MODES];
static int mul_highpart_cost[NUM_MACHINE_MODES];
void
init_expmed ()
{
  char *free_point;
  /* This is "some random pseudo register" for purposes of calling recog
     to see what insns exist.  */
  rtx reg = gen_rtx (REG, word_mode, 10000);
  rtx shift_insn, shiftadd_insn, shiftsub_insn;
  int dummy;
  int m;
  enum machine_mode mode, wider_mode;

  start_sequence ();

  /* Since we are on the permanent obstack, we must be sure we save this
     spot AFTER we call start_sequence, since it will reuse the rtl it
     makes.  */
  free_point = (char *) oballoc (0);

  zero_cost = rtx_cost (const0_rtx, 0);
  add_cost = rtx_cost (gen_rtx (PLUS, word_mode, reg, reg), SET);

  shift_insn = emit_insn (gen_rtx (SET, VOIDmode, reg,
                                   gen_rtx (ASHIFT, word_mode, reg,
                                            const0_rtx)));

  shiftadd_insn = emit_insn (gen_rtx (SET, VOIDmode, reg,
                                      gen_rtx (PLUS, word_mode,
                                               gen_rtx (MULT, word_mode,
                                                        reg, const0_rtx),
                                               reg)));

  shiftsub_insn = emit_insn (gen_rtx (SET, VOIDmode, reg,
                                      gen_rtx (MINUS, word_mode,
                                               gen_rtx (MULT, word_mode,
                                                        reg, const0_rtx),
                                               reg)));

  shift_cost[0] = 0;
  shiftadd_cost[0] = shiftsub_cost[0] = add_cost;

  for (m = 1; m < BITS_PER_WORD; m++)
    {
      shift_cost[m] = shiftadd_cost[m] = shiftsub_cost[m] = 32000;

      XEXP (SET_SRC (PATTERN (shift_insn)), 1) = GEN_INT (m);
      if (recog (PATTERN (shift_insn), shift_insn, &dummy) >= 0)
        shift_cost[m] = rtx_cost (SET_SRC (PATTERN (shift_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftadd_insn)), 0), 1)
        = GEN_INT ((HOST_WIDE_INT) 1 << m);
      if (recog (PATTERN (shiftadd_insn), shiftadd_insn, &dummy) >= 0)
        shiftadd_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftadd_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftsub_insn)), 0), 1)
        = GEN_INT ((HOST_WIDE_INT) 1 << m);
      if (recog (PATTERN (shiftsub_insn), shiftsub_insn, &dummy) >= 0)
        shiftsub_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftsub_insn)), SET);
    }
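
  /* Illustrative note (not in the original): 32000 serves as an
     "effectively infinite" cost.  If recog accepts the probe insn, the
     sentinel is replaced by the pattern's real rtx_cost; otherwise, e.g.
     on a target with no shift-add patterns, shiftadd_cost[m] stays at
     32000 and synth_mult will never choose such a step.  */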
  negate_cost = rtx_cost (gen_rtx (NEG, word_mode, reg), SET);

  sdiv_pow2_cheap
    = (rtx_cost (gen_rtx (DIV, word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);
  smod_pow2_cheap
    = (rtx_cost (gen_rtx (MOD, word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      reg = gen_rtx (REG, mode, 10000);
      div_cost[(int) mode] = rtx_cost (gen_rtx (UDIV, mode, reg, reg), SET);
      mul_cost[(int) mode] = rtx_cost (gen_rtx (MULT, mode, reg, reg), SET);
      wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
        {
          mul_widen_cost[(int) wider_mode]
            = rtx_cost (gen_rtx (MULT, wider_mode,
                                 gen_rtx (ZERO_EXTEND, wider_mode, reg),
                                 gen_rtx (ZERO_EXTEND, wider_mode, reg)),
                        SET);
          mul_highpart_cost[(int) mode]
            = rtx_cost (gen_rtx (TRUNCATE, mode,
                                 gen_rtx (LSHIFTRT, wider_mode,
                                          gen_rtx (MULT, wider_mode,
                                                   gen_rtx (ZERO_EXTEND, wider_mode, reg),
                                                   gen_rtx (ZERO_EXTEND, wider_mode, reg)),
                                          GEN_INT (GET_MODE_BITSIZE (mode)))),
                        SET);
        }
    }

  /* Free the objects we just allocated.  */
  end_sequence ();
  obfree (free_point);
}
/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (mode, x)
     enum machine_mode mode;
     rtx x;
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}

/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
   ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
   TOTAL_SIZE is the size of the structure in bytes, or -1 if varying.  */

/* ??? Note that there are two different ideas here for how
   to determine the size to count bits within, for a register.
   One is BITS_PER_WORD, and the other is the size of operand 3
   of the insv pattern.  (The latter assumes that an n-bit machine
   will be able to insert bit fields up to n bits wide.)
   It isn't certain that either of these is right.
   extract_bit_field has the same quandary.  */
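
/* Illustrative example (not part of the original comments): storing a
   6-bit value V at BITNUM 3 of a 32-bit little-endian word W amounts to

       W = (W & ~(0x3F << 3)) | ((V & 0x3F) << 3);

   The code below first tries the insv pattern, then falls back to that
   explicit mask/shift/or sequence in store_fixed_bit_field.  */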
void
store_bit_field (str_rtx, bitsize, bitnum, fieldmode, value, align, total_size)
     rtx str_rtx;
     register int bitsize;
     int bitnum;
     enum machine_mode fieldmode;
     rtx value;
     int align;
     int total_size;
{
  int unit = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  register int offset = bitnum / unit;
  register int bitpos = bitnum % unit;
  register rtx op0 = str_rtx;

  if (GET_CODE (str_rtx) == MEM && ! MEM_IN_STRUCT_P (str_rtx))
    abort ();

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
                   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
         but I think that is a mistake.  WORDS_BIG_ENDIAN is
         meaningful at a much higher level; when structures are copied
         between memory and regs, the higher-numbered regs
         always get higher addresses.  */
      offset += SUBREG_WORD (op0);
      /* We used to adjust BITPOS here, but now we do the whole adjustment
         right after the loop.  */
      op0 = SUBREG_REG (op0);
    }

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  value = protect_from_queue (value, 0);

  if (flag_force_mem)
    value = force_not_mem (value);
  /* Note that the adjustment of BITPOS above has no effect on whether
     BITPOS is 0 in a REG bigger than a word.  */
  if (GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
      && (GET_CODE (op0) != MEM
          || ! SLOW_UNALIGNED_ACCESS
          || (offset * BITS_PER_UNIT % bitsize == 0
              && align % GET_MODE_SIZE (fieldmode) == 0))
      && bitpos == 0 && bitsize == GET_MODE_BITSIZE (fieldmode))
    {
      /* Storing in a full-word or multi-word field in a register
         can be done with just SUBREG.  */
      if (GET_MODE (op0) != fieldmode)
        {
          if (GET_CODE (op0) == REG)
            op0 = gen_rtx (SUBREG, fieldmode, op0, offset);
          else
            op0 = change_address (op0, fieldmode,
                                  plus_constant (XEXP (op0, 0), offset));
        }
      emit_move_insn (op0, value);
      return;
    }

  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (GET_CODE (op0) != MEM
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (GET_MODE (op0) == fieldmode
          || (movstrict_optab->handlers[(int) fieldmode].insn_code
              != CODE_FOR_nothing)))
    {
      /* Get appropriate low part of the value being stored.  */
      if (GET_CODE (value) == CONST_INT || GET_CODE (value) == REG)
        value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
                 || GET_CODE (value) == LABEL_REF
                 || GET_CODE (value) == CONST))
        value = convert_to_mode (fieldmode, value, 0);

      if (GET_MODE (op0) == fieldmode)
        emit_move_insn (op0, value);
      else
        {
          int icode = movstrict_optab->handlers[(int) fieldmode].insn_code;
          if (! (*insn_operand_predicate[icode][1]) (value, fieldmode))
            value = copy_to_mode_reg (fieldmode, value);
          emit_insn (GEN_FCN (icode)
                     (gen_rtx (SUBREG, fieldmode, op0, offset), value));
        }
      return;
    }
  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.
         However, only do that if the value is not BLKmode.  */

      int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      int i;

      /* This is the mode we must force value to, so that there will be enough
         subwords to extract.  Note that fieldmode will often (always?) be
         VOIDmode, because that is what store_field uses to indicate that this
         is a bit field, but passing VOIDmode to operand_subword_force will
         result in an abort.  */
      fieldmode = mode_for_size (nwords * BITS_PER_WORD, MODE_INT, 0);

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          int wordnum = (backwards ? nwords - i - 1 : i);
          int bit_offset = (backwards
                            ? MAX (bitsize - (i + 1) * BITS_PER_WORD, 0)
                            : i * BITS_PER_WORD);
          store_bit_field (op0, MIN (BITS_PER_WORD,
                                     bitsize - i * BITS_PER_WORD),
                           bitnum + bit_offset, word_mode,
                           operand_subword_force (value, wordnum,
                                                  (GET_MODE (value) == VOIDmode
                                                   ? fieldmode
                                                   : GET_MODE (value))),
                           align, total_size);
        }
      return;
    }

  /* From here on we can assume that the field to be stored in is
     a full-word (whatever type that is), since it is shorter than a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) == REG)
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        op0 = gen_rtx (SUBREG, TYPE_MODE (type_for_size (BITS_PER_WORD, 0)),
                       op0, offset);
      offset = 0;
    }
  else
    op0 = protect_from_queue (op0, 1);
  /* If VALUE is a floating-point mode, access it as an integer of the
     corresponding size.  This can occur on a machine with 64 bit registers
     that uses SFmode for float.  This can also occur for unaligned float
     structure fields.  */
  if (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT)
    {
      if (GET_CODE (value) != REG)
        value = copy_to_reg (value);
      value = gen_rtx (SUBREG, word_mode, value, 0);
    }

  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

#ifdef HAVE_insv
  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
      /* Ensure insv's size is wide enough for this field.  */
      && (GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_insv][3])
          >= bitsize)
      && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
            && (bitsize + bitpos
                > GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_insv][3]))))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      enum machine_mode maxmode
        = insn_operand_mode[(int) CODE_FOR_insv][3];

      int save_volatile_ok = volatile_ok;
      volatile_ok = 1;

      /* If this machine's insv can only insert into a register, copy OP0
         into a register and save it back later.  */
      /* This used to check flag_force_mem, but that was a serious
         de-optimization now that flag_force_mem is enabled by -O2.  */
      if (GET_CODE (op0) == MEM
          && ! ((*insn_operand_predicate[(int) CODE_FOR_insv][0])
                (op0, VOIDmode)))
        {
          rtx tempreg;
          enum machine_mode bestmode;

          /* Get the mode to use for inserting into this field.  If OP0 is
             BLKmode, get the smallest mode consistent with the alignment. If
             OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
             mode. Otherwise, use the smallest mode containing the field.  */

          if (GET_MODE (op0) == BLKmode
              || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
            bestmode
              = get_best_mode (bitsize, bitnum, align * BITS_PER_UNIT, maxmode,
                               MEM_VOLATILE_P (op0));
          else
            bestmode = GET_MODE (op0);

          if (bestmode == VOIDmode
              || (SLOW_UNALIGNED_ACCESS && GET_MODE_SIZE (bestmode) > align))
            goto insv_loses;

          /* Adjust address to point to the containing unit of that mode.  */
          unit = GET_MODE_BITSIZE (bestmode);
          /* Compute offset as multiple of this unit, counting in bytes.  */
          offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
          bitpos = bitnum % unit;
          op0 = change_address (op0, bestmode,
                                plus_constant (XEXP (op0, 0), offset));

          /* Fetch that unit, store the bitfield in it, then store the unit.  */
          tempreg = copy_to_reg (op0);
          store_bit_field (tempreg, bitsize, bitpos, fieldmode, value,
                           align, total_size);
          emit_move_insn (op0, tempreg);
          return;
        }
      volatile_ok = save_volatile_ok;
      /* Add OFFSET into OP0's address.  */
      if (GET_CODE (xop0) == MEM)
        xop0 = change_address (xop0, byte_mode,
                               plus_constant (XEXP (xop0, 0), offset));

      /* If xop0 is a register, we need it in MAXMODE
         to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
        /* We can't just change the mode, because this might clobber op0,
           and we will need the original value of op0 if insv fails.  */
        xop0 = gen_rtx (SUBREG, maxmode, SUBREG_REG (xop0), SUBREG_WORD (xop0));
      if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
        xop0 = gen_rtx (SUBREG, maxmode, xop0, 0);

      /* On big-endian machines, we count bits from the most significant.
         If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
        xbitpos = unit - bitsize - xbitpos;

      /* We have been counting XBITPOS within UNIT.
         Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
        xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

      unit = GET_MODE_BITSIZE (maxmode);
      /* Convert VALUE to maxmode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != maxmode)
        {
          if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
            {
              /* Optimization: Don't bother really extending VALUE
                 if it has all the bits we will actually use.  However,
                 if we must narrow it, be sure we do it correctly.  */

              if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
                {
                  /* Avoid making subreg of a subreg, or of a mem.  */
                  if (GET_CODE (value1) != REG)
                    value1 = copy_to_reg (value1);
                  value1 = gen_rtx (SUBREG, maxmode, value1, 0);
                }
              else
                value1 = gen_lowpart (maxmode, value1);
            }
          else if (!CONSTANT_P (value))
            /* Parse phase is supposed to make VALUE's data type
               match that of the component reference, which is a type
               at least as wide as the field; so VALUE should have
               a mode that corresponds to that type.  */
            abort ();
        }

      /* If this machine's insv insists on a register,
         get VALUE1 into a register.  */
      if (! ((*insn_operand_predicate[(int) CODE_FOR_insv][3])
             (value1, maxmode)))
        value1 = force_reg (maxmode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
        emit_insn (pat);
      else
        {
          delete_insns_since (last);
          store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
        }
    }
  else
    insv_loses:
#endif
    /* Insv is not available; store using shifts and boolean ops.  */
    store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
}
/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0 except offset by OFFSET bytes.
     (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
     (If OP0 is a register, it may be a full word or a narrower mode,
      but BITPOS still counts within a full word,
      which is significant on bigendian machines.)
   STRUCT_ALIGN is the alignment the structure is known to have (in bytes).

   Note that protect_from_queue has already been done on OP0 and VALUE.  */
static void
store_fixed_bit_field (op0, offset, bitsize, bitpos, value, struct_align)
     register rtx op0;
     register int offset, bitsize, bitpos;
     register rtx value;
     int struct_align;
{
  register enum machine_mode mode;
  int total_bits = BITS_PER_WORD;
  rtx subtarget, temp;
  int all_zero = 0;
  int all_one = 0;

  if (! SLOW_UNALIGNED_ACCESS)
    struct_align = BIGGEST_ALIGNMENT / BITS_PER_UNIT;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        {
          store_split_bit_field (op0, bitsize, bitpos,
                                 value, BITS_PER_WORD);
          return;
        }
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            struct_align * BITS_PER_UNIT, word_mode,
                            GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        {
          /* The only way this should occur is if the field spans word
             boundaries.  */
          store_split_bit_field (op0,
                                 bitsize, bitpos + offset * BITS_PER_UNIT,
                                 value, struct_align);
          return;
        }

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = change_address (op0, mode,
                            plus_constant (XEXP (op0, 0), offset));
    }

  mode = GET_MODE (op0);
  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (GET_CODE (value) == CONST_INT)
    {
      register HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
        v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
        all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
                && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
               || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
        all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
                      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
        {
          if ((GET_CODE (value) == REG || GET_CODE (value) == SUBREG)
              && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
            value = gen_lowpart (mode, value);
          else
            value = convert_to_mode (mode, value, 1);
        }

      if (must_and)
        value = expand_binop (mode, and_optab, value,
                              mask_rtx (mode, 0, bitsize, 0),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
        value = expand_shift (LSHIFT_EXPR, mode, value,
                              build_int_2 (bitpos, 0), NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */

  subtarget = (GET_CODE (op0) == REG || ! flag_force_mem) ? op0 : 0;

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, op0,
                           mask_rtx (mode, bitpos, bitsize, 1),
                           subtarget, 1, OPTAB_LIB_WIDEN);
      subtarget = temp;
    }
  else
    temp = op0;

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    {
      temp = expand_binop (mode, ior_optab, temp, value,
                           subtarget, 1, OPTAB_LIB_WIDEN);
      subtarget = temp;
    }

  if (op0 != temp)
    emit_move_insn (op0, temp);
}
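
/* Worked example (added for illustration): with MODE == SImode,
   BITSIZE == 4 and BITPOS == 8, the function above generates

       temp = op0 & mask_rtx (SImode, 8, 4, 1)     ; op0 & 0xFFFFF0FF
       temp |= (value & 0xF) << 8

   and the AND is skipped when a constant VALUE is all ones (all_one),
   the IOR when it is zero (all_zero).  */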
/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word);
   VALUE is the value to store.
   ALIGN is the known alignment of OP0, measured in bytes.
   This is also the size of the memory objects to be used.

   This does not yet handle fields wider than BITS_PER_WORD.  */
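
/* Illustrative example (added): a 16-bit field at BITPOS 24 in 32-bit
   words splits into 8 bits at position 24 of word 0 plus 8 bits at
   position 0 of word 1; the loop below stores each part with an
   ordinary store_fixed_bit_field call.  */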
static void
store_split_bit_field (op0, bitsize, bitpos, value, align)
     rtx op0;
     int bitsize, bitpos;
     rtx value;
     int align;
{
  int unit;
  int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (align * BITS_PER_UNIT, BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
        value = word;
      else
        value = gen_lowpart_common (word_mode,
                                    force_reg (GET_MODE (value) != VOIDmode
                                               ? GET_MODE (value)
                                               : word_mode, value));
    }
  else if (GET_CODE (value) == ADDRESSOF)
    value = copy_to_reg (value);

  while (bitsdone < bitsize)
    {
      int thissize;
      rtx part, word;
      int thispos;
      int offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         store_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);
      if (BYTES_BIG_ENDIAN)
        {
          int total_bits;

          /* We must do an endian conversion exactly the same way as it is
             done in extract_bit_field, so that the two calls to
             extract_fixed_bit_field will have comparable arguments.  */
          if (GET_CODE (value) != MEM || GET_MODE (value) == BLKmode)
            total_bits = BITS_PER_WORD;
          else
            total_bits = GET_MODE_BITSIZE (GET_MODE (value));

          /* Fetch successively less significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> (bitsize - bitsdone - thissize))
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            /* The args are chosen so that the last part includes the
               lsb.  Give extract_bit_field the value it needs (with
               endianness compensation) to fetch the piece we want.

               ??? We have no idea what the alignment of VALUE is, so
               we have to use a guess.  */
            part
              = extract_fixed_bit_field
                (word_mode, value, 0, thissize,
                 total_bits - bitsize + bitsdone, NULL_RTX, 1,
                 GET_MODE (value) == VOIDmode
                 ? UNITS_PER_WORD
                 : (GET_MODE (value) == BLKmode
                    ? 1
                    : GET_MODE_ALIGNMENT (GET_MODE (value)) / BITS_PER_UNIT));
        }
      else
        {
          /* Fetch successively more significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> bitsdone)
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            part
              = extract_fixed_bit_field
                (word_mode, value, 0, thissize, bitsdone, NULL_RTX, 1,
                 GET_MODE (value) == VOIDmode
                 ? UNITS_PER_WORD
                 : (GET_MODE (value) == BLKmode
                    ? 1
                    : GET_MODE_ALIGNMENT (GET_MODE (value)) / BITS_PER_UNIT));
        }
      /* If OP0 is a register, then handle OFFSET here.

         When handling multiword bitfields, extract_bit_field may pass
         down a word_mode SUBREG of a larger REG for a bitfield that actually
         crosses a word boundary.  Thus, for a SUBREG, we must find
         the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
        {
          word = operand_subword_force (SUBREG_REG (op0),
                                        SUBREG_WORD (op0) + offset,
                                        GET_MODE (SUBREG_REG (op0)));
          offset = 0;
        }
      else if (GET_CODE (op0) == REG)
        {
          word = operand_subword_force (op0, offset, GET_MODE (op0));
          offset = 0;
        }
      else
        word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
         store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT,
                             thissize, thispos, part, align);
      bitsdone += thissize;
    }
}
/* Generate code to extract a bit-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.
   It may be a QUEUED.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
   TOTAL_SIZE is the size in bytes of the containing structure,
   or -1 if varying.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */
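
/* Illustrative example (added): extracting an unsigned 6-bit field at
   BITNUM 3 of a 32-bit little-endian word W reduces to

       (W >> 3) & 0x3F

   while the signed case shifts left first, so that two arithmetic shifts

       ((int) (W << 23)) >> 26

   put the field's sign bit into the word's sign bit before extending.  */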
rtx
extract_bit_field (str_rtx, bitsize, bitnum, unsignedp,
                   target, mode, tmode, align, total_size)
     rtx str_rtx;
     register int bitsize;
     int bitnum;
     int unsignedp;
     rtx target;
     enum machine_mode mode, tmode;
     int align;
     int total_size;
{
  int unit = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  register int offset = bitnum / unit;
  register int bitpos = bitnum % unit;
  register rtx op0 = str_rtx;
  rtx spec_target = target;
  rtx spec_target_subreg = 0;

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
                   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  if (tmode == VOIDmode)
    tmode = mode;
  while (GET_CODE (op0) == SUBREG)
    {
      int outer_size = GET_MODE_BITSIZE (GET_MODE (op0));
      int inner_size = GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)));

      offset += SUBREG_WORD (op0);

      if (BYTES_BIG_ENDIAN && (outer_size < inner_size))
        {
          bitpos += inner_size - outer_size;
          if (bitpos > unit)
            {
              offset += (bitpos / unit);
              bitpos %= unit;
            }
        }

      op0 = SUBREG_REG (op0);
    }

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
  /* Extracting a full-word or multi-word value
     from a structure in a register or aligned memory.
     This can be done with just SUBREG.
     So too extracting a subword value in
     the least significant part of the register.  */

  if (((GET_CODE (op0) == REG
        && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                  GET_MODE_BITSIZE (GET_MODE (op0))))
       || (GET_CODE (op0) == MEM
           && (! SLOW_UNALIGNED_ACCESS
               || (offset * BITS_PER_UNIT % bitsize == 0
                   && align * BITS_PER_UNIT % bitsize == 0))))
      && ((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
           && bitpos % BITS_PER_WORD == 0)
          || (mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0) != BLKmode
              && (BYTES_BIG_ENDIAN
                  ? bitpos + bitsize == BITS_PER_WORD
                  : bitpos == 0))))
    {
      enum machine_mode mode1
        = mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0);

      if (mode1 != GET_MODE (op0))
        {
          if (GET_CODE (op0) == REG)
            op0 = gen_rtx (SUBREG, mode1, op0, offset);
          else
            op0 = change_address (op0, mode1,
                                  plus_constant (XEXP (op0, 0), offset));
        }

      if (mode1 != mode)
        return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }
  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.  */

      int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      int i;

      if (target == 0 || GET_CODE (target) != REG)
        target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx (CLOBBER, VOIDmode, target));

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          /* Word number in TARGET to use.  */
          int wordnum = (WORDS_BIG_ENDIAN
                         ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
                         : i);
          /* Offset from start of field in OP0.  */
          int bit_offset = (WORDS_BIG_ENDIAN
                            ? MAX (0, bitsize - (i + 1) * BITS_PER_WORD)
                            : i * BITS_PER_WORD);
          rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
          rtx result_part
            = extract_bit_field (op0, MIN (BITS_PER_WORD,
                                           bitsize - i * BITS_PER_WORD),
                                 bitnum + bit_offset,
                                 1, target_part, mode, word_mode,
                                 align, total_size);

          if (target_part == 0)
            abort ();

          if (result_part != target_part)
            emit_move_insn (target_part, result_part);
        }

      if (unsignedp)
        {
          /* Unless we've filled TARGET, the upper regs in a multi-reg value
             need to be zero'd out.  */
          if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
            {
              int i, total_words;

              total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
              for (i = nwords; i < total_words; i++)
                {
                  int wordnum = WORDS_BIG_ENDIAN ? total_words - i - 1 : i;
                  rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
                  emit_move_insn (target_part, const0_rtx);
                }
            }
          return target;
        }

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
                             build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                             NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
                           build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                           NULL_RTX, 0);
    }
  /* From here on we know the desired field is smaller than a word
     so we can assume it is an integer.  So we can safely extract it as one
     size of integer, if necessary, and then truncate or extend
     to the size that is wanted.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) == REG)
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        op0 = gen_rtx (SUBREG, TYPE_MODE (type_for_size (BITS_PER_WORD, 0)),
                       op0, offset);
      offset = 0;
    }
  else
    op0 = protect_from_queue (str_rtx, 1);

  /* Now OFFSET is nonzero only for memory operands.  */

  if (unsignedp)
    {
#ifdef HAVE_extzv
      if (HAVE_extzv
          && (GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extzv][0])
              >= bitsize)
          && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos
                    > GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extzv][0]))))
        {
          int xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0;
          rtx xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode
            = insn_operand_mode[(int) CODE_FOR_extzv][0];

          if (GET_CODE (xop0) == MEM)
            {
              int save_volatile_ok = volatile_ok;
              volatile_ok = 1;

              /* Is the memory operand acceptable?  */
              if (! ((*insn_operand_predicate[(int) CODE_FOR_extzv][1])
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum,
                                              align * BITS_PER_UNIT, maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS && GET_MODE_SIZE (bestmode) > align))
                    goto extzv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = change_address (xop0, bestmode,
                                         plus_constant (XEXP (xop0, 0),
                                                        xoffset));
                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = change_address (xop0, byte_mode,
                                       plus_constant (XEXP (xop0, 0), xoffset));

              volatile_ok = save_volatile_ok;
            }
          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extzv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extzv_loses;
          if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx (SUBREG, maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* Now convert from counting within UNIT to counting in MAXMODE.  */
          if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
            xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

          unit = GET_MODE_BITSIZE (maxmode);
          if (xtarget == 0
              || (flag_force_mem && GET_CODE (xtarget) == MEM))
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (GET_CODE (xtarget) == REG)
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extzv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_operand_predicate[(int) CODE_FOR_extzv][0])
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extzv (protect_from_queue (xtarget, 1),
                           xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (tmode, op0, offset, bitsize,
                                                bitpos, target, 1, align);
            }
        }
      else
        extzv_loses:
#endif
        target = extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
                                          target, 1, align);
    }
  else
    {
#ifdef HAVE_extv
      if (HAVE_extv
          && (GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extv][0])
              >= bitsize)
          && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos
                    > GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extv][0]))))
        {
          int xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0, xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode
            = insn_operand_mode[(int) CODE_FOR_extv][0];
          if (GET_CODE (xop0) == MEM)
            {
              /* Is the memory operand acceptable?  */
              if (! ((*insn_operand_predicate[(int) CODE_FOR_extv][1])
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum,
                                              align * BITS_PER_UNIT, maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS && GET_MODE_SIZE (bestmode) > align))
                    goto extv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = change_address (xop0, bestmode,
                                         plus_constant (XEXP (xop0, 0),
                                                        xoffset));
                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = change_address (xop0, byte_mode,
                                       plus_constant (XEXP (xop0, 0), xoffset));
            }
          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extv_loses;
          if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx (SUBREG, maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* XBITPOS counts within a size of UNIT.
             Adjust to count within a size of MAXMODE.  */
          if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
            xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);

          unit = GET_MODE_BITSIZE (maxmode);
          if (xtarget == 0
              || (flag_force_mem && GET_CODE (xtarget) == MEM))
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (GET_CODE (xtarget) == REG)
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_operand_predicate[(int) CODE_FOR_extv][0])
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extv (protect_from_queue (xtarget, 1),
                          xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (tmode, op0, offset, bitsize,
                                                bitpos, target, 0, align);
            }
        }
      else
        extv_loses:
#endif
        target = extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
                                          target, 0, align);
    }
  if (target == spec_target)
    return target;
  if (target == spec_target_subreg)
    return spec_target;
  if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
    {
      /* If the target mode is floating-point, first convert to the
         integer mode of that size and then access it as a floating-point
         value via a SUBREG.  */
      if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
        {
          target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
                                                   MODE_INT, 0),
                                    target, unsignedp);
          if (GET_CODE (target) != REG)
            target = copy_to_reg (target);
          return gen_rtx (SUBREG, tmode, target, 0);
        }
      else
        return convert_to_mode (tmode, target, unsignedp);
    }
  return target;
}
/* Extract a bit field using shifts and boolean operations.
   Returns an rtx to represent the value.
   OP0 addresses a register (word) or memory (byte).
   BITPOS says which bit within the word or byte the bit field starts in.
   OFFSET says how many bytes farther the bit field starts;
     it is 0 if OP0 is a register.
   BITSIZE says how many bits long the bit field is.
     (If OP0 is a register, it may be narrower than a full word,
      but BITPOS still counts within a full word,
      which is significant on bigendian machines.)

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.

   ALIGN is the alignment that STR_RTX is known to have, measured in bytes.  */
static rtx
extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
                         target, unsignedp, align)
     enum machine_mode tmode;
     register rtx op0, target;
     register int offset, bitsize, bitpos;
     int unsignedp;
     int align;
{
  int total_bits = BITS_PER_WORD;
  enum machine_mode mode;

  if (GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        return extract_split_bit_field (op0, bitsize, bitpos,
                                        unsignedp, align);
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            align * BITS_PER_UNIT, word_mode,
                            GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        /* The only way this should occur is if the field spans word
           boundaries.  */
        return extract_split_bit_field (op0, bitsize,
                                        bitpos + offset * BITS_PER_UNIT,
                                        unsignedp, align);

      total_bits = GET_MODE_BITSIZE (mode);
      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = change_address (op0, mode,
                            plus_constant (XEXP (op0, 0), offset));
    }

  mode = GET_MODE (op0);
  if (BYTES_BIG_ENDIAN)
    {
      /* BITPOS is the distance between our msb and that of OP0.
         Convert it to the distance from the lsb.  */

      bitpos = total_bits - bitsize - bitpos;
    }

  /* Now BITPOS is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */
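
  /* Worked example (added): with total_bits == 32, bitsize == 8, and a
     big-endian BITPOS of 8 (the field occupies bits 8..15 counted from
     the msb), this gives bitpos = 32 - 8 - 8 = 16, i.e. bits 16..23
     counted from the lsb -- the same physical bits.  */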
  if (unsignedp)
    {
      if (bitpos)
        {
          /* If the field does not already start at the lsb,
             shift it so it does.  */
          tree amount = build_int_2 (bitpos, 0);
          /* Maybe propagate the target for the shift.  */
          /* But not if we will return it--could confuse integrate.c.  */
          rtx subtarget = (target != 0 && GET_CODE (target) == REG
                           && !REG_FUNCTION_VALUE_P (target)
                           ? target : 0);
          if (tmode != mode) subtarget = 0;
          op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
        }
      /* Convert the value to the desired mode.  */
      if (mode != tmode)
        op0 = convert_to_mode (tmode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
         mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitpos + bitsize
#if 0
#ifdef SLOW_ZERO_EXTEND
          /* Always generate an `and' if
             we just zero-extended op0 and SLOW_ZERO_EXTEND, since it
             will combine fruitfully with the zero-extend.  */
          || tmode != mode
#endif
#endif
          )
        return expand_binop (GET_MODE (op0), and_optab, op0,
                             mask_rtx (GET_MODE (op0), 0, bitsize, 0),
                             target, 1, OPTAB_LIB_WIDEN);

      return op0;
    }
  /* To extract a signed bit-field, first shift its msb to the msb of the word,
     then arithmetic-shift its lsb to the lsb of the word.  */
  op0 = force_reg (mode, op0);
  if (mode != tmode)
    target = 0;

  /* Find the narrowest integer mode that contains the field.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
      {
        op0 = convert_to_mode (mode, op0, 0);
        break;
      }

  if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
    {
      tree amount = build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
      /* Maybe propagate the target for the shift.  */
      /* But not if we will return the result--could confuse integrate.c.  */
      rtx subtarget = (target != 0 && GET_CODE (target) == REG
                       && ! REG_FUNCTION_VALUE_P (target)
                       ? target : 0);

      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }

  return expand_shift (RSHIFT_EXPR, mode, op0,
                       build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                       target, 0);
}
/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
   of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
   complement of that if COMPLEMENT.  The mask is truncated if
   necessary to the width of mode MODE.  The mask is zero-extended if
   BITSIZE+BITPOS is too small for MODE.  */
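
/* Worked example (added): mask_rtx (SImode, 8, 4, 0) is 0x00000F00,
   four ones followed by eight zeros; with COMPLEMENT nonzero it is
   0xFFFFF0FF.  Both are truncated to SImode's 32 bits.  */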
static rtx
mask_rtx (mode, bitpos, bitsize, complement)
     enum machine_mode mode;
     int bitpos, bitsize, complement;
{
  HOST_WIDE_INT masklow, maskhigh;

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    masklow = (HOST_WIDE_INT) -1 << bitpos;
  else
    masklow = 0;

  if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
    masklow &= ((unsigned HOST_WIDE_INT) -1
                >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));

  if (bitpos <= HOST_BITS_PER_WIDE_INT)
    maskhigh = -1;
  else
    maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);

  if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
    maskhigh &= ((unsigned HOST_WIDE_INT) -1
                 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
  else
    maskhigh = 0;

  if (complement)
    {
      maskhigh = ~maskhigh;
      masklow = ~masklow;
    }

  return immed_double_const (masklow, maskhigh, mode);
}
/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
   VALUE truncated to BITSIZE bits and then shifted left BITPOS bits.  */
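
/* Worked example (added): lshift_value (SImode, GEN_INT (0x2D), 4, 3)
   truncates 0x2D to its low 3 bits (0x5) and shifts, giving 0x50.  */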
static rtx
lshift_value (mode, value, bitpos, bitsize)
     enum machine_mode mode;
     rtx value;
     int bitpos, bitsize;
{
  unsigned HOST_WIDE_INT v = INTVAL (value);
  HOST_WIDE_INT low, high;

  if (bitsize < HOST_BITS_PER_WIDE_INT)
    v &= ~((HOST_WIDE_INT) -1 << bitsize);

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      low = v << bitpos;
      high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
    }
  else
    {
      low = 0;
      high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
    }

  return immed_double_const (low, high, mode);
}
/* Extract a bit field that is split across two words
   and return an RTX for the result.

   OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
   BITSIZE is the field width; BITPOS, position of its first bit, in the word.
   UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend.

   ALIGN is the known alignment of OP0, measured in bytes.
   This is also the size of the memory objects to be used.  */
static rtx
extract_split_bit_field (op0, bitsize, bitpos, unsignedp, align)
     rtx op0;
     int bitsize, bitpos, unsignedp, align;
{
  int unit;
  int bitsdone = 0;
  rtx result = NULL_RTX;
  int first = 1;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (align * BITS_PER_UNIT, BITS_PER_WORD);

  while (bitsdone < bitsize)
    {
      int thissize;
      rtx part, word;
      int thispos;
      int offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         extract_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);
      /* If OP0 is a register, then handle OFFSET here.

         When handling multiword bitfields, extract_bit_field may pass
         down a word_mode SUBREG of a larger REG for a bitfield that actually
         crosses a word boundary.  Thus, for a SUBREG, we must find
         the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
        {
          word = operand_subword_force (SUBREG_REG (op0),
                                        SUBREG_WORD (op0) + offset,
                                        GET_MODE (SUBREG_REG (op0)));
          offset = 0;
        }
      else if (GET_CODE (op0) == REG)
        {
          word = operand_subword_force (op0, offset, GET_MODE (op0));
          offset = 0;
        }
      else
        word = op0;

      /* Extract the parts in bit-counting order,
         whose meaning is determined by BYTES_PER_UNIT.
         OFFSET is in UNITs, and UNIT is in bits.
         extract_fixed_bit_field wants offset in bytes.  */
      part = extract_fixed_bit_field (word_mode, word,
                                      offset * unit / BITS_PER_UNIT,
                                      thissize, thispos, 0, 1, align);
      bitsdone += thissize;
      /* Shift this part into place for the result.  */
      if (BYTES_BIG_ENDIAN)
        {
          if (bitsize != bitsdone)
            part = expand_shift (LSHIFT_EXPR, word_mode, part,
                                 build_int_2 (bitsize - bitsdone, 0), 0, 1);
        }
      else
        {
          if (bitsdone != thissize)
            part = expand_shift (LSHIFT_EXPR, word_mode, part,
                                 build_int_2 (bitsdone - thissize, 0), 0, 1);
        }

      if (first)
        result = part;
      else
        /* Combine the parts with bitwise or.  This works
           because we extracted each part as an unsigned bit field.  */
        result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
                               OPTAB_LIB_WIDEN);

      first = 0;
    }

  /* Unsigned bit field: we are done.  */
  if (unsignedp)
    return result;

  /* Signed bit field: sign-extend with two arithmetic shifts.  */
  result = expand_shift (LSHIFT_EXPR, word_mode, result,
                         build_int_2 (BITS_PER_WORD - bitsize, 0),
                         NULL_RTX, 0);
  return expand_shift (RSHIFT_EXPR, word_mode, result,
                       build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
}
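
/* Worked example (added): for a signed 12-bit field assembled in the low
   bits of a 32-bit word, the two shifts above compute

       ((int) (x << 20)) >> 20

   replicating bit 11 into bits 12..31 -- ordinary two's complement sign
   extension.  */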
/* Add INC into TARGET.  */

void
expand_inc (target, inc)
     rtx target, inc;
{
  rtx value = expand_binop (GET_MODE (target), add_optab,
                            target, inc,
                            target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}

/* Subtract DEC from TARGET.  */

void
expand_dec (target, dec)
     rtx target, dec;
{
  rtx value = expand_binop (GET_MODE (target), sub_optab,
                            target, dec,
                            target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}
/* Output a shift instruction for expression code CODE,
   with SHIFTED being the rtx for the value to shift,
   and AMOUNT the tree for the amount to shift by.
   Store the result in the rtx TARGET, if that is convenient.
   If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
   Return the rtx for where the value is.  */
rtx
expand_shift (code, mode, shifted, amount, target, unsignedp)
     enum tree_code code;
     register enum machine_mode mode;
     rtx shifted;
     tree amount;
     register rtx target;
     int unsignedp;
{
  register rtx op1, temp = 0;
  register int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
  register int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
  int try;

  /* Previously detected shift-counts computed by NEGATE_EXPR
     and shifted in the other direction; but that does not work
     on all machines.  */

  op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);

#ifdef SHIFT_COUNT_TRUNCATED
  if (SHIFT_COUNT_TRUNCATED)
    {
      if (GET_CODE (op1) == CONST_INT
          && (unsigned HOST_WIDE_INT) INTVAL (op1) >= GET_MODE_BITSIZE (mode))
        op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
                       % GET_MODE_BITSIZE (mode));
      else if (GET_CODE (op1) == SUBREG
               && SUBREG_WORD (op1) == 0)
        op1 = SUBREG_REG (op1);
    }
#endif

  if (op1 == const0_rtx)
    return shifted;

  for (try = 0; temp == 0 && try < 3; try++)
    {
      enum optab_methods methods;

      if (try == 0)
        methods = OPTAB_DIRECT;
      else if (try == 1)
        methods = OPTAB_WIDEN;
      else
        methods = OPTAB_LIB_WIDEN;
      if (rotate)
        {
          /* Widening does not work for rotation.  */
          if (methods == OPTAB_WIDEN)
            continue;
          else if (methods == OPTAB_LIB_WIDEN)
            {
              /* If we have been unable to open-code this by a rotation,
                 do it as the IOR of two shifts.  I.e., to rotate A
                 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
                 where C is the bitsize of A.

                 It is theoretically possible that the target machine might
                 not be able to perform either shift and hence we would
                 be making two libcalls rather than just the one for the
                 shift (similarly if IOR could not be done).  We will allow
                 this extremely unlikely lossage to avoid complicating the
                 code.  */
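
              /* Worked example (added): in SImode (C == 32) with N == 13,
                 this fallback computes

                     rotl (A, 13) == (A << 13) | ((unsigned) A >> 19)

                 one left shift, one logical right shift, one IOR.  */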
              rtx subtarget = target == shifted ? 0 : target;
              rtx temp1;
              tree type = TREE_TYPE (amount);
              tree new_amount = make_tree (type, op1);
              tree other_amount
                = fold (build (MINUS_EXPR, type,
                               convert (type,
                                        build_int_2 (GET_MODE_BITSIZE (mode),
                                                     0)),
                               amount));

              shifted = force_reg (mode, shifted);

              temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
                                   mode, shifted, new_amount, subtarget, 1);
              temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
                                    mode, shifted, other_amount, 0, 1);
              return expand_binop (mode, ior_optab, temp, temp1, target,
                                   unsignedp, methods);
            }
          temp = expand_binop (mode,
                               left ? rotl_optab : rotr_optab,
                               shifted, op1, target, unsignedp, methods);

          /* If we don't have the rotate, but we are rotating by a constant
             that is in range, try a rotate in the opposite direction.  */

          if (temp == 0 && GET_CODE (op1) == CONST_INT
              && INTVAL (op1) > 0 && INTVAL (op1) < GET_MODE_BITSIZE (mode))
            temp = expand_binop (mode,
                                 left ? rotr_optab : rotl_optab,
                                 shifted,
                                 GEN_INT (GET_MODE_BITSIZE (mode)
                                          - INTVAL (op1)),
                                 target, unsignedp, methods);
        }
      else if (unsignedp)
        temp = expand_binop (mode,
                             left ? ashl_optab : lshr_optab,
                             shifted, op1, target, unsignedp, methods);
      /* Do arithmetic shifts.
         Also, if we are going to widen the operand, we can just as well
         use an arithmetic right-shift instead of a logical one.  */
      if (temp == 0 && ! rotate
          && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
        {
          enum optab_methods methods1 = methods;

          /* If trying to widen a log shift to an arithmetic shift,
             don't accept an arithmetic shift of the same size.  */
          if (unsignedp)
            methods1 = OPTAB_MUST_WIDEN;

          /* Arithmetic shift */

          temp = expand_binop (mode,
                               left ? ashl_optab : ashr_optab,
                               shifted, op1, target, unsignedp, methods1);
        }

      /* We used to try extzv here for logical right shifts, but that was
         only useful for one machine, the VAX, and caused poor code
         generation there for lshrdi3, so the code was deleted and a
         define_expand for lshrsi3 was added to vax.md.  */
    }

  if (temp == 0)
    abort ();
  return temp;
}
enum alg_code { alg_zero, alg_m, alg_shift,
                alg_add_t_m2, alg_sub_t_m2,
                alg_add_factor, alg_sub_factor,
                alg_add_t2_m, alg_sub_t2_m,
                alg_add, alg_subtract, alg_factor, alg_shiftop };
/* This structure records a sequence of operations.
   `ops' is the number of operations recorded.
   `cost' is their total cost.
   The operations are stored in `op' and the corresponding
   logarithms of the integer coefficients in `log'.

   These are the operations:
   alg_zero             total := 0;
   alg_m                total := multiplicand;
   alg_shift            total := total * coeff
   alg_add_t_m2         total := total + multiplicand * coeff;
   alg_sub_t_m2         total := total - multiplicand * coeff;
   alg_add_factor       total := total * coeff + total;
   alg_sub_factor       total := total * coeff - total;
   alg_add_t2_m         total := total * coeff + multiplicand;
   alg_sub_t2_m         total := total * coeff - multiplicand;

   The first operand must be either alg_zero or alg_m.  */
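
/* Worked example (added): one algorithm for t == 53 (110101 in binary) is

       total := multiplicand;                        (alg_m)
       total := total*2 + multiplicand;    -> 3x     (alg_add_t2_m, log 1)
       total := total*4 + multiplicand;    -> 13x    (alg_add_t2_m, log 2)
       total := total*4 + multiplicand;    -> 53x    (alg_add_t2_m, log 2)

   three shift-add steps in place of a multiply.  */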
struct algorithm
{
  short cost;
  short ops;
  /* The size of the OP and LOG fields are not directly related to the
     word size, but the worst-case algorithms will be if we have few
     consecutive ones or zeros, i.e., a multiplicand like 10101010101...
     In that case we will generate shift-by-2, add, shift-by-2, add,...,
     in total wordsize operations.  */
  enum alg_code op[MAX_BITS_PER_WORD];
  char log[MAX_BITS_PER_WORD];
};
/* Compute and return the best algorithm for multiplying by T.
   The algorithm must cost less than COST_LIMIT.
   If retval.cost >= COST_LIMIT, no algorithm was found and all
   other fields of the returned struct are undefined.  */
static void
synth_mult (alg_out, t, cost_limit)
     struct algorithm *alg_out;
     unsigned HOST_WIDE_INT t;
     int cost_limit;
{
  int m;
  struct algorithm *alg_in, *best_alg;
  int cost;
  unsigned HOST_WIDE_INT q;

  /* Indicate that no algorithm is yet found.  If no algorithm
     is found, this value will be returned and indicate failure.  */
  alg_out->cost = cost_limit;

  if (cost_limit <= 0)
    return;

  /* t == 1 can be done in zero cost.  */
  if (t == 1)
    {
      alg_out->ops = 1;
      alg_out->cost = 0;
      alg_out->op[0] = alg_m;
      return;
    }

  /* t == 0 sometimes has a cost.  If it does and it exceeds our limit,
     fail now.  */
  if (t == 0)
    {
      if (zero_cost >= cost_limit)
        return;
      else
        {
          alg_out->ops = 1;
          alg_out->cost = zero_cost;
          alg_out->op[0] = alg_zero;
          return;
        }
    }
  /* We'll be needing a couple extra algorithm structures now.  */

  alg_in = (struct algorithm *) alloca (sizeof (struct algorithm));
  best_alg = (struct algorithm *) alloca (sizeof (struct algorithm));
  /* If we have a group of zero bits at the low-order part of T, try
     multiplying by the remaining bits and then doing a shift.  */

  if ((t & 1) == 0)
    {
      m = floor_log2 (t & -t);  /* m = number of low zero bits */
      q = t >> m;
      cost = shift_cost[m];
      synth_mult (alg_in, q, cost_limit - cost);

      cost += alg_in->cost;
      if (cost < cost_limit)
        {
          struct algorithm *x;
          x = alg_in, alg_in = best_alg, best_alg = x;
          best_alg->log[best_alg->ops] = m;
          best_alg->op[best_alg->ops] = alg_shift;
          cost_limit = cost;
        }
    }
1984 /* If we have an odd number, add or subtract one. */
1987 unsigned HOST_WIDE_INT w;
1989 for (w = 1; (w & t) != 0; w <<= 1)
1992 /* Reject the case where t is 3.
1993 Thus we prefer addition in that case. */
1996 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
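/* E.g. t = 7 (binary 111): recurse on t + 1 = 8, which is a single
   alg_shift, then record alg_sub_t_m2 with log 0, giving
   x*7 = (x << 3) - x.  */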
1999 synth_mult (alg_in, t + 1, cost_limit - cost);
2001 cost += alg_in->cost;
2002 if (cost < cost_limit)
2004 struct algorithm *x;
2005 x = alg_in, alg_in = best_alg, best_alg = x;
2006 best_alg->log[best_alg->ops] = 0;
2007 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2013 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2016 synth_mult (alg_in, t - 1, cost_limit - cost);
2018 cost += alg_in->cost;
2019 if (cost < cost_limit)
2021 struct algorithm *x;
2022 x = alg_in, alg_in = best_alg, best_alg = x;
2023 best_alg->log[best_alg->ops] = 0;
2024 best_alg->op[best_alg->ops] = alg_add_t_m2;
2030 /* Look for factors of t of the form
2031 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2032 If we find such a factor, we can multiply by t using an algorithm that
2033 multiplies by q, shift the result by m and add/subtract it to itself.
2035 We search for large factors first and loop down, even if large factors
2036 are less probable than small; if we find a large factor we will find a
2037 good sequence quickly, and therefore be able to prune (by decreasing
2038 COST_LIMIT) the search. */
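/* E.g. t = 15: m = 2 gives d = 2**2 + 1 = 5, which divides t, so we
   multiply by q = 3 first and then record alg_add_factor with log 2;
   total := (total << 2) + total turns 3*x into 15*x.  */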
2040 for (m = floor_log2 (t - 1); m >= 2; m--)
2042 unsigned HOST_WIDE_INT d;
2044 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2045 if (t % d == 0 && t > d)
2047 cost = MIN (shiftadd_cost[m], add_cost + shift_cost[m]);
2048 synth_mult (alg_in, t / d, cost_limit - cost);
2050 cost += alg_in->cost;
2051 if (cost < cost_limit)
2053 struct algorithm *x;
2054 x = alg_in, alg_in = best_alg, best_alg = x;
2055 best_alg->log[best_alg->ops] = m;
2056 best_alg->op[best_alg->ops] = alg_add_factor;
2059 /* Other factors will have been taken care of in the recursion. */
2063 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2064 if (t % d == 0 && t > d)
2066 cost = MIN (shiftsub_cost[m], add_cost + shift_cost[m]);
2067 synth_mult (alg_in, t / d, cost_limit - cost);
2069 cost += alg_in->cost;
2070 if (cost < cost_limit)
2072 struct algorithm *x;
2073 x = alg_in, alg_in = best_alg, best_alg = x;
2074 best_alg->log[best_alg->ops] = m;
2075 best_alg->op[best_alg->ops] = alg_sub_factor;
2082 /* Try shift-and-add (load effective address) instructions,
2083 i.e. do a*3, a*5, a*9. */
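/* E.g. t = 9: (t - 1) >> 3 is 1 (plain alg_m), so alg_add_t2_m with
   log 3 gives total := (total << 3) + x = 9*x in a single shift-add.  */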
2091 cost = shiftadd_cost[m];
2092 synth_mult (alg_in, (t - 1) >> m, cost_limit - cost);
2094 cost += alg_in->cost;
2095 if (cost < cost_limit)
2097 struct algorithm *x;
2098 x = alg_in, alg_in = best_alg, best_alg = x;
2099 best_alg->log[best_alg->ops] = m;
2100 best_alg->op[best_alg->ops] = alg_add_t2_m;
2110 cost = shiftsub_cost[m];
2111 synth_mult (alg_in, (t + 1) >> m, cost_limit - cost);
2113 cost += alg_in->cost;
2114 if (cost < cost_limit)
2116 struct algorithm *x;
2117 x = alg_in, alg_in = best_alg, best_alg = x;
2118 best_alg->log[best_alg->ops] = m;
2119 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2125 /* If cost_limit has not decreased since we stored it in alg_out->cost,
2126 we have not found any algorithm. */
2127 if (cost_limit == alg_out->cost)
2130 /* If the sequence we are getting is too long for `struct algorithm'
2131 to record, make this search fail. */
2132 if (best_alg->ops == MAX_BITS_PER_WORD)
2135 /* Copy the algorithm from temporary space to the space at alg_out.
2136 We avoid using structure assignment because the majority of
2137 best_alg is normally undefined, and this is a critical function. */
2138 alg_out->ops = best_alg->ops + 1;
2139 alg_out->cost = cost_limit;
2140 bcopy ((char *) best_alg->op, (char *) alg_out->op,
2141 alg_out->ops * sizeof *alg_out->op);
2142 bcopy ((char *) best_alg->log, (char *) alg_out->log,
2143 alg_out->ops * sizeof *alg_out->log);
2146 /* Perform a multiplication and return an rtx for the result.
2147 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
2148 TARGET is a suggestion for where to store the result (an rtx).
2150 We check specially for a constant integer as OP1.
2151 If you want this check for OP0 as well, then before calling
2152 you should swap the two operands if OP0 would be constant. */
2155 expand_mult (mode, op0, op1, target, unsignedp)
2156 enum machine_mode mode;
2157 register rtx op0, op1, target;
2160 rtx const_op1 = op1;
2162 /* synth_mult does an `unsigned int' multiply. As long as the mode is
2163 less than or equal in size to `unsigned int' this doesn't matter.
2164 If the mode is larger than `unsigned int', then synth_mult works only
2165 if the constant value exactly fits in an `unsigned int' without any
2166 truncation. This means that multiplying by negative values does
2167 not work; results are off by 2^32 on a 32-bit machine. */
2169 /* If we are multiplying in DImode, it may still be a win
2170 to try to work with shifts and adds. */
2171 if (GET_CODE (op1) == CONST_DOUBLE
2172 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
2173 && HOST_BITS_PER_INT >= BITS_PER_WORD
2174 && CONST_DOUBLE_HIGH (op1) == 0)
2175 const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
2176 else if (HOST_BITS_PER_INT < GET_MODE_BITSIZE (mode)
2177 && GET_CODE (op1) == CONST_INT
2178 && INTVAL (op1) < 0)
2181 /* We used to test optimize here, on the grounds that it's better to
2182 produce a smaller program when -O is not used.
2183 But this causes such a terrible slowdown sometimes
2184 that it seems better to use synth_mult always. */
2186 if (const_op1 && GET_CODE (const_op1) == CONST_INT)
2188 struct algorithm alg;
2189 struct algorithm alg2;
2190 HOST_WIDE_INT val = INTVAL (op1);
2191 HOST_WIDE_INT val_so_far;
2194 enum {basic_variant, negate_variant, add_variant} variant = basic_variant;
2196 /* Try to do the computation three ways: multiply by the negative of OP1
2197 and then negate, do the multiplication directly, or do multiplication
2198 by OP1 - 1 and then add OP0. */
2200 mult_cost = rtx_cost (gen_rtx (MULT, mode, op0, op1), SET);
2201 mult_cost = MIN (12 * add_cost, mult_cost);
2203 synth_mult (&alg, val, mult_cost);
2205 /* This works only if the inverted value actually fits in an
2206 `unsigned int'. */
2207 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2209 synth_mult (&alg2, - val,
2210 (alg.cost < mult_cost ? alg.cost : mult_cost) - negate_cost);
2211 if (alg2.cost + negate_cost < alg.cost)
2212 alg = alg2, variant = negate_variant;
2215 /* This proves very useful for division-by-constant. */
2216 synth_mult (&alg2, val - 1,
2217 (alg.cost < mult_cost ? alg.cost : mult_cost) - add_cost);
2218 if (alg2.cost + add_cost < alg.cost)
2219 alg = alg2, variant = add_variant;
2221 if (alg.cost < mult_cost)
2223 /* We found something cheaper than a multiply insn. */
2227 op0 = protect_from_queue (op0, 0);
2229 /* Avoid referencing memory over and over.
2230 For speed, but also for correctness when mem is volatile. */
2231 if (GET_CODE (op0) == MEM)
2232 op0 = force_reg (mode, op0);
2234 /* ACCUM starts out either as OP0 or as a zero, depending on
2235 the first operation. */
2237 if (alg.op[0] == alg_zero)
2239 accum = copy_to_mode_reg (mode, const0_rtx);
2242 else if (alg.op[0] == alg_m)
2244 accum = copy_to_mode_reg (mode, op0);
2250 for (opno = 1; opno < alg.ops; opno++)
2252 int log = alg.log[opno];
2253 int preserve = preserve_subexpressions_p ();
2254 rtx shift_subtarget = preserve ? 0 : accum;
2255 rtx add_target
2256 = (opno == alg.ops - 1 && target != 0 && variant != add_variant
2258 rtx accum_target = preserve ? 0 : accum;
2260 switch (alg.op[opno])
2263 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2264 build_int_2 (log, 0), NULL_RTX, 0);
2269 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2270 build_int_2 (log, 0), NULL_RTX, 0);
2271 accum = force_operand (gen_rtx (PLUS, mode, accum, tem),
2272 add_target ? add_target : accum_target);
2273 val_so_far += (HOST_WIDE_INT) 1 << log;
2277 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2278 build_int_2 (log, 0), NULL_RTX, 0);
2279 accum = force_operand (gen_rtx (MINUS, mode, accum, tem),
2280 add_target ? add_target : accum_target);
2281 val_so_far -= (HOST_WIDE_INT) 1 << log;
2285 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2286 build_int_2 (log, 0), shift_subtarget,
2288 accum = force_operand (gen_rtx (PLUS, mode, accum, op0),
2289 add_target ? add_target : accum_target);
2290 val_so_far = (val_so_far << log) + 1;
2294 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2295 build_int_2 (log, 0), shift_subtarget,
2297 accum = force_operand (gen_rtx (MINUS, mode, accum, op0),
2298 add_target ? add_target : accum_target);
2299 val_so_far = (val_so_far << log) - 1;
2302 case alg_add_factor:
2303 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2304 build_int_2 (log, 0), NULL_RTX, 0);
2305 accum = force_operand (gen_rtx (PLUS, mode, accum, tem),
2306 add_target ? add_target : accum_target);
2307 val_so_far += val_so_far << log;
2310 case alg_sub_factor:
2311 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2312 build_int_2 (log, 0), NULL_RTX, 0);
2313 accum = force_operand (gen_rtx (MINUS, mode, tem, accum),
2314 (add_target ? add_target
2315 : preserve ? 0 : tem));
2316 val_so_far = (val_so_far << log) - val_so_far;
2323 /* Write a REG_EQUAL note on the last insn so that we can cse
2324 multiplication sequences. */
2326 insn = get_last_insn ();
2328 = gen_rtx (EXPR_LIST, REG_EQUAL,
2329 gen_rtx (MULT, mode, op0, GEN_INT (val_so_far)),
2333 if (variant == negate_variant)
2335 val_so_far = - val_so_far;
2336 accum = expand_unop (mode, neg_optab, accum, target, 0);
2338 else if (variant == add_variant)
2340 val_so_far = val_so_far + 1;
2341 accum = force_operand (gen_rtx (PLUS, mode, accum, op0), target);
2344 if (val != val_so_far)
2351 /* This used to use umul_optab if unsigned, but for non-widening multiply
2352 there is no difference between signed and unsigned. */
2353 op0 = expand_binop (mode, smul_optab,
2354 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
2360 /* Return the smallest n such that 2**n >= X. */
2362 int
2363 ceil_log2 (x)
2364 unsigned HOST_WIDE_INT x;
2366 return floor_log2 (x - 1) + 1;
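/* E.g. ceil_log2 (5) == floor_log2 (4) + 1 == 3, while
   ceil_log2 (8) == floor_log2 (7) + 1 == 3; for x == 1 this relies on
   floor_log2 (0) being -1, so that the result is 0.  */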
2369 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
2370 replace division by D, and put the least significant N bits of the result
2371 in *MULTIPLIER_PTR and return the most significant bit.
2373 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
2374 needed precision is in PRECISION (should be <= N).
2376 PRECISION should be as small as possible so this function can choose
2377 the multiplier more freely.
2379 The rounded-up logarithm of D is placed in *LGUP_PTR. A shift count that
2380 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
2382 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
2383 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
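/* A worked instance, assuming 32-bit operands: for d = 7 with
   n = precision = 32, lgup = 3, mlow = 2^35 / 7 and
   mhigh = (2^35 + 2^3) / 7.  Halving stops immediately, so
   *POST_SHIFT_PTR stays 3 and mhigh is a 33-bit value; we return 1
   (the leading bit) with *MULTIPLIER_PTR = 0x24924925, the case that
   the t1 + ((x - t1) >> 1) fixup in expand_divmod is there for.  */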
2386 unsigned HOST_WIDE_INT
2387 choose_multiplier (d, n, precision, multiplier_ptr, post_shift_ptr, lgup_ptr)
2388 unsigned HOST_WIDE_INT d;
2391 unsigned HOST_WIDE_INT *multiplier_ptr;
2392 int *post_shift_ptr;
2395 unsigned HOST_WIDE_INT mhigh_hi, mhigh_lo;
2396 unsigned HOST_WIDE_INT mlow_hi, mlow_lo;
2397 int lgup, post_shift;
2399 unsigned HOST_WIDE_INT nh, nl, dummy1, dummy2;
2401 /* lgup = ceil(log2(divisor)); */
2402 lgup = ceil_log2 (d);
2407 pow = n + lgup;
2408 pow2 = n + lgup - precision;
2410 if (pow == 2 * HOST_BITS_PER_WIDE_INT)
2412 /* We could handle this with some effort, but this case is much better
2413 handled directly with a scc insn, so rely on caller using that. */
2417 /* mlow = 2^(N + lgup)/d */
2418 if (pow >= HOST_BITS_PER_WIDE_INT)
2420 nh = (unsigned HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
2426 nl = (unsigned HOST_WIDE_INT) 1 << pow;
2428 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2429 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
2431 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
2432 if (pow2 >= HOST_BITS_PER_WIDE_INT)
2433 nh |= (unsigned HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
2435 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
2436 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2437 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
2439 if (mhigh_hi && nh - d >= d)
2441 if (mhigh_hi > 1 || mlow_hi > 1)
2443 /* assert that mlow < mhigh. */
2444 if (! (mlow_hi < mhigh_hi || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo)))
2447 /* If precision == N, then mlow, mhigh exceed 2^N
2448 (but they do not exceed 2^(N+1)). */
2450 /* Reduce to lowest terms */
2451 for (post_shift = lgup; post_shift > 0; post_shift--)
2453 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
2454 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
2464 *post_shift_ptr = post_shift;
2466 if (n < HOST_BITS_PER_WIDE_INT)
2468 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
2469 *multiplier_ptr = mhigh_lo & mask;
2470 return mhigh_lo >= mask;
2474 *multiplier_ptr = mhigh_lo;
2479 /* Compute the inverse of X mod 2**N, i.e., find Y such that X * Y is
2480 congruent to 1 (mod 2**N). */
2482 static unsigned HOST_WIDE_INT
2483 invert_mod2n (x, n)
2484 unsigned HOST_WIDE_INT x;
2487 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
2489 /* The algorithm notes that the choice y = x satisfies
2490 x*y == 1 mod 2^3, since x is assumed odd.
2491 Each iteration doubles the number of bits of significance in y. */
2493 unsigned HOST_WIDE_INT mask;
2494 unsigned HOST_WIDE_INT y = x;
2497 mask = (n == HOST_BITS_PER_WIDE_INT
2498 ? ~(unsigned HOST_WIDE_INT) 0
2499 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
2503 y = y * (2 - x*y) & mask; /* Modulo 2^N */
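/* E.g. x = 7, n = 8: y starts at 7, which already satisfies
   7*7 == 49 == 1 (mod 8); one step gives y = 7 * (2 - 49) & 0xff = 183,
   and 7 * 183 == 1281 == 1 (mod 256).  Each step at least doubles the
   number of correct low-order bits, so the loop needs O(log N) steps.  */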
2509 /* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
2510 flavor of OP0 and OP1. ADJ_OPERAND is already the high half of the
2511 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
2512 to become unsigned; if UNSIGNEDP is zero, adjust the unsigned product to
2513 become signed.
2515 The result is put in TARGET if that is convenient.
2517 MODE is the mode of operation. */
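/* In outline: writing the operands as OP0 - 2^N*s0 and OP1 - 2^N*s1,
   where s0 and s1 are the respective sign bits and N the mode width,
     (OP0 - 2^N*s0) * (OP1 - 2^N*s1)
       = OP0*OP1 - 2^N*(s0*OP1 + s1*OP0) + 2^(2N)*s0*s1
   so the signed high part is the unsigned high part minus
   (OP0 < 0 ? OP1 : 0) minus (OP1 < 0 ? OP0 : 0), and vice versa; the
   arithmetic-shift-and-AND pairs below form exactly those terms.  */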
2520 expand_mult_highpart_adjust (mode, adj_operand, op0, op1, target, unsignedp)
2521 enum machine_mode mode;
2522 register rtx adj_operand, op0, op1, target;
2526 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
2528 tem = expand_shift (RSHIFT_EXPR, mode, op0,
2529 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2531 tem = expand_and (tem, op1, NULL_RTX);
2532 adj_operand = force_operand (gen_rtx (adj_code, mode, adj_operand, tem),
2535 tem = expand_shift (RSHIFT_EXPR, mode, op1,
2536 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2538 tem = expand_and (tem, op0, NULL_RTX);
2539 target = force_operand (gen_rtx (adj_code, mode, adj_operand, tem), target);
2544 /* Emit code to multiply OP0 and CNST1, putting the high half of the result
2545 in TARGET if that is convenient, and return where the result is. If the
2546 operation cannot be performed, 0 is returned.
2548 MODE is the mode of operation and result.
2550 UNSIGNEDP nonzero means unsigned multiply.
2552 MAX_COST is the total allowed cost for the expanded RTL. */
2555 expand_mult_highpart (mode, op0, cnst1, target, unsignedp, max_cost)
2556 enum machine_mode mode;
2557 register rtx op0, target;
2558 unsigned HOST_WIDE_INT cnst1;
2562 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
2563 optab mul_highpart_optab;
2566 int size = GET_MODE_BITSIZE (mode);
2569 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
2570 if (size > HOST_BITS_PER_WIDE_INT)
2573 op1 = GEN_INT (cnst1);
2575 if (GET_MODE_BITSIZE (wider_mode) <= HOST_BITS_PER_INT)
2579 = immed_double_const (cnst1,
2582 : -(cnst1 >> (HOST_BITS_PER_WIDE_INT - 1))),
2585 /* expand_mult handles constant multiplication of word_mode
2586 or narrower. It does a poor job for large modes. */
2587 if (size < BITS_PER_WORD
2588 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2590 /* We have to do this, since expand_binop doesn't do conversion for
2591 multiply. Maybe change expand_binop to handle widening multiply? */
2592 op0 = convert_to_mode (wider_mode, op0, unsignedp);
2594 tem = expand_mult (wider_mode, op0, wide_op1, NULL_RTX, unsignedp);
2595 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2596 build_int_2 (size, 0), NULL_RTX, 1);
2597 return convert_modes (mode, wider_mode, tem, unsignedp);
2601 target = gen_reg_rtx (mode);
2603 /* Firstly, try using a multiplication insn that only generates the needed
2604 high part of the product, and in the sign flavor of unsignedp. */
2605 if (mul_highpart_cost[(int) mode] < max_cost)
2607 mul_highpart_optab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
2608 target = expand_binop (mode, mul_highpart_optab,
2609 op0, wide_op1, target, unsignedp, OPTAB_DIRECT);
2614 /* Secondly, same as above, but use sign flavor opposite of unsignedp.
2615 Need to adjust the result after the multiplication. */
2616 if (mul_highpart_cost[(int) mode] + 2 * shift_cost[size-1] + 4 * add_cost < max_cost)
2618 mul_highpart_optab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
2619 target = expand_binop (mode, mul_highpart_optab,
2620 op0, wide_op1, target, unsignedp, OPTAB_DIRECT);
2622 /* We used the wrong signedness. Adjust the result. */
2623 return expand_mult_highpart_adjust (mode, target, op0,
2624 op1, target, unsignedp);
2627 /* Try widening multiplication. */
2628 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
2629 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2630 && mul_widen_cost[(int) wider_mode] < max_cost)
2632 op1 = force_reg (mode, op1);
2636 /* Try widening the mode and perform a non-widening multiplication. */
2637 moptab = smul_optab;
2638 if (smul_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2639 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2645 /* Try widening multiplication of opposite signedness, and adjust. */
2646 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
2647 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2648 && (mul_widen_cost[(int) wider_mode]
2649 + 2 * shift_cost[size-1] + 4 * add_cost < max_cost))
2651 rtx regop1 = force_reg (mode, op1);
2652 tem = expand_binop (wider_mode, moptab, op0, regop1,
2653 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
2656 /* Extract the high half of the just generated product. */
2657 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2658 build_int_2 (size, 0), NULL_RTX, 1);
2659 tem = convert_modes (mode, wider_mode, tem, unsignedp);
2660 /* We used the wrong signedness. Adjust the result. */
2661 return expand_mult_highpart_adjust (mode, tem, op0, op1,
2669 /* Pass NULL_RTX as target since TARGET has wrong mode. */
2670 tem = expand_binop (wider_mode, moptab, op0, op1,
2671 NULL_RTX, unsignedp, OPTAB_WIDEN);
2675 /* Extract the high half of the just generated product. */
2676 if (mode == word_mode)
2678 return gen_highpart (mode, tem);
2682 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2683 build_int_2 (size, 0), NULL_RTX, 1);
2684 return convert_modes (mode, wider_mode, tem, unsignedp);
2688 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
2689 if that is convenient, and returning where the result is.
2690 You may request either the quotient or the remainder as the result;
2691 specify REM_FLAG nonzero to get the remainder.
2693 CODE is the expression code for which kind of division this is;
2694 it controls how rounding is done. MODE is the machine mode to use.
2695 UNSIGNEDP nonzero means do unsigned division. */
2697 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
2698 and then correct it by or'ing in missing high bits
2699 if result of ANDI is nonzero.
2700 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
2701 This could optimize to a bfexts instruction.
2702 But C doesn't use these operations, so their optimizations are
2703 left for later. */
2705 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
2708 expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
2710 enum tree_code code;
2711 enum machine_mode mode;
2712 register rtx op0, op1, target;
2715 enum machine_mode compute_mode;
2716 register rtx tquotient;
2717 rtx quotient = 0, remainder = 0;
2721 optab optab1, optab2;
2722 int op1_is_constant, op1_is_pow2;
2723 int max_cost, extra_cost;
2725 op1_is_constant = GET_CODE (op1) == CONST_INT;
2726 op1_is_pow2 = (op1_is_constant
2727 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
2728 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
2730 /*
2731 This is the structure of expand_divmod:
2733 First comes code to fix up the operands so we can perform the operations
2734 correctly and efficiently.
2736 Second comes a switch statement with code specific for each rounding mode.
2737 For some special operands this code emits all RTL for the desired
2738 operation, for other cases, it generates only a quotient and stores it in
2739 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
2740 to indicate that it has not done anything.
2742 Last comes code that finishes the operation. If QUOTIENT is set and
2743 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
2744 QUOTIENT is not set, it is computed using trunc rounding.
2746 We try to generate special code for division and remainder when OP1 is a
2747 constant. If |OP1| = 2**n we can use shifts and some other fast
2748 operations. For other values of OP1, we compute a carefully selected
2749 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
2750 by m.
2752 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
2753 half of the product. Different strategies for generating the product are
2754 implemented in expand_mult_highpart.
2756 If what we actually want is the remainder, we generate that by another
2757 by-constant multiplication and a subtraction. */
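/* A standard example of the multiply path, assuming a 32-bit mode:
   for unsigned division by 10, choose_multiplier yields m = 0xCCCCCCCD
   with a post-shift of 3, so x / 10 becomes the high half of
   x * 0xCCCCCCCD shifted right by 3.  */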
2759 /* We shouldn't be called with OP1 == const1_rtx, but some of the
2760 code below will malfunction if we are, so check here and handle
2761 the special case if so. */
2762 if (op1 == const1_rtx)
2763 return rem_flag ? const0_rtx : op0;
2766 /* Don't use the function value register as a target
2767 since we have to read it as well as write it,
2768 and function-inlining gets confused by this. */
2769 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
2770 /* Don't clobber an operand while doing a multi-step calculation. */
2771 || ((rem_flag || op1_is_constant)
2772 && (reg_mentioned_p (target, op0)
2773 || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
2774 || reg_mentioned_p (target, op1)
2775 || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM)))
2778 /* Get the mode in which to perform this computation. Normally it will
2779 be MODE, but sometimes we can't do the desired operation in MODE.
2780 If so, pick a wider mode in which we can do the operation. Convert
2781 to that mode at the start to avoid repeated conversions.
2783 First see what operations we need. These depend on the expression
2784 we are evaluating. (We assume that divxx3 insns exist under the
2785 same conditions as modxx3 insns, and that these insns don't normally
2786 fail. If these assumptions are not correct, we may generate less
2787 efficient code in some cases.)
2789 Then see if we find a mode in which we can open-code that operation
2790 (either a division, modulus, or shift). Finally, check for the smallest
2791 mode for which we can do the operation with a library call. */
2793 /* We might want to refine this now that we have division-by-constant
2794 optimization. Since expand_mult_highpart tries so many variants, it is
2795 not straightforward to generalize this. Maybe we should make an array
2796 of possible modes in init_expmed? Save this for GCC 2.7. */
2798 optab1 = (op1_is_pow2 ? (unsignedp ? lshr_optab : ashr_optab)
2799 : (unsignedp ? udiv_optab : sdiv_optab));
2800 optab2 = (op1_is_pow2 ? optab1 : (unsignedp ? udivmod_optab : sdivmod_optab));
2802 for (compute_mode = mode; compute_mode != VOIDmode;
2803 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
2804 if (optab1->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing
2805 || optab2->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing)
2808 if (compute_mode == VOIDmode)
2809 for (compute_mode = mode; compute_mode != VOIDmode;
2810 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
2811 if (optab1->handlers[(int) compute_mode].libfunc
2812 || optab2->handlers[(int) compute_mode].libfunc)
2815 /* If we still couldn't find a mode, use MODE, but we'll probably abort
2816 in expand_binop. */
2817 if (compute_mode == VOIDmode)
2818 compute_mode = mode;
2820 if (target && GET_MODE (target) == compute_mode)
2823 tquotient = gen_reg_rtx (compute_mode);
2825 size = GET_MODE_BITSIZE (compute_mode);
2826 #if 0
2827 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
2828 (mode), and thereby get better code when OP1 is a constant. Do that
2829 later. It will require going over all usages of SIZE below. */
2830 size = GET_MODE_BITSIZE (mode);
2831 #endif
2833 max_cost = div_cost[(int) compute_mode]
2834 - (rem_flag ? mul_cost[(int) compute_mode] + add_cost : 0);
2836 /* Now convert to the best mode to use. */
2837 if (compute_mode != mode)
2839 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
2840 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
2842 /* convert_modes may have placed op1 into a register, so we
2843 must recompute the following. */
2844 op1_is_constant = GET_CODE (op1) == CONST_INT;
2845 op1_is_pow2 = (op1_is_constant
2846 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
2848 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1)))))) ;
2851 /* If one of the operands is a volatile MEM, copy it into a register. */
2853 if (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
2854 op0 = force_reg (compute_mode, op0);
2855 if (GET_CODE (op1) == MEM && MEM_VOLATILE_P (op1))
2856 op1 = force_reg (compute_mode, op1);
2858 /* If we need the remainder or if OP1 is constant, we must put OP0
2859 in a register in case it has any queued subexpressions. */
2860 if (rem_flag || op1_is_constant)
2861 op0 = force_reg (compute_mode, op0);
2863 last = get_last_insn ();
2865 /* Promote floor rounding to trunc rounding for unsigned operations. */
2868 if (code == FLOOR_DIV_EXPR)
2869 code = TRUNC_DIV_EXPR;
2870 if (code == FLOOR_MOD_EXPR)
2871 code = TRUNC_MOD_EXPR;
2872 if (code == EXACT_DIV_EXPR && op1_is_pow2)
2873 code = TRUNC_DIV_EXPR;
2876 if (op1 != const0_rtx)
2877 switch (code)
2879 case TRUNC_MOD_EXPR:
2880 case TRUNC_DIV_EXPR:
2881 if (op1_is_constant)
2885 unsigned HOST_WIDE_INT mh, ml;
2886 int pre_shift, post_shift;
2888 unsigned HOST_WIDE_INT d = INTVAL (op1);
2890 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
2892 pre_shift = floor_log2 (d);
2896 = expand_binop (compute_mode, and_optab, op0,
2897 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
2901 return gen_lowpart (mode, remainder);
2903 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
2904 build_int_2 (pre_shift, 0),
2907 else if (size <= HOST_BITS_PER_WIDE_INT)
2909 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
2911 /* Most significant bit of divisor is set; emit an scc
2912 insn. */
2913 quotient = emit_store_flag (tquotient, GEU, op0, op1,
2914 compute_mode, 1, 1);
2920 /* Find a suitable multiplier and right shift count
2921 instead of dividing by D. */
2923 mh = choose_multiplier (d, size, size,
2924 &ml, &post_shift, &dummy);
2926 /* If the suggested multiplier is more than SIZE bits,
2927 we can do better for even divisors, using an
2928 initial right shift. */
2929 if (mh != 0 && (d & 1) == 0)
2931 pre_shift = floor_log2 (d & -d);
2932 mh = choose_multiplier (d >> pre_shift, size,
2934 &ml, &post_shift, &dummy);
2945 extra_cost = (shift_cost[post_shift - 1]
2946 + shift_cost[1] + 2 * add_cost);
2947 t1 = expand_mult_highpart (compute_mode, op0, ml,
2949 max_cost - extra_cost);
2952 t2 = force_operand (gen_rtx (MINUS, compute_mode,
2955 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
2956 build_int_2 (1, 0), NULL_RTX,1);
2957 t4 = force_operand (gen_rtx (PLUS, compute_mode,
2961 = expand_shift (RSHIFT_EXPR, compute_mode, t4,
2962 build_int_2 (post_shift - 1, 0),
2969 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
2970 build_int_2 (pre_shift, 0),
2972 extra_cost = (shift_cost[pre_shift]
2973 + shift_cost[post_shift]);
2974 t2 = expand_mult_highpart (compute_mode, t1, ml,
2976 max_cost - extra_cost);
2980 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
2981 build_int_2 (post_shift, 0),
2986 else /* Mode too wide for the tricky code */
2989 insn = get_last_insn ();
2991 && (set = single_set (insn)) != 0
2992 && SET_DEST (set) == quotient)
2994 = gen_rtx (EXPR_LIST, REG_EQUAL,
2995 gen_rtx (UDIV, compute_mode, op0, op1),
2998 else /* TRUNC_DIV, signed */
3000 unsigned HOST_WIDE_INT ml;
3001 int lgup, post_shift;
3002 HOST_WIDE_INT d = INTVAL (op1);
3003 unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;
3005 /* n rem d = n rem -d */
3006 if (rem_flag && d < 0)
3009 op1 = GEN_INT (abs_d);
3015 quotient = expand_unop (compute_mode, neg_optab, op0,
3017 else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
3019 /* This case is not handled correctly below. */
3020 quotient = emit_store_flag (tquotient, EQ, op0, op1,
3021 compute_mode, 1, 1);
3025 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
3026 && (rem_flag ? smod_pow2_cheap : sdiv_pow2_cheap))
3028 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
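/* For the branch-free sequence below, e.g. op0 = -7 and abs_d = 4
   (lgup == 2) in a 32-bit mode: t1 = -1, t2 = (unsigned) -1 >> 30 = 3,
   t3 = -7 + 3 = -4, and the final arithmetic shift gives
   -4 >> 2 = -1 == trunc (-7 / 4).  */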
3030 lgup = floor_log2 (abs_d);
3031 if (abs_d != 2 && BRANCH_COST < 3)
3033 rtx label = gen_label_rtx ();
3036 t1 = copy_to_mode_reg (compute_mode, op0);
3037 emit_cmp_insn (t1, const0_rtx, GE,
3038 NULL_RTX, compute_mode, 0, 0);
3039 emit_jump_insn (gen_bge (label));
3040 expand_inc (t1, GEN_INT (abs_d - 1));
3042 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3043 build_int_2 (lgup, 0),
3049 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3050 build_int_2 (size - 1, 0),
3052 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3053 build_int_2 (size - lgup, 0),
3055 t3 = force_operand (gen_rtx (PLUS, compute_mode,
3058 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3059 build_int_2 (lgup, 0),
3063 /* We have computed OP0 / abs(OP1). If OP1 is negative, negate
3064 the quotient. */
3065 if (d < 0)
3067 insn = get_last_insn ();
3069 && (set = single_set (insn)) != 0
3070 && SET_DEST (set) == quotient)
3072 = gen_rtx (EXPR_LIST, REG_EQUAL,
3073 gen_rtx (DIV, compute_mode, op0,
3077 quotient = expand_unop (compute_mode, neg_optab,
3078 quotient, quotient, 0);
3081 else if (size <= HOST_BITS_PER_WIDE_INT)
3083 choose_multiplier (abs_d, size, size - 1,
3084 &ml, &post_shift, &lgup);
3085 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
3089 extra_cost = (shift_cost[post_shift]
3090 + shift_cost[size - 1] + add_cost);
3091 t1 = expand_mult_highpart (compute_mode, op0, ml,
3093 max_cost - extra_cost);
3096 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3097 build_int_2 (post_shift, 0), NULL_RTX, 0);
3098 t3 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3099 build_int_2 (size - 1, 0), NULL_RTX, 0);
3101 quotient = force_operand (gen_rtx (MINUS, compute_mode, t3, t2),
3104 quotient = force_operand (gen_rtx (MINUS, compute_mode, t2, t3),
3111 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
3112 extra_cost = (shift_cost[post_shift]
3113 + shift_cost[size - 1] + 2 * add_cost);
3114 t1 = expand_mult_highpart (compute_mode, op0, ml,
3116 max_cost - extra_cost);
3119 t2 = force_operand (gen_rtx (PLUS, compute_mode, t1, op0),
3121 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3122 build_int_2 (post_shift, 0), NULL_RTX, 0);
3123 t4 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3124 build_int_2 (size - 1, 0), NULL_RTX, 0);
3126 quotient = force_operand (gen_rtx (MINUS, compute_mode, t4, t3),
3129 quotient = force_operand (gen_rtx (MINUS, compute_mode, t3, t4),
3133 else /* Mode too wide for the tricky code */
3136 insn = get_last_insn ();
3138 && (set = single_set (insn)) != 0
3139 && SET_DEST (set) == quotient)
3141 = gen_rtx (EXPR_LIST, REG_EQUAL,
3142 gen_rtx (DIV, compute_mode, op0, op1),
3148 delete_insns_since (last);
3149 break;
3151 case FLOOR_DIV_EXPR:
3152 case FLOOR_MOD_EXPR:
3153 /* We will come here only for signed operations. */
3154 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3156 unsigned HOST_WIDE_INT mh, ml;
3157 int pre_shift, lgup, post_shift;
3158 HOST_WIDE_INT d = INTVAL (op1);
3162 /* We could just as easily deal with negative constants here,
3163 but it does not seem worth the trouble for GCC 2.6. */
3164 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3166 pre_shift = floor_log2 (d);
3169 remainder = expand_binop (compute_mode, and_optab, op0,
3170 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3171 remainder, 0, OPTAB_LIB_WIDEN);
3173 return gen_lowpart (mode, remainder);
3175 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3176 build_int_2 (pre_shift, 0),
3183 mh = choose_multiplier (d, size, size - 1,
3184 &ml, &post_shift, &lgup);
3188 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3189 build_int_2 (size - 1, 0), NULL_RTX, 0);
3190 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
3191 NULL_RTX, 0, OPTAB_WIDEN);
3192 extra_cost = (shift_cost[post_shift]
3193 + shift_cost[size - 1] + 2 * add_cost);
3194 t3 = expand_mult_highpart (compute_mode, t2, ml,
3196 max_cost - extra_cost);
3199 t4 = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3200 build_int_2 (post_shift, 0),
3202 quotient = expand_binop (compute_mode, xor_optab,
3203 t4, t1, tquotient, 0,
3210 rtx nsign, t1, t2, t3, t4;
3211 t1 = force_operand (gen_rtx (PLUS, compute_mode,
3212 op0, constm1_rtx), NULL_RTX);
3213 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
3215 nsign = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3216 build_int_2 (size - 1, 0), NULL_RTX, 0);
3217 t3 = force_operand (gen_rtx (MINUS, compute_mode, t1, nsign),
3219 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
3224 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
3226 quotient = force_operand (gen_rtx (PLUS, compute_mode,
3235 delete_insns_since (last);
3237 /* Try using an instruction that produces both the quotient and
3238 remainder, using truncation. We can easily compensate the quotient
3239 or remainder to get floor rounding, once we have the remainder.
3240 Notice that we also compute the final remainder value here,
3241 and return the result right away. */
3242 if (target == 0 || GET_MODE (target) != compute_mode)
3243 target = gen_reg_rtx (compute_mode);
3248 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3249 quotient = gen_reg_rtx (compute_mode);
3254 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3255 remainder = gen_reg_rtx (compute_mode);
3258 if (expand_twoval_binop (sdivmod_optab, op0, op1,
3259 quotient, remainder, 0))
3261 /* This could be computed with a branch-less sequence.
3262 Save that for later. */
3264 rtx label = gen_label_rtx ();
3265 emit_cmp_insn (remainder, const0_rtx, EQ, NULL_RTX,
3266 compute_mode, 0, 0);
3267 emit_jump_insn (gen_beq (label));
3268 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3269 NULL_RTX, 0, OPTAB_WIDEN);
3270 emit_cmp_insn (tem, const0_rtx, GE, NULL_RTX, compute_mode, 0, 0);
3271 emit_jump_insn (gen_bge (label));
3272 expand_dec (quotient, const1_rtx);
3273 expand_inc (remainder, op1);
3275 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3278 /* No luck with division elimination or divmod. Have to do it
3279 by conditionally adjusting op0 *and* the result. */
3281 rtx label1, label2, label3, label4, label5;
3285 quotient = gen_reg_rtx (compute_mode);
3286 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3287 label1 = gen_label_rtx ();
3288 label2 = gen_label_rtx ();
3289 label3 = gen_label_rtx ();
3290 label4 = gen_label_rtx ();
3291 label5 = gen_label_rtx ();
3292 emit_cmp_insn (op1, const0_rtx, LT, NULL_RTX, compute_mode, 0, 0);
3293 emit_jump_insn (gen_blt (label2));
3294 emit_cmp_insn (adjusted_op0, const0_rtx, LT, NULL_RTX,
3295 compute_mode, 0, 0);
3296 emit_jump_insn (gen_blt (label1));
3297 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3298 quotient, 0, OPTAB_LIB_WIDEN);
3299 if (tem != quotient)
3300 emit_move_insn (quotient, tem);
3301 emit_jump_insn (gen_jump (label5));
3303 emit_label (label1);
3304 expand_inc (adjusted_op0, const1_rtx);
3305 emit_jump_insn (gen_jump (label4));
3307 emit_label (label2);
3308 emit_cmp_insn (adjusted_op0, const0_rtx, GT, NULL_RTX,
3309 compute_mode, 0, 0);
3310 emit_jump_insn (gen_bgt (label3));
3311 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3312 quotient, 0, OPTAB_LIB_WIDEN);
3313 if (tem != quotient)
3314 emit_move_insn (quotient, tem);
3315 emit_jump_insn (gen_jump (label5));
3317 emit_label (label3);
3318 expand_dec (adjusted_op0, const1_rtx);
3319 emit_label (label4);
3320 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3321 quotient, 0, OPTAB_LIB_WIDEN);
3322 if (tem != quotient)
3323 emit_move_insn (quotient, tem);
3324 expand_dec (quotient, const1_rtx);
3325 emit_label (label5);
3327 break;
3329 case CEIL_DIV_EXPR:
3330 case CEIL_MOD_EXPR:
3331 if (unsignedp)
3333 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
3336 unsigned HOST_WIDE_INT d = INTVAL (op1);
3337 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3338 build_int_2 (floor_log2 (d), 0),
3340 t2 = expand_binop (compute_mode, and_optab, op0,
3342 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3343 t3 = gen_reg_rtx (compute_mode);
3344 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3345 compute_mode, 1, 1);
3349 lab = gen_label_rtx ();
3350 emit_cmp_insn (t2, const0_rtx, EQ, NULL_RTX,
3351 compute_mode, 0, 0);
3352 emit_jump_insn (gen_beq (lab));
3353 expand_inc (t1, const1_rtx);
3358 quotient = force_operand (gen_rtx (PLUS, compute_mode,
3364 /* Try using an instruction that produces both the quotient and
3365 remainder, using truncation. We can easily compensate the
3366 quotient or remainder to get ceiling rounding, once we have the
3367 remainder. Notice that we also compute the final remainder
3368 value here, and return the result right away. */
3369 if (target == 0 || GET_MODE (target) != compute_mode)
3370 target = gen_reg_rtx (compute_mode);
3374 remainder = (GET_CODE (target) == REG
3375 ? target : gen_reg_rtx (compute_mode));
3376 quotient = gen_reg_rtx (compute_mode);
3380 quotient = (GET_CODE (target) == REG
3381 ? target : gen_reg_rtx (compute_mode));
3382 remainder = gen_reg_rtx (compute_mode);
3385 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
3388 /* This could be computed with a branch-less sequence.
3389 Save that for later. */
3390 rtx label = gen_label_rtx ();
3391 emit_cmp_insn (remainder, const0_rtx, EQ, NULL_RTX,
3392 compute_mode, 0, 0);
3393 emit_jump_insn (gen_beq (label));
3394 expand_inc (quotient, const1_rtx);
3395 expand_dec (remainder, op1);
3397 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3400 /* No luck with division elimination or divmod. Have to do it
3401 by conditionally adjusting op0 *and* the result. */
3404 rtx adjusted_op0, tem;
3406 quotient = gen_reg_rtx (compute_mode);
3407 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3408 label1 = gen_label_rtx ();
3409 label2 = gen_label_rtx ();
3410 emit_cmp_insn (adjusted_op0, const0_rtx, NE, NULL_RTX,
3411 compute_mode, 0, 0);
3412 emit_jump_insn (gen_bne (label1));
3413 emit_move_insn (quotient, const0_rtx);
3414 emit_jump_insn (gen_jump (label2));
3416 emit_label (label1);
3417 expand_dec (adjusted_op0, const1_rtx);
3418 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
3419 quotient, 1, OPTAB_LIB_WIDEN);
3420 if (tem != quotient)
3421 emit_move_insn (quotient, tem);
3422 expand_inc (quotient, const1_rtx);
3423 emit_label (label2);
3428 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3429 && INTVAL (op1) >= 0)
3431 /* This is extremely similar to the code for the unsigned case
3432 above. For 2.7 we should merge these variants, but for
3433 2.6.1 I don't want to touch the code for unsigned since that
3434 gets used in C. The signed case will only be used by other
3435 languages (Ada). */
3438 unsigned HOST_WIDE_INT d = INTVAL (op1);
3439 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3440 build_int_2 (floor_log2 (d), 0),
3442 t2 = expand_binop (compute_mode, and_optab, op0,
3444 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3445 t3 = gen_reg_rtx (compute_mode);
3446 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3447 compute_mode, 1, 1);
3451 lab = gen_label_rtx ();
3452 emit_cmp_insn (t2, const0_rtx, EQ, NULL_RTX,
3453 compute_mode, 0, 0);
3454 emit_jump_insn (gen_beq (lab));
3455 expand_inc (t1, const1_rtx);
3460 quotient = force_operand (gen_rtx (PLUS, compute_mode,
3466 /* Try using an instruction that produces both the quotient and
3467 remainder, using truncation. We can easily compensate the
3468 quotient or remainder to get ceiling rounding, once we have the
3469 remainder. Notice that we also compute the final remainder
3470 value here, and return the result right away. */
3471 if (target == 0 || GET_MODE (target) != compute_mode)
3472 target = gen_reg_rtx (compute_mode);
3475 remainder = (GET_CODE (target) == REG
3476 ? target : gen_reg_rtx (compute_mode));
3477 quotient = gen_reg_rtx (compute_mode);
3481 quotient = (GET_CODE (target) == REG
3482 ? target : gen_reg_rtx (compute_mode));
3483 remainder = gen_reg_rtx (compute_mode);
3486 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
3489 /* This could be computed with a branch-less sequence.
3490 Save that for later. */
3492 rtx label = gen_label_rtx ();
3493 emit_cmp_insn (remainder, const0_rtx, EQ, NULL_RTX,
3494 compute_mode, 0, 0);
3495 emit_jump_insn (gen_beq (label));
3496 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3497 NULL_RTX, 0, OPTAB_WIDEN);
3498 emit_cmp_insn (tem, const0_rtx, LT, NULL_RTX,
3499 compute_mode, 0, 0);
3500 emit_jump_insn (gen_blt (label));
3501 expand_inc (quotient, const1_rtx);
3502 expand_dec (remainder, op1);
3504 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3507 /* No luck with division elimination or divmod. Have to do it
3508 by conditionally adjusting op0 *and* the result. */
3510 rtx label1, label2, label3, label4, label5;
3514 quotient = gen_reg_rtx (compute_mode);
3515 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3516 label1 = gen_label_rtx ();
3517 label2 = gen_label_rtx ();
3518 label3 = gen_label_rtx ();
3519 label4 = gen_label_rtx ();
3520 label5 = gen_label_rtx ();
3521 emit_cmp_insn (op1, const0_rtx, LT, NULL_RTX,
3522 compute_mode, 0, 0);
3523 emit_jump_insn (gen_blt (label2));
3524 emit_cmp_insn (adjusted_op0, const0_rtx, GT, NULL_RTX,
3525 compute_mode, 0, 0);
3526 emit_jump_insn (gen_bgt (label1));
3527 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3528 quotient, 0, OPTAB_LIB_WIDEN);
3529 if (tem != quotient)
3530 emit_move_insn (quotient, tem);
3531 emit_jump_insn (gen_jump (label5));
3533 emit_label (label1);
3534 expand_dec (adjusted_op0, const1_rtx);
3535 emit_jump_insn (gen_jump (label4));
3537 emit_label (label2);
3538 emit_cmp_insn (adjusted_op0, const0_rtx, LT, NULL_RTX,
3539 compute_mode, 0, 0);
3540 emit_jump_insn (gen_blt (label3));
3541 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3542 quotient, 0, OPTAB_LIB_WIDEN);
3543 if (tem != quotient)
3544 emit_move_insn (quotient, tem);
3545 emit_jump_insn (gen_jump (label5));
3547 emit_label (label3);
3548 expand_inc (adjusted_op0, const1_rtx);
3549 emit_label (label4);
3550 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3551 quotient, 0, OPTAB_LIB_WIDEN);
3552 if (tem != quotient)
3553 emit_move_insn (quotient, tem);
3554 expand_inc (quotient, const1_rtx);
3555 emit_label (label5);
3560 case EXACT_DIV_EXPR:
3561 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3563 HOST_WIDE_INT d = INTVAL (op1);
3564 unsigned HOST_WIDE_INT ml;
3568 post_shift = floor_log2 (d & -d);
3569 ml = invert_mod2n (d >> post_shift, size);
3570 t1 = expand_mult (compute_mode, op0, GEN_INT (ml), NULL_RTX,
3572 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3573 build_int_2 (post_shift, 0),
3574 NULL_RTX, unsignedp);
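/* E.g. d = 12 in a 32-bit mode: post_shift = 2 and
   ml = invert_mod2n (3, 32) = 0xAAAAAAAB; for op0 = 36 we get
   36 * 0xAAAAAAAB == 12 (mod 2^32) and 12 >> 2 == 3, the exact
   quotient.  This is valid only because EXACT_DIV_EXPR guarantees
   that op0 is a multiple of op1.  */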
3576 insn = get_last_insn ();
3578 = gen_rtx (EXPR_LIST, REG_EQUAL,
3579 gen_rtx (unsignedp ? UDIV : DIV, compute_mode,
3585 case ROUND_DIV_EXPR:
3586 case ROUND_MOD_EXPR:
3587 if (unsignedp)
3591 label = gen_label_rtx ();
3592 quotient = gen_reg_rtx (compute_mode);
3593 remainder = gen_reg_rtx (compute_mode);
3594 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
3597 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
3598 quotient, 1, OPTAB_LIB_WIDEN);
3599 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
3600 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3601 remainder, 1, OPTAB_LIB_WIDEN);
3603 tem = plus_constant (op1, -1);
3604 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3605 build_int_2 (1, 0), NULL_RTX, 1);
3606 emit_cmp_insn (remainder, tem, LEU, NULL_RTX, compute_mode, 0, 0);
3607 emit_jump_insn (gen_bleu (label));
3608 expand_inc (quotient, const1_rtx);
3609 expand_dec (remainder, op1);
3614 rtx abs_rem, abs_op1, tem, mask;
3616 label = gen_label_rtx ();
3617 quotient = gen_reg_rtx (compute_mode);
3618 remainder = gen_reg_rtx (compute_mode);
3619 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
3622 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
3623 quotient, 0, OPTAB_LIB_WIDEN);
3624 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
3625 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3626 remainder, 0, OPTAB_LIB_WIDEN);
3628 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 0, 0);
3629 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 0, 0);
3630 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
3631 build_int_2 (1, 0), NULL_RTX, 1);
3632 emit_cmp_insn (tem, abs_op1, LTU, NULL_RTX, compute_mode, 0, 0);
3633 emit_jump_insn (gen_bltu (label));
3634 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3635 NULL_RTX, 0, OPTAB_WIDEN);
3636 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3637 build_int_2 (size - 1, 0), NULL_RTX, 0);
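/* MASK is now -1 if OP0 and OP1 have opposite signs, else 0; below,
   (MASK ^ 1) - MASK is therefore +1 or -1, and (MASK ^ OP1) - MASK is
   OP1 or -OP1, the adjustments that round the quotient toward the
   nearest integer.  */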
3638 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
3639 NULL_RTX, 0, OPTAB_WIDEN);
3640 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3641 NULL_RTX, 0, OPTAB_WIDEN);
3642 expand_inc (quotient, tem);
3643 tem = expand_binop (compute_mode, xor_optab, mask, op1,
3644 NULL_RTX, 0, OPTAB_WIDEN);
3645 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3646 NULL_RTX, 0, OPTAB_WIDEN);
3647 expand_dec (remainder, tem);
3650 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3658 if (target && GET_MODE (target) != compute_mode)
3663 /* Try to produce the remainder directly without a library call. */
3664 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
3666 unsignedp, OPTAB_WIDEN);
3669 /* No luck there. Can we do remainder and divide at once
3670 without a library call? */
3671 remainder = gen_reg_rtx (compute_mode);
3672 if (! expand_twoval_binop ((unsignedp
3676 NULL_RTX, remainder, unsignedp))
3681 return gen_lowpart (mode, remainder);
3684 /* Produce the quotient. Try a quotient insn, but not a library call.
3685 If we have a divmod in this mode, use it in preference to widening
3686 the div (for this test we assume it will not fail). Note that optab2
3687 is set to the one of the two optabs that the call below will use. */
3689 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
3690 op0, op1, rem_flag ? NULL_RTX : target,
3692 ((optab2->handlers[(int) compute_mode].insn_code
3693 != CODE_FOR_nothing)
3694 ? OPTAB_DIRECT : OPTAB_WIDEN));
3698 /* No luck there. Try a quotient-and-remainder insn,
3699 keeping the quotient alone. */
3700 quotient = gen_reg_rtx (compute_mode);
3701 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
3703 quotient, NULL_RTX, unsignedp))
3707 /* Still no luck. If we are not computing the remainder,
3708 use a library call for the quotient. */
3709 quotient = sign_expand_binop (compute_mode,
3710 udiv_optab, sdiv_optab,
3712 unsignedp, OPTAB_LIB_WIDEN);
3719 if (target && GET_MODE (target) != compute_mode)
3723 /* No divide instruction either. Use library for remainder. */
3724 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
3726 unsignedp, OPTAB_LIB_WIDEN);
3729 /* We divided. Now finish doing X - Y * (X / Y). */
3730 remainder = expand_mult (compute_mode, quotient, op1,
3731 NULL_RTX, unsignedp);
3732 remainder = expand_binop (compute_mode, sub_optab, op0,
3733 remainder, target, unsignedp,
3738 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3741 /* Return a tree node with data type TYPE, describing the value of X.
3742 Usually this is an RTL_EXPR, if there is no obvious better choice.
3743 X may be an expression; however, we only support those expressions
3744 generated by loop.c. */
3753 switch (GET_CODE (x))
3756 t = build_int_2 (INTVAL (x),
3757 TREE_UNSIGNED (type) || INTVAL (x) >= 0 ? 0 : -1);
3758 TREE_TYPE (t) = type;
3762 if (GET_MODE (x) == VOIDmode)
3764 t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
3765 TREE_TYPE (t) = type;
3771 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
3772 t = build_real (type, d);
3778 return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
3779 make_tree (type, XEXP (x, 1))));
3782 return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
3783 make_tree (type, XEXP (x, 1))));
3786 return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0))));
3789 return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
3790 make_tree (type, XEXP (x, 1))));
3793 return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
3794 make_tree (type, XEXP (x, 1))));
3797 return fold (convert (type,
3798 build (RSHIFT_EXPR, unsigned_type (type),
3799 make_tree (unsigned_type (type),
3801 make_tree (type, XEXP (x, 1)))));
3804 return fold (convert (type,
3805 build (RSHIFT_EXPR, signed_type (type),
3806 make_tree (signed_type (type), XEXP (x, 0)),
3807 make_tree (type, XEXP (x, 1)))));
3810 if (TREE_CODE (type) != REAL_TYPE)
3811 t = signed_type (type);
3815 return fold (convert (type,
3816 build (TRUNC_DIV_EXPR, t,
3817 make_tree (t, XEXP (x, 0)),
3818 make_tree (t, XEXP (x, 1)))));
3820 t = unsigned_type (type);
3821 return fold (convert (type,
3822 build (TRUNC_DIV_EXPR, t,
3823 make_tree (t, XEXP (x, 0)),
3824 make_tree (t, XEXP (x, 1)))));
3826 t = make_node (RTL_EXPR);
3827 TREE_TYPE (t) = type;
3828 RTL_EXPR_RTL (t) = x;
3829 /* There are no insns to be output
3830 when this rtl_expr is used. */
3831 RTL_EXPR_SEQUENCE (t) = 0;
3836 /* Return an rtx representing the value of X * MULT + ADD.
3837 TARGET is a suggestion for where to store the result (an rtx).
3838 MODE is the machine mode for the computation.
3839 X and MULT must have mode MODE. ADD may have a different mode.
3840 So can X (defaults to same as MODE).
3841 UNSIGNEDP is non-zero to do unsigned multiplication.
3842 This may emit insns. */
3845 expand_mult_add (x, target, mult, add, mode, unsignedp)
3846 rtx x, target, mult, add;
3847 enum machine_mode mode;
3850 tree type = type_for_mode (mode, unsignedp);
3851 tree add_type = (GET_MODE (add) == VOIDmode
3852 ? type : type_for_mode (GET_MODE (add), unsignedp));
3853 tree result = fold (build (PLUS_EXPR, type,
3854 fold (build (MULT_EXPR, type,
3855 make_tree (type, x),
3856 make_tree (type, mult))),
3857 make_tree (add_type, add)));
3859 return expand_expr (result, target, VOIDmode, 0);
3862 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
3863 and returning TARGET.
3865 If TARGET is 0, a pseudo-register or constant is returned. */
3868 expand_and (op0, op1, target)
3869 rtx op0, op1, target;
3871 enum machine_mode mode = VOIDmode;
3874 if (GET_MODE (op0) != VOIDmode)
3875 mode = GET_MODE (op0);
3876 else if (GET_MODE (op1) != VOIDmode)
3877 mode = GET_MODE (op1);
3879 if (mode != VOIDmode)
3880 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
3881 else if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT)
3882 tem = GEN_INT (INTVAL (op0) & INTVAL (op1));
3888 else if (tem != target)
3889 emit_move_insn (target, tem);
3893 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
3894 and storing in TARGET. Normally return TARGET.
3895 Return 0 if that cannot be done.
3897 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
3898 it is VOIDmode, they cannot both be CONST_INT.
3900 UNSIGNEDP is for the case where we have to widen the operands
3901 to perform the operation. It says to use zero-extension.
3903 NORMALIZEP is 1 if we should convert the result to be either zero
3904 or one. NORMALIZEP is -1 if we should convert the result to be
3905 either zero or -1. If NORMALIZEP is zero, the result will be left
3906 "raw" out of the scc insn. */
3909 emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep)
3913 enum machine_mode mode;
3918 enum insn_code icode;
3919 enum machine_mode compare_mode;
3920 enum machine_mode target_mode = GET_MODE (target);
3922 rtx last = get_last_insn ();
3923 rtx pattern, comparison;
3925 /* If one operand is constant, make it the second one. Only do this
3926 if the other operand is not constant as well. */
3928 if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
3929 || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
3934 code = swap_condition (code);
3937 if (mode == VOIDmode)
3938 mode = GET_MODE (op0);
3940 /* For some comparisons with 1 and -1, we can convert this to
3941 comparisons with zero. This will often produce more opportunities for
3942 store-flag insns. */
3947 if (op1 == const1_rtx)
3948 op1 = const0_rtx, code = LE;
3951 if (op1 == constm1_rtx)
3952 op1 = const0_rtx, code = LT;
3955 if (op1 == const1_rtx)
3956 op1 = const0_rtx, code = GT;
3959 if (op1 == constm1_rtx)
3960 op1 = const0_rtx, code = GE;
3963 if (op1 == const1_rtx)
3964 op1 = const0_rtx, code = NE;
3967 if (op1 == const1_rtx)
3968 op1 = const0_rtx, code = EQ;
3974 /* From now on, we won't change CODE, so set ICODE now. */
3975 icode = setcc_gen_code[(int) code];
3977 /* If this is A < 0 or A >= 0, we can do this by taking the ones
3978 complement of A (for GE) and shifting the sign bit to the low bit. */
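/* E.g. in a 32-bit mode, A < 0 becomes (unsigned) A >> 31, which is 1
   exactly when A is negative; for A >= 0 we complement A first, so the
   shifted-down sign bit is 1 exactly when A >= 0.  */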
3979 if (op1 == const0_rtx && (code == LT || code == GE)
3980 && GET_MODE_CLASS (mode) == MODE_INT
3981 && (normalizep || STORE_FLAG_VALUE == 1
3982 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
3983 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
3984 == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
3988 /* If the result is to be wider than OP0, it is best to convert it
3989 first. If it is to be narrower, it is *incorrect* to convert it
3990 first. */
3991 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
3993 op0 = protect_from_queue (op0, 0);
3994 op0 = convert_modes (target_mode, mode, op0, 0);
3998 if (target_mode != mode)
4002 op0 = expand_unop (mode, one_cmpl_optab, op0,
4003 ((STORE_FLAG_VALUE == 1 || normalizep)
4004 ? 0 : subtarget), 0);
4006 if (STORE_FLAG_VALUE == 1 || normalizep)
4007 /* If we are supposed to produce a 0/1 value, we want to do
4008 a logical shift from the sign bit to the low-order bit; for
4009 a -1/0 value, we do an arithmetic shift. */
4010 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
4011 size_int (GET_MODE_BITSIZE (mode) - 1),
4012 subtarget, normalizep != -1);
4014 if (mode != target_mode)
4015 op0 = convert_modes (target_mode, mode, op0, 0);
4020 if (icode != CODE_FOR_nothing)
4022 /* We think we may be able to do this with a scc insn. Emit the
4023 comparison and then the scc insn.
4025 compare_from_rtx may call emit_queue, which would be deleted below
4026 if the scc insn fails. So call it ourselves before setting LAST. */
4029 last = get_last_insn ();
4032 = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX, 0);
4033 if (GET_CODE (comparison) == CONST_INT)
4034 return (comparison == const0_rtx ? const0_rtx
4035 : normalizep == 1 ? const1_rtx
4036 : normalizep == -1 ? constm1_rtx
4039 /* If the code of COMPARISON doesn't match CODE, something is
4040 wrong; we can no longer be sure that we have the operation.
4041 We could handle this case, but it should not happen. */
4043 if (GET_CODE (comparison) != code)
4046 /* Get a reference to the target in the proper mode for this insn. */
4047 compare_mode = insn_operand_mode[(int) icode][0];
4049 if (preserve_subexpressions_p ()
4050 || ! (*insn_operand_predicate[(int) icode][0]) (subtarget, compare_mode))
4051 subtarget = gen_reg_rtx (compare_mode);
4053 pattern = GEN_FCN (icode) (subtarget);
4056 emit_insn (pattern);
	  /* If we are converting to a wider mode, first convert to
	     TARGET_MODE, then normalize.  This produces better combining
	     opportunities on machines that have a SIGN_EXTRACT when we are
	     testing a single bit.  This mostly benefits the 68k.

	     If STORE_FLAG_VALUE does not have the sign bit set when
	     interpreted in COMPARE_MODE, we can do this conversion as
	     unsigned, which is usually more efficient.  */
	  if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
	    {
	      convert_move (target, subtarget,
			    (GET_MODE_BITSIZE (compare_mode)
			     <= HOST_BITS_PER_WIDE_INT)
			    && 0 == (STORE_FLAG_VALUE
				     & ((HOST_WIDE_INT) 1
					<< (GET_MODE_BITSIZE (compare_mode) - 1))));
	      op0 = target;
	      compare_mode = target_mode;
	    }
	  else
	    op0 = subtarget;
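	  /* E.g. with QImode COMPARE_MODE and STORE_FLAG_VALUE of 1, the
	     sign bit (0x80) is clear in STORE_FLAG_VALUE, so the widening
	     move above can be done as a zero extension.  */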
	  /* If we want to keep subexpressions around, don't reuse our
	     last target.  */

	  if (preserve_subexpressions_p ())
	    subtarget = 0;
	  /* Now normalize to the proper value in COMPARE_MODE.  Sometimes
	     we don't have to do anything.  */
	  if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
	    ;
	  else if (normalizep == - STORE_FLAG_VALUE)
	    op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);

	  /* We don't want to use STORE_FLAG_VALUE < 0 below since this
	     makes it hard to use a value of just the sign bit due to
	     ANSI integer constant typing rules.  */
	  else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
		   && (STORE_FLAG_VALUE
		       & ((HOST_WIDE_INT) 1
			  << (GET_MODE_BITSIZE (compare_mode) - 1))))
	    op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
				size_int (GET_MODE_BITSIZE (compare_mode) - 1),
				subtarget, normalizep == 1);
	  else if (STORE_FLAG_VALUE & 1)
	    {
	      op0 = expand_and (op0, const1_rtx, subtarget);
	      if (normalizep == -1)
		op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
	    }
	  else
	    abort ();
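	  /* For illustration: on a machine where STORE_FLAG_VALUE is -1, a
	     request for normalizep == 1 takes the negation case above,
	     turning the -1 produced by the scc insn into the requested 1.  */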
	  /* If we were converting to a smaller mode, do the
	     conversion now.  */
	  if (target_mode != compare_mode)
	    {
	      convert_move (target, op0, 0);
	      return target;
	    }
	  else
	    return op0;
	}

      delete_insns_since (last);
    }
  /* If expensive optimizations, use different pseudo registers for each
     insn, instead of reusing the same pseudo.  This leads to better CSE,
     but slows down the compiler, since there are more pseudos.  */

  subtarget = (!flag_expensive_optimizations
	       && (target_mode == mode)) ? target : NULL_RTX;
  /* If we reached here, we can't do this with a scc insn.  However, there
     are some comparisons that can be done directly.  For example, if
     this is an equality comparison of integers, we can try to exclusive-or
     (or subtract) the two operands and use a recursive call to try the
     comparison with zero.  Don't do any of these cases if branches are
     very cheap.  */

  if (BRANCH_COST > 0
      && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
      && op1 != const0_rtx)
    {
      tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
			  OPTAB_WIDEN);
      if (tem == 0)
	tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
			    OPTAB_WIDEN);
      if (tem != 0)
	tem = emit_store_flag (target, code, tem, const0_rtx,
			       mode, unsignedp, normalizep);
      if (tem == 0)
	delete_insns_since (last);
      return tem;
    }
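  /* E.g. "X == Y" is retried above as "(X ^ Y) == 0", since the XOR is
     zero exactly when the operands are equal, and the recursive call only
     has to solve a comparison against zero.  */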
  /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
     the constant zero.  Reject all other comparisons at this point.  Only
     do LE and GT if branches are expensive since they are expensive on
     2-operand machines.  */

  if (BRANCH_COST == 0
      || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
      || (code != EQ && code != NE
	  && (BRANCH_COST <= 1 || (code != LE && code != GT))))
    return 0;
  /* See what we need to return.  We can only return a 1, -1, or the
     sign bit.  */

  if (normalizep == 0)
    {
      if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	normalizep = STORE_FLAG_VALUE;

      else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	       && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
		   == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
	;
      else
	return 0;
    }
  /* Try to put the result of the comparison in the sign bit.  Assume we can't
     do the necessary operation below.  */

  tem = 0;

  /* To see if A <= 0, compute (A | (A - 1)).  A <= 0 iff that result has
     the sign bit set.  */

  if (code == LE)
    {
      /* This is destructive, so SUBTARGET can't be OP0.  */
      if (rtx_equal_p (subtarget, op0))
	subtarget = 0;

      tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
			  OPTAB_WIDEN);
      if (tem)
	tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
			    OPTAB_WIDEN);
    }
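  /* Illustrated in QImode: A = 3 gives 3 | 2 = 0x03 (sign bit clear);
     A = 0 gives 0 | -1 = 0xFF (set); A = -4 gives 0xFC | 0xFB = 0xFF
     (set).  So the sign bit of (A | (A - 1)) is set exactly when
     A <= 0.  */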
  /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
     number of bits in the mode of OP0, minus one.  */

  if (code == GT)
    {
      if (rtx_equal_p (subtarget, op0))
	subtarget = 0;

      tem = expand_shift (RSHIFT_EXPR, mode, op0,
			  size_int (GET_MODE_BITSIZE (mode) - 1),
			  subtarget, 0);
      tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
			  OPTAB_WIDEN);
    }
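  /* Illustrated in QImode: A = 5 gives (5 >> 7) - 5 = -5 = 0xFB (sign bit
     set); A = 0 gives 0 - 0 = 0 (clear); A = -3 gives -1 - (-3) = 2
     (clear).  The arithmetic shift is always 0 or -1, so the sign bit of
     the result is set exactly when A > 0.  */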
  if (code == EQ || code == NE)
    {
      /* For EQ or NE, one way to do the comparison is to apply an operation
	 that converts the operand into a positive number if it is non-zero
	 or zero if it was originally zero.  Then, for EQ, we subtract 1 and
	 for NE we negate.  This puts the result in the sign bit.  Then we
	 normalize with a shift, if needed.

	 Two operations that can do the above actions are ABS and FFS, so try
	 them.  If that doesn't work, and MODE is smaller than a full word,
	 we can use zero-extension to the wider mode (an unsigned conversion)
	 as the operation.  */
      if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
	tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
      else if (ffs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
	tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
      else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
	{
	  op0 = protect_from_queue (op0, 0);
	  tem = convert_modes (word_mode, mode, op0, 1);
	  mode = word_mode;
	}

      if (tem != 0)
	{
	  if (code == EQ)
	    tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
				0, OPTAB_WIDEN);
	  else
	    tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
	}
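      /* Illustrated with FFS in QImode: A = 0 gives ffs (0) = 0, and for
	 NE the negation leaves 0 (sign bit clear); A = 12 gives
	 ffs (12) = 3, and -3 = 0xFD has the sign bit set.  */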
      /* If we couldn't do it that way, for NE we can "or" the two's complement
	 of the value with itself.  For EQ, we take the one's complement of
	 that "or", which is an extra insn, so we only handle EQ if branches
	 are expensive.  */

      if (tem == 0 && (code == NE || BRANCH_COST > 1))
	{
	  if (rtx_equal_p (subtarget, op0))
	    subtarget = 0;

	  tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
	  tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
			      OPTAB_WIDEN);

	  if (tem && code == EQ)
	    tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
	}
    }
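  /* The (-A | A) trick above, illustrated in QImode: A = 6 gives
     0xFA | 0x06 = 0xFE (sign bit set); A = 0 gives 0 | 0 = 0 (clear).
     So (-A | A) has the sign bit set exactly when A != 0.  */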
  if (tem && normalizep)
    tem = expand_shift (RSHIFT_EXPR, mode, tem,
			size_int (GET_MODE_BITSIZE (mode) - 1),
			subtarget, normalizep == 1);
  if (tem)
    {
      if (GET_MODE (tem) != target_mode)
	{
	  convert_move (target, tem, 0);
	  tem = target;
	}
      else if (!subtarget)
	{
	  emit_move_insn (target, tem);
	  tem = target;
	}
    }
  else
    delete_insns_since (last);

  return tem;
}
/* Like emit_store_flag, but always succeeds.  */

rtx
emit_store_flag_force (target, code, op0, op1, mode, unsignedp, normalizep)
     rtx target;
     enum rtx_code code;
     rtx op0, op1;
     enum machine_mode mode;
     int unsignedp;
     int normalizep;
{
  rtx tem, label;
  /* First see if emit_store_flag can do the job.  */
  tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
  if (tem != 0)
    return tem;

  if (normalizep == 0)
    normalizep = 1;
  /* If this failed, we have to do this with set/compare/jump/set code.  */
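  /* For illustration, the fallback sequence emitted below for "X > Y" is
     roughly:

	 target = 1
	 compare X, Y
	 branch-if-greater L
	 target = 0
       L:

     which always succeeds, at the cost of a branch.  */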
  if (GET_CODE (target) != REG
      || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
    target = gen_reg_rtx (GET_MODE (target));

  emit_move_insn (target, const1_rtx);
  tem = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX, 0);
  if (GET_CODE (tem) == CONST_INT)
    return tem;

  label = gen_label_rtx ();
  if (bcc_gen_fctn[(int) code] == 0)
    abort ();

  emit_jump_insn ((*bcc_gen_fctn[(int) code]) (label));
  emit_move_insn (target, const0_rtx);