/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "flags.h"
#include "insn-config.h"
#include "expr.h"
#include "optabs.h"
#include "real.h"
#include "recog.h"
#include "langhooks.h"

static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT, rtx);
static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT, rtx);
static rtx extract_fixed_bit_field (enum machine_mode, rtx,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, rtx, int);
static rtx mask_rtx (enum machine_mode, int, int, int);
static rtx lshift_value (enum machine_mode, rtx, int, int);
static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, int);
static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx);
static rtx expand_smod_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
static rtx expand_sdiv_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);

/* Test whether a value is zero or a power of two.  */
#define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
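
/* For example (values chosen for illustration, not from the sources):
   8 & 7 == 0 and 0 & -1 == 0, so 8 and 0 satisfy the predicate, while
   12 & 11 == 8, so 12 does not.  */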

/* Nonzero means divides or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   sequences.  */

static bool sdiv_pow2_cheap[NUM_MACHINE_MODES];
static bool smod_pow2_cheap[NUM_MACHINE_MODES];

#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
#endif

/* For compilers that support multiple targets with different word sizes,
   MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD.  An example
   is the H8/300(H) compiler.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif

/* Reduce conditional compilation elsewhere.  */
#ifndef HAVE_insv
#define HAVE_insv 0
#define CODE_FOR_insv CODE_FOR_nothing
#define gen_insv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extv
#define HAVE_extv 0
#define CODE_FOR_extv CODE_FOR_nothing
#define gen_extv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extzv
#define HAVE_extzv 0
#define CODE_FOR_extzv CODE_FOR_nothing
#define gen_extzv(a,b,c,d) NULL_RTX
#endif

/* Cost of various pieces of RTL.  Note that some of these are indexed by
   shift count and some by mode.  */
static int zero_cost;
static int add_cost[NUM_MACHINE_MODES];
static int neg_cost[NUM_MACHINE_MODES];
static int shift_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
static int shiftadd_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
static int shiftsub_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
static int mul_cost[NUM_MACHINE_MODES];
static int sdiv_cost[NUM_MACHINE_MODES];
static int udiv_cost[NUM_MACHINE_MODES];
static int mul_widen_cost[NUM_MACHINE_MODES];
static int mul_highpart_cost[NUM_MACHINE_MODES];

void
init_expmed (void)
{
  struct
  {
    struct rtx_def reg;		rtunion reg_fld[2];
    struct rtx_def plus;	rtunion plus_fld1;
    struct rtx_def neg;
    struct rtx_def mult;	rtunion mult_fld1;
    struct rtx_def sdiv;	rtunion sdiv_fld1;
    struct rtx_def udiv;	rtunion udiv_fld1;
    struct rtx_def zext;
    struct rtx_def sdiv_32;	rtunion sdiv_32_fld1;
    struct rtx_def smod_32;	rtunion smod_32_fld1;
    struct rtx_def wide_mult;	rtunion wide_mult_fld1;
    struct rtx_def wide_lshr;	rtunion wide_lshr_fld1;
    struct rtx_def wide_trunc;
    struct rtx_def shift;	rtunion shift_fld1;
    struct rtx_def shift_mult;	rtunion shift_mult_fld1;
    struct rtx_def shift_add;	rtunion shift_add_fld1;
    struct rtx_def shift_sub;	rtunion shift_sub_fld1;
  } all;

  rtx pow2[MAX_BITS_PER_WORD];
  rtx cint[MAX_BITS_PER_WORD];
  int m, n;
  enum machine_mode mode, wider_mode;

  zero_cost = rtx_cost (const0_rtx, 0);

  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      pow2[m] = GEN_INT ((HOST_WIDE_INT) 1 << m);
      cint[m] = GEN_INT (m);
    }
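
  /* For example (an illustration, not in the original sources): after the
     loop above, pow2[3] is GEN_INT (8) and cint[3] is GEN_INT (3); these
     constants are plugged into the shift and multiply templates below so
     their costs can be measured per mode.  */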

  memset (&all, 0, sizeof all);

  PUT_CODE (&all.reg, REG);
  /* Avoid using hard regs in ways which may be unsupported.  */
  REGNO (&all.reg) = LAST_VIRTUAL_REGISTER + 1;

  PUT_CODE (&all.plus, PLUS);
  XEXP (&all.plus, 0) = &all.reg;
  XEXP (&all.plus, 1) = &all.reg;

  PUT_CODE (&all.neg, NEG);
  XEXP (&all.neg, 0) = &all.reg;

  PUT_CODE (&all.mult, MULT);
  XEXP (&all.mult, 0) = &all.reg;
  XEXP (&all.mult, 1) = &all.reg;

  PUT_CODE (&all.sdiv, DIV);
  XEXP (&all.sdiv, 0) = &all.reg;
  XEXP (&all.sdiv, 1) = &all.reg;

  PUT_CODE (&all.udiv, UDIV);
  XEXP (&all.udiv, 0) = &all.reg;
  XEXP (&all.udiv, 1) = &all.reg;

  PUT_CODE (&all.sdiv_32, DIV);
  XEXP (&all.sdiv_32, 0) = &all.reg;
  XEXP (&all.sdiv_32, 1) = 32 < MAX_BITS_PER_WORD ? cint[32] : GEN_INT (32);

  PUT_CODE (&all.smod_32, MOD);
  XEXP (&all.smod_32, 0) = &all.reg;
  XEXP (&all.smod_32, 1) = XEXP (&all.sdiv_32, 1);

  PUT_CODE (&all.zext, ZERO_EXTEND);
  XEXP (&all.zext, 0) = &all.reg;

  PUT_CODE (&all.wide_mult, MULT);
  XEXP (&all.wide_mult, 0) = &all.zext;
  XEXP (&all.wide_mult, 1) = &all.zext;

  PUT_CODE (&all.wide_lshr, LSHIFTRT);
  XEXP (&all.wide_lshr, 0) = &all.wide_mult;

  PUT_CODE (&all.wide_trunc, TRUNCATE);
  XEXP (&all.wide_trunc, 0) = &all.wide_lshr;

  PUT_CODE (&all.shift, ASHIFT);
  XEXP (&all.shift, 0) = &all.reg;

  PUT_CODE (&all.shift_mult, MULT);
  XEXP (&all.shift_mult, 0) = &all.reg;

  PUT_CODE (&all.shift_add, PLUS);
  XEXP (&all.shift_add, 0) = &all.shift_mult;
  XEXP (&all.shift_add, 1) = &all.reg;

  PUT_CODE (&all.shift_sub, MINUS);
  XEXP (&all.shift_sub, 0) = &all.shift_mult;
  XEXP (&all.shift_sub, 1) = &all.reg;

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      PUT_MODE (&all.reg, mode);
      PUT_MODE (&all.plus, mode);
      PUT_MODE (&all.neg, mode);
      PUT_MODE (&all.mult, mode);
      PUT_MODE (&all.sdiv, mode);
      PUT_MODE (&all.udiv, mode);
      PUT_MODE (&all.sdiv_32, mode);
      PUT_MODE (&all.smod_32, mode);
      PUT_MODE (&all.wide_trunc, mode);
      PUT_MODE (&all.shift, mode);
      PUT_MODE (&all.shift_mult, mode);
      PUT_MODE (&all.shift_add, mode);
      PUT_MODE (&all.shift_sub, mode);

      add_cost[mode] = rtx_cost (&all.plus, SET);
      neg_cost[mode] = rtx_cost (&all.neg, SET);
      mul_cost[mode] = rtx_cost (&all.mult, SET);
      sdiv_cost[mode] = rtx_cost (&all.sdiv, SET);
      udiv_cost[mode] = rtx_cost (&all.udiv, SET);

      sdiv_pow2_cheap[mode] = (rtx_cost (&all.sdiv_32, SET)
                               <= 2 * add_cost[mode]);
      smod_pow2_cheap[mode] = (rtx_cost (&all.smod_32, SET)
                               <= 4 * add_cost[mode]);
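
      /* In other words (an illustrative reading of the thresholds above):
         if dividing a register by 32 in this mode costs no more than two
         additions, the division instruction itself is preferred over a
         synthesized shift-and-correct sequence, and likewise for modulus
         at four additions.  */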

      wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
        {
          PUT_MODE (&all.zext, wider_mode);
          PUT_MODE (&all.wide_mult, wider_mode);
          PUT_MODE (&all.wide_lshr, wider_mode);
          XEXP (&all.wide_lshr, 1) = GEN_INT (GET_MODE_BITSIZE (mode));

          mul_widen_cost[wider_mode] = rtx_cost (&all.wide_mult, SET);
          mul_highpart_cost[mode] = rtx_cost (&all.wide_trunc, SET);
        }

      shift_cost[mode][0] = 0;
      shiftadd_cost[mode][0] = shiftsub_cost[mode][0] = add_cost[mode];

      n = MIN (MAX_BITS_PER_WORD, GET_MODE_BITSIZE (mode));
      for (m = 1; m < n; m++)
        {
          XEXP (&all.shift, 1) = cint[m];
          XEXP (&all.shift_mult, 1) = pow2[m];

          shift_cost[mode][m] = rtx_cost (&all.shift, SET);
          shiftadd_cost[mode][m] = rtx_cost (&all.shift_add, SET);
          shiftsub_cost[mode][m] = rtx_cost (&all.shift_sub, SET);
        }
    }
}

/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (enum machine_mode mode, rtx x)
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}

/* Report on the availability of insv/extv/extzv and the desired mode
   of each of their operands.  Returns MAX_MACHINE_MODE if HAVE_foo
   is false; else the mode of the specified operand.  If OPNO is -1,
   all the caller cares about is whether the insn is available.  */

enum machine_mode
mode_for_extraction (enum extraction_pattern pattern, int opno)
{
  const struct insn_data *data;

  switch (pattern)
    {
    case EP_insv:
      if (HAVE_insv)
        {
          data = &insn_data[CODE_FOR_insv];
          break;
        }
      return MAX_MACHINE_MODE;

    case EP_extv:
      if (HAVE_extv)
        {
          data = &insn_data[CODE_FOR_extv];
          break;
        }
      return MAX_MACHINE_MODE;

    case EP_extzv:
      if (HAVE_extzv)
        {
          data = &insn_data[CODE_FOR_extzv];
          break;
        }
      return MAX_MACHINE_MODE;

    default:
      gcc_unreachable ();
    }

  if (opno == -1)
    return VOIDmode;

  /* Everyone who uses this function used to follow it with
     if (result == VOIDmode) result = word_mode; */
  if (data->operand[opno].mode == VOIDmode)
    return word_mode;
  return data->operand[opno].mode;
}
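
/* For example, mode_for_extraction (EP_insv, 3) yields the mode the insv
   pattern wants for its value operand, or MAX_MACHINE_MODE when the target
   defines no insv pattern; callers compare against MAX_MACHINE_MODE to see
   whether the pattern is available at all.  */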

/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
   ALIGN is the alignment that STR_RTX is known to have.
   TOTAL_SIZE is the size of the structure in bytes, or -1 if varying.  */

/* ??? Note that there are two different ideas here for how
   to determine the size to count bits within, for a register.
   One is BITS_PER_WORD, and the other is the size of operand 3
   of the insv pattern.

   If operand 3 of the insv pattern is VOIDmode, then we will use BITS_PER_WORD
   else, we use the mode of operand 3.  */

void
store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                 unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
                 rtx value)
{
  unsigned int unit
    = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset, bitpos;
  rtx op0 = str_rtx;
  int byte_offset;
  rtx orig_value;

  enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
         but I think that is a mistake.  WORDS_BIG_ENDIAN is
         meaningful at a much higher level; when structures are copied
         between memory and regs, the higher-numbered regs
         always get higher addresses.  */
      int inner_mode_size = GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)));
      int outer_mode_size = GET_MODE_SIZE (GET_MODE (op0));

      byte_offset = 0;

      /* Paradoxical subregs need special handling on big endian machines.  */
      if (SUBREG_BYTE (op0) == 0 && inner_mode_size < outer_mode_size)
        {
          int difference = inner_mode_size - outer_mode_size;

          if (WORDS_BIG_ENDIAN)
            byte_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            byte_offset += difference % UNITS_PER_WORD;
        }
      else
        byte_offset = SUBREG_BYTE (op0);

      bitnum += byte_offset * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }

  /* No action is needed if the target is a register and if the field
     lies completely outside that register.  This can occur if the source
     code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
    return;

  /* Use vec_set patterns for inserting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && (vec_set_optab->handlers[GET_MODE (op0)].insn_code
          != CODE_FOR_nothing)
      && fieldmode == GET_MODE_INNER (GET_MODE (op0))
      && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
      && !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      int icode = (int) vec_set_optab->handlers[outermode].insn_code;
      int pos = bitnum / GET_MODE_BITSIZE (innermode);
      rtx rtxpos = GEN_INT (pos);
      rtx src = value;
      rtx dest = op0;
      rtx pat, seq;
      enum machine_mode mode0 = insn_data[icode].operand[0].mode;
      enum machine_mode mode1 = insn_data[icode].operand[1].mode;
      enum machine_mode mode2 = insn_data[icode].operand[2].mode;

      start_sequence ();

      if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
        src = copy_to_mode_reg (mode1, src);

      if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
        rtxpos = copy_to_mode_reg (mode2, rtxpos);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
                  && (*insn_data[icode].operand[1].predicate) (src, mode1)
                  && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));
      pat = GEN_FCN (icode) (dest, src, rtxpos);
      seq = get_insns ();
      end_sequence ();
      if (pat)
        {
          emit_insn (seq);
          emit_insn (pat);
          return;
        }
    }

  /* If the target is a register, overwriting the entire object, or storing
     a full-word or multi-word field can be done with just a SUBREG.

     If the target is memory, storing any naturally aligned field can be
     done with a simple store.  For targets that support fast unaligned
     memory, any naturally sized, unit aligned field can be done directly.  */

  offset = bitnum / unit;
  bitpos = bitnum % unit;
  byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
                + (offset * UNITS_PER_WORD);
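
  /* For example (numbers chosen for illustration): with 32-bit words and a
     REG target, bitnum == 37 gives OFFSET 1 (the second word), BITPOS 5 and
     BYTE_OFFSET 4; for a MEM the unit is a byte, so the same bitnum gives
     OFFSET 4 and BITPOS 5.  */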

  if (bitpos == 0
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (!MEM_P (op0)
          ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
              || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
             && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
          : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
             || (offset * BITS_PER_UNIT % bitsize == 0
                 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
    {
      if (MEM_P (op0))
        op0 = adjust_address (op0, fieldmode, offset);
      else if (GET_MODE (op0) != fieldmode)
        op0 = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0),
                                   byte_offset);
      emit_move_insn (op0, value);
      return;
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  This must come after the entire register case above,
     since that case is valid for any mode.  The following cases are only
     valid for integral modes.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (MEM_P (op0))
          op0 = adjust_address (op0, imode, 0);
        else
          {
            gcc_assert (imode != BLKmode);
            op0 = gen_lowpart (imode, op0);
          }
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (MEM_P (op0))
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && !MEM_P (op0)
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* Storing an lsb-aligned field in a register
     can be done with a movstrict instruction.  */

  if (!MEM_P (op0)
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (movstrict_optab->handlers[fieldmode].insn_code
          != CODE_FOR_nothing))
    {
      int icode = movstrict_optab->handlers[fieldmode].insn_code;

      /* Get appropriate low part of the value being stored.  */
      if (GET_CODE (value) == CONST_INT || REG_P (value))
        value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
                 || GET_CODE (value) == LABEL_REF
                 || GET_CODE (value) == CONST))
        value = convert_to_mode (fieldmode, value, 0);

      if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
        value = copy_to_mode_reg (fieldmode, value);

      if (GET_CODE (op0) == SUBREG)
        {
          /* Else we've got some float mode source being extracted into
             a different float mode destination -- this combination of
             subregs results in Severe Tire Damage.  */
          gcc_assert (GET_MODE (SUBREG_REG (op0)) == fieldmode
                      || GET_MODE_CLASS (fieldmode) == MODE_INT
                      || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
          op0 = SUBREG_REG (op0);
        }

      emit_insn (GEN_FCN (icode)
                 (gen_rtx_SUBREG (fieldmode, op0,
                                  (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
                                  + (offset * UNITS_PER_WORD)),
                  value));
      return;
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.
         However, only do that if the value is not BLKmode.  */

      unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      /* This is the mode we must force value to, so that there will be enough
         subwords to extract.  Note that fieldmode will often (always?) be
         VOIDmode, because that is what store_field uses to indicate that this
         is a bit field, but passing VOIDmode to operand_subword_force
         will result in an abort.  */
      fieldmode = GET_MODE (value);
      if (fieldmode == VOIDmode)
        fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          unsigned int wordnum = (backwards ? nwords - i - 1 : i);
          unsigned int bit_offset = (backwards
                                     ? MAX ((int) bitsize - ((int) i + 1)
                                            * BITS_PER_WORD,
                                            0)
                                     : (int) i * BITS_PER_WORD);

          store_bit_field (op0, MIN (BITS_PER_WORD,
                                     bitsize - i * BITS_PER_WORD),
                           bitnum + bit_offset, word_mode,
                           operand_subword_force (value, wordnum, fieldmode));
        }
      return;
    }
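
  /* As an illustration (values assumed, not from the sources): a 64-bit
     field on a 32-bit-word target makes the loop above run twice, storing
     bits 0-31 at BITNUM and bits 32-63 at BITNUM + 32 on a little-endian
     machine; with BACKWARDS set the word numbering is mirrored so the
     least significant word is still transferred first.  */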

  /* From here on we can assume that the field to be stored in is
     a full-word (whatever type that is), since it is shorter than a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (!MEM_P (op0))
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (!REG_P (op0))
            {
              /* Since this is a destination (lvalue), we can't copy
                 it to a pseudo.  We can remove a SUBREG that does not
                 change the size of the operand.  Such a SUBREG may
                 have been added above.  */
              gcc_assert (GET_CODE (op0) == SUBREG
                          && (GET_MODE_SIZE (GET_MODE (op0))
                              == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))));
              op0 = SUBREG_REG (op0);
            }
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, (offset * UNITS_PER_WORD));
          offset = 0;
        }
    }

  /* If VALUE has a floating-point or complex mode, access it as an
     integer of the corresponding size.  This can occur on a machine
     with 64 bit registers that uses SFmode for float.  It can also
     occur for unaligned float or complex fields.  */

  orig_value = value;
  if (GET_MODE (value) != VOIDmode
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
    {
      value = gen_reg_rtx (int_mode_for_mode (GET_MODE (value)));
      emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
    }

  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && bitsize > 0
      && GET_MODE_BITSIZE (op_mode) >= bitsize
      && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
            && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode)))
      && insn_data[CODE_FOR_insv].operand[1].predicate (GEN_INT (bitsize),
                                                        VOIDmode))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      enum machine_mode maxmode = mode_for_extraction (EP_insv, 3);
      int save_volatile_ok = volatile_ok;

      volatile_ok = 1;

      /* If this machine's insv can only insert into a register, copy OP0
         into a register and save it back later.  */
      if (MEM_P (op0)
          && ! ((*insn_data[(int) CODE_FOR_insv].operand[0].predicate)
                (op0, VOIDmode)))
        {
          rtx tempreg;
          enum machine_mode bestmode;

          /* Get the mode to use for inserting into this field.  If OP0 is
             BLKmode, get the smallest mode consistent with the alignment.  If
             OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
             mode.  Otherwise, use the smallest mode containing the field.  */

          if (GET_MODE (op0) == BLKmode
              || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
            bestmode
              = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0), maxmode,
                               MEM_VOLATILE_P (op0));
          else
            bestmode = GET_MODE (op0);

          if (bestmode == VOIDmode
              || GET_MODE_SIZE (bestmode) < GET_MODE_SIZE (fieldmode)
              || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
                  && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
            goto insv_loses;

          /* Adjust address to point to the containing unit of that mode.
             Compute offset as multiple of this unit, counting in bytes.  */
          unit = GET_MODE_BITSIZE (bestmode);
          offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
          bitpos = bitnum % unit;
          op0 = adjust_address (op0, bestmode, offset);

          /* Fetch that unit, store the bitfield in it, then store
             the unit.  */
          tempreg = copy_to_reg (op0);
          store_bit_field (tempreg, bitsize, bitpos, fieldmode, orig_value);
          emit_move_insn (op0, tempreg);
          return;
        }
      volatile_ok = save_volatile_ok;

      /* Add OFFSET into OP0's address.  */
      if (MEM_P (xop0))
        xop0 = adjust_address (xop0, byte_mode, offset);

      /* If xop0 is a register, we need it in MAXMODE
         to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
        /* We can't just change the mode, because this might clobber op0,
           and we will need the original value of op0 if insv fails.  */
        xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
      if (REG_P (xop0) && GET_MODE (xop0) != maxmode)
        xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

      /* On big-endian machines, we count bits from the most significant.
         If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
        xbitpos = unit - bitsize - xbitpos;

      /* We have been counting XBITPOS within UNIT.
         Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && !MEM_P (xop0))
        xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

      unit = GET_MODE_BITSIZE (maxmode);
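
      /* For example (illustrative numbers): with UNIT == 32, BITSIZE == 8
         and BITPOS == 4 counted from the msb, an insn counting from the
         lsb wants XBITPOS == 32 - 8 - 4 == 20; the second adjustment then
         rebases that count from UNIT to the full width of MAXMODE.  */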

      /* Convert VALUE to maxmode (which insv insn wants) in VALUE1.  */

      value1 = value;
      if (GET_MODE (value) != maxmode)
        {
          if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
            {
              /* Optimization: Don't bother really extending VALUE
                 if it has all the bits we will actually use.  However,
                 if we must narrow it, be sure we do it correctly.  */

              if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
                {
                  rtx tmp;

                  tmp = simplify_subreg (maxmode, value1, GET_MODE (value), 0);
                  if (! tmp)
                    tmp = simplify_gen_subreg (maxmode,
                                               force_reg (GET_MODE (value),
                                                          value1),
                                               GET_MODE (value), 0);
                  value1 = tmp;
                }
              else
                value1 = gen_lowpart (maxmode, value1);
            }
          else if (GET_CODE (value) == CONST_INT)
            value1 = gen_int_mode (INTVAL (value), maxmode);
          else
            /* Parse phase is supposed to make VALUE's data type
               match that of the component reference, which is a type
               at least as wide as the field; so VALUE should have
               a mode that corresponds to that type.  */
            gcc_assert (CONSTANT_P (value));
        }

      /* If this machine's insv insists on a register,
         get VALUE1 into a register.  */
      if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
             (value1, maxmode)))
        value1 = force_reg (maxmode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
        emit_insn (pat);
      else
        {
          delete_insns_since (last);
          store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
        }
    }
  else
    insv_loses:
    /* Insv is not available; store using shifts and boolean ops.  */
    store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
}

/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0 except offset by OFFSET bytes.
   (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
   (If OP0 is a register, it may be a full word or a narrower mode,
   but BITPOS still counts within a full word,
   which is significant on bigendian machines.)  */

static void
store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT offset,
                       unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitpos, rtx value)
{
  enum machine_mode mode;
  unsigned int total_bits = BITS_PER_WORD;
  rtx temp;
  int all_zero = 0;
  int all_one = 0;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    {
      gcc_assert (!offset);
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        {
          store_split_bit_field (op0, bitsize, bitpos, value);
          return;
        }
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.
         We don't want a mode bigger than the destination.  */

      mode = GET_MODE (op0);
      if (GET_MODE_BITSIZE (mode) == 0
          || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
        mode = word_mode;
      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        {
          /* The only way this should occur is if the field spans word
             boundaries.  */
          store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
                                 value);
          return;
        }

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);

  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;
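
  /* For example (illustrative numbers): with TOTAL_BITS == 32, an 8-bit
     field at BITPOS 4 from the msb lies 32 - 8 - 4 == 20 bits above the
     lsb, so BITPOS becomes 20.  */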

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (GET_CODE (value) == CONST_INT)
    {
      HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
        v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
        all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
                && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
               || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
        all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
                      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
        {
          if ((REG_P (value) || GET_CODE (value) == SUBREG)
              && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
            value = gen_lowpart (mode, value);
          else
            value = convert_to_mode (mode, value, 1);
        }

      if (must_and)
        value = expand_binop (mode, and_optab, value,
                              mask_rtx (mode, 0, bitsize, 0),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
        value = expand_shift (LSHIFT_EXPR, mode, value,
                              build_int_cst (NULL_TREE, bitpos), NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */
  /* We keep the intermediates in registers to allow CSE to combine
     consecutive bitfield assignments.  */

  temp = force_reg (mode, op0);

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, temp,
                           mask_rtx (mode, bitpos, bitsize, 1),
                           NULL_RTX, 1, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);
    }

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    {
      temp = expand_binop (mode, ior_optab, temp, value,
                           NULL_RTX, 1, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);
    }

  if (op0 != temp)
    emit_move_insn (op0, temp);
}

/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitpos, rtx value)
{
  unsigned int unit;
  unsigned int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
        value = word;
      else
        value = gen_lowpart_common (word_mode,
                                    force_reg (GET_MODE (value) != VOIDmode
                                               ? GET_MODE (value)
                                               : word_mode, value));
    }

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         store_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);
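
      /* For example (illustrative numbers): an 8-bit field at BITPOS 28
         with UNIT == 32 takes two iterations, first storing
         MIN (8, 32 - 28) == 4 bits at THISPOS 28, then the remaining
         4 bits at THISPOS 0 of the next unit.  */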

      if (BYTES_BIG_ENDIAN)
        {
          int total_bits;

          /* We must do an endian conversion exactly the same way as it is
             done in extract_bit_field, so that the two calls to
             extract_fixed_bit_field will have comparable arguments.  */
          if (!MEM_P (value) || GET_MODE (value) == BLKmode)
            total_bits = BITS_PER_WORD;
          else
            total_bits = GET_MODE_BITSIZE (GET_MODE (value));

          /* Fetch successively less significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> (bitsize - bitsdone - thissize))
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            /* The args are chosen so that the last part includes the
               lsb.  Give extract_bit_field the value it needs (with
               endianness compensation) to fetch the piece we want.  */
            part = extract_fixed_bit_field (word_mode, value, 0, thissize,
                                            total_bits - bitsize + bitsdone,
                                            NULL_RTX, 1);
        }
      else
        {
          /* Fetch successively more significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> bitsdone)
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            part = extract_fixed_bit_field (word_mode, value, 0, thissize,
                                            bitsdone, NULL_RTX, 1);
        }

      /* If OP0 is a register, then handle OFFSET here.

         When handling multiword bitfields, extract_bit_field may pass
         down a word_mode SUBREG of a larger REG for a bitfield that actually
         crosses a word boundary.  Thus, for a SUBREG, we must find
         the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
        {
          int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
          word = operand_subword_force (SUBREG_REG (op0), word_offset,
                                        GET_MODE (SUBREG_REG (op0)));
          offset = 0;
        }
      else if (REG_P (op0))
        {
          word = operand_subword_force (op0, offset, GET_MODE (op0));
          offset = 0;
        }
      else
        word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
         store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
                             thispos, part);
      bitsdone += thissize;
    }
}

/* Generate code to extract a byte-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   TOTAL_SIZE is the size in bytes of the containing structure,
   or -1 if varying.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */

rtx
extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                   unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
                   enum machine_mode mode, enum machine_mode tmode)
{
  unsigned int unit
    = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset, bitpos;
  rtx op0 = str_rtx;
  rtx spec_target = target;
  rtx spec_target_subreg = 0;
  enum machine_mode int_mode;
  enum machine_mode extv_mode = mode_for_extraction (EP_extv, 0);
  enum machine_mode extzv_mode = mode_for_extraction (EP_extzv, 0);
  enum machine_mode mode1;
  int byte_offset;

  if (tmode == VOIDmode)
    tmode = mode;

  while (GET_CODE (op0) == SUBREG)
    {
      bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }

  /* If we have an out-of-bounds access to a register, just return an
     uninitialized register of the required mode.  This can occur if the
     source code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
    return gen_reg_rtx (tmode);

  if (REG_P (op0)
      && mode == GET_MODE (op0)
      && bitnum == 0
      && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
    {
      /* We're trying to extract a full register from itself.  */
      return op0;
    }

  /* Use vec_extract patterns for extracting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && (vec_extract_optab->handlers[GET_MODE (op0)].insn_code
          != CODE_FOR_nothing)
      && ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
          == bitnum / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      int icode = (int) vec_extract_optab->handlers[outermode].insn_code;
      unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode);
      rtx rtxpos = GEN_INT (pos);
      rtx src = op0;
      rtx dest = NULL, pat, seq;
      enum machine_mode mode0 = insn_data[icode].operand[0].mode;
      enum machine_mode mode1 = insn_data[icode].operand[1].mode;
      enum machine_mode mode2 = insn_data[icode].operand[2].mode;

      if (innermode == tmode || innermode == mode)
        dest = target;

      if (dest == NULL)
        dest = gen_reg_rtx (innermode);

      start_sequence ();

      if (! (*insn_data[icode].operand[0].predicate) (dest, mode0))
        dest = copy_to_mode_reg (mode0, dest);

      if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
        src = copy_to_mode_reg (mode1, src);

      if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
        rtxpos = copy_to_mode_reg (mode2, rtxpos);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
                  && (*insn_data[icode].operand[1].predicate) (src, mode1)
                  && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));

      pat = GEN_FCN (icode) (dest, src, rtxpos);
      seq = get_insns ();
      end_sequence ();
      if (pat)
        {
          emit_insn (seq);
          emit_insn (pat);
          return dest;
        }
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (MEM_P (op0))
          op0 = adjust_address (op0, imode, 0);
        else
          {
            gcc_assert (imode != BLKmode);
            op0 = gen_lowpart (imode, op0);

            /* If we got a SUBREG, force it into a register since we
               aren't going to be able to do another SUBREG on it.  */
            if (GET_CODE (op0) == SUBREG)
              op0 = force_reg (imode, op0);
          }
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (MEM_P (op0))
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* Extraction of a full-word or multi-word value from a structure
     in a register or aligned memory can be done with just a SUBREG.
     A subword value in the least significant part of a register
     can also be extracted with a SUBREG.  For this, we need the
     byte offset of the value in op0.  */

  bitpos = bitnum % unit;
  offset = bitnum / unit;
  byte_offset = bitpos / BITS_PER_UNIT + offset * UNITS_PER_WORD;

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && !MEM_P (op0)
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* Only scalar integer modes can be converted via subregs.  There is an
     additional problem for FP modes here in that they can have a precision
     which is different from the size.  mode_for_size uses precision, but
     we want a mode based on the size, so we must avoid calling it for FP
     modes.  */
  mode1 = (SCALAR_INT_MODE_P (tmode)
           ? mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0)
           : mode);

  if (((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
        && bitpos % BITS_PER_WORD == 0)
       || (mode1 != BLKmode
           /* ??? The big endian test here is wrong.  This is correct
              if the value is in a register, and if mode_for_size is not
              the same mode as op0.  This causes us to get unnecessarily
              inefficient code from the Thumb port when -mbig-endian.  */
           && (BYTES_BIG_ENDIAN
               ? bitpos + bitsize == BITS_PER_WORD
               : bitpos == 0)))
      && ((!MEM_P (op0)
           && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                     GET_MODE_BITSIZE (GET_MODE (op0)))
           && GET_MODE_SIZE (mode1) != 0
           && byte_offset % GET_MODE_SIZE (mode1) == 0)
          || (MEM_P (op0)
              && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
                  || (offset * BITS_PER_UNIT % bitsize == 0
                      && MEM_ALIGN (op0) % bitsize == 0)))))
    {
      if (mode1 != GET_MODE (op0))
        {
          if (MEM_P (op0))
            op0 = adjust_address (op0, mode1, offset);
          else
            {
              rtx sub = simplify_gen_subreg (mode1, op0, GET_MODE (op0),
                                             byte_offset);
              if (sub == NULL)
                goto no_subreg_mode_swap;
              op0 = sub;
            }
        }
      if (mode1 != mode)
        return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }
 no_subreg_mode_swap:

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.  */

      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      if (target == 0 || !REG_P (target))
        target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          /* Word number in TARGET to use.  */
          unsigned int wordnum
            = (WORDS_BIG_ENDIAN
               ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
               : i);
          /* Offset from start of field in OP0.  */
          unsigned int bit_offset = (WORDS_BIG_ENDIAN
                                     ? MAX (0, ((int) bitsize - ((int) i + 1)
                                                * (int) BITS_PER_WORD))
                                     : (int) i * BITS_PER_WORD);
          rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
          rtx result_part
            = extract_bit_field (op0, MIN (BITS_PER_WORD,
                                           bitsize - i * BITS_PER_WORD),
                                 bitnum + bit_offset, 1, target_part, mode,
                                 word_mode);

          gcc_assert (target_part);

          if (result_part != target_part)
            emit_move_insn (target_part, result_part);
        }

      if (unsignedp)
        {
          /* Unless we've filled TARGET, the upper regs in a multi-reg value
             need to be zero'd out.  */
          if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
            {
              unsigned int i, total_words;

              total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
              for (i = nwords; i < total_words; i++)
                emit_move_insn
                  (operand_subword (target,
                                    WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
                                    1, VOIDmode),
                   const0_rtx);
            }
          return target;
        }

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
                             build_int_cst (NULL_TREE,
                                            GET_MODE_BITSIZE (mode) - bitsize),
                             NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
                           build_int_cst (NULL_TREE,
                                          GET_MODE_BITSIZE (mode) - bitsize),
                           NULL_RTX, 0);
    }

  /* From here on we know the desired field is smaller than a word.  */

  /* Check if there is a correspondingly-sized integer field, so we can
     safely extract it as one size of integer, if necessary; then
     truncate or extend to the size that is wanted; then use SUBREGs or
     convert_to_mode to get one of the modes we really wanted.  */

  int_mode = int_mode_for_mode (tmode);
  if (int_mode == BLKmode)
    int_mode = int_mode_for_mode (mode);
  /* Should probably push op0 out to memory and then do a load.  */
  gcc_assert (int_mode != BLKmode);

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */
  if (!MEM_P (op0))
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (!REG_P (op0))
            op0 = copy_to_reg (op0);
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, (offset * UNITS_PER_WORD));
        }
      offset = 0;
    }

  /* Now OFFSET is nonzero only for memory operands.  */

  if (unsignedp)
    {
      if (HAVE_extzv
          && bitsize > 0
          && GET_MODE_BITSIZE (extzv_mode) >= bitsize
          && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos > GET_MODE_BITSIZE (extzv_mode))))
        {
          unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0;
          rtx xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode = mode_for_extraction (EP_extzv, 0);

          if (MEM_P (xop0))
            {
              int save_volatile_ok = volatile_ok;
              volatile_ok = 1;

              /* Is the memory operand acceptable?  */
              if (! ((*insn_data[(int) CODE_FOR_extzv].operand[1].predicate)
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum,
                                              MEM_ALIGN (xop0), maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
                          && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
                    goto extzv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = adjust_address (xop0, bestmode, xoffset);

                  /* Make sure register is big enough for the whole field.  */
                  if (xoffset * BITS_PER_UNIT + unit
                      < offset * BITS_PER_UNIT + bitsize)
                    goto extzv_loses;

                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = adjust_address (xop0, byte_mode, xoffset);

              volatile_ok = save_volatile_ok;
            }

          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extzv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extzv_loses;
          if (REG_P (xop0) && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* Now convert from counting within UNIT to counting in MAXMODE.  */
          if (BITS_BIG_ENDIAN && !MEM_P (xop0))
            xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

          unit = GET_MODE_BITSIZE (maxmode);

          if (xtarget == 0)
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (REG_P (xtarget))
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extzv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_data[(int) CODE_FOR_extzv].operand[0].predicate)
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extzv (xtarget, xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                                bitpos, target, 1);
            }
        }
      else
        extzv_loses:
        target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                          bitpos, target, 1);
    }
  else
    {
      if (HAVE_extv
          && bitsize > 0
          && GET_MODE_BITSIZE (extv_mode) >= bitsize
          && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos > GET_MODE_BITSIZE (extv_mode))))
        {
          int xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0, xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode = mode_for_extraction (EP_extv, 0);

          if (MEM_P (xop0))
            {
              /* Is the memory operand acceptable?  */
              if (! ((*insn_data[(int) CODE_FOR_extv].operand[1].predicate)
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum,
                                              MEM_ALIGN (xop0), maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
                          && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
                    goto extv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = adjust_address (xop0, bestmode, xoffset);

                  /* Make sure register is big enough for the whole field.  */
                  if (xoffset * BITS_PER_UNIT + unit
                      < offset * BITS_PER_UNIT + bitsize)
                    goto extv_loses;

                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = adjust_address (xop0, byte_mode, xoffset);
            }

          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extv_loses;
          if (REG_P (xop0) && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* XBITPOS counts within a size of UNIT.
             Adjust to count within a size of MAXMODE.  */
          if (BITS_BIG_ENDIAN && !MEM_P (xop0))
            xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);

          unit = GET_MODE_BITSIZE (maxmode);

          if (xtarget == 0)
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (REG_P (xtarget))
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_data[(int) CODE_FOR_extv].operand[0].predicate)
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extv (xtarget, xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                                bitpos, target, 0);
            }
        }
      else
        extv_loses:
        target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                          bitpos, target, 0);
    }

  if (target == spec_target)
    return target;
  if (target == spec_target_subreg)
    return spec_target;
  if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
    {
      /* If the target mode is not a scalar integral, first convert to the
         integer mode of that size and then access it as a floating-point
         value via a SUBREG.  */
      if (!SCALAR_INT_MODE_P (tmode))
        {
          enum machine_mode smode
            = mode_for_size (GET_MODE_BITSIZE (tmode), MODE_INT, 0);
          target = convert_to_mode (smode, target, unsignedp);
          target = force_reg (smode, target);
          return gen_lowpart (tmode, target);
        }

      return convert_to_mode (tmode, target, unsignedp);
    }
  return target;
}

/* Extract a bit field using shifts and boolean operations.
   Returns an rtx to represent the value.
   OP0 addresses a register (word) or memory (byte).
   BITPOS says which bit within the word or byte the bit field starts in.
   OFFSET says how many bytes farther the bit field starts;
   it is 0 if OP0 is a register.
   BITSIZE says how many bits long the bit field is.
   (If OP0 is a register, it may be narrower than a full word,
   but BITPOS still counts within a full word,
   which is significant on bigendian machines.)

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.  */

static rtx
extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
                         unsigned HOST_WIDE_INT offset,
                         unsigned HOST_WIDE_INT bitsize,
                         unsigned HOST_WIDE_INT bitpos, rtx target,
                         int unsignedp)
{
  unsigned int total_bits = BITS_PER_WORD;
  enum machine_mode mode;

  if (GET_CODE (op0) == SUBREG || REG_P (op0))
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        /* The only way this should occur is if the field spans word
           boundaries.  */
        return extract_split_bit_field (op0, bitsize,
                                        bitpos + offset * BITS_PER_UNIT,
                                        unsignedp);

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb and that of OP0.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */

  if (unsignedp)
    {
      if (bitpos)
        {
          /* If the field does not already start at the lsb,
             shift it so it does.  */
          tree amount = build_int_cst (NULL_TREE, bitpos);
          /* Maybe propagate the target for the shift.  */
          /* But not if we will return it--could confuse integrate.c.  */
          rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
          if (tmode != mode) subtarget = 0;
          op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
        }
      /* Convert the value to the desired mode.  */
      if (mode != tmode)
        op0 = convert_to_mode (tmode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
         mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitpos + bitsize)
        return expand_binop (GET_MODE (op0), and_optab, op0,
                             mask_rtx (GET_MODE (op0), 0, bitsize, 0),
                             target, 1, OPTAB_LIB_WIDEN);

      return op0;
    }

  /* To extract a signed bit-field, first shift its msb to the msb of the word,
     then arithmetic-shift its lsb to the lsb of the word.  */

  op0 = force_reg (mode, op0);
  if (mode != tmode)
    target = 0;

  /* Find the narrowest integer mode that contains the field.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
      {
        op0 = convert_to_mode (mode, op0, 0);
        break;
      }

  if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
    {
      tree amount
        = build_int_cst (NULL_TREE,
                         GET_MODE_BITSIZE (mode) - (bitsize + bitpos));
      /* Maybe propagate the target for the shift.  */
      rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }

  return expand_shift (RSHIFT_EXPR, mode, op0,
                       build_int_cst (NULL_TREE,
                                      GET_MODE_BITSIZE (mode) - bitsize),
                       target, 0);
}

/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
   of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
   complement of that if COMPLEMENT.  The mask is truncated if
   necessary to the width of mode MODE.  The mask is zero-extended if
   BITSIZE+BITPOS is too small for MODE.  */

static rtx
mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement)
{
  HOST_WIDE_INT masklow, maskhigh;

  if (bitsize == 0)
    masklow = 0;
  else if (bitpos < HOST_BITS_PER_WIDE_INT)
    masklow = (HOST_WIDE_INT) -1 << bitpos;
  else
    masklow = 0;

  if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
    masklow &= ((unsigned HOST_WIDE_INT) -1
                >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));

  if (bitpos <= HOST_BITS_PER_WIDE_INT)
    maskhigh = -1;
  else
    maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);

  if (bitsize == 0)
    maskhigh = 0;
  else if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
    maskhigh &= ((unsigned HOST_WIDE_INT) -1
                 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
  else
    maskhigh = 0;

  if (complement)
    {
      masklow = ~masklow;
      maskhigh = ~maskhigh;
    }

  return immed_double_const (masklow, maskhigh, mode);
}
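
/* For example (illustrative, assuming a 64-bit HOST_WIDE_INT host):
   mask_rtx (SImode, 4, 8, 0) yields 0x00000ff0, eight ones shifted up four
   places, and mask_rtx (SImode, 4, 8, 1) yields the complement 0xfffff00f
   truncated to SImode.  */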

/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
   VALUE truncated to BITSIZE bits and then shifted left BITPOS bits.  */

static rtx
lshift_value (enum machine_mode mode, rtx value, int bitpos, int bitsize)
{
  unsigned HOST_WIDE_INT v = INTVAL (value);
  HOST_WIDE_INT low, high;

  if (bitsize < HOST_BITS_PER_WIDE_INT)
    v &= ~((HOST_WIDE_INT) -1 << bitsize);

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      low = v << bitpos;
      high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
    }
  else
    {
      low = 0;
      high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
    }

  return immed_double_const (low, high, mode);
}
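
/* For example (illustrative): lshift_value (SImode, GEN_INT (5), 8, 3)
   truncates 5 to its low 3 bits (still 5) and shifts it left 8 places,
   producing the constant 0x500.  */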

/* Extract a bit field from a memory by forcing the alignment of the
   memory.  This is efficient only if the field spans at least 4 boundaries.

   OP0 is the MEM.
   BITSIZE is the field width; BITPOS is the position of the first bit.
   UNSIGNEDP is true if the result should be zero-extended.  */

static rtx
extract_force_align_mem_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
                                   unsigned HOST_WIDE_INT bitpos,
                                   int unsignedp)
{
  enum machine_mode mode, dmode;
  unsigned int m_bitsize, m_size;
  unsigned int sign_shift_up, sign_shift_dn;
  rtx base, a1, a2, v1, v2, comb, shift, result, start;

  /* Choose a mode that will fit BITSIZE.  */
  mode = smallest_mode_for_size (bitsize, MODE_INT);
  m_size = GET_MODE_SIZE (mode);
  m_bitsize = GET_MODE_BITSIZE (mode);

  /* Choose a mode twice as wide.  Fail if no such mode exists.  */
  dmode = mode_for_size (m_bitsize * 2, MODE_INT, false);
  if (dmode == BLKmode)
    return NULL;

  do_pending_stack_adjust ();
  start = get_last_insn ();

  /* At the end, we'll need an additional shift to deal with sign/zero
     extension.  By default this will be a left+right shift of the
     appropriate size.  But we may be able to eliminate one of them.  */
  sign_shift_up = sign_shift_dn = m_bitsize - bitsize;
1925 if (STRICT_ALIGNMENT)
1927 base = plus_constant (XEXP (op0, 0), bitpos / BITS_PER_UNIT);
1928 bitpos %= BITS_PER_UNIT;
1930 /* We load two values to be concatenated. There's an edge condition
1931 that bears notice -- an aligned value at the end of a page can
1932 only load one value lest we segfault. So the two values we load
1933 are at "base & -size" and "(base + size - 1) & -size". If base
1934 is unaligned, the addresses will be aligned and sequential; if
1935 base is aligned, the addresses will both be equal to base. */
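/* A concrete case (added for illustration): with m_size == 4,
   base == 0x1001 gives a1 == 0x1000 and a2 == 0x1004 (aligned and
   sequential), while an aligned base == 0x1000 gives
   a1 == a2 == 0x1000, so only one word of the page is touched.  */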
1937 a1 = expand_simple_binop (Pmode, AND, force_operand (base, NULL),
1938 GEN_INT (-(HOST_WIDE_INT)m_size),
1939 NULL, true, OPTAB_LIB_WIDEN);
1940 mark_reg_pointer (a1, m_bitsize);
1941 v1 = gen_rtx_MEM (mode, a1);
1942 set_mem_align (v1, m_bitsize);
1943 v1 = force_reg (mode, validize_mem (v1));
1945 a2 = plus_constant (base, GET_MODE_SIZE (mode) - 1);
1946 a2 = expand_simple_binop (Pmode, AND, force_operand (a2, NULL),
1947 GEN_INT (-(HOST_WIDE_INT)m_size),
1948 NULL, true, OPTAB_LIB_WIDEN);
1949 v2 = gen_rtx_MEM (mode, a2);
1950 set_mem_align (v2, m_bitsize);
1951 v2 = force_reg (mode, validize_mem (v2));
1953 /* Combine these two values into a double-word value. */
1954 if (m_bitsize == BITS_PER_WORD)
1956 comb = gen_reg_rtx (dmode);
1957 emit_insn (gen_rtx_CLOBBER (VOIDmode, comb));
1958 emit_move_insn (gen_rtx_SUBREG (mode, comb, 0), v1);
1959 emit_move_insn (gen_rtx_SUBREG (mode, comb, m_size), v2);
1963 if (BYTES_BIG_ENDIAN)
1964 comb = v1, v1 = v2, v2 = comb;
1965 v1 = convert_modes (dmode, mode, v1, true);
1968 v2 = convert_modes (dmode, mode, v2, true);
1969 v2 = expand_simple_binop (dmode, ASHIFT, v2, GEN_INT (m_bitsize),
1970 NULL, true, OPTAB_LIB_WIDEN);
1973 comb = expand_simple_binop (dmode, IOR, v1, v2, NULL,
1974 true, OPTAB_LIB_WIDEN);
1979 shift = expand_simple_binop (Pmode, AND, base, GEN_INT (m_size - 1),
1980 NULL, true, OPTAB_LIB_WIDEN);
1981 shift = expand_mult (Pmode, shift, GEN_INT (BITS_PER_UNIT), NULL, 1);
1985 if (sign_shift_up <= bitpos)
1986 bitpos -= sign_shift_up, sign_shift_up = 0;
1987 shift = expand_simple_binop (Pmode, PLUS, shift, GEN_INT (bitpos),
1988 NULL, true, OPTAB_LIB_WIDEN);
1993 unsigned HOST_WIDE_INT offset = bitpos / BITS_PER_UNIT;
1994 bitpos %= BITS_PER_UNIT;
1996 /* When strict alignment is not required, we can just load directly
1997 from memory without masking. If the remaining BITPOS offset is
1998 small enough, we may be able to do all operations in MODE as
1999 opposed to DMODE. */
2000 if (bitpos + bitsize <= m_bitsize)
2002 comb = adjust_address (op0, dmode, offset);
2004 if (sign_shift_up <= bitpos)
2005 bitpos -= sign_shift_up, sign_shift_up = 0;
2006 shift = GEN_INT (bitpos);
2009 /* Shift down the double-word such that the requested value is at bit 0. */
2010 if (shift != const0_rtx)
2011 comb = expand_simple_binop (dmode, unsignedp ? LSHIFTRT : ASHIFTRT,
2012 comb, shift, NULL, unsignedp, OPTAB_LIB_WIDEN);
2016 /* If the field exactly matches MODE, then all we need to do is return the
2017 lowpart. Otherwise, shift to get the sign bits set properly. */
2018 result = force_reg (mode, gen_lowpart (mode, comb));
2021 result = expand_simple_binop (mode, ASHIFT, result,
2022 GEN_INT (sign_shift_up),
2023 NULL_RTX, 0, OPTAB_LIB_WIDEN);
2025 result = expand_simple_binop (mode, unsignedp ? LSHIFTRT : ASHIFTRT,
2026 result, GEN_INT (sign_shift_dn),
2027 NULL_RTX, 0, OPTAB_LIB_WIDEN);
2032 delete_insns_since (start);
2036 /* Extract a bit field that is split across two words
2037 and return an RTX for the result.
2039 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
2040 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
2041 UNSIGNEDP is 1 if we should zero-extend the contents; else sign-extend. */
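/* Example of the splitting (illustrative only): with BITS_PER_WORD == 32,
   BITPOS == 28 and BITSIZE == 20, the loop below extracts 4 bits from the
   first word and 16 bits from the second, then combines them roughly as

	result = part1 | (part2 << 4);		-- !BYTES_BIG_ENDIAN case

   before the final sign- or zero-extension.  */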
2044 extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
2045 unsigned HOST_WIDE_INT bitpos, int unsignedp)
2048 unsigned int bitsdone = 0;
2049 rtx result = NULL_RTX;
2052 /* Make sure UNIT isn't larger than BITS_PER_WORD; we can only handle that
2054 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
2055 unit = BITS_PER_WORD;
2058 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
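/* The "0 &&" in the condition below deliberately disables the
   force-alignment path, so extract_force_align_mem_bit_field is
   currently dead code on this path.  */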
2059 if (0 && bitsize / unit > 2)
2061 rtx tmp = extract_force_align_mem_bit_field (op0, bitsize, bitpos,
2068 while (bitsdone < bitsize)
2070 unsigned HOST_WIDE_INT thissize;
2072 unsigned HOST_WIDE_INT thispos;
2073 unsigned HOST_WIDE_INT offset;
2075 offset = (bitpos + bitsdone) / unit;
2076 thispos = (bitpos + bitsdone) % unit;
2078 /* THISSIZE must not overrun a word boundary. Otherwise,
2079 extract_fixed_bit_field will call us again, and we will mutually
2081 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
2082 thissize = MIN (thissize, unit - thispos);
2084 /* If OP0 is a register, then handle OFFSET here.
2086 When handling multiword bitfields, extract_bit_field may pass
2087 down a word_mode SUBREG of a larger REG for a bitfield that actually
2088 crosses a word boundary. Thus, for a SUBREG, we must find
2089 the current word starting from the base register. */
2090 if (GET_CODE (op0) == SUBREG)
2092 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
2093 word = operand_subword_force (SUBREG_REG (op0), word_offset,
2094 GET_MODE (SUBREG_REG (op0)));
2097 else if (REG_P (op0))
2099 word = operand_subword_force (op0, offset, GET_MODE (op0));
2105 /* Extract the parts in bit-counting order,
2106 whose meaning is determined by BYTES_BIG_ENDIAN.
2107 OFFSET is in UNITs, and UNIT is in bits.
2108 extract_fixed_bit_field wants offset in bytes. */
2109 part = extract_fixed_bit_field (word_mode, word,
2110 offset * unit / BITS_PER_UNIT,
2111 thissize, thispos, 0, 1);
2112 bitsdone += thissize;
2114 /* Shift this part into place for the result. */
2115 if (BYTES_BIG_ENDIAN)
2117 if (bitsize != bitsdone)
2118 part = expand_shift (LSHIFT_EXPR, word_mode, part,
2119 build_int_cst (NULL_TREE, bitsize - bitsdone),
2124 if (bitsdone != thissize)
2125 part = expand_shift (LSHIFT_EXPR, word_mode, part,
2126 build_int_cst (NULL_TREE,
2127 bitsdone - thissize), 0, 1);
2133 /* Combine the parts with bitwise or. This works
2134 because we extracted each part as an unsigned bit field. */
2135 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
2141 /* Unsigned bit field: we are done. */
2144 /* Signed bit field: sign-extend with two arithmetic shifts. */
2145 result = expand_shift (LSHIFT_EXPR, word_mode, result,
2146 build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
2148 return expand_shift (RSHIFT_EXPR, word_mode, result,
2149 build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
2153 /* Add INC into TARGET. */
2156 expand_inc (rtx target, rtx inc)
2158 rtx value = expand_binop (GET_MODE (target), add_optab,
2160 target, 0, OPTAB_LIB_WIDEN);
2161 if (value != target)
2162 emit_move_insn (target, value);
2165 /* Subtract DEC from TARGET. */
2168 expand_dec (rtx target, rtx dec)
2170 rtx value = expand_binop (GET_MODE (target), sub_optab,
2172 target, 0, OPTAB_LIB_WIDEN);
2173 if (value != target)
2174 emit_move_insn (target, value);
2177 /* Output a shift instruction for expression code CODE,
2178 with SHIFTED being the rtx for the value to shift,
2179 and AMOUNT the tree for the amount to shift by.
2180 Store the result in the rtx TARGET, if that is convenient.
2181 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2182 Return the rtx for where the value is. */
2185 expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
2186 tree amount, rtx target, int unsignedp)
2189 int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
2190 int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
2193 /* Previously detected shift-counts computed by NEGATE_EXPR
2194 and shifted in the other direction; but that does not work
2197 op1 = expand_normal (amount);
2199 if (SHIFT_COUNT_TRUNCATED)
2201 if (GET_CODE (op1) == CONST_INT
2202 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
2203 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
2204 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
2205 % GET_MODE_BITSIZE (mode));
2206 else if (GET_CODE (op1) == SUBREG
2207 && subreg_lowpart_p (op1))
2208 op1 = SUBREG_REG (op1);
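/* For instance (illustration): with SHIFT_COUNT_TRUNCATED on a 32-bit
   mode, a constant shift count of 33 is reduced here to 33 % 32 == 1,
   matching what the hardware does with an out-of-range count.  */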
2211 if (op1 == const0_rtx)
2214 /* Check whether it's cheaper to implement a left shift by a constant
2215 bit count as a sequence of additions. */
2216 if (code == LSHIFT_EXPR
2217 && GET_CODE (op1) == CONST_INT
2219 && INTVAL (op1) < GET_MODE_BITSIZE (mode)
2220 && shift_cost[mode][INTVAL (op1)] > INTVAL (op1) * add_cost[mode])
2223 for (i = 0; i < INTVAL (op1); i++)
2225 temp = force_reg (mode, shifted);
2226 shifted = expand_binop (mode, add_optab, temp, temp, NULL_RTX,
2227 unsignedp, OPTAB_LIB_WIDEN);
2232 for (try = 0; temp == 0 && try < 3; try++)
2234 enum optab_methods methods;
2237 methods = OPTAB_DIRECT;
2239 methods = OPTAB_WIDEN;
2241 methods = OPTAB_LIB_WIDEN;
2245 /* Widening does not work for rotation. */
2246 if (methods == OPTAB_WIDEN)
2248 else if (methods == OPTAB_LIB_WIDEN)
2250 /* If we have been unable to open-code this by a rotation,
2251 do it as the IOR of two shifts. I.e., to rotate A
2252 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
2253 where C is the bitsize of A.
2255 It is theoretically possible that the target machine might
2256 not be able to perform either shift and hence we would
2257 be making two libcalls rather than just the one for the
2258 shift (similarly if IOR could not be done). We will allow
2259 this extremely unlikely lossage to avoid complicating the
2262 rtx subtarget = target == shifted ? 0 : target;
2264 tree type = TREE_TYPE (amount);
2265 tree new_amount = make_tree (type, op1);
2267 = fold_build2 (MINUS_EXPR, type,
2268 build_int_cst (type, GET_MODE_BITSIZE (mode)),
2271 shifted = force_reg (mode, shifted);
2273 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
2274 mode, shifted, new_amount, 0, 1);
2275 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
2276 mode, shifted, other_amount, subtarget, 1);
2277 return expand_binop (mode, ior_optab, temp, temp1, target,
2278 unsignedp, methods);
2281 temp = expand_binop (mode,
2282 left ? rotl_optab : rotr_optab,
2283 shifted, op1, target, unsignedp, methods);
2286 temp = expand_binop (mode,
2287 left ? ashl_optab : lshr_optab,
2288 shifted, op1, target, unsignedp, methods);
2290 /* Do arithmetic shifts.
2291 Also, if we are going to widen the operand, we can just as well
2292 use an arithmetic right-shift instead of a logical one. */
2293 if (temp == 0 && ! rotate
2294 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2296 enum optab_methods methods1 = methods;
2298 /* If trying to widen a log shift to an arithmetic shift,
2299 don't accept an arithmetic shift of the same size. */
2301 methods1 = OPTAB_MUST_WIDEN;
2303 /* Arithmetic shift */
2305 temp = expand_binop (mode,
2306 left ? ashl_optab : ashr_optab,
2307 shifted, op1, target, unsignedp, methods1);
2310 /* We used to try extzv here for logical right shifts, but that was
2311 only useful for one machine, the VAX, and caused poor code
2312 generation there for lshrdi3, so the code was deleted and a
2313 define_expand for lshrsi3 was added to vax.md. */
2333 /* This structure holds the "cost" of a multiply sequence. The
2334 "cost" field holds the total rtx_cost of every operator in the
2335 synthetic multiplication sequence, hence cost(a op b) is defined
2336 as rtx_cost(op) + cost(a) + cost(b), where cost(leaf) is zero.
2337 The "latency" field holds the minimum possible latency of the
2338 synthetic multiply, on a hypothetical infinitely parallel CPU.
2339 This is the critical path, or the maximum height, of the expression
2340 tree which is the sum of rtx_costs on the most expensive path from
2341 any leaf to the root. Hence latency(a op b) is defined as zero for
2342 leaves and rtx_cost(op) + max(latency(a), latency(b)) otherwise. */
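/* As a small example of the distinction (added for illustration): for
   (a + b) + (c + d) built from three additions, cost is 3 * add_cost
   while latency is only 2 * add_cost, because the two inner additions
   can issue in parallel on the hypothetical CPU.  */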
2345 short cost; /* Total rtx_cost of the multiplication sequence. */
2346 short latency; /* The latency of the multiplication sequence. */
2349 /* This macro is used to compare a pointer to a mult_cost against a
2350 single integer "rtx_cost" value. This is equivalent to the macro
2351 CHEAPER_MULT_COST(X,Z) where Z = {Y,Y}. */
2352 #define MULT_COST_LESS(X,Y) ((X)->cost < (Y) \
2353 || ((X)->cost == (Y) && (X)->latency < (Y)))
2355 /* This macro is used to compare two pointers to mult_costs against
2356 each other. The macro returns true if X is cheaper than Y.
2357 Currently, the cheaper of two mult_costs is the one with the
2358 lower "cost". If "cost"s are tied, the lower latency is cheaper. */
2359 #define CHEAPER_MULT_COST(X,Y) ((X)->cost < (Y)->cost \
2360 || ((X)->cost == (Y)->cost \
2361 && (X)->latency < (Y)->latency))
2363 /* This structure records a sequence of operations.
2364 `ops' is the number of operations recorded.
2365 `cost' is their total cost.
2366 The operations are stored in `op' and the corresponding
2367 logarithms of the integer coefficients in `log'.
2369 These are the operations:
2370 alg_zero total := 0;
2371 alg_m total := multiplicand;
2372 alg_shift total := total * coeff
2373 alg_add_t_m2 total := total + multiplicand * coeff;
2374 alg_sub_t_m2 total := total - multiplicand * coeff;
2375 alg_add_factor total := total * coeff + total;
2376 alg_sub_factor total := total * coeff - total;
2377 alg_add_t2_m total := total * coeff + multiplicand;
2378 alg_sub_t2_m total := total * coeff - multiplicand;
2380 The first operand must be either alg_zero or alg_m. */
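/* For example (illustrative), multiplying x by 10 can be recorded as

	alg_m			total := x;
	alg_add_t2_m (log 2)	total := total * 4 + x;		-- 5x
	alg_shift    (log 1)	total := total * 2;		-- 10x

   i.e. two shift/add class operations instead of a multiply.  */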
2384 struct mult_cost cost;
2386 /* The sizes of the OP and LOG fields are not directly related to the
2387 word size, but the worst case arises when we have few
2388 consecutive ones or zeros, i.e., a multiplicand like 10101010101...
2389 In that case we will generate shift-by-2, add, shift-by-2, add,...,
2390 in total wordsize operations. */
2391 enum alg_code op[MAX_BITS_PER_WORD];
2392 char log[MAX_BITS_PER_WORD];
2395 /* The entry for our multiplication cache/hash table. */
2396 struct alg_hash_entry {
2397 /* The number we are multiplying by. */
2398 unsigned HOST_WIDE_INT t;
2400 /* The mode in which we are multiplying something by T. */
2401 enum machine_mode mode;
2403 /* The best multiplication algorithm for t. */
2406 /* The cost of multiplication if ALG_CODE is not alg_impossible.
2407 Otherwise, the cost within which multiplication by T is
2409 struct mult_cost cost;
2412 /* The number of cache/hash entries. */
2413 #if HOST_BITS_PER_WIDE_INT == 64
2414 #define NUM_ALG_HASH_ENTRIES 1031
2416 #define NUM_ALG_HASH_ENTRIES 307
2419 /* Each entry of ALG_HASH caches alg_code for some integer. This is
2420 actually a hash table. If we have a collision, the older
2421 entry is kicked out. */
2422 static struct alg_hash_entry alg_hash[NUM_ALG_HASH_ENTRIES];
2424 /* Indicates the type of fixup needed after a constant multiplication.
2425 BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that
2426 the result should be negated, and ADD_VARIANT means that the
2427 multiplicand should be added to the result. */
2428 enum mult_variant {basic_variant, negate_variant, add_variant};
2430 static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
2431 const struct mult_cost *, enum machine_mode mode);
2432 static bool choose_mult_variant (enum machine_mode, HOST_WIDE_INT,
2433 struct algorithm *, enum mult_variant *, int);
2434 static rtx expand_mult_const (enum machine_mode, rtx, HOST_WIDE_INT, rtx,
2435 const struct algorithm *, enum mult_variant);
2436 static unsigned HOST_WIDE_INT choose_multiplier (unsigned HOST_WIDE_INT, int,
2437 int, rtx *, int *, int *);
2438 static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
2439 static rtx extract_high_half (enum machine_mode, rtx);
2440 static rtx expand_mult_highpart (enum machine_mode, rtx, rtx, rtx, int, int);
2441 static rtx expand_mult_highpart_optab (enum machine_mode, rtx, rtx, rtx,
2443 /* Compute and return the best algorithm for multiplying by T.
2444 The algorithm must cost less than COST_LIMIT.
2445 If retval.cost >= COST_LIMIT, no algorithm was found and all
2446 other fields of the returned struct are undefined.
2447 MODE is the machine mode of the multiplication. */
2450 synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
2451 const struct mult_cost *cost_limit, enum machine_mode mode)
2454 struct algorithm *alg_in, *best_alg;
2455 struct mult_cost best_cost;
2456 struct mult_cost new_limit;
2457 int op_cost, op_latency;
2458 unsigned HOST_WIDE_INT q;
2459 int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode));
2461 bool cache_hit = false;
2462 enum alg_code cache_alg = alg_zero;
2464 /* Indicate that no algorithm is yet found. If no algorithm
2465 is found, this value will be returned and indicate failure. */
2466 alg_out->cost.cost = cost_limit->cost + 1;
2467 alg_out->cost.latency = cost_limit->latency + 1;
2469 if (cost_limit->cost < 0
2470 || (cost_limit->cost == 0 && cost_limit->latency <= 0))
2473 /* Restrict the bits of "t" to the multiplication's mode. */
2474 t &= GET_MODE_MASK (mode);
2476 /* t == 1 can be done at zero cost. */
2480 alg_out->cost.cost = 0;
2481 alg_out->cost.latency = 0;
2482 alg_out->op[0] = alg_m;
2486 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2490 if (MULT_COST_LESS (cost_limit, zero_cost))
2495 alg_out->cost.cost = zero_cost;
2496 alg_out->cost.latency = zero_cost;
2497 alg_out->op[0] = alg_zero;
2502 /* We'll be needing a couple extra algorithm structures now. */
2504 alg_in = alloca (sizeof (struct algorithm));
2505 best_alg = alloca (sizeof (struct algorithm));
2506 best_cost = *cost_limit;
2508 /* Compute the hash index. */
2509 hash_index = (t ^ (unsigned int) mode) % NUM_ALG_HASH_ENTRIES;
2511 /* See if we already know what to do for T. */
2512 if (alg_hash[hash_index].t == t
2513 && alg_hash[hash_index].mode == mode
2514 && alg_hash[hash_index].alg != alg_unknown)
2516 cache_alg = alg_hash[hash_index].alg;
2518 if (cache_alg == alg_impossible)
2520 /* The cache tells us that it's impossible to synthesize
2521 multiplication by T within alg_hash[hash_index].cost. */
2522 if (!CHEAPER_MULT_COST (&alg_hash[hash_index].cost, cost_limit))
2523 /* COST_LIMIT is at least as restrictive as the one
2524 recorded in the hash table, in which case we have no
2525 hope of synthesizing a multiplication. Just
2529 /* If we get here, COST_LIMIT is less restrictive than the
2530 one recorded in the hash table, so we may be able to
2531 synthesize a multiplication. Proceed as if we didn't
2532 have the cache entry. */
2536 if (CHEAPER_MULT_COST (cost_limit, &alg_hash[hash_index].cost))
2537 /* The cached algorithm shows that this multiplication
2538 requires more cost than COST_LIMIT. Just return. This
2539 way, we don't clobber this cache entry with
2540 alg_impossible but retain useful information. */
2552 goto do_alg_addsub_t_m2;
2554 case alg_add_factor:
2555 case alg_sub_factor:
2556 goto do_alg_addsub_factor;
2559 goto do_alg_add_t2_m;
2562 goto do_alg_sub_t2_m;
2570 /* If we have a group of zero bits at the low-order part of T, try
2571 multiplying by the remaining bits and then doing a shift. */
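/* E.g. (illustration): t == 24 == 11000 (binary) gives m == 3 and
   q == 3, so we synthesize x*3 recursively and then emit one shift
   left by 3.  */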
2576 m = floor_log2 (t & -t); /* m = number of low zero bits */
2580 /* The function expand_shift will choose between a shift and
2581 a sequence of additions, so the observed cost is given as
2582 MIN (m * add_cost[mode], shift_cost[mode][m]). */
2583 op_cost = m * add_cost[mode];
2584 if (shift_cost[mode][m] < op_cost)
2585 op_cost = shift_cost[mode][m];
2586 new_limit.cost = best_cost.cost - op_cost;
2587 new_limit.latency = best_cost.latency - op_cost;
2588 synth_mult (alg_in, q, &new_limit, mode);
2590 alg_in->cost.cost += op_cost;
2591 alg_in->cost.latency += op_cost;
2592 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2594 struct algorithm *x;
2595 best_cost = alg_in->cost;
2596 x = alg_in, alg_in = best_alg, best_alg = x;
2597 best_alg->log[best_alg->ops] = m;
2598 best_alg->op[best_alg->ops] = alg_shift;
2605 /* If we have an odd number, add or subtract one. */
2608 unsigned HOST_WIDE_INT w;
2611 for (w = 1; (w & t) != 0; w <<= 1)
2613 /* If T was -1, then W will be zero after the loop. This is another
2614 case where T ends with ...111. Handling this with (T + 1) and
2615 subtracting 1 produces slightly better code and results in much faster
2616 algorithm selection than treating it like the ...0111 case
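/* Illustration (added): t == 7 ends in ...111, so we synthesize
   (7 + 1) == 8 with a single shift and then subtract the multiplicand:
   x*8 - x.  Conversely t == 5 ends in ...01, so we synthesize 4 and
   add: x*4 + x.  */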
2620 /* Reject the case where t is 3, so that
2621 we prefer addition in that case. */
2624 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2626 op_cost = add_cost[mode];
2627 new_limit.cost = best_cost.cost - op_cost;
2628 new_limit.latency = best_cost.latency - op_cost;
2629 synth_mult (alg_in, t + 1, &new_limit, mode);
2631 alg_in->cost.cost += op_cost;
2632 alg_in->cost.latency += op_cost;
2633 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2635 struct algorithm *x;
2636 best_cost = alg_in->cost;
2637 x = alg_in, alg_in = best_alg, best_alg = x;
2638 best_alg->log[best_alg->ops] = 0;
2639 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2644 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2646 op_cost = add_cost[mode];
2647 new_limit.cost = best_cost.cost - op_cost;
2648 new_limit.latency = best_cost.latency - op_cost;
2649 synth_mult (alg_in, t - 1, &new_limit, mode);
2651 alg_in->cost.cost += op_cost;
2652 alg_in->cost.latency += op_cost;
2653 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2655 struct algorithm *x;
2656 best_cost = alg_in->cost;
2657 x = alg_in, alg_in = best_alg, best_alg = x;
2658 best_alg->log[best_alg->ops] = 0;
2659 best_alg->op[best_alg->ops] = alg_add_t_m2;
2666 /* Look for factors of t of the form
2667 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2668 If we find such a factor, we can multiply by t using an algorithm that
2669 multiplies by q, shifts the result by m, and adds or subtracts the unshifted result.
2671 We search for large factors first and loop down, even if large factors
2672 are less probable than small; if we find a large factor we will find a
2673 good sequence quickly, and therefore be able to prune (by decreasing
2674 COST_LIMIT) the search. */
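/* Illustrative instance: t == 15 factors as q(2**m + 1) with q == 3
   and m == 2, so we synthesize x*3 (itself one shift/add) and then
   compute total * 4 + total, two cheap operations in all.  */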
2676 do_alg_addsub_factor:
2677 for (m = floor_log2 (t - 1); m >= 2; m--)
2679 unsigned HOST_WIDE_INT d;
2681 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2682 if (t % d == 0 && t > d && m < maxm
2683 && (!cache_hit || cache_alg == alg_add_factor))
2685 /* If the target has a cheap shift-and-add instruction use
2686 that in preference to a shift insn followed by an add insn.
2687 Assume that the shift-and-add is "atomic" with a latency
2688 equal to its cost, otherwise assume that on superscalar
2689 hardware the shift may be executed concurrently with the
2690 earlier steps in the algorithm. */
2691 op_cost = add_cost[mode] + shift_cost[mode][m];
2692 if (shiftadd_cost[mode][m] < op_cost)
2694 op_cost = shiftadd_cost[mode][m];
2695 op_latency = op_cost;
2698 op_latency = add_cost[mode];
2700 new_limit.cost = best_cost.cost - op_cost;
2701 new_limit.latency = best_cost.latency - op_latency;
2702 synth_mult (alg_in, t / d, &new_limit, mode);
2704 alg_in->cost.cost += op_cost;
2705 alg_in->cost.latency += op_latency;
2706 if (alg_in->cost.latency < op_cost)
2707 alg_in->cost.latency = op_cost;
2708 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2710 struct algorithm *x;
2711 best_cost = alg_in->cost;
2712 x = alg_in, alg_in = best_alg, best_alg = x;
2713 best_alg->log[best_alg->ops] = m;
2714 best_alg->op[best_alg->ops] = alg_add_factor;
2716 /* Other factors will have been taken care of in the recursion. */
2720 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2721 if (t % d == 0 && t > d && m < maxm
2722 && (!cache_hit || cache_alg == alg_sub_factor))
2724 /* If the target has a cheap shift-and-subtract insn use
2725 that in preference to a shift insn followed by a sub insn.
2726 Assume that the shift-and-sub is "atomic" with a latency
2727 equal to its cost, otherwise assume that on superscalar
2728 hardware the shift may be executed concurrently with the
2729 earlier steps in the algorithm. */
2730 op_cost = add_cost[mode] + shift_cost[mode][m];
2731 if (shiftsub_cost[mode][m] < op_cost)
2733 op_cost = shiftsub_cost[mode][m];
2734 op_latency = op_cost;
2737 op_latency = add_cost[mode];
2739 new_limit.cost = best_cost.cost - op_cost;
2740 new_limit.latency = best_cost.latency - op_latency;
2741 synth_mult (alg_in, t / d, &new_limit, mode);
2743 alg_in->cost.cost += op_cost;
2744 alg_in->cost.latency += op_latency;
2745 if (alg_in->cost.latency < op_cost)
2746 alg_in->cost.latency = op_cost;
2747 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2749 struct algorithm *x;
2750 best_cost = alg_in->cost;
2751 x = alg_in, alg_in = best_alg, best_alg = x;
2752 best_alg->log[best_alg->ops] = m;
2753 best_alg->op[best_alg->ops] = alg_sub_factor;
2761 /* Try shift-and-add (load effective address) instructions,
2762 i.e. do a*3, a*5, a*9. */
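/* E.g. (illustration): t == 9 is matched here with m == 3, giving the
   single shift-and-add total := x*8 + x, the kind of operation an x86
   "lea" instruction performs in one step.  */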
2769 if (m >= 0 && m < maxm)
2771 op_cost = shiftadd_cost[mode][m];
2772 new_limit.cost = best_cost.cost - op_cost;
2773 new_limit.latency = best_cost.latency - op_cost;
2774 synth_mult (alg_in, (t - 1) >> m, &new_limit, mode);
2776 alg_in->cost.cost += op_cost;
2777 alg_in->cost.latency += op_cost;
2778 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2780 struct algorithm *x;
2781 best_cost = alg_in->cost;
2782 x = alg_in, alg_in = best_alg, best_alg = x;
2783 best_alg->log[best_alg->ops] = m;
2784 best_alg->op[best_alg->ops] = alg_add_t2_m;
2794 if (m >= 0 && m < maxm)
2796 op_cost = shiftsub_cost[mode][m];
2797 new_limit.cost = best_cost.cost - op_cost;
2798 new_limit.latency = best_cost.latency - op_cost;
2799 synth_mult (alg_in, (t + 1) >> m, &new_limit, mode);
2801 alg_in->cost.cost += op_cost;
2802 alg_in->cost.latency += op_cost;
2803 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2805 struct algorithm *x;
2806 best_cost = alg_in->cost;
2807 x = alg_in, alg_in = best_alg, best_alg = x;
2808 best_alg->log[best_alg->ops] = m;
2809 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2817 /* If best_cost has not decreased, we have not found any algorithm. */
2818 if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
2820 /* We failed to find an algorithm. Record alg_impossible for
2821 this case (that is, <T, MODE, COST_LIMIT>) so that next time
2822 we are asked to find an algorithm for T within the same or
2823 lower COST_LIMIT, we can immediately return to the
2825 alg_hash[hash_index].t = t;
2826 alg_hash[hash_index].mode = mode;
2827 alg_hash[hash_index].alg = alg_impossible;
2828 alg_hash[hash_index].cost = *cost_limit;
2832 /* Cache the result. */
2835 alg_hash[hash_index].t = t;
2836 alg_hash[hash_index].mode = mode;
2837 alg_hash[hash_index].alg = best_alg->op[best_alg->ops];
2838 alg_hash[hash_index].cost.cost = best_cost.cost;
2839 alg_hash[hash_index].cost.latency = best_cost.latency;
2842 /* If we are getting too long a sequence for `struct algorithm'
2843 to record, make this search fail. */
2844 if (best_alg->ops == MAX_BITS_PER_WORD)
2847 /* Copy the algorithm from temporary space to the space at alg_out.
2848 We avoid using structure assignment because the majority of
2849 best_alg is normally undefined, and this is a critical function. */
2850 alg_out->ops = best_alg->ops + 1;
2851 alg_out->cost = best_cost;
2852 memcpy (alg_out->op, best_alg->op,
2853 alg_out->ops * sizeof *alg_out->op);
2854 memcpy (alg_out->log, best_alg->log,
2855 alg_out->ops * sizeof *alg_out->log);
2858 /* Find the cheapest way of multiplying a value of mode MODE by VAL.
2859 Try three variations:
2861 - a shift/add sequence based on VAL itself
2862 - a shift/add sequence based on -VAL, followed by a negation
2863 - a shift/add sequence based on VAL - 1, followed by an addition.
2865 Return true if the cheapest of these costs less than MULT_COST,
2866 describing the algorithm in *ALG and final fixup in *VARIANT. */
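/* For example (illustrative): VAL == -5 is typically cheapest as the
   shift/add sequence for 5 followed by a negation (negate_variant),
   while the VAL - 1 form with a final addition (add_variant) is the
   one that pays off for the multipliers used in division by constants
   (see below).  */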
2869 choose_mult_variant (enum machine_mode mode, HOST_WIDE_INT val,
2870 struct algorithm *alg, enum mult_variant *variant,
2873 struct algorithm alg2;
2874 struct mult_cost limit;
2877 /* Fail quickly for impossible bounds. */
2881 /* Ensure that mult_cost provides a reasonable upper bound.
2882 Any constant multiplication can be performed with fewer
2883 than 2 * bits additions. */
2884 op_cost = 2 * GET_MODE_BITSIZE (mode) * add_cost[mode];
2885 if (mult_cost > op_cost)
2886 mult_cost = op_cost;
2888 *variant = basic_variant;
2889 limit.cost = mult_cost;
2890 limit.latency = mult_cost;
2891 synth_mult (alg, val, &limit, mode);
2893 /* This works only if the inverted value actually fits in an
2895 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2897 op_cost = neg_cost[mode];
2898 if (MULT_COST_LESS (&alg->cost, mult_cost))
2900 limit.cost = alg->cost.cost - op_cost;
2901 limit.latency = alg->cost.latency - op_cost;
2905 limit.cost = mult_cost - op_cost;
2906 limit.latency = mult_cost - op_cost;
2909 synth_mult (&alg2, -val, &limit, mode);
2910 alg2.cost.cost += op_cost;
2911 alg2.cost.latency += op_cost;
2912 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2913 *alg = alg2, *variant = negate_variant;
2916 /* This proves very useful for division-by-constant. */
2917 op_cost = add_cost[mode];
2918 if (MULT_COST_LESS (&alg->cost, mult_cost))
2920 limit.cost = alg->cost.cost - op_cost;
2921 limit.latency = alg->cost.latency - op_cost;
2925 limit.cost = mult_cost - op_cost;
2926 limit.latency = mult_cost - op_cost;
2929 synth_mult (&alg2, val - 1, &limit, mode);
2930 alg2.cost.cost += op_cost;
2931 alg2.cost.latency += op_cost;
2932 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2933 *alg = alg2, *variant = add_variant;
2935 return MULT_COST_LESS (&alg->cost, mult_cost);
2938 /* A subroutine of expand_mult, used for constant multiplications.
2939 Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
2940 convenient. Use the shift/add sequence described by ALG and apply
2941 the final fixup specified by VARIANT. */
2944 expand_mult_const (enum machine_mode mode, rtx op0, HOST_WIDE_INT val,
2945 rtx target, const struct algorithm *alg,
2946 enum mult_variant variant)
2948 HOST_WIDE_INT val_so_far;
2949 rtx insn, accum, tem;
2951 enum machine_mode nmode;
2953 /* Avoid referencing memory over and over: for speed, but also
2954 for correctness when mem is volatile. */
2956 op0 = force_reg (mode, op0);
2958 /* ACCUM starts out either as OP0 or as a zero, depending on
2959 the first operation. */
2961 if (alg->op[0] == alg_zero)
2963 accum = copy_to_mode_reg (mode, const0_rtx);
2966 else if (alg->op[0] == alg_m)
2968 accum = copy_to_mode_reg (mode, op0);
2974 for (opno = 1; opno < alg->ops; opno++)
2976 int log = alg->log[opno];
2977 rtx shift_subtarget = optimize ? 0 : accum;
2979 = (opno == alg->ops - 1 && target != 0 && variant != add_variant
2982 rtx accum_target = optimize ? 0 : accum;
2984 switch (alg->op[opno])
2987 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2988 build_int_cst (NULL_TREE, log),
2994 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2995 build_int_cst (NULL_TREE, log),
2997 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2998 add_target ? add_target : accum_target);
2999 val_so_far += (HOST_WIDE_INT) 1 << log;
3003 tem = expand_shift (LSHIFT_EXPR, mode, op0,
3004 build_int_cst (NULL_TREE, log),
3006 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
3007 add_target ? add_target : accum_target);
3008 val_so_far -= (HOST_WIDE_INT) 1 << log;
3012 accum = expand_shift (LSHIFT_EXPR, mode, accum,
3013 build_int_cst (NULL_TREE, log),
3016 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
3017 add_target ? add_target : accum_target);
3018 val_so_far = (val_so_far << log) + 1;
3022 accum = expand_shift (LSHIFT_EXPR, mode, accum,
3023 build_int_cst (NULL_TREE, log),
3024 shift_subtarget, 0);
3025 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
3026 add_target ? add_target : accum_target);
3027 val_so_far = (val_so_far << log) - 1;
3030 case alg_add_factor:
3031 tem = expand_shift (LSHIFT_EXPR, mode, accum,
3032 build_int_cst (NULL_TREE, log),
3034 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
3035 add_target ? add_target : accum_target);
3036 val_so_far += val_so_far << log;
3039 case alg_sub_factor:
3040 tem = expand_shift (LSHIFT_EXPR, mode, accum,
3041 build_int_cst (NULL_TREE, log),
3043 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
3045 ? add_target : (optimize ? 0 : tem)));
3046 val_so_far = (val_so_far << log) - val_so_far;
3053 /* Write a REG_EQUAL note on the last insn so that we can cse
3054 multiplication sequences. Note that if ACCUM is a SUBREG,
3055 we've set the inner register and must properly indicate
3058 tem = op0, nmode = mode;
3059 if (GET_CODE (accum) == SUBREG)
3061 nmode = GET_MODE (SUBREG_REG (accum));
3062 tem = gen_lowpart (nmode, op0);
3065 insn = get_last_insn ();
3066 set_unique_reg_note (insn, REG_EQUAL,
3067 gen_rtx_MULT (nmode, tem, GEN_INT (val_so_far)));
3070 if (variant == negate_variant)
3072 val_so_far = -val_so_far;
3073 accum = expand_unop (mode, neg_optab, accum, target, 0);
3075 else if (variant == add_variant)
3077 val_so_far = val_so_far + 1;
3078 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
3081 /* Compare only the bits of val and val_so_far that are significant
3082 in the result mode, to avoid sign-/zero-extension confusion. */
3083 val &= GET_MODE_MASK (mode);
3084 val_so_far &= GET_MODE_MASK (mode);
3085 gcc_assert (val == val_so_far);
3090 /* Perform a multiplication and return an rtx for the result.
3091 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3092 TARGET is a suggestion for where to store the result (an rtx).
3094 We check specially for a constant integer as OP1.
3095 If you want this check for OP0 as well, then before calling
3096 you should swap the two operands if OP0 would be constant. */
3099 expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3102 enum mult_variant variant;
3103 struct algorithm algorithm;
3106 /* Handling const0_rtx here allows us to use zero as a rogue value for
3108 if (op1 == const0_rtx)
3110 if (op1 == const1_rtx)
3112 if (op1 == constm1_rtx)
3113 return expand_unop (mode,
3114 GET_MODE_CLASS (mode) == MODE_INT
3115 && !unsignedp && flag_trapv
3116 ? negv_optab : neg_optab,
3119 /* These are the operations that are potentially turned into a sequence
3120 of shifts and additions. */
3121 if (SCALAR_INT_MODE_P (mode)
3122 && (unsignedp || !flag_trapv))
3124 HOST_WIDE_INT coeff = 0;
3125 rtx fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
3127 /* synth_mult does an `unsigned int' multiply. As long as the mode is
3128 less than or equal in size to `unsigned int' this doesn't matter.
3129 If the mode is larger than `unsigned int', then synth_mult works
3130 only if the constant value exactly fits in an `unsigned int' without
3131 any truncation. This means that multiplying by negative values does
3132 not work; results are off by 2^32 on a 32 bit machine. */
3134 if (GET_CODE (op1) == CONST_INT)
3136 /* Attempt to handle multiplication of DImode values by negative
3137 coefficients, by performing the multiplication by a positive
3138 multiplier and then inverting the result. */
3139 if (INTVAL (op1) < 0
3140 && GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
3142 /* It's safe to use -INTVAL (op1) even for INT_MIN, as the
3143 result is interpreted as an unsigned coefficient.
3144 Exclude cost of op0 from max_cost to match the cost
3145 calculation of the synth_mult. */
3146 max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET)
3149 && choose_mult_variant (mode, -INTVAL (op1), &algorithm,
3150 &variant, max_cost))
3152 rtx temp = expand_mult_const (mode, op0, -INTVAL (op1),
3153 NULL_RTX, &algorithm,
3155 return expand_unop (mode, neg_optab, temp, target, 0);
3158 else coeff = INTVAL (op1);
3160 else if (GET_CODE (op1) == CONST_DOUBLE)
3162 /* If we are multiplying in DImode, it may still be a win
3163 to try to work with shifts and adds. */
3164 if (CONST_DOUBLE_HIGH (op1) == 0)
3165 coeff = CONST_DOUBLE_LOW (op1);
3166 else if (CONST_DOUBLE_LOW (op1) == 0
3167 && EXACT_POWER_OF_2_OR_ZERO_P (CONST_DOUBLE_HIGH (op1)))
3169 int shift = floor_log2 (CONST_DOUBLE_HIGH (op1))
3170 + HOST_BITS_PER_WIDE_INT;
3171 return expand_shift (LSHIFT_EXPR, mode, op0,
3172 build_int_cst (NULL_TREE, shift),
3177 /* We used to test optimize here, on the grounds that it's better to
3178 produce a smaller program when -O is not used. But this causes
3179 such a terrible slowdown sometimes that it seems better to always
3183 /* Special case powers of two. */
3184 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3185 return expand_shift (LSHIFT_EXPR, mode, op0,
3186 build_int_cst (NULL_TREE, floor_log2 (coeff)),
3189 /* Exclude cost of op0 from max_cost to match the cost
3190 calculation of the synth_mult. */
3191 max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET);
3192 if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3194 return expand_mult_const (mode, op0, coeff, target,
3195 &algorithm, variant);
3199 if (GET_CODE (op0) == CONST_DOUBLE)
3206 /* Expand x*2.0 as x+x. */
3207 if (GET_CODE (op1) == CONST_DOUBLE
3208 && SCALAR_FLOAT_MODE_P (mode))
3211 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
3213 if (REAL_VALUES_EQUAL (d, dconst2))
3215 op0 = force_reg (GET_MODE (op0), op0);
3216 return expand_binop (mode, add_optab, op0, op0,
3217 target, unsignedp, OPTAB_LIB_WIDEN);
3221 /* This used to use umul_optab if unsigned, but for non-widening multiply
3222 there is no difference between signed and unsigned. */
3223 op0 = expand_binop (mode,
3225 && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
3226 ? smulv_optab : smul_optab,
3227 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
3232 /* Return the smallest n such that 2**n >= X. */
3235 ceil_log2 (unsigned HOST_WIDE_INT x)
3237 return floor_log2 (x - 1) + 1;
3240 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
3241 replace division by D, and put the least significant N bits of the result
3242 in *MULTIPLIER_PTR and return the most significant bit.
3244 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
3245 needed precision is in PRECISION (should be <= N).
3247 PRECISION should be as small as possible so this function can choose
3248 the multiplier more freely.
3250 The rounded-up logarithm of D is placed in *lgup_ptr. A shift count that
3251 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
3253 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
3254 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
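/* Classic example (for illustration): for d == 3 with n == precision == 32,
   this chooses the multiplier 0xAAAAAAAB with *POST_SHIFT_PTR == 1, so

	x / 3 == (unsigned) (((unsigned long long) x * 0xAAAAAAAB) >> 33)

   for all 32-bit unsigned x.  */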
3257 unsigned HOST_WIDE_INT
3258 choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
3259 rtx *multiplier_ptr, int *post_shift_ptr, int *lgup_ptr)
3261 HOST_WIDE_INT mhigh_hi, mlow_hi;
3262 unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
3263 int lgup, post_shift;
3265 unsigned HOST_WIDE_INT nl, dummy1;
3266 HOST_WIDE_INT nh, dummy2;
3268 /* lgup = ceil(log2(divisor)); */
3269 lgup = ceil_log2 (d);
3271 gcc_assert (lgup <= n);
3274 pow2 = n + lgup - precision;
3276 /* We could handle this with some effort, but this case is much
3277 better handled directly with a scc insn, so rely on caller using
3279 gcc_assert (pow != 2 * HOST_BITS_PER_WIDE_INT);
3281 /* mlow = 2^(N + lgup)/d */
3282 if (pow >= HOST_BITS_PER_WIDE_INT)
3284 nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
3290 nl = (unsigned HOST_WIDE_INT) 1 << pow;
3292 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
3293 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
3295 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
3296 if (pow2 >= HOST_BITS_PER_WIDE_INT)
3297 nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
3299 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
3300 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
3301 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
3303 gcc_assert (!mhigh_hi || nh - d < d);
3304 gcc_assert (mhigh_hi <= 1 && mlow_hi <= 1);
3305 /* Assert that mlow < mhigh. */
3306 gcc_assert (mlow_hi < mhigh_hi
3307 || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo));
3309 /* If precision == N, then mlow, mhigh exceed 2^N
3310 (but they do not exceed 2^(N+1)). */
3312 /* Reduce to lowest terms. */
3313 for (post_shift = lgup; post_shift > 0; post_shift--)
3315 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
3316 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
3326 *post_shift_ptr = post_shift;
3328 if (n < HOST_BITS_PER_WIDE_INT)
3330 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
3331 *multiplier_ptr = GEN_INT (mhigh_lo & mask);
3332 return mhigh_lo >= mask;
3336 *multiplier_ptr = GEN_INT (mhigh_lo);
3341 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
3342 congruent to 1 (mod 2**N). */
3344 static unsigned HOST_WIDE_INT
3345 invert_mod2n (unsigned HOST_WIDE_INT x, int n)
3347 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
3349 /* The algorithm notes that the choice y = x satisfies
3350 x*y == 1 mod 2^3, since x is assumed odd.
3351 Each iteration doubles the number of bits of significance in y. */
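/* Worked instance (illustrative): x == 3, n == 8.  Starting from
   y == 3 (an inverse mod 2^3), the iteration gives

	y = 3 * (2 - 3*3)     & 0xff  ==  235	-- inverse mod 2^6
	y = 235 * (2 - 3*235) & 0xff  ==  171	-- inverse mod 2^8

   and indeed 3 * 171 == 513 == 2*256 + 1.  */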
3353 unsigned HOST_WIDE_INT mask;
3354 unsigned HOST_WIDE_INT y = x;
3357 mask = (n == HOST_BITS_PER_WIDE_INT
3358 ? ~(unsigned HOST_WIDE_INT) 0
3359 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
3363 y = y * (2 - x*y) & mask; /* Modulo 2^N */
3369 /* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
3370 flavor of OP0 and OP1. ADJ_OPERAND is already the high half of the
3371 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
3372 to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to