/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "insn-config.h"
#include "langhooks.h"
static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT, rtx);
static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT, rtx);
static rtx extract_fixed_bit_field (enum machine_mode, rtx,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, rtx, int);
static rtx mask_rtx (enum machine_mode, int, int, int);
static rtx lshift_value (enum machine_mode, rtx, int, int);
static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, int);
static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx);
static rtx expand_smod_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
static rtx expand_sdiv_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
/* Test whether a value is zero or a power of two.  */
#define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
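/* Illustrative values (commentary): subtracting 1 from a power of two
   clears its single set bit, so
     EXACT_POWER_OF_2_OR_ZERO_P (8)  -> (8 & 7) == 0   -> true
     EXACT_POWER_OF_2_OR_ZERO_P (6)  -> (6 & 5) == 4   -> false
     EXACT_POWER_OF_2_OR_ZERO_P (0)  -> (0 & -1) == 0  -> true.  */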
/* Nonzero means divide or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   sequences.  */

static bool sdiv_pow2_cheap[NUM_MACHINE_MODES];
static bool smod_pow2_cheap[NUM_MACHINE_MODES];
#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
#endif
/* For compilers that support multiple targets with different word sizes,
   MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD.  An example
   is the H8/300(H) compiler.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif
/* Reduce conditional compilation elsewhere.  */
#ifndef HAVE_insv
#define HAVE_insv	0
#define CODE_FOR_insv	CODE_FOR_nothing
#define gen_insv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extv
#define HAVE_extv	0
#define CODE_FOR_extv	CODE_FOR_nothing
#define gen_extv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extzv
#define HAVE_extzv	0
#define CODE_FOR_extzv	CODE_FOR_nothing
#define gen_extzv(a,b,c,d) NULL_RTX
#endif
/* Cost of various pieces of RTL.  Note that some of these are indexed by
   shift count and some by mode.  */
static int zero_cost;
static int add_cost[NUM_MACHINE_MODES];
static int neg_cost[NUM_MACHINE_MODES];
static int shift_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
static int shiftadd_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
static int shiftsub_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
static int mul_cost[NUM_MACHINE_MODES];
static int sdiv_cost[NUM_MACHINE_MODES];
static int udiv_cost[NUM_MACHINE_MODES];
static int mul_widen_cost[NUM_MACHINE_MODES];
static int mul_highpart_cost[NUM_MACHINE_MODES];
static void
init_expmed (void)
{
  struct
  {
    struct rtx_def reg;         rtunion reg_fld[2];
    struct rtx_def plus;        rtunion plus_fld1;
    struct rtx_def neg;
    struct rtx_def mult;        rtunion mult_fld1;
    struct rtx_def sdiv;        rtunion sdiv_fld1;
    struct rtx_def udiv;        rtunion udiv_fld1;
    struct rtx_def zext;
    struct rtx_def sdiv_32;     rtunion sdiv_32_fld1;
    struct rtx_def smod_32;     rtunion smod_32_fld1;
    struct rtx_def wide_mult;   rtunion wide_mult_fld1;
    struct rtx_def wide_lshr;   rtunion wide_lshr_fld1;
    struct rtx_def wide_trunc;
    struct rtx_def shift;       rtunion shift_fld1;
    struct rtx_def shift_mult;  rtunion shift_mult_fld1;
    struct rtx_def shift_add;   rtunion shift_add_fld1;
    struct rtx_def shift_sub;   rtunion shift_sub_fld1;
  } all;
  rtx pow2[MAX_BITS_PER_WORD];
  rtx cint[MAX_BITS_PER_WORD];
  int m, n;
  enum machine_mode mode, wider_mode;

  zero_cost = rtx_cost (const0_rtx, 0);

  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      pow2[m] = GEN_INT ((HOST_WIDE_INT) 1 << m);
      cint[m] = GEN_INT (m);
    }
  memset (&all, 0, sizeof all);

  PUT_CODE (&all.reg, REG);
  /* Avoid using hard regs in ways which may be unsupported.  */
  SET_REGNO (&all.reg, LAST_VIRTUAL_REGISTER + 1);

  PUT_CODE (&all.plus, PLUS);
  XEXP (&all.plus, 0) = &all.reg;
  XEXP (&all.plus, 1) = &all.reg;

  PUT_CODE (&all.neg, NEG);
  XEXP (&all.neg, 0) = &all.reg;

  PUT_CODE (&all.mult, MULT);
  XEXP (&all.mult, 0) = &all.reg;
  XEXP (&all.mult, 1) = &all.reg;

  PUT_CODE (&all.sdiv, DIV);
  XEXP (&all.sdiv, 0) = &all.reg;
  XEXP (&all.sdiv, 1) = &all.reg;

  PUT_CODE (&all.udiv, UDIV);
  XEXP (&all.udiv, 0) = &all.reg;
  XEXP (&all.udiv, 1) = &all.reg;

  PUT_CODE (&all.sdiv_32, DIV);
  XEXP (&all.sdiv_32, 0) = &all.reg;
  XEXP (&all.sdiv_32, 1) = 32 < MAX_BITS_PER_WORD ? cint[32] : GEN_INT (32);

  PUT_CODE (&all.smod_32, MOD);
  XEXP (&all.smod_32, 0) = &all.reg;
  XEXP (&all.smod_32, 1) = XEXP (&all.sdiv_32, 1);

  PUT_CODE (&all.zext, ZERO_EXTEND);
  XEXP (&all.zext, 0) = &all.reg;

  PUT_CODE (&all.wide_mult, MULT);
  XEXP (&all.wide_mult, 0) = &all.zext;
  XEXP (&all.wide_mult, 1) = &all.zext;

  PUT_CODE (&all.wide_lshr, LSHIFTRT);
  XEXP (&all.wide_lshr, 0) = &all.wide_mult;

  PUT_CODE (&all.wide_trunc, TRUNCATE);
  XEXP (&all.wide_trunc, 0) = &all.wide_lshr;

  PUT_CODE (&all.shift, ASHIFT);
  XEXP (&all.shift, 0) = &all.reg;

  PUT_CODE (&all.shift_mult, MULT);
  XEXP (&all.shift_mult, 0) = &all.reg;

  PUT_CODE (&all.shift_add, PLUS);
  XEXP (&all.shift_add, 0) = &all.shift_mult;
  XEXP (&all.shift_add, 1) = &all.reg;

  PUT_CODE (&all.shift_sub, MINUS);
  XEXP (&all.shift_sub, 0) = &all.shift_mult;
  XEXP (&all.shift_sub, 1) = &all.reg;

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      PUT_MODE (&all.reg, mode);
      PUT_MODE (&all.plus, mode);
      PUT_MODE (&all.neg, mode);
      PUT_MODE (&all.mult, mode);
      PUT_MODE (&all.sdiv, mode);
      PUT_MODE (&all.udiv, mode);
      PUT_MODE (&all.sdiv_32, mode);
      PUT_MODE (&all.smod_32, mode);
      PUT_MODE (&all.wide_trunc, mode);
      PUT_MODE (&all.shift, mode);
      PUT_MODE (&all.shift_mult, mode);
      PUT_MODE (&all.shift_add, mode);
      PUT_MODE (&all.shift_sub, mode);

      add_cost[mode] = rtx_cost (&all.plus, SET);
      neg_cost[mode] = rtx_cost (&all.neg, SET);
      mul_cost[mode] = rtx_cost (&all.mult, SET);
      sdiv_cost[mode] = rtx_cost (&all.sdiv, SET);
      udiv_cost[mode] = rtx_cost (&all.udiv, SET);

      sdiv_pow2_cheap[mode] = (rtx_cost (&all.sdiv_32, SET)
                               <= 2 * add_cost[mode]);
      smod_pow2_cheap[mode] = (rtx_cost (&all.smod_32, SET)
                               <= 4 * add_cost[mode]);
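      /* Illustration (commentary; target costs are assumed): if the
         rtx_cost of a signed division by the constant 32 is at most
         two additions, sdiv_pow2_cheap[mode] is set and the division
         expanders later in this file emit the divide itself for
         power-of-two divisors rather than a branchy shift sequence.  */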
      wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
        {
          PUT_MODE (&all.zext, wider_mode);
          PUT_MODE (&all.wide_mult, wider_mode);
          PUT_MODE (&all.wide_lshr, wider_mode);
          XEXP (&all.wide_lshr, 1) = GEN_INT (GET_MODE_BITSIZE (mode));

          mul_widen_cost[wider_mode] = rtx_cost (&all.wide_mult, SET);
          mul_highpart_cost[mode] = rtx_cost (&all.wide_trunc, SET);
        }

      shift_cost[mode][0] = 0;
      shiftadd_cost[mode][0] = shiftsub_cost[mode][0] = add_cost[mode];

      n = MIN (MAX_BITS_PER_WORD, GET_MODE_BITSIZE (mode));
      for (m = 1; m < n; m++)
        {
          XEXP (&all.shift, 1) = cint[m];
          XEXP (&all.shift_mult, 1) = pow2[m];

          shift_cost[mode][m] = rtx_cost (&all.shift, SET);
          shiftadd_cost[mode][m] = rtx_cost (&all.shift_add, SET);
          shiftsub_cost[mode][m] = rtx_cost (&all.shift_sub, SET);
        }
    }
}
/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (enum machine_mode mode, rtx x)
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}
/* Report on the availability of insv/extv/extzv and the desired mode
   of each of their operands.  Returns MAX_MACHINE_MODE if HAVE_foo
   is false; else the mode of the specified operand.  If OPNO is -1,
   all the caller cares about is whether the insn is available.  */

enum machine_mode
mode_for_extraction (enum extraction_pattern pattern, int opno)
{
  const struct insn_data *data;

  switch (pattern)
    {
    case EP_insv:
      if (HAVE_insv)
        {
          data = &insn_data[CODE_FOR_insv];
          break;
        }
      return MAX_MACHINE_MODE;

    case EP_extv:
      if (HAVE_extv)
        {
          data = &insn_data[CODE_FOR_extv];
          break;
        }
      return MAX_MACHINE_MODE;

    case EP_extzv:
      if (HAVE_extzv)
        {
          data = &insn_data[CODE_FOR_extzv];
          break;
        }
      return MAX_MACHINE_MODE;

    default:
      gcc_unreachable ();
    }

  if (opno == -1)
    return VOIDmode;

  /* Everyone who uses this function used to follow it with
     if (result == VOIDmode) result = word_mode; */
  if (data->operand[opno].mode == VOIDmode)
    return word_mode;
  return data->operand[opno].mode;
}
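/* Usage sketch (commentary): store_bit_field_1 below calls
     mode_for_extraction (EP_insv, 3)
   to find the mode the insv pattern wants for the inserted value;
   the result is that operand's mode, word_mode when the pattern left
   it unspecified, or MAX_MACHINE_MODE when the target has no insv.  */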
/* Return true if X, of mode MODE, matches the predicate for operand
   OPNO of instruction ICODE.  Allow volatile memories, regardless of
   the ambient volatile_ok setting.  */

static bool
check_predicate_volatile_ok (enum insn_code icode, int opno,
                             rtx x, enum machine_mode mode)
{
  bool save_volatile_ok, result;

  save_volatile_ok = volatile_ok;
  volatile_ok = true;
  result = insn_data[(int) icode].operand[opno].predicate (x, mode);
  volatile_ok = save_volatile_ok;

  return result;
}
/* A subroutine of store_bit_field, with the same arguments.  Return true
   if the operation could be implemented.

   If FALLBACK_P is true, fall back to store_fixed_bit_field if we have
   no other way of implementing the operation.  If FALLBACK_P is false,
   return false instead.  */

static bool
store_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                   unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
                   rtx value, bool fallback_p)
{
  unsigned int unit
    = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset, bitpos;
  rtx op0 = str_rtx;
  rtx orig_value = value;
  int byte_offset;

  enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
         but I think that is a mistake.  WORDS_BIG_ENDIAN is
         meaningful at a much higher level; when structures are copied
         between memory and regs, the higher-numbered regs
         always get higher addresses.  */
      int inner_mode_size = GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)));
      int outer_mode_size = GET_MODE_SIZE (GET_MODE (op0));

      byte_offset = 0;

      /* Paradoxical subregs need special handling on big endian machines.  */
      if (SUBREG_BYTE (op0) == 0 && inner_mode_size < outer_mode_size)
        {
          int difference = inner_mode_size - outer_mode_size;

          if (WORDS_BIG_ENDIAN)
            byte_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            byte_offset += difference % UNITS_PER_WORD;
        }
      else
        byte_offset = SUBREG_BYTE (op0);

      bitnum += byte_offset * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }
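  /* Worked example of the SUBREG normalization above (commentary):
     for (subreg:SI (reg:DI) 4), byte_offset is SUBREG_BYTE == 4, so a
     store at bit 3 of the SUBREG becomes a store at bit
     4 * BITS_PER_UNIT + 3 == 35 of the DI register.  */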
  /* No action is needed if the target is a register and if the field
     lies completely outside that register.  This can occur if the source
     code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
    return true;

  /* Use vec_set patterns for inserting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && (vec_set_optab->handlers[GET_MODE (op0)].insn_code
          != CODE_FOR_nothing)
      && fieldmode == GET_MODE_INNER (GET_MODE (op0))
      && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
      && !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      int icode = (int) vec_set_optab->handlers[outermode].insn_code;
      int pos = bitnum / GET_MODE_BITSIZE (innermode);
      rtx rtxpos = GEN_INT (pos);
      rtx src = value;
      rtx dest = op0;
      rtx pat, seq;
      enum machine_mode mode0 = insn_data[icode].operand[0].mode;
      enum machine_mode mode1 = insn_data[icode].operand[1].mode;
      enum machine_mode mode2 = insn_data[icode].operand[2].mode;

      start_sequence ();

      if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
        src = copy_to_mode_reg (mode1, src);

      if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
        rtxpos = copy_to_mode_reg (mode2, rtxpos);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
                  && (*insn_data[icode].operand[1].predicate) (src, mode1)
                  && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));
      pat = GEN_FCN (icode) (dest, src, rtxpos);
      seq = get_insns ();
      end_sequence ();
      if (pat)
        {
          emit_insn (seq);
          emit_insn (pat);
          return true;
        }
    }
  /* If the target is a register, overwriting the entire object, or storing
     a full-word or multi-word field can be done with just a SUBREG.

     If the target is memory, storing any naturally aligned field can be
     done with a simple store.  For targets that support fast unaligned
     memory, any naturally sized, unit aligned field can be done directly.  */

  offset = bitnum / unit;
  bitpos = bitnum % unit;
  byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
                + (offset * UNITS_PER_WORD);

  if (bitpos == 0
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (!MEM_P (op0)
          ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
             || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
             && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
          : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
             || (offset * BITS_PER_UNIT % bitsize == 0
                 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
    {
      if (MEM_P (op0))
        op0 = adjust_address (op0, fieldmode, offset);
      else if (GET_MODE (op0) != fieldmode)
        op0 = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0),
                                   byte_offset);
      emit_move_insn (op0, value);
      return true;
    }
  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  This must come after the entire register case above,
     since that case is valid for any mode.  The following cases are only
     valid for integral modes.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (MEM_P (op0))
          op0 = adjust_address (op0, imode, 0);
        else
          {
            gcc_assert (imode != BLKmode);
            op0 = gen_lowpart (imode, op0);
          }
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (MEM_P (op0))
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }
  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && !MEM_P (op0)
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (!MEM_P (op0)
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (movstrict_optab->handlers[fieldmode].insn_code
          != CODE_FOR_nothing))
    {
      int icode = movstrict_optab->handlers[fieldmode].insn_code;

      /* Get appropriate low part of the value being stored.  */
      if (GET_CODE (value) == CONST_INT || REG_P (value))
        value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
                 || GET_CODE (value) == LABEL_REF
                 || GET_CODE (value) == CONST))
        value = convert_to_mode (fieldmode, value, 0);

      if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
        value = copy_to_mode_reg (fieldmode, value);

      if (GET_CODE (op0) == SUBREG)
        {
          /* Else we've got some float mode source being extracted into
             a different float mode destination -- this combination of
             subregs results in Severe Tire Damage.  */
          gcc_assert (GET_MODE (SUBREG_REG (op0)) == fieldmode
                      || GET_MODE_CLASS (fieldmode) == MODE_INT
                      || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
          op0 = SUBREG_REG (op0);
        }

      emit_insn (GEN_FCN (icode)
                 (gen_rtx_SUBREG (fieldmode, op0,
                                  (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
                                  + (offset * UNITS_PER_WORD)),
                  value));

      return true;
    }
  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.
         However, only do that if the value is not BLKmode.  */

      unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;
      rtx last;

      /* This is the mode we must force value to, so that there will be enough
         subwords to extract.  Note that fieldmode will often (always?) be
         VOIDmode, because that is what store_field uses to indicate that this
         is a bit field, but passing VOIDmode to operand_subword_force
         is not allowed.  */
      fieldmode = GET_MODE (value);
      if (fieldmode == VOIDmode)
        fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);

      last = get_last_insn ();
      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          unsigned int wordnum = (backwards ? nwords - i - 1 : i);
          unsigned int bit_offset = (backwards
                                     ? MAX ((int) bitsize - ((int) i + 1)
                                            * BITS_PER_WORD,
                                            0)
                                     : (int) i * BITS_PER_WORD);
          rtx value_word = operand_subword_force (value, wordnum, fieldmode);

          if (!store_bit_field_1 (op0, MIN (BITS_PER_WORD,
                                            bitsize - i * BITS_PER_WORD),
                                  bitnum + bit_offset, word_mode,
                                  value_word, fallback_p))
            {
              delete_insns_since (last);
              return false;
            }
        }
      return true;
    }
  /* From here on we can assume that the field to be stored in fits
     within a single word, since it is shorter than a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (!MEM_P (op0))
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (!REG_P (op0))
            {
              /* Since this is a destination (lvalue), we can't copy
                 it to a pseudo.  We can remove a SUBREG that does not
                 change the size of the operand.  Such a SUBREG may
                 have been added above.  */
              gcc_assert (GET_CODE (op0) == SUBREG
                          && (GET_MODE_SIZE (GET_MODE (op0))
                              == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))));
              op0 = SUBREG_REG (op0);
            }
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, (offset * UNITS_PER_WORD));
        }
      offset = 0;
    }
  /* If VALUE has a floating-point or complex mode, access it as an
     integer of the corresponding size.  This can occur on a machine
     with 64 bit registers that uses SFmode for float.  It can also
     occur for unaligned float or complex fields.  */

  if (GET_MODE (value) != VOIDmode
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
    {
      value = gen_reg_rtx (int_mode_for_mode (GET_MODE (value)));
      emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
    }
  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && bitsize > 0
      && GET_MODE_BITSIZE (op_mode) >= bitsize
      && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
            && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode)))
      && insn_data[CODE_FOR_insv].operand[1].predicate (GEN_INT (bitsize),
                                                        VOIDmode)
      && check_predicate_volatile_ok (CODE_FOR_insv, 0, op0, VOIDmode))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      /* Add OFFSET into OP0's address.  */
      if (MEM_P (xop0))
        xop0 = adjust_address (xop0, byte_mode, offset);

      /* If xop0 is a register, we need it in OP_MODE
         to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
        /* We can't just change the mode, because this might clobber op0,
           and we will need the original value of op0 if insv fails.  */
        xop0 = gen_rtx_SUBREG (op_mode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
      if (REG_P (xop0) && GET_MODE (xop0) != op_mode)
        xop0 = gen_rtx_SUBREG (op_mode, xop0, 0);

      /* On big-endian machines, we count bits from the most significant.
         If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
        xbitpos = unit - bitsize - xbitpos;
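      /* Worked example (commentary): with unit == 32, bitsize == 8 and
         a big-endian bitpos of 4, an lsb-counting insv pattern is given
         xbitpos == 32 - 8 - 4 == 20, which names the same eight bits.  */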
      /* We have been counting XBITPOS within UNIT.
         Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && !MEM_P (xop0))
        xbitpos += GET_MODE_BITSIZE (op_mode) - unit;

      unit = GET_MODE_BITSIZE (op_mode);

      /* Convert VALUE to op_mode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != op_mode)
        {
          if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
            {
              /* Optimization: Don't bother really extending VALUE
                 if it has all the bits we will actually use.  However,
                 if we must narrow it, be sure we do it correctly.  */

              if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (op_mode))
                {
                  rtx tmp;

                  tmp = simplify_subreg (op_mode, value1, GET_MODE (value), 0);
                  if (! tmp)
                    tmp = simplify_gen_subreg (op_mode,
                                               force_reg (GET_MODE (value),
                                                          value1),
                                               GET_MODE (value), 0);
                  value1 = tmp;
                }
              else
                value1 = gen_lowpart (op_mode, value1);
            }
          else if (GET_CODE (value) == CONST_INT)
            value1 = gen_int_mode (INTVAL (value), op_mode);
          else
            /* Parse phase is supposed to make VALUE's data type
               match that of the component reference, which is a type
               at least as wide as the field; so VALUE should have
               a mode that corresponds to that type.  */
            gcc_assert (CONSTANT_P (value));
        }

      /* If this machine's insv insists on a register,
         get VALUE1 into a register.  */
      if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
             (value1, op_mode)))
        value1 = force_reg (op_mode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
        {
          emit_insn (pat);
          return true;
        }
      delete_insns_since (last);
    }
  /* If OP0 is a memory, try copying it to a register and seeing if a
     cheap register alternative is available.  */
  if (HAVE_insv && MEM_P (op0))
    {
      enum machine_mode bestmode;

      /* Get the mode to use for inserting into this field.  If OP0 is
         BLKmode, get the smallest mode consistent with the alignment.  If
         OP0 is a non-BLKmode object that is no wider than OP_MODE, use its
         mode.  Otherwise, use the smallest mode containing the field.  */

      if (GET_MODE (op0) == BLKmode
          || (op_mode != MAX_MACHINE_MODE
              && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (op_mode)))
        bestmode = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0),
                                  (op_mode == MAX_MACHINE_MODE
                                   ? VOIDmode : op_mode),
                                  MEM_VOLATILE_P (op0));
      else
        bestmode = GET_MODE (op0);

      if (bestmode != VOIDmode
          && GET_MODE_SIZE (bestmode) >= GET_MODE_SIZE (fieldmode)
          && !(SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
               && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
        {
          rtx last, tempreg, xop0;
          unsigned HOST_WIDE_INT xoffset, xbitpos;

          last = get_last_insn ();

          /* Adjust address to point to the containing unit of
             that mode.  Compute the offset as a multiple of this unit,
             counting in bytes.  */
          unit = GET_MODE_BITSIZE (bestmode);
          xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
          xbitpos = bitnum % unit;
          xop0 = adjust_address (op0, bestmode, xoffset);

          /* Fetch that unit, store the bitfield in it, then store
             the unit.  */
          tempreg = copy_to_reg (xop0);
          if (store_bit_field_1 (tempreg, bitsize, xbitpos,
                                 fieldmode, orig_value, false))
            {
              emit_move_insn (xop0, tempreg);
              return true;
            }
          delete_insns_since (last);
        }
    }

  if (!fallback_p)
    return false;

  store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
  return true;
}
/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.  */

void
store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                 unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
                 rtx value)
{
  if (!store_bit_field_1 (str_rtx, bitsize, bitnum, fieldmode, value, true))
    gcc_unreachable ();
}
/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0, offset by OFFSET bytes.
   (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
   (If OP0 is a register, it may be a full word or a narrower mode,
   but BITPOS still counts within a full word,
   which is significant on bigendian machines.)  */
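/* Illustration (commentary): on a little-endian target, storing a
   3-bit VALUE at BITPOS 5 of a byte amounts to
     op0 = (op0 & ~(7 << 5)) | ((VALUE & 7) << 5);
   mask_rtx builds the AND mask and lshift_value (or an explicit
   shift) positions the field, exactly as coded below.  */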
static void
store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT offset,
                       unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitpos, rtx value)
{
  enum machine_mode mode;
  unsigned int total_bits = BITS_PER_WORD;
  rtx temp;
  int all_zero = 0;
  int all_one = 0;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    {
      gcc_assert (!offset);
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        {
          store_split_bit_field (op0, bitsize, bitpos, value);
          return;
        }
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.
         We don't want a mode bigger than the destination.  */

      mode = GET_MODE (op0);
      if (GET_MODE_BITSIZE (mode) == 0
          || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
        mode = word_mode;
      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        {
          /* The only way this should occur is if the field spans word
             boundaries.  */
          store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
                                 value);
          return;
        }

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }
  mode = GET_MODE (op0);

  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (GET_CODE (value) == CONST_INT)
    {
      HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
        v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
        all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
                && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
               || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
        all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
                      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
        {
          if ((REG_P (value) || GET_CODE (value) == SUBREG)
              && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
            value = gen_lowpart (mode, value);
          else
            value = convert_to_mode (mode, value, 1);
        }

      if (must_and)
        value = expand_binop (mode, and_optab, value,
                              mask_rtx (mode, 0, bitsize, 0),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
        value = expand_shift (LSHIFT_EXPR, mode, value,
                              build_int_cst (NULL_TREE, bitpos), NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */
  /* We keep the intermediates in registers to allow CSE to combine
     consecutive bitfield assignments.  */

  temp = force_reg (mode, op0);

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, temp,
                           mask_rtx (mode, bitpos, bitsize, 1),
                           NULL_RTX, 1, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);
    }

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    {
      temp = expand_binop (mode, ior_optab, temp, value,
                           NULL_RTX, 1, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);
    }

  emit_move_insn (op0, temp);
}
/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.

   This does not yet handle fields wider than BITS_PER_WORD.  */
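/* For instance (commentary): with BITS_PER_WORD == 32, an 8-bit field
   starting at BITPOS 28 is split into bits 28..31 of the first word
   and bits 0..3 of the second, and the loop below emits one
   store_fixed_bit_field call for each piece.  */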
static void
store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitpos, rtx value)
{
  unsigned int unit;
  unsigned int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
        value = word;
      else
        value = gen_lowpart_common (word_mode,
                                    force_reg (GET_MODE (value) != VOIDmode
                                               ? GET_MODE (value)
                                               : word_mode, value));
    }
  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         store_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (BYTES_BIG_ENDIAN)
        {
          int total_bits;

          /* We must do an endian conversion exactly the same way as it is
             done in extract_bit_field, so that the two calls to
             extract_fixed_bit_field will have comparable arguments.  */
          if (!MEM_P (value) || GET_MODE (value) == BLKmode)
            total_bits = BITS_PER_WORD;
          else
            total_bits = GET_MODE_BITSIZE (GET_MODE (value));

          /* Fetch successively less significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> (bitsize - bitsdone - thissize))
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            /* The args are chosen so that the last part includes the
               lsb.  Give extract_bit_field the value it needs (with
               endianness compensation) to fetch the piece we want.  */
            part = extract_fixed_bit_field (word_mode, value, 0, thissize,
                                            total_bits - bitsize + bitsdone,
                                            NULL_RTX, 1);
        }
      else
        {
          /* Fetch successively more significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> bitsdone)
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            part = extract_fixed_bit_field (word_mode, value, 0, thissize,
                                            bitsdone, NULL_RTX, 1);
        }
      /* If OP0 is a register, then handle OFFSET here.

         When handling multiword bitfields, extract_bit_field may pass
         down a word_mode SUBREG of a larger REG for a bitfield that actually
         crosses a word boundary.  Thus, for a SUBREG, we must find
         the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
        {
          int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
          word = operand_subword_force (SUBREG_REG (op0), word_offset,
                                        GET_MODE (SUBREG_REG (op0)));
          offset = 0;
        }
      else if (REG_P (op0))
        {
          word = operand_subword_force (op0, offset, GET_MODE (op0));
          offset = 0;
        }
      else
        word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
         store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
                             thispos, part);
      bitsdone += thissize;
    }
}
/* A subroutine of extract_bit_field_1 that converts return value X
   to either MODE or TMODE.  MODE, TMODE and UNSIGNEDP are arguments
   to extract_bit_field.  */

static rtx
convert_extracted_bit_field (rtx x, enum machine_mode mode,
                             enum machine_mode tmode, bool unsignedp)
{
  if (GET_MODE (x) == tmode || GET_MODE (x) == mode)
    return x;

  /* If the x mode is not a scalar integral, first convert to the
     integer mode of that size and then access it as a floating-point
     value via a SUBREG.  */
  if (!SCALAR_INT_MODE_P (tmode))
    {
      enum machine_mode smode;

      smode = mode_for_size (GET_MODE_BITSIZE (tmode), MODE_INT, 0);
      x = convert_to_mode (smode, x, unsignedp);
      x = force_reg (smode, x);
      return gen_lowpart (tmode, x);
    }

  return convert_to_mode (tmode, x, unsignedp);
}
/* A subroutine of extract_bit_field, with the same arguments.
   If FALLBACK_P is true, fall back to extract_fixed_bit_field
   if we can find no other means of implementing the operation.
   If FALLBACK_P is false, return NULL instead.  */
static rtx
extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                     unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
                     enum machine_mode mode, enum machine_mode tmode,
                     bool fallback_p)
{
  unsigned int unit
    = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset, bitpos;
  rtx op0 = str_rtx;
  enum machine_mode int_mode;
  enum machine_mode ext_mode;
  enum machine_mode mode1;
  enum insn_code icode;
  int byte_offset;

  if (tmode == VOIDmode)
    tmode = mode;

  while (GET_CODE (op0) == SUBREG)
    {
      bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }

  /* If we have an out-of-bounds access to a register, just return an
     uninitialized register of the required mode.  This can occur if the
     source code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
    return gen_reg_rtx (tmode);
  if (REG_P (op0)
      && mode == GET_MODE (op0)
      && bitnum == 0
      && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
    {
      /* We're trying to extract a full register from itself.  */
      return op0;
    }
  /* See if we can get a better vector mode before extracting.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && GET_MODE_INNER (GET_MODE (op0)) != tmode)
    {
      enum machine_mode new_mode;
      int nunits = GET_MODE_NUNITS (GET_MODE (op0));

      if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
        new_mode = MIN_MODE_VECTOR_FLOAT;
      else
        new_mode = MIN_MODE_VECTOR_INT;

      for (; new_mode != VOIDmode ; new_mode = GET_MODE_WIDER_MODE (new_mode))
        if (GET_MODE_NUNITS (new_mode) == nunits
            && GET_MODE_INNER (new_mode) == tmode
            && targetm.vector_mode_supported_p (new_mode))
          break;
      if (new_mode != VOIDmode)
        op0 = gen_lowpart (new_mode, op0);
    }
  /* Use vec_extract patterns for extracting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && (vec_extract_optab->handlers[GET_MODE (op0)].insn_code
          != CODE_FOR_nothing)
      && ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
          == bitnum / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      int icode = (int) vec_extract_optab->handlers[outermode].insn_code;
      unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode);
      rtx rtxpos = GEN_INT (pos);
      rtx src = op0;
      rtx dest = NULL, pat, seq;
      enum machine_mode mode0 = insn_data[icode].operand[0].mode;
      enum machine_mode mode1 = insn_data[icode].operand[1].mode;
      enum machine_mode mode2 = insn_data[icode].operand[2].mode;

      if (innermode == tmode || innermode == mode)
        dest = target;

      if (dest == NULL)
        dest = gen_reg_rtx (innermode);

      start_sequence ();

      if (! (*insn_data[icode].operand[0].predicate) (dest, mode0))
        dest = copy_to_mode_reg (mode0, dest);

      if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
        src = copy_to_mode_reg (mode1, src);

      if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
        rtxpos = copy_to_mode_reg (mode2, rtxpos);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
                  && (*insn_data[icode].operand[1].predicate) (src, mode1)
                  && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));

      pat = GEN_FCN (icode) (dest, src, rtxpos);
      seq = get_insns ();
      end_sequence ();
      if (pat)
        {
          emit_insn (seq);
          emit_insn (pat);
          return gen_lowpart (tmode, dest);
        }
    }
  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (MEM_P (op0))
          op0 = adjust_address (op0, imode, 0);
        else
          {
            gcc_assert (imode != BLKmode);
            op0 = gen_lowpart (imode, op0);

            /* If we got a SUBREG, force it into a register since we
               aren't going to be able to do another SUBREG on it.  */
            if (GET_CODE (op0) == SUBREG)
              op0 = force_reg (imode, op0);
          }
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (MEM_P (op0))
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }
  /* Extraction of a full-word or multi-word value from a structure
     in a register or aligned memory can be done with just a SUBREG.
     A subword value in the least significant part of a register
     can also be extracted with a SUBREG.  For this, we need the
     byte offset of the value in op0.  */

  bitpos = bitnum % unit;
  offset = bitnum / unit;
  byte_offset = bitpos / BITS_PER_UNIT + offset * UNITS_PER_WORD;

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && !MEM_P (op0)
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* Only scalar integer modes can be converted via subregs.  There is an
     additional problem for FP modes here in that they can have a precision
     which is different from the size.  mode_for_size uses precision, but
     we want a mode based on the size, so we must avoid calling it for FP
     modes.  */
  mode1 = (SCALAR_INT_MODE_P (tmode)
           ? mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0)
           : mode);

  if (((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
        && bitpos % BITS_PER_WORD == 0)
       || (mode1 != BLKmode
           /* ??? The big endian test here is wrong.  This is correct
              if the value is in a register, and if mode_for_size is not
              the same mode as op0.  This causes us to get unnecessarily
              inefficient code from the Thumb port when -mbig-endian.  */
           && (BYTES_BIG_ENDIAN
               ? bitpos + bitsize == BITS_PER_WORD
               : bitpos == 0)))
      && ((!MEM_P (op0)
           && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                     GET_MODE_BITSIZE (GET_MODE (op0)))
           && GET_MODE_SIZE (mode1) != 0
           && byte_offset % GET_MODE_SIZE (mode1) == 0)
          || (MEM_P (op0)
              && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
                  || (offset * BITS_PER_UNIT % bitsize == 0
                      && MEM_ALIGN (op0) % bitsize == 0)))))
    {
      if (mode1 != GET_MODE (op0))
        {
          if (MEM_P (op0))
            op0 = adjust_address (op0, mode1, offset);
          else
            {
              rtx sub = simplify_gen_subreg (mode1, op0, GET_MODE (op0),
                                             byte_offset);
              if (sub == NULL)
                goto no_subreg_mode_swap;
              op0 = sub;
            }
        }
      return convert_to_mode (tmode, op0, unsignedp);
    }
 no_subreg_mode_swap:

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.  */

      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      if (target == 0 || !REG_P (target))
        target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          /* Word number in TARGET to use.  */
          unsigned int wordnum
            = (WORDS_BIG_ENDIAN
               ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
               : i);
          /* Offset from start of field in OP0.  */
          unsigned int bit_offset = (WORDS_BIG_ENDIAN
                                     ? MAX (0, ((int) bitsize - ((int) i + 1)
                                                * (int) BITS_PER_WORD))
                                     : (int) i * BITS_PER_WORD);
          rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
          rtx result_part
            = extract_bit_field (op0, MIN (BITS_PER_WORD,
                                           bitsize - i * BITS_PER_WORD),
                                 bitnum + bit_offset, 1, target_part, mode,
                                 word_mode);

          gcc_assert (target_part);

          if (result_part != target_part)
            emit_move_insn (target_part, result_part);
        }

      if (unsignedp)
        {
          /* Unless we've filled TARGET, the upper regs in a multi-reg value
             need to be zero'd out.  */
          if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
            {
              unsigned int i, total_words;

              total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
              for (i = nwords; i < total_words; i++)
                emit_move_insn
                  (operand_subword (target,
                                    WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
                                    1, VOIDmode),
                   const0_rtx);
            }
          return target;
        }

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
                             build_int_cst (NULL_TREE,
                                            GET_MODE_BITSIZE (mode) - bitsize),
                             NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
                           build_int_cst (NULL_TREE,
                                          GET_MODE_BITSIZE (mode) - bitsize),
                           NULL_RTX, 0);
    }
  /* From here on we know the desired field is smaller than a word.  */

  /* Check if there is a correspondingly-sized integer field, so we can
     safely extract it as one size of integer, if necessary; then
     truncate or extend to the size that is wanted; then use SUBREGs or
     convert_to_mode to get one of the modes we really wanted.  */

  int_mode = int_mode_for_mode (tmode);
  if (int_mode == BLKmode)
    int_mode = int_mode_for_mode (mode);
  /* Should probably push op0 out to memory and then do a load.  */
  gcc_assert (int_mode != BLKmode);
  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */
  if (!MEM_P (op0))
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (!REG_P (op0))
            op0 = copy_to_reg (op0);
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, (offset * UNITS_PER_WORD));
        }
      offset = 0;
    }
  /* Now OFFSET is nonzero only for memory operands.  */
  ext_mode = mode_for_extraction (unsignedp ? EP_extzv : EP_extv, 0);
  icode = unsignedp ? CODE_FOR_extzv : CODE_FOR_extv;
  if (ext_mode != MAX_MACHINE_MODE
      && bitsize > 0
      && GET_MODE_BITSIZE (ext_mode) >= bitsize
      /* If op0 is a register, we need it in EXT_MODE to make it
         acceptable to the format of ext(z)v.  */
      && !(GET_CODE (op0) == SUBREG && GET_MODE (op0) != ext_mode)
      && !((REG_P (op0) || GET_CODE (op0) == SUBREG)
           && (bitsize + bitpos > GET_MODE_BITSIZE (ext_mode)))
      && check_predicate_volatile_ok (icode, 1, op0, GET_MODE (op0)))
    {
      unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
      rtx bitsize_rtx, bitpos_rtx;
      rtx last = get_last_insn ();
      rtx xop0 = op0;
      rtx xtarget = target;
      rtx xspec_target = target;
      rtx xspec_target_subreg = 0;
      rtx pat;

      /* If op0 is a register, we need it in EXT_MODE to make it
         acceptable to the format of ext(z)v.  */
      if (REG_P (xop0) && GET_MODE (xop0) != ext_mode)
        xop0 = gen_rtx_SUBREG (ext_mode, xop0, 0);
      if (MEM_P (xop0))
        /* Get ref to first byte containing part of the field.  */
        xop0 = adjust_address (xop0, byte_mode, xoffset);

      /* On big-endian machines, we count bits from the most significant.
         If the bit field insn does not, we must invert.  */
      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
        xbitpos = unit - bitsize - xbitpos;

      /* Now convert from counting within UNIT to counting in EXT_MODE.  */
      if (BITS_BIG_ENDIAN && !MEM_P (xop0))
        xbitpos += GET_MODE_BITSIZE (ext_mode) - unit;

      unit = GET_MODE_BITSIZE (ext_mode);

      if (xtarget == 0)
        xtarget = xspec_target = gen_reg_rtx (tmode);

      if (GET_MODE (xtarget) != ext_mode)
        {
          if (REG_P (xtarget))
            {
              xtarget = gen_lowpart (ext_mode, xtarget);
              if (GET_MODE_SIZE (ext_mode)
                  > GET_MODE_SIZE (GET_MODE (xspec_target)))
                xspec_target_subreg = xtarget;
            }
          else
            xtarget = gen_reg_rtx (ext_mode);
        }

      /* If this machine's ext(z)v insists on a register target,
         make sure we have one.  */
      if (!insn_data[(int) icode].operand[0].predicate (xtarget, ext_mode))
        xtarget = gen_reg_rtx (ext_mode);

      bitsize_rtx = GEN_INT (bitsize);
      bitpos_rtx = GEN_INT (xbitpos);

      pat = (unsignedp
             ? gen_extzv (xtarget, xop0, bitsize_rtx, bitpos_rtx)
             : gen_extv (xtarget, xop0, bitsize_rtx, bitpos_rtx));
      if (pat)
        {
          emit_insn (pat);
          if (xtarget == xspec_target)
            return xtarget;
          if (xtarget == xspec_target_subreg)
            return xspec_target;
          return convert_extracted_bit_field (xtarget, mode, tmode, unsignedp);
        }
      delete_insns_since (last);
    }
  /* If OP0 is a memory, try copying it to a register and seeing if a
     cheap register alternative is available.  */
  if (ext_mode != MAX_MACHINE_MODE && MEM_P (op0))
    {
      enum machine_mode bestmode;

      /* Get the mode to use for inserting into this field.  If
         OP0 is BLKmode, get the smallest mode consistent with the
         alignment.  If OP0 is a non-BLKmode object that is no
         wider than EXT_MODE, use its mode.  Otherwise, use the
         smallest mode containing the field.  */

      if (GET_MODE (op0) == BLKmode
          || (ext_mode != MAX_MACHINE_MODE
              && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (ext_mode)))
        bestmode = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0),
                                  (ext_mode == MAX_MACHINE_MODE
                                   ? VOIDmode : ext_mode),
                                  MEM_VOLATILE_P (op0));
      else
        bestmode = GET_MODE (op0);

      if (bestmode != VOIDmode
          && !(SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
               && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
        {
          unsigned HOST_WIDE_INT xoffset, xbitpos;

          /* Compute the offset as a multiple of this unit,
             counting in bytes.  */
          unit = GET_MODE_BITSIZE (bestmode);
          xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
          xbitpos = bitnum % unit;

          /* Make sure the register is big enough for the whole field.  */
          if (xoffset * BITS_PER_UNIT + unit
              >= offset * BITS_PER_UNIT + bitsize)
            {
              rtx last, result, xop0;

              last = get_last_insn ();

              /* Fetch it to a register in that size.  */
              xop0 = adjust_address (op0, bestmode, xoffset);
              xop0 = force_reg (bestmode, xop0);
              result = extract_bit_field_1 (xop0, bitsize, xbitpos,
                                            unsignedp, target,
                                            mode, tmode, false);
              if (result)
                return result;

              delete_insns_since (last);
            }
        }
    }

  if (!fallback_p)
    return NULL;

  target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                    bitpos, target, unsignedp);
  return convert_extracted_bit_field (target, mode, tmode, unsignedp);
}
/* Generate code to extract a byte-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */
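/* Example call (commentary; the modes are chosen for illustration):
     x = extract_bit_field (mem, 6, 10, 1, NULL_RTX, SImode, SImode);
   reads the unsigned 6-bit field starting at bit 10 of MEM and yields
   an SImode rtx holding the zero-extended field.  */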
rtx
extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                   unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
                   enum machine_mode mode, enum machine_mode tmode)
{
  return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp,
                              target, mode, tmode, true);
}
/* Extract a bit field using shifts and boolean operations.
   Returns an rtx to represent the value.
   OP0 addresses a register (word) or memory (byte).
   BITPOS says which bit within the word or byte the bit field starts in.
   OFFSET says how many bytes farther the bit field starts;
   it is 0 if OP0 is a register.
   BITSIZE says how many bits long the bit field is.
   (If OP0 is a register, it may be narrower than a full word,
   but BITPOS still counts within a full word,
   which is significant on bigendian machines.)

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.  */
static rtx
extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
                         unsigned HOST_WIDE_INT offset,
                         unsigned HOST_WIDE_INT bitsize,
                         unsigned HOST_WIDE_INT bitpos, rtx target,
                         int unsignedp)
{
  unsigned int total_bits = BITS_PER_WORD;
  enum machine_mode mode;

  if (GET_CODE (op0) == SUBREG || REG_P (op0))
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        /* The only way this should occur is if the field spans word
           boundaries.  */
        return extract_split_bit_field (op0, bitsize,
                                        bitpos + offset * BITS_PER_UNIT,
                                        unsignedp);

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);
  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb and that of OP0.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */

  if (unsignedp)
    {
      if (bitpos)
        {
          /* If the field does not already start at the lsb,
             shift it so it does.  */
          tree amount = build_int_cst (NULL_TREE, bitpos);
          /* Maybe propagate the target for the shift.  */
          /* But not if we will return it--could confuse integrate.c.  */
          rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
          if (tmode != mode) subtarget = 0;
          op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
        }
      /* Convert the value to the desired mode.  */
      if (mode != tmode)
        op0 = convert_to_mode (tmode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
         mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitpos + bitsize)
        return expand_binop (GET_MODE (op0), and_optab, op0,
                             mask_rtx (GET_MODE (op0), 0, bitsize, 0),
                             target, 1, OPTAB_LIB_WIDEN);
      return op0;
    }
  /* To extract a signed bit-field, first shift its msb to the msb of the word,
     then arithmetic-shift its lsb to the lsb of the word.  */
  op0 = force_reg (mode, op0);
  if (mode != tmode)
    target = 0;

  /* Find the narrowest integer mode that contains the field.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
      {
        op0 = convert_to_mode (mode, op0, 0);
        break;
      }

  if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
    {
      tree amount
        = build_int_cst (NULL_TREE,
                         GET_MODE_BITSIZE (mode) - (bitsize + bitpos));
      /* Maybe propagate the target for the shift.  */
      rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }

  return expand_shift (RSHIFT_EXPR, mode, op0,
                       build_int_cst (NULL_TREE,
                                      GET_MODE_BITSIZE (mode) - bitsize),
                       target, 0);
}
/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
   of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
   complement of that if COMPLEMENT.  The mask is truncated if
   necessary to the width of mode MODE.  The mask is zero-extended if
   BITSIZE+BITPOS is too small for MODE.  */
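/* For example (commentary): mask_rtx (SImode, 4, 8, 0) is the
   CONST_INT 0x00000ff0 -- eight ones above four zeros -- and
   mask_rtx (SImode, 4, 8, 1) is its complement 0xfffff00f.  */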
static rtx
mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement)
{
  HOST_WIDE_INT masklow, maskhigh;

  if (bitsize == 0)
    masklow = 0;
  else if (bitpos < HOST_BITS_PER_WIDE_INT)
    masklow = (HOST_WIDE_INT) -1 << bitpos;
  else
    masklow = 0;

  if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
    masklow &= ((unsigned HOST_WIDE_INT) -1
                >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));

  if (bitpos <= HOST_BITS_PER_WIDE_INT)
    maskhigh = -1;
  else
    maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);

  if (bitsize == 0)
    maskhigh = 0;
  else if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
    maskhigh &= ((unsigned HOST_WIDE_INT) -1
                 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
  else
    maskhigh = 0;

  if (complement)
    {
      maskhigh = ~maskhigh;
      masklow = ~masklow;
    }

  return immed_double_const (masklow, maskhigh, mode);
}
/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
   VALUE truncated to BITSIZE bits and then shifted left BITPOS bits.  */
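/* For example (commentary): lshift_value (SImode, GEN_INT (0x1f3), 8, 4)
   truncates the value to its low four bits (0x3) and shifts left by
   eight, producing the CONST_INT 0x300.  */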
static rtx
lshift_value (enum machine_mode mode, rtx value, int bitpos, int bitsize)
{
  unsigned HOST_WIDE_INT v = INTVAL (value);
  HOST_WIDE_INT low, high;

  if (bitsize < HOST_BITS_PER_WIDE_INT)
    v &= ~((HOST_WIDE_INT) -1 << bitsize);

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      low = v << bitpos;
      high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
    }
  else
    {
      low = 0;
      high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
    }

  return immed_double_const (low, high, mode);
}
/* Extract a bit field from a memory by forcing the alignment of the
   memory.  This is efficient only if the field spans at least 4 boundaries.

   OP0 is the MEM.
   BITSIZE is the field width; BITPOS is the position of the first bit.
   UNSIGNEDP is true if the result should be zero-extended.  */
static rtx
extract_force_align_mem_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
                                   unsigned HOST_WIDE_INT bitpos,
                                   int unsignedp)
{
  enum machine_mode mode, dmode;
  unsigned int m_bitsize, m_size;
  unsigned int sign_shift_up, sign_shift_dn;
  rtx base, a1, a2, v1, v2, comb, shift, result, start;

  /* Choose a mode that will fit BITSIZE.  */
  mode = smallest_mode_for_size (bitsize, MODE_INT);
  m_size = GET_MODE_SIZE (mode);
  m_bitsize = GET_MODE_BITSIZE (mode);

  /* Choose a mode twice as wide.  Fail if no such mode exists.  */
  dmode = mode_for_size (m_bitsize * 2, MODE_INT, false);
  if (dmode == BLKmode)
    return NULL;

  do_pending_stack_adjust ();
  start = get_last_insn ();

  /* At the end, we'll need an additional shift to deal with sign/zero
     extension.  By default this will be a left+right shift of the
     appropriate size.  But we may be able to eliminate one of them.  */
  sign_shift_up = sign_shift_dn = m_bitsize - bitsize;
  if (STRICT_ALIGNMENT)
    {
      base = plus_constant (XEXP (op0, 0), bitpos / BITS_PER_UNIT);
      bitpos %= BITS_PER_UNIT;

      /* We load two values to be concatenated.  There's an edge condition
         that bears notice -- an aligned value at the end of a page can
         only load one value lest we segfault.  So the two values we load
         are at "base & -size" and "(base + size - 1) & -size".  If base
         is unaligned, the addresses will be aligned and sequential; if
         base is aligned, the addresses will both be equal to base.  */
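      /* Numeric sketch (commentary): with m_size == 4, base == 0x1003
         yields loads at 0x1000 and 0x1004, while base == 0x1000 loads
         the same aligned word twice.  */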
      a1 = expand_simple_binop (Pmode, AND, force_operand (base, NULL),
                                GEN_INT (-(HOST_WIDE_INT)m_size),
                                NULL, true, OPTAB_LIB_WIDEN);
      mark_reg_pointer (a1, m_bitsize);
      v1 = gen_rtx_MEM (mode, a1);
      set_mem_align (v1, m_bitsize);
      v1 = force_reg (mode, validize_mem (v1));

      a2 = plus_constant (base, GET_MODE_SIZE (mode) - 1);
      a2 = expand_simple_binop (Pmode, AND, force_operand (a2, NULL),
                                GEN_INT (-(HOST_WIDE_INT)m_size),
                                NULL, true, OPTAB_LIB_WIDEN);
      v2 = gen_rtx_MEM (mode, a2);
      set_mem_align (v2, m_bitsize);
      v2 = force_reg (mode, validize_mem (v2));

      /* Combine these two values into a double-word value.  */
      if (m_bitsize == BITS_PER_WORD)
        {
          comb = gen_reg_rtx (dmode);
          emit_insn (gen_rtx_CLOBBER (VOIDmode, comb));
          emit_move_insn (gen_rtx_SUBREG (mode, comb, 0), v1);
          emit_move_insn (gen_rtx_SUBREG (mode, comb, m_size), v2);
        }
      else
        {
          if (BYTES_BIG_ENDIAN)
            comb = v1, v1 = v2, v2 = comb;
          v1 = convert_modes (dmode, mode, v1, true);
          if (v1 == NULL)
            goto fail;
          v2 = convert_modes (dmode, mode, v2, true);
          v2 = expand_simple_binop (dmode, ASHIFT, v2, GEN_INT (m_bitsize),
                                    NULL, true, OPTAB_LIB_WIDEN);
          if (v2 == NULL)
            goto fail;
          comb = expand_simple_binop (dmode, IOR, v1, v2, NULL,
                                      true, OPTAB_LIB_WIDEN);
          if (comb == NULL)
            goto fail;
        }
1922 shift = expand_simple_binop (Pmode, AND, base, GEN_INT (m_size - 1),
1923 NULL, true, OPTAB_LIB_WIDEN);
1924 shift = expand_mult (Pmode, shift, GEN_INT (BITS_PER_UNIT), NULL, 1);
1928 if (sign_shift_up <= bitpos)
1929 bitpos -= sign_shift_up, sign_shift_up = 0;
1930 shift = expand_simple_binop (Pmode, PLUS, shift, GEN_INT (bitpos),
1931 NULL, true, OPTAB_LIB_WIDEN);
1936 unsigned HOST_WIDE_INT offset = bitpos / BITS_PER_UNIT;
1937 bitpos %= BITS_PER_UNIT;
1939 /* When strict alignment is not required, we can just load directly
1940 from memory without masking. If the remaining BITPOS offset is
1941 small enough, we may be able to do all operations in MODE as
1942 opposed to DMODE. */
1943 if (bitpos + bitsize <= m_bitsize)
1945 comb = adjust_address (op0, dmode, offset);
1947 if (sign_shift_up <= bitpos)
1948 bitpos -= sign_shift_up, sign_shift_up = 0;
1949 shift = GEN_INT (bitpos);
1952 /* Shift down the double-word such that the requested value is at bit 0. */
1953 if (shift != const0_rtx)
1954 comb = expand_simple_binop (dmode, unsignedp ? LSHIFTRT : ASHIFTRT,
1955 comb, shift, NULL, unsignedp, OPTAB_LIB_WIDEN);
1959 /* If the field exactly matches MODE, then all we need to do is return the
1960 lowpart. Otherwise, shift to get the sign bits set properly. */
1961 result = force_reg (mode, gen_lowpart (mode, comb));
1964 result = expand_simple_binop (mode, ASHIFT, result,
1965 GEN_INT (sign_shift_up),
1966 NULL_RTX, 0, OPTAB_LIB_WIDEN);
1968 result = expand_simple_binop (mode, unsignedp ? LSHIFTRT : ASHIFTRT,
1969 result, GEN_INT (sign_shift_dn),
1970 NULL_RTX, 0, OPTAB_LIB_WIDEN);
1975 delete_insns_since (start);
1979 /* Extract a bit field that is split across two words
1980 and return an RTX for the result.
1982 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1983 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
1984 UNSIGNEDP is 1 if we should zero-extend the contents; else sign-extend.  */
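/* Worked example (an added illustration): with 32-bit words, a 16-bit
   field at BITPOS == 24 is extracted by the loop below in two pieces,
   8 bits at word offset 0, position 24, then 8 bits at word offset 1,
   position 0; the pieces are then shifted into place and IOR'd
   together.  */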
1987 extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
1988 unsigned HOST_WIDE_INT bitpos, int unsignedp)
1991 unsigned int bitsdone = 0;
1992 rtx result = NULL_RTX;
1995 /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
1996 much at a time.  */
1997 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
1998 unit = BITS_PER_WORD;
2001 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
2002 if (0 && bitsize / unit > 2)
2004 rtx tmp = extract_force_align_mem_bit_field (op0, bitsize, bitpos,
2011 while (bitsdone < bitsize)
2013 unsigned HOST_WIDE_INT thissize;
2015 unsigned HOST_WIDE_INT thispos;
2016 unsigned HOST_WIDE_INT offset;
2018 offset = (bitpos + bitsdone) / unit;
2019 thispos = (bitpos + bitsdone) % unit;
2021 /* THISSIZE must not overrun a word boundary. Otherwise,
2022 extract_fixed_bit_field will call us again, and we will mutually
2023 recurse forever.  */
2024 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
2025 thissize = MIN (thissize, unit - thispos);
2027 /* If OP0 is a register, then handle OFFSET here.
2029 When handling multiword bitfields, extract_bit_field may pass
2030 down a word_mode SUBREG of a larger REG for a bitfield that actually
2031 crosses a word boundary. Thus, for a SUBREG, we must find
2032 the current word starting from the base register. */
2033 if (GET_CODE (op0) == SUBREG)
2035 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
2036 word = operand_subword_force (SUBREG_REG (op0), word_offset,
2037 GET_MODE (SUBREG_REG (op0)));
2040 else if (REG_P (op0))
2042 word = operand_subword_force (op0, offset, GET_MODE (op0));
2048 /* Extract the parts in bit-counting order,
2049 whose meaning is determined by BYTES_PER_UNIT.
2050 OFFSET is in UNITs, and UNIT is in bits.
2051 extract_fixed_bit_field wants offset in bytes. */
2052 part = extract_fixed_bit_field (word_mode, word,
2053 offset * unit / BITS_PER_UNIT,
2054 thissize, thispos, 0, 1);
2055 bitsdone += thissize;
2057 /* Shift this part into place for the result. */
2058 if (BYTES_BIG_ENDIAN)
2060 if (bitsize != bitsdone)
2061 part = expand_shift (LSHIFT_EXPR, word_mode, part,
2062 build_int_cst (NULL_TREE, bitsize - bitsdone),
2067 if (bitsdone != thissize)
2068 part = expand_shift (LSHIFT_EXPR, word_mode, part,
2069 build_int_cst (NULL_TREE,
2070 bitsdone - thissize), 0, 1);
2076 /* Combine the parts with bitwise or. This works
2077 because we extracted each part as an unsigned bit field. */
2078 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
2084 /* Unsigned bit field: we are done. */
2087 /* Signed bit field: sign-extend with two arithmetic shifts. */
2088 result = expand_shift (LSHIFT_EXPR, word_mode, result,
2089 build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
2091 return expand_shift (RSHIFT_EXPR, word_mode, result,
2092 build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
2096 /* Add INC into TARGET. */
2099 expand_inc (rtx target, rtx inc)
2101 rtx value = expand_binop (GET_MODE (target), add_optab,
2103 target, 0, OPTAB_LIB_WIDEN);
2104 if (value != target)
2105 emit_move_insn (target, value);
2108 /* Subtract DEC from TARGET. */
2111 expand_dec (rtx target, rtx dec)
2113 rtx value = expand_binop (GET_MODE (target), sub_optab,
2115 target, 0, OPTAB_LIB_WIDEN);
2116 if (value != target)
2117 emit_move_insn (target, value);
2120 /* Output a shift instruction for expression code CODE,
2121 with SHIFTED being the rtx for the value to shift,
2122 and AMOUNT the tree for the amount to shift by.
2123 Store the result in the rtx TARGET, if that is convenient.
2124 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2125 Return the rtx for where the value is. */
2128 expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
2129 tree amount, rtx target, int unsignedp)
2132 int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
2133 int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
2136 /* Previously detected shift-counts computed by NEGATE_EXPR
2137 and shifted in the other direction; but that does not work
2138 on all machines.  */
2140 op1 = expand_normal (amount);
2142 if (SHIFT_COUNT_TRUNCATED)
2144 if (GET_CODE (op1) == CONST_INT
2145 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
2146 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
2147 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
2148 % GET_MODE_BITSIZE (mode));
2149 else if (GET_CODE (op1) == SUBREG
2150 && subreg_lowpart_p (op1))
2151 op1 = SUBREG_REG (op1);
2154 if (op1 == const0_rtx)
2157 /* Check whether it's cheaper to implement a left shift by a constant
2158 bit count by a sequence of additions. */
2159 if (code == LSHIFT_EXPR
2160 && GET_CODE (op1) == CONST_INT
2162 && INTVAL (op1) < GET_MODE_BITSIZE (mode)
2163 && INTVAL (op1) < MAX_BITS_PER_WORD
2164 && shift_cost[mode][INTVAL (op1)] > INTVAL (op1) * add_cost[mode]
2165 && shift_cost[mode][INTVAL (op1)] != MAX_COST)
2168 for (i = 0; i < INTVAL (op1); i++)
2170 temp = force_reg (mode, shifted);
2171 shifted = expand_binop (mode, add_optab, temp, temp, NULL_RTX,
2172 unsignedp, OPTAB_LIB_WIDEN);
2177 for (try = 0; temp == 0 && try < 3; try++)
2179 enum optab_methods methods;
2182 methods = OPTAB_DIRECT;
2184 methods = OPTAB_WIDEN;
2186 methods = OPTAB_LIB_WIDEN;
2190 /* Widening does not work for rotation. */
2191 if (methods == OPTAB_WIDEN)
2193 else if (methods == OPTAB_LIB_WIDEN)
2195 /* If we have been unable to open-code this by a rotation,
2196 do it as the IOR of two shifts. I.e., to rotate A
2197 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
2198 where C is the bitsize of A.
2200 It is theoretically possible that the target machine might
2201 not be able to perform either shift and hence we would
2202 be making two libcalls rather than just the one for the
2203 shift (similarly if IOR could not be done). We will allow
2204 this extremely unlikely lossage to avoid complicating the
2205 code.  */
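/* A minimal C sketch of this fallback for a 32-bit mode (an added
   illustration; the op1 == const0_rtx case was already handled above,
   so 0 < N < 32 holds here):

     uint32_t rotl32 (uint32_t a, unsigned int n)
     {
       return (a << n) | (a >> (32 - n));
     }

   e.g. rotl32 (0x80000001, 1) == 0x00000003.  */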
2207 rtx subtarget = target == shifted ? 0 : target;
2208 tree new_amount, other_amount;
2210 tree type = TREE_TYPE (amount);
2211 if (GET_MODE (op1) != TYPE_MODE (type)
2212 && GET_MODE (op1) != VOIDmode)
2213 op1 = convert_to_mode (TYPE_MODE (type), op1, 1);
2214 new_amount = make_tree (type, op1);
2215 other_amount
2216 = fold_build2 (MINUS_EXPR, type,
2217 build_int_cst (type, GET_MODE_BITSIZE (mode)),
2218 new_amount);
2220 shifted = force_reg (mode, shifted);
2222 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
2223 mode, shifted, new_amount, 0, 1);
2224 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
2225 mode, shifted, other_amount, subtarget, 1);
2226 return expand_binop (mode, ior_optab, temp, temp1, target,
2227 unsignedp, methods);
2230 temp = expand_binop (mode,
2231 left ? rotl_optab : rotr_optab,
2232 shifted, op1, target, unsignedp, methods);
2235 temp = expand_binop (mode,
2236 left ? ashl_optab : lshr_optab,
2237 shifted, op1, target, unsignedp, methods);
2239 /* Do arithmetic shifts.
2240 Also, if we are going to widen the operand, we can just as well
2241 use an arithmetic right-shift instead of a logical one. */
2242 if (temp == 0 && ! rotate
2243 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2245 enum optab_methods methods1 = methods;
2247 /* If trying to widen a log shift to an arithmetic shift,
2248 don't accept an arithmetic shift of the same size. */
2250 methods1 = OPTAB_MUST_WIDEN;
2252 /* Arithmetic shift */
2254 temp = expand_binop (mode,
2255 left ? ashl_optab : ashr_optab,
2256 shifted, op1, target, unsignedp, methods1);
2259 /* We used to try extzv here for logical right shifts, but that was
2260 only useful for one machine, the VAX, and caused poor code
2261 generation there for lshrdi3, so the code was deleted and a
2262 define_expand for lshrsi3 was added to vax.md. */
2282 /* This structure holds the "cost" of a multiply sequence. The
2283 "cost" field holds the total rtx_cost of every operator in the
2284 synthetic multiplication sequence, hence cost(a op b) is defined
2285 as rtx_cost(op) + cost(a) + cost(b), where cost(leaf) is zero.
2286 The "latency" field holds the minimum possible latency of the
2287 synthetic multiply, on a hypothetical infinitely parallel CPU.
2288 This is the critical path, or the maximum height, of the expression
2289 tree which is the sum of rtx_costs on the most expensive path from
2290 any leaf to the root. Hence latency(a op b) is defined as zero for
2291 leaves and rtx_cost(op) + max(latency(a), latency(b)) otherwise. */
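/* Worked example (an added illustration): for (a + b) + (c + d) with
   every addition costing 1, "cost" is 3 (three adds in total) while
   "latency" is 2, because the two inner additions can be computed in
   parallel.  */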
2294 short cost; /* Total rtx_cost of the multiplication sequence. */
2295 short latency; /* The latency of the multiplication sequence. */
2298 /* This macro is used to compare a pointer to a mult_cost against a
2299 single integer "rtx_cost" value. This is equivalent to the macro
2300 CHEAPER_MULT_COST(X,Z) where Z = {Y,Y}. */
2301 #define MULT_COST_LESS(X,Y) ((X)->cost < (Y) \
2302 || ((X)->cost == (Y) && (X)->latency < (Y)))
2304 /* This macro is used to compare two pointers to mult_costs against
2305 each other. The macro returns true if X is cheaper than Y.
2306 Currently, the cheaper of two mult_costs is the one with the
2307 lower "cost". If "cost"s are tied, the lower latency is cheaper. */
2308 #define CHEAPER_MULT_COST(X,Y) ((X)->cost < (Y)->cost \
2309 || ((X)->cost == (Y)->cost \
2310 && (X)->latency < (Y)->latency))
2312 /* This structure records a sequence of operations.
2313 `ops' is the number of operations recorded.
2314 `cost' is their total cost.
2315 The operations are stored in `op' and the corresponding
2316 logarithms of the integer coefficients in `log'.
2318 These are the operations:
2319 alg_zero total := 0;
2320 alg_m total := multiplicand;
2321 alg_shift total := total * coeff;
2322 alg_add_t_m2 total := total + multiplicand * coeff;
2323 alg_sub_t_m2 total := total - multiplicand * coeff;
2324 alg_add_factor total := total * coeff + total;
2325 alg_sub_factor total := total * coeff - total;
2326 alg_add_t2_m total := total * coeff + multiplicand;
2327 alg_sub_t2_m total := total * coeff - multiplicand;
2329 The first operand must be either alg_zero or alg_m. */
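/* Worked example (an added illustration): one algorithm for
   multiplying by 10 is

     alg_m                    total := x;
     alg_add_t_m2 (log == 2)  total := total + (x << 2);  -- 5*x
     alg_shift    (log == 1)  total := total << 1;        -- 10*x

   i.e. the equivalent C expression is (x + (x << 2)) << 1.  */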
2333 struct mult_cost cost;
2335 /* The size of the OP and LOG fields are not directly related to the
2336 word size, but the worst-case algorithms will be if we have few
2337 consecutive ones or zeros, i.e., a multiplicand like 10101010101...
2338 In that case we will generate shift-by-2, add, shift-by-2, add,...,
2339 in total wordsize operations. */
2340 enum alg_code op[MAX_BITS_PER_WORD];
2341 char log[MAX_BITS_PER_WORD];
2344 /* The entry for our multiplication cache/hash table. */
2345 struct alg_hash_entry {
2346 /* The number we are multiplying by. */
2347 unsigned HOST_WIDE_INT t;
2349 /* The mode in which we are multiplying something by T. */
2350 enum machine_mode mode;
2352 /* The best multiplication algorithm for t. */
2355 /* The cost of multiplication if ALG_CODE is not alg_impossible.
2356 Otherwise, the cost within which multiplication by T is
2358 struct mult_cost cost;
2361 /* The number of cache/hash entries. */
2362 #if HOST_BITS_PER_WIDE_INT == 64
2363 #define NUM_ALG_HASH_ENTRIES 1031
2365 #define NUM_ALG_HASH_ENTRIES 307
2368 /* Each entry of ALG_HASH caches alg_code for some integer. This is
2369 actually a hash table.  If we have a collision, the older
2370 entry is kicked out.  */
2371 static struct alg_hash_entry alg_hash[NUM_ALG_HASH_ENTRIES];
2373 /* Indicates the type of fixup needed after a constant multiplication.
2374 BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that
2375 the result should be negated, and ADD_VARIANT means that the
2376 multiplicand should be added to the result. */
2377 enum mult_variant {basic_variant, negate_variant, add_variant};
2379 static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
2380 const struct mult_cost *, enum machine_mode mode);
2381 static bool choose_mult_variant (enum machine_mode, HOST_WIDE_INT,
2382 struct algorithm *, enum mult_variant *, int);
2383 static rtx expand_mult_const (enum machine_mode, rtx, HOST_WIDE_INT, rtx,
2384 const struct algorithm *, enum mult_variant);
2385 static unsigned HOST_WIDE_INT choose_multiplier (unsigned HOST_WIDE_INT, int,
2386 int, rtx *, int *, int *);
2387 static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
2388 static rtx extract_high_half (enum machine_mode, rtx);
2389 static rtx expand_mult_highpart (enum machine_mode, rtx, rtx, rtx, int, int);
2390 static rtx expand_mult_highpart_optab (enum machine_mode, rtx, rtx, rtx,
2392 /* Compute and return the best algorithm for multiplying by T.
2393 The algorithm must cost less than COST_LIMIT.
2394 If retval.cost >= COST_LIMIT, no algorithm was found and all
2395 other fields of the returned struct are undefined.
2396 MODE is the machine mode of the multiplication. */
2399 synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
2400 const struct mult_cost *cost_limit, enum machine_mode mode)
2403 struct algorithm *alg_in, *best_alg;
2404 struct mult_cost best_cost;
2405 struct mult_cost new_limit;
2406 int op_cost, op_latency;
2407 unsigned HOST_WIDE_INT q;
2408 int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode));
2410 bool cache_hit = false;
2411 enum alg_code cache_alg = alg_zero;
2413 /* Indicate that no algorithm is yet found. If no algorithm
2414 is found, this value will be returned and indicate failure. */
2415 alg_out->cost.cost = cost_limit->cost + 1;
2416 alg_out->cost.latency = cost_limit->latency + 1;
2418 if (cost_limit->cost < 0
2419 || (cost_limit->cost == 0 && cost_limit->latency <= 0))
2422 /* Restrict the bits of "t" to the multiplication's mode. */
2423 t &= GET_MODE_MASK (mode);
2425 /* t == 1 can be done at zero cost.  */
2429 alg_out->cost.cost = 0;
2430 alg_out->cost.latency = 0;
2431 alg_out->op[0] = alg_m;
2435 /* t == 0 sometimes has a cost.  If it does and it exceeds our limit,
2436 fail now.  */
2439 if (MULT_COST_LESS (cost_limit, zero_cost))
2444 alg_out->cost.cost = zero_cost;
2445 alg_out->cost.latency = zero_cost;
2446 alg_out->op[0] = alg_zero;
2451 /* We'll be needing a couple extra algorithm structures now. */
2453 alg_in = alloca (sizeof (struct algorithm));
2454 best_alg = alloca (sizeof (struct algorithm));
2455 best_cost = *cost_limit;
2457 /* Compute the hash index. */
2458 hash_index = (t ^ (unsigned int) mode) % NUM_ALG_HASH_ENTRIES;
2460 /* See if we already know what to do for T. */
2461 if (alg_hash[hash_index].t == t
2462 && alg_hash[hash_index].mode == mode
2463 && alg_hash[hash_index].alg != alg_unknown)
2465 cache_alg = alg_hash[hash_index].alg;
2467 if (cache_alg == alg_impossible)
2469 /* The cache tells us that it's impossible to synthesize
2470 multiplication by T within alg_hash[hash_index].cost. */
2471 if (!CHEAPER_MULT_COST (&alg_hash[hash_index].cost, cost_limit))
2472 /* COST_LIMIT is at least as restrictive as the one
2473 recorded in the hash table, in which case we have no
2474 hope of synthesizing a multiplication.  Just
2475 return.  */
2478 /* If we get here, COST_LIMIT is less restrictive than the
2479 one recorded in the hash table, so we may be able to
2480 synthesize a multiplication. Proceed as if we didn't
2481 have the cache entry. */
2485 if (CHEAPER_MULT_COST (cost_limit, &alg_hash[hash_index].cost))
2486 /* The cached algorithm shows that this multiplication
2487 requires more cost than COST_LIMIT. Just return. This
2488 way, we don't clobber this cache entry with
2489 alg_impossible but retain useful information. */
2501 goto do_alg_addsub_t_m2;
2503 case alg_add_factor:
2504 case alg_sub_factor:
2505 goto do_alg_addsub_factor;
2508 goto do_alg_add_t2_m;
2511 goto do_alg_sub_t2_m;
2519 /* If we have a group of zero bits at the low-order part of T, try
2520 multiplying by the remaining bits and then doing a shift. */
2525 m = floor_log2 (t & -t); /* m = number of low zero bits */
2529 /* The function expand_shift will choose between a shift and
2530 a sequence of additions, so the observed cost is given as
2531 MIN (m * add_cost[mode], shift_cost[mode][m]). */
2532 op_cost = m * add_cost[mode];
2533 if (shift_cost[mode][m] < op_cost)
2534 op_cost = shift_cost[mode][m];
2535 new_limit.cost = best_cost.cost - op_cost;
2536 new_limit.latency = best_cost.latency - op_cost;
2537 synth_mult (alg_in, q, &new_limit, mode);
2539 alg_in->cost.cost += op_cost;
2540 alg_in->cost.latency += op_cost;
2541 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2543 struct algorithm *x;
2544 best_cost = alg_in->cost;
2545 x = alg_in, alg_in = best_alg, best_alg = x;
2546 best_alg->log[best_alg->ops] = m;
2547 best_alg->op[best_alg->ops] = alg_shift;
2554 /* If we have an odd number, add or subtract one. */
2557 unsigned HOST_WIDE_INT w;
2560 for (w = 1; (w & t) != 0; w <<= 1)
2562 /* If T was -1, then W will be zero after the loop. This is another
2563 case where T ends with ...111. Handling this with (T + 1) and
2564 subtracting 1 produces slightly better code and results in algorithm
2565 selection much faster than treating it like the ...0111 case
2566 below.  */
2569 /* Reject the case where t is 3.
2570 Thus we prefer addition in that case. */
2573 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2575 op_cost = add_cost[mode];
2576 new_limit.cost = best_cost.cost - op_cost;
2577 new_limit.latency = best_cost.latency - op_cost;
2578 synth_mult (alg_in, t + 1, &new_limit, mode);
2580 alg_in->cost.cost += op_cost;
2581 alg_in->cost.latency += op_cost;
2582 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2584 struct algorithm *x;
2585 best_cost = alg_in->cost;
2586 x = alg_in, alg_in = best_alg, best_alg = x;
2587 best_alg->log[best_alg->ops] = 0;
2588 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2593 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2595 op_cost = add_cost[mode];
2596 new_limit.cost = best_cost.cost - op_cost;
2597 new_limit.latency = best_cost.latency - op_cost;
2598 synth_mult (alg_in, t - 1, &new_limit, mode);
2600 alg_in->cost.cost += op_cost;
2601 alg_in->cost.latency += op_cost;
2602 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2604 struct algorithm *x;
2605 best_cost = alg_in->cost;
2606 x = alg_in, alg_in = best_alg, best_alg = x;
2607 best_alg->log[best_alg->ops] = 0;
2608 best_alg->op[best_alg->ops] = alg_add_t_m2;
2615 /* Look for factors of t of the form
2616 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2617 If we find such a factor, we can multiply by t using an algorithm that
2618 multiplies by q, shift the result by m and add/subtract it to itself.
2620 We search for large factors first and loop down, even if large factors
2621 are less probable than small; if we find a large factor we will find a
2622 good sequence quickly, and therefore be able to prune (by decreasing
2623 COST_LIMIT) the search. */
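/* Worked example (an added illustration): t == 45 factors as
   9 * (2**2 + 1), so one candidate recurses on t / d == 9 and then
   applies alg_add_factor, total := (total << 2) + total, turning 9*x
   into 45*x.  */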
2625 do_alg_addsub_factor:
2626 for (m = floor_log2 (t - 1); m >= 2; m--)
2628 unsigned HOST_WIDE_INT d;
2630 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2631 if (t % d == 0 && t > d && m < maxm
2632 && (!cache_hit || cache_alg == alg_add_factor))
2634 /* If the target has a cheap shift-and-add instruction use
2635 that in preference to a shift insn followed by an add insn.
2636 Assume that the shift-and-add is "atomic" with a latency
2637 equal to its cost, otherwise assume that on superscalar
2638 hardware the shift may be executed concurrently with the
2639 earlier steps in the algorithm. */
2640 op_cost = add_cost[mode] + shift_cost[mode][m];
2641 if (shiftadd_cost[mode][m] < op_cost)
2643 op_cost = shiftadd_cost[mode][m];
2644 op_latency = op_cost;
2647 op_latency = add_cost[mode];
2649 new_limit.cost = best_cost.cost - op_cost;
2650 new_limit.latency = best_cost.latency - op_latency;
2651 synth_mult (alg_in, t / d, &new_limit, mode);
2653 alg_in->cost.cost += op_cost;
2654 alg_in->cost.latency += op_latency;
2655 if (alg_in->cost.latency < op_cost)
2656 alg_in->cost.latency = op_cost;
2657 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2659 struct algorithm *x;
2660 best_cost = alg_in->cost;
2661 x = alg_in, alg_in = best_alg, best_alg = x;
2662 best_alg->log[best_alg->ops] = m;
2663 best_alg->op[best_alg->ops] = alg_add_factor;
2665 /* Other factors will have been taken care of in the recursion. */
2669 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2670 if (t % d == 0 && t > d && m < maxm
2671 && (!cache_hit || cache_alg == alg_sub_factor))
2673 /* If the target has a cheap shift-and-subtract insn use
2674 that in preference to a shift insn followed by a sub insn.
2675 Assume that the shift-and-sub is "atomic" with a latency
2676 equal to its cost, otherwise assume that on superscalar
2677 hardware the shift may be executed concurrently with the
2678 earlier steps in the algorithm. */
2679 op_cost = add_cost[mode] + shift_cost[mode][m];
2680 if (shiftsub_cost[mode][m] < op_cost)
2682 op_cost = shiftsub_cost[mode][m];
2683 op_latency = op_cost;
2686 op_latency = add_cost[mode];
2688 new_limit.cost = best_cost.cost - op_cost;
2689 new_limit.latency = best_cost.latency - op_latency;
2690 synth_mult (alg_in, t / d, &new_limit, mode);
2692 alg_in->cost.cost += op_cost;
2693 alg_in->cost.latency += op_latency;
2694 if (alg_in->cost.latency < op_cost)
2695 alg_in->cost.latency = op_cost;
2696 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2698 struct algorithm *x;
2699 best_cost = alg_in->cost;
2700 x = alg_in, alg_in = best_alg, best_alg = x;
2701 best_alg->log[best_alg->ops] = m;
2702 best_alg->op[best_alg->ops] = alg_sub_factor;
2710 /* Try shift-and-add (load effective address) instructions,
2711 i.e. do a*3, a*5, a*9. */
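/* For instance (an added illustration), x86 can compute a*5 with a
   single "leal (%eax,%eax,4), %eax"; such targets report a small
   shiftadd_cost, which is what these cases exploit.  */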
2718 if (m >= 0 && m < maxm)
2720 op_cost = shiftadd_cost[mode][m];
2721 new_limit.cost = best_cost.cost - op_cost;
2722 new_limit.latency = best_cost.latency - op_cost;
2723 synth_mult (alg_in, (t - 1) >> m, &new_limit, mode);
2725 alg_in->cost.cost += op_cost;
2726 alg_in->cost.latency += op_cost;
2727 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2729 struct algorithm *x;
2730 best_cost = alg_in->cost;
2731 x = alg_in, alg_in = best_alg, best_alg = x;
2732 best_alg->log[best_alg->ops] = m;
2733 best_alg->op[best_alg->ops] = alg_add_t2_m;
2743 if (m >= 0 && m < maxm)
2745 op_cost = shiftsub_cost[mode][m];
2746 new_limit.cost = best_cost.cost - op_cost;
2747 new_limit.latency = best_cost.latency - op_cost;
2748 synth_mult (alg_in, (t + 1) >> m, &new_limit, mode);
2750 alg_in->cost.cost += op_cost;
2751 alg_in->cost.latency += op_cost;
2752 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2754 struct algorithm *x;
2755 best_cost = alg_in->cost;
2756 x = alg_in, alg_in = best_alg, best_alg = x;
2757 best_alg->log[best_alg->ops] = m;
2758 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2766 /* If best_cost has not decreased, we have not found any algorithm. */
2767 if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
2769 /* We failed to find an algorithm. Record alg_impossible for
2770 this case (that is, <T, MODE, COST_LIMIT>) so that next time
2771 we are asked to find an algorithm for T within the same or
2772 lower COST_LIMIT, we can immediately return to the
2773 caller.  */
2774 alg_hash[hash_index].t = t;
2775 alg_hash[hash_index].mode = mode;
2776 alg_hash[hash_index].alg = alg_impossible;
2777 alg_hash[hash_index].cost = *cost_limit;
2781 /* Cache the result. */
2784 alg_hash[hash_index].t = t;
2785 alg_hash[hash_index].mode = mode;
2786 alg_hash[hash_index].alg = best_alg->op[best_alg->ops];
2787 alg_hash[hash_index].cost.cost = best_cost.cost;
2788 alg_hash[hash_index].cost.latency = best_cost.latency;
2791 /* If we are getting too long a sequence for `struct algorithm'
2792 to record, make this search fail. */
2793 if (best_alg->ops == MAX_BITS_PER_WORD)
2796 /* Copy the algorithm from temporary space to the space at alg_out.
2797 We avoid using structure assignment because the majority of
2798 best_alg is normally undefined, and this is a critical function. */
2799 alg_out->ops = best_alg->ops + 1;
2800 alg_out->cost = best_cost;
2801 memcpy (alg_out->op, best_alg->op,
2802 alg_out->ops * sizeof *alg_out->op);
2803 memcpy (alg_out->log, best_alg->log,
2804 alg_out->ops * sizeof *alg_out->log);
2807 /* Find the cheapest way of multiplying a value of mode MODE by VAL.
2808 Try three variations:
2810 - a shift/add sequence based on VAL itself
2811 - a shift/add sequence based on -VAL, followed by a negation
2812 - a shift/add sequence based on VAL - 1, followed by an addition.
2814 Return true if the cheapest of these costs less than MULT_COST,
2815 describing the algorithm in *ALG and final fixup in *VARIANT. */
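/* Worked example (an added illustration): for VAL == 9 the candidates
   are a sequence for 9 itself (e.g. (x << 3) + x), a sequence for -9
   followed by a negation, and a sequence for 8 (a single shift)
   followed by adding the multiplicand; the cheapest one wins.  */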
2818 choose_mult_variant (enum machine_mode mode, HOST_WIDE_INT val,
2819 struct algorithm *alg, enum mult_variant *variant,
2822 struct algorithm alg2;
2823 struct mult_cost limit;
2826 /* Fail quickly for impossible bounds. */
2830 /* Ensure that mult_cost provides a reasonable upper bound.
2831 Any constant multiplication can be performed with fewer
2832 than 2 * bits additions. */
2833 op_cost = 2 * GET_MODE_BITSIZE (mode) * add_cost[mode];
2834 if (mult_cost > op_cost)
2835 mult_cost = op_cost;
2837 *variant = basic_variant;
2838 limit.cost = mult_cost;
2839 limit.latency = mult_cost;
2840 synth_mult (alg, val, &limit, mode);
2842 /* This works only if the inverted value actually fits in an
2843 `unsigned int'.  */
2844 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2846 op_cost = neg_cost[mode];
2847 if (MULT_COST_LESS (&alg->cost, mult_cost))
2849 limit.cost = alg->cost.cost - op_cost;
2850 limit.latency = alg->cost.latency - op_cost;
2854 limit.cost = mult_cost - op_cost;
2855 limit.latency = mult_cost - op_cost;
2858 synth_mult (&alg2, -val, &limit, mode);
2859 alg2.cost.cost += op_cost;
2860 alg2.cost.latency += op_cost;
2861 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2862 *alg = alg2, *variant = negate_variant;
2865 /* This proves very useful for division-by-constant. */
2866 op_cost = add_cost[mode];
2867 if (MULT_COST_LESS (&alg->cost, mult_cost))
2869 limit.cost = alg->cost.cost - op_cost;
2870 limit.latency = alg->cost.latency - op_cost;
2874 limit.cost = mult_cost - op_cost;
2875 limit.latency = mult_cost - op_cost;
2878 synth_mult (&alg2, val - 1, &limit, mode);
2879 alg2.cost.cost += op_cost;
2880 alg2.cost.latency += op_cost;
2881 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2882 *alg = alg2, *variant = add_variant;
2884 return MULT_COST_LESS (&alg->cost, mult_cost);
2887 /* A subroutine of expand_mult, used for constant multiplications.
2888 Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
2889 convenient. Use the shift/add sequence described by ALG and apply
2890 the final fixup specified by VARIANT. */
2893 expand_mult_const (enum machine_mode mode, rtx op0, HOST_WIDE_INT val,
2894 rtx target, const struct algorithm *alg,
2895 enum mult_variant variant)
2897 HOST_WIDE_INT val_so_far;
2898 rtx insn, accum, tem;
2900 enum machine_mode nmode;
2902 /* Avoid referencing memory over and over and invalid sharing
2903 on SUBREGs.  */
2904 op0 = force_reg (mode, op0);
2906 /* ACCUM starts out either as OP0 or as a zero, depending on
2907 the first operation. */
2909 if (alg->op[0] == alg_zero)
2911 accum = copy_to_mode_reg (mode, const0_rtx);
2914 else if (alg->op[0] == alg_m)
2916 accum = copy_to_mode_reg (mode, op0);
2922 for (opno = 1; opno < alg->ops; opno++)
2924 int log = alg->log[opno];
2925 rtx shift_subtarget = optimize ? 0 : accum;
2927 = (opno == alg->ops - 1 && target != 0 && variant != add_variant
2930 rtx accum_target = optimize ? 0 : accum;
2932 switch (alg->op[opno])
2935 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2936 build_int_cst (NULL_TREE, log),
2942 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2943 build_int_cst (NULL_TREE, log),
2945 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2946 add_target ? add_target : accum_target);
2947 val_so_far += (HOST_WIDE_INT) 1 << log;
2951 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2952 build_int_cst (NULL_TREE, log),
2954 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2955 add_target ? add_target : accum_target);
2956 val_so_far -= (HOST_WIDE_INT) 1 << log;
2960 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2961 build_int_cst (NULL_TREE, log),
2964 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2965 add_target ? add_target : accum_target);
2966 val_so_far = (val_so_far << log) + 1;
2970 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2971 build_int_cst (NULL_TREE, log),
2972 shift_subtarget, 0);
2973 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2974 add_target ? add_target : accum_target);
2975 val_so_far = (val_so_far << log) - 1;
2978 case alg_add_factor:
2979 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2980 build_int_cst (NULL_TREE, log),
2982 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2983 add_target ? add_target : accum_target);
2984 val_so_far += val_so_far << log;
2987 case alg_sub_factor:
2988 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2989 build_int_cst (NULL_TREE, log),
2991 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
2993 ? add_target : (optimize ? 0 : tem)));
2994 val_so_far = (val_so_far << log) - val_so_far;
3001 /* Write a REG_EQUAL note on the last insn so that we can cse
3002 multiplication sequences. Note that if ACCUM is a SUBREG,
3003 we've set the inner register and must properly indicate
3004 that.  */
3006 tem = op0, nmode = mode;
3007 if (GET_CODE (accum) == SUBREG)
3009 nmode = GET_MODE (SUBREG_REG (accum));
3010 tem = gen_lowpart (nmode, op0);
3013 insn = get_last_insn ();
3014 set_unique_reg_note (insn, REG_EQUAL,
3015 gen_rtx_MULT (nmode, tem,
3016 GEN_INT (val_so_far)));
3019 if (variant == negate_variant)
3021 val_so_far = -val_so_far;
3022 accum = expand_unop (mode, neg_optab, accum, target, 0);
3024 else if (variant == add_variant)
3026 val_so_far = val_so_far + 1;
3027 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
3030 /* Compare only the bits of val and val_so_far that are significant
3031 in the result mode, to avoid sign-/zero-extension confusion. */
3032 val &= GET_MODE_MASK (mode);
3033 val_so_far &= GET_MODE_MASK (mode);
3034 gcc_assert (val == val_so_far);
3039 /* Perform a multiplication and return an rtx for the result.
3040 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3041 TARGET is a suggestion for where to store the result (an rtx).
3043 We check specially for a constant integer as OP1.
3044 If you want this check for OP0 as well, then before calling
3045 you should swap the two operands if OP0 would be constant. */
3048 expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3051 enum mult_variant variant;
3052 struct algorithm algorithm;
3055 /* Handling const0_rtx here allows us to use zero as a rogue value for
3056 coeff below.  */
3057 if (op1 == const0_rtx)
3059 if (op1 == const1_rtx)
3061 if (op1 == constm1_rtx)
3062 return expand_unop (mode,
3063 GET_MODE_CLASS (mode) == MODE_INT
3064 && !unsignedp && flag_trapv
3065 ? negv_optab : neg_optab,
3068 /* These are the operations that are potentially turned into a sequence
3069 of shifts and additions. */
3070 if (SCALAR_INT_MODE_P (mode)
3071 && (unsignedp || !flag_trapv))
3073 HOST_WIDE_INT coeff = 0;
3074 rtx fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
3076 /* synth_mult does an `unsigned int' multiply. As long as the mode is
3077 less than or equal in size to `unsigned int' this doesn't matter.
3078 If the mode is larger than `unsigned int', then synth_mult works
3079 only if the constant value exactly fits in an `unsigned int' without
3080 any truncation. This means that multiplying by negative values does
3081 not work; results are off by 2^32 on a 32 bit machine. */
3083 if (GET_CODE (op1) == CONST_INT)
3085 /* Attempt to handle multiplication of DImode values by negative
3086 coefficients, by performing the multiplication by a positive
3087 multiplier and then inverting the result. */
3088 if (INTVAL (op1) < 0
3089 && GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
3091 /* It's safe to use -INTVAL (op1) even for INT_MIN, as the
3092 result is interpreted as an unsigned coefficient.
3093 Exclude cost of op0 from max_cost to match the cost
3094 calculation of the synth_mult. */
3095 max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET)
3098 && choose_mult_variant (mode, -INTVAL (op1), &algorithm,
3099 &variant, max_cost))
3101 rtx temp = expand_mult_const (mode, op0, -INTVAL (op1),
3102 NULL_RTX, &algorithm,
3104 return expand_unop (mode, neg_optab, temp, target, 0);
3107 else coeff = INTVAL (op1);
3109 else if (GET_CODE (op1) == CONST_DOUBLE)
3111 /* If we are multiplying in DImode, it may still be a win
3112 to try to work with shifts and adds. */
3113 if (CONST_DOUBLE_HIGH (op1) == 0)
3114 coeff = CONST_DOUBLE_LOW (op1);
3115 else if (CONST_DOUBLE_LOW (op1) == 0
3116 && EXACT_POWER_OF_2_OR_ZERO_P (CONST_DOUBLE_HIGH (op1)))
3118 int shift = floor_log2 (CONST_DOUBLE_HIGH (op1))
3119 + HOST_BITS_PER_WIDE_INT;
3120 return expand_shift (LSHIFT_EXPR, mode, op0,
3121 build_int_cst (NULL_TREE, shift),
3126 /* We used to test optimize here, on the grounds that it's better to
3127 produce a smaller program when -O is not used. But this causes
3128 such a terrible slowdown sometimes that it seems better to always
3129 use synth_mult.  */
3132 /* Special case powers of two. */
3133 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3134 return expand_shift (LSHIFT_EXPR, mode, op0,
3135 build_int_cst (NULL_TREE, floor_log2 (coeff)),
3138 /* Exclude cost of op0 from max_cost to match the cost
3139 calculation of the synth_mult. */
3140 max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET);
3141 if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3143 return expand_mult_const (mode, op0, coeff, target,
3144 &algorithm, variant);
3148 if (GET_CODE (op0) == CONST_DOUBLE)
3155 /* Expand x*2.0 as x+x. */
3156 if (GET_CODE (op1) == CONST_DOUBLE
3157 && SCALAR_FLOAT_MODE_P (mode))
3160 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
3162 if (REAL_VALUES_EQUAL (d, dconst2))
3164 op0 = force_reg (GET_MODE (op0), op0);
3165 return expand_binop (mode, add_optab, op0, op0,
3166 target, unsignedp, OPTAB_LIB_WIDEN);
3170 /* This used to use umul_optab if unsigned, but for non-widening multiply
3171 there is no difference between signed and unsigned. */
3172 op0 = expand_binop (mode,
3174 && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
3175 ? smulv_optab : smul_optab,
3176 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
3181 /* Return the smallest n such that 2**n >= X. */
3184 ceil_log2 (unsigned HOST_WIDE_INT x)
3186 return floor_log2 (x - 1) + 1;
3189 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
3190 replace division by D, and put the least significant N bits of the result
3191 in *MULTIPLIER_PTR and return the most significant bit.
3193 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
3194 needed precision is in PRECISION (should be <= N).
3196 PRECISION should be as small as possible so this function can choose
3197 the multiplier more freely.
3199 The rounded-up logarithm of D is placed in *lgup_ptr. A shift count that
3200 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
3202 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
3203 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
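/* A classic instance (an added illustration): for N == 32 and D == 3
   this chooses the multiplier 0xAAAAAAAB with a post-shift of 1, so
   that

     x / 3 == (uint32_t) (((uint64_t) x * 0xAAAAAAABu) >> 33)

   holds for every 32-bit unsigned x.  */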
3206 unsigned HOST_WIDE_INT
3207 choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
3208 rtx *multiplier_ptr, int *post_shift_ptr, int *lgup_ptr)
3210 HOST_WIDE_INT mhigh_hi, mlow_hi;
3211 unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
3212 int lgup, post_shift;
3214 unsigned HOST_WIDE_INT nl, dummy1;
3215 HOST_WIDE_INT nh, dummy2;
3217 /* lgup = ceil(log2(divisor)); */
3218 lgup = ceil_log2 (d);
3220 gcc_assert (lgup <= n);
3222 pow = n + lgup;
3223 pow2 = n + lgup - precision;
3225 /* We could handle this with some effort, but this case is much
3226 better handled directly with a scc insn, so rely on caller using
3227 that.  */
3228 gcc_assert (pow != 2 * HOST_BITS_PER_WIDE_INT);
3230 /* mlow = 2^(N + lgup)/d */
3231 if (pow >= HOST_BITS_PER_WIDE_INT)
3233 nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
3239 nl = (unsigned HOST_WIDE_INT) 1 << pow;
3241 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
3242 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
3244 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
3245 if (pow2 >= HOST_BITS_PER_WIDE_INT)
3246 nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
3248 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
3249 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
3250 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
3252 gcc_assert (!mhigh_hi || nh - d < d);
3253 gcc_assert (mhigh_hi <= 1 && mlow_hi <= 1);
3254 /* Assert that mlow < mhigh. */
3255 gcc_assert (mlow_hi < mhigh_hi
3256 || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo));
3258 /* If precision == N, then mlow, mhigh exceed 2^N
3259 (but they do not exceed 2^(N+1)). */
3261 /* Reduce to lowest terms. */
3262 for (post_shift = lgup; post_shift > 0; post_shift--)
3264 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
3265 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
3275 *post_shift_ptr = post_shift;
3277 if (n < HOST_BITS_PER_WIDE_INT)
3279 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
3280 *multiplier_ptr = GEN_INT (mhigh_lo & mask);
3281 return mhigh_lo >= mask;
3285 *multiplier_ptr = GEN_INT (mhigh_lo);
3290 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
3291 congruent to 1 (mod 2**N). */
3293 static unsigned HOST_WIDE_INT
3294 invert_mod2n (unsigned HOST_WIDE_INT x, int n)
3296 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
3298 /* The algorithm notes that the choice y = x satisfies
3299 x*y == 1 mod 2^3, since x is assumed odd.
3300 Each iteration doubles the number of bits of significance in y. */
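/* Worked example (an added illustration): for x == 3 and n == 8 the
   iteration below converges to y == 0xab, and indeed
   3 * 0xab == 0x201 == 1 (mod 2**8).  */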
3302 unsigned HOST_WIDE_INT mask;
3303 unsigned HOST_WIDE_INT y = x;
3306 mask = (n == HOST_BITS_PER_WIDE_INT
3307 ? ~(unsigned HOST_WIDE_INT) 0
3308 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
3312 y = y * (2 - x*y) & mask; /* Modulo 2^N */
3318 /* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
3319 flavor of OP0 and OP1. ADJ_OPERAND is already the high half of the
3320 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
3321 to become unsigned; if UNSIGNEDP is zero, adjust the unsigned product to
3324 The result is put in TARGET if that is convenient.
3326 MODE is the mode of operation. */
3329 expand_mult_highpart_adjust (enum machine_mode mode, rtx adj_operand, rtx op0,
3330 rtx op1, rtx target, int unsignedp)
3333 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
3335 tem = expand_shift (RSHIFT_EXPR, mode, op0,
3336 build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
3338 tem = expand_and (mode, tem, op1, NULL_RTX);
3340 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3343 tem = expand_shift (RSHIFT_EXPR, mode, op1,
3344 build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
3346 tem = expand_and (mode, tem, op0, NULL_RTX);
3347 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3353 /* Subroutine of expand_mult_highpart. Return the MODE high part of OP. */
3356 extract_high_half (enum machine_mode mode, rtx op)
3358 enum machine_mode wider_mode;
3360 if (mode == word_mode)
3361 return gen_highpart (mode, op);
3363 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3365 wider_mode = GET_MODE_WIDER_MODE (mode);
3366 op = expand_shift (RSHIFT_EXPR, wider_mode, op,
3367 build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode)), 0, 1);
3368 return convert_modes (mode, wider_mode, op, 0);
3371 /* Like expand_mult_highpart, but only consider using a multiplication
3372 optab. OP1 is an rtx for the constant operand. */
3375 expand_mult_highpart_optab (enum machine_mode mode, rtx op0, rtx op1,
3376 rtx target, int unsignedp, int max_cost)
3378 rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode);
3379 enum machine_mode wider_mode;
3384 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3386 wider_mode = GET_MODE_WIDER_MODE (mode);
3387 size = GET_MODE_BITSIZE (mode);
3389 /* Firstly, try using a multiplication insn that only generates the needed
3390 high part of the product, and in the sign flavor of unsignedp. */
3391 if (mul_highpart_cost[mode] < max_cost)
3393 moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
3394 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3395 unsignedp, OPTAB_DIRECT);
3400 /* Secondly, same as above, but use sign flavor opposite of unsignedp.
3401 Need to adjust the result after the multiplication. */
3402 if (size - 1 < BITS_PER_WORD
3403 && (mul_highpart_cost[mode] + 2 * shift_cost[mode][size-1]
3404 + 4 * add_cost[mode] < max_cost))
3406 moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
3407 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3408 unsignedp, OPTAB_DIRECT);
3410 /* We used the wrong signedness. Adjust the result. */
3411 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3415 /* Try widening multiplication. */
3416 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
3417 if (moptab->handlers[wider_mode].insn_code != CODE_FOR_nothing
3418 && mul_widen_cost[wider_mode] < max_cost)
3420 tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
3421 unsignedp, OPTAB_WIDEN);
3423 return extract_high_half (mode, tem);
3426 /* Try widening the mode and performing a non-widening multiplication. */
3427 if (smul_optab->handlers[wider_mode].insn_code != CODE_FOR_nothing
3428 && size - 1 < BITS_PER_WORD
3429 && mul_cost[wider_mode] + shift_cost[mode][size-1] < max_cost)
3431 rtx insns, wop0, wop1;
3433 /* We need to widen the operands, for example to ensure the
3434 constant multiplier is correctly sign or zero extended.
3435 Use a sequence to clean up any instructions emitted by
3436 the conversions if things don't work out. */
3438 wop0 = convert_modes (wider_mode, mode, op0, unsignedp);
3439 wop1 = convert_modes (wider_mode, mode, op1, unsignedp);
3440 tem = expand_binop (wider_mode, smul_optab, wop0, wop1, 0,
3441 unsignedp, OPTAB_WIDEN);
3442 insns = get_insns ();
3448 return extract_high_half (mode, tem);
3452 /* Try widening multiplication of opposite signedness, and adjust. */
3453 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
3454 if (moptab->handlers[wider_mode].insn_code != CODE_FOR_nothing
3455 && size - 1 < BITS_PER_WORD
3456 && (mul_widen_cost[wider_mode] + 2 * shift_cost[mode][size-1]
3457 + 4 * add_cost[mode] < max_cost))
3459 tem = expand_binop (wider_mode, moptab, op0, narrow_op1,
3460 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
3463 tem = extract_high_half (mode, tem);
3464 /* We used the wrong signedness. Adjust the result. */
3465 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3473 /* Emit code to multiply OP0 and OP1 (where OP1 is an integer constant),
3474 putting the high half of the result in TARGET if that is convenient,
3475 and return where the result is. If the operation cannot be performed,
3478 MODE is the mode of operation and result.
3480 UNSIGNEDP nonzero means unsigned multiply.
3482 MAX_COST is the total allowed cost for the expanded RTL. */
3485 expand_mult_highpart (enum machine_mode mode, rtx op0, rtx op1,
3486 rtx target, int unsignedp, int max_cost)
3488 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
3489 unsigned HOST_WIDE_INT cnst1;
3491 bool sign_adjust = false;
3492 enum mult_variant variant;
3493 struct algorithm alg;
3496 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3497 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
3498 gcc_assert (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT);
3500 cnst1 = INTVAL (op1) & GET_MODE_MASK (mode);
3502 /* We can't optimize modes wider than BITS_PER_WORD.
3503 ??? We might be able to perform double-word arithmetic if
3504 mode == word_mode, however all the cost calculations in
3505 synth_mult etc. assume single-word operations. */
3506 if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
3507 return expand_mult_highpart_optab (mode, op0, op1, target,
3508 unsignedp, max_cost);
3510 extra_cost = shift_cost[mode][GET_MODE_BITSIZE (mode) - 1];
3512 /* Check whether we try to multiply by a negative constant. */
3513 if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
3516 extra_cost += add_cost[mode];
3519 /* See whether shift/add multiplication is cheap enough. */
3520 if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
3521 max_cost - extra_cost))
3523 /* See whether the specialized multiplication optabs are
3524 cheaper than the shift/add version. */
3525 tem = expand_mult_highpart_optab (mode, op0, op1, target, unsignedp,
3526 alg.cost.cost + extra_cost);
3530 tem = convert_to_mode (wider_mode, op0, unsignedp);
3531 tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
3532 tem = extract_high_half (mode, tem);
3534 /* Adjust result for signedness. */
3536 tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);
3540 return expand_mult_highpart_optab (mode, op0, op1, target,
3541 unsignedp, max_cost);
3545 /* Expand signed modulus of OP0 by a power of two D in mode MODE. */
3548 expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
3550 unsigned HOST_WIDE_INT masklow, maskhigh;
3551 rtx result, temp, shift, label;
3554 logd = floor_log2 (d);
3555 result = gen_reg_rtx (mode);
3557 /* Avoid conditional branches when they're expensive. */
3558 if (BRANCH_COST >= 2
3561 rtx signmask = emit_store_flag (result, LT, op0, const0_rtx,
3565 signmask = force_reg (mode, signmask);
3566 masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
3567 shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd);
3569 /* Use the rtx_cost of a LSHIFTRT instruction to determine
3570 which instruction sequence to use. If logical right shifts
3571 are expensive then use 2 XORs, 2 SUBs and an AND, otherwise
3572 use a LSHIFTRT, 1 ADD, 1 SUB and an AND. */
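/* Both sequences compute the truncating C remainder x % d.  As a
   sketch (an added illustration, assuming a 32-bit mode, arithmetic
   right shifts and x > INT32_MIN):

     int32_t m = x >> 31;                        (m is 0 or -1)
     r = ((((x ^ m) - m) & (d - 1)) ^ m) - m;    (XOR/SUB form)
     uint32_t k = (uint32_t) m >> (32 - logd);   (k is 0 or d - 1)
     r = ((x + k) & (d - 1)) - k;                (LSHIFTRT form)  */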
3574 temp = gen_rtx_LSHIFTRT (mode, result, shift);
3575 if (lshr_optab->handlers[mode].insn_code == CODE_FOR_nothing
3576 || rtx_cost (temp, SET) > COSTS_N_INSNS (2))
3578 temp = expand_binop (mode, xor_optab, op0, signmask,
3579 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3580 temp = expand_binop (mode, sub_optab, temp, signmask,
3581 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3582 temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
3583 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3584 temp = expand_binop (mode, xor_optab, temp, signmask,
3585 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3586 temp = expand_binop (mode, sub_optab, temp, signmask,
3587 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3591 signmask = expand_binop (mode, lshr_optab, signmask, shift,
3592 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3593 signmask = force_reg (mode, signmask);
3595 temp = expand_binop (mode, add_optab, op0, signmask,
3596 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3597 temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
3598 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3599 temp = expand_binop (mode, sub_optab, temp, signmask,
3600 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3606 /* Mask contains the mode's signbit and the significant bits of the
3607 modulus. By including the signbit in the operation, many targets
3608 can avoid an explicit compare operation in the following comparison
3609 against zero.  */
3611 masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
3612 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3614 masklow |= (HOST_WIDE_INT) -1 << (GET_MODE_BITSIZE (mode) - 1);
3618 maskhigh = (HOST_WIDE_INT) -1
3619 << (GET_MODE_BITSIZE (mode) - HOST_BITS_PER_WIDE_INT - 1);
3621 temp = expand_binop (mode, and_optab, op0,
3622 immed_double_const (masklow, maskhigh, mode),
3623 result, 1, OPTAB_LIB_WIDEN);
3625 emit_move_insn (result, temp);
3627 label = gen_label_rtx ();
3628 do_cmp_and_jump (result, const0_rtx, GE, mode, label);
3630 temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
3631 0, OPTAB_LIB_WIDEN);
3632 masklow = (HOST_WIDE_INT) -1 << logd;
3634 temp = expand_binop (mode, ior_optab, temp,
3635 immed_double_const (masklow, maskhigh, mode),
3636 result, 1, OPTAB_LIB_WIDEN);
3637 temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
3638 0, OPTAB_LIB_WIDEN);
3640 emit_move_insn (result, temp);
3645 /* Expand signed division of OP0 by a power of two D in mode MODE.
3646 This routine is only called for positive values of D. */
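/* For D == 2 (an added illustration) the store-flag sequence below
   computes (x + (x < 0)) >> 1: for x == -5 this is (-5 + 1) >> 1 == -2,
   the correct truncating quotient, whereas -5 >> 1 alone would give
   -3.  */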
3649 expand_sdiv_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
3655 logd = floor_log2 (d);
3656 shift = build_int_cst (NULL_TREE, logd);
3658 if (d == 2 && BRANCH_COST >= 1)
3660 temp = gen_reg_rtx (mode);
3661 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, 1);
3662 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3663 0, OPTAB_LIB_WIDEN);
3664 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
3667 #ifdef HAVE_conditional_move
3668 if (BRANCH_COST >= 2)
3672 /* ??? emit_conditional_move forces a stack adjustment via
3673 compare_from_rtx so, if the sequence is discarded, it will
3674 be lost. Do it now instead. */
3675 do_pending_stack_adjust ();
3678 temp2 = copy_to_mode_reg (mode, op0);
3679 temp = expand_binop (mode, add_optab, temp2, GEN_INT (d-1),
3680 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3681 temp = force_reg (mode, temp);
3683 /* Construct "temp2 = (temp2 < 0) ? temp : temp2". */
3684 temp2 = emit_conditional_move (temp2, LT, temp2, const0_rtx,
3685 mode, temp, temp2, mode, 0);
3688 rtx seq = get_insns ();
3691 return expand_shift (RSHIFT_EXPR, mode, temp2, shift, NULL_RTX, 0);
3697 if (BRANCH_COST >= 2)
3699 int ushift = GET_MODE_BITSIZE (mode) - logd;
3701 temp = gen_reg_rtx (mode);
3702 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, -1);
3703 if (shift_cost[mode][ushift] > COSTS_N_INSNS (1))
3704 temp = expand_binop (mode, and_optab, temp, GEN_INT (d - 1),
3705 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3707 temp = expand_shift (RSHIFT_EXPR, mode, temp,
3708 build_int_cst (NULL_TREE, ushift),
3710 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3711 0, OPTAB_LIB_WIDEN);
3712 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
3715 label = gen_label_rtx ();
3716 temp = copy_to_mode_reg (mode, op0);
3717 do_cmp_and_jump (temp, const0_rtx, GE, mode, label);
3718 expand_inc (temp, GEN_INT (d - 1));
3720 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
3723 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
3724 if that is convenient, and returning where the result is.
3725 You may request either the quotient or the remainder as the result;
3726 specify REM_FLAG nonzero to get the remainder.
3728 CODE is the expression code for which kind of division this is;
3729 it controls how rounding is done. MODE is the machine mode to use.
3730 UNSIGNEDP nonzero means do unsigned division. */
3732 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
3733 and then correct it by or'ing in missing high bits
3734 if result of ANDI is nonzero.
3735 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
3736 This could optimize to a bfexts instruction.
3737 But C doesn't use these operations, so their optimizations are
3739 /* ??? For modulo, we don't actually need the highpart of the first product,
3740 the low part will do nicely. And for small divisors, the second multiply
3741 can also be a low-part only multiply or even be completely left out.
3742 E.g. to calculate the remainder of a division by 3 with a 32 bit
3743 multiply, multiply with 0x55555556 and extract the upper two bits;
3744 the result is exact for inputs up to 0x1fffffff.
3745 The input range can be reduced by using cross-sum rules.
3746 For odd divisors >= 3, the following table gives right shift counts
3747 so that if a number is shifted by an integer multiple of the given
3748 amount, the remainder stays the same:
3749 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
3750 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
3751 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
3752 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
3753 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
3755 Cross-sum rules for even numbers can be derived by leaving as many bits
3756 to the right alone as the divisor has zeros to the right.
3757 E.g. if x is an unsigned 32 bit number:
3758 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
3762 expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
3763 rtx op0, rtx op1, rtx target, int unsignedp)
3765 enum machine_mode compute_mode;
3767 rtx quotient = 0, remainder = 0;
3771 optab optab1, optab2;
3772 int op1_is_constant, op1_is_pow2 = 0;
3773 int max_cost, extra_cost;
3774 static HOST_WIDE_INT last_div_const = 0;
3775 static HOST_WIDE_INT ext_op1;
3777 op1_is_constant = GET_CODE (op1) == CONST_INT;
3778 if (op1_is_constant)
3780 ext_op1 = INTVAL (op1);
3782 ext_op1 &= GET_MODE_MASK (mode);
3783 op1_is_pow2 = ((EXACT_POWER_OF_2_OR_ZERO_P (ext_op1)
3784 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1))));
3788 This is the structure of expand_divmod:
3790 First comes code to fix up the operands so we can perform the operations
3791 correctly and efficiently.
3793 Second comes a switch statement with code specific for each rounding mode.
3794 For some special operands this code emits all RTL for the desired
3795 operation, for other cases, it generates only a quotient and stores it in
3796 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
3797 to indicate that it has not done anything.
3799 Last comes code that finishes the operation. If QUOTIENT is set and
3800 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
3801 QUOTIENT is not set, it is computed using trunc rounding.
3803 We try to generate special code for division and remainder when OP1 is a
3804 constant. If |OP1| = 2**n we can use shifts and some other fast
3805 operations. For other values of OP1, we compute a carefully selected
3806 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
3809 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
3810 half of the product. Different strategies for generating the product are
3811 implemented in expand_mult_highpart.
3813 If what we actually want is the remainder, we generate that by another
3814 by-constant multiplication and a subtraction. */
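/* Editorial example of the by-constant strategy, assuming a 32-bit
   SImode: for OP0 / 7 the chosen multiplier 0x24924925 needs 33 bits,
   so the TRUNC_DIV_EXPR code below emits the add-and-shift variant

       t1 = (OP0 * 0x24924925) >> 32;
       quotient = (t1 + ((OP0 - t1) >> 1)) >> 2;

   instead of a single highpart multiply and shift.  */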
3816 /* We shouldn't be called with OP1 == const1_rtx, but some of the
3817 code below will malfunction if we are, so check here and handle
3818 the special case if so. */
3819 if (op1 == const1_rtx)
3820 return rem_flag ? const0_rtx : op0;
3822 /* When dividing by -1, we could get an overflow.
3823 negv_optab can handle overflows. */
3824 if (! unsignedp && op1 == constm1_rtx)
3825 {
3826 if (rem_flag)
3827 return const0_rtx;
3828 return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
3829 ? negv_optab : neg_optab, op0, target, 0);
3830 }
3832 if (target
3833 /* Don't use the function value register as a target
3834 since we have to read it as well as write it,
3835 and function-inlining gets confused by this. */
3836 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
3837 /* Don't clobber an operand while doing a multi-step calculation. */
3838 || ((rem_flag || op1_is_constant)
3839 && (reg_mentioned_p (target, op0)
3840 || (MEM_P (op0) && MEM_P (target))))
3841 || reg_mentioned_p (target, op1)
3842 || (MEM_P (op1) && MEM_P (target))))
3843 target = 0;
3845 /* Get the mode in which to perform this computation. Normally it will
3846 be MODE, but sometimes we can't do the desired operation in MODE.
3847 If so, pick a wider mode in which we can do the operation. Convert
3848 to that mode at the start to avoid repeated conversions.
3850 First see what operations we need. These depend on the expression
3851 we are evaluating. (We assume that divxx3 insns exist under the
3852 same conditions that modxx3 insns do, and that these insns don't normally
3853 fail. If these assumptions are not correct, we may generate less
3854 efficient code in some cases.)
3856 Then see if we find a mode in which we can open-code that operation
3857 (either a division, modulus, or shift). Finally, check for the smallest
3858 mode for which we can do the operation with a library call. */
3860 /* We might want to refine this now that we have division-by-constant
3861 optimization. Since expand_mult_highpart tries so many variants, it is
3862 not straightforward to generalize this. Maybe we should make an array
3863 of possible modes in init_expmed? Save this for GCC 2.7. */
3865 optab1 = ((op1_is_pow2 && op1 != const0_rtx)
3866 ? (unsignedp ? lshr_optab : ashr_optab)
3867 : (unsignedp ? udiv_optab : sdiv_optab));
3868 optab2 = ((op1_is_pow2 && op1 != const0_rtx)
3869 ? optab1
3870 : (unsignedp ? udivmod_optab : sdivmod_optab));
3872 for (compute_mode = mode; compute_mode != VOIDmode;
3873 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3874 if (optab1->handlers[compute_mode].insn_code != CODE_FOR_nothing
3875 || optab2->handlers[compute_mode].insn_code != CODE_FOR_nothing)
3876 break;
3878 if (compute_mode == VOIDmode)
3879 for (compute_mode = mode; compute_mode != VOIDmode;
3880 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3881 if (optab1->handlers[compute_mode].libfunc
3882 || optab2->handlers[compute_mode].libfunc)
3883 break;
3885 /* If we still couldn't find a mode, use MODE, but expand_binop will
3886 probably die. */
3887 if (compute_mode == VOIDmode)
3888 compute_mode = mode;
3890 if (target && GET_MODE (target) == compute_mode)
3891 tquotient = target;
3892 else
3893 tquotient = gen_reg_rtx (compute_mode);
3895 size = GET_MODE_BITSIZE (compute_mode);
3896 #if 0
3897 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3898 (mode), and thereby get better code when OP1 is a constant. Do that
3899 later. It will require going over all usages of SIZE below. */
3900 size = GET_MODE_BITSIZE (mode);
3901 #endif
3903 /* Only deduct something for a REM if the last divide done was
3904 for a different constant. Then set the constant of the last
3905 divide. */
3906 max_cost = unsignedp ? udiv_cost[compute_mode] : sdiv_cost[compute_mode];
3907 if (rem_flag && ! (last_div_const != 0 && op1_is_constant
3908 && INTVAL (op1) == last_div_const))
3909 max_cost -= mul_cost[compute_mode] + add_cost[compute_mode];
3911 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
3913 /* Now convert to the best mode to use. */
3914 if (compute_mode != mode)
3915 {
3916 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
3917 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
3919 /* convert_modes may have placed op1 into a register, so we
3920 must recompute the following. */
3921 op1_is_constant = GET_CODE (op1) == CONST_INT;
3922 op1_is_pow2 = (op1_is_constant
3923 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3924 || (! unsignedp
3925 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
3926 }
3928 /* If one of the operands is a volatile MEM, copy it into a register. */
3930 if (MEM_P (op0) && MEM_VOLATILE_P (op0))
3931 op0 = force_reg (compute_mode, op0);
3932 if (MEM_P (op1) && MEM_VOLATILE_P (op1))
3933 op1 = force_reg (compute_mode, op1);
3935 /* If we need the remainder or if OP1 is constant, we need to
3936 put OP0 in a register in case it has any queued subexpressions. */
3937 if (rem_flag || op1_is_constant)
3938 op0 = force_reg (compute_mode, op0);
3940 last = get_last_insn ();
3942 /* Promote floor rounding to trunc rounding for unsigned operations. */
3943 if (unsignedp)
3944 {
3945 if (code == FLOOR_DIV_EXPR)
3946 code = TRUNC_DIV_EXPR;
3947 if (code == FLOOR_MOD_EXPR)
3948 code = TRUNC_MOD_EXPR;
3949 if (code == EXACT_DIV_EXPR && op1_is_pow2)
3950 code = TRUNC_DIV_EXPR;
3951 }
3953 if (op1 != const0_rtx)
3954 switch (code)
3955 {
3956 case TRUNC_MOD_EXPR:
3957 case TRUNC_DIV_EXPR:
3958 if (op1_is_constant)
3959 {
3960 if (unsignedp)
3961 {
3962 unsigned HOST_WIDE_INT mh;
3963 int pre_shift, post_shift;
3964 int dummy;
3965 rtx ml;
3966 unsigned HOST_WIDE_INT d = (INTVAL (op1)
3967 & GET_MODE_MASK (compute_mode));
3969 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3970 {
3971 pre_shift = floor_log2 (d);
3972 if (rem_flag)
3973 {
3974 remainder
3975 = expand_binop (compute_mode, and_optab, op0,
3976 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3977 remainder, 1, OPTAB_LIB_WIDEN);
3979 if (remainder)
3980 return gen_lowpart (mode, remainder);
3981 }
3982 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3983 build_int_cst (NULL_TREE,
3984 pre_shift),
3985 tquotient, 1);
3986 }
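/* Editorial note: for d == 8, say, the block above emits OP0 & 7 for
   the remainder and OP0 >> 3 for the quotient.  */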
3987 else if (size <= HOST_BITS_PER_WIDE_INT)
3988 {
3989 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
3990 {
3991 /* Most significant bit of divisor is set; emit an scc
3992 insn. */
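/* Editorial note: with the top bit of D set, an unsigned quotient can
   only be 0 or 1, so OP0 / D is simply OP0 >= D.  */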
3993 quotient = emit_store_flag (tquotient, GEU, op0, op1,
3994 compute_mode, 1, 1);
3995 if (quotient == 0)
3996 goto fail1;
3997 }
3998 else
3999 {
4000 /* Find a suitable multiplier and right shift count
4001 instead of multiplying with D. */
4003 mh = choose_multiplier (d, size, size,
4004 &ml, &post_shift, &dummy);
4006 /* If the suggested multiplier is more than SIZE bits,
4007 we can do better for even divisors, using an
4008 initial right shift. */
4009 if (mh != 0 && (d & 1) == 0)
4010 {
4011 pre_shift = floor_log2 (d & -d);
4012 mh = choose_multiplier (d >> pre_shift, size,
4013 size - pre_shift,
4014 &ml, &post_shift, &dummy);
4015 gcc_assert (!mh);
4016 }
4017 else
4018 pre_shift = 0;
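/* Editorial example: for d == 12 the code above uses pre_shift == 2
   and chooses a multiplier for d >> 2 == 3, which fits in SIZE bits.  */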
4020 if (mh != 0)
4021 {
4022 rtx t1, t2, t3, t4;
4024 if (post_shift - 1 >= BITS_PER_WORD)
4025 goto fail1;
4027 extra_cost
4028 = (shift_cost[compute_mode][post_shift - 1]
4029 + shift_cost[compute_mode][1]
4030 + 2 * add_cost[compute_mode]);
4031 t1 = expand_mult_highpart (compute_mode, op0, ml,
4032 NULL_RTX, 1,
4033 max_cost - extra_cost);
4034 if (t1 == 0)
4035 goto fail1;
4036 t2 = force_operand (gen_rtx_MINUS (compute_mode,
4037 op0, t1),
4038 NULL_RTX);
4039 t3 = expand_shift
4040 (RSHIFT_EXPR, compute_mode, t2,
4041 build_int_cst (NULL_TREE, 1),
4042 NULL_RTX, 1);
4043 t4 = force_operand (gen_rtx_PLUS (compute_mode,
4044 t1, t3),
4045 NULL_RTX);
4046 quotient = expand_shift
4047 (RSHIFT_EXPR, compute_mode, t4,
4048 build_int_cst (NULL_TREE, post_shift - 1),
4049 tquotient, 1);
4050 }
4051 else
4052 {
4053 rtx t1, t2;
4055 if (pre_shift >= BITS_PER_WORD
4056 || post_shift >= BITS_PER_WORD)
4057 goto fail1;
4059 t1 = expand_shift
4060 (RSHIFT_EXPR, compute_mode, op0,
4061 build_int_cst (NULL_TREE, pre_shift),
4062 NULL_RTX, 1);
4063 extra_cost
4064 = (shift_cost[compute_mode][pre_shift]
4065 + shift_cost[compute_mode][post_shift]);
4066 t2 = expand_mult_highpart (compute_mode, t1, ml,
4067 NULL_RTX, 1,
4068 max_cost - extra_cost);
4069 if (t2 == 0)
4070 goto fail1;
4071 quotient = expand_shift
4072 (RSHIFT_EXPR, compute_mode, t2,
4073 build_int_cst (NULL_TREE, post_shift),
4074 tquotient, 1);
4075 }
4076 }
4078 else /* Too wide mode to use tricky code */
4079 break;
4081 insn = get_last_insn ();
4082 if (insn != last
4083 && (set = single_set (insn)) != 0
4084 && SET_DEST (set) == quotient)
4085 set_unique_reg_note (insn,
4086 REG_EQUAL,
4087 gen_rtx_UDIV (compute_mode, op0, op1));
4088 }
4089 else /* TRUNC_DIV, signed */
4090 {
4091 unsigned HOST_WIDE_INT ml;
4092 int lgup, post_shift;
4094 HOST_WIDE_INT d = INTVAL (op1);
4095 unsigned HOST_WIDE_INT abs_d;
4097 /* Since d might be INT_MIN, we have to cast to
4098 unsigned HOST_WIDE_INT before negating to avoid
4099 undefined signed overflow. */
4100 abs_d = (d >= 0
4101 ? (unsigned HOST_WIDE_INT) d
4102 : - (unsigned HOST_WIDE_INT) d);
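/* Editorial note: in 32-bit terms, - (unsigned HOST_WIDE_INT) INT_MIN
   wraps to 0x80000000, which is exactly the absolute value wanted.  */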
4104 /* n rem d = n rem -d */
4105 if (rem_flag && d < 0)
4106 {
4107 d = abs_d;
4108 op1 = gen_int_mode (abs_d, compute_mode);
4109 }
4111 if (d == 1)
4112 quotient = op0;
4113 else if (d == -1)
4114 quotient = expand_unop (compute_mode, neg_optab, op0,
4115 tquotient, 0);
4116 else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
4117 {
4118 /* This case is not handled correctly below. */
4119 quotient = emit_store_flag (tquotient, EQ, op0, op1,
4120 compute_mode, 1, 1);
4121 if (quotient == 0)
4122 goto fail1;
4123 }
4124 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
4125 && (rem_flag ? smod_pow2_cheap[compute_mode]
4126 : sdiv_pow2_cheap[compute_mode])
4127 /* We assume that cheap metric is true if the
4128 optab has an expander for this mode. */
4129 && (((rem_flag ? smod_optab : sdiv_optab)
4130 ->handlers[compute_mode].insn_code
4131 != CODE_FOR_nothing)
4132 || (sdivmod_optab->handlers[compute_mode]
4133 .insn_code != CODE_FOR_nothing)))
4134 ;
4135 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
4136 {
4137 if (rem_flag)
4138 {
4139 remainder = expand_smod_pow2 (compute_mode, op0, d);
4140 if (remainder)
4141 return gen_lowpart (mode, remainder);
4142 }
4144 if (sdiv_pow2_cheap[compute_mode]
4145 && ((sdiv_optab->handlers[compute_mode].insn_code
4146 != CODE_FOR_nothing)
4147 || (sdivmod_optab->handlers[compute_mode].insn_code
4148 != CODE_FOR_nothing)))
4149 quotient = expand_divmod (0, TRUNC_DIV_EXPR,
4150 compute_mode, op0,
4151 gen_int_mode (abs_d,
4152 compute_mode),
4153 NULL_RTX, 0);
4154 else
4155 quotient = expand_sdiv_pow2 (compute_mode, op0, abs_d);
4157 /* We have computed OP0 / abs(OP1). If OP1 is negative,
4158 negate the quotient. */
4159 if (d < 0)
4160 {
4161 insn = get_last_insn ();
4162 if (insn != last
4163 && (set = single_set (insn)) != 0
4164 && SET_DEST (set) == quotient
4165 && abs_d < ((unsigned HOST_WIDE_INT) 1
4166 << (HOST_BITS_PER_WIDE_INT - 1)))
4167 set_unique_reg_note (insn,
4168 REG_EQUAL,
4169 gen_rtx_DIV (compute_mode,
4170 op0,
4171 GEN_INT
4172 (trunc_int_for_mode
4173 (abs_d,
4174 compute_mode))));
4176 quotient = expand_unop (compute_mode, neg_optab,
4177 quotient, quotient, 0);
4178 }
4179 }
4180 else if (size <= HOST_BITS_PER_WIDE_INT)
4181 {
4182 choose_multiplier (abs_d, size, size - 1,
4183 &mlr, &post_shift, &lgup);
4184 ml = (unsigned HOST_WIDE_INT) INTVAL (mlr);
4185 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
4186 {
4187 rtx t1, t2, t3;
4189 if (post_shift >= BITS_PER_WORD
4190 || size - 1 >= BITS_PER_WORD)
4191 goto fail1;
4193 extra_cost = (shift_cost[compute_mode][post_shift]
4194 + shift_cost[compute_mode][size - 1]
4195 + add_cost[compute_mode]);
4196 t1 = expand_mult_highpart (compute_mode, op0, mlr,
4197 NULL_RTX, 0,
4198 max_cost - extra_cost);
4199 if (t1 == 0)
4200 goto fail1;
4201 t2 = expand_shift
4202 (RSHIFT_EXPR, compute_mode, t1,
4203 build_int_cst (NULL_TREE, post_shift),
4204 NULL_RTX, 0);
4205 t3 = expand_shift
4206 (RSHIFT_EXPR, compute_mode, op0,
4207 build_int_cst (NULL_TREE, size - 1),
4208 NULL_RTX, 0);
4209 if (d < 0)
4210 quotient
4211 = force_operand (gen_rtx_MINUS (compute_mode,
4212 t3, t2),
4213 tquotient);
4214 else
4215 quotient
4216 = force_operand (gen_rtx_MINUS (compute_mode,
4217 t2, t3),
4218 tquotient);
4219 }
4220 else
4221 {
4222 rtx t1, t2, t3, t4;
4224 if (post_shift >= BITS_PER_WORD
4225 || size - 1 >= BITS_PER_WORD)
4226 goto fail1;
4228 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
4229 mlr = gen_int_mode (ml, compute_mode);
4230 extra_cost = (shift_cost[compute_mode][post_shift]
4231 + shift_cost[compute_mode][size - 1]
4232 + 2 * add_cost[compute_mode]);
4233 t1 = expand_mult_highpart (compute_mode, op0, mlr,
4234 NULL_RTX, 0,
4235 max_cost - extra_cost);
4236 if (t1 == 0)
4237 goto fail1;
4238 t2 = force_operand (gen_rtx_PLUS (compute_mode,
4239 t1, op0),
4240 NULL_RTX);
4241 t3 = expand_shift
4242 (RSHIFT_EXPR, compute_mode, t2,
4243 build_int_cst (NULL_TREE, post_shift),
4244 NULL_RTX, 0);
4245 t4 = expand_shift
4246 (RSHIFT_EXPR, compute_mode, op0,
4247 build_int_cst (NULL_TREE, size - 1),
4248 NULL_RTX, 0);
4249 if (d < 0)
4250 quotient
4251 = force_operand (gen_rtx_MINUS (compute_mode,
4252 t4, t3),
4253 tquotient);
4254 else
4255 quotient
4256 = force_operand (gen_rtx_MINUS (compute_mode,
4257 t3, t4),
4258 tquotient);
4259 }
4260 }
4261 else /* Too wide mode to use tricky code */
4262 break;
4264 insn = get_last_insn ();
4265 if (insn != last
4266 && (set = single_set (insn)) != 0
4267 && SET_DEST (set) == quotient)
4268 set_unique_reg_note (insn,
4269 REG_EQUAL,
4270 gen_rtx_DIV (compute_mode, op0, op1));
4271 }
4272 break;
4273 }
4274 fail1:
4275 delete_insns_since (last);
4276 break;
4278 case FLOOR_DIV_EXPR:
4279 case FLOOR_MOD_EXPR:
4280 /* We will come here only for signed operations. */
4281 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
4282 {
4283 unsigned HOST_WIDE_INT mh;
4284 int pre_shift, lgup, post_shift;
4285 HOST_WIDE_INT d = INTVAL (op1);
4286 rtx ml;
4288 if (d > 0)
4289 {
4290 /* We could just as easily deal with negative constants here,
4291 but it does not seem worth the trouble for GCC 2.6. */
4292 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
4293 {
4294 pre_shift = floor_log2 (d);
4295 if (rem_flag)
4296 {
4297 remainder = expand_binop (compute_mode, and_optab, op0,
4298 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
4299 remainder, 0, OPTAB_LIB_WIDEN);
4300 if (remainder)
4301 return gen_lowpart (mode, remainder);
4302 }
4303 quotient = expand_shift
4304 (RSHIFT_EXPR, compute_mode, op0,
4305 build_int_cst (NULL_TREE, pre_shift),
4306 tquotient, 0);
4307 }
4308 else
4309 {
4310 rtx t1, t2, t3, t4;
4312 mh = choose_multiplier (d, size, size - 1,
4313 &ml, &post_shift, &lgup);
4314 gcc_assert (!mh);
4316 if (post_shift < BITS_PER_WORD
4317 && size - 1 < BITS_PER_WORD)
4318 {
4319 t1 = expand_shift
4320 (RSHIFT_EXPR, compute_mode, op0,
4321 build_int_cst (NULL_TREE, size - 1),
4322 NULL_RTX, 0);
4323 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
4324 NULL_RTX, 0, OPTAB_WIDEN);
4325 extra_cost = (shift_cost[compute_mode][post_shift]
4326 + shift_cost[compute_mode][size - 1]
4327 + 2 * add_cost[compute_mode]);
4328 t3 = expand_mult_highpart (compute_mode, t2, ml,
4329 NULL_RTX, 1,
4330 max_cost - extra_cost);
4331 if (t3 != 0)
4332 {
4333 t4 = expand_shift
4334 (RSHIFT_EXPR, compute_mode, t3,
4335 build_int_cst (NULL_TREE, post_shift),
4336 NULL_RTX, 1);
4337 quotient = expand_binop (compute_mode, xor_optab,
4338 t4, t1, tquotient, 0,
4339 OPTAB_WIDEN);
4340 }
4341 }
4342 }
4343 }
4344 else
4345 {
4346 rtx nsign, t1, t2, t3, t4;
4347 t1 = force_operand (gen_rtx_PLUS (compute_mode,
4348 op0, constm1_rtx), NULL_RTX);
4349 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
4350 0, OPTAB_WIDEN);
4351 nsign = expand_shift
4352 (RSHIFT_EXPR, compute_mode, t2,
4353 build_int_cst (NULL_TREE, size - 1),
4354 NULL_RTX, 0);
4355 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
4356 NULL_RTX);
4357 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
4358 NULL_RTX, 0);
4359 if (t4)
4360 {
4361 rtx t5;
4362 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
4363 NULL_RTX, 0);
4364 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4365 t4, t5),
4366 tquotient);
4367 }
4368 }
4369 }
4371 if (quotient != 0)
4372 break;
4373 delete_insns_since (last);
4375 /* Try using an instruction that produces both the quotient and
4376 remainder, using truncation. We can easily compensate the quotient
4377 or remainder to get floor rounding, once we have the remainder.
4378 Notice that we compute also the final remainder value here,
4379 and return the result right away. */
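/* Editorial sketch of the fixup below, in C terms for signed a and b:

       q = a / b; r = a % b;            truncating divmod
       if (r != 0 && (a ^ b) < 0)       inexact and signs differ
         q -= 1, r += b;                now q == floor (a / b)  */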
4380 if (target == 0 || GET_MODE (target) != compute_mode)
4381 target = gen_reg_rtx (compute_mode);
4383 if (rem_flag)
4384 {
4385 remainder
4386 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4387 quotient = gen_reg_rtx (compute_mode);
4388 }
4389 else
4390 {
4391 quotient
4392 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4393 remainder = gen_reg_rtx (compute_mode);
4394 }
4396 if (expand_twoval_binop (sdivmod_optab, op0, op1,
4397 quotient, remainder, 0))
4398 {
4399 /* This could be computed with a branch-less sequence.
4400 Save that for later. */
4402 rtx label = gen_label_rtx ();
4403 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
4404 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4405 NULL_RTX, 0, OPTAB_WIDEN);
4406 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
4407 expand_dec (quotient, const1_rtx);
4408 expand_inc (remainder, op1);
4409 emit_label (label);
4410 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4411 }
4413 /* No luck with division elimination or divmod. Have to do it
4414 by conditionally adjusting op0 *and* the result. */
4415 {
4416 rtx label1, label2, label3, label4, label5;
4417 rtx adjusted_op0;
4418 rtx tem;
4420 quotient = gen_reg_rtx (compute_mode);
4421 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4422 label1 = gen_label_rtx ();
4423 label2 = gen_label_rtx ();
4424 label3 = gen_label_rtx ();
4425 label4 = gen_label_rtx ();
4426 label5 = gen_label_rtx ();
4427 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4428 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
4429 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4430 quotient, 0, OPTAB_LIB_WIDEN);
4431 if (tem != quotient)
4432 emit_move_insn (quotient, tem);
4433 emit_jump_insn (gen_jump (label5));
4435 emit_label (label1);
4436 expand_inc (adjusted_op0, const1_rtx);
4437 emit_jump_insn (gen_jump (label4));
4439 emit_label (label2);
4440 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
4441 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4442 quotient, 0, OPTAB_LIB_WIDEN);
4443 if (tem != quotient)
4444 emit_move_insn (quotient, tem);
4445 emit_jump_insn (gen_jump (label5));
4447 emit_label (label3);
4448 expand_dec (adjusted_op0, const1_rtx);
4449 emit_label (label4);
4450 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4451 quotient, 0, OPTAB_LIB_WIDEN);
4452 if (tem != quotient)
4453 emit_move_insn (quotient, tem);
4454 expand_dec (quotient, const1_rtx);
4455 emit_label (label5);
4456 }
4457 break;
4459 case CEIL_DIV_EXPR:
4460 case CEIL_MOD_EXPR:
4461 if (unsignedp)
4462 {
4463 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
4464 {
4465 rtx t1, t2, t3;
4466 unsigned HOST_WIDE_INT d = INTVAL (op1);
4467 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4468 build_int_cst (NULL_TREE, floor_log2 (d)),
4469 tquotient, 1);
4470 t2 = expand_binop (compute_mode, and_optab, op0,
4471 GEN_INT (d - 1),
4472 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4473 t3 = gen_reg_rtx (compute_mode);
4474 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4475 compute_mode, 1, 1);
4476 if (t3 == 0)
4477 {
4478 rtx lab;
4479 lab = gen_label_rtx ();
4480 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4481 expand_inc (t1, const1_rtx);
4482 emit_label (lab);
4483 quotient = t1;
4484 }
4485 else
4486 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4487 t1, t3),
4488 tquotient);
4489 }
4492 /* Try using an instruction that produces both the quotient and
4493 remainder, using truncation. We can easily compensate the
4494 quotient or remainder to get ceiling rounding, once we have the
4495 remainder. Notice that we compute also the final remainder
4496 value here, and return the result right away. */
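/* Editorial sketch of the fixup below, in C terms for unsigned a and b:

       q = a / b; r = a % b;
       if (r != 0)
         q += 1, r -= b;                now q == ceil (a / b)  */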
4497 if (target == 0 || GET_MODE (target) != compute_mode)
4498 target = gen_reg_rtx (compute_mode);
4500 if (rem_flag)
4501 {
4502 remainder = (REG_P (target)
4503 ? target : gen_reg_rtx (compute_mode));
4504 quotient = gen_reg_rtx (compute_mode);
4505 }
4506 else
4507 {
4508 quotient = (REG_P (target)
4509 ? target : gen_reg_rtx (compute_mode));
4510 remainder = gen_reg_rtx (compute_mode);
4511 }
4513 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
4514 remainder, 1))
4515 {
4516 /* This could be computed with a branch-less sequence.
4517 Save that for later. */
4518 rtx label = gen_label_rtx ();
4519 do_cmp_and_jump (remainder, const0_rtx, EQ,
4520 compute_mode, label);
4521 expand_inc (quotient, const1_rtx);
4522 expand_dec (remainder, op1);
4523 emit_label (label);
4524 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4525 }
4527 /* No luck with division elimination or divmod. Have to do it
4528 by conditionally adjusting op0 *and* the result. */
4529 {
4530 rtx label1, label2;
4531 rtx adjusted_op0, tem;
4533 quotient = gen_reg_rtx (compute_mode);
4534 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4535 label1 = gen_label_rtx ();
4536 label2 = gen_label_rtx ();
4537 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
4538 compute_mode, label1);
4539 emit_move_insn (quotient, const0_rtx);
4540 emit_jump_insn (gen_jump (label2));
4542 emit_label (label1);
4543 expand_dec (adjusted_op0, const1_rtx);
4544 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
4545 quotient, 1, OPTAB_LIB_WIDEN);
4546 if (tem != quotient)
4547 emit_move_insn (quotient, tem);
4548 expand_inc (quotient, const1_rtx);
4549 emit_label (label2);
4550 }
4551 }
4552 else /* signed */
4553 {
4554 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
4555 && INTVAL (op1) >= 0)
4556 {
4557 /* This is extremely similar to the code for the unsigned case
4558 above. For 2.7 we should merge these variants, but for
4559 2.6.1 I don't want to touch the code for unsigned since that
4560 gets used in C. The signed case will only be used by other
4561 languages (Ada). */
4562 rtx t1, t2, t3;
4564 unsigned HOST_WIDE_INT d = INTVAL (op1);
4565 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4566 build_int_cst (NULL_TREE, floor_log2 (d)),
4567 tquotient, 0);
4568 t2 = expand_binop (compute_mode, and_optab, op0,
4569 GEN_INT (d - 1),
4570 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4571 t3 = gen_reg_rtx (compute_mode);
4572 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4573 compute_mode, 1, 1);
4574 if (t3 == 0)
4575 {
4576 rtx lab;
4577 lab = gen_label_rtx ();
4578 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4579 expand_inc (t1, const1_rtx);
4580 emit_label (lab);
4581 quotient = t1;
4582 }
4583 else
4584 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4585 t1, t3),
4586 tquotient);
4587 }
4590 /* Try using an instruction that produces both the quotient and
4591 remainder, using truncation. We can easily compensate the
4592 quotient or remainder to get ceiling rounding, once we have the
4593 remainder. Notice that we compute also the final remainder
4594 value here, and return the result right away. */
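/* Editorial sketch, in C terms for signed a and b: as in the unsigned
   case above, but only round up when the signs agree:

       if (r != 0 && (a ^ b) >= 0)
         q += 1, r -= b;  */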
4595 if (target == 0 || GET_MODE (target) != compute_mode)
4596 target = gen_reg_rtx (compute_mode);
4597 if (rem_flag)
4598 {
4599 remainder = (REG_P (target)
4600 ? target : gen_reg_rtx (compute_mode));
4601 quotient = gen_reg_rtx (compute_mode);
4602 }
4603 else
4604 {
4605 quotient = (REG_P (target)
4606 ? target : gen_reg_rtx (compute_mode));
4607 remainder = gen_reg_rtx (compute_mode);
4608 }
4610 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
4611 remainder, 0))
4612 {
4613 /* This could be computed with a branch-less sequence.
4614 Save that for later. */
4615 rtx tem;
4616 rtx label = gen_label_rtx ();
4617 do_cmp_and_jump (remainder, const0_rtx, EQ,
4618 compute_mode, label);
4619 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4620 NULL_RTX, 0, OPTAB_WIDEN);
4621 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
4622 expand_inc (quotient, const1_rtx);
4623 expand_dec (remainder, op1);
4624 emit_label (label);
4625 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4626 }
4628 /* No luck with division elimination or divmod. Have to do it
4629 by conditionally adjusting op0 *and* the result. */
4630 {
4631 rtx label1, label2, label3, label4, label5;
4632 rtx adjusted_op0;
4633 rtx tem;
4635 quotient = gen_reg_rtx (compute_mode);
4636 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4637 label1 = gen_label_rtx ();
4638 label2 = gen_label_rtx ();
4639 label3 = gen_label_rtx ();
4640 label4 = gen_label_rtx ();
4641 label5 = gen_label_rtx ();
4642 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4643 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
4644 compute_mode, label1);
4645 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4646 quotient, 0, OPTAB_LIB_WIDEN);
4647 if (tem != quotient)
4648 emit_move_insn (quotient, tem);
4649 emit_jump_insn (gen_jump (label5));
4651 emit_label (label1);
4652 expand_dec (adjusted_op0, const1_rtx);
4653 emit_jump_insn (gen_jump (label4));
4655 emit_label (label2);
4656 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
4657 compute_mode, label3);
4658 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4659 quotient, 0, OPTAB_LIB_WIDEN);
4660 if (tem != quotient)
4661 emit_move_insn (quotient, tem);
4662 emit_jump_insn (gen_jump (label5));
4664 emit_label (label3);
4665 expand_inc (adjusted_op0, const1_rtx);
4666 emit_label (label4);
4667 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4668 quotient, 0, OPTAB_LIB_WIDEN);
4669 if (tem != quotient)
4670 emit_move_insn (quotient, tem);
4671 expand_inc (quotient, const1_rtx);
4672 emit_label (label5);
4673 }
4674 }
4675 break;
4677 case EXACT_DIV_EXPR:
4678 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
4679 {
4680 HOST_WIDE_INT d = INTVAL (op1);
4681 unsigned HOST_WIDE_INT ml;
4682 int pre_shift;
4683 rtx t1;
4685 pre_shift = floor_log2 (d & -d);
4686 ml = invert_mod2n (d >> pre_shift, size);
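/* Editorial example: exact division by 12 in 32 bits becomes
   (OP0 >> 2) * 0xAAAAAAAB, since 0xAAAAAAAB * 3 == 1 (mod 2**32).  */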
4687 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4688 build_int_cst (NULL_TREE, pre_shift),
4689 NULL_RTX, unsignedp);
4690 quotient = expand_mult (compute_mode, t1,
4691 gen_int_mode (ml, compute_mode),
4692 NULL_RTX, 1);
4694 insn = get_last_insn ();
4695 set_unique_reg_note (insn,
4696 REG_EQUAL,
4697 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
4698 compute_mode,
4699 op0, op1));
4700 }
4701 break;
4703 case ROUND_DIV_EXPR:
4704 case ROUND_MOD_EXPR:
4705 if (unsignedp)
4706 {
4707 rtx tem;
4708 rtx label;
4709 label = gen_label_rtx ();
4710 quotient = gen_reg_rtx (compute_mode);
4711 remainder = gen_reg_rtx (compute_mode);
4712 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
4714 {
4715 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
4716 quotient, 1, OPTAB_LIB_WIDEN);
4717 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
4718 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4719 remainder, 1, OPTAB_LIB_WIDEN);
4720 }
4721 tem = plus_constant (op1, -1);
4722 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4723 build_int_cst (NULL_TREE, 1),
4724 NULL_RTX, 1);
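/* Editorial note: TEM is (D - 1) / 2, so the test below rounds the
   quotient up exactly when the remainder is closer to D than to 0.  */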
4725 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
4726 expand_inc (quotient, const1_rtx);
4727 expand_dec (remainder, op1);
4728 emit_label (label);
4729 }
4730 else
4731 {
4732 rtx abs_rem, abs_op1, tem, mask;
4733 rtx label;
4734 label = gen_label_rtx ();
4735 quotient = gen_reg_rtx (compute_mode);
4736 remainder = gen_reg_rtx (compute_mode);
4737 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
4739 {
4740 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
4741 quotient, 0, OPTAB_LIB_WIDEN);
4742 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
4743 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4744 remainder, 0, OPTAB_LIB_WIDEN);
4745 }
4746 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
4747 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
4748 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
4749 build_int_cst (NULL_TREE, 1),
4750 NULL_RTX, 1);
4751 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
4752 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4753 NULL_RTX, 0, OPTAB_WIDEN);
4754 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4755 build_int_cst (NULL_TREE, size - 1),
4756 NULL_RTX, 0);
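/* Editorial note: MASK is -1 when OP0 and OP1 have opposite signs and
   0 otherwise, so (MASK ^ 1) - MASK below is +1 or -1 and
   (MASK ^ OP1) - MASK is OP1 or -OP1; the quotient is rounded away
   from zero and the remainder adjusted to match.  */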
4757 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
4758 NULL_RTX, 0, OPTAB_WIDEN);
4759 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4760 NULL_RTX, 0, OPTAB_WIDEN);
4761 expand_inc (quotient, tem);
4762 tem = expand_binop (compute_mode, xor_optab, mask, op1,
4763 NULL_RTX, 0, OPTAB_WIDEN);
4764 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4765 NULL_RTX, 0, OPTAB_WIDEN);
4766 expand_dec (remainder, tem);
4767 emit_label (label);
4768 }
4769 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4771 default:
4772 gcc_unreachable ();
4773 }
4775 if (quotient == 0)
4776 {
4777 if (target && GET_MODE (target) != compute_mode)
4778 target = 0;
4780 if (rem_flag)
4781 {
4782 /* Try to produce the remainder without producing the quotient.
4783 If we seem to have a divmod pattern that does not require widening,
4784 don't try widening here. We should really have a WIDEN argument
4785 to expand_twoval_binop, since what we'd really like to do here is
4786 1) try a mod insn in compute_mode
4787 2) try a divmod insn in compute_mode
4788 3) try a div insn in compute_mode and multiply-subtract to get
4789 the remainder
4790 4) try the same things with widening allowed. */
4791 remainder
4792 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4793 op0, op1, target,
4794 unsignedp,
4795 ((optab2->handlers[compute_mode].insn_code
4796 != CODE_FOR_nothing)
4797 ? OPTAB_DIRECT : OPTAB_WIDEN));
4798 if (remainder == 0)
4799 {
4800 /* No luck there. Can we do remainder and divide at once
4801 without a library call? */
4802 remainder = gen_reg_rtx (compute_mode);
4803 if (! expand_twoval_binop ((unsignedp
4804 ? udivmod_optab
4805 : sdivmod_optab),
4806 op0, op1,
4807 NULL_RTX, remainder, unsignedp))
4808 remainder = 0;
4809 }
4811 if (remainder)
4812 return gen_lowpart (mode, remainder);
4813 }
4815 /* Produce the quotient. Try a quotient insn, but not a library call.
4816 If we have a divmod in this mode, use it in preference to widening
4817 the div (for this test we assume it will not fail). Note that optab2
4818 is set to the one of the two optabs that the call below will use. */
4819 quotient
4820 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
4821 op0, op1, rem_flag ? NULL_RTX : target,
4822 unsignedp,
4823 ((optab2->handlers[compute_mode].insn_code
4824 != CODE_FOR_nothing)
4825 ? OPTAB_DIRECT : OPTAB_WIDEN));
4827 if (quotient == 0)
4828 {
4829 /* No luck there. Try a quotient-and-remainder insn,
4830 keeping the quotient alone. */
4831 quotient = gen_reg_rtx (compute_mode);
4832 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
4833 op0, op1,
4834 quotient, NULL_RTX, unsignedp))
4835 {
4838 /* Still no luck. If we are not computing the remainder,
4839 use a library call for the quotient. */
4840 quotient = sign_expand_binop (compute_mode,
4841 udiv_optab, sdiv_optab,
4842 op0, op1, target,
4843 unsignedp, OPTAB_LIB_WIDEN);
4844 }
4845 }
4846 }
4848 if (rem_flag)
4849 {
4850 if (target && GET_MODE (target) != compute_mode)
4851 target = 0;
4853 if (quotient == 0)
4854 {
4855 /* No divide instruction either. Use library for remainder. */
4856 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4857 op0, op1, target,
4858 unsignedp, OPTAB_LIB_WIDEN);
4859 /* No remainder function. Try a quotient-and-remainder
4860 function, keeping the remainder. */
4861 if (remainder == 0)
4862 {
4863 remainder = gen_reg_rtx (compute_mode);
4864 if (!expand_twoval_binop_libfunc
4865 (unsignedp ? udivmod_optab : sdivmod_optab,
4866 op0, op1,
4867 NULL_RTX, remainder,
4868 unsignedp ? UMOD : MOD))
4869 remainder = NULL_RTX;
4870 }
4871 }
4872 else
4873 {
4874 /* We divided. Now finish doing X - Y * (X / Y). */
4875 remainder = expand_mult (compute_mode, quotient, op1,
4876 NULL_RTX, unsignedp);
4877 remainder = expand_binop (compute_mode, sub_optab, op0,
4878 remainder, target, unsignedp,
4879 OPTAB_LIB_WIDEN);
4880 }
4881 }
4883 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4884 }
4886 /* Return a tree node with data type TYPE, describing the value of X.
4887 Usually this is a VAR_DECL, if there is no obvious better choice.
4888 X may be an expression, however we only support those expressions
4889 generated by loop.c. */
4891 tree
4892 make_tree (tree type, rtx x)
4893 {
4894 tree t;
4896 switch (GET_CODE (x))
4897 {
4898 case CONST_INT:
4899 {
4900 HOST_WIDE_INT hi = 0;
4902 if (INTVAL (x) < 0
4903 && !(TYPE_UNSIGNED (type)
4904 && (GET_MODE_BITSIZE (TYPE_MODE (type))
4905 < HOST_BITS_PER_WIDE_INT)))
4906 hi = -1;
4908 t = build_int_cst_wide (type, INTVAL (x), hi);
4910 return t;
4911 }
4913 case CONST_DOUBLE:
4914 if (GET_MODE (x) == VOIDmode)
4915 t = build_int_cst_wide (type,
4916 CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
4917 else
4918 {
4919 REAL_VALUE_TYPE d;
4921 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
4922 t = build_real (type, d);
4923 }
4925 return t;
4927 case CONST_VECTOR:
4928 {
4929 int units = CONST_VECTOR_NUNITS (x);
4930 tree itype = TREE_TYPE (type);
4931 tree t = NULL_TREE;
4932 int i;
4935 /* Build a tree with vector elements. */
4936 for (i = units - 1; i >= 0; --i)
4937 {
4938 rtx elt = CONST_VECTOR_ELT (x, i);
4939 t = tree_cons (NULL_TREE, make_tree (itype, elt), t);
4940 }
4942 return build_vector (type, t);
4943 }
4945 case PLUS:
4946 return fold_build2 (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4947 make_tree (type, XEXP (x, 1)));
4949 case MINUS:
4950 return fold_build2 (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4951 make_tree (type, XEXP (x, 1)));
4953 case NEG:
4954 return fold_build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0)));
4956 case MULT:
4957 return fold_build2 (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
4958 make_tree (type, XEXP (x, 1)));
4960 case ASHIFT:
4961 return fold_build2 (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
4962 make_tree (type, XEXP (x, 1)));
4964 case LSHIFTRT:
4965 t = unsigned_type_for (type);
4966 return fold_convert (type, build2 (RSHIFT_EXPR, t,
4967 make_tree (t, XEXP (x, 0)),
4968 make_tree (type, XEXP (x, 1))));
4970 case ASHIFTRT:
4971 t = signed_type_for (type);
4972 return fold_convert (type, build2 (RSHIFT_EXPR, t,
4973 make_tree (t, XEXP (x, 0)),
4974 make_tree (type, XEXP (x, 1))));
4976 case DIV:
4977 if (TREE_CODE (type) != REAL_TYPE)
4978 t = signed_type_for (type);
4979 else
4980 t = type;
4982 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
4983 make_tree (t, XEXP (x, 0)),
4984 make_tree (t, XEXP (x, 1))));
4985 case UDIV:
4986 t = unsigned_type_for (type);
4987 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
4988 make_tree (t, XEXP (x, 0)),
4989 make_tree (t, XEXP (x, 1))));
4991 case SIGN_EXTEND:
4992 case ZERO_EXTEND:
4993 t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
4994 GET_CODE (x) == ZERO_EXTEND);
4995 return fold_convert (type, make_tree (t, XEXP (x, 0)));
4997 case CONST:
4998 return make_tree (type, XEXP (x, 0));
5000 case SYMBOL_REF:
5001 t = SYMBOL_REF_DECL (x);
5002 if (t)
5003 return fold_convert (type, build_fold_addr_expr (t));
5004 /* else fall through. */
5006 default:
5007 t = build_decl (VAR_DECL, NULL_TREE, type);
5009 /* If TYPE is a POINTER_TYPE, X might be Pmode with TYPE_MODE being
5010 ptr_mode. So convert. */
5011 if (POINTER_TYPE_P (type))
5012 x = convert_memory_address (TYPE_MODE (type), x);
5014 /* Note that we do *not* use SET_DECL_RTL here, because we do not
5015 want set_decl_rtl to go adjusting REG_ATTRS for this temporary. */
5016 t->decl_with_rtl.rtl = x;
5017 return t;
5018 }
5019 }
5022 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
5023 and returning TARGET.
5025 If TARGET is 0, a pseudo-register or constant is returned. */
5027 rtx
5028 expand_and (enum machine_mode mode, rtx op0, rtx op1, rtx target)
5029 {
5030 rtx tem = 0;
5032 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
5033 tem = simplify_binary_operation (AND, mode, op0, op1);
5034 if (tem == 0)
5035 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
5037 if (target == 0)
5038 target = tem;
5039 else if (tem != target)
5040 emit_move_insn (target, tem);
5041 return target;
5042 }
5044 /* Helper function for emit_store_flag. */
5045 static rtx
5046 emit_store_flag_1 (rtx target, rtx subtarget, enum machine_mode mode,
5047 int normalizep)
5048 {
5049 rtx op0;
5050 enum machine_mode target_mode = GET_MODE (target);
5052 /* If we are converting to a wider mode, first convert to
5053 TARGET_MODE, then normalize. This produces better combining
5054 opportunities on machines that have a SIGN_EXTRACT when we are
5055 testing a single bit. This mostly benefits the 68k.
5057 If STORE_FLAG_VALUE does not have the sign bit set when
5058 interpreted in MODE, we can do this conversion as unsigned, which
5059 is usually more efficient. */
5060 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
5061 {
5062 convert_move (target, subtarget,
5063 (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
5064 && 0 == (STORE_FLAG_VALUE
5065 & ((HOST_WIDE_INT) 1
5066 << (GET_MODE_BITSIZE (mode) -1))));
5067 op0 = target;
5068 mode = target_mode;
5069 }
5070 else
5071 op0 = subtarget;
5073 /* If we want to keep subexpressions around, don't reuse our last
5074 target. */
5075 if (optimize)
5076 subtarget = 0;
5078 /* Now normalize to the proper value in MODE. Sometimes we don't
5079 have to do anything. */
5080 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
5081 ;
5082 /* STORE_FLAG_VALUE might be the most negative number, so write
5083 the comparison this way to avoid a compiler-time warning. */
5084 else if (- normalizep == STORE_FLAG_VALUE)
5085 op0 = expand_unop (mode, neg_optab, op0, subtarget, 0);
5087 /* We don't want to use STORE_FLAG_VALUE < 0 below since this makes
5088 it hard to use a value of just the sign bit due to ANSI integer
5089 constant typing rules. */
5090 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5091 && (STORE_FLAG_VALUE
5092 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))))
5093 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
5094 size_int (GET_MODE_BITSIZE (mode) - 1), subtarget,
5095 normalizep == 1);
5096 else
5097 {
5098 gcc_assert (STORE_FLAG_VALUE & 1);
5100 op0 = expand_and (mode, op0, const1_rtx, subtarget);
5101 if (normalizep == -1)
5102 op0 = expand_unop (mode, neg_optab, op0, op0, 0);
5103 }
5105 /* If we were converting to a smaller mode, do the conversion now. */
5106 if (target_mode != mode)
5107 {
5108 convert_move (target, op0, 0);
5109 op0 = target;
5110 }
5112 return op0;
5113 }
5115 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
5116 and storing in TARGET. Normally return TARGET.
5117 Return 0 if that cannot be done.
5119 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
5120 it is VOIDmode, they cannot both be CONST_INT.
5122 UNSIGNEDP is for the case where we have to widen the operands
5123 to perform the operation. It says to use zero-extension.
5125 NORMALIZEP is 1 if we should convert the result to be either zero
5126 or one. NORMALIZEP is -1 if we should convert the result to be
5127 either zero or -1. If NORMALIZEP is zero, the result will be left
5128 "raw" out of the scc insn. */
5130 rtx
5131 emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
5132 enum machine_mode mode, int unsignedp, int normalizep)
5133 {
5134 rtx subtarget;
5135 enum insn_code icode;
5136 enum machine_mode compare_mode;
5137 enum machine_mode target_mode = GET_MODE (target);
5138 rtx tem;
5139 rtx last = get_last_insn ();
5140 rtx pattern, comparison;
5142 if (unsignedp)
5143 code = unsigned_condition (code);
5145 /* If one operand is constant, make it the second one. Only do this
5146 if the other operand is not constant as well. */
5148 if (swap_commutative_operands_p (op0, op1))
5149 {
5150 tem = op1;
5151 op1 = op0;
5152 op0 = tem;
5153 code = swap_condition (code);
5154 }
5156 if (mode == VOIDmode)
5157 mode = GET_MODE (op0);
5159 /* For some comparisons with 1 and -1, we can convert this to
5160 comparisons with zero. This will often produce more opportunities for
5161 store-flag insns. */
5163 switch (code)
5164 {
5165 case LT:
5166 if (op1 == const1_rtx)
5167 op1 = const0_rtx, code = LE;
5168 break;
5169 case LE:
5170 if (op1 == constm1_rtx)
5171 op1 = const0_rtx, code = LT;
5172 break;
5173 case GE:
5174 if (op1 == const1_rtx)
5175 op1 = const0_rtx, code = GT;
5176 break;
5177 case GT:
5178 if (op1 == constm1_rtx)
5179 op1 = const0_rtx, code = GE;
5180 break;
5181 case GEU:
5182 if (op1 == const1_rtx)
5183 op1 = const0_rtx, code = NE;
5184 break;
5185 case LTU:
5186 if (op1 == const1_rtx)
5187 op1 = const0_rtx, code = EQ;
5188 break;
5189 default:
5190 break;
5191 }
5193 /* If we are comparing a double-word integer with zero or -1, we can
5194 convert the comparison into one involving a single word. */
5195 if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
5196 && GET_MODE_CLASS (mode) == MODE_INT
5197 && (!MEM_P (op0) || ! MEM_VOLATILE_P (op0)))
5199 if ((code == EQ || code == NE)
5200 && (op1 == const0_rtx || op1 == constm1_rtx))
5201 {
5202 rtx op00, op01, op0both;
5204 /* Do a logical OR or AND of the two words and compare the
5205 result. */
5206 op00 = simplify_gen_subreg (word_mode, op0, mode, 0);
5207 op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD);
5208 op0both = expand_binop (word_mode,
5209 op1 == const0_rtx ? ior_optab : and_optab,
5210 op00, op01, NULL_RTX, unsignedp,
5211 OPTAB_DIRECT);
5213 if (op0both != 0)
5214 return emit_store_flag (target, code, op0both, op1, word_mode,
5215 unsignedp, normalizep);
5216 }
5217 else if ((code == LT || code == GE) && op1 == const0_rtx)
5218 {
5219 rtx op0h;
5221 /* If testing the sign bit, can just test on high word. */
5222 op0h = simplify_gen_subreg (word_mode, op0, mode,
5223 subreg_highpart_offset (word_mode,
5224 mode));
5225 return emit_store_flag (target, code, op0h, op1, word_mode,
5226 unsignedp, normalizep);
5227 }
5228 }
5230 /* If this is A < 0 or A >= 0, we can do this by taking the ones
5231 complement of A (for GE) and shifting the sign bit to the low bit. */
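/* Editorial example, for a 32-bit mode with STORE_FLAG_VALUE == 1:
   (A < 0) becomes (unsigned) A >> 31 and (A >= 0) becomes
   (unsigned) ~A >> 31.  */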
5232 if (op1 == const0_rtx && (code == LT || code == GE)
5233 && GET_MODE_CLASS (mode) == MODE_INT
5234 && (normalizep || STORE_FLAG_VALUE == 1
5235 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5236 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
5237 == ((unsigned HOST_WIDE_INT) 1
5238 << (GET_MODE_BITSIZE (mode) - 1))))))
5239 {
5240 subtarget = target;
5242 /* If the result is to be wider than OP0, it is best to convert it
5243 first. If it is to be narrower, it is *incorrect* to convert it
5244 first. */
5245 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
5246 {
5247 op0 = convert_modes (target_mode, mode, op0, 0);
5248 mode = target_mode;
5249 }
5251 if (target_mode != mode)
5252 subtarget = 0;
5254 if (code == GE)
5255 op0 = expand_unop (mode, one_cmpl_optab, op0,
5256 ((STORE_FLAG_VALUE == 1 || normalizep)
5257 ? 0 : subtarget), 0);
5259 if (STORE_FLAG_VALUE == 1 || normalizep)
5260 /* If we are supposed to produce a 0/1 value, we want to do
5261 a logical shift from the sign bit to the low-order bit; for
5262 a -1/0 value, we do an arithmetic shift. */
5263 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
5264 size_int (GET_MODE_BITSIZE (mode) - 1),
5265 subtarget, normalizep != -1);
5267 if (mode != target_mode)
5268 op0 = convert_modes (target_mode, mode, op0, 0);
5270 return op0;
5271 }
5273 icode = setcc_gen_code[(int) code];
5275 if (icode != CODE_FOR_nothing)
5276 {
5277 insn_operand_predicate_fn pred;
5279 /* We think we may be able to do this with a scc insn. Emit the
5280 comparison and then the scc insn. */
5282 do_pending_stack_adjust ();
5283 last = get_last_insn ();
5285 comparison
5286 = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX);
5287 if (CONSTANT_P (comparison))
5288 {
5289 switch (GET_CODE (comparison))
5290 {
5291 case CONST_INT:
5292 if (comparison == const0_rtx)
5293 return const0_rtx;
5294 break;
5296 #ifdef FLOAT_STORE_FLAG_VALUE
5297 case CONST_DOUBLE:
5298 if (comparison == CONST0_RTX (GET_MODE (comparison)))
5299 return const0_rtx;
5300 break;
5301 #endif
5302 default:
5303 gcc_unreachable ();
5304 }
5306 if (normalizep == 1)
5307 return const1_rtx;
5308 if (normalizep == -1)
5309 return constm1_rtx;
5310 return const_true_rtx;
5311 }
5313 /* The code of COMPARISON may not match CODE if compare_from_rtx
5314 decided to swap its operands and reverse the original code.
5316 We know that compare_from_rtx returns either a CONST_INT or
5317 a new comparison code, so it is safe to just extract the
5318 code from COMPARISON. */
5319 code = GET_CODE (comparison);
5321 /* Get a reference to the target in the proper mode for this insn. */
5322 compare_mode = insn_data[(int) icode].operand[0].mode;
5324 pred = insn_data[(int) icode].operand[0].predicate;
5325 if (optimize || ! (*pred) (subtarget, compare_mode))
5326 subtarget = gen_reg_rtx (compare_mode);
5328 pattern = GEN_FCN (icode) (subtarget);
5329 if (pattern)
5330 {
5331 emit_insn (pattern);
5332 return emit_store_flag_1 (target, subtarget, compare_mode,
5333 normalizep);
5334 }
5335 }
5338 /* We don't have an scc insn, so try a cstore insn. */
5340 for (compare_mode = mode; compare_mode != VOIDmode;
5341 compare_mode = GET_MODE_WIDER_MODE (compare_mode))
5342 {
5343 icode = cstore_optab->handlers[(int) compare_mode].insn_code;
5344 if (icode != CODE_FOR_nothing)
5345 break;
5346 }
5348 if (icode != CODE_FOR_nothing)
5349 {
5350 enum machine_mode result_mode
5351 = insn_data[(int) icode].operand[0].mode;
5352 rtx cstore_op0 = op0;
5353 rtx cstore_op1 = op1;
5355 do_pending_stack_adjust ();
5356 last = get_last_insn ();
5358 if (compare_mode != mode)
5359 {
5360 cstore_op0 = convert_modes (compare_mode, mode, cstore_op0,
5361 unsignedp);
5362 cstore_op1 = convert_modes (compare_mode, mode, cstore_op1,
5363 unsignedp);
5364 }
5366 if (!insn_data[(int) icode].operand[2].predicate (cstore_op0,
5367 compare_mode))
5368 cstore_op0 = copy_to_mode_reg (compare_mode, cstore_op0);
5370 if (!insn_data[(int) icode].operand[3].predicate (cstore_op1,
5371 compare_mode))
5372 cstore_op1 = copy_to_mode_reg (compare_mode, cstore_op1);
5374 comparison = gen_rtx_fmt_ee (code, result_mode, cstore_op0,
5375 cstore_op1);
5376 subtarget = target;
5378 if (optimize || !(insn_data[(int) icode].operand[0].predicate
5379 (subtarget, result_mode)))
5380 subtarget = gen_reg_rtx (result_mode);
5382 pattern = GEN_FCN (icode) (subtarget, comparison, cstore_op0,
5383 cstore_op1);
5385 if (pattern)
5386 {
5387 emit_insn (pattern);
5388 return emit_store_flag_1 (target, subtarget, result_mode,
5389 normalizep);
5390 }
5391 }
5394 delete_insns_since (last);
5396 /* If optimizing, use different pseudo registers for each insn, instead
5397 of reusing the same pseudo. This leads to better CSE, but slows
5398 down the compiler, since there are more pseudos. */
5399 subtarget = (!optimize
5400 && (target_mode == mode)) ? target : NULL_RTX;
5402 /* If we reached here, we can't do this with a scc insn. However, there
5403 are some comparisons that can be done directly. For example, if
5404 this is an equality comparison of integers, we can try to exclusive-or
5405 (or subtract) the two operands and use a recursive call to try the
5406 comparison with zero. Don't do any of these cases if branches are
5407 very cheap. */
5409 if (BRANCH_COST > 0
5410 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
5411 && op1 != const0_rtx)
5412 {
5413 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
5414 OPTAB_WIDEN);
5416 if (tem == 0)
5417 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
5418 OPTAB_WIDEN);
5419 if (tem != 0)
5420 tem = emit_store_flag (target, code, tem, const0_rtx,
5421 mode, unsignedp, normalizep);
5422 if (tem == 0)
5423 delete_insns_since (last);
5424 return tem;
5425 }
5427 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
5428 the constant zero. Reject all other comparisons at this point. Only
5429 do LE and GT if branches are expensive since they are expensive on
5430 2-operand machines. */
5432 if (BRANCH_COST == 0
5433 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
5434 || (code != EQ && code != NE
5435 && (BRANCH_COST <= 1 || (code != LE && code != GT))))
5436 return 0;
5438 /* See what we need to return. We can only return a 1, -1, or the
5439 sign bit. */
5441 if (normalizep == 0)
5442 {
5443 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
5444 normalizep = STORE_FLAG_VALUE;
5446 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5447 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
5448 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
5449 ;
5450 else
5451 return 0;
5452 }
5454 /* Try to put the result of the comparison in the sign bit. Assume we can't
5455 do the necessary operation below. */
5457 tem = 0;
5459 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
5460 the sign bit set. */
5462 if (code == LE)
5463 {
5464 /* This is destructive, so SUBTARGET can't be OP0. */
5465 if (rtx_equal_p (subtarget, op0))
5466 subtarget = 0;
5468 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
5469 OPTAB_WIDEN);
5470 if (tem)
5471 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
5472 OPTAB_WIDEN);
5473 }
5475 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
5476 number of bits in the mode of OP0, minus one. */
5478 if (code == GT)
5479 {
5480 if (rtx_equal_p (subtarget, op0))
5481 subtarget = 0;
5483 tem = expand_shift (RSHIFT_EXPR, mode, op0,
5484 size_int (GET_MODE_BITSIZE (mode) - 1),
5485 subtarget, 0);
5486 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
5487 OPTAB_WIDEN);
5488 }
5490 if (code == EQ || code == NE)
5491 {
5492 /* For EQ or NE, one way to do the comparison is to apply an operation
5493 that converts the operand into a positive number if it is nonzero
5494 or zero if it was originally zero. Then, for EQ, we subtract 1 and
5495 for NE we negate. This puts the result in the sign bit. Then we
5496 normalize with a shift, if needed.
5498 Two operations that can do the above actions are ABS and FFS, so try
5499 them. If that doesn't work, and MODE is smaller than a full word,
5500 we can use zero-extension to the wider mode (an unsigned conversion)
5501 as the operation. */
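/* Editorial example: for NE, -abs (A) is negative exactly when A != 0,
   so shifting its sign bit down yields the desired 0/1 or 0/-1 value.  */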
5503 /* Note that ABS doesn't yield a positive number for INT_MIN, but
5504 that is compensated by the subsequent overflow when subtracting
5505 one. */
5507 if (abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)
5508 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
5509 else if (ffs_optab->handlers[mode].insn_code != CODE_FOR_nothing)
5510 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
5511 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5512 {
5513 tem = convert_modes (word_mode, mode, op0, 1);
5514 mode = word_mode;
5515 }
5517 if (tem != 0)
5518 {
5519 if (code == EQ)
5520 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
5521 0, OPTAB_WIDEN);
5522 else
5523 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
5524 }
5526 /* If we couldn't do it that way, for NE we can "or" the two's complement
5527 of the value with itself. For EQ, we take the one's complement of
5528 that "or", which is an extra insn, so we only handle EQ if branches
5529 are expensive. */
5531 if (tem == 0 && (code == NE || BRANCH_COST > 1))
5532 {
5533 if (rtx_equal_p (subtarget, op0))
5534 subtarget = 0;
5536 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
5537 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
5538 OPTAB_WIDEN);
5540 if (tem && code == EQ)
5541 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
5542 }
5545 if (tem && normalizep)
5546 tem = expand_shift (RSHIFT_EXPR, mode, tem,
5547 size_int (GET_MODE_BITSIZE (mode) - 1),
5548 subtarget, normalizep == 1);
5550 if (tem)
5551 {
5552 if (GET_MODE (tem) != target_mode)
5553 {
5554 convert_move (target, tem, 0);
5555 tem = target;
5556 }
5557 else if (!subtarget)
5558 {
5559 emit_move_insn (target, tem);
5560 tem = target;
5561 }
5562 }
5563 else
5564 delete_insns_since (last);
5566 return tem;
5567 }
5569 /* Like emit_store_flag, but always succeeds. */
5571 rtx
5572 emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
5573 enum machine_mode mode, int unsignedp, int normalizep)
5574 {
5575 rtx tem, label;
5577 /* First see if emit_store_flag can do the job. */
5578 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
5579 if (tem != 0)
5580 return tem;
5582 if (normalizep == 0)
5583 normalizep = 1;
5585 /* If this failed, we have to do this with set/compare/jump/set code. */
5587 if (!REG_P (target)
5588 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
5589 target = gen_reg_rtx (GET_MODE (target));
5591 emit_move_insn (target, const1_rtx);
5592 label = gen_label_rtx ();
5593 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
5594 NULL_RTX, label);
5596 emit_move_insn (target, const0_rtx);
5597 emit_label (label);
5599 return target;
5600 }
5602 /* Perform possibly multi-word comparison and conditional jump to LABEL
5603 if ARG1 OP ARG2 true where ARG1 and ARG2 are of mode MODE. This is
5604 now a thin wrapper around do_compare_rtx_and_jump. */
5606 static void
5607 do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, enum machine_mode mode,
5608 rtx label)
5609 {
5610 int unsignedp = (op == LTU || op == LEU || op == GTU || op == GEU);
5611 do_compare_rtx_and_jump (arg1, arg2, op, unsignedp, mode,
5612 NULL_RTX, NULL_RTX, label);
5613 }