diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index 7386ba55131..2890d04f5db 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -1,21 +1,21 @@
 /* Subroutines used for code generation on IA-32.
    Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
-   2002, 2003 Free Software Foundation, Inc.
+   2002, 2003, 2004 Free Software Foundation, Inc.
 
-This file is part of GNU CC.
+This file is part of GCC.
 
-GNU CC is free software; you can redistribute it and/or modify
+GCC is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
 the Free Software Foundation; either version 2, or (at your option)
 any later version.
 
-GNU CC is distributed in the hope that it will be useful,
+GCC is distributed in the hope that it will be useful,
 but WITHOUT ANY WARRANTY; without even the implied warranty of
 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License
-along with GNU CC; see the file COPYING.  If not, write to
+along with GCC; see the file COPYING.  If not, write to
 the Free Software Foundation, 59 Temple Place - Suite 330,
 Boston, MA 02111-1307, USA.  */
 
@@ -457,6 +457,50 @@ struct processor_costs pentium4_cost = {
   43,					/* cost of FSQRT instruction.  */
 };
 
+static const
+struct processor_costs nocona_cost = {
+  1,					/* cost of an add instruction */
+  1,					/* cost of a lea instruction */
+  1,					/* variable shift costs */
+  1,					/* constant shift costs */
+  {10, 10, 10, 10, 10},			/* cost of starting a multiply */
+  0,					/* cost of multiply per each bit set */
+  {66, 66, 66, 66, 66},			/* cost of a divide/mod */
+  1,					/* cost of movsx */
+  1,					/* cost of movzx */
+  16,					/* "large" insn */
+  9,					/* MOVE_RATIO */
+  4,					/* cost for loading QImode using movzbl */
+  {4, 4, 4},				/* cost of loading integer registers
+					   in QImode, HImode and SImode.
+					   Relative to reg-reg move (2).  */
+  {4, 4, 4},				/* cost of storing integer registers */
+  3,					/* cost of reg,reg fld/fst */
+  {12, 12, 12},				/* cost of loading fp registers
+					   in SFmode, DFmode and XFmode */
+  {4, 4, 4},				/* cost of loading integer registers */
+  6,					/* cost of moving MMX register */
+  {12, 12},				/* cost of loading MMX registers
+					   in SImode and DImode */
+  {12, 12},				/* cost of storing MMX registers
+					   in SImode and DImode */
+  6,					/* cost of moving SSE register */
+  {12, 12, 12},				/* cost of loading SSE registers
+					   in SImode, DImode and TImode */
+  {12, 12, 12},				/* cost of storing SSE registers
+					   in SImode, DImode and TImode */
+  8,					/* MMX or SSE register to integer */
+  128,					/* size of prefetch block */
+  8,					/* number of parallel prefetches */
+  1,					/* Branch cost */
+  6,					/* cost of FADD and FSUB insns.  */
+  8,					/* cost of FMUL instruction.  */
+  40,					/* cost of FDIV instruction.  */
+  3,					/* cost of FABS instruction.  */
+  3,					/* cost of FCHS instruction.  */
+  44,					/* cost of FSQRT instruction.  */
+};
+
 const struct processor_costs *ix86_cost = &pentium_cost;
 
 /* Processor feature/optimization bitmasks.  */
@@ -469,19 +513,20 @@ const struct processor_costs *ix86_cost = &pentium_cost;
 #define m_PENT4 (1<= 3)
+  if (ix86_function_regparm (type, NULL) >= 3)
     {
      /* ???
Need to count the actual number of registers to be used, not the
	 possible number of registers.  Fix later.  */
@@ -1553,12 +1628,9 @@ ix86_function_ok_for_sibcall (decl, exp)
 /* Handle a "cdecl", "stdcall", or "fastcall" attribute;
    arguments as in struct attribute_spec.handler.  */
 static tree
-ix86_handle_cdecl_attribute (node, name, args, flags, no_add_attrs)
-     tree *node;
-     tree name;
-     tree args ATTRIBUTE_UNUSED;
-     int flags ATTRIBUTE_UNUSED;
-     bool *no_add_attrs;
+ix86_handle_cdecl_attribute (tree *node, tree name,
+			     tree args ATTRIBUTE_UNUSED,
+			     int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
 {
   if (TREE_CODE (*node) != FUNCTION_TYPE
       && TREE_CODE (*node) != METHOD_TYPE
@@ -1603,12 +1675,8 @@ ix86_handle_cdecl_attribute (node, name, args, flags, no_add_attrs)
 /* Handle a "regparm" attribute;
    arguments as in struct attribute_spec.handler.  */
 static tree
-ix86_handle_regparm_attribute (node, name, args, flags, no_add_attrs)
-     tree *node;
-     tree name;
-     tree args;
-     int flags ATTRIBUTE_UNUSED;
-     bool *no_add_attrs;
+ix86_handle_regparm_attribute (tree *node, tree name, tree args,
+			       int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
 {
   if (TREE_CODE (*node) != FUNCTION_TYPE
       && TREE_CODE (*node) != METHOD_TYPE
@@ -1638,9 +1706,9 @@ ix86_handle_regparm_attribute (node, name, args, flags, no_add_attrs)
 	}
 
       if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
-        {
-          error ("fastcall and regparm attributes are not compatible");
-        }
+	{
+	  error ("fastcall and regparm attributes are not compatible");
+	}
     }
 
   return NULL_TREE;
@@ -1651,9 +1719,7 @@ ix86_handle_regparm_attribute (node, name, args, flags, no_add_attrs)
    warning to be generated).  */
 
 static int
-ix86_comp_type_attributes (type1, type2)
-     tree type1;
-     tree type2;
+ix86_comp_type_attributes (tree type1, tree type2)
 {
   /* Check for mismatch of non-default calling convention.  */
   const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
@@ -1661,31 +1727,80 @@ ix86_comp_type_attributes (type1, type2)
   if (TREE_CODE (type1) != FUNCTION_TYPE)
     return 1;
 
-  /* Check for mismatched fastcall types */ 
+  /* Check for mismatched fastcall types */
   if (!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
       != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
-    return 0; 
+    return 0;
 
   /* Check for mismatched return types (cdecl vs stdcall).  */
   if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
       != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
     return 0;
+  if (ix86_function_regparm (type1, NULL)
+      != ix86_function_regparm (type2, NULL))
+    return 0;
   return 1;
 }
 
-/* Return the regparm value for a fuctio with the indicated TYPE.  */
+/* Return the regparm value for a function with the indicated TYPE and DECL.
+   DECL may be NULL when calling function indirectly
+   or considering a libcall.  */
 
 static int
-ix86_fntype_regparm (type)
-     tree type;
+ix86_function_regparm (tree type, tree decl)
 {
   tree attr;
+  int regparm = ix86_regparm;
+  bool user_convention = false;
 
-  attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
-  if (attr)
-    return TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
-  else
-    return ix86_regparm;
+  if (!TARGET_64BIT)
+    {
+      attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
+      if (attr)
+	{
+	  regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
+	  user_convention = true;
+	}
+
+      if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
+	{
+	  regparm = 2;
+	  user_convention = true;
+	}
+
+      /* Use register calling convention for local functions when possible.
*/ + if (!TARGET_64BIT && !user_convention && decl + && flag_unit_at_a_time && !profile_flag) + { + struct cgraph_local_info *i = cgraph_local_info (decl); + if (i && i->local) + { + /* We can't use regparm(3) for nested functions as these use + static chain pointer in third argument. */ + if (DECL_CONTEXT (decl) && !DECL_NO_STATIC_CHAIN (decl)) + regparm = 2; + else + regparm = 3; + } + } + } + return regparm; +} + +/* Return true if EAX is live at the start of the function. Used by + ix86_expand_prologue to determine if we need special help before + calling allocate_stack_worker. */ + +static bool +ix86_eax_live_at_start_p (void) +{ + /* Cheat. Don't bother working forward from ix86_function_regparm + to the function type to whether an actual argument is located in + eax. Instead just look at cfg info, which is still close enough + to correct at this point. This gives false positives for broken + functions that might use uninitialized data that happens to be + allocated in eax, but who cares? */ + return REGNO_REG_SET_P (ENTRY_BLOCK_PTR->global_live_at_end, 0); } /* Value is the number of bytes of arguments automatically @@ -1706,17 +1821,15 @@ ix86_fntype_regparm (type) The attribute stdcall is equivalent to RTD on a per module basis. */ int -ix86_return_pops_args (fundecl, funtype, size) - tree fundecl; - tree funtype; - int size; +ix86_return_pops_args (tree fundecl, tree funtype, int size) { int rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE); - /* Cdecl functions override -mrtd, and never pop the stack. */ + /* Cdecl functions override -mrtd, and never pop the stack. */ if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype))) { - /* Stdcall and fastcall functions will pop the stack if not variable args. */ + /* Stdcall and fastcall functions will pop the stack if not + variable args. */ if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype)) || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype))) rtd = 1; @@ -1729,10 +1842,10 @@ ix86_return_pops_args (fundecl, funtype, size) } /* Lose any fake structure return argument if it is passed on the stack. */ - if (aggregate_value_p (TREE_TYPE (funtype)) + if (aggregate_value_p (TREE_TYPE (funtype), fundecl) && !TARGET_64BIT) { - int nregs = ix86_fntype_regparm (funtype); + int nregs = ix86_function_regparm (funtype, fundecl); if (!nregs) return GET_MODE_SIZE (Pmode); @@ -1745,8 +1858,7 @@ ix86_return_pops_args (fundecl, funtype, size) /* Return true when register may be used to pass function parameters. */ bool -ix86_function_arg_regno_p (regno) - int regno; +ix86_function_arg_regno_p (int regno) { int i; if (!TARGET_64BIT) @@ -1768,15 +1880,13 @@ ix86_function_arg_regno_p (regno) For a library call, FNTYPE is 0. */ void -init_cumulative_args (cum, fntype, libname, fndecl) - CUMULATIVE_ARGS *cum; /* Argument info to initialize */ - tree fntype; /* tree ptr for function decl */ - rtx libname; /* SYMBOL_REF of library name or 0 */ - tree fndecl; +init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */ + tree fntype, /* tree ptr for function decl */ + rtx libname, /* SYMBOL_REF of library name or 0 */ + tree fndecl) { static CUMULATIVE_ARGS zero_cum; tree param, next_param; - bool user_convention = false; if (TARGET_DEBUG_ARG) { @@ -1795,18 +1905,14 @@ init_cumulative_args (cum, fntype, libname, fndecl) *cum = zero_cum; /* Set up the number of registers to use for passing arguments. 
*/ - cum->nregs = ix86_regparm; + if (fntype) + cum->nregs = ix86_function_regparm (fntype, fndecl); + else + cum->nregs = ix86_regparm; cum->sse_nregs = SSE_REGPARM_MAX; - if (fntype && !TARGET_64BIT) - { - tree attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (fntype)); - - if (attr) - { - cum->nregs = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr))); - user_convention = true; - } - } + cum->mmx_nregs = MMX_REGPARM_MAX; + cum->warn_sse = true; + cum->warn_mmx = true; cum->maybe_vaarg = false; /* Use ecx and edx registers if function has fastcall attribute */ @@ -1816,23 +1922,6 @@ init_cumulative_args (cum, fntype, libname, fndecl) { cum->nregs = 2; cum->fastcall = 1; - user_convention = true; - } - } - - /* Use register calling convention for local functions when possible. */ - if (!TARGET_64BIT && !user_convention && fndecl - && flag_unit_at_a_time) - { - struct cgraph_local_info *i = cgraph_local_info (fndecl); - if (i && i->local) - { - /* We can't use regparm(3) for nested functions as these use - static chain pointer in third argument. */ - if (DECL_CONTEXT (fndecl) && !DECL_NO_STATIC_CHAIN (fndecl)) - cum->nregs = 2; - else - cum->nregs = 3; } } @@ -1842,7 +1931,7 @@ init_cumulative_args (cum, fntype, libname, fndecl) are no variable arguments. If there are variable arguments, then we won't pass anything in registers */ - if (cum->nregs) + if (cum->nregs || !TARGET_MMX || !TARGET_SSE) { for (param = (fntype) ? TYPE_ARG_TYPES (fntype) : 0; param != 0; param = next_param) @@ -1853,6 +1942,10 @@ init_cumulative_args (cum, fntype, libname, fndecl) if (!TARGET_64BIT) { cum->nregs = 0; + cum->sse_nregs = 0; + cum->mmx_nregs = 0; + cum->warn_sse = 0; + cum->warn_mmx = 0; cum->fastcall = 0; } cum->maybe_vaarg = true; @@ -1877,8 +1970,7 @@ init_cumulative_args (cum, fntype, libname, fndecl) See the x86-64 PS ABI for details. */ static enum x86_64_reg_class -merge_classes (class1, class2) - enum x86_64_reg_class class1, class2; +merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2) { /* Rule #1: If both classes are equal, this is the resulting class. */ if (class1 == class2) @@ -1925,13 +2017,10 @@ merge_classes (class1, class2) */ static int -classify_argument (mode, type, classes, bit_offset) - enum machine_mode mode; - tree type; - enum x86_64_reg_class classes[MAX_CLASSES]; - int bit_offset; +classify_argument (enum machine_mode mode, tree type, + enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset) { - int bytes = + HOST_WIDE_INT bytes = (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode); int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD; @@ -1994,7 +2083,7 @@ classify_argument (mode, type, classes, bit_offset) } } } - /* And now merge the fields of structure. */ + /* And now merge the fields of structure. 
*/ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field)) { if (TREE_CODE (field) == FIELD_DECL) @@ -2009,7 +2098,7 @@ classify_argument (mode, type, classes, bit_offset) for (i = int_bit_position (field) / 8 / 8; i < (int_bit_position (field) + tree_low_cst (DECL_SIZE (field), 0) - + 63) / 8 / 8; i++) + + 63) / 8 / 8; i++) classes[i] = merge_classes (X86_64_INTEGER_CLASS, classes[i]); @@ -2097,6 +2186,31 @@ classify_argument (mode, type, classes, bit_offset) } } } + else if (TREE_CODE (type) == SET_TYPE) + { + if (bytes <= 4) + { + classes[0] = X86_64_INTEGERSI_CLASS; + return 1; + } + else if (bytes <= 8) + { + classes[0] = X86_64_INTEGER_CLASS; + return 1; + } + else if (bytes <= 12) + { + classes[0] = X86_64_INTEGER_CLASS; + classes[1] = X86_64_INTEGERSI_CLASS; + return 2; + } + else + { + classes[0] = X86_64_INTEGER_CLASS; + classes[1] = X86_64_INTEGER_CLASS; + return 2; + } + } else abort (); @@ -2169,11 +2283,14 @@ classify_argument (mode, type, classes, bit_offset) case DFmode: classes[0] = X86_64_SSEDF_CLASS; return 1; - case TFmode: + case XFmode: classes[0] = X86_64_X87_CLASS; classes[1] = X86_64_X87UP_CLASS; return 2; + case TFmode: case TCmode: + return 0; + case XCmode: classes[0] = X86_64_X87_CLASS; classes[1] = X86_64_X87UP_CLASS; classes[2] = X86_64_X87_CLASS; @@ -2211,11 +2328,8 @@ classify_argument (mode, type, classes, bit_offset) /* Examine the argument and return set number of register required in each class. Return 0 iff parameter should be passed in memory. */ static int -examine_argument (mode, type, in_return, int_nregs, sse_nregs) - enum machine_mode mode; - tree type; - int *int_nregs, *sse_nregs; - int in_return; +examine_argument (enum machine_mode mode, tree type, int in_return, + int *int_nregs, int *sse_nregs) { enum x86_64_reg_class class[MAX_CLASSES]; int n = classify_argument (mode, type, class, 0); @@ -2252,13 +2366,9 @@ examine_argument (mode, type, in_return, int_nregs, sse_nregs) /* Construct container for the argument used by GCC interface. See FUNCTION_ARG for the detailed description. */ static rtx -construct_container (mode, type, in_return, nintregs, nsseregs, intreg, sse_regno) - enum machine_mode mode; - tree type; - int in_return; - int nintregs, nsseregs; - const int * intreg; - int sse_regno; +construct_container (enum machine_mode mode, tree type, int in_return, + int nintregs, int nsseregs, const int * intreg, + int sse_regno) { enum machine_mode tmpmode; int bytes = @@ -2317,16 +2427,16 @@ construct_container (mode, type, in_return, nintregs, nsseregs, intreg, sse_regn return gen_rtx_REG (mode, SSE_REGNO (sse_regno)); if (n == 2 && class[0] == X86_64_X87_CLASS && class[1] == X86_64_X87UP_CLASS) - return gen_rtx_REG (TFmode, FIRST_STACK_REG); + return gen_rtx_REG (XFmode, FIRST_STACK_REG); if (n == 2 && class[0] == X86_64_INTEGER_CLASS && class[1] == X86_64_INTEGER_CLASS - && (mode == CDImode || mode == TImode) + && (mode == CDImode || mode == TImode || mode == TFmode) && intreg[0] + 1 == intreg[1]) return gen_rtx_REG (mode, intreg[0]); if (n == 4 && class[0] == X86_64_X87_CLASS && class[1] == X86_64_X87UP_CLASS && class[2] == X86_64_X87_CLASS && class[3] == X86_64_X87UP_CLASS) - return gen_rtx_REG (TCmode, FIRST_STACK_REG); + return gen_rtx_REG (XCmode, FIRST_STACK_REG); /* Otherwise figure out the entries of the PARALLEL. */ for (i = 0; i < n; i++) @@ -2394,11 +2504,10 @@ construct_container (mode, type, in_return, nintregs, nsseregs, intreg, sse_regn (TYPE is null for libcalls where that information may not be available.) 
*/ void -function_arg_advance (cum, mode, type, named) - CUMULATIVE_ARGS *cum; /* current arg information */ - enum machine_mode mode; /* current arg mode */ - tree type; /* type of the argument or 0 if lib support */ - int named; /* whether or not the argument was named */ +function_arg_advance (CUMULATIVE_ARGS *cum, /* current arg information */ + enum machine_mode mode, /* current arg mode */ + tree type, /* type of the argument or 0 if lib support */ + int named) /* whether or not the argument was named */ { int bytes = (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode); @@ -2406,8 +2515,8 @@ function_arg_advance (cum, mode, type, named) if (TARGET_DEBUG_ARG) fprintf (stderr, - "function_adv (sz=%d, wds=%2d, nregs=%d, mode=%s, named=%d)\n\n", - words, cum->words, cum->nregs, GET_MODE_NAME (mode), named); + "function_adv (sz=%d, wds=%2d, nregs=%d, ssenregs=%d, mode=%s, named=%d)\n\n", + words, cum->words, cum->nregs, cum->sse_nregs, GET_MODE_NAME (mode), named); if (TARGET_64BIT) { int int_nregs, sse_nregs; @@ -2425,7 +2534,8 @@ function_arg_advance (cum, mode, type, named) } else { - if (TARGET_SSE && mode == TImode) + if (TARGET_SSE && SSE_REG_MODE_P (mode) + && (!type || !AGGREGATE_TYPE_P (type))) { cum->sse_words += words; cum->sse_nregs -= 1; @@ -2436,6 +2546,18 @@ function_arg_advance (cum, mode, type, named) cum->sse_regno = 0; } } + else if (TARGET_MMX && MMX_REG_MODE_P (mode) + && (!type || !AGGREGATE_TYPE_P (type))) + { + cum->mmx_words += words; + cum->mmx_nregs -= 1; + cum->mmx_regno += 1; + if (cum->mmx_nregs <= 0) + { + cum->mmx_nregs = 0; + cum->mmx_regno = 0; + } + } else { cum->words += words; @@ -2466,16 +2588,16 @@ function_arg_advance (cum, mode, type, named) (otherwise it is an extra parameter matching an ellipsis). */ rtx -function_arg (cum, mode, type, named) - CUMULATIVE_ARGS *cum; /* current arg information */ - enum machine_mode mode; /* current arg mode */ - tree type; /* type of the argument or 0 if lib support */ - int named; /* != 0 for normal args, == 0 for ... args */ +function_arg (CUMULATIVE_ARGS *cum, /* current arg information */ + enum machine_mode mode, /* current arg mode */ + tree type, /* type of the argument or 0 if lib support */ + int named) /* != 0 for normal args, == 0 for ... args */ { rtx ret = NULL_RTX; int bytes = (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode); int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD; + static bool warnedsse, warnedmmx; /* Handle a hidden AL argument containing number of registers for varargs x86-64 functions. For i386 ABI just return constm1_rtx to avoid @@ -2511,26 +2633,57 @@ function_arg (cum, mode, type, named) case HImode: case QImode: if (words <= cum->nregs) - { - int regno = cum->regno; - - /* Fastcall allocates the first two DWORD (SImode) or - smaller arguments to ECX and EDX. */ - if (cum->fastcall) - { - if (mode == BLKmode || mode == DImode) - break; - - /* ECX not EAX is the first allocated register. */ - if (regno == 0) - regno = 2; - } - ret = gen_rtx_REG (mode, regno); - } + { + int regno = cum->regno; + + /* Fastcall allocates the first two DWORD (SImode) or + smaller arguments to ECX and EDX. */ + if (cum->fastcall) + { + if (mode == BLKmode || mode == DImode) + break; + + /* ECX not EAX is the first allocated register. 
*/
+	      if (regno == 0)
+		regno = 2;
+	    }
+	  ret = gen_rtx_REG (mode, regno);
+	}
       break;
     case TImode:
-      if (cum->sse_nregs)
-	ret = gen_rtx_REG (mode, cum->sse_regno);
+    case V16QImode:
+    case V8HImode:
+    case V4SImode:
+    case V2DImode:
+    case V4SFmode:
+    case V2DFmode:
+      if (!type || !AGGREGATE_TYPE_P (type))
+	{
+	  if (!TARGET_SSE && !warnedsse && cum->warn_sse)
+	    {
+	      warnedsse = true;
+	      warning ("SSE vector argument without SSE enabled "
+		       "changes the ABI");
+	    }
+	  if (cum->sse_nregs)
+	    ret = gen_rtx_REG (mode, cum->sse_regno + FIRST_SSE_REG);
+	}
+      break;
+    case V8QImode:
+    case V4HImode:
+    case V2SImode:
+    case V2SFmode:
+      if (!type || !AGGREGATE_TYPE_P (type))
+	{
+	  if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
+	    {
+	      warnedmmx = true;
+	      warning ("MMX vector argument without MMX enabled "
+		       "changes the ABI");
+	    }
+	  if (cum->mmx_nregs)
+	    ret = gen_rtx_REG (mode, cum->mmx_regno + FIRST_MMX_REG);
+	}
       break;
     }
@@ -2558,11 +2711,9 @@ function_arg (cum, mode, type, named)
    appropriate for passing a pointer to that type.  */
 
 int
-function_arg_pass_by_reference (cum, mode, type, named)
-     CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED;
-     enum machine_mode mode ATTRIBUTE_UNUSED;
-     tree type;
-     int named ATTRIBUTE_UNUSED;
+function_arg_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
+				enum machine_mode mode ATTRIBUTE_UNUSED,
+				tree type, int named ATTRIBUTE_UNUSED)
 {
   if (!TARGET_64BIT)
     return 0;
@@ -2580,8 +2731,7 @@ function_arg_pass_by_reference (cum, mode, type, named)
 /* Return true when TYPE should be 128bit aligned for 32bit argument
    passing ABI  */
 static bool
-contains_128bit_aligned_vector_p (type)
-     tree type;
+contains_128bit_aligned_vector_p (tree type)
 {
   enum machine_mode mode = TYPE_MODE (type);
   if (SSE_REG_MODE_P (mode)
@@ -2592,7 +2742,7 @@ contains_128bit_aligned_vector_p (type)
 
   if (AGGREGATE_TYPE_P (type))
     {
-      /* Walk the agregates recursivly.  */
+      /* Walk the aggregates recursively.  */
       if (TREE_CODE (type) == RECORD_TYPE
 	  || TREE_CODE (type) == UNION_TYPE
 	  || TREE_CODE (type) == QUAL_UNION_TYPE)
@@ -2615,7 +2765,7 @@ contains_128bit_aligned_vector_p (type)
 		  return true;
 	      }
 	  }
-	/* And now merge the fields of structure.   */
+	/* And now merge the fields of structure.  */
 	for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
 	  {
 	    if (TREE_CODE (field) == FIELD_DECL
@@ -2635,13 +2785,11 @@ contains_128bit_aligned_vector_p (type)
   return false;
 }
 
-/* Gives the alignment boundary, in bits, of an argument with the specified mode
-   and type.  */
+/* Gives the alignment boundary, in bits, of an argument with the
+   specified mode and type.  */
 
 int
-ix86_function_arg_boundary (mode, type)
-     enum machine_mode mode;
-     tree type;
+ix86_function_arg_boundary (enum machine_mode mode, tree type)
 {
   int align;
   if (type)
@@ -2654,7 +2802,7 @@ ix86_function_arg_boundary (mode, type)
     {
       /* i386 ABI defines all arguments to be 4 byte aligned.  We have to
	 make an exception for SSE modes since these require 128bit
-	 alignment.  
+	 alignment.
 
	 The handling here differs from field_alignment.  ICC aligns MMX
	 arguments to 4 byte boundaries, while structure fields are aligned
@@ -2669,8 +2817,6 @@ ix86_function_arg_boundary (mode, type)
	  if (!contains_128bit_aligned_vector_p (type))
	    align = PARM_BOUNDARY;
	}
-      if (align != PARM_BOUNDARY && !TARGET_SSE)
-	abort();
     }
   if (align > 128)
     align = 128;
@@ -2679,8 +2825,7 @@
 /* Return true if N is a possible register number of function value.
*/ bool -ix86_function_value_regno_p (regno) - int regno; +ix86_function_value_regno_p (int regno) { if (!TARGET_64BIT) { @@ -2698,8 +2843,7 @@ ix86_function_value_regno_p (regno) If the precise function being called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0. */ rtx -ix86_function_value (valtype) - tree valtype; +ix86_function_value (tree valtype) { if (TARGET_64BIT) { @@ -2719,77 +2863,112 @@ ix86_function_value (valtype) /* Return false iff type is returned in memory. */ int -ix86_return_in_memory (type) - tree type; +ix86_return_in_memory (tree type) { - int needed_intregs, needed_sseregs; + int needed_intregs, needed_sseregs, size; + enum machine_mode mode = TYPE_MODE (type); + if (TARGET_64BIT) + return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs); + + if (mode == BLKmode) + return 1; + + size = int_size_in_bytes (type); + + if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8) + return 0; + + if (VECTOR_MODE_P (mode) || mode == TImode) { - return !examine_argument (TYPE_MODE (type), type, 1, - &needed_intregs, &needed_sseregs); - } - else - { - if (TYPE_MODE (type) == BLKmode) - return 1; - else if (MS_AGGREGATE_RETURN - && AGGREGATE_TYPE_P (type) - && int_size_in_bytes(type) <= 8) + /* User-created vectors small enough to fit in EAX. */ + if (size < 8) return 0; - else if ((VECTOR_MODE_P (TYPE_MODE (type)) - && int_size_in_bytes (type) == 8) - || (int_size_in_bytes (type) > 12 - && TYPE_MODE (type) != TImode - && TYPE_MODE (type) != TFmode - && !VECTOR_MODE_P (TYPE_MODE (type)))) + + /* MMX/3dNow values are returned on the stack, since we've + got to EMMS/FEMMS before returning. */ + if (size == 8) return 1; - return 0; + + /* SSE values are returned in XMM0. */ + /* ??? Except when it doesn't exist? We have a choice of + either (1) being abi incompatible with a -march switch, + or (2) generating an error here. Given no good solution, + I think the safest thing is one warning. The user won't + be able to use -Werror, but.... */ + if (size == 16) + { + static bool warned; + + if (TARGET_SSE) + return 0; + + if (!warned) + { + warned = true; + warning ("SSE vector return without SSE enabled " + "changes the ABI"); + } + return 1; + } } + + if (mode == XFmode) + return 0; + + if (size > 12) + return 1; + return 0; } /* Define how to find the value returned by a library function assuming the value has mode MODE. */ rtx -ix86_libcall_value (mode) - enum machine_mode mode; +ix86_libcall_value (enum machine_mode mode) { if (TARGET_64BIT) { switch (mode) { - case SFmode: - case SCmode: - case DFmode: - case DCmode: - return gen_rtx_REG (mode, FIRST_SSE_REG); - case TFmode: - case TCmode: - return gen_rtx_REG (mode, FIRST_FLOAT_REG); - default: - return gen_rtx_REG (mode, 0); + case SFmode: + case SCmode: + case DFmode: + case DCmode: + return gen_rtx_REG (mode, FIRST_SSE_REG); + case XFmode: + case XCmode: + return gen_rtx_REG (mode, FIRST_FLOAT_REG); + case TFmode: + case TCmode: + return NULL; + default: + return gen_rtx_REG (mode, 0); } } else - return gen_rtx_REG (mode, ix86_value_regno (mode)); + return gen_rtx_REG (mode, ix86_value_regno (mode)); } /* Given a mode, return the register to use for a return value. */ static int -ix86_value_regno (mode) - enum machine_mode mode; +ix86_value_regno (enum machine_mode mode) { + /* Floating point return values in %st(0). 
*/ if (GET_MODE_CLASS (mode) == MODE_FLOAT && TARGET_FLOAT_RETURNS_IN_80387) return FIRST_FLOAT_REG; - if (mode == TImode || VECTOR_MODE_P (mode)) + /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where + we prevent this case when sse is not available. */ + if (mode == TImode || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16)) return FIRST_SSE_REG; + /* Everything else in %eax. */ return 0; } /* Create the va_list data type. */ -tree -ix86_build_va_list () +static tree +ix86_build_builtin_va_list (void) { tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl; @@ -2827,28 +3006,12 @@ ix86_build_va_list () return build_array_type (record, build_index_type (size_zero_node)); } -/* Perform any needed actions needed for a function that is receiving a - variable number of arguments. - - CUM is as above. - - MODE and TYPE are the mode and type of the current parameter. - - PRETEND_SIZE is a variable that should be set to the amount of stack - that must be pushed by the prolog to pretend that our caller pushed - it. - - Normally, this macro will push all remaining incoming registers on the - stack and set PRETEND_SIZE to the length of the registers pushed. */ - -void -ix86_setup_incoming_varargs (cum, mode, type, pretend_size, no_rtl) - CUMULATIVE_ARGS *cum; - enum machine_mode mode; - tree type; - int *pretend_size ATTRIBUTE_UNUSED; - int no_rtl; +/* Worker function for TARGET_SETUP_INCOMING_VARARGS. */ +static void +ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode, + tree type, int *pretend_size ATTRIBUTE_UNUSED, + int no_rtl) { CUMULATIVE_ARGS next_cum; rtx save_area = NULL_RTX, mem; @@ -2944,9 +3107,7 @@ ix86_setup_incoming_varargs (cum, mode, type, pretend_size, no_rtl) /* Implement va_start. */ void -ix86_va_start (valist, nextarg) - tree valist; - rtx nextarg; +ix86_va_start (tree valist, rtx nextarg) { HOST_WIDE_INT words, n_gpr, n_fpr; tree f_gpr, f_fpr, f_ovf, f_sav; @@ -3008,8 +3169,7 @@ ix86_va_start (valist, nextarg) /* Implement va_arg. */ rtx -ix86_va_arg (valist, type) - tree valist, type; +ix86_va_arg (tree valist, tree type) { static const int intreg[6] = { 0, 1, 2, 3, 4, 5 }; tree f_gpr, f_fpr, f_ovf, f_sav; @@ -3149,10 +3309,12 @@ ix86_va_arg (valist, type) { int i; rtx mem; + rtx x; /* Never use the memory itself, as it has the alias set. */ - addr_rtx = XEXP (assign_temp (type, 0, 1, 0), 0); - mem = gen_rtx_MEM (BLKmode, addr_rtx); + x = XEXP (assign_temp (type, 0, 1, 0), 0); + mem = gen_rtx_MEM (BLKmode, x); + force_operand (x, addr_rtx); set_mem_alias_set (mem, get_varargs_alias_set ()); set_mem_align (mem, BITS_PER_UNIT); @@ -3247,27 +3409,21 @@ ix86_va_arg (valist, type) /* Return nonzero if OP is either a i387 or SSE fp register. */ int -any_fp_register_operand (op, mode) - rtx op; - enum machine_mode mode ATTRIBUTE_UNUSED; +any_fp_register_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { return ANY_FP_REG_P (op); } /* Return nonzero if OP is an i387 fp register. */ int -fp_register_operand (op, mode) - rtx op; - enum machine_mode mode ATTRIBUTE_UNUSED; +fp_register_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { return FP_REG_P (op); } /* Return nonzero if OP is a non-fp register_operand. 
*/ int -register_and_not_any_fp_reg_operand (op, mode) - rtx op; - enum machine_mode mode; +register_and_not_any_fp_reg_operand (rtx op, enum machine_mode mode) { return register_operand (op, mode) && !ANY_FP_REG_P (op); } @@ -3275,9 +3431,7 @@ register_and_not_any_fp_reg_operand (op, mode) /* Return nonzero if OP is a register operand other than an i387 fp register. */ int -register_and_not_fp_reg_operand (op, mode) - rtx op; - enum machine_mode mode; +register_and_not_fp_reg_operand (rtx op, enum machine_mode mode) { return register_operand (op, mode) && !FP_REG_P (op); } @@ -3285,9 +3439,7 @@ register_and_not_fp_reg_operand (op, mode) /* Return nonzero if OP is general operand representable on x86_64. */ int -x86_64_general_operand (op, mode) - rtx op; - enum machine_mode mode; +x86_64_general_operand (rtx op, enum machine_mode mode) { if (!TARGET_64BIT) return general_operand (op, mode); @@ -3300,9 +3452,7 @@ x86_64_general_operand (op, mode) as either sign extended or zero extended constant. */ int -x86_64_szext_general_operand (op, mode) - rtx op; - enum machine_mode mode; +x86_64_szext_general_operand (rtx op, enum machine_mode mode) { if (!TARGET_64BIT) return general_operand (op, mode); @@ -3314,9 +3464,7 @@ x86_64_szext_general_operand (op, mode) /* Return nonzero if OP is nonmemory operand representable on x86_64. */ int -x86_64_nonmemory_operand (op, mode) - rtx op; - enum machine_mode mode; +x86_64_nonmemory_operand (rtx op, enum machine_mode mode) { if (!TARGET_64BIT) return nonmemory_operand (op, mode); @@ -3328,9 +3476,7 @@ x86_64_nonmemory_operand (op, mode) /* Return nonzero if OP is nonmemory operand acceptable by movabs patterns. */ int -x86_64_movabs_operand (op, mode) - rtx op; - enum machine_mode mode; +x86_64_movabs_operand (rtx op, enum machine_mode mode) { if (!TARGET_64BIT || !flag_pic) return nonmemory_operand (op, mode); @@ -3341,12 +3487,31 @@ x86_64_movabs_operand (op, mode) return 0; } +/* Return nonzero if OPNUM's MEM should be matched + in movabs* patterns. */ + +int +ix86_check_movabs (rtx insn, int opnum) +{ + rtx set, mem; + + set = PATTERN (insn); + if (GET_CODE (set) == PARALLEL) + set = XVECEXP (set, 0, 0); + if (GET_CODE (set) != SET) + abort (); + mem = XEXP (set, opnum); + while (GET_CODE (mem) == SUBREG) + mem = SUBREG_REG (mem); + if (GET_CODE (mem) != MEM) + abort (); + return (volatile_ok || !MEM_VOLATILE_P (mem)); +} + /* Return nonzero if OP is nonmemory operand representable on x86_64. */ int -x86_64_szext_nonmemory_operand (op, mode) - rtx op; - enum machine_mode mode; +x86_64_szext_nonmemory_operand (rtx op, enum machine_mode mode) { if (!TARGET_64BIT) return nonmemory_operand (op, mode); @@ -3358,9 +3523,7 @@ x86_64_szext_nonmemory_operand (op, mode) /* Return nonzero if OP is immediate operand representable on x86_64. */ int -x86_64_immediate_operand (op, mode) - rtx op; - enum machine_mode mode; +x86_64_immediate_operand (rtx op, enum machine_mode mode) { if (!TARGET_64BIT) return immediate_operand (op, mode); @@ -3370,31 +3533,17 @@ x86_64_immediate_operand (op, mode) /* Return nonzero if OP is immediate operand representable on x86_64. */ int -x86_64_zext_immediate_operand (op, mode) - rtx op; - enum machine_mode mode ATTRIBUTE_UNUSED; +x86_64_zext_immediate_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { return x86_64_zero_extended_value (op); } -/* Return nonzero if OP is (const_int 1), else return zero. 
*/ - -int -const_int_1_operand (op, mode) - rtx op; - enum machine_mode mode ATTRIBUTE_UNUSED; -{ - return (GET_CODE (op) == CONST_INT && INTVAL (op) == 1); -} - /* Return nonzero if OP is CONST_INT >= 1 and <= 31 (a valid operand for shift & compare patterns, as shifting by 0 does not change flags), else return zero. */ int -const_int_1_31_operand (op, mode) - rtx op; - enum machine_mode mode ATTRIBUTE_UNUSED; +const_int_1_31_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { return (GET_CODE (op) == CONST_INT && INTVAL (op) >= 1 && INTVAL (op) <= 31); } @@ -3403,9 +3552,7 @@ const_int_1_31_operand (op, mode) reference and a constant. */ int -symbolic_operand (op, mode) - register rtx op; - enum machine_mode mode ATTRIBUTE_UNUSED; +symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { switch (GET_CODE (op)) { @@ -3449,16 +3596,19 @@ symbolic_operand (op, mode) /* Return true if the operand contains a @GOT or @GOTOFF reference. */ int -pic_symbolic_operand (op, mode) - register rtx op; - enum machine_mode mode ATTRIBUTE_UNUSED; +pic_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { if (GET_CODE (op) != CONST) return 0; op = XEXP (op, 0); if (TARGET_64BIT) { - if (GET_CODE (XEXP (op, 0)) == UNSPEC) + if (GET_CODE (op) == UNSPEC + && XINT (op, 1) == UNSPEC_GOTPCREL) + return 1; + if (GET_CODE (op) == PLUS + && GET_CODE (XEXP (op, 0)) == UNSPEC + && XINT (XEXP (op, 0), 1) == UNSPEC_GOTPCREL) return 1; } else @@ -3478,9 +3628,7 @@ pic_symbolic_operand (op, mode) /* Return true if OP is a symbolic operand that resolves locally. */ static int -local_symbolic_operand (op, mode) - rtx op; - enum machine_mode mode ATTRIBUTE_UNUSED; +local_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { if (GET_CODE (op) == CONST && GET_CODE (XEXP (op, 0)) == PLUS @@ -3511,9 +3659,7 @@ local_symbolic_operand (op, mode) /* Test for various thread-local symbols. 
*/ int -tls_symbolic_operand (op, mode) - register rtx op; - enum machine_mode mode ATTRIBUTE_UNUSED; +tls_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { if (GET_CODE (op) != SYMBOL_REF) return 0; @@ -3521,9 +3667,7 @@ tls_symbolic_operand (op, mode) } static inline int -tls_symbolic_operand_1 (op, kind) - rtx op; - enum tls_model kind; +tls_symbolic_operand_1 (rtx op, enum tls_model kind) { if (GET_CODE (op) != SYMBOL_REF) return 0; @@ -3531,33 +3675,27 @@ tls_symbolic_operand_1 (op, kind) } int -global_dynamic_symbolic_operand (op, mode) - register rtx op; - enum machine_mode mode ATTRIBUTE_UNUSED; +global_dynamic_symbolic_operand (rtx op, + enum machine_mode mode ATTRIBUTE_UNUSED) { return tls_symbolic_operand_1 (op, TLS_MODEL_GLOBAL_DYNAMIC); } int -local_dynamic_symbolic_operand (op, mode) - register rtx op; - enum machine_mode mode ATTRIBUTE_UNUSED; +local_dynamic_symbolic_operand (rtx op, + enum machine_mode mode ATTRIBUTE_UNUSED) { return tls_symbolic_operand_1 (op, TLS_MODEL_LOCAL_DYNAMIC); } int -initial_exec_symbolic_operand (op, mode) - register rtx op; - enum machine_mode mode ATTRIBUTE_UNUSED; +initial_exec_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { return tls_symbolic_operand_1 (op, TLS_MODEL_INITIAL_EXEC); } int -local_exec_symbolic_operand (op, mode) - register rtx op; - enum machine_mode mode ATTRIBUTE_UNUSED; +local_exec_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { return tls_symbolic_operand_1 (op, TLS_MODEL_LOCAL_EXEC); } @@ -3567,9 +3705,7 @@ local_exec_symbolic_operand (op, mode) reg + const, which the patterns can't handle. */ int -call_insn_operand (op, mode) - rtx op; - enum machine_mode mode ATTRIBUTE_UNUSED; +call_insn_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { /* Disallow indirect through a virtual register. This leads to compiler aborts when trying to eliminate them. */ @@ -3598,9 +3734,7 @@ call_insn_operand (op, mode) reg + const, which the patterns can't handle. */ int -sibcall_insn_operand (op, mode) - rtx op; - enum machine_mode mode ATTRIBUTE_UNUSED; +sibcall_insn_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { /* Disallow indirect through a virtual register. This leads to compiler aborts when trying to eliminate them. */ @@ -3620,9 +3754,7 @@ sibcall_insn_operand (op, mode) } int -constant_call_address_operand (op, mode) - rtx op; - enum machine_mode mode ATTRIBUTE_UNUSED; +constant_call_address_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { if (GET_CODE (op) == CONST && GET_CODE (XEXP (op, 0)) == PLUS @@ -3634,17 +3766,13 @@ constant_call_address_operand (op, mode) /* Match exactly zero and one. */ int -const0_operand (op, mode) - register rtx op; - enum machine_mode mode; +const0_operand (rtx op, enum machine_mode mode) { return op == CONST0_RTX (mode); } int -const1_operand (op, mode) - register rtx op; - enum machine_mode mode ATTRIBUTE_UNUSED; +const1_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { return op == const1_rtx; } @@ -3652,24 +3780,45 @@ const1_operand (op, mode) /* Match 2, 4, or 8. Used for leal multiplicands. 
*/ int -const248_operand (op, mode) - register rtx op; - enum machine_mode mode ATTRIBUTE_UNUSED; +const248_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { return (GET_CODE (op) == CONST_INT && (INTVAL (op) == 2 || INTVAL (op) == 4 || INTVAL (op) == 8)); } +int +const_0_to_3_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) +{ + return (GET_CODE (op) == CONST_INT && INTVAL (op) >= 0 && INTVAL (op) < 4); +} + +int +const_0_to_7_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) +{ + return (GET_CODE (op) == CONST_INT && INTVAL (op) >= 0 && INTVAL (op) < 8); +} + +int +const_0_to_15_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) +{ + return (GET_CODE (op) == CONST_INT && INTVAL (op) >= 0 && INTVAL (op) < 16); +} + +int +const_0_to_255_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) +{ + return (GET_CODE (op) == CONST_INT && INTVAL (op) >= 0 && INTVAL (op) < 256); +} + + /* True if this is a constant appropriate for an increment or decrement. */ int -incdec_operand (op, mode) - register rtx op; - enum machine_mode mode ATTRIBUTE_UNUSED; +incdec_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { /* On Pentium4, the inc and dec operations causes extra dependency on flag registers, since carry flag is not set. */ - if (TARGET_PENTIUM4 && !optimize_size) + if ((TARGET_PENTIUM4 || TARGET_NOCONA) && !optimize_size) return 0; return op == const1_rtx || op == constm1_rtx; } @@ -3678,9 +3827,7 @@ incdec_operand (op, mode) expander. */ int -shiftdi_operand (op, mode) - rtx op; - enum machine_mode mode ATTRIBUTE_UNUSED; +shiftdi_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { if (TARGET_64BIT) return nonimmediate_operand (op, mode); @@ -3696,9 +3843,7 @@ shiftdi_operand (op, mode) Which would only happen in pathological cases. */ int -reg_no_sp_operand (op, mode) - register rtx op; - enum machine_mode mode; +reg_no_sp_operand (rtx op, enum machine_mode mode) { rtx t = op; if (GET_CODE (t) == SUBREG) @@ -3710,9 +3855,7 @@ reg_no_sp_operand (op, mode) } int -mmx_reg_operand (op, mode) - register rtx op; - enum machine_mode mode ATTRIBUTE_UNUSED; +mmx_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { return MMX_REG_P (op); } @@ -3721,9 +3864,7 @@ mmx_reg_operand (op, mode) general_operand. */ int -general_no_elim_operand (op, mode) - register rtx op; - enum machine_mode mode; +general_no_elim_operand (rtx op, enum machine_mode mode) { rtx t = op; if (GET_CODE (t) == SUBREG) @@ -3744,9 +3885,7 @@ general_no_elim_operand (op, mode) register_operand or const_int. */ int -nonmemory_no_elim_operand (op, mode) - register rtx op; - enum machine_mode mode; +nonmemory_no_elim_operand (rtx op, enum machine_mode mode) { rtx t = op; if (GET_CODE (t) == SUBREG) @@ -3763,9 +3902,7 @@ nonmemory_no_elim_operand (op, mode) otherwise work like register_operand. */ int -index_register_operand (op, mode) - register rtx op; - enum machine_mode mode; +index_register_operand (rtx op, enum machine_mode mode) { rtx t = op; if (GET_CODE (t) == SUBREG) @@ -3786,9 +3923,7 @@ index_register_operand (op, mode) /* Return true if op is a Q_REGS class register. */ int -q_regs_operand (op, mode) - register rtx op; - enum machine_mode mode; +q_regs_operand (rtx op, enum machine_mode mode) { if (mode != VOIDmode && GET_MODE (op) != mode) return 0; @@ -3800,9 +3935,7 @@ q_regs_operand (op, mode) /* Return true if op is an flags register. 
*/
 int
-flags_reg_operand (op, mode)
-     register rtx op;
-     enum machine_mode mode;
+flags_reg_operand (rtx op, enum machine_mode mode)
 {
   if (mode != VOIDmode && GET_MODE (op) != mode)
     return 0;
@@ -3812,9 +3945,7 @@ flags_reg_operand (op, mode)
 /* Return true if op is a NON_Q_REGS class register.  */
 
 int
-non_q_regs_operand (op, mode)
-     register rtx op;
-     enum machine_mode mode;
+non_q_regs_operand (rtx op, enum machine_mode mode)
 {
   if (mode != VOIDmode && GET_MODE (op) != mode)
     return 0;
@@ -3824,9 +3955,8 @@ non_q_regs_operand (op, mode)
 }
 
 int
-zero_extended_scalar_load_operand (op, mode)
-     rtx op;
-     enum machine_mode mode ATTRIBUTE_UNUSED;
+zero_extended_scalar_load_operand (rtx op,
+				   enum machine_mode mode ATTRIBUTE_UNUSED)
 {
   unsigned n_elts;
   if (GET_CODE (op) != MEM)
@@ -3850,9 +3980,7 @@ zero_extended_scalar_load_operand (op, mode)
 /* Return 1 when OP is operand acceptable for standard SSE move.  */
 int
-vector_move_operand (op, mode)
-     rtx op;
-     enum machine_mode mode;
+vector_move_operand (rtx op, enum machine_mode mode)
 {
   if (nonimmediate_operand (op, mode))
     return 1;
@@ -3861,12 +3989,27 @@ vector_move_operand (op, mode)
   return (op == CONST0_RTX (GET_MODE (op)));
 }
 
+/* Return true if op is a valid address, and does not contain
+   a segment override.  */
+
+int
+no_seg_address_operand (rtx op, enum machine_mode mode)
+{
+  struct ix86_address parts;
+
+  if (! address_operand (op, mode))
+    return 0;
+
+  if (! ix86_decompose_address (op, &parts))
+    abort ();
+
+  return parts.seg == SEG_DEFAULT;
+}
+
 /* Return 1 if OP is a comparison that can be used in the CMPSS/CMPPS
    insns.  */
 int
-sse_comparison_operator (op, mode)
-     rtx op;
-     enum machine_mode mode ATTRIBUTE_UNUSED;
+sse_comparison_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
 {
   enum rtx_code code = GET_CODE (op);
   switch (code)
@@ -3895,15 +4038,13 @@ sse_comparison_operator (op, mode)
 }
 /* Return 1 if OP is a valid comparison operator in valid mode.  */
 int
-ix86_comparison_operator (op, mode)
-     register rtx op;
-     enum machine_mode mode;
+ix86_comparison_operator (rtx op, enum machine_mode mode)
 {
   enum machine_mode inmode;
   enum rtx_code code = GET_CODE (op);
   if (mode != VOIDmode && GET_MODE (op) != mode)
     return 0;
-  if (GET_RTX_CLASS (code) != '<')
+  if (!COMPARISON_P (op))
     return 0;
   inmode = GET_MODE (XEXP (op, 0));
 
@@ -3938,16 +4079,14 @@ ix86_comparison_operator (op, mode)
 /* Return 1 if OP is a valid comparison operator testing carry flag
    to be set.  */
 int
-ix86_carry_flag_operator (op, mode)
-     register rtx op;
-     enum machine_mode mode;
+ix86_carry_flag_operator (rtx op, enum machine_mode mode)
 {
   enum machine_mode inmode;
   enum rtx_code code = GET_CODE (op);
 
   if (mode != VOIDmode && GET_MODE (op) != mode)
     return 0;
-  if (GET_RTX_CLASS (code) != '<')
+  if (!COMPARISON_P (op))
     return 0;
   inmode = GET_MODE (XEXP (op, 0));
   if (GET_CODE (XEXP (op, 0)) != REG
@@ -3972,16 +4111,14 @@ ix86_carry_flag_operator (op, mode)
 /* Return 1 if OP is a comparison operator that can be issued by fcmov.
*/ int -fcmov_comparison_operator (op, mode) - register rtx op; - enum machine_mode mode; +fcmov_comparison_operator (rtx op, enum machine_mode mode) { enum machine_mode inmode; enum rtx_code code = GET_CODE (op); if (mode != VOIDmode && GET_MODE (op) != mode) return 0; - if (GET_RTX_CLASS (code) != '<') + if (!COMPARISON_P (op)) return 0; inmode = GET_MODE (XEXP (op, 0)); if (inmode == CCFPmode || inmode == CCFPUmode) @@ -4011,9 +4148,7 @@ fcmov_comparison_operator (op, mode) /* Return 1 if OP is a binary operator that can be promoted to wider mode. */ int -promotable_binary_operator (op, mode) - register rtx op; - enum machine_mode mode ATTRIBUTE_UNUSED; +promotable_binary_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { switch (GET_CODE (op)) { @@ -4037,9 +4172,7 @@ promotable_binary_operator (op, mode) into registers. */ int -cmp_fp_expander_operand (op, mode) - register rtx op; - enum machine_mode mode; +cmp_fp_expander_operand (rtx op, enum machine_mode mode) { if (mode != VOIDmode && mode != GET_MODE (op)) return 0; @@ -4051,9 +4184,7 @@ cmp_fp_expander_operand (op, mode) /* Match an SI or HImode register for a zero_extract. */ int -ext_register_operand (op, mode) - register rtx op; - enum machine_mode mode ATTRIBUTE_UNUSED; +ext_register_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { int regno; if ((!TARGET_64BIT || GET_MODE (op) != DImode) @@ -4072,9 +4203,7 @@ ext_register_operand (op, mode) OP is the expression matched, and MODE is its mode. */ int -binary_fp_operator (op, mode) - register rtx op; - enum machine_mode mode; +binary_fp_operator (rtx op, enum machine_mode mode) { if (mode != VOIDmode && mode != GET_MODE (op)) return 0; @@ -4093,37 +4222,28 @@ binary_fp_operator (op, mode) } int -mult_operator (op, mode) - register rtx op; - enum machine_mode mode ATTRIBUTE_UNUSED; +mult_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { return GET_CODE (op) == MULT; } int -div_operator (op, mode) - register rtx op; - enum machine_mode mode ATTRIBUTE_UNUSED; +div_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { return GET_CODE (op) == DIV; } int -arith_or_logical_operator (op, mode) - rtx op; - enum machine_mode mode; +arith_or_logical_operator (rtx op, enum machine_mode mode) { return ((mode == VOIDmode || GET_MODE (op) == mode) - && (GET_RTX_CLASS (GET_CODE (op)) == 'c' - || GET_RTX_CLASS (GET_CODE (op)) == '2')); + && ARITHMETIC_P (op)); } /* Returns 1 if OP is memory operand with a displacement. */ int -memory_displacement_operand (op, mode) - register rtx op; - enum machine_mode mode; +memory_displacement_operand (rtx op, enum machine_mode mode) { struct ix86_address parts; @@ -4143,9 +4263,7 @@ memory_displacement_operand (op, mode) expander, and no actual insns use this. */ int -cmpsi_operand (op, mode) - rtx op; - enum machine_mode mode; +cmpsi_operand (rtx op, enum machine_mode mode) { if (nonimmediate_operand (op, mode)) return 1; @@ -4167,9 +4285,7 @@ cmpsi_operand (op, mode) modRM array. */ int -long_memory_operand (op, mode) - register rtx op; - enum machine_mode mode; +long_memory_operand (rtx op, enum machine_mode mode) { if (! memory_operand (op, mode)) return 0; @@ -4180,9 +4296,7 @@ long_memory_operand (op, mode) /* Return nonzero if the rtx is known aligned. */ int -aligned_operand (op, mode) - rtx op; - enum machine_mode mode; +aligned_operand (rtx op, enum machine_mode mode) { struct ix86_address parts; @@ -4208,11 +4322,6 @@ aligned_operand (op, mode) if (! 
ix86_decompose_address (op, &parts)) abort (); - if (parts.base && GET_CODE (parts.base) == SUBREG) - parts.base = SUBREG_REG (parts.base); - if (parts.index && GET_CODE (parts.index) == SUBREG) - parts.index = SUBREG_REG (parts.index); - /* Look for some component that isn't known to be aligned. */ if (parts.index) { @@ -4239,7 +4348,7 @@ aligned_operand (op, mode) /* Initialize the table of extra 80387 mathematical constants. */ static void -init_ext_80387_constants () +init_ext_80387_constants (void) { static const char * cst[5] = { @@ -4255,8 +4364,8 @@ init_ext_80387_constants () { real_from_string (&ext_80387_constants_table[i], cst[i]); /* Ensure each constant is rounded to XFmode precision. */ - real_convert (&ext_80387_constants_table[i], XFmode, - &ext_80387_constants_table[i]); + real_convert (&ext_80387_constants_table[i], + XFmode, &ext_80387_constants_table[i]); } ext_80387_constants_init = 1; @@ -4266,8 +4375,7 @@ init_ext_80387_constants () a special instruction. */ int -standard_80387_constant_p (x) - rtx x; +standard_80387_constant_p (rtx x) { if (GET_CODE (x) != CONST_DOUBLE || !FLOAT_MODE_P (GET_MODE (x))) return -1; @@ -4277,10 +4385,10 @@ standard_80387_constant_p (x) if (x == CONST1_RTX (GET_MODE (x))) return 2; - /* For XFmode constants, try to find a special 80387 instruction on - those CPUs that benefit from them. */ + /* For XFmode constants, try to find a special 80387 instruction when + optimizing for size or on those CPUs that benefit from them. */ if (GET_MODE (x) == XFmode - && x86_ext_80387_constants & TUNEMASK) + && (optimize_size || x86_ext_80387_constants & TUNEMASK)) { REAL_VALUE_TYPE r; int i; @@ -4301,24 +4409,23 @@ standard_80387_constant_p (x) the constant X. */ const char * -standard_80387_constant_opcode (x) - rtx x; +standard_80387_constant_opcode (rtx x) { switch (standard_80387_constant_p (x)) { - case 1: + case 1: return "fldz"; case 2: return "fld1"; - case 3: + case 3: return "fldlg2"; case 4: return "fldln2"; - case 5: + case 5: return "fldl2e"; case 6: return "fldl2t"; - case 7: + case 7: return "fldpi"; } abort (); @@ -4329,8 +4436,7 @@ standard_80387_constant_opcode (x) matches the return value from standard_80387_constant_p. */ rtx -standard_80387_constant_rtx (idx) - int idx; +standard_80387_constant_rtx (int idx) { int i; @@ -4351,14 +4457,14 @@ standard_80387_constant_rtx (idx) abort (); } - return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i], XFmode); + return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i], + XFmode); } /* Return 1 if X is FP constant we can load to SSE register w/o using memory. */ int -standard_sse_constant_p (x) - rtx x; +standard_sse_constant_p (rtx x) { if (x == const0_rtx) return 1; @@ -4368,11 +4474,10 @@ standard_sse_constant_p (x) /* Returns 1 if OP contains a symbol reference */ int -symbolic_reference_mentioned_p (op) - rtx op; +symbolic_reference_mentioned_p (rtx op) { - register const char *fmt; - register int i; + const char *fmt; + int i; if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF) return 1; @@ -4382,7 +4487,7 @@ symbolic_reference_mentioned_p (op) { if (fmt[i] == 'E') { - register int j; + int j; for (j = XVECLEN (op, i) - 1; j >= 0; j--) if (symbolic_reference_mentioned_p (XVECEXP (op, i, j))) @@ -4409,7 +4514,7 @@ symbolic_reference_mentioned_p (op) `return' is OK. 
*/ int -ix86_can_use_return_insn_p () +ix86_can_use_return_insn_p (void) { struct ix86_frame frame; @@ -4433,8 +4538,7 @@ ix86_can_use_return_insn_p () /* Return 1 if VALUE can be stored in the sign extended immediate field. */ int -x86_64_sign_extended_value (value) - rtx value; +x86_64_sign_extended_value (rtx value) { switch (GET_CODE (value)) { @@ -4456,6 +4560,9 @@ x86_64_sign_extended_value (value) library. Don't count TLS SYMBOL_REFs here, since they should fit only if inside of UNSPEC handled below. */ case SYMBOL_REF: + /* TLS symbols are not constant. */ + if (tls_symbolic_operand (value, Pmode)) + return false; return (ix86_cmodel == CM_SMALL || ix86_cmodel == CM_KERNEL); /* For certain code models, the code is near as well. */ @@ -4542,8 +4649,7 @@ x86_64_sign_extended_value (value) /* Return 1 if VALUE can be stored in the zero extended immediate field. */ int -x86_64_zero_extended_value (value) - rtx value; +x86_64_zero_extended_value (rtx value) { switch (GET_CODE (value)) { @@ -4562,6 +4668,9 @@ x86_64_zero_extended_value (value) /* For certain code models, the symbolic references are known to fit. */ case SYMBOL_REF: + /* TLS symbols are not constant. */ + if (tls_symbolic_operand (value, Pmode)) + return false; return ix86_cmodel == CM_SMALL; /* For certain code models, the code is near as well. */ @@ -4622,7 +4731,7 @@ x86_64_zero_extended_value (value) be accessed via the stack pointer) in functions that seem suitable. */ int -ix86_frame_pointer_required () +ix86_frame_pointer_required (void) { /* If we accessed previous frames, then the generated code expects to be able to access the saved ebp value in our frame. */ @@ -4650,7 +4759,7 @@ ix86_frame_pointer_required () /* Record that the current function accesses previous call frames. */ void -ix86_setup_frame_addresses () +ix86_setup_frame_addresses (void) { cfun->machine->accesses_prev_frame = 1; } @@ -4667,9 +4776,7 @@ static int pic_labels_used; the given register. */ static void -get_pc_thunk_name (name, regno) - char name[32]; - unsigned int regno; +get_pc_thunk_name (char name[32], unsigned int regno) { if (USE_HIDDEN_LINKONCE) sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]); @@ -4682,8 +4789,7 @@ get_pc_thunk_name (name, regno) the return address of the caller and then returns. */ void -ix86_asm_file_end (file) - FILE *file; +ix86_file_end (void) { rtx xops[2]; int regno; @@ -4710,16 +4816,16 @@ ix86_asm_file_end (file) (*targetm.asm_out.unique_section) (decl, 0); named_section (decl, NULL, 0); - (*targetm.asm_out.globalize_label) (file, name); - fputs ("\t.hidden\t", file); - assemble_name (file, name); - fputc ('\n', file); - ASM_DECLARE_FUNCTION_NAME (file, name, decl); + (*targetm.asm_out.globalize_label) (asm_out_file, name); + fputs ("\t.hidden\t", asm_out_file); + assemble_name (asm_out_file, name); + fputc ('\n', asm_out_file); + ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl); } else { text_section (); - ASM_OUTPUT_LABEL (file, name); + ASM_OUTPUT_LABEL (asm_out_file, name); } xops[0] = gen_rtx_REG (SImode, regno); @@ -4727,13 +4833,15 @@ ix86_asm_file_end (file) output_asm_insn ("mov{l}\t{%1, %0|%0, %1}", xops); output_asm_insn ("ret", xops); } + + if (NEED_INDICATE_EXEC_STACK) + file_end_indicate_exec_stack (); } /* Emit code for the SET_GOT patterns. */ const char * -output_set_got (dest) - rtx dest; +output_set_got (rtx dest) { rtx xops[3]; @@ -4782,8 +4890,7 @@ output_set_got (dest) /* Generate an "push" pattern for input ARG. 
*/ static rtx -gen_push (arg) - rtx arg; +gen_push (rtx arg) { return gen_rtx_SET (VOIDmode, gen_rtx_MEM (Pmode, @@ -4796,7 +4903,7 @@ gen_push (arg) for the entire function. */ static unsigned int -ix86_select_alt_pic_regnum () +ix86_select_alt_pic_regnum (void) { if (current_function_is_leaf && !current_function_profile) { @@ -4811,9 +4918,7 @@ ix86_select_alt_pic_regnum () /* Return 1 if we need to save REGNO. */ static int -ix86_save_reg (regno, maybe_eh_return) - unsigned int regno; - int maybe_eh_return; +ix86_save_reg (unsigned int regno, int maybe_eh_return) { if (pic_offset_table_rtx && regno == REAL_PIC_OFFSET_TABLE_REGNUM @@ -4849,7 +4954,7 @@ ix86_save_reg (regno, maybe_eh_return) /* Return number of registers to be saved on the stack. */ static int -ix86_nsaved_regs () +ix86_nsaved_regs (void) { int nregs = 0; int regno; @@ -4864,9 +4969,7 @@ ix86_nsaved_regs () its replacement, at the start of a routine. */ HOST_WIDE_INT -ix86_initial_elimination_offset (from, to) - int from; - int to; +ix86_initial_elimination_offset (int from, int to) { struct ix86_frame frame; ix86_compute_frame_layout (&frame); @@ -4892,12 +4995,11 @@ ix86_initial_elimination_offset (from, to) /* Fill structure ix86_frame about frame of currently computed function. */ static void -ix86_compute_frame_layout (frame) - struct ix86_frame *frame; +ix86_compute_frame_layout (struct ix86_frame *frame) { HOST_WIDE_INT total_size; int stack_alignment_needed = cfun->stack_alignment_needed / BITS_PER_UNIT; - int offset; + HOST_WIDE_INT offset; int preferred_alignment = cfun->preferred_stack_boundary / BITS_PER_UNIT; HOST_WIDE_INT size = get_frame_size (); @@ -4917,7 +5019,7 @@ ix86_compute_frame_layout (frame) /* The fast prologue uses move instead of push to save registers. This is significantly longer, but also executes faster as modern hardware can execute the moves in parallel, but can't do that for push/pop. - + Be careful about choosing what prologue to emit: When function takes many instructions to execute we may use slow version as well as in case function is known to be outside hot spot (this is known with @@ -4986,8 +5088,12 @@ ix86_compute_frame_layout (frame) offset += size; /* Add outgoing arguments area. Can be skipped if we eliminated - all the function calls as dead code. */ - if (ACCUMULATE_OUTGOING_ARGS && !current_function_is_leaf) + all the function calls as dead code. + Skipping is however impossible when function calls alloca. Alloca + expander assumes that last current_function_outgoing_args_size + of stack frame are unused. */ + if (ACCUMULATE_OUTGOING_ARGS + && (!current_function_is_leaf || current_function_calls_alloca)) { offset += current_function_outgoing_args_size; frame->outgoing_arguments_size = current_function_outgoing_args_size; @@ -5013,7 +5119,8 @@ ix86_compute_frame_layout (frame) (size + frame->padding1 + frame->padding2 + frame->outgoing_arguments_size + frame->va_arg_size); - if (!frame->to_allocate && frame->nregs <= 1) + if ((!frame->to_allocate && frame->nregs <= 1) + || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000)) frame->save_regs_using_mov = false; if (TARGET_RED_ZONE && current_function_sp_is_unchanging @@ -5048,9 +5155,9 @@ ix86_compute_frame_layout (frame) /* Emit code to save registers in the prologue. 
*/ static void -ix86_emit_save_regs () +ix86_emit_save_regs (void) { - register int regno; + int regno; rtx insn; for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--) @@ -5064,9 +5171,7 @@ ix86_emit_save_regs () /* Emit code to save registers using MOV insns. First register is restored from POINTER + OFFSET. */ static void -ix86_emit_save_regs_using_mov (pointer, offset) - rtx pointer; - HOST_WIDE_INT offset; +ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset) { int regno; rtx insn; @@ -5082,10 +5187,45 @@ ix86_emit_save_regs_using_mov (pointer, offset) } } +/* Expand prologue or epilogue stack adjustment. + The pattern exist to put a dependency on all ebp-based memory accesses. + STYLE should be negative if instructions should be marked as frame related, + zero if %r11 register is live and cannot be freely used and positive + otherwise. */ + +static void +pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset, int style) +{ + rtx insn; + + if (! TARGET_64BIT) + insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset)); + else if (x86_64_immediate_operand (offset, DImode)) + insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset)); + else + { + rtx r11; + /* r11 is used by indirect sibcall return as well, set before the + epilogue and used after the epilogue. ATM indirect sibcall + shouldn't be used together with huge frame sizes in one + function because of the frame_size check in sibcall.c. */ + if (style == 0) + abort (); + r11 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */); + insn = emit_insn (gen_rtx_SET (DImode, r11, offset)); + if (style < 0) + RTX_FRAME_RELATED_P (insn) = 1; + insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11, + offset)); + } + if (style < 0) + RTX_FRAME_RELATED_P (insn) = 1; +} + /* Expand the prologue into a bunch of separate insns. */ void -ix86_expand_prologue () +ix86_expand_prologue (void) { rtx insn; bool pic_reg_used; @@ -5123,36 +5263,36 @@ ix86_expand_prologue () if (allocate == 0) ; else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT) - { - insn = emit_insn (gen_pro_epilogue_adjust_stack - (stack_pointer_rtx, stack_pointer_rtx, - GEN_INT (-allocate))); - RTX_FRAME_RELATED_P (insn) = 1; - } + pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx, + GEN_INT (-allocate), -1); else { - /* ??? Is this only valid for Win32? */ - - rtx arg0, sym; + /* Only valid for Win32. */ + rtx eax = gen_rtx_REG (SImode, 0); + bool eax_live = ix86_eax_live_at_start_p (); if (TARGET_64BIT) - abort (); + abort (); - arg0 = gen_rtx_REG (SImode, 0); - emit_move_insn (arg0, GEN_INT (allocate)); + if (eax_live) + { + emit_insn (gen_push (eax)); + allocate -= 4; + } - sym = gen_rtx_MEM (FUNCTION_MODE, - gen_rtx_SYMBOL_REF (Pmode, "_alloca")); - insn = emit_call_insn (gen_call (sym, const0_rtx, constm1_rtx)); + insn = emit_move_insn (eax, GEN_INT (allocate)); + RTX_FRAME_RELATED_P (insn) = 1; - CALL_INSN_FUNCTION_USAGE (insn) - = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, arg0), - CALL_INSN_FUNCTION_USAGE (insn)); + insn = emit_insn (gen_allocate_stack_worker (eax)); + RTX_FRAME_RELATED_P (insn) = 1; - /* Don't allow scheduling pass to move insns across __alloca - call. 
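   (The replacement sequence above follows the Windows stack-probe
   protocol; roughly, with the worker symbol as used historically:

	movl	$SIZE, %eax
	call	_alloca			# touch each page, adjust %esp

   and when %eax is live at function entry it is pushed beforehand
   and reloaded from the newly allocated area afterwards, which is
   what the eax_live handling implements.)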
*/ - emit_insn (gen_blockage (const0_rtx)); + if (eax_live) + { + rtx t = plus_constant (stack_pointer_rtx, allocate); + emit_move_insn (eax, gen_rtx_MEM (SImode, t)); + } } + if (frame.save_regs_using_mov && !TARGET_RED_ZONE) { if (!frame_pointer_needed || !frame.to_allocate) @@ -5197,19 +5337,29 @@ ix86_expand_prologue () /* Emit code to restore saved registers using MOV insns. First register is restored from POINTER + OFFSET. */ static void -ix86_emit_restore_regs_using_mov (pointer, offset, maybe_eh_return) - rtx pointer; - int offset; - int maybe_eh_return; +ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset, + int maybe_eh_return) { int regno; + rtx base_address = gen_rtx_MEM (Pmode, pointer); for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) if (ix86_save_reg (regno, maybe_eh_return)) { - emit_move_insn (gen_rtx_REG (Pmode, regno), - adjust_address (gen_rtx_MEM (Pmode, pointer), - Pmode, offset)); + /* Ensure that adjust_address won't be forced to produce pointer + out of range allowed by x86-64 instruction set. */ + if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode)) + { + rtx r11; + + r11 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */); + emit_move_insn (r11, GEN_INT (offset)); + emit_insn (gen_adddi3 (r11, r11, pointer)); + base_address = gen_rtx_MEM (Pmode, r11); + offset = 0; + } + emit_move_insn (gen_rtx_REG (Pmode, regno), + adjust_address (base_address, Pmode, offset)); offset += UNITS_PER_WORD; } } @@ -5217,8 +5367,7 @@ ix86_emit_restore_regs_using_mov (pointer, offset, maybe_eh_return) /* Restore function stack, frame, and registers. */ void -ix86_expand_epilogue (style) - int style; +ix86_expand_epilogue (int style) { int regno; int sp_valid = !frame_pointer_needed || current_function_sp_is_unchanging; @@ -5283,8 +5432,8 @@ ix86_expand_epilogue (style) tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx); emit_move_insn (hard_frame_pointer_rtx, tmp); - emit_insn (gen_pro_epilogue_adjust_stack - (stack_pointer_rtx, sa, const0_rtx)); + pro_epilogue_adjust_stack (stack_pointer_rtx, sa, + const0_rtx, style); } else { @@ -5295,19 +5444,19 @@ ix86_expand_epilogue (style) } } else if (!frame_pointer_needed) - emit_insn (gen_pro_epilogue_adjust_stack - (stack_pointer_rtx, stack_pointer_rtx, - GEN_INT (frame.to_allocate - + frame.nregs * UNITS_PER_WORD))); + pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx, + GEN_INT (frame.to_allocate + + frame.nregs * UNITS_PER_WORD), + style); /* If not an i386, mov & pop is faster than "leave". */ else if (TARGET_USE_LEAVE || optimize_size || !cfun->machine->use_fast_prologue_epilogue) emit_insn (TARGET_64BIT ? 
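/* The two epilogue shapes being chosen between here, in 32-bit
   assembly:

	leave				# short, but not fastest everywhere
   versus
	movl	%ebp, %esp
	popl	%ebp

   TARGET_USE_LEAVE and use_fast_prologue_epilogue select whichever
   is expected to be cheaper for the current tuning.  */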
gen_leave_rex64 () : gen_leave ()); else { - emit_insn (gen_pro_epilogue_adjust_stack (stack_pointer_rtx, - hard_frame_pointer_rtx, - const0_rtx)); + pro_epilogue_adjust_stack (stack_pointer_rtx, + hard_frame_pointer_rtx, + const0_rtx, style); if (TARGET_64BIT) emit_insn (gen_popdi1 (hard_frame_pointer_rtx)); else @@ -5322,14 +5471,13 @@ ix86_expand_epilogue (style) { if (!frame_pointer_needed) abort (); - emit_insn (gen_pro_epilogue_adjust_stack (stack_pointer_rtx, - hard_frame_pointer_rtx, - GEN_INT (offset))); + pro_epilogue_adjust_stack (stack_pointer_rtx, + hard_frame_pointer_rtx, + GEN_INT (offset), style); } else if (frame.to_allocate) - emit_insn (gen_pro_epilogue_adjust_stack - (stack_pointer_rtx, stack_pointer_rtx, - GEN_INT (frame.to_allocate))); + pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx, + GEN_INT (frame.to_allocate), style); for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) if (ix86_save_reg (regno, false)) @@ -5368,7 +5516,7 @@ ix86_expand_epilogue (style) { rtx ecx = gen_rtx_REG (SImode, 2); - /* There are is no "pascal" calling convention in 64bit ABI. */ + /* There is no "pascal" calling convention in 64bit ABI. */ if (TARGET_64BIT) abort (); @@ -5386,9 +5534,8 @@ ix86_expand_epilogue (style) /* Reset from the function's potential modifications. */ static void -ix86_output_function_epilogue (file, size) - FILE *file ATTRIBUTE_UNUSED; - HOST_WIDE_INT size ATTRIBUTE_UNUSED; +ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED, + HOST_WIDE_INT size ATTRIBUTE_UNUSED) { if (pic_offset_table_rtx) REGNO (pic_offset_table_rtx) = REAL_PIC_OFFSET_TABLE_REGNUM; @@ -5397,13 +5544,10 @@ ix86_output_function_epilogue (file, size) /* Extract the parts of an RTL expression that is a valid memory address for an instruction. Return 0 if the structure of the address is grossly off. Return -1 if the address contains ASHIFT, so it is not - strictly valid, but still used for computing length of lea instruction. - */ + strictly valid, but still used for computing length of lea instruction. 
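   For example, an address such as

	(plus:SI (plus:SI (mult:SI (reg:SI I) (const_int 4))
			  (reg:SI B))
		 (const_int 12))

   is expected to come back as base = B, index = I, scale = 4 and
   disp = 12, i.e. the base + index*scale + disp addressing form.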
*/ static int -ix86_decompose_address (addr, out) - register rtx addr; - struct ix86_address *out; +ix86_decompose_address (rtx addr, struct ix86_address *out) { rtx base = NULL_RTX; rtx index = NULL_RTX; @@ -5411,47 +5555,72 @@ ix86_decompose_address (addr, out) HOST_WIDE_INT scale = 1; rtx scale_rtx = NULL_RTX; int retval = 1; + enum ix86_address_seg seg = SEG_DEFAULT; - if (REG_P (addr) || GET_CODE (addr) == SUBREG) + if (GET_CODE (addr) == REG || GET_CODE (addr) == SUBREG) base = addr; else if (GET_CODE (addr) == PLUS) { - rtx op0 = XEXP (addr, 0); - rtx op1 = XEXP (addr, 1); - enum rtx_code code0 = GET_CODE (op0); - enum rtx_code code1 = GET_CODE (op1); + rtx addends[4], op; + int n = 0, i; - if (code0 == REG || code0 == SUBREG) - { - if (code1 == REG || code1 == SUBREG) - index = op0, base = op1; /* index + base */ - else - base = op0, disp = op1; /* base + displacement */ - } - else if (code0 == MULT) - { - index = XEXP (op0, 0); - scale_rtx = XEXP (op0, 1); - if (code1 == REG || code1 == SUBREG) - base = op1; /* index*scale + base */ - else - disp = op1; /* index*scale + disp */ - } - else if (code0 == PLUS && GET_CODE (XEXP (op0, 0)) == MULT) + op = addr; + do { - index = XEXP (XEXP (op0, 0), 0); /* index*scale + base + disp */ - scale_rtx = XEXP (XEXP (op0, 0), 1); - base = XEXP (op0, 1); - disp = op1; + if (n >= 4) + return 0; + addends[n++] = XEXP (op, 1); + op = XEXP (op, 0); } - else if (code0 == PLUS) + while (GET_CODE (op) == PLUS); + if (n >= 4) + return 0; + addends[n] = op; + + for (i = n; i >= 0; --i) { - index = XEXP (op0, 0); /* index + base + disp */ - base = XEXP (op0, 1); - disp = op1; + op = addends[i]; + switch (GET_CODE (op)) + { + case MULT: + if (index) + return 0; + index = XEXP (op, 0); + scale_rtx = XEXP (op, 1); + break; + + case UNSPEC: + if (XINT (op, 1) == UNSPEC_TP + && TARGET_TLS_DIRECT_SEG_REFS + && seg == SEG_DEFAULT) + seg = TARGET_64BIT ? SEG_FS : SEG_GS; + else + return 0; + break; + + case REG: + case SUBREG: + if (!base) + base = op; + else if (!index) + index = op; + else + return 0; + break; + + case CONST: + case CONST_INT: + case SYMBOL_REF: + case LABEL_REF: + if (disp) + return 0; + disp = op; + break; + + default: + return 0; + } } - else - return 0; } else if (GET_CODE (addr) == MULT) { @@ -5484,10 +5653,11 @@ ix86_decompose_address (addr, out) scale = INTVAL (scale_rtx); } - /* Allow arg pointer and stack pointer as index if there is not scaling */ + /* Allow arg pointer and stack pointer as index if there is not scaling. */ if (base && index && scale == 1 - && (index == arg_pointer_rtx || index == frame_pointer_rtx - || index == stack_pointer_rtx)) + && (index == arg_pointer_rtx + || index == frame_pointer_rtx + || (REG_P (index) && REGNO (index) == STACK_POINTER_REGNUM))) { rtx tmp = base; base = index; @@ -5520,6 +5690,7 @@ ix86_decompose_address (addr, out) out->index = index; out->disp = disp; out->scale = scale; + out->seg = seg; return retval; } @@ -5530,8 +5701,7 @@ ix86_decompose_address (addr, out) requires to two regs - that would mean more pseudos with longer lifetimes. */ static int -ix86_address_cost (x) - rtx x; +ix86_address_cost (rtx x) { struct ix86_address parts; int cost = 1; @@ -5539,14 +5709,11 @@ ix86_address_cost (x) if (!ix86_decompose_address (x, &parts)) abort (); - if (parts.base && GET_CODE (parts.base) == SUBREG) - parts.base = SUBREG_REG (parts.base); - if (parts.index && GET_CODE (parts.index) == SUBREG) - parts.index = SUBREG_REG (parts.index); - /* More complex memory references are better. 
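   (For instance 12(%ebx,%ecx,4) is preferred over a bare (%ebx):
   folding the displacement into the address is free, so it lowers
   the cost computed below, while each additional register that has
   to stay live for the address raises it.)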
*/ if (parts.disp && parts.disp != const0_rtx) cost--; + if (parts.seg != SEG_DEFAULT) + cost--; /* Attempt to minimize number of registers in the address. */ if ((parts.base @@ -5592,8 +5759,7 @@ ix86_address_cost (x) UNSPEC), then return the base term. Otherwise return X. */ rtx -ix86_find_base_term (x) - rtx x; +ix86_find_base_term (rtx x) { rtx term; @@ -5632,8 +5798,7 @@ ix86_find_base_term (x) satisfies CONSTANT_P. */ bool -legitimate_constant_p (x) - rtx x; +legitimate_constant_p (rtx x) { rtx inner; @@ -5666,7 +5831,10 @@ legitimate_constant_p (x) switch (XINT (inner, 1)) { case UNSPEC_TPOFF: + case UNSPEC_NTPOFF: return local_exec_symbolic_operand (XVECEXP (inner, 0, 0), Pmode); + case UNSPEC_DTPOFF: + return local_dynamic_symbolic_operand (XVECEXP (inner, 0, 0), Pmode); default: return false; } @@ -5685,8 +5853,7 @@ legitimate_constant_p (x) is checked above. */ static bool -ix86_cannot_force_const_mem (x) - rtx x; +ix86_cannot_force_const_mem (rtx x) { return !legitimate_constant_p (x); } @@ -5694,8 +5861,7 @@ ix86_cannot_force_const_mem (x) /* Determine if a given RTX is a valid constant address. */ bool -constant_address_p (x) - rtx x; +constant_address_p (rtx x) { return CONSTANT_P (x) && legitimate_address_p (Pmode, x, 1); } @@ -5705,8 +5871,7 @@ constant_address_p (x) that X satisfies CONSTANT_P or is a CONST_DOUBLE. */ bool -legitimate_pic_operand_p (x) - rtx x; +legitimate_pic_operand_p (rtx x) { rtx inner; @@ -5739,8 +5904,7 @@ legitimate_pic_operand_p (x) in PIC mode. */ int -legitimate_pic_address_disp_p (disp) - register rtx disp; +legitimate_pic_address_disp_p (rtx disp) { bool saw_plus; @@ -5758,15 +5922,23 @@ legitimate_pic_address_disp_p (disp) if (GET_CODE (disp) == LABEL_REF) return 1; if (GET_CODE (disp) == CONST - && GET_CODE (XEXP (disp, 0)) == PLUS - && ((GET_CODE (XEXP (XEXP (disp, 0), 0)) == SYMBOL_REF - && ix86_cmodel == CM_SMALL_PIC - && SYMBOL_REF_LOCAL_P (XEXP (XEXP (disp, 0), 0))) - || GET_CODE (XEXP (XEXP (disp, 0), 0)) == LABEL_REF) - && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT - && INTVAL (XEXP (XEXP (disp, 0), 1)) < 16*1024*1024 - && INTVAL (XEXP (XEXP (disp, 0), 1)) >= -16*1024*1024) - return 1; + && GET_CODE (XEXP (disp, 0)) == PLUS) + { + rtx op0 = XEXP (XEXP (disp, 0), 0); + rtx op1 = XEXP (XEXP (disp, 0), 1); + + /* TLS references should always be enclosed in UNSPEC. */ + if (tls_symbolic_operand (op0, GET_MODE (op0))) + return 0; + if (((GET_CODE (op0) == SYMBOL_REF + && ix86_cmodel == CM_SMALL_PIC + && SYMBOL_REF_LOCAL_P (op0)) + || GET_CODE (op0) == LABEL_REF) + && GET_CODE (op1) == CONST_INT + && INTVAL (op1) < 16*1024*1024 + && INTVAL (op1) >= -16*1024*1024) + return 1; + } } if (GET_CODE (disp) != CONST) return 0; @@ -5803,7 +5975,7 @@ legitimate_pic_address_disp_p (disp) if (GET_CODE (XEXP (disp, 1)) == SYMBOL_REF) { const char *sym_name = XSTR (XEXP (disp, 1), 0); - if (strstr (sym_name, "$pb") != 0) + if (! strcmp (sym_name, "")) return 1; } } @@ -5846,10 +6018,7 @@ legitimate_pic_address_disp_p (disp) be recognized. 
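   Concretely, the strict variant accepts only hard registers that
   are valid in the given role, while the non-strict variant also
   lets pseudos through, along the lines of

	strict:      REGNO (reg) is a valid hard base/index register
	non-strict:  the above, or REGNO (reg) >= FIRST_PSEUDO_REGISTER

   which is what the REG_OK_FOR_BASE_STRICT_P and related macros
   used below encode.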
*/ int -legitimate_address_p (mode, addr, strict) - enum machine_mode mode; - register rtx addr; - int strict; +legitimate_address_p (enum machine_mode mode, rtx addr, int strict) { struct ix86_address parts; rtx base, index, disp; @@ -5865,13 +6034,6 @@ legitimate_address_p (mode, addr, strict) debug_rtx (addr); } - if (GET_CODE (addr) == UNSPEC && XINT (addr, 1) == UNSPEC_TP) - { - if (TARGET_DEBUG_ADDR) - fprintf (stderr, "Success.\n"); - return TRUE; - } - if (ix86_decompose_address (addr, &parts) <= 0) { reason = "decomposition failed"; @@ -5891,15 +6053,9 @@ legitimate_address_p (mode, addr, strict) if (base) { - rtx reg; reason_rtx = base; - if (GET_CODE (base) == SUBREG) - reg = SUBREG_REG (base); - else - reg = base; - - if (GET_CODE (reg) != REG) + if (GET_CODE (base) != REG) { reason = "base is not a register"; goto report_error; @@ -5911,8 +6067,8 @@ legitimate_address_p (mode, addr, strict) goto report_error; } - if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg)) - || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg))) + if ((strict && ! REG_OK_FOR_BASE_STRICT_P (base)) + || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (base))) { reason = "base is not valid"; goto report_error; @@ -5927,15 +6083,9 @@ legitimate_address_p (mode, addr, strict) if (index) { - rtx reg; reason_rtx = index; - if (GET_CODE (index) == SUBREG) - reg = SUBREG_REG (index); - else - reg = index; - - if (GET_CODE (reg) != REG) + if (GET_CODE (index) != REG) { reason = "index is not a register"; goto report_error; @@ -5947,8 +6097,8 @@ legitimate_address_p (mode, addr, strict) goto report_error; } - if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg)) - || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg))) + if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (index)) + || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (index))) { reason = "index is not valid"; goto report_error; @@ -6083,7 +6233,7 @@ legitimate_address_p (mode, addr, strict) /* Return an unique alias set for the GOT. */ static HOST_WIDE_INT -ix86_GOT_alias_set () +ix86_GOT_alias_set (void) { static HOST_WIDE_INT set = -1; if (set == -1) @@ -6110,9 +6260,7 @@ ix86_GOT_alias_set () reg also appears in the address. */ rtx -legitimize_pic_address (orig, reg) - rtx orig; - rtx reg; +legitimize_pic_address (rtx orig, rtx reg) { rtx addr = orig; rtx new = orig; @@ -6198,7 +6346,7 @@ legitimize_pic_address (orig, reg) /* We must match stuff we generate before. Assume the only unspecs that can get here are ours. Not that we could do - anything with them anyway... */ + anything with them anyway.... */ if (GET_CODE (addr) == UNSPEC || (GET_CODE (addr) == PLUS && GET_CODE (XEXP (addr, 0)) == UNSPEC)) @@ -6261,20 +6409,147 @@ legitimize_pic_address (orig, reg) return new; } -/* Load the thread pointer into a register. */ +/* Load the thread pointer. If TO_REG is true, force it into a register. */ static rtx -get_thread_pointer () +get_thread_pointer (int to_reg) { - rtx tp; + rtx tp, reg, insn; tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP); - tp = gen_rtx_MEM (Pmode, tp); - RTX_UNCHANGING_P (tp) = 1; - set_mem_alias_set (tp, ix86_GOT_alias_set ()); - tp = force_reg (Pmode, tp); + if (!to_reg) + return tp; + + reg = gen_reg_rtx (Pmode); + insn = gen_rtx_SET (VOIDmode, reg, tp); + insn = emit_insn (insn); + + return reg; +} + +/* A subroutine of legitimize_address and ix86_expand_move. FOR_MOV is + false if we expect this to be used for a memory address and true if + we expect to load the address into a register. 
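   A rough map from TLS model to the shape of the code produced
   below (32-bit GNU TLS shown; registers illustrative):

	global dynamic:	leal x@TLSGD(,%ebx,1), %eax
			call ___tls_get_addr
	local dynamic:	one ___tls_get_addr call for the module base,
			then constant x@DTPOFF offsets from it
	initial exec:	load the x@GOTNTPOFF slot, add the thread pointer
	local exec:	thread pointer plus constant x@NTPOFF

   FOR_MOV only decides whether the thread pointer must be forced
   into a register or may stay usable inside a memory address.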
*/ + +static rtx +legitimize_tls_address (rtx x, enum tls_model model, int for_mov) +{ + rtx dest, base, off, pic; + int type; + + switch (model) + { + case TLS_MODEL_GLOBAL_DYNAMIC: + dest = gen_reg_rtx (Pmode); + if (TARGET_64BIT) + { + rtx rax = gen_rtx_REG (Pmode, 0), insns; + + start_sequence (); + emit_call_insn (gen_tls_global_dynamic_64 (rax, x)); + insns = get_insns (); + end_sequence (); + + emit_libcall_block (insns, dest, rax, x); + } + else + emit_insn (gen_tls_global_dynamic_32 (dest, x)); + break; + + case TLS_MODEL_LOCAL_DYNAMIC: + base = gen_reg_rtx (Pmode); + if (TARGET_64BIT) + { + rtx rax = gen_rtx_REG (Pmode, 0), insns, note; + + start_sequence (); + emit_call_insn (gen_tls_local_dynamic_base_64 (rax)); + insns = get_insns (); + end_sequence (); + + note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL); + note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note); + emit_libcall_block (insns, base, rax, note); + } + else + emit_insn (gen_tls_local_dynamic_base_32 (base)); + + off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF); + off = gen_rtx_CONST (Pmode, off); + + return gen_rtx_PLUS (Pmode, base, off); + + case TLS_MODEL_INITIAL_EXEC: + if (TARGET_64BIT) + { + pic = NULL; + type = UNSPEC_GOTNTPOFF; + } + else if (flag_pic) + { + if (reload_in_progress) + regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1; + pic = pic_offset_table_rtx; + type = TARGET_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF; + } + else if (!TARGET_GNU_TLS) + { + pic = gen_reg_rtx (Pmode); + emit_insn (gen_set_got (pic)); + type = UNSPEC_GOTTPOFF; + } + else + { + pic = NULL; + type = UNSPEC_INDNTPOFF; + } + + off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type); + off = gen_rtx_CONST (Pmode, off); + if (pic) + off = gen_rtx_PLUS (Pmode, pic, off); + off = gen_rtx_MEM (Pmode, off); + RTX_UNCHANGING_P (off) = 1; + set_mem_alias_set (off, ix86_GOT_alias_set ()); + + if (TARGET_64BIT || TARGET_GNU_TLS) + { + base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS); + off = force_reg (Pmode, off); + return gen_rtx_PLUS (Pmode, base, off); + } + else + { + base = get_thread_pointer (true); + dest = gen_reg_rtx (Pmode); + emit_insn (gen_subsi3 (dest, base, off)); + } + break; + + case TLS_MODEL_LOCAL_EXEC: + off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), + (TARGET_64BIT || TARGET_GNU_TLS) + ? UNSPEC_NTPOFF : UNSPEC_TPOFF); + off = gen_rtx_CONST (Pmode, off); + + if (TARGET_64BIT || TARGET_GNU_TLS) + { + base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS); + return gen_rtx_PLUS (Pmode, base, off); + } + else + { + base = get_thread_pointer (true); + dest = gen_reg_rtx (Pmode); + emit_insn (gen_subsi3 (dest, base, off)); + } + break; + + default: + abort (); + } - return tp; + return dest; } /* Try machine-dependent ways of modifying an illegitimate address @@ -6299,10 +6574,7 @@ get_thread_pointer () See comments by legitimize_pic_address in i386.c for details. 
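   For instance, under -fpic a bare (symbol_ref y) is not a valid
   address; the code below rewrites it, conceptually, into a GOT
   reference of the form

	(mem (plus (reg ebx) (const (unspec [(symbol_ref y)] GOT))))

   while TLS symbols are dispatched to legitimize_tls_address first.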
*/ rtx -legitimize_address (x, oldx, mode) - register rtx x; - register rtx oldx ATTRIBUTE_UNUSED; - enum machine_mode mode; +legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode) { int changed = 0; unsigned log; @@ -6316,120 +6588,7 @@ legitimize_address (x, oldx, mode) log = tls_symbolic_operand (x, mode); if (log) - { - rtx dest, base, off, pic; - int type; - - switch (log) - { - case TLS_MODEL_GLOBAL_DYNAMIC: - dest = gen_reg_rtx (Pmode); - if (TARGET_64BIT) - { - rtx rax = gen_rtx_REG (Pmode, 0), insns; - - start_sequence (); - emit_call_insn (gen_tls_global_dynamic_64 (rax, x)); - insns = get_insns (); - end_sequence (); - - emit_libcall_block (insns, dest, rax, x); - } - else - emit_insn (gen_tls_global_dynamic_32 (dest, x)); - break; - - case TLS_MODEL_LOCAL_DYNAMIC: - base = gen_reg_rtx (Pmode); - if (TARGET_64BIT) - { - rtx rax = gen_rtx_REG (Pmode, 0), insns, note; - - start_sequence (); - emit_call_insn (gen_tls_local_dynamic_base_64 (rax)); - insns = get_insns (); - end_sequence (); - - note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL); - note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note); - emit_libcall_block (insns, base, rax, note); - } - else - emit_insn (gen_tls_local_dynamic_base_32 (base)); - - off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF); - off = gen_rtx_CONST (Pmode, off); - - return gen_rtx_PLUS (Pmode, base, off); - - case TLS_MODEL_INITIAL_EXEC: - if (TARGET_64BIT) - { - pic = NULL; - type = UNSPEC_GOTNTPOFF; - } - else if (flag_pic) - { - if (reload_in_progress) - regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1; - pic = pic_offset_table_rtx; - type = TARGET_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF; - } - else if (!TARGET_GNU_TLS) - { - pic = gen_reg_rtx (Pmode); - emit_insn (gen_set_got (pic)); - type = UNSPEC_GOTTPOFF; - } - else - { - pic = NULL; - type = UNSPEC_INDNTPOFF; - } - - base = get_thread_pointer (); - - off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type); - off = gen_rtx_CONST (Pmode, off); - if (pic) - off = gen_rtx_PLUS (Pmode, pic, off); - off = gen_rtx_MEM (Pmode, off); - RTX_UNCHANGING_P (off) = 1; - set_mem_alias_set (off, ix86_GOT_alias_set ()); - dest = gen_reg_rtx (Pmode); - - if (TARGET_64BIT || TARGET_GNU_TLS) - { - emit_move_insn (dest, off); - return gen_rtx_PLUS (Pmode, base, dest); - } - else - emit_insn (gen_subsi3 (dest, base, off)); - break; - - case TLS_MODEL_LOCAL_EXEC: - base = get_thread_pointer (); - - off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), - (TARGET_64BIT || TARGET_GNU_TLS) - ? 
UNSPEC_NTPOFF : UNSPEC_TPOFF); - off = gen_rtx_CONST (Pmode, off); - - if (TARGET_64BIT || TARGET_GNU_TLS) - return gen_rtx_PLUS (Pmode, base, off); - else - { - dest = gen_reg_rtx (Pmode); - emit_insn (gen_subsi3 (dest, base, off)); - } - break; - - default: - abort (); - } - - return dest; - } + return legitimize_tls_address (x, log, false); if (flag_pic && SYMBOLIC_CONST (x)) return legitimize_pic_address (x, 0); @@ -6555,8 +6714,8 @@ legitimize_address (x, oldx, mode) if (GET_CODE (XEXP (x, 0)) == REG) { - register rtx temp = gen_reg_rtx (Pmode); - register rtx val = force_operand (XEXP (x, 1), temp); + rtx temp = gen_reg_rtx (Pmode); + rtx val = force_operand (XEXP (x, 1), temp); if (val != temp) emit_move_insn (temp, val); @@ -6566,8 +6725,8 @@ legitimize_address (x, oldx, mode) else if (GET_CODE (XEXP (x, 1)) == REG) { - register rtx temp = gen_reg_rtx (Pmode); - register rtx val = force_operand (XEXP (x, 0), temp); + rtx temp = gen_reg_rtx (Pmode); + rtx val = force_operand (XEXP (x, 0), temp); if (val != temp) emit_move_insn (temp, val); @@ -6585,10 +6744,7 @@ legitimize_address (x, oldx, mode) CODE is the operand print code from the output string. */ static void -output_pic_addr_const (file, x, code) - FILE *file; - rtx x; - int code; +output_pic_addr_const (FILE *file, rtx x, int code) { char buf[256]; @@ -6725,9 +6881,7 @@ output_pic_addr_const (file, x, code) We need to handle our special PIC relocations. */ void -i386_dwarf_output_addr_const (file, x) - FILE *file; - rtx x; +i386_dwarf_output_addr_const (FILE *file, rtx x) { #ifdef ASM_QUAD fprintf (file, "%s", TARGET_64BIT ? ASM_QUAD : ASM_LONG); @@ -6747,10 +6901,7 @@ i386_dwarf_output_addr_const (file, x) We need to emit DTP-relative relocations. */ void -i386_output_dwarf_dtprel (file, size, x) - FILE *file; - int size; - rtx x; +i386_output_dwarf_dtprel (FILE *file, int size, rtx x) { fputs (ASM_LONG, file); output_addr_const (file, x); @@ -6772,8 +6923,7 @@ i386_output_dwarf_dtprel (file, size, x) into a direct symbol reference. */ static rtx -ix86_delegitimize_address (orig_x) - rtx orig_x; +ix86_delegitimize_address (rtx orig_x) { rtx x = orig_x, y; @@ -6845,11 +6995,8 @@ ix86_delegitimize_address (orig_x) } static void -put_condition_code (code, mode, reverse, fp, file) - enum rtx_code code; - enum machine_mode mode; - int reverse, fp; - FILE *file; +put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse, + int fp, FILE *file) { const char *suffix; @@ -6934,11 +7081,16 @@ put_condition_code (code, mode, reverse, fp, file) fputs (suffix, file); } +/* Print the name of register X to FILE based on its machine mode and number. + If CODE is 'w', pretend the mode is HImode. + If CODE is 'b', pretend the mode is QImode. + If CODE is 'k', pretend the mode is SImode. + If CODE is 'q', pretend the mode is DImode. + If CODE is 'h', pretend the reg is the `high' byte register. + If CODE is 'y', print "st(0)" instead of "st", if the reg is stack op. 
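   For example, given (reg:SI ax): 'w' prints "ax", 'b' prints "al",
   'k' prints "eax", 'q' prints "rax" and 'h' prints "ah"; with no
   code the operand's own mode selects the name the same way.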
*/ + void -print_reg (x, code, file) - rtx x; - int code; - FILE *file; +print_reg (rtx x, int code, FILE *file) { if (REGNO (x) == ARG_POINTER_REGNUM || REGNO (x) == FRAME_POINTER_REGNUM @@ -7010,12 +7162,17 @@ print_reg (x, code, file) /* FALLTHRU */ case 16: case 2: + normal: fputs (hi_reg_name[REGNO (x)], file); break; case 1: + if (REGNO (x) >= ARRAY_SIZE (qi_reg_name)) + goto normal; fputs (qi_reg_name[REGNO (x)], file); break; case 0: + if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name)) + goto normal; fputs (qi_high_reg_name[REGNO (x)], file); break; default: @@ -7028,7 +7185,7 @@ print_reg (x, code, file) pattern. */ static const char * -get_some_local_dynamic_name () +get_some_local_dynamic_name (void) { rtx insn; @@ -7044,9 +7201,7 @@ get_some_local_dynamic_name () } static int -get_some_local_dynamic_name_1 (px, data) - rtx *px; - void *data ATTRIBUTE_UNUSED; +get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED) { rtx x = *px; @@ -7065,8 +7220,8 @@ get_some_local_dynamic_name_1 (px, data) C -- print opcode suffix for set/cmov insn. c -- like C, but print reversed condition F,f -- likewise, but for floating-point. - O -- if CMOV_SUN_AS_SYNTAX, expand to "w.", "l." or "q.", otherwise - nothing + O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.", + otherwise nothing R -- print the prefix for register names. z -- print the opcode suffix for the size of the current operand. * -- print a star (in certain assembler syntax) @@ -7088,10 +7243,7 @@ get_some_local_dynamic_name_1 (px, data) */ void -print_operand (file, x, code) - FILE *file; - rtx x; - int code; +print_operand (FILE *file, rtx x, int code) { if (code) { @@ -7270,7 +7422,7 @@ print_operand (file, x, code) } return; case 'O': -#ifdef CMOV_SUN_AS_SYNTAX +#ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX if (ASSEMBLER_DIALECT == ASM_ATT) { switch (GET_MODE (x)) @@ -7290,7 +7442,7 @@ print_operand (file, x, code) put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file); return; case 'F': -#ifdef CMOV_SUN_AS_SYNTAX +#ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX if (ASSEMBLER_DIALECT == ASM_ATT) putc ('.', file); #endif @@ -7301,7 +7453,7 @@ print_operand (file, x, code) case 'c': /* Check to see if argument to %c is really a constant and not a condition code which needs to be reversed. */ - if (GET_RTX_CLASS (GET_CODE (x)) != '<') + if (!COMPARISON_P (x)) { output_operand_lossage ("operand is neither a constant nor a condition code, invalid operand code 'c'"); return; @@ -7309,7 +7461,7 @@ print_operand (file, x, code) put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file); return; case 'f': -#ifdef CMOV_SUN_AS_SYNTAX +#ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX if (ASSEMBLER_DIALECT == ASM_ATT) putc ('.', file); #endif @@ -7354,9 +7506,7 @@ print_operand (file, x, code) } if (GET_CODE (x) == REG) - { - PRINT_REG (x, code, file); - } + print_reg (x, code, file); else if (GET_CODE (x) == MEM) { @@ -7409,11 +7559,11 @@ print_operand (file, x, code) if (ASSEMBLER_DIALECT == ASM_ATT) putc ('$', file); - fprintf (file, "0x%lx", l); + fprintf (file, "0x%08lx", l); } - /* These float cases don't actually occur as immediate operands. */ - else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode) + /* These float cases don't actually occur as immediate operands. 
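   (The SFmode case just above does occur; e.g. the constant 1.0
   comes out as $0x3f800000, and the fixed eight-digit %08lx format
   keeps leading zeros, as in $0x00800000 rather than $0x800000.)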
*/ + else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode) { char dstr[30]; @@ -7422,7 +7572,7 @@ print_operand (file, x, code) } else if (GET_CODE (x) == CONST_DOUBLE - && (GET_MODE (x) == XFmode || GET_MODE (x) == TFmode)) + && GET_MODE (x) == XFmode) { char dstr[30]; @@ -7460,27 +7610,12 @@ print_operand (file, x, code) /* Print a memory operand whose address is ADDR. */ void -print_operand_address (file, addr) - FILE *file; - register rtx addr; +print_operand_address (FILE *file, rtx addr) { struct ix86_address parts; rtx base, index, disp; int scale; - if (GET_CODE (addr) == UNSPEC && XINT (addr, 1) == UNSPEC_TP) - { - if (ASSEMBLER_DIALECT == ASM_INTEL) - fputs ("DWORD PTR ", file); - if (ASSEMBLER_DIALECT == ASM_ATT || USER_LABEL_PREFIX[0] == 0) - putc ('%', file); - if (TARGET_64BIT) - fputs ("fs:0", file); - else - fputs ("gs:0", file); - return; - } - if (! ix86_decompose_address (addr, &parts)) abort (); @@ -7489,35 +7624,49 @@ print_operand_address (file, addr) disp = parts.disp; scale = parts.scale; + switch (parts.seg) + { + case SEG_DEFAULT: + break; + case SEG_FS: + case SEG_GS: + if (USER_LABEL_PREFIX[0] == 0) + putc ('%', file); + fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file); + break; + default: + abort (); + } + if (!base && !index) { /* Displacement only requires special attention. */ if (GET_CODE (disp) == CONST_INT) { - if (ASSEMBLER_DIALECT == ASM_INTEL) + if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT) { if (USER_LABEL_PREFIX[0] == 0) putc ('%', file); fputs ("ds:", file); } - fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (addr)); + fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp)); } else if (flag_pic) - output_pic_addr_const (file, addr, 0); + output_pic_addr_const (file, disp, 0); else - output_addr_const (file, addr); + output_addr_const (file, disp); /* Use one byte shorter RIP relative addressing for 64bit mode. */ if (TARGET_64BIT - && ((GET_CODE (addr) == SYMBOL_REF - && ! tls_symbolic_operand (addr, GET_MODE (addr))) - || GET_CODE (addr) == LABEL_REF - || (GET_CODE (addr) == CONST - && GET_CODE (XEXP (addr, 0)) == PLUS - && (GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF - || GET_CODE (XEXP (XEXP (addr, 0), 0)) == LABEL_REF) - && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT))) + && ((GET_CODE (disp) == SYMBOL_REF + && ! 
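/* TLS symbols have their own addressing forms and must not become
   plain RIP-relative references; for everything else, e.g.

	movl	sym(%rip), %eax

   saves a byte over the 32-bit absolute form  movl sym, %eax.  */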
tls_symbolic_operand (disp, GET_MODE (disp))) + || GET_CODE (disp) == LABEL_REF + || (GET_CODE (disp) == CONST + && GET_CODE (XEXP (disp, 0)) == PLUS + && (GET_CODE (XEXP (XEXP (disp, 0), 0)) == SYMBOL_REF + || GET_CODE (XEXP (XEXP (disp, 0), 0)) == LABEL_REF) + && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT))) fputs ("(%rip)", file); } else @@ -7536,11 +7685,11 @@ print_operand_address (file, addr) putc ('(', file); if (base) - PRINT_REG (base, 0, file); + print_reg (base, 0, file); if (index) { putc (',', file); - PRINT_REG (index, 0, file); + print_reg (index, 0, file); if (scale != 1) fprintf (file, ",%d", scale); } @@ -7575,7 +7724,7 @@ print_operand_address (file, addr) putc ('[', file); if (base) { - PRINT_REG (base, 0, file); + print_reg (base, 0, file); if (offset) { if (INTVAL (offset) >= 0) @@ -7591,7 +7740,7 @@ print_operand_address (file, addr) if (index) { putc ('+', file); - PRINT_REG (index, 0, file); + print_reg (index, 0, file); if (scale != 1) fprintf (file, "*%d", scale); } @@ -7601,9 +7750,7 @@ print_operand_address (file, addr) } bool -output_addr_const_extra (file, x) - FILE *file; - rtx x; +output_addr_const_extra (FILE *file, rtx x) { rtx op; @@ -7659,10 +7806,7 @@ output_addr_const_extra (file, x) that parallel "operands". */ void -split_di (operands, num, lo_half, hi_half) - rtx operands[]; - int num; - rtx lo_half[], hi_half[]; +split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[]) { while (num--) { @@ -7693,10 +7837,7 @@ split_di (operands, num, lo_half, hi_half) that parallel "operands". */ void -split_ti (operands, num, lo_half, hi_half) - rtx operands[]; - int num; - rtx lo_half[], hi_half[]; +split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[]) { while (num--) { @@ -7735,9 +7876,7 @@ split_ti (operands, num, lo_half, hi_half) #endif const char * -output_387_binary_op (insn, operands) - rtx insn; - rtx *operands; +output_387_binary_op (rtx insn, rtx *operands) { static char buf[30]; const char *p; @@ -7945,8 +8084,7 @@ output_387_binary_op (insn, operands) trunc?f?i patterns. NORMAL is set to current control word, while ROUND_DOWN is set to control word rounding downwards. */ void -emit_i387_cw_initialization (normal, round_down) - rtx normal, round_down; +emit_i387_cw_initialization (rtx normal, rtx round_down) { rtx reg = gen_reg_rtx (HImode); @@ -7965,9 +8103,7 @@ emit_i387_cw_initialization (normal, round_down) operand may be [SDX]Fmode. */ const char * -output_fix_trunc (insn, operands) - rtx insn; - rtx *operands; +output_fix_trunc (rtx insn, rtx *operands) { int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0; int dimode_p = GET_MODE (operands[0]) == DImode; @@ -7999,10 +8135,7 @@ output_fix_trunc (insn, operands) when fucom should be used. 
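   The two instruction families selected between, roughly:

	fcomi	%st(1), %st		# flags set directly (PPro+)
   versus
	fucom	%st(1)
	fnstsw	%ax			# status word to %ax
	sahf				# ...and on into EFLAGS

   EFLAGS_P chooses the first family; UNORDERED_P selects the
   fucom/fucomi variants, which do not trap on quiet NaNs.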
*/ const char * -output_fp_compare (insn, operands, eflags_p, unordered_p) - rtx insn; - rtx *operands; - int eflags_p, unordered_p; +output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p) { int stack_top_dies; rtx cmp_op0 = operands[0]; @@ -8126,9 +8259,7 @@ output_fp_compare (insn, operands, eflags_p, unordered_p) } void -ix86_output_addr_vec_elt (file, value) - FILE *file; - int value; +ix86_output_addr_vec_elt (FILE *file, int value) { const char *directive = ASM_LONG; @@ -8145,9 +8276,7 @@ ix86_output_addr_vec_elt (file, value) } void -ix86_output_addr_diff_elt (file, value, rel) - FILE *file; - int value, rel; +ix86_output_addr_diff_elt (FILE *file, int value, int rel) { if (TARGET_64BIT) fprintf (file, "%s%s%d-%s%d\n", @@ -8156,8 +8285,11 @@ ix86_output_addr_diff_elt (file, value, rel) fprintf (file, "%s%s%d@GOTOFF\n", ASM_LONG, LPREFIX, value); #if TARGET_MACHO else if (TARGET_MACHO) - fprintf (file, "%s%s%d-%s\n", ASM_LONG, LPREFIX, value, - machopic_function_base_name () + 1); + { + fprintf (file, "%s%s%d-", ASM_LONG, LPREFIX, value); + machopic_output_function_base_name (file); + fprintf(file, "\n"); + } #endif else asm_fprintf (file, "%s%U%s+[.-%s%d]\n", @@ -8168,8 +8300,7 @@ ix86_output_addr_diff_elt (file, value, rel) for the target. */ void -ix86_expand_clear (dest) - rtx dest; +ix86_expand_clear (rtx dest) { rtx tmp; @@ -8197,8 +8328,7 @@ ix86_expand_clear (dest) the constant pool rtx, else NULL. */ static rtx -maybe_get_pool_constant (x) - rtx x; +maybe_get_pool_constant (rtx x) { x = ix86_delegitimize_address (XEXP (x, 0)); @@ -8209,27 +8339,25 @@ maybe_get_pool_constant (x) } void -ix86_expand_move (mode, operands) - enum machine_mode mode; - rtx operands[]; +ix86_expand_move (enum machine_mode mode, rtx operands[]) { int strict = (reload_in_progress || reload_completed); - rtx insn, op0, op1, tmp; + rtx op0, op1; + enum tls_model model; op0 = operands[0]; op1 = operands[1]; - if (tls_symbolic_operand (op1, Pmode)) + model = tls_symbolic_operand (op1, Pmode); + if (model) { - op1 = legitimize_address (op1, op1, VOIDmode); - if (GET_CODE (op0) == MEM) - { - tmp = gen_reg_rtx (mode); - emit_insn (gen_rtx_SET (VOIDmode, tmp, op1)); - op1 = tmp; - } + op1 = legitimize_tls_address (op1, model, true); + op1 = force_operand (op1, op0); + if (op1 == op0) + return; } - else if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode)) + + if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode)) { #if TARGET_MACHO if (MACHOPIC_PURE) @@ -8242,18 +8370,11 @@ ix86_expand_move (mode, operands) op1 = machopic_legitimize_pic_address (op1, mode, temp == op1 ? 0 : temp); } - else - { - if (MACHOPIC_INDIRECT) - op1 = machopic_indirect_data_reference (op1, 0); - } - if (op0 != op1) - { - insn = gen_rtx_SET (VOIDmode, op0, op1); - emit_insn (insn); - } - return; -#endif /* TARGET_MACHO */ + else if (MACHOPIC_INDIRECT) + op1 = machopic_indirect_data_reference (op1, 0); + if (op0 == op1) + return; +#else if (GET_CODE (op0) == MEM) op1 = force_reg (Pmode, op1); else @@ -8266,6 +8387,7 @@ ix86_expand_move (mode, operands) return; op1 = temp; } +#endif /* TARGET_MACHO */ } else { @@ -8310,15 +8432,11 @@ ix86_expand_move (mode, operands) } } - insn = gen_rtx_SET (VOIDmode, op0, op1); - - emit_insn (insn); + emit_insn (gen_rtx_SET (VOIDmode, op0, op1)); } void -ix86_expand_vector_move (mode, operands) - enum machine_mode mode; - rtx operands[]; +ix86_expand_vector_move (enum machine_mode mode, rtx operands[]) { /* Force constants other than zero into memory. 
We do not know how the instructions used to build constants modify the upper 64 bits @@ -8347,10 +8465,8 @@ ix86_expand_vector_move (mode, operands) memory references (one output, two input) in a single insn. */ void -ix86_expand_binary_operator (code, mode, operands) - enum rtx_code code; - enum machine_mode mode; - rtx operands[]; +ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode, + rtx operands[]) { int matching_memory; rtx src1, src2, dst, op, clob; @@ -8360,7 +8476,7 @@ ix86_expand_binary_operator (code, mode, operands) src2 = operands[2]; /* Recognize = for commutative operators */ - if (GET_RTX_CLASS (code) == 'c' + if (GET_RTX_CLASS (code) == RTX_COMM_ARITH && (rtx_equal_p (dst, src2) || immediate_operand (src1, mode))) { @@ -8376,7 +8492,7 @@ ix86_expand_binary_operator (code, mode, operands) { if (rtx_equal_p (dst, src1)) matching_memory = 1; - else if (GET_RTX_CLASS (code) == 'c' + else if (GET_RTX_CLASS (code) == RTX_COMM_ARITH && rtx_equal_p (dst, src2)) matching_memory = 2; else @@ -8396,7 +8512,7 @@ ix86_expand_binary_operator (code, mode, operands) or non-matching memory. */ if ((CONSTANT_P (src1) || (!matching_memory && GET_CODE (src1) == MEM)) - && GET_RTX_CLASS (code) != 'c') + && GET_RTX_CLASS (code) != RTX_COMM_ARITH) src1 = force_reg (mode, src1); /* If optimizing, copy to regs to improve CSE */ @@ -8436,27 +8552,26 @@ ix86_expand_binary_operator (code, mode, operands) appropriate constraints. */ int -ix86_binary_operator_ok (code, mode, operands) - enum rtx_code code; - enum machine_mode mode ATTRIBUTE_UNUSED; - rtx operands[3]; +ix86_binary_operator_ok (enum rtx_code code, + enum machine_mode mode ATTRIBUTE_UNUSED, + rtx operands[3]) { /* Both source operands cannot be in memory. */ if (GET_CODE (operands[1]) == MEM && GET_CODE (operands[2]) == MEM) return 0; /* If the operation is not commutable, source 1 cannot be a constant. */ - if (CONSTANT_P (operands[1]) && GET_RTX_CLASS (code) != 'c') + if (CONSTANT_P (operands[1]) && GET_RTX_CLASS (code) != RTX_COMM_ARITH) return 0; /* If the destination is memory, we must have a matching source operand. */ if (GET_CODE (operands[0]) == MEM && ! (rtx_equal_p (operands[0], operands[1]) - || (GET_RTX_CLASS (code) == 'c' + || (GET_RTX_CLASS (code) == RTX_COMM_ARITH && rtx_equal_p (operands[0], operands[2])))) return 0; /* If the operation is not commutable and the source 1 is memory, we must have a matching destination. */ if (GET_CODE (operands[1]) == MEM - && GET_RTX_CLASS (code) != 'c' + && GET_RTX_CLASS (code) != RTX_COMM_ARITH && ! rtx_equal_p (operands[0], operands[1])) return 0; return 1; @@ -8467,10 +8582,8 @@ ix86_binary_operator_ok (code, mode, operands) memory references (one output, one input) in a single insn. */ void -ix86_expand_unary_operator (code, mode, operands) - enum rtx_code code; - enum machine_mode mode; - rtx operands[]; +ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode, + rtx operands[]) { int matching_memory; rtx src, dst, op, clob; @@ -8528,10 +8641,9 @@ ix86_expand_unary_operator (code, mode, operands) appropriate constraints. */ int -ix86_unary_operator_ok (code, mode, operands) - enum rtx_code code ATTRIBUTE_UNUSED; - enum machine_mode mode ATTRIBUTE_UNUSED; - rtx operands[2] ATTRIBUTE_UNUSED; +ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED, + enum machine_mode mode ATTRIBUTE_UNUSED, + rtx operands[2] ATTRIBUTE_UNUSED) { /* If one of operands is memory, source and destination must match. 
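   (The hardware form is one-operand:  notl (%eax)  reads and
   rewrites the same location, so an RTL set of one memory location
   from the negation of a different one has no direct encoding.)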
*/ if ((GET_CODE (operands[0]) == MEM @@ -8546,9 +8658,7 @@ ix86_unary_operator_ok (code, mode, operands) CC mode is at least as constrained as REQ_MODE. */ int -ix86_match_ccmode (insn, req_mode) - rtx insn; - enum machine_mode req_mode; +ix86_match_ccmode (rtx insn, enum machine_mode req_mode) { rtx set; enum machine_mode set_mode; @@ -8595,9 +8705,7 @@ ix86_match_ccmode (insn, req_mode) /* Generate insn patterns to do an integer compare of OPERANDS. */ static rtx -ix86_expand_int_compare (code, op0, op1) - enum rtx_code code; - rtx op0, op1; +ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1) { enum machine_mode cmpmode; rtx tmp, flags; @@ -8619,8 +8727,7 @@ ix86_expand_int_compare (code, op0, op1) Return the appropriate mode to use. */ enum machine_mode -ix86_fp_compare_mode (code) - enum rtx_code code ATTRIBUTE_UNUSED; +ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED) { /* ??? In order to make all comparisons reversible, we do all comparisons non-trapping when compiling for IEEE. Once gcc is able to distinguish @@ -8631,9 +8738,7 @@ ix86_fp_compare_mode (code) } enum machine_mode -ix86_cc_mode (code, op0, op1) - enum rtx_code code; - rtx op0, op1; +ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1) { if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT) return ix86_fp_compare_mode (code); @@ -8677,11 +8782,68 @@ ix86_cc_mode (code, op0, op1) } } +/* Return the fixed registers used for condition codes. */ + +static bool +ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2) +{ + *p1 = FLAGS_REG; + *p2 = FPSR_REG; + return true; +} + +/* If two condition code modes are compatible, return a condition code + mode which is compatible with both. Otherwise, return + VOIDmode. */ + +static enum machine_mode +ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2) +{ + if (m1 == m2) + return m1; + + if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC) + return VOIDmode; + + if ((m1 == CCGCmode && m2 == CCGOCmode) + || (m1 == CCGOCmode && m2 == CCGCmode)) + return CCGCmode; + + switch (m1) + { + default: + abort (); + + case CCmode: + case CCGCmode: + case CCGOCmode: + case CCNOmode: + case CCZmode: + switch (m2) + { + default: + return VOIDmode; + + case CCmode: + case CCGCmode: + case CCGOCmode: + case CCNOmode: + case CCZmode: + return CCmode; + } + + case CCFPmode: + case CCFPUmode: + /* These are only compatible with themselves, which we already + checked above. */ + return VOIDmode; + } +} + /* Return true if we should use an FCOMI instruction for this fp comparison. */ int -ix86_use_fcomi_compare (code) - enum rtx_code code ATTRIBUTE_UNUSED; +ix86_use_fcomi_compare (enum rtx_code code ATTRIBUTE_UNUSED) { enum rtx_code swapped_code = swap_condition (code); return ((ix86_fp_comparison_cost (code) == ix86_fp_comparison_fcomi_cost (code)) @@ -8694,9 +8856,7 @@ ix86_use_fcomi_compare (code) comparison code is returned. */ static enum rtx_code -ix86_prepare_fp_compare_args (code, pop0, pop1) - enum rtx_code code; - rtx *pop0, *pop1; +ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1) { enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code); rtx op0 = *pop0, op1 = *pop1; @@ -8710,7 +8870,6 @@ ix86_prepare_fp_compare_args (code, pop0, pop1) if (!is_sse && (fpcmp_mode == CCFPUmode || op_mode == XFmode - || op_mode == TFmode || ix86_use_fcomi_compare (code))) { op0 = force_reg (op_mode, op0); @@ -8765,8 +8924,7 @@ ix86_prepare_fp_compare_args (code, pop0, pop1) code that will result in proper branch. 
Return UNKNOWN if no such code is available. */ static enum rtx_code -ix86_fp_compare_code_to_integer (code) - enum rtx_code code; +ix86_fp_compare_code_to_integer (enum rtx_code code) { switch (code) { @@ -8801,8 +8959,9 @@ ix86_fp_compare_code_to_integer (code) is not required, set value to NIL. We never require more than two branches. */ static void -ix86_fp_comparison_codes (code, bypass_code, first_code, second_code) - enum rtx_code code, *bypass_code, *first_code, *second_code; +ix86_fp_comparison_codes (enum rtx_code code, enum rtx_code *bypass_code, + enum rtx_code *first_code, + enum rtx_code *second_code) { *first_code = code; *bypass_code = NIL; @@ -8866,8 +9025,7 @@ ix86_fp_comparison_codes (code, bypass_code, first_code, second_code) In future this should be tweaked to compute bytes for optimize_size and take into account performance of various instructions on various CPUs. */ static int -ix86_fp_comparison_arithmetics_cost (code) - enum rtx_code code; +ix86_fp_comparison_arithmetics_cost (enum rtx_code code) { if (!TARGET_IEEE_FP) return 4; @@ -8902,8 +9060,7 @@ ix86_fp_comparison_arithmetics_cost (code) /* Return cost of comparison done using fcomi operation. See ix86_fp_comparison_arithmetics_cost for the metrics. */ static int -ix86_fp_comparison_fcomi_cost (code) - enum rtx_code code; +ix86_fp_comparison_fcomi_cost (enum rtx_code code) { enum rtx_code bypass_code, first_code, second_code; /* Return arbitrarily high cost when instruction is not supported - this @@ -8917,8 +9074,7 @@ ix86_fp_comparison_fcomi_cost (code) /* Return cost of comparison done using sahf operation. See ix86_fp_comparison_arithmetics_cost for the metrics. */ static int -ix86_fp_comparison_sahf_cost (code) - enum rtx_code code; +ix86_fp_comparison_sahf_cost (enum rtx_code code) { enum rtx_code bypass_code, first_code, second_code; /* Return arbitrarily high cost when instruction is not preferred - this @@ -8932,8 +9088,7 @@ ix86_fp_comparison_sahf_cost (code) /* Compute cost of the comparison done using any method. See ix86_fp_comparison_arithmetics_cost for the metrics. */ static int -ix86_fp_comparison_cost (code) - enum rtx_code code; +ix86_fp_comparison_cost (enum rtx_code code) { int fcomi_cost, sahf_cost, arithmetics_cost = 1024; int min; @@ -8952,11 +9107,8 @@ ix86_fp_comparison_cost (code) /* Generate insn patterns to do a floating point compare of OPERANDS. */ static rtx -ix86_expand_fp_compare (code, op0, op1, scratch, second_test, bypass_test) - enum rtx_code code; - rtx op0, op1, scratch; - rtx *second_test; - rtx *bypass_test; +ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch, + rtx *second_test, rtx *bypass_test) { enum machine_mode fpcmp_mode, intcmp_mode; rtx tmp, tmp2; @@ -9140,9 +9292,7 @@ ix86_expand_fp_compare (code, op0, op1, scratch, second_test, bypass_test) } rtx -ix86_expand_compare (code, second_test, bypass_test) - enum rtx_code code; - rtx *second_test, *bypass_test; +ix86_expand_compare (enum rtx_code code, rtx *second_test, rtx *bypass_test) { rtx op0, op1, ret; op0 = ix86_compare_op0; @@ -9164,8 +9314,7 @@ ix86_expand_compare (code, second_test, bypass_test) /* Return true if the CODE will result in nontrivial jump sequence. 
*/ bool -ix86_fp_jump_nontrivial_p (code) - enum rtx_code code; +ix86_fp_jump_nontrivial_p (enum rtx_code code) { enum rtx_code bypass_code, first_code, second_code; if (!TARGET_CMOVE) @@ -9175,9 +9324,7 @@ ix86_fp_jump_nontrivial_p (code) } void -ix86_expand_branch (code, label) - enum rtx_code code; - rtx label; +ix86_expand_branch (enum rtx_code code, rtx label) { rtx tmp; @@ -9197,7 +9344,6 @@ ix86_expand_branch (code, label) case SFmode: case DFmode: case XFmode: - case TFmode: { rtvec vec; int use_fcomi; @@ -9363,9 +9509,8 @@ ix86_expand_branch (code, label) /* Split branch based on floating point condition. */ void -ix86_split_fp_branch (code, op1, op2, target1, target2, tmp) - enum rtx_code code; - rtx op1, op2, target1, target2, tmp; +ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2, + rtx target1, rtx target2, rtx tmp) { rtx second, bypass; rtx label = NULL_RTX; @@ -9441,11 +9586,9 @@ ix86_split_fp_branch (code, op1, op2, target1, target2, tmp) } int -ix86_expand_setcc (code, dest) - enum rtx_code code; - rtx dest; +ix86_expand_setcc (enum rtx_code code, rtx dest) { - rtx ret, tmp, tmpreg; + rtx ret, tmp, tmpreg, equiv; rtx second_test, bypass_test; if (GET_MODE (ix86_compare_op0) == DImode @@ -9484,21 +9627,25 @@ ix86_expand_setcc (code, dest) emit_insn (gen_iorqi3 (tmp, tmpreg, tmp2)); } + /* Attach a REG_EQUAL note describing the comparison result. */ + equiv = simplify_gen_relational (code, QImode, + GET_MODE (ix86_compare_op0), + ix86_compare_op0, ix86_compare_op1); + set_unique_reg_note (get_last_insn (), REG_EQUAL, equiv); + return 1; /* DONE */ } -/* Expand comparison setting or clearing carry flag. Return true when successful - and set pop for the operation. */ -bool -ix86_expand_carry_flag_compare (code, op0, op1, pop) - rtx op0, op1, *pop; - enum rtx_code code; +/* Expand comparison setting or clearing carry flag. Return true when + successful and set pop for the operation. */ +static bool +ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop) { enum machine_mode mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1); /* Do not handle DImode compares that go trought special path. Also we can't - deal with FP compares yet. This is possible to add. */ + deal with FP compares yet. This is possible to add. */ if ((mode == DImode && !TARGET_64BIT)) return false; if (FLOAT_MODE_P (mode)) @@ -9521,9 +9668,9 @@ ix86_expand_carry_flag_compare (code, op0, op1, pop) code = swap_condition (code); } - /* Try to expand the comparsion and verify that we end up with carry flag - based comparsion. This is fails to be true only when we decide to expand - comparsion using arithmetic that is not too common scenario. */ + /* Try to expand the comparison and verify that we end up with carry flag + based comparison. This is fails to be true only when we decide to expand + comparison using arithmetic that is not too common scenario. */ start_sequence (); compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX, &second_test, &bypass_test); @@ -9567,7 +9714,7 @@ ix86_expand_carry_flag_compare (code, op0, op1, pop) { op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0)); /* Bail out on overflow. We still can swap operands but that - would force loading of the constant into register. */ + would force loading of the constant into register. 
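   (For example, unsigned  x <= 4  becomes  x < 5 , which a single
   cmpl $5 answers through the carry flag alone; but  x <= 0xffffffff
   must bail out here, since the incremented constant wraps to zero.)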
*/ if (op1 == const0_rtx || !x86_64_immediate_operand (op1, GET_MODE (op1))) return false; @@ -9582,25 +9729,32 @@ ix86_expand_carry_flag_compare (code, op0, op1, pop) } break; - /* Convert a>0 into (unsigned)a<0x7fffffff. */ + /* Convert a>=0 into (unsigned)a<0x80000000. */ case LT: case GE: if (mode == DImode || op1 != const0_rtx) return false; - op1 = gen_int_mode (~(1 << (GET_MODE_BITSIZE (mode) - 1)), mode); + op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode); code = (code == LT ? GEU : LTU); break; case LE: case GT: if (mode == DImode || op1 != constm1_rtx) return false; - op1 = gen_int_mode (~(1 << (GET_MODE_BITSIZE (mode) - 1)), mode); + op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode); code = (code == LE ? GEU : LTU); break; default: return false; } + /* Swapping operands may cause constant to appear as first operand. */ + if (!nonimmediate_operand (op0, VOIDmode)) + { + if (no_new_pseudos) + return false; + op0 = force_reg (mode, op0); + } ix86_compare_op0 = op0; ix86_compare_op1 = op1; *pop = ix86_expand_compare (code, NULL, NULL); @@ -9610,8 +9764,7 @@ ix86_expand_carry_flag_compare (code, op0, op1, pop) } int -ix86_expand_int_movcc (operands) - rtx operands[]; +ix86_expand_int_movcc (rtx operands[]) { enum rtx_code code = GET_CODE (operands[1]), compare_code; rtx compare_seq, compare_op; @@ -9645,7 +9798,7 @@ ix86_expand_int_movcc (operands) diff = ct - cf; /* Sign bit compares are better done using shifts than we do by using - sbb. */ + sbb. */ if (sign_bit_compare_p || ix86_expand_carry_flag_compare (code, ix86_compare_op0, ix86_compare_op1, &compare_op)) @@ -9720,7 +9873,7 @@ ix86_expand_int_movcc (operands) * Size 5 - 8. */ if (ct) - tmp = expand_simple_binop (mode, PLUS, + tmp = expand_simple_binop (mode, PLUS, tmp, GEN_INT (ct), copy_rtx (tmp), 1, OPTAB_DIRECT); } @@ -9749,7 +9902,7 @@ ix86_expand_int_movcc (operands) */ tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1); if (cf) - tmp = expand_simple_binop (mode, PLUS, + tmp = expand_simple_binop (mode, PLUS, copy_rtx (tmp), GEN_INT (cf), copy_rtx (tmp), 1, OPTAB_DIRECT); } @@ -9777,7 +9930,7 @@ ix86_expand_int_movcc (operands) gen_int_mode (cf - ct, mode), copy_rtx (tmp), 1, OPTAB_DIRECT); if (ct) - tmp = expand_simple_binop (mode, PLUS, + tmp = expand_simple_binop (mode, PLUS, copy_rtx (tmp), GEN_INT (ct), copy_rtx (tmp), 1, OPTAB_DIRECT); } @@ -9846,7 +9999,7 @@ ix86_expand_int_movcc (operands) if (ct != -1) { cf = ct; - ct = -1; + ct = -1; code = reverse_condition (code); } @@ -9963,7 +10116,7 @@ ix86_expand_int_movcc (operands) /* notl op1 (if needed) sarl $31, op1 andl (cf-ct), op1 - addl ct, op1 + addl ct, op1 For x < 0 (resp. x <= -1) there will be no notl, so if possible swap the constants to get rid of the @@ -9974,13 +10127,13 @@ ix86_expand_int_movcc (operands) if (compare_code == GE || !cf) { - code = reverse_condition (code); + code = reverse_condition (code); compare_code = LT; } else { HOST_WIDE_INT tmp = cf; - cf = ct; + cf = ct; ct = tmp; } @@ -10092,7 +10245,7 @@ ix86_expand_int_movcc (operands) } if (! register_operand (operands[2], VOIDmode) - && (mode == QImode + && (mode == QImode || ! 
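/* cmov has no QImode variant and no immediate form, so e.g.

	cmovge	%ebx, %eax

   needs its candidate values in registers (or at most one in
   memory) before the pattern can match.  */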
register_operand (operands[3], VOIDmode))) operands[2] = force_reg (mode, operands[2]); @@ -10122,8 +10275,7 @@ ix86_expand_int_movcc (operands) } int -ix86_expand_fp_movcc (operands) - rtx operands[]; +ix86_expand_fp_movcc (rtx operands[]) { enum rtx_code code; rtx tmp; @@ -10291,8 +10443,7 @@ ix86_expand_fp_movcc (operands) The default case using setcc followed by the conditional move can be done by generic code. */ int -ix86_expand_int_addcc (operands) - rtx operands[]; +ix86_expand_int_addcc (rtx operands[]) { enum rtx_code code = GET_CODE (operands[1]); rtx compare_op; @@ -10378,15 +10529,12 @@ ix86_expand_int_addcc (operands) in the right order. Maximally three parts are generated. */ static int -ix86_split_to_parts (operand, parts, mode) - rtx operand; - rtx *parts; - enum machine_mode mode; +ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode) { int size; if (!TARGET_64BIT) - size = mode == TFmode ? 3 : (GET_MODE_SIZE (mode) / 4); + size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4; else size = (GET_MODE_SIZE (mode) + 4) / 8; @@ -10446,7 +10594,6 @@ ix86_split_to_parts (operand, parts, mode) switch (mode) { case XFmode: - case TFmode: REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l); parts[2] = gen_int_mode (l[2], SImode); break; @@ -10469,18 +10616,19 @@ ix86_split_to_parts (operand, parts, mode) split_ti (&operand, 1, &parts[0], &parts[1]); if (mode == XFmode || mode == TFmode) { + enum machine_mode upper_mode = mode==XFmode ? SImode : DImode; if (REG_P (operand)) { if (!reload_completed) abort (); parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0); - parts[1] = gen_rtx_REG (SImode, REGNO (operand) + 1); + parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1); } else if (offsettable_memref_p (operand)) { operand = adjust_address (operand, DImode, 0); parts[0] = operand; - parts[1] = adjust_address (operand, SImode, 8); + parts[1] = adjust_address (operand, upper_mode, 8); } else if (GET_CODE (operand) == CONST_DOUBLE) { @@ -10488,7 +10636,7 @@ ix86_split_to_parts (operand, parts, mode) long l[3]; REAL_VALUE_FROM_CONST_DOUBLE (r, operand); - REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l); + real_to_target (l, &r, mode); /* Do not use shift by 32 to avoid warning on 32bit systems. */ if (HOST_BITS_PER_WIDE_INT >= 64) parts[0] @@ -10498,7 +10646,16 @@ ix86_split_to_parts (operand, parts, mode) DImode); else parts[0] = immed_double_const (l[0], l[1], DImode); - parts[1] = gen_int_mode (l[2], SImode); + if (upper_mode == SImode) + parts[1] = gen_int_mode (l[2], SImode); + else if (HOST_BITS_PER_WIDE_INT >= 64) + parts[1] + = gen_int_mode + ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1)) + + ((((HOST_WIDE_INT) l[3]) << 31) << 1), + DImode); + else + parts[1] = immed_double_const (l[2], l[3], DImode); } else abort (); @@ -10514,8 +10671,7 @@ ix86_split_to_parts (operand, parts, mode) int the correct order; operands 5-7 contain the output values. */ void -ix86_split_long_move (operands) - rtx operands[]; +ix86_split_long_move (rtx operands[]) { rtx part[2][3]; int nparts; @@ -10620,12 +10776,8 @@ ix86_split_long_move (operands) { if (nparts == 3) { - /* We use only first 12 bytes of TFmode value, but for pushing we - are required to adjust stack as if we were pushing real 16byte - value. 
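   (On the 32-bit side an XFmode value is split into three SImode
   words, 12 bytes of payload; with TARGET_128BIT_LONG_DOUBLE the
   slot pushed is still 16 bytes, hence the extra 4-byte stack
   adjustment retained below.)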
*/ - if (mode == TFmode && !TARGET_64BIT) - emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, - GEN_INT (-4))); + if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode) + emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-4))); emit_move_insn (part[0][2], part[1][2]); } } @@ -10706,8 +10858,7 @@ } void -ix86_split_ashldi (operands, scratch) - rtx *operands, scratch; +ix86_split_ashldi (rtx *operands, rtx scratch) { rtx low[2], high[2]; int count; @@ -10759,8 +10910,7 @@ } void -ix86_split_ashrdi (operands, scratch) - rtx *operands, scratch; +ix86_split_ashrdi (rtx *operands, rtx scratch) { rtx low[2], high[2]; int count; @@ -10770,7 +10920,14 @@ split_di (operands, 2, low, high); count = INTVAL (operands[2]) & 63; - if (count >= 32) + if (count == 63) + { + emit_move_insn (high[0], high[1]); + emit_insn (gen_ashrsi3 (high[0], high[0], GEN_INT (31))); + emit_move_insn (low[0], high[0]); + + } + else if (count >= 32) { emit_move_insn (low[0], high[1]); @@ -10818,8 +10975,7 @@ } void -ix86_split_lshrdi (operands, scratch) - rtx *operands, scratch; +ix86_split_lshrdi (rtx *operands, rtx scratch) { rtx low[2], high[2]; int count; @@ -10874,9 +11030,7 @@ /* Helper function for the string operations below. Test VARIABLE whether it is aligned to VALUE bytes. If true, jump to the label. */ static rtx -ix86_expand_aligntest (variable, value) - rtx variable; - int value; +ix86_expand_aligntest (rtx variable, int value) { rtx label = gen_label_rtx (); rtx tmpcount = gen_reg_rtx (GET_MODE (variable)); @@ -10891,9 +11045,7 @@ /* Adjust COUNTER by the VALUE. */ static void -ix86_adjust_counter (countreg, value) - rtx countreg; - HOST_WIDE_INT value; +ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value) { if (GET_MODE (countreg) == DImode) emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value))); @@ -10903,8 +11055,7 @@ /* Zero extend possibly SImode EXP to Pmode register. */ rtx -ix86_zero_extend_to_Pmode (exp) - rtx exp; +ix86_zero_extend_to_Pmode (rtx exp) { rtx r; if (GET_MODE (exp) == VOIDmode) @@ -10919,14 +11070,12 @@ /* Expand string move (memcpy) operation. Use i386 string operations when profitable. expand_clrstr contains similar code. */ int -ix86_expand_movstr (dst, src, count_exp, align_exp) - rtx dst, src, count_exp, align_exp; +ix86_expand_movstr (rtx dst, rtx src, rtx count_exp, rtx align_exp) { - rtx srcreg, destreg, countreg; + rtx srcreg, destreg, countreg, srcexp, destexp; enum machine_mode counter_mode; HOST_WIDE_INT align = 0; unsigned HOST_WIDE_INT count = 0; - rtx insns; if (GET_CODE (align_exp) == CONST_INT) align = INTVAL (align_exp); @@ -10955,28 +11104,27 @@ else counter_mode = DImode; - start_sequence (); - if (counter_mode != SImode && counter_mode != DImode) abort (); destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0)); + if (destreg != XEXP (dst, 0)) + dst = replace_equiv_address_nv (dst, destreg); srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0)); - - emit_insn (gen_cld ()); + if (srcreg != XEXP (src, 0)) + src = replace_equiv_address_nv (src, srcreg); /* When optimizing for size emit simple rep ; movsb instruction for counts not divisible by 4. 
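   (Illustratively, this size-optimized path boils down to

        cld
        mov   count, %ecx
        rep movsb

   with the rep_mov pattern describing the final pointer values,
   destreg + countreg and srcreg + countreg, so that the address
   updates stay visible to later RTL passes.)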
*/ if ((!optimize || optimize_size) && (count == 0 || (count & 0x03))) { + emit_insn (gen_cld ()); countreg = ix86_zero_extend_to_Pmode (count_exp); - if (TARGET_64BIT) - emit_insn (gen_rep_movqi_rex64 (destreg, srcreg, countreg, - destreg, srcreg, countreg)); - else - emit_insn (gen_rep_movqi (destreg, srcreg, countreg, - destreg, srcreg, countreg)); + destexp = gen_rtx_PLUS (Pmode, destreg, countreg); + srcexp = gen_rtx_PLUS (Pmode, srcreg, countreg); + emit_insn (gen_rep_mov (destreg, dst, srcreg, src, countreg, + destexp, srcexp)); } /* For constant aligned (or small unaligned) copies use rep movsl @@ -10988,32 +11136,53 @@ ix86_expand_movstr (dst, src, count_exp, align_exp) || (!TARGET_PENTIUMPRO && !TARGET_64BIT && align >= 4) || optimize_size || count < (unsigned int) 64)) { + unsigned HOST_WIDE_INT offset = 0; int size = TARGET_64BIT && !optimize_size ? 8 : 4; + rtx srcmem, dstmem; + + emit_insn (gen_cld ()); if (count & ~(size - 1)) { countreg = copy_to_mode_reg (counter_mode, GEN_INT ((count >> (size == 4 ? 2 : 3)) & (TARGET_64BIT ? -1 : 0x3fffffff))); countreg = ix86_zero_extend_to_Pmode (countreg); - if (size == 4) - { - if (TARGET_64BIT) - emit_insn (gen_rep_movsi_rex64 (destreg, srcreg, countreg, - destreg, srcreg, countreg)); - else - emit_insn (gen_rep_movsi (destreg, srcreg, countreg, - destreg, srcreg, countreg)); - } - else - emit_insn (gen_rep_movdi_rex64 (destreg, srcreg, countreg, - destreg, srcreg, countreg)); + + destexp = gen_rtx_ASHIFT (Pmode, countreg, + GEN_INT (size == 4 ? 2 : 3)); + srcexp = gen_rtx_PLUS (Pmode, destexp, srcreg); + destexp = gen_rtx_PLUS (Pmode, destexp, destreg); + + emit_insn (gen_rep_mov (destreg, dst, srcreg, src, + countreg, destexp, srcexp)); + offset = count & ~(size - 1); } if (size == 8 && (count & 0x04)) - emit_insn (gen_strmovsi (destreg, srcreg)); + { + srcmem = adjust_automodify_address_nv (src, SImode, srcreg, + offset); + dstmem = adjust_automodify_address_nv (dst, SImode, destreg, + offset); + emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem)); + offset += 4; + } if (count & 0x02) - emit_insn (gen_strmovhi (destreg, srcreg)); + { + srcmem = adjust_automodify_address_nv (src, HImode, srcreg, + offset); + dstmem = adjust_automodify_address_nv (dst, HImode, destreg, + offset); + emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem)); + offset += 2; + } if (count & 0x01) - emit_insn (gen_strmovqi (destreg, srcreg)); + { + srcmem = adjust_automodify_address_nv (src, QImode, srcreg, + offset); + dstmem = adjust_automodify_address_nv (dst, QImode, destreg, + offset); + emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem)); + } } /* The generic code based on the glibc implementation: - align destination to 4 bytes (8 byte alignment is used for PentiumPro @@ -11024,22 +11193,23 @@ ix86_expand_movstr (dst, src, count_exp, align_exp) { rtx countreg2; rtx label = NULL; + rtx srcmem, dstmem; int desired_alignment = (TARGET_PENTIUMPRO && (count == 0 || count >= (unsigned int) 260) ? 8 : UNITS_PER_WORD); + /* Get rid of MEM_OFFSETs, they won't be accurate. */ + dst = change_address (dst, BLKmode, destreg); + src = change_address (src, BLKmode, srcreg); /* In case we don't know anything about the alignment, default to library version, since it is usually equally fast and result in - shorter code. + shorter code. Also emit call when we know that the count is large and call overhead will not be important. 
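   (A rough sketch of what this branch emits for a 32-bit target with
   unknown alignment; each fix-up runs only when the matching
   destination address bit is set, and the count register is adjusted
   after every alignment step:

        test  $1, %edi ; jz 1f ; movsb      align to a 2 byte boundary
     1: test  $2, %edi ; jz 2f ; movsw      align to a 4 byte boundary
     2: mov   count, %ecx ; shr $2, %ecx
        rep movsl                           copy whole words
        then movsw/movsb for the 0-3 trailing bytes

   When the heuristic below bails out with return 0 instead, the
   caller falls back to the plain memcpy library call.)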
*/ if (!TARGET_INLINE_ALL_STRINGOPS && (align < UNITS_PER_WORD || !TARGET_REP_MOVL_OPTIMAL)) - { - end_sequence (); - return 0; - } + return 0; if (TARGET_SINGLE_STRINGOP) emit_insn (gen_cld ()); @@ -11069,7 +11239,9 @@ ix86_expand_movstr (dst, src, count_exp, align_exp) if (align <= 1) { rtx label = ix86_expand_aligntest (destreg, 1); - emit_insn (gen_strmovqi (destreg, srcreg)); + srcmem = change_address (src, QImode, srcreg); + dstmem = change_address (dst, QImode, destreg); + emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem)); ix86_adjust_counter (countreg, 1); emit_label (label); LABEL_NUSES (label) = 1; @@ -11077,7 +11249,9 @@ ix86_expand_movstr (dst, src, count_exp, align_exp) if (align <= 2) { rtx label = ix86_expand_aligntest (destreg, 2); - emit_insn (gen_strmovhi (destreg, srcreg)); + srcmem = change_address (src, HImode, srcreg); + dstmem = change_address (dst, HImode, destreg); + emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem)); ix86_adjust_counter (countreg, 2); emit_label (label); LABEL_NUSES (label) = 1; @@ -11085,7 +11259,9 @@ ix86_expand_movstr (dst, src, count_exp, align_exp) if (align <= 4 && desired_alignment > 4) { rtx label = ix86_expand_aligntest (destreg, 4); - emit_insn (gen_strmovsi (destreg, srcreg)); + srcmem = change_address (src, SImode, srcreg); + dstmem = change_address (dst, SImode, destreg); + emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem)); ix86_adjust_counter (countreg, 4); emit_label (label); LABEL_NUSES (label) = 1; @@ -11103,15 +11279,17 @@ ix86_expand_movstr (dst, src, count_exp, align_exp) { emit_insn (gen_lshrdi3 (countreg2, ix86_zero_extend_to_Pmode (countreg), GEN_INT (3))); - emit_insn (gen_rep_movdi_rex64 (destreg, srcreg, countreg2, - destreg, srcreg, countreg2)); + destexp = gen_rtx_ASHIFT (Pmode, countreg2, GEN_INT (3)); } else { - emit_insn (gen_lshrsi3 (countreg2, countreg, GEN_INT (2))); - emit_insn (gen_rep_movsi (destreg, srcreg, countreg2, - destreg, srcreg, countreg2)); + emit_insn (gen_lshrsi3 (countreg2, countreg, const2_rtx)); + destexp = gen_rtx_ASHIFT (Pmode, countreg2, const2_rtx); } + srcexp = gen_rtx_PLUS (Pmode, destexp, srcreg); + destexp = gen_rtx_PLUS (Pmode, destexp, destreg); + emit_insn (gen_rep_mov (destreg, dst, srcreg, src, + countreg2, destexp, srcexp)); if (label) { @@ -11119,49 +11297,61 @@ ix86_expand_movstr (dst, src, count_exp, align_exp) LABEL_NUSES (label) = 1; } if (TARGET_64BIT && align > 4 && count != 0 && (count & 4)) - emit_insn (gen_strmovsi (destreg, srcreg)); + { + srcmem = change_address (src, SImode, srcreg); + dstmem = change_address (dst, SImode, destreg); + emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem)); + } if ((align <= 4 || count == 0) && TARGET_64BIT) { rtx label = ix86_expand_aligntest (countreg, 4); - emit_insn (gen_strmovsi (destreg, srcreg)); + srcmem = change_address (src, SImode, srcreg); + dstmem = change_address (dst, SImode, destreg); + emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem)); emit_label (label); LABEL_NUSES (label) = 1; } if (align > 2 && count != 0 && (count & 2)) - emit_insn (gen_strmovhi (destreg, srcreg)); + { + srcmem = change_address (src, HImode, srcreg); + dstmem = change_address (dst, HImode, destreg); + emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem)); + } if (align <= 2 || count == 0) { rtx label = ix86_expand_aligntest (countreg, 2); - emit_insn (gen_strmovhi (destreg, srcreg)); + srcmem = change_address (src, HImode, srcreg); + dstmem = change_address (dst, HImode, destreg); + emit_insn (gen_strmov (destreg, 
dstmem, srcreg, srcmem)); emit_label (label); LABEL_NUSES (label) = 1; } if (align > 1 && count != 0 && (count & 1)) - emit_insn (gen_strmovqi (destreg, srcreg)); + { + srcmem = change_address (src, QImode, srcreg); + dstmem = change_address (dst, QImode, destreg); + emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem)); + } if (align <= 1 || count == 0) { rtx label = ix86_expand_aligntest (countreg, 1); - emit_insn (gen_strmovqi (destreg, srcreg)); + srcmem = change_address (src, QImode, srcreg); + dstmem = change_address (dst, QImode, destreg); + emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem)); emit_label (label); LABEL_NUSES (label) = 1; } } - insns = get_insns (); - end_sequence (); - - ix86_set_move_mem_attrs (insns, dst, src, destreg, srcreg); - emit_insn (insns); return 1; } /* Expand string clear operation (bzero). Use i386 string operations when profitable. expand_movstr contains similar code. */ int -ix86_expand_clrstr (src, count_exp, align_exp) - rtx src, count_exp, align_exp; +ix86_expand_clrstr (rtx dst, rtx count_exp, rtx align_exp) { - rtx destreg, zeroreg, countreg; + rtx destreg, zeroreg, countreg, destexp; enum machine_mode counter_mode; HOST_WIDE_INT align = 0; unsigned HOST_WIDE_INT count = 0; @@ -11192,7 +11382,9 @@ ix86_expand_clrstr (src, count_exp, align_exp) else counter_mode = DImode; - destreg = copy_to_mode_reg (Pmode, XEXP (src, 0)); + destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0)); + if (destreg != XEXP (dst, 0)) + dst = replace_equiv_address_nv (dst, destreg); emit_insn (gen_cld ()); @@ -11203,12 +11395,8 @@ ix86_expand_clrstr (src, count_exp, align_exp) { countreg = ix86_zero_extend_to_Pmode (count_exp); zeroreg = copy_to_mode_reg (QImode, const0_rtx); - if (TARGET_64BIT) - emit_insn (gen_rep_stosqi_rex64 (destreg, countreg, zeroreg, - destreg, countreg)); - else - emit_insn (gen_rep_stosqi (destreg, countreg, zeroreg, - destreg, countreg)); + destexp = gen_rtx_PLUS (Pmode, destreg, countreg); + emit_insn (gen_rep_stos (destreg, countreg, dst, zeroreg, destexp)); } else if (count != 0 && (align >= 8 @@ -11216,6 +11404,8 @@ ix86_expand_clrstr (src, count_exp, align_exp) || optimize_size || count < (unsigned int) 64)) { int size = TARGET_64BIT && !optimize_size ? 8 : 4; + unsigned HOST_WIDE_INT offset = 0; + zeroreg = copy_to_mode_reg (size == 4 ? SImode : DImode, const0_rtx); if (count & ~(size - 1)) { @@ -11223,28 +11413,34 @@ ix86_expand_clrstr (src, count_exp, align_exp) GEN_INT ((count >> (size == 4 ? 2 : 3)) & (TARGET_64BIT ? -1 : 0x3fffffff))); countreg = ix86_zero_extend_to_Pmode (countreg); - if (size == 4) - { - if (TARGET_64BIT) - emit_insn (gen_rep_stossi_rex64 (destreg, countreg, zeroreg, - destreg, countreg)); - else - emit_insn (gen_rep_stossi (destreg, countreg, zeroreg, - destreg, countreg)); - } - else - emit_insn (gen_rep_stosdi_rex64 (destreg, countreg, zeroreg, - destreg, countreg)); + destexp = gen_rtx_ASHIFT (Pmode, countreg, GEN_INT (size == 4 ? 
2 : 3)); + destexp = gen_rtx_PLUS (Pmode, destexp, destreg); + emit_insn (gen_rep_stos (destreg, countreg, dst, zeroreg, destexp)); + offset = count & ~(size - 1); } if (size == 8 && (count & 0x04)) - emit_insn (gen_strsetsi (destreg, + { + rtx mem = adjust_automodify_address_nv (dst, SImode, destreg, + offset); + emit_insn (gen_strset (destreg, mem, gen_rtx_SUBREG (SImode, zeroreg, 0))); + offset += 4; + } if (count & 0x02) - emit_insn (gen_strsethi (destreg, + { + rtx mem = adjust_automodify_address_nv (dst, HImode, destreg, + offset); + emit_insn (gen_strset (destreg, mem, gen_rtx_SUBREG (HImode, zeroreg, 0))); + offset += 2; + } if (count & 0x01) - emit_insn (gen_strsetqi (destreg, + { + rtx mem = adjust_automodify_address_nv (dst, QImode, destreg, + offset); + emit_insn (gen_strset (destreg, mem, gen_rtx_SUBREG (QImode, zeroreg, 0))); + } } else { @@ -11271,6 +11467,8 @@ ix86_expand_clrstr (src, count_exp, align_exp) countreg2 = gen_reg_rtx (Pmode); countreg = copy_to_mode_reg (counter_mode, count_exp); zeroreg = copy_to_mode_reg (Pmode, const0_rtx); + /* Get rid of MEM_OFFSET, it won't be accurate. */ + dst = change_address (dst, BLKmode, destreg); if (count == 0 && align < desired_alignment) { @@ -11281,8 +11479,8 @@ ix86_expand_clrstr (src, count_exp, align_exp) if (align <= 1) { rtx label = ix86_expand_aligntest (destreg, 1); - emit_insn (gen_strsetqi (destreg, - gen_rtx_SUBREG (QImode, zeroreg, 0))); + emit_insn (gen_strset (destreg, dst, + gen_rtx_SUBREG (QImode, zeroreg, 0))); ix86_adjust_counter (countreg, 1); emit_label (label); LABEL_NUSES (label) = 1; @@ -11290,8 +11488,8 @@ ix86_expand_clrstr (src, count_exp, align_exp) if (align <= 2) { rtx label = ix86_expand_aligntest (destreg, 2); - emit_insn (gen_strsethi (destreg, - gen_rtx_SUBREG (HImode, zeroreg, 0))); + emit_insn (gen_strset (destreg, dst, + gen_rtx_SUBREG (HImode, zeroreg, 0))); ix86_adjust_counter (countreg, 2); emit_label (label); LABEL_NUSES (label) = 1; @@ -11299,9 +11497,10 @@ ix86_expand_clrstr (src, count_exp, align_exp) if (align <= 4 && desired_alignment > 4) { rtx label = ix86_expand_aligntest (destreg, 4); - emit_insn (gen_strsetsi (destreg, (TARGET_64BIT - ? gen_rtx_SUBREG (SImode, zeroreg, 0) - : zeroreg))); + emit_insn (gen_strset (destreg, dst, + (TARGET_64BIT + ? 
gen_rtx_SUBREG (SImode, zeroreg, 0) + : zeroreg))); ix86_adjust_counter (countreg, 4); emit_label (label); LABEL_NUSES (label) = 1; @@ -11320,15 +11519,16 @@ ix86_expand_clrstr (src, count_exp, align_exp) { emit_insn (gen_lshrdi3 (countreg2, ix86_zero_extend_to_Pmode (countreg), GEN_INT (3))); - emit_insn (gen_rep_stosdi_rex64 (destreg, countreg2, zeroreg, - destreg, countreg2)); + destexp = gen_rtx_ASHIFT (Pmode, countreg2, GEN_INT (3)); } else { - emit_insn (gen_lshrsi3 (countreg2, countreg, GEN_INT (2))); - emit_insn (gen_rep_stossi (destreg, countreg2, zeroreg, - destreg, countreg2)); + emit_insn (gen_lshrsi3 (countreg2, countreg, const2_rtx)); + destexp = gen_rtx_ASHIFT (Pmode, countreg2, const2_rtx); } + destexp = gen_rtx_PLUS (Pmode, destexp, destreg); + emit_insn (gen_rep_stos (destreg, countreg2, dst, zeroreg, destexp)); + if (label) { emit_label (label); @@ -11336,45 +11536,45 @@ ix86_expand_clrstr (src, count_exp, align_exp) } if (TARGET_64BIT && align > 4 && count != 0 && (count & 4)) - emit_insn (gen_strsetsi (destreg, - gen_rtx_SUBREG (SImode, zeroreg, 0))); + emit_insn (gen_strset (destreg, dst, + gen_rtx_SUBREG (SImode, zeroreg, 0))); if (TARGET_64BIT && (align <= 4 || count == 0)) { rtx label = ix86_expand_aligntest (countreg, 4); - emit_insn (gen_strsetsi (destreg, - gen_rtx_SUBREG (SImode, zeroreg, 0))); + emit_insn (gen_strset (destreg, dst, + gen_rtx_SUBREG (SImode, zeroreg, 0))); emit_label (label); LABEL_NUSES (label) = 1; } if (align > 2 && count != 0 && (count & 2)) - emit_insn (gen_strsethi (destreg, - gen_rtx_SUBREG (HImode, zeroreg, 0))); + emit_insn (gen_strset (destreg, dst, + gen_rtx_SUBREG (HImode, zeroreg, 0))); if (align <= 2 || count == 0) { rtx label = ix86_expand_aligntest (countreg, 2); - emit_insn (gen_strsethi (destreg, - gen_rtx_SUBREG (HImode, zeroreg, 0))); + emit_insn (gen_strset (destreg, dst, + gen_rtx_SUBREG (HImode, zeroreg, 0))); emit_label (label); LABEL_NUSES (label) = 1; } if (align > 1 && count != 0 && (count & 1)) - emit_insn (gen_strsetqi (destreg, - gen_rtx_SUBREG (QImode, zeroreg, 0))); + emit_insn (gen_strset (destreg, dst, + gen_rtx_SUBREG (QImode, zeroreg, 0))); if (align <= 1 || count == 0) { rtx label = ix86_expand_aligntest (countreg, 1); - emit_insn (gen_strsetqi (destreg, - gen_rtx_SUBREG (QImode, zeroreg, 0))); + emit_insn (gen_strset (destreg, dst, + gen_rtx_SUBREG (QImode, zeroreg, 0))); emit_label (label); LABEL_NUSES (label) = 1; } } return 1; } + /* Expand strlen. */ int -ix86_expand_strlen (out, src, eoschar, align) - rtx out, src, eoschar, align; +ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align) { rtx addr, scratch1, scratch2, scratch3, scratch4; @@ -11403,7 +11603,7 @@ ix86_expand_strlen (out, src, eoschar, align) emit_move_insn (out, addr); - ix86_expand_strlensi_unroll_1 (out, align); + ix86_expand_strlensi_unroll_1 (out, src, align); /* strlensi_unroll_1 returns the address of the zero at the end of the string, like memchr(), so compute the length by subtracting @@ -11415,6 +11615,7 @@ ix86_expand_strlen (out, src, eoschar, align) } else { + rtx unspec; scratch2 = gen_reg_rtx (Pmode); scratch3 = gen_reg_rtx (Pmode); scratch4 = force_reg (Pmode, constm1_rtx); @@ -11423,17 +11624,19 @@ ix86_expand_strlen (out, src, eoschar, align) eoschar = force_reg (QImode, eoschar); emit_insn (gen_cld ()); + src = replace_equiv_address_nv (src, scratch3); + + /* If .md starts supporting :P, this can be done in .md. 
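   (The wrapped sequence is essentially the classic

        cld
        mov    $-1, %ecx
        repnz scasb          scan for eoschar
        not    %ecx
        dec    %ecx          %ecx is now the length

   repnz scasb decrements %ecx once per byte scanned, terminator
   included, leaving -(len + 2); the one's complement and the add of
   -1 emitted just below recover len.)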
*/ + unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align, + scratch4), UNSPEC_SCAS); + emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec)); if (TARGET_64BIT) { - emit_insn (gen_strlenqi_rex_1 (scratch1, scratch3, eoschar, - align, scratch4, scratch3)); emit_insn (gen_one_cmpldi2 (scratch2, scratch1)); emit_insn (gen_adddi3 (out, scratch2, constm1_rtx)); } else { - emit_insn (gen_strlenqi_1 (scratch1, scratch3, eoschar, - align, scratch4, scratch3)); emit_insn (gen_one_cmplsi2 (scratch2, scratch1)); emit_insn (gen_addsi3 (out, scratch2, constm1_rtx)); } @@ -11449,12 +11652,11 @@ ix86_expand_strlen (out, src, eoschar, align) scratch = scratch register, initialized with the startaddress when not aligned, otherwise undefined - This is just the body. It needs the initialisations mentioned above and + This is just the body. It needs the initializations mentioned above and some address computing at the end. These things are done in i386.md. */ static void -ix86_expand_strlensi_unroll_1 (out, align_rtx) - rtx out, align_rtx; +ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx) { int align; rtx tmp; @@ -11490,9 +11692,9 @@ ix86_expand_strlensi_unroll_1 (out, align_rtx) emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL, Pmode, 1, align_4_label); - emit_cmp_and_jump_insns (align_rtx, GEN_INT (2), EQ, NULL, + emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL, Pmode, 1, align_2_label); - emit_cmp_and_jump_insns (align_rtx, GEN_INT (2), GTU, NULL, + emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL, Pmode, 1, align_3_label); } else @@ -11500,14 +11702,14 @@ ix86_expand_strlensi_unroll_1 (out, align_rtx) /* Since the alignment is 2, we have to check 2 or 0 bytes; check if is aligned to 4 - byte. */ - align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (2), + align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx, NULL_RTX, 0, OPTAB_WIDEN); emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL, Pmode, 1, align_4_label); } - mem = gen_rtx_MEM (QImode, out); + mem = change_address (src, QImode, out); /* Now compare the bytes. */ @@ -11551,7 +11753,7 @@ ix86_expand_strlensi_unroll_1 (out, align_rtx) speed up. */ emit_label (align_4_label); - mem = gen_rtx_MEM (SImode, out); + mem = change_address (src, SImode, out); emit_move_insn (scratch, mem); if (TARGET_64BIT) emit_insn (gen_adddi3 (out, out, GEN_INT (4))); @@ -11586,7 +11788,7 @@ ix86_expand_strlensi_unroll_1 (out, align_rtx) tmpreg))); /* Emit lea manually to avoid clobbering of flags. */ emit_insn (gen_rtx_SET (SImode, reg2, - gen_rtx_PLUS (Pmode, out, GEN_INT (2)))); + gen_rtx_PLUS (Pmode, out, const2_rtx))); tmp = gen_rtx_REG (CCNOmode, FLAGS_REG); tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx); @@ -11613,9 +11815,9 @@ ix86_expand_strlensi_unroll_1 (out, align_rtx) /* Not in the first two. Move two bytes forward. 
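   (tmpreg still holds the zero-byte test bits computed from the
   loaded word, where the lowest set 0x80 bit marks the first
   terminator byte.  The low 16 bits were dealt with above, so the
   shift right by 16 that follows exposes the two remaining bytes to
   the same byte tests.)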
*/ emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16))); if (TARGET_64BIT) - emit_insn (gen_adddi3 (out, out, GEN_INT (2))); + emit_insn (gen_adddi3 (out, out, const2_rtx)); else - emit_insn (gen_addsi3 (out, out, GEN_INT (2))); + emit_insn (gen_addsi3 (out, out, const2_rtx)); emit_label (end_2_label); @@ -11634,9 +11836,9 @@ ix86_expand_strlensi_unroll_1 (out, align_rtx) } void -ix86_expand_call (retval, fnaddr, callarg1, callarg2, pop, sibcall) - rtx retval, fnaddr, callarg1, callarg2, pop; - int sibcall; +ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1, + rtx callarg2 ATTRIBUTE_UNUSED, + rtx pop, int sibcall) { rtx use = NULL, call; @@ -11673,7 +11875,7 @@ ix86_expand_call (retval, fnaddr, callarg1, callarg2, pop, sibcall) { rtx addr; addr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0)); - fnaddr = gen_rtx_REG (Pmode, 40); + fnaddr = gen_rtx_REG (Pmode, FIRST_REX_INT_REG + 3 /* R11 */); emit_move_insn (fnaddr, addr); fnaddr = gen_rtx_MEM (QImode, fnaddr); } @@ -11699,7 +11901,7 @@ ix86_expand_call (retval, fnaddr, callarg1, callarg2, pop, sibcall) function. */ static struct machine_function * -ix86_init_machine_status () +ix86_init_machine_status (void) { struct machine_function *f; @@ -11716,9 +11918,7 @@ ix86_init_machine_status () which slot to use. */ rtx -assign_386_stack_local (mode, n) - enum machine_mode mode; - int n; +assign_386_stack_local (enum machine_mode mode, int n) { struct stack_local_entry *s; @@ -11744,7 +11944,7 @@ assign_386_stack_local (mode, n) static GTY(()) rtx ix86_tls_symbol; rtx -ix86_tls_get_addr () +ix86_tls_get_addr (void) { if (!ix86_tls_symbol) @@ -11762,8 +11962,7 @@ ix86_tls_get_addr () encoding. Does not include the one-byte modrm, opcode, or prefix. */ static int -memory_address_length (addr) - rtx addr; +memory_address_length (rtx addr) { struct ix86_address parts; rtx base, index, disp; @@ -11783,10 +11982,15 @@ memory_address_length (addr) disp = parts.disp; len = 0; + /* Rule of thumb: + - esp as the base always wants an index, + - ebp as the base always wants a displacement. */ + /* Register Indirect. */ if (base && !index && !disp) { - /* Special cases: ebp and esp need the two-byte modrm form. */ + /* esp (for its index) and ebp (for its displacement) need + the two-byte modrm form. */ if (addr == stack_pointer_rtx || addr == arg_pointer_rtx || addr == frame_pointer_rtx @@ -11810,9 +12014,16 @@ memory_address_length (addr) else len = 4; } + /* ebp always wants a displacement. */ + else if (base == hard_frame_pointer_rtx) + len = 1; - /* An index requires the two-byte modrm form. */ - if (index) + /* An index requires the two-byte modrm form.... */ + if (index + /* ...like esp, which always wants an index. */ + || base == stack_pointer_rtx + || base == arg_pointer_rtx + || base == frame_pointer_rtx) len += 1; } @@ -11822,9 +12033,7 @@ memory_address_length (addr) /* Compute default value for "length_immediate" attribute. When SHORTFORM is set, expect that insn have 8bit immediate alternative. */ int -ix86_attr_length_immediate_default (insn, shortform) - rtx insn; - int shortform; +ix86_attr_length_immediate_default (rtx insn, int shortform) { int len = 0; int i; @@ -11864,8 +12073,7 @@ ix86_attr_length_immediate_default (insn, shortform) } /* Compute default value for "length_address" attribute. */ int -ix86_attr_length_address_default (insn) - rtx insn; +ix86_attr_length_address_default (rtx insn) { int i; @@ -11901,7 +12109,7 @@ ix86_attr_length_address_default (insn) /* Return the maximum number of instructions a cpu can issue. 
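   (This is the scheduler's per-cycle issue budget: the wide decoders
   below, Pentium 4, Athlon, K8 and now Nocona, report 3, while the
   remaining processors take the default case.)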
*/ static int -ix86_issue_rate () +ix86_issue_rate (void) { switch (ix86_tune) { @@ -11913,6 +12121,7 @@ ix86_issue_rate () case PROCESSOR_PENTIUM4: case PROCESSOR_ATHLON: case PROCESSOR_K8: + case PROCESSOR_NOCONA: return 3; default: @@ -11924,9 +12133,7 @@ ix86_issue_rate () by DEP_INSN and nothing set by DEP_INSN. */ static int -ix86_flags_dependant (insn, dep_insn, insn_type) - rtx insn, dep_insn; - enum attr_type insn_type; +ix86_flags_dependant (rtx insn, rtx dep_insn, enum attr_type insn_type) { rtx set, set2; @@ -11971,9 +12178,7 @@ ix86_flags_dependant (insn, dep_insn, insn_type) address with operands set by DEP_INSN. */ static int -ix86_agi_dependant (insn, dep_insn, insn_type) - rtx insn, dep_insn; - enum attr_type insn_type; +ix86_agi_dependant (rtx insn, rtx dep_insn, enum attr_type insn_type) { rtx addr; @@ -12008,9 +12213,7 @@ ix86_agi_dependant (insn, dep_insn, insn_type) } static int -ix86_adjust_cost (insn, link, dep_insn, cost) - rtx insn, link, dep_insn; - int cost; +ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost) { enum attr_type insn_type, dep_insn_type; enum attr_memory memory, dep_memory; @@ -12076,7 +12279,7 @@ ix86_adjust_cost (insn, link, dep_insn, cost) previous instruction is not needed to compute the address. */ if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH) && !ix86_agi_dependant (insn, dep_insn, insn_type)) - { + { /* Claim moves to take one cycle, as core can issue one load at time and the next load can start cycle later. */ if (dep_insn_type == TYPE_IMOV @@ -12110,7 +12313,7 @@ ix86_adjust_cost (insn, link, dep_insn, cost) previous instruction is not needed to compute the address. */ if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH) && !ix86_agi_dependant (insn, dep_insn, insn_type)) - { + { /* Claim moves to take one cycle, as core can issue one load at time and the next load can start cycle later. */ if (dep_insn_type == TYPE_IMOV @@ -12133,15 +12336,15 @@ ix86_adjust_cost (insn, link, dep_insn, cost) previous instruction is not needed to compute the address. */ if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH) && !ix86_agi_dependant (insn, dep_insn, insn_type)) - { + { enum attr_unit unit = get_attr_unit (insn); int loadcost = 3; /* Because of the difference between the length of integer and floating unit pipeline preparation stages, the memory operands - for floating point are cheaper. + for floating point are cheaper. - ??? For Athlon it the difference is most propbably 2. */ + ??? For Athlon it the difference is most probably 2. */ if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN) loadcost = 3; else @@ -12160,256 +12363,12 @@ ix86_adjust_cost (insn, link, dep_insn, cost) return cost; } -static union -{ - struct ppro_sched_data - { - rtx decode[3]; - int issued_this_cycle; - } ppro; -} ix86_sched_data; - -static enum attr_ppro_uops -ix86_safe_ppro_uops (insn) - rtx insn; -{ - if (recog_memoized (insn) >= 0) - return get_attr_ppro_uops (insn); - else - return PPRO_UOPS_MANY; -} - -static void -ix86_dump_ppro_packet (dump) - FILE *dump; -{ - if (ix86_sched_data.ppro.decode[0]) - { - fprintf (dump, "PPRO packet: %d", - INSN_UID (ix86_sched_data.ppro.decode[0])); - if (ix86_sched_data.ppro.decode[1]) - fprintf (dump, " %d", INSN_UID (ix86_sched_data.ppro.decode[1])); - if (ix86_sched_data.ppro.decode[2]) - fprintf (dump, " %d", INSN_UID (ix86_sched_data.ppro.decode[2])); - fputc ('\n', dump); - } -} - -/* We're beginning a new block. Initialize data structures as necessary. 
*/ - -static void -ix86_sched_init (dump, sched_verbose, veclen) - FILE *dump ATTRIBUTE_UNUSED; - int sched_verbose ATTRIBUTE_UNUSED; - int veclen ATTRIBUTE_UNUSED; -{ - memset (&ix86_sched_data, 0, sizeof (ix86_sched_data)); -} - -/* Shift INSN to SLOT, and shift everything else down. */ - -static void -ix86_reorder_insn (insnp, slot) - rtx *insnp, *slot; -{ - if (insnp != slot) - { - rtx insn = *insnp; - do - insnp[0] = insnp[1]; - while (++insnp != slot); - *insnp = insn; - } -} - -static void -ix86_sched_reorder_ppro (ready, e_ready) - rtx *ready; - rtx *e_ready; -{ - rtx decode[3]; - enum attr_ppro_uops cur_uops; - int issued_this_cycle; - rtx *insnp; - int i; - - /* At this point .ppro.decode contains the state of the three - decoders from last "cycle". That is, those insns that were - actually independent. But here we're scheduling for the - decoder, and we may find things that are decodable in the - same cycle. */ - - memcpy (decode, ix86_sched_data.ppro.decode, sizeof (decode)); - issued_this_cycle = 0; - - insnp = e_ready; - cur_uops = ix86_safe_ppro_uops (*insnp); - - /* If the decoders are empty, and we've a complex insn at the - head of the priority queue, let it issue without complaint. */ - if (decode[0] == NULL) - { - if (cur_uops == PPRO_UOPS_MANY) - { - decode[0] = *insnp; - goto ppro_done; - } - - /* Otherwise, search for a 2-4 uop unsn to issue. */ - while (cur_uops != PPRO_UOPS_FEW) - { - if (insnp == ready) - break; - cur_uops = ix86_safe_ppro_uops (*--insnp); - } - - /* If so, move it to the head of the line. */ - if (cur_uops == PPRO_UOPS_FEW) - ix86_reorder_insn (insnp, e_ready); - - /* Issue the head of the queue. */ - issued_this_cycle = 1; - decode[0] = *e_ready--; - } - - /* Look for simple insns to fill in the other two slots. */ - for (i = 1; i < 3; ++i) - if (decode[i] == NULL) - { - if (ready > e_ready) - goto ppro_done; - - insnp = e_ready; - cur_uops = ix86_safe_ppro_uops (*insnp); - while (cur_uops != PPRO_UOPS_ONE) - { - if (insnp == ready) - break; - cur_uops = ix86_safe_ppro_uops (*--insnp); - } - - /* Found one. Move it to the head of the queue and issue it. */ - if (cur_uops == PPRO_UOPS_ONE) - { - ix86_reorder_insn (insnp, e_ready); - decode[i] = *e_ready--; - issued_this_cycle++; - continue; - } - - /* ??? Didn't find one. Ideally, here we would do a lazy split - of 2-uop insns, issue one and queue the other. */ - } - - ppro_done: - if (issued_this_cycle == 0) - issued_this_cycle = 1; - ix86_sched_data.ppro.issued_this_cycle = issued_this_cycle; -} - -/* We are about to being issuing insns for this clock cycle. - Override the default sort algorithm to better slot instructions. */ static int -ix86_sched_reorder (dump, sched_verbose, ready, n_readyp, clock_var) - FILE *dump ATTRIBUTE_UNUSED; - int sched_verbose ATTRIBUTE_UNUSED; - rtx *ready; - int *n_readyp; - int clock_var ATTRIBUTE_UNUSED; +ia32_use_dfa_pipeline_interface (void) { - int n_ready = *n_readyp; - rtx *e_ready = ready + n_ready - 1; - - /* Make sure to go ahead and initialize key items in - ix86_sched_data if we are not going to bother trying to - reorder the ready queue. */ - if (n_ready < 2) - { - ix86_sched_data.ppro.issued_this_cycle = 1; - goto out; - } - - switch (ix86_tune) - { - default: - break; - - case PROCESSOR_PENTIUMPRO: - ix86_sched_reorder_ppro (ready, e_ready); - break; - } - -out: - return ix86_issue_rate (); -} - -/* We are about to issue INSN. Return the number of insns left on the - ready queue that can be issued this cycle. 
*/ - -static int -ix86_variable_issue (dump, sched_verbose, insn, can_issue_more) - FILE *dump; - int sched_verbose; - rtx insn; - int can_issue_more; -{ - int i; - switch (ix86_tune) - { - default: - return can_issue_more - 1; - - case PROCESSOR_PENTIUMPRO: - { - enum attr_ppro_uops uops = ix86_safe_ppro_uops (insn); - - if (uops == PPRO_UOPS_MANY) - { - if (sched_verbose) - ix86_dump_ppro_packet (dump); - ix86_sched_data.ppro.decode[0] = insn; - ix86_sched_data.ppro.decode[1] = NULL; - ix86_sched_data.ppro.decode[2] = NULL; - if (sched_verbose) - ix86_dump_ppro_packet (dump); - ix86_sched_data.ppro.decode[0] = NULL; - } - else if (uops == PPRO_UOPS_FEW) - { - if (sched_verbose) - ix86_dump_ppro_packet (dump); - ix86_sched_data.ppro.decode[0] = insn; - ix86_sched_data.ppro.decode[1] = NULL; - ix86_sched_data.ppro.decode[2] = NULL; - } - else - { - for (i = 0; i < 3; ++i) - if (ix86_sched_data.ppro.decode[i] == NULL) - { - ix86_sched_data.ppro.decode[i] = insn; - break; - } - if (i == 3) - abort (); - if (i == 2) - { - if (sched_verbose) - ix86_dump_ppro_packet (dump); - ix86_sched_data.ppro.decode[0] = NULL; - ix86_sched_data.ppro.decode[1] = NULL; - ix86_sched_data.ppro.decode[2] = NULL; - } - } - } - return --ix86_sched_data.ppro.issued_this_cycle; - } -} - -static int -ia32_use_dfa_pipeline_interface () -{ - if (TARGET_PENTIUM || TARGET_ATHLON_K8) + if (TARGET_PENTIUM + || TARGET_PENTIUMPRO + || TARGET_ATHLON_K8) return 1; return 0; } @@ -12419,60 +12378,18 @@ ia32_use_dfa_pipeline_interface () large results extra work for the scheduler. */ static int -ia32_multipass_dfa_lookahead () +ia32_multipass_dfa_lookahead (void) { if (ix86_tune == PROCESSOR_PENTIUM) return 2; - else - return 0; -} - - -/* Walk through INSNS and look for MEM references whose address is DSTREG or - SRCREG and set the memory attribute to those of DSTREF and SRCREF, as - appropriate. */ -void -ix86_set_move_mem_attrs (insns, dstref, srcref, dstreg, srcreg) - rtx insns; - rtx dstref, srcref, dstreg, srcreg; -{ - rtx insn; + if (ix86_tune == PROCESSOR_PENTIUMPRO) + return 1; - for (insn = insns; insn != 0 ; insn = NEXT_INSN (insn)) - if (INSN_P (insn)) - ix86_set_move_mem_attrs_1 (PATTERN (insn), dstref, srcref, - dstreg, srcreg); + else + return 0; } -/* Subroutine of above to actually do the updating by recursively walking - the rtx. */ - -static void -ix86_set_move_mem_attrs_1 (x, dstref, srcref, dstreg, srcreg) - rtx x; - rtx dstref, srcref, dstreg, srcreg; -{ - enum rtx_code code = GET_CODE (x); - const char *format_ptr = GET_RTX_FORMAT (code); - int i, j; - - if (code == MEM && XEXP (x, 0) == dstreg) - MEM_COPY_ATTRIBUTES (x, dstref); - else if (code == MEM && XEXP (x, 0) == srcreg) - MEM_COPY_ATTRIBUTES (x, srcref); - - for (i = 0; i < GET_RTX_LENGTH (code); i++, format_ptr++) - { - if (*format_ptr == 'e') - ix86_set_move_mem_attrs_1 (XEXP (x, i), dstref, srcref, - dstreg, srcreg); - else if (*format_ptr == 'E') - for (j = XVECLEN (x, i) - 1; j >= 0; j--) - ix86_set_move_mem_attrs_1 (XVECEXP (x, i, j), dstref, srcref, - dstreg, srcreg); - } -} /* Compute the alignment given to a constant that is being placed in memory. EXP is the constant and ALIGN is the alignment that the object would @@ -12481,9 +12398,7 @@ ix86_set_move_mem_attrs_1 (x, dstref, srcref, dstreg, srcreg) the object. 
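   (With the body below, for instance, a DFmode REAL_CST asking for
   less than 64-bit alignment is bumped to 64 so FP loads stay
   aligned, while long string constants are now capped at
   BITS_PER_WORD, and left untouched when optimizing for size, instead
   of the old blanket 256-bit request.)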
*/ int -ix86_constant_alignment (exp, align) - tree exp; - int align; +ix86_constant_alignment (tree exp, int align) { if (TREE_CODE (exp) == REAL_CST) { @@ -12492,9 +12407,9 @@ ix86_constant_alignment (exp, align) else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128) return 128; } - else if (TREE_CODE (exp) == STRING_CST && TREE_STRING_LENGTH (exp) >= 31 - && align < 256) - return 256; + else if (!optimize_size && TREE_CODE (exp) == STRING_CST + && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD) + return BITS_PER_WORD; return align; } @@ -12505,9 +12420,7 @@ ix86_constant_alignment (exp, align) instead of that alignment to align the object. */ int -ix86_data_alignment (type, align) - tree type; - int align; +ix86_data_alignment (tree type, int align) { if (AGGREGATE_TYPE_P (type) && TYPE_SIZE (type) @@ -12571,9 +12484,7 @@ ix86_data_alignment (type, align) instead of that alignment to align the object. */ int -ix86_local_alignment (type, align) - tree type; - int align; +ix86_local_alignment (tree type, int align) { /* x86-64 ABI requires arrays greater than 16 bytes to be aligned to 16byte boundary. */ @@ -12626,8 +12537,7 @@ ix86_local_alignment (type, align) FNADDR is an RTX for the address of the function's pure code. CXT is an RTX for the static chain value for the function. */ void -x86_initialize_trampoline (tramp, fnaddr, cxt) - rtx tramp, fnaddr, cxt; +x86_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt) { if (!TARGET_64BIT) { @@ -12682,7 +12592,7 @@ x86_initialize_trampoline (tramp, fnaddr, cxt) } #ifdef TRANSFER_FROM_TRAMPOLINE - emit_library_call (gen_rtx (SYMBOL_REF, Pmode, "__enable_execute_stack"), + emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"), LCT_NORMAL, VOIDmode, 1, tramp, Pmode); #endif } @@ -12705,25 +12615,20 @@ struct builtin_description const unsigned int flag; }; -/* Used for builtins that are enabled both by -msse and -msse2. 
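   (These compound masks let the SSE1 builtins be registered when only
   -msse2 was given.  Presumably the option handling elsewhere in this
   patch now makes MASK_SSE2 imply MASK_SSE, so plain MASK_SSE
   suffices, hence the wholesale MASK_SSE1 to MASK_SSE renaming in the
   tables that follow.)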
*/ -#define MASK_SSE1 (MASK_SSE | MASK_SSE2) -#define MASK_SSE164 (MASK_SSE | MASK_SSE2 | MASK_64BIT) -#define MASK_SSE264 (MASK_SSE2 | MASK_64BIT) - static const struct builtin_description bdesc_comi[] = { - { MASK_SSE1, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 }, - { MASK_SSE1, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 }, - { MASK_SSE1, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 }, - { MASK_SSE1, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 }, - { MASK_SSE1, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 }, - { MASK_SSE1, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 }, - { MASK_SSE1, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 }, - { MASK_SSE1, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 }, - { MASK_SSE1, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 }, - { MASK_SSE1, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 }, - { MASK_SSE1, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 }, - { MASK_SSE1, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 }, + { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 }, + { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 }, + { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 }, + { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 }, + { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 }, + { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 }, + { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 }, + { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 }, + { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 }, + { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 }, + { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 }, + { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 }, { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 }, { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 }, { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 }, @@ -12741,51 +12646,51 @@ static const struct builtin_description bdesc_comi[] = static const struct builtin_description bdesc_2arg[] = { /* SSE */ - { MASK_SSE1, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, 0, 0 }, - { MASK_SSE1, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, 0, 0 }, - { MASK_SSE1, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, 0, 0 }, - { MASK_SSE1, CODE_FOR_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, 0, 0 }, - { MASK_SSE1, CODE_FOR_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, 0, 0 }, - { MASK_SSE1, CODE_FOR_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, 0, 0 }, - { MASK_SSE1, CODE_FOR_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, 0, 0 }, - { MASK_SSE1, CODE_FOR_vmdivv4sf3, "__builtin_ia32_divss", 
IX86_BUILTIN_DIVSS, 0, 0 }, - - { MASK_SSE1, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, 0 }, - { MASK_SSE1, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, 0 }, - { MASK_SSE1, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, 0 }, - { MASK_SSE1, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, 1 }, - { MASK_SSE1, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, 1 }, - { MASK_SSE1, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, 0 }, - { MASK_SSE1, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, EQ, 0 }, - { MASK_SSE1, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, LT, 0 }, - { MASK_SSE1, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, LE, 0 }, - { MASK_SSE1, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, LT, 1 }, - { MASK_SSE1, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, LE, 1 }, - { MASK_SSE1, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, UNORDERED, 0 }, - { MASK_SSE1, CODE_FOR_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, 0 }, - { MASK_SSE1, CODE_FOR_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, 0 }, - { MASK_SSE1, CODE_FOR_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, 0 }, - { MASK_SSE1, CODE_FOR_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, 0 }, - { MASK_SSE1, CODE_FOR_vmmaskncmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, EQ, 0 }, - { MASK_SSE1, CODE_FOR_vmmaskncmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, LT, 0 }, - { MASK_SSE1, CODE_FOR_vmmaskncmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, LE, 0 }, - { MASK_SSE1, CODE_FOR_vmmaskncmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, UNORDERED, 0 }, - - { MASK_SSE1, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, 0, 0 }, - { MASK_SSE1, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, 0, 0 }, - { MASK_SSE1, CODE_FOR_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, 0, 0 }, - { MASK_SSE1, CODE_FOR_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, 0, 0 }, - - { MASK_SSE1, CODE_FOR_sse_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, 0, 0 }, - { MASK_SSE1, CODE_FOR_sse_nandv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, 0, 0 }, - { MASK_SSE1, CODE_FOR_sse_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, 0, 0 }, - { MASK_SSE1, CODE_FOR_sse_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, 0, 0 }, - - { MASK_SSE1, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, 0, 0 }, - { MASK_SSE1, CODE_FOR_sse_movhlps, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, 0, 0 }, - { MASK_SSE1, CODE_FOR_sse_movlhps, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, 0, 0 }, - { MASK_SSE1, CODE_FOR_sse_unpckhps, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, 0, 0 }, - { MASK_SSE1, CODE_FOR_sse_unpcklps, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, 0, 0 }, + { MASK_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, 0, 0 }, + { MASK_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, 0, 0 }, + { MASK_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, 0, 0 }, + { MASK_SSE, CODE_FOR_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, 0, 0 }, + { MASK_SSE, 
CODE_FOR_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, 0, 0 }, + { MASK_SSE, CODE_FOR_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, 0, 0 }, + { MASK_SSE, CODE_FOR_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, 0, 0 }, + { MASK_SSE, CODE_FOR_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, 0, 0 }, + + { MASK_SSE, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, 0 }, + { MASK_SSE, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, 0 }, + { MASK_SSE, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, 0 }, + { MASK_SSE, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, 1 }, + { MASK_SSE, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, 1 }, + { MASK_SSE, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, 0 }, + { MASK_SSE, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, EQ, 0 }, + { MASK_SSE, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, LT, 0 }, + { MASK_SSE, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, LE, 0 }, + { MASK_SSE, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, LT, 1 }, + { MASK_SSE, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, LE, 1 }, + { MASK_SSE, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, UNORDERED, 0 }, + { MASK_SSE, CODE_FOR_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, 0 }, + { MASK_SSE, CODE_FOR_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, 0 }, + { MASK_SSE, CODE_FOR_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, 0 }, + { MASK_SSE, CODE_FOR_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, 0 }, + { MASK_SSE, CODE_FOR_vmmaskncmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, EQ, 0 }, + { MASK_SSE, CODE_FOR_vmmaskncmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, LT, 0 }, + { MASK_SSE, CODE_FOR_vmmaskncmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, LE, 0 }, + { MASK_SSE, CODE_FOR_vmmaskncmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, UNORDERED, 0 }, + + { MASK_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, 0, 0 }, + { MASK_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, 0, 0 }, + { MASK_SSE, CODE_FOR_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, 0, 0 }, + { MASK_SSE, CODE_FOR_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, 0, 0 }, + + { MASK_SSE, CODE_FOR_sse_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, 0, 0 }, + { MASK_SSE, CODE_FOR_sse_nandv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, 0, 0 }, + { MASK_SSE, CODE_FOR_sse_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, 0, 0 }, + { MASK_SSE, CODE_FOR_sse_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, 0, 0 }, + + { MASK_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, 0, 0 }, + { MASK_SSE, CODE_FOR_sse_movhlps, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, 0, 0 }, + { MASK_SSE, CODE_FOR_sse_movlhps, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, 0, 0 }, + { MASK_SSE, CODE_FOR_sse_unpckhps, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, 0, 0 }, + { MASK_SSE, CODE_FOR_sse_unpcklps, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, 0, 0 }, /* MMX */ { MASK_MMX, CODE_FOR_addv8qi3, "__builtin_ia32_paddb", 
IX86_BUILTIN_PADDB, 0, 0 }, @@ -12808,15 +12713,15 @@ static const struct builtin_description bdesc_2arg[] = { MASK_MMX, CODE_FOR_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, 0, 0 }, { MASK_MMX, CODE_FOR_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, 0, 0 }, - { MASK_SSE1 | MASK_3DNOW_A, CODE_FOR_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, 0, 0 }, + { MASK_SSE | MASK_3DNOW_A, CODE_FOR_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, 0, 0 }, { MASK_MMX, CODE_FOR_mmx_anddi3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, 0, 0 }, { MASK_MMX, CODE_FOR_mmx_nanddi3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, 0, 0 }, { MASK_MMX, CODE_FOR_mmx_iordi3, "__builtin_ia32_por", IX86_BUILTIN_POR, 0, 0 }, { MASK_MMX, CODE_FOR_mmx_xordi3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, 0, 0 }, - { MASK_SSE1 | MASK_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, 0, 0 }, - { MASK_SSE1 | MASK_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, 0, 0 }, + { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, 0, 0 }, + { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, 0, 0 }, { MASK_MMX, CODE_FOR_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, 0, 0 }, { MASK_MMX, CODE_FOR_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, 0, 0 }, @@ -12825,10 +12730,10 @@ static const struct builtin_description bdesc_2arg[] = { MASK_MMX, CODE_FOR_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, 0, 0 }, { MASK_MMX, CODE_FOR_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, 0, 0 }, - { MASK_SSE1 | MASK_3DNOW_A, CODE_FOR_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, 0, 0 }, - { MASK_SSE1 | MASK_3DNOW_A, CODE_FOR_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, 0, 0 }, - { MASK_SSE1 | MASK_3DNOW_A, CODE_FOR_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, 0, 0 }, - { MASK_SSE1 | MASK_3DNOW_A, CODE_FOR_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, 0, 0 }, + { MASK_SSE | MASK_3DNOW_A, CODE_FOR_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, 0, 0 }, + { MASK_SSE | MASK_3DNOW_A, CODE_FOR_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, 0, 0 }, + { MASK_SSE | MASK_3DNOW_A, CODE_FOR_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, 0, 0 }, + { MASK_SSE | MASK_3DNOW_A, CODE_FOR_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, 0, 0 }, { MASK_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, 0, 0 }, { MASK_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, 0, 0 }, @@ -12842,9 +12747,9 @@ static const struct builtin_description bdesc_2arg[] = { MASK_MMX, CODE_FOR_mmx_packssdw, 0, IX86_BUILTIN_PACKSSDW, 0, 0 }, { MASK_MMX, CODE_FOR_mmx_packuswb, 0, IX86_BUILTIN_PACKUSWB, 0, 0 }, - { MASK_SSE1, CODE_FOR_cvtpi2ps, 0, IX86_BUILTIN_CVTPI2PS, 0, 0 }, - { MASK_SSE1, CODE_FOR_cvtsi2ss, 0, IX86_BUILTIN_CVTSI2SS, 0, 0 }, - { MASK_SSE164, CODE_FOR_cvtsi2ssq, 0, IX86_BUILTIN_CVTSI642SS, 0, 0 }, + { MASK_SSE, CODE_FOR_cvtpi2ps, 0, IX86_BUILTIN_CVTPI2PS, 0, 0 }, + { MASK_SSE, CODE_FOR_cvtsi2ss, 0, IX86_BUILTIN_CVTSI2SS, 0, 0 }, + { MASK_SSE | MASK_64BIT, CODE_FOR_cvtsi2ssq, 0, IX86_BUILTIN_CVTSI642SS, 0, 0 }, { MASK_MMX, CODE_FOR_ashlv4hi3, 0, IX86_BUILTIN_PSLLW, 0, 0 }, { MASK_MMX, CODE_FOR_ashlv4hi3, 0, IX86_BUILTIN_PSLLWI, 0, 0 }, @@ -12865,7 +12770,7 @@ static const struct builtin_description 
bdesc_2arg[] = { MASK_MMX, CODE_FOR_ashrv2si3, 0, IX86_BUILTIN_PSRAD, 0, 0 }, { MASK_MMX, CODE_FOR_ashrv2si3, 0, IX86_BUILTIN_PSRADI, 0, 0 }, - { MASK_SSE1 | MASK_3DNOW_A, CODE_FOR_mmx_psadbw, 0, IX86_BUILTIN_PSADBW, 0, 0 }, + { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_psadbw, 0, IX86_BUILTIN_PSADBW, 0, 0 }, { MASK_MMX, CODE_FOR_mmx_pmaddwd, 0, IX86_BUILTIN_PMADDWD, 0, 0 }, /* SSE2 */ @@ -12995,26 +12900,34 @@ static const struct builtin_description bdesc_2arg[] = { MASK_SSE2, CODE_FOR_sse2_pmaddwd, 0, IX86_BUILTIN_PMADDWD128, 0, 0 }, { MASK_SSE2, CODE_FOR_cvtsi2sd, 0, IX86_BUILTIN_CVTSI2SD, 0, 0 }, - { MASK_SSE264, CODE_FOR_cvtsi2sdq, 0, IX86_BUILTIN_CVTSI642SD, 0, 0 }, + { MASK_SSE2 | MASK_64BIT, CODE_FOR_cvtsi2sdq, 0, IX86_BUILTIN_CVTSI642SD, 0, 0 }, { MASK_SSE2, CODE_FOR_cvtsd2ss, 0, IX86_BUILTIN_CVTSD2SS, 0, 0 }, - { MASK_SSE2, CODE_FOR_cvtss2sd, 0, IX86_BUILTIN_CVTSS2SD, 0, 0 } + { MASK_SSE2, CODE_FOR_cvtss2sd, 0, IX86_BUILTIN_CVTSS2SD, 0, 0 }, + + /* SSE3 MMX */ + { MASK_SSE3, CODE_FOR_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, 0, 0 }, + { MASK_SSE3, CODE_FOR_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, 0, 0 }, + { MASK_SSE3, CODE_FOR_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, 0, 0 }, + { MASK_SSE3, CODE_FOR_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, 0, 0 }, + { MASK_SSE3, CODE_FOR_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, 0, 0 }, + { MASK_SSE3, CODE_FOR_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, 0, 0 } }; static const struct builtin_description bdesc_1arg[] = { - { MASK_SSE1 | MASK_3DNOW_A, CODE_FOR_mmx_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB, 0, 0 }, - { MASK_SSE1, CODE_FOR_sse_movmskps, 0, IX86_BUILTIN_MOVMSKPS, 0, 0 }, + { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB, 0, 0 }, + { MASK_SSE, CODE_FOR_sse_movmskps, 0, IX86_BUILTIN_MOVMSKPS, 0, 0 }, - { MASK_SSE1, CODE_FOR_sqrtv4sf2, 0, IX86_BUILTIN_SQRTPS, 0, 0 }, - { MASK_SSE1, CODE_FOR_rsqrtv4sf2, 0, IX86_BUILTIN_RSQRTPS, 0, 0 }, - { MASK_SSE1, CODE_FOR_rcpv4sf2, 0, IX86_BUILTIN_RCPPS, 0, 0 }, + { MASK_SSE, CODE_FOR_sqrtv4sf2, 0, IX86_BUILTIN_SQRTPS, 0, 0 }, + { MASK_SSE, CODE_FOR_rsqrtv4sf2, 0, IX86_BUILTIN_RSQRTPS, 0, 0 }, + { MASK_SSE, CODE_FOR_rcpv4sf2, 0, IX86_BUILTIN_RCPPS, 0, 0 }, - { MASK_SSE1, CODE_FOR_cvtps2pi, 0, IX86_BUILTIN_CVTPS2PI, 0, 0 }, - { MASK_SSE1, CODE_FOR_cvtss2si, 0, IX86_BUILTIN_CVTSS2SI, 0, 0 }, - { MASK_SSE164, CODE_FOR_cvtss2siq, 0, IX86_BUILTIN_CVTSS2SI64, 0, 0 }, - { MASK_SSE1, CODE_FOR_cvttps2pi, 0, IX86_BUILTIN_CVTTPS2PI, 0, 0 }, - { MASK_SSE1, CODE_FOR_cvttss2si, 0, IX86_BUILTIN_CVTTSS2SI, 0, 0 }, - { MASK_SSE164, CODE_FOR_cvttss2siq, 0, IX86_BUILTIN_CVTTSS2SI64, 0, 0 }, + { MASK_SSE, CODE_FOR_cvtps2pi, 0, IX86_BUILTIN_CVTPS2PI, 0, 0 }, + { MASK_SSE, CODE_FOR_cvtss2si, 0, IX86_BUILTIN_CVTSS2SI, 0, 0 }, + { MASK_SSE | MASK_64BIT, CODE_FOR_cvtss2siq, 0, IX86_BUILTIN_CVTSS2SI64, 0, 0 }, + { MASK_SSE, CODE_FOR_cvttps2pi, 0, IX86_BUILTIN_CVTTPS2PI, 0, 0 }, + { MASK_SSE, CODE_FOR_cvttss2si, 0, IX86_BUILTIN_CVTTSS2SI, 0, 0 }, + { MASK_SSE | MASK_64BIT, CODE_FOR_cvttss2siq, 0, IX86_BUILTIN_CVTTSS2SI64, 0, 0 }, { MASK_SSE2, CODE_FOR_sse2_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB128, 0, 0 }, { MASK_SSE2, CODE_FOR_sse2_movmskpd, 0, IX86_BUILTIN_MOVMSKPD, 0, 0 }, @@ -13036,18 +12949,23 @@ static const struct builtin_description bdesc_1arg[] = { MASK_SSE2, CODE_FOR_cvtsd2si, 0, IX86_BUILTIN_CVTSD2SI, 0, 0 }, { MASK_SSE2, CODE_FOR_cvttsd2si, 0, IX86_BUILTIN_CVTTSD2SI, 0, 0 }, - { MASK_SSE264, 
CODE_FOR_cvtsd2siq, 0, IX86_BUILTIN_CVTSD2SI64, 0, 0 }, - { MASK_SSE264, CODE_FOR_cvttsd2siq, 0, IX86_BUILTIN_CVTTSD2SI64, 0, 0 }, + { MASK_SSE2 | MASK_64BIT, CODE_FOR_cvtsd2siq, 0, IX86_BUILTIN_CVTSD2SI64, 0, 0 }, + { MASK_SSE2 | MASK_64BIT, CODE_FOR_cvttsd2siq, 0, IX86_BUILTIN_CVTTSD2SI64, 0, 0 }, { MASK_SSE2, CODE_FOR_cvtps2dq, 0, IX86_BUILTIN_CVTPS2DQ, 0, 0 }, { MASK_SSE2, CODE_FOR_cvtps2pd, 0, IX86_BUILTIN_CVTPS2PD, 0, 0 }, { MASK_SSE2, CODE_FOR_cvttps2dq, 0, IX86_BUILTIN_CVTTPS2DQ, 0, 0 }, - { MASK_SSE2, CODE_FOR_sse2_movq, 0, IX86_BUILTIN_MOVQ, 0, 0 } + { MASK_SSE2, CODE_FOR_sse2_movq, 0, IX86_BUILTIN_MOVQ, 0, 0 }, + + /* SSE3 */ + { MASK_SSE3, CODE_FOR_movshdup, 0, IX86_BUILTIN_MOVSHDUP, 0, 0 }, + { MASK_SSE3, CODE_FOR_movsldup, 0, IX86_BUILTIN_MOVSLDUP, 0, 0 }, + { MASK_SSE3, CODE_FOR_movddup, 0, IX86_BUILTIN_MOVDDUP, 0, 0 } }; void -ix86_init_builtins () +ix86_init_builtins (void) { if (TARGET_MMX) ix86_init_mmx_sse_builtins (); @@ -13057,11 +12975,22 @@ ix86_init_builtins () is zero. Otherwise, if TARGET_SSE is not set, only expand the MMX builtins. */ static void -ix86_init_mmx_sse_builtins () +ix86_init_mmx_sse_builtins (void) { const struct builtin_description * d; size_t i; + tree V16QI_type_node = build_vector_type_for_mode (intQI_type_node, V16QImode); + tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode); + tree V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode); + tree V2DI_type_node = build_vector_type_for_mode (intDI_type_node, V2DImode); + tree V2DF_type_node = build_vector_type_for_mode (double_type_node, V2DFmode); + tree V4SF_type_node = build_vector_type_for_mode (float_type_node, V4SFmode); + tree V4SI_type_node = build_vector_type_for_mode (intSI_type_node, V4SImode); + tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode); + tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode); + tree V8HI_type_node = build_vector_type_for_mode (intHI_type_node, V8HImode); + tree pchar_type_node = build_pointer_type (char_type_node); tree pcchar_type_node = build_pointer_type ( build_type_variant (char_type_node, 1, 0)); @@ -13134,6 +13063,13 @@ ix86_init_mmx_sse_builtins () = build_function_type (void_type_node, void_list_node); tree void_ftype_unsigned = build_function_type_list (void_type_node, unsigned_type_node, NULL_TREE); + tree void_ftype_unsigned_unsigned + = build_function_type_list (void_type_node, unsigned_type_node, + unsigned_type_node, NULL_TREE); + tree void_ftype_pcvoid_unsigned_unsigned + = build_function_type_list (void_type_node, const_ptr_type_node, + unsigned_type_node, unsigned_type_node, + NULL_TREE); tree unsigned_ftype_void = build_function_type (unsigned_type_node, void_list_node); tree di_ftype_void @@ -13247,7 +13183,7 @@ ix86_init_mmx_sse_builtins () = build_function_type_list (integer_type_node, V2DF_type_node, NULL_TREE); tree int64_ftype_v2df = build_function_type_list (long_long_integer_type_node, - V2DF_type_node, NULL_TREE); + V2DF_type_node, NULL_TREE); tree v2df_ftype_v2df_int = build_function_type_list (V2DF_type_node, V2DF_type_node, integer_type_node, NULL_TREE); @@ -13355,6 +13291,27 @@ ix86_init_mmx_sse_builtins () tree v2di_ftype_v2di = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE); + tree float80_type; + tree float128_type; + + /* The __float80 type. 
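   (As the code below shows: when long double is already XFmode the
   existing node is simply registered under the new name, and
   otherwise a fresh REAL_TYPE is laid out with TYPE_PRECISION 96,
   i.e. the 80-bit extended format in its padded 12-byte IA-32 memory
   image.)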
*/ + if (TYPE_MODE (long_double_type_node) == XFmode) + (*lang_hooks.types.register_builtin_type) (long_double_type_node, + "__float80"); + else + { + /* The __float80 type. */ + float80_type = make_node (REAL_TYPE); + TYPE_PRECISION (float80_type) = 96; + layout_type (float80_type); + (*lang_hooks.types.register_builtin_type) (float80_type, "__float80"); + } + + float128_type = make_node (REAL_TYPE); + TYPE_PRECISION (float128_type) = 128; + layout_type (float128_type); + (*lang_hooks.types.register_builtin_type) (float128_type, "__float128"); + /* Add all builtins that are more or less simple operations on two operands. */ for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++) @@ -13452,52 +13409,52 @@ ix86_init_mmx_sse_builtins () def_builtin (MASK_MMX, "__builtin_ia32_packssdw", v4hi_ftype_v2si_v2si, IX86_BUILTIN_PACKSSDW); def_builtin (MASK_MMX, "__builtin_ia32_packuswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKUSWB); - def_builtin (MASK_SSE1, "__builtin_ia32_ldmxcsr", void_ftype_unsigned, IX86_BUILTIN_LDMXCSR); - def_builtin (MASK_SSE1, "__builtin_ia32_stmxcsr", unsigned_ftype_void, IX86_BUILTIN_STMXCSR); - def_builtin (MASK_SSE1, "__builtin_ia32_cvtpi2ps", v4sf_ftype_v4sf_v2si, IX86_BUILTIN_CVTPI2PS); - def_builtin (MASK_SSE1, "__builtin_ia32_cvtps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTPS2PI); - def_builtin (MASK_SSE1, "__builtin_ia32_cvtsi2ss", v4sf_ftype_v4sf_int, IX86_BUILTIN_CVTSI2SS); - def_builtin (MASK_SSE164, "__builtin_ia32_cvtsi642ss", v4sf_ftype_v4sf_int64, IX86_BUILTIN_CVTSI642SS); - def_builtin (MASK_SSE1, "__builtin_ia32_cvtss2si", int_ftype_v4sf, IX86_BUILTIN_CVTSS2SI); - def_builtin (MASK_SSE164, "__builtin_ia32_cvtss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTSS2SI64); - def_builtin (MASK_SSE1, "__builtin_ia32_cvttps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTTPS2PI); - def_builtin (MASK_SSE1, "__builtin_ia32_cvttss2si", int_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI); - def_builtin (MASK_SSE164, "__builtin_ia32_cvttss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI64); - - def_builtin (MASK_SSE1 | MASK_3DNOW_A, "__builtin_ia32_pextrw", int_ftype_v4hi_int, IX86_BUILTIN_PEXTRW); - def_builtin (MASK_SSE1 | MASK_3DNOW_A, "__builtin_ia32_pinsrw", v4hi_ftype_v4hi_int_int, IX86_BUILTIN_PINSRW); - - def_builtin (MASK_SSE1 | MASK_3DNOW_A, "__builtin_ia32_maskmovq", void_ftype_v8qi_v8qi_pchar, IX86_BUILTIN_MASKMOVQ); - - def_builtin (MASK_SSE1, "__builtin_ia32_loadaps", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADAPS); - def_builtin (MASK_SSE1, "__builtin_ia32_loadups", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADUPS); - def_builtin (MASK_SSE1, "__builtin_ia32_loadss", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADSS); - def_builtin (MASK_SSE1, "__builtin_ia32_storeaps", void_ftype_pfloat_v4sf, IX86_BUILTIN_STOREAPS); - def_builtin (MASK_SSE1, "__builtin_ia32_storeups", void_ftype_pfloat_v4sf, IX86_BUILTIN_STOREUPS); - def_builtin (MASK_SSE1, "__builtin_ia32_storess", void_ftype_pfloat_v4sf, IX86_BUILTIN_STORESS); - - def_builtin (MASK_SSE1, "__builtin_ia32_loadhps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADHPS); - def_builtin (MASK_SSE1, "__builtin_ia32_loadlps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADLPS); - def_builtin (MASK_SSE1, "__builtin_ia32_storehps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STOREHPS); - def_builtin (MASK_SSE1, "__builtin_ia32_storelps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STORELPS); - - def_builtin (MASK_SSE1, "__builtin_ia32_movmskps", int_ftype_v4sf, IX86_BUILTIN_MOVMSKPS); - def_builtin (MASK_SSE1 | MASK_3DNOW_A, "__builtin_ia32_pmovmskb", int_ftype_v8qi, 
IX86_BUILTIN_PMOVMSKB); - def_builtin (MASK_SSE1, "__builtin_ia32_movntps", void_ftype_pfloat_v4sf, IX86_BUILTIN_MOVNTPS); - def_builtin (MASK_SSE1 | MASK_3DNOW_A, "__builtin_ia32_movntq", void_ftype_pdi_di, IX86_BUILTIN_MOVNTQ); - - def_builtin (MASK_SSE1 | MASK_3DNOW_A, "__builtin_ia32_sfence", void_ftype_void, IX86_BUILTIN_SFENCE); - - def_builtin (MASK_SSE1 | MASK_3DNOW_A, "__builtin_ia32_psadbw", di_ftype_v8qi_v8qi, IX86_BUILTIN_PSADBW); - - def_builtin (MASK_SSE1, "__builtin_ia32_rcpps", v4sf_ftype_v4sf, IX86_BUILTIN_RCPPS); - def_builtin (MASK_SSE1, "__builtin_ia32_rcpss", v4sf_ftype_v4sf, IX86_BUILTIN_RCPSS); - def_builtin (MASK_SSE1, "__builtin_ia32_rsqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTPS); - def_builtin (MASK_SSE1, "__builtin_ia32_rsqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTSS); - def_builtin (MASK_SSE1, "__builtin_ia32_sqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTPS); - def_builtin (MASK_SSE1, "__builtin_ia32_sqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTSS); - - def_builtin (MASK_SSE1, "__builtin_ia32_shufps", v4sf_ftype_v4sf_v4sf_int, IX86_BUILTIN_SHUFPS); + def_builtin (MASK_SSE, "__builtin_ia32_ldmxcsr", void_ftype_unsigned, IX86_BUILTIN_LDMXCSR); + def_builtin (MASK_SSE, "__builtin_ia32_stmxcsr", unsigned_ftype_void, IX86_BUILTIN_STMXCSR); + def_builtin (MASK_SSE, "__builtin_ia32_cvtpi2ps", v4sf_ftype_v4sf_v2si, IX86_BUILTIN_CVTPI2PS); + def_builtin (MASK_SSE, "__builtin_ia32_cvtps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTPS2PI); + def_builtin (MASK_SSE, "__builtin_ia32_cvtsi2ss", v4sf_ftype_v4sf_int, IX86_BUILTIN_CVTSI2SS); + def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtsi642ss", v4sf_ftype_v4sf_int64, IX86_BUILTIN_CVTSI642SS); + def_builtin (MASK_SSE, "__builtin_ia32_cvtss2si", int_ftype_v4sf, IX86_BUILTIN_CVTSS2SI); + def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTSS2SI64); + def_builtin (MASK_SSE, "__builtin_ia32_cvttps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTTPS2PI); + def_builtin (MASK_SSE, "__builtin_ia32_cvttss2si", int_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI); + def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvttss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI64); + + def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pextrw", int_ftype_v4hi_int, IX86_BUILTIN_PEXTRW); + def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pinsrw", v4hi_ftype_v4hi_int_int, IX86_BUILTIN_PINSRW); + + def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_maskmovq", void_ftype_v8qi_v8qi_pchar, IX86_BUILTIN_MASKMOVQ); + + def_builtin (MASK_SSE, "__builtin_ia32_loadaps", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADAPS); + def_builtin (MASK_SSE, "__builtin_ia32_loadups", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADUPS); + def_builtin (MASK_SSE, "__builtin_ia32_loadss", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADSS); + def_builtin (MASK_SSE, "__builtin_ia32_storeaps", void_ftype_pfloat_v4sf, IX86_BUILTIN_STOREAPS); + def_builtin (MASK_SSE, "__builtin_ia32_storeups", void_ftype_pfloat_v4sf, IX86_BUILTIN_STOREUPS); + def_builtin (MASK_SSE, "__builtin_ia32_storess", void_ftype_pfloat_v4sf, IX86_BUILTIN_STORESS); + + def_builtin (MASK_SSE, "__builtin_ia32_loadhps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADHPS); + def_builtin (MASK_SSE, "__builtin_ia32_loadlps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADLPS); + def_builtin (MASK_SSE, "__builtin_ia32_storehps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STOREHPS); + def_builtin (MASK_SSE, "__builtin_ia32_storelps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STORELPS); + + def_builtin (MASK_SSE, 
"__builtin_ia32_movmskps", int_ftype_v4sf, IX86_BUILTIN_MOVMSKPS); + def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pmovmskb", int_ftype_v8qi, IX86_BUILTIN_PMOVMSKB); + def_builtin (MASK_SSE, "__builtin_ia32_movntps", void_ftype_pfloat_v4sf, IX86_BUILTIN_MOVNTPS); + def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_movntq", void_ftype_pdi_di, IX86_BUILTIN_MOVNTQ); + + def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_sfence", void_ftype_void, IX86_BUILTIN_SFENCE); + + def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_psadbw", di_ftype_v8qi_v8qi, IX86_BUILTIN_PSADBW); + + def_builtin (MASK_SSE, "__builtin_ia32_rcpps", v4sf_ftype_v4sf, IX86_BUILTIN_RCPPS); + def_builtin (MASK_SSE, "__builtin_ia32_rcpss", v4sf_ftype_v4sf, IX86_BUILTIN_RCPSS); + def_builtin (MASK_SSE, "__builtin_ia32_rsqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTPS); + def_builtin (MASK_SSE, "__builtin_ia32_rsqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTSS); + def_builtin (MASK_SSE, "__builtin_ia32_sqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTPS); + def_builtin (MASK_SSE, "__builtin_ia32_sqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTSS); + + def_builtin (MASK_SSE, "__builtin_ia32_shufps", v4sf_ftype_v4sf_v4sf_int, IX86_BUILTIN_SHUFPS); /* Original 3DNow! */ def_builtin (MASK_3DNOW, "__builtin_ia32_femms", void_ftype_void, IX86_BUILTIN_FEMMS); @@ -13529,7 +13486,7 @@ ix86_init_mmx_sse_builtins () def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsf", v2sf_ftype_v2sf, IX86_BUILTIN_PSWAPDSF); def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsi", v2si_ftype_v2si, IX86_BUILTIN_PSWAPDSI); - def_builtin (MASK_SSE1, "__builtin_ia32_setzerops", v4sf_ftype_void, IX86_BUILTIN_SSE_ZERO); + def_builtin (MASK_SSE, "__builtin_ia32_setzerops", v4sf_ftype_void, IX86_BUILTIN_SSE_ZERO); /* SSE2 */ def_builtin (MASK_SSE2, "__builtin_ia32_pextrw128", int_ftype_v8hi_int, IX86_BUILTIN_PEXTRW128); @@ -13580,15 +13537,15 @@ ix86_init_mmx_sse_builtins () def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2si", int_ftype_v2df, IX86_BUILTIN_CVTSD2SI); def_builtin (MASK_SSE2, "__builtin_ia32_cvttsd2si", int_ftype_v2df, IX86_BUILTIN_CVTTSD2SI); - def_builtin (MASK_SSE264, "__builtin_ia32_cvtsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTSD2SI64); - def_builtin (MASK_SSE264, "__builtin_ia32_cvttsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTTSD2SI64); + def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTSD2SI64); + def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvttsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTTSD2SI64); def_builtin (MASK_SSE2, "__builtin_ia32_cvtps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTPS2DQ); def_builtin (MASK_SSE2, "__builtin_ia32_cvtps2pd", v2df_ftype_v4sf, IX86_BUILTIN_CVTPS2PD); def_builtin (MASK_SSE2, "__builtin_ia32_cvttps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTTPS2DQ); def_builtin (MASK_SSE2, "__builtin_ia32_cvtsi2sd", v2df_ftype_v2df_int, IX86_BUILTIN_CVTSI2SD); - def_builtin (MASK_SSE264, "__builtin_ia32_cvtsi642sd", v2df_ftype_v2df_int64, IX86_BUILTIN_CVTSI642SD); + def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsi642sd", v2df_ftype_v2df_int64, IX86_BUILTIN_CVTSI642SD); def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2ss", v4sf_ftype_v4sf_v2df, IX86_BUILTIN_CVTSD2SS); def_builtin (MASK_SSE2, "__builtin_ia32_cvtss2sd", v2df_ftype_v2df_v4sf, IX86_BUILTIN_CVTSS2SD); @@ -13612,7 +13569,7 @@ ix86_init_mmx_sse_builtins () def_builtin (MASK_SSE2, "__builtin_ia32_stored", void_ftype_pcint_v4si, IX86_BUILTIN_STORED); def_builtin (MASK_SSE2, "__builtin_ia32_movq", v2di_ftype_v2di, 
IX86_BUILTIN_MOVQ); - def_builtin (MASK_SSE1, "__builtin_ia32_setzero128", v2di_ftype_void, IX86_BUILTIN_CLRTI); + def_builtin (MASK_SSE, "__builtin_ia32_setzero128", v2di_ftype_void, IX86_BUILTIN_CLRTI); def_builtin (MASK_SSE2, "__builtin_ia32_psllw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSLLW128); def_builtin (MASK_SSE2, "__builtin_ia32_pslld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSLLD128); @@ -13639,15 +13596,33 @@ ix86_init_mmx_sse_builtins () def_builtin (MASK_SSE2, "__builtin_ia32_psradi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRADI128); def_builtin (MASK_SSE2, "__builtin_ia32_pmaddwd128", v4si_ftype_v8hi_v8hi, IX86_BUILTIN_PMADDWD128); + + /* Prescott New Instructions. */ + def_builtin (MASK_SSE3, "__builtin_ia32_monitor", + void_ftype_pcvoid_unsigned_unsigned, + IX86_BUILTIN_MONITOR); + def_builtin (MASK_SSE3, "__builtin_ia32_mwait", + void_ftype_unsigned_unsigned, + IX86_BUILTIN_MWAIT); + def_builtin (MASK_SSE3, "__builtin_ia32_movshdup", + v4sf_ftype_v4sf, + IX86_BUILTIN_MOVSHDUP); + def_builtin (MASK_SSE3, "__builtin_ia32_movsldup", + v4sf_ftype_v4sf, + IX86_BUILTIN_MOVSLDUP); + def_builtin (MASK_SSE3, "__builtin_ia32_lddqu", + v16qi_ftype_pcchar, IX86_BUILTIN_LDDQU); + def_builtin (MASK_SSE3, "__builtin_ia32_loadddup", + v2df_ftype_pcdouble, IX86_BUILTIN_LOADDDUP); + def_builtin (MASK_SSE3, "__builtin_ia32_movddup", + v2df_ftype_v2df, IX86_BUILTIN_MOVDDUP); } /* Errors in the source file can cause expand_expr to return const0_rtx where we expect a vector. To avoid crashing, use one of the vector clear instructions. */ static rtx -safe_vector_operand (x, mode) - rtx x; - enum machine_mode mode; +safe_vector_operand (rtx x, enum machine_mode mode) { if (x != const0_rtx) return x; @@ -13666,10 +13641,7 @@ safe_vector_operand (x, mode) /* Subroutine of ix86_expand_builtin to take care of binop insns. */ static rtx -ix86_expand_binop_builtin (icode, arglist, target) - enum insn_code icode; - tree arglist; - rtx target; +ix86_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target) { rtx pat; tree arg0 = TREE_VALUE (arglist); @@ -13699,7 +13671,8 @@ ix86_expand_binop_builtin (icode, arglist, target) /* In case the insn wants input operands in modes different from the result, abort. */ - if (GET_MODE (op0) != mode0 || GET_MODE (op1) != mode1) + if ((GET_MODE (op0) != mode0 && GET_MODE (op0) != VOIDmode) + || (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)) abort (); if (! (*insn_data[icode].operand[1].predicate) (op0, mode0)) @@ -13723,9 +13696,7 @@ ix86_expand_binop_builtin (icode, arglist, target) /* Subroutine of ix86_expand_builtin to take care of stores. */ static rtx -ix86_expand_store_builtin (icode, arglist) - enum insn_code icode; - tree arglist; +ix86_expand_store_builtin (enum insn_code icode, tree arglist) { rtx pat; tree arg0 = TREE_VALUE (arglist); @@ -13750,11 +13721,8 @@ ix86_expand_store_builtin (icode, arglist) /* Subroutine of ix86_expand_builtin to take care of unop insns. */ static rtx -ix86_expand_unop_builtin (icode, arglist, target, do_load) - enum insn_code icode; - tree arglist; - rtx target; - int do_load; +ix86_expand_unop_builtin (enum insn_code icode, tree arglist, + rtx target, int do_load) { rtx pat; tree arg0 = TREE_VALUE (arglist); @@ -13788,10 +13756,7 @@ ix86_expand_unop_builtin (icode, arglist, target, do_load) sqrtss, rsqrtss, rcpss. 
*/ static rtx -ix86_expand_unop1_builtin (icode, arglist, target) - enum insn_code icode; - tree arglist; - rtx target; +ix86_expand_unop1_builtin (enum insn_code icode, tree arglist, rtx target) { rtx pat; tree arg0 = TREE_VALUE (arglist); @@ -13824,10 +13789,8 @@ ix86_expand_unop1_builtin (icode, arglist, target) /* Subroutine of ix86_expand_builtin to take care of comparison insns. */ static rtx -ix86_expand_sse_compare (d, arglist, target) - const struct builtin_description *d; - tree arglist; - rtx target; +ix86_expand_sse_compare (const struct builtin_description *d, tree arglist, + rtx target) { rtx pat; tree arg0 = TREE_VALUE (arglist); @@ -13876,10 +13839,8 @@ ix86_expand_sse_compare (d, arglist, target) /* Subroutine of ix86_expand_builtin to take care of comi insns. */ static rtx -ix86_expand_sse_comi (d, arglist, target) - const struct builtin_description *d; - tree arglist; - rtx target; +ix86_expand_sse_comi (const struct builtin_description *d, tree arglist, + rtx target) { rtx pat; tree arg0 = TREE_VALUE (arglist); @@ -13935,12 +13896,9 @@ ix86_expand_sse_comi (d, arglist, target) IGNORE is nonzero if the value is to be ignored. */ rtx -ix86_expand_builtin (exp, target, subtarget, mode, ignore) - tree exp; - rtx target; - rtx subtarget ATTRIBUTE_UNUSED; - enum machine_mode mode ATTRIBUTE_UNUSED; - int ignore ATTRIBUTE_UNUSED; +ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED, + enum machine_mode mode ATTRIBUTE_UNUSED, + int ignore ATTRIBUTE_UNUSED) { const struct builtin_description *d; size_t i; @@ -13979,8 +13937,8 @@ ix86_expand_builtin (exp, target, subtarget, mode, ignore) op0 = copy_to_mode_reg (mode0, op0); if (! (*insn_data[icode].operand[2].predicate) (op1, mode1)) { - /* @@@ better error message */ - error ("selector must be an immediate"); + error ("selector must be an integer constant in the range 0..%i", + fcode == IX86_BUILTIN_PEXTRW ? 3:7); return gen_reg_rtx (tmode); } if (target == 0 @@ -14015,8 +13973,8 @@ ix86_expand_builtin (exp, target, subtarget, mode, ignore) op1 = copy_to_mode_reg (mode1, op1); if (! (*insn_data[icode].operand[3].predicate) (op2, mode2)) { - /* @@@ better error message */ - error ("selector must be an immediate"); + error ("selector must be an integer constant in the range 0..%i", + fcode == IX86_BUILTIN_PINSRW ? 15:255); return const0_rtx; } if (target == 0 @@ -14090,7 +14048,7 @@ ix86_expand_builtin (exp, target, subtarget, mode, ignore) icode = (fcode == IX86_BUILTIN_LOADHPS ? CODE_FOR_sse_movhps : fcode == IX86_BUILTIN_LOADLPS ? CODE_FOR_sse_movlps : fcode == IX86_BUILTIN_LOADHPD ? CODE_FOR_sse2_movhpd - : CODE_FOR_sse2_movlpd); + : CODE_FOR_sse2_movsd); arg0 = TREE_VALUE (arglist); arg1 = TREE_VALUE (TREE_CHAIN (arglist)); op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); @@ -14119,7 +14077,7 @@ ix86_expand_builtin (exp, target, subtarget, mode, ignore) icode = (fcode == IX86_BUILTIN_STOREHPS ? CODE_FOR_sse_movhps : fcode == IX86_BUILTIN_STORELPS ? CODE_FOR_sse_movlps : fcode == IX86_BUILTIN_STOREHPD ? 
CODE_FOR_sse2_movhpd - : CODE_FOR_sse2_movlpd); + : CODE_FOR_sse2_movsd); arg0 = TREE_VALUE (arglist); arg1 = TREE_VALUE (TREE_CHAIN (arglist)); op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); @@ -14373,7 +14331,7 @@ ix86_expand_builtin (exp, target, subtarget, mode, ignore) expand_expr (arg0, NULL_RTX, VOIDmode, 0)); op0 = gen_reg_rtx (V2DFmode); emit_insn (gen_sse2_loadsd (op0, adjust_address (target, V2DFmode, 0))); - emit_insn (gen_sse2_shufpd (op0, op0, op0, GEN_INT (0))); + emit_insn (gen_sse2_shufpd (op0, op0, op0, const0_rtx)); return op0; case IX86_BUILTIN_SETPD: @@ -14391,7 +14349,7 @@ ix86_expand_builtin (exp, target, subtarget, mode, ignore) case IX86_BUILTIN_LOADRPD: target = ix86_expand_unop_builtin (CODE_FOR_sse2_movapd, arglist, gen_reg_rtx (V2DFmode), 1); - emit_insn (gen_sse2_shufpd (target, target, target, GEN_INT (1))); + emit_insn (gen_sse2_shufpd (target, target, target, const1_rtx)); return target; case IX86_BUILTIN_LOADPD1: @@ -14448,6 +14406,41 @@ ix86_expand_builtin (exp, target, subtarget, mode, ignore) case IX86_BUILTIN_STORED: return ix86_expand_store_builtin (CODE_FOR_sse2_stored, arglist); + case IX86_BUILTIN_MONITOR: + arg0 = TREE_VALUE (arglist); + arg1 = TREE_VALUE (TREE_CHAIN (arglist)); + arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))); + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); + op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0); + if (!REG_P (op0)) + op0 = copy_to_mode_reg (SImode, op0); + if (!REG_P (op1)) + op1 = copy_to_mode_reg (SImode, op1); + if (!REG_P (op2)) + op2 = copy_to_mode_reg (SImode, op2); + emit_insn (gen_monitor (op0, op1, op2)); + return 0; + + case IX86_BUILTIN_MWAIT: + arg0 = TREE_VALUE (arglist); + arg1 = TREE_VALUE (TREE_CHAIN (arglist)); + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); + if (!REG_P (op0)) + op0 = copy_to_mode_reg (SImode, op0); + if (!REG_P (op1)) + op1 = copy_to_mode_reg (SImode, op1); + emit_insn (gen_mwait (op0, op1)); + return 0; + + case IX86_BUILTIN_LOADDDUP: + return ix86_expand_unop_builtin (CODE_FOR_loadddup, arglist, target, 1); + + case IX86_BUILTIN_LDDQU: + return ix86_expand_unop_builtin (CODE_FOR_lddqu, arglist, target, + 1); + default: break; } @@ -14484,9 +14477,7 @@ ix86_expand_builtin (exp, target, subtarget, mode, ignore) /* Store OPERAND to the memory after reload is completed. This means that we can't easily use assign_stack_local. */ rtx -ix86_force_to_memory (mode, operand) - enum machine_mode mode; - rtx operand; +ix86_force_to_memory (enum machine_mode mode, rtx operand) { rtx result; if (!reload_completed) @@ -14565,8 +14556,7 @@ ix86_force_to_memory (mode, operand) /* Free operand from the memory. */ void -ix86_free_from_memory (mode) - enum machine_mode mode; +ix86_free_from_memory (enum machine_mode mode) { if (!TARGET_RED_ZONE) { @@ -14591,9 +14581,7 @@ ix86_free_from_memory (mode) Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and movdf to do mem-to-mem moves through integer regs. */ enum reg_class -ix86_preferred_reload_class (x, class) - rtx x; - enum reg_class class; +ix86_preferred_reload_class (rtx x, enum reg_class class) { if (GET_CODE (x) == CONST_VECTOR && x != CONST0_RTX (GET_MODE (x))) return NO_REGS; @@ -14637,10 +14625,8 @@ ix86_preferred_reload_class (x, class) When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not enforce these sanity checks. 
*/ int -ix86_secondary_memory_needed (class1, class2, mode, strict) - enum reg_class class1, class2; - enum machine_mode mode; - int strict; +ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2, + enum machine_mode mode, int strict) { if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1) || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2) @@ -14667,12 +14653,11 @@ on some machines it is expensive to move between registers if they are not general registers. */ int -ix86_register_move_cost (mode, class1, class2) - enum machine_mode mode; - enum reg_class class1, class2; +ix86_register_move_cost (enum machine_mode mode, enum reg_class class1, + enum reg_class class2) { /* In case we require secondary memory, compute cost of the store followed - by load. In order to avoid bad register allocation choices, we need + by load. In order to avoid bad register allocation choices, we need for this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */ if (ix86_secondary_memory_needed (class1, class2, mode, 0)) @@ -14683,7 +14668,7 @@ MEMORY_MOVE_COST (mode, class1, 1)); cost += MAX (MEMORY_MOVE_COST (mode, class2, 0), MEMORY_MOVE_COST (mode, class2, 1)); - + /* In case of copying from a general purpose register we may emit multiple stores followed by a single load, causing a memory size mismatch stall. Count this as arbitrarily high cost of 20. */ @@ -14714,9 +14699,7 @@ /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */ int -ix86_hard_regno_mode_ok (regno, mode) - int regno; - enum machine_mode mode; +ix86_hard_regno_mode_ok (int regno, enum machine_mode mode) { /* Flags and only flags can only hold CCmode values. */ if (CC_REGNO_P (regno)) @@ -14755,10 +14738,7 @@ Q_REGS classes. */ int -ix86_memory_move_cost (mode, class, in) - enum machine_mode mode; - enum reg_class class; - int in; +ix86_memory_move_cost (enum machine_mode mode, enum reg_class class, int in) { if (FLOAT_CLASS_P (class)) { @@ -14772,7 +14752,6 @@ index = 1; break; case XFmode: - case TFmode: index = 2; break; default: @@ -14832,8 +14811,8 @@ if (mode == TFmode) mode = XFmode; return ((in ? ix86_cost->int_load[2] : ix86_cost->int_store[2]) - * ((int) GET_MODE_SIZE (mode) - + UNITS_PER_WORD -1 ) / UNITS_PER_WORD); + * (((int) GET_MODE_SIZE (mode) + + UNITS_PER_WORD - 1) / UNITS_PER_WORD)); } }
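The MULT case in the hunk that follows counts the bits of a constant multiplier with the value &= value - 1 idiom instead of the old value >>= 1 loop, so only the set bits contribute to the estimated cost. A minimal standalone sketch of the difference (hypothetical helper names, not part of the patch):

    #include <stdio.h>

    /* Illustration only, not GCC code.  Old loop: counts every bit
       position up to the most significant set bit of VALUE.  */
    static int
    count_bits_upto_msb (unsigned long value)
    {
      int nbits;
      for (nbits = 0; value != 0; value >>= 1)
        nbits++;
      return nbits;
    }

    /* New loop: clears the lowest set bit on each iteration, so it
       counts exactly the set bits (a population count).  */
    static int
    count_set_bits (unsigned long value)
    {
      int nbits;
      for (nbits = 0; value != 0; value &= value - 1)
        nbits++;
      return nbits;
    }

    int
    main (void)
    {
      /* For a multiplier of 0x101 the old loop yields 9, the new one 2,
         so mult_init + nbits * mult_bit tracks the real number of
         add/shift steps much more closely.  */
      printf ("%d %d\n", count_bits_upto_msb (0x101), count_set_bits (0x101));
      return 0;
    }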
@@ -14842,10 +14821,7 @@ ix86_memory_move_cost (mode, class, in) scanned. In either case, *TOTAL contains the cost result. */ static bool -ix86_rtx_costs (x, code, outer_code, total) - rtx x; - int code, outer_code; - int *total; +ix86_rtx_costs (rtx x, int code, int outer_code, int *total) { enum machine_mode mode = GET_MODE (x); @@ -14960,25 +14936,54 @@ case MULT: if (FLOAT_MODE_P (mode)) - *total = COSTS_N_INSNS (ix86_cost->fmul); - else if (GET_CODE (XEXP (x, 1)) == CONST_INT) { - unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1)); - int nbits; - - for (nbits = 0; value != 0; value >>= 1) - nbits++; - - *total = COSTS_N_INSNS (ix86_cost->mult_init[MODE_INDEX (mode)] - + nbits * ix86_cost->mult_bit); + *total = COSTS_N_INSNS (ix86_cost->fmul); + return false; } else { - /* This is arbitrary */ - *total = COSTS_N_INSNS (ix86_cost->mult_init[MODE_INDEX (mode)] - + 7 * ix86_cost->mult_bit); + rtx op0 = XEXP (x, 0); + rtx op1 = XEXP (x, 1); + int nbits; + if (GET_CODE (XEXP (x, 1)) == CONST_INT) + { + unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1)); + for (nbits = 0; value != 0; value &= value - 1) + nbits++; + } + else + /* This is arbitrary. */ + nbits = 7; + + /* Compute costs correctly for widening multiplication. */ + if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND) + && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2 + == GET_MODE_SIZE (mode)) + { + int is_mulwiden = 0; + enum machine_mode inner_mode = GET_MODE (op0); + + if (GET_CODE (op0) == GET_CODE (op1)) + is_mulwiden = 1, op1 = XEXP (op1, 0); + else if (GET_CODE (op1) == CONST_INT) + { + if (GET_CODE (op0) == SIGN_EXTEND) + is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode) + == INTVAL (op1); + else + is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode)); + } + + if (is_mulwiden) + op0 = XEXP (op0, 0), mode = GET_MODE (op0); + } + + *total = COSTS_N_INSNS (ix86_cost->mult_init[MODE_INDEX (mode)] + + nbits * ix86_cost->mult_bit) + + rtx_cost (op0, outer_code) + rtx_cost (op1, outer_code); + + return true; } - return false; case DIV: case UDIV: @@ -15053,7 +15058,7 @@ + (rtx_cost (XEXP (x, 0), outer_code) << (GET_MODE (XEXP (x, 0)) != DImode)) + (rtx_cost (XEXP (x, 1), outer_code) - << (GET_MODE (XEXP (x, 1)) != DImode))); + << (GET_MODE (XEXP (x, 1)) != DImode))); return true; } /* FALLTHRU */ @@ -15088,6 +15093,11 @@ *total = COSTS_N_INSNS (ix86_cost->fsqrt); return false; + case UNSPEC: + if (XINT (x, 1) == UNSPEC_TP) + *total = 0; + return false; + default: return false; } @@ -15095,9 +15105,7 @@ #if defined (DO_GLOBAL_CTORS_BODY) && defined (HAS_INIT_SECTION) static void -ix86_svr3_asm_out_constructor (symbol, priority) - rtx symbol; - int priority ATTRIBUTE_UNUSED; +ix86_svr3_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED) { init_section (); fputs ("\tpushl $", asm_out_file); @@ -15114,9 +15122,7 @@ static int current_machopic_label_num; definition of the stub. 
*/ void -machopic_output_stub (file, symb, stub) - FILE *file; - const char *symb, *stub; +machopic_output_stub (FILE *file, const char *symb, const char *stub) { unsigned int length; char *binder_name, *symbol_name, lazy_ptr_name[32]; @@ -15151,9 +15157,9 @@ machopic_output_stub (file, symb, stub) } else fprintf (file, "\tjmp *%s\n", lazy_ptr_name); - + fprintf (file, "%s:\n", binder_name); - + if (MACHOPIC_PURE) { fprintf (file, "\tlea %s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label); @@ -15174,7 +15180,7 @@ machopic_output_stub (file, symb, stub) /* Order the registers for register allocator. */ void -x86_order_regs_for_local_alloc () +x86_order_regs_for_local_alloc (void) { int pos = 0; int i; @@ -15222,12 +15228,9 @@ x86_order_regs_for_local_alloc () /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in struct attribute_spec.handler. */ static tree -ix86_handle_struct_attribute (node, name, args, flags, no_add_attrs) - tree *node; - tree name; - tree args ATTRIBUTE_UNUSED; - int flags ATTRIBUTE_UNUSED; - bool *no_add_attrs; +ix86_handle_struct_attribute (tree *node, tree name, + tree args ATTRIBUTE_UNUSED, + int flags ATTRIBUTE_UNUSED, bool *no_add_attrs) { tree *type = NULL; if (DECL_P (*node)) @@ -15259,8 +15262,7 @@ ix86_handle_struct_attribute (node, name, args, flags, no_add_attrs) } static bool -ix86_ms_bitfield_layout_p (record_type) - tree record_type; +ix86_ms_bitfield_layout_p (tree record_type) { return (TARGET_USE_MS_BITFIELD_LAYOUT && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type))) @@ -15271,18 +15273,17 @@ ix86_ms_bitfield_layout_p (record_type) located on entry to the FUNCTION. */ static rtx -x86_this_parameter (function) - tree function; +x86_this_parameter (tree function) { tree type = TREE_TYPE (function); if (TARGET_64BIT) { - int n = aggregate_value_p (TREE_TYPE (type)) != 0; + int n = aggregate_value_p (TREE_TYPE (type), type) != 0; return gen_rtx_REG (DImode, x86_64_int_parameter_registers[n]); } - if (ix86_fntype_regparm (type) > 0) + if (ix86_function_regparm (type, function) > 0) { tree parm; @@ -15292,12 +15293,17 @@ x86_this_parameter (function) for (; parm; parm = TREE_CHAIN (parm)) if (TREE_VALUE (parm) == void_type_node) break; - /* If not, the this parameter is in %eax. */ + /* If not, the this parameter is in the first argument. */ if (parm) - return gen_rtx_REG (SImode, 0); + { + int regno = 0; + if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type))) + regno = 2; + return gen_rtx_REG (SImode, regno); + } } - if (aggregate_value_p (TREE_TYPE (type))) + if (aggregate_value_p (TREE_TYPE (type), type)) return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 8)); else return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 4)); @@ -15306,18 +15312,16 @@ x86_this_parameter (function) /* Determine whether x86_output_mi_thunk can succeed. */ static bool -x86_can_output_mi_thunk (thunk, delta, vcall_offset, function) - tree thunk ATTRIBUTE_UNUSED; - HOST_WIDE_INT delta ATTRIBUTE_UNUSED; - HOST_WIDE_INT vcall_offset; - tree function; +x86_can_output_mi_thunk (tree thunk ATTRIBUTE_UNUSED, + HOST_WIDE_INT delta ATTRIBUTE_UNUSED, + HOST_WIDE_INT vcall_offset, tree function) { /* 64-bit can handle anything. */ if (TARGET_64BIT) return true; /* For 32-bit, everything's fine if we have one free register. */ - if (ix86_fntype_regparm (TREE_TYPE (function)) < 3) + if (ix86_function_regparm (TREE_TYPE (function), function) < 3) return true; /* Need a free register for vcall_offset. 
*/ @@ -15339,12 +15343,9 @@ *(*this + vcall_offset) should be added to THIS. */ static void -x86_output_mi_thunk (file, thunk, delta, vcall_offset, function) - FILE *file ATTRIBUTE_UNUSED; - tree thunk ATTRIBUTE_UNUSED; - HOST_WIDE_INT delta; - HOST_WIDE_INT vcall_offset; - tree function; +x86_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED, + tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta, + HOST_WIDE_INT vcall_offset, tree function) { rtx xops[3]; rtx this = x86_this_parameter (function); @@ -15391,7 +15392,13 @@ if (TARGET_64BIT) tmp = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 2 /* R10 */); else - tmp = gen_rtx_REG (SImode, 2 /* ECX */); + { + int tmp_regno = 2 /* ECX */; + if (lookup_attribute ("fastcall", + TYPE_ATTRIBUTES (TREE_TYPE (function)))) + tmp_regno = 0 /* EAX */; + tmp = gen_rtx_REG (SImode, tmp_regno); + } xops[0] = gen_rtx_MEM (Pmode, this_reg); xops[1] = tmp; @@ -15425,15 +15432,14 @@ output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops); } - xops[0] = DECL_RTL (function); + xops[0] = XEXP (DECL_RTL (function), 0); if (TARGET_64BIT) { if (!flag_pic || (*targetm.binds_local_p) (function)) output_asm_insn ("jmp\t%P0", xops); else { - tmp = XEXP (xops[0], 0); - tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, tmp), UNSPEC_GOTPCREL); + tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL); tmp = gen_rtx_CONST (Pmode, tmp); tmp = gen_rtx_MEM (QImode, tmp); xops[0] = tmp; @@ -15448,7 +15454,7 @@ #if TARGET_MACHO if (TARGET_MACHO) { - char *ip = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (function)); + const char *ip = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (function)); tmp = gen_rtx_SYMBOL_REF (Pmode, machopic_stub_name (ip)); tmp = gen_rtx_MEM (QImode, tmp); xops[0] = tmp; @@ -15467,10 +15473,20 @@ } } +static void +x86_file_start (void) +{ + default_file_start (); + if (X86_FILE_START_VERSION_DIRECTIVE) + fputs ("\t.version\t\"01.01\"\n", asm_out_file); + if (X86_FILE_START_FLTUSED) + fputs ("\t.global\t__fltused\n", asm_out_file); + if (ix86_asm_dialect == ASM_INTEL) + fputs ("\t.intel_syntax\n", asm_out_file); +} + int -x86_field_alignment (field, computed) - tree field; - int computed; +x86_field_alignment (tree field, int computed) { enum machine_mode mode; tree type = TREE_TYPE (field); @@ -15489,9 +15505,7 @@ /* Output assembler code to FILE to increment profiler label # LABELNO for profiling a function entry. */ void -x86_function_profiler (file, labelno) - FILE *file; - int labelno ATTRIBUTE_UNUSED; +x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED) { if (TARGET_64BIT) if (flag_pic) @@ -15526,22 +15540,130 @@ } } -/* Implement machine specific optimizations. - At the moment we implement single transformation: AMD Athlon works faster +/* We don't have exact information about the insn sizes, but we may assume + quite safely that we are informed about all 1 byte insns and memory + address sizes. This is enough to eliminate unnecessary padding in + 99% of cases. */ + +static int +min_insn_size (rtx insn) +{ + int l = 0; + + if (!INSN_P (insn) || !active_insn_p (insn)) + return 0; + + /* Discard alignments we've emitted and jump instructions. 
*/ + if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE + && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN) + return 0; + if (GET_CODE (insn) == JUMP_INSN + && (GET_CODE (PATTERN (insn)) == ADDR_VEC + || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)) + return 0; + + /* Important case - calls are always 5 bytes. + It is common to have many calls in a row. */ + if (GET_CODE (insn) == CALL_INSN + && symbolic_reference_mentioned_p (PATTERN (insn)) + && !SIBLING_CALL_P (insn)) + return 5; + if (get_attr_length (insn) <= 1) + return 1; + + /* For normal instructions we may rely on the sizes of addresses + and the presence of a symbol to require 4 bytes of encoding. + This is not the case for jumps where references are PC relative. */ + if (GET_CODE (insn) != JUMP_INSN) + { + l = get_attr_length_address (insn); + if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn))) + l = 4; + } + if (l) + return 1 + l; + else + return 2; +} + +/* AMD K8 core mispredicts jumps when there are more than 3 jumps in a 16 byte + window. */ + +static void +ix86_avoid_jump_misspredicts (void) +{ + rtx insn, start = get_insns (); + int nbytes = 0, njumps = 0; + int isjump = 0; + + /* Look for all minimal intervals of instructions containing 4 jumps. + The intervals are bounded by START and INSN. NBYTES is the total + size of instructions in the interval including INSN and not including + START. When NBYTES is smaller than 16 bytes, it is possible + that the end of START and INSN ends up in the same 16byte page. + + The smallest offset in the page at which INSN can start is the case + where START ends at offset 0. Offset of INSN is then NBYTES - sizeof (INSN). + We add p2align to 16byte window with maxskip 15 - NBYTES + sizeof (INSN). + */ + for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) + { + + nbytes += min_insn_size (insn); + if (dump_file) + fprintf (dump_file, "Insn %i estimated to %i bytes\n", + INSN_UID (insn), min_insn_size (insn)); + if ((GET_CODE (insn) == JUMP_INSN + && GET_CODE (PATTERN (insn)) != ADDR_VEC + && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC) + || GET_CODE (insn) == CALL_INSN) + njumps++; + else + continue; + + while (njumps > 3) + { + start = NEXT_INSN (start); + if ((GET_CODE (start) == JUMP_INSN + && GET_CODE (PATTERN (start)) != ADDR_VEC + && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC) + || GET_CODE (start) == CALL_INSN) + njumps--, isjump = 1; + else + isjump = 0; + nbytes -= min_insn_size (start); + } + if (njumps < 0) + abort (); + if (dump_file) + fprintf (dump_file, "Interval %i to %i has %i bytes\n", + INSN_UID (start), INSN_UID (insn), nbytes); + + if (njumps == 3 && isjump && nbytes < 16) + { + int padsize = 15 - nbytes + min_insn_size (insn); + + if (dump_file) + fprintf (dump_file, "Padding insn %i by %i bytes!\n", + INSN_UID (insn), padsize); + emit_insn_before (gen_align (GEN_INT (padsize)), insn); + } + } +}
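The sliding-window bookkeeping above is easier to see on a toy model. The following standalone sketch (hypothetical toy_insn type and sizes, not part of the patch) applies the same rule to an array of instruction sizes and jump flags, printing where a pad would be emitted:

    #include <stdio.h>

    /* Illustration only, not GCC code.  */
    struct toy_insn { int size; int is_jump; };

    static void
    toy_avoid_jumps (struct toy_insn *insn, int n)
    {
      int i, start = 0, nbytes = 0, njumps = 0, isjump = 0;

      for (i = 0; i < n; i++)
        {
          nbytes += insn[i].size;
          if (insn[i].is_jump)
            njumps++;
          else
            continue;

          /* Shrink the interval from the left until it contains at
             most 3 jumps (including the current one).  */
          while (njumps > 3)
            {
              if (insn[start].is_jump)
                njumps--, isjump = 1;
              else
                isjump = 0;
              nbytes -= insn[start].size;
              start++;
            }

          /* A 4th jump would fit into one 16 byte window: pad.  */
          if (njumps == 3 && isjump && nbytes < 16)
            printf ("pad %d bytes before insn %d\n",
                    15 - nbytes + insn[i].size, i);
        }
    }

    int
    main (void)
    {
      struct toy_insn code[] =
        { {5, 1}, {1, 0}, {2, 1}, {2, 1}, {1, 0}, {2, 1} };
      toy_avoid_jumps (code, 6);   /* prints: pad 9 bytes before insn 5 */
      return 0;
    }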
+ +/* AMD Athlon works faster when RET is not the destination of a conditional jump or directly preceded by another jump instruction. We avoid the penalty by inserting a NOP just before the RET instructions in such cases. */ static void -ix86_reorg () +ix86_pad_returns (void) { edge e; - if (!TARGET_ATHLON_K8 || !optimize || optimize_size) - return; for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next) { basic_block bb = e->src; - rtx ret = bb->end; + rtx ret = BB_END (bb); rtx prev; bool replace = false; @@ -15566,7 +15688,7 @@ && ((GET_CODE (prev) == JUMP_INSN && any_condjump_p (prev)) || GET_CODE (prev) == CALL_INSN)) replace = true; - /* Empty functions get branch misspredict even when the jump destination + /* Empty functions get branch mispredict even when the jump destination is not visible to us. */ if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED) replace = true; @@ -15579,11 +15701,21 @@ } } +/* Implement machine specific optimizations. We implement padding of returns + for K8 CPUs and a pass to avoid 4 jumps in a single 16 byte window. */ +static void +ix86_reorg (void) +{ + if (TARGET_ATHLON_K8 && optimize && !optimize_size) + ix86_pad_returns (); + if (TARGET_FOUR_JUMP_LIMIT && optimize && !optimize_size) + ix86_avoid_jump_misspredicts (); +} + /* Return nonzero when a QImode register that must be represented via a REX prefix is used. */ bool -x86_extended_QIreg_mentioned_p (insn) - rtx insn; +x86_extended_QIreg_mentioned_p (rtx insn) { int i; extract_insn_cached (insn); @@ -15597,9 +15729,7 @@ /* Return nonzero when P points to a register encoded via a REX prefix. Called via for_each_rtx. */ static int -extended_reg_mentioned_1 (p, data) - rtx *p; - void *data ATTRIBUTE_UNUSED; +extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED) { unsigned int regno; if (!REG_P (*p)) @@ -15611,24 +15741,27 @@ /* Return true when INSN mentions a register that must be encoded using a REX prefix. */ bool -x86_extended_reg_mentioned_p (insn) - rtx insn; +x86_extended_reg_mentioned_p (rtx insn) { return for_each_rtx (&PATTERN (insn), extended_reg_mentioned_1, NULL); } -/* Generate an unsigned DImode to FP conversion. This is the same code +/* Generate an unsigned DImode/SImode to FP conversion. This is the same code optabs would emit if we didn't have TFmode patterns. */ void -x86_emit_floatuns (operands) - rtx operands[2]; +x86_emit_floatuns (rtx operands[2]) { rtx neglab, donelab, i0, i1, f0, in, out; - enum machine_mode mode; + enum machine_mode mode, inmode; + + inmode = GET_MODE (operands[1]); + if (inmode != SImode + && inmode != DImode) + abort (); out = operands[0]; - in = force_reg (DImode, operands[1]); + in = force_reg (inmode, operands[1]); mode = GET_MODE (out); neglab = gen_label_rtx (); donelab = gen_label_rtx (); @@ -15654,13 +15787,145 @@ /* Return true if we do not know how to pass TYPE solely in registers. */ bool -ix86_must_pass_in_stack (mode, type) - enum machine_mode mode; - tree type; +ix86_must_pass_in_stack (enum machine_mode mode, tree type) { if (default_must_pass_in_stack (mode, type)) return true; return (!TARGET_64BIT && type && mode == TImode); } +/* Initialize vector TARGET via VALS. */ +void +ix86_expand_vector_init (rtx target, rtx vals) +{ + enum machine_mode mode = GET_MODE (target); + int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode)); + int n_elts = (GET_MODE_SIZE (mode) / elt_size); + int i; + + for (i = n_elts - 1; i >= 0; i--) + if (GET_CODE (XVECEXP (vals, 0, i)) != CONST_INT + && GET_CODE (XVECEXP (vals, 0, i)) != CONST_DOUBLE) + break; + + /* A few special cases first... + ... constants are best loaded from the constant pool. 
*/ + if (i < 0) + { + emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0))); + return; + } + + /* ... values where only the first field is non-constant are best loaded + from the pool and overwritten via move later. */ + if (!i) + { + rtx op = simplify_gen_subreg (mode, XVECEXP (vals, 0, 0), + GET_MODE_INNER (mode), 0); + + op = force_reg (mode, op); + XVECEXP (vals, 0, 0) = CONST0_RTX (GET_MODE_INNER (mode)); + emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0))); + switch (GET_MODE (target)) + { + case V2DFmode: + emit_insn (gen_sse2_movsd (target, target, op)); + break; + case V4SFmode: + emit_insn (gen_sse_movss (target, target, op)); + break; + default: + break; + } + return; + } + + /* And the general sequence, built from interleave (unpack) + instructions. */ + switch (GET_MODE (target)) + { + case V2DFmode: + { + rtx vecop0 = + simplify_gen_subreg (V2DFmode, XVECEXP (vals, 0, 0), DFmode, 0); + rtx vecop1 = + simplify_gen_subreg (V2DFmode, XVECEXP (vals, 0, 1), DFmode, 0); + + vecop0 = force_reg (V2DFmode, vecop0); + vecop1 = force_reg (V2DFmode, vecop1); + emit_insn (gen_sse2_unpcklpd (target, vecop0, vecop1)); + } + break; + case V4SFmode: + { + rtx vecop0 = + simplify_gen_subreg (V4SFmode, XVECEXP (vals, 0, 0), SFmode, 0); + rtx vecop1 = + simplify_gen_subreg (V4SFmode, XVECEXP (vals, 0, 1), SFmode, 0); + rtx vecop2 = + simplify_gen_subreg (V4SFmode, XVECEXP (vals, 0, 2), SFmode, 0); + rtx vecop3 = + simplify_gen_subreg (V4SFmode, XVECEXP (vals, 0, 3), SFmode, 0); + rtx tmp1 = gen_reg_rtx (V4SFmode); + rtx tmp2 = gen_reg_rtx (V4SFmode); + + vecop0 = force_reg (V4SFmode, vecop0); + vecop1 = force_reg (V4SFmode, vecop1); + vecop2 = force_reg (V4SFmode, vecop2); + vecop3 = force_reg (V4SFmode, vecop3); + emit_insn (gen_sse_unpcklps (tmp1, vecop1, vecop3)); + emit_insn (gen_sse_unpcklps (tmp2, vecop0, vecop2)); + emit_insn (gen_sse_unpcklps (target, tmp2, tmp1)); + } + break; + default: + abort (); + } +} + +/* Worker function for TARGET_MD_ASM_CLOBBERS. + + We do this in the new i386 backend to maintain source compatibility + with the old cc0-based compiler. */ + +static tree +ix86_md_asm_clobbers (tree clobbers) +{ + clobbers = tree_cons (NULL_TREE, build_string (5, "flags"), + clobbers); + clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"), + clobbers); + clobbers = tree_cons (NULL_TREE, build_string (7, "dirflag"), + clobbers); + return clobbers; +} + +/* Worker function for REVERSE_CONDITION. */ + +enum rtx_code +ix86_reverse_condition (enum rtx_code code, enum machine_mode mode) +{ + return (mode != CCFPmode && mode != CCFPUmode + ? reverse_condition (code) + : reverse_condition_maybe_unordered (code)); +} + +/* Output code to perform an x87 FP register move, from OPERANDS[1] + to OPERANDS[0]. */ + +const char * +output_387_reg_move (rtx insn, rtx *operands) +{ + if (REG_P (operands[1]) + && find_regno_note (insn, REG_DEAD, REGNO (operands[1]))) + { + if (REGNO (operands[0]) == FIRST_STACK_REG + && TARGET_USE_FFREEP) + return "ffreep\t%y0"; + return "fstp\t%y0"; + } + if (STACK_TOP_P (operands[0])) + return "fld%z1\t%y1"; + return "fst\t%y0"; +} + #include "gt-i386.h"
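For reference, the kind of source construct that reaches ix86_expand_vector_init is a GNU C vector initializer. A usage sketch (assumes an SSE2-enabled compile, e.g. -msse2, and a compiler release that accepts non-constant vector initializers; not part of the patch):

    /* Illustration only, not GCC code.  */
    typedef float v4sf __attribute__ ((vector_size (16)));
    typedef double v2df __attribute__ ((vector_size (16)));

    /* All-constant initializers are loaded straight from the constant
       pool; the mixed and fully variable cases go through the
       movss/movsd overwrite and unpack sequences above.  */
    v4sf
    make_v4sf (float a, float b, float c, float d)
    {
      v4sf v = { a, b, c, d };
      return v;
    }

    v2df
    make_v2df (double lo, double hi)
    {
      v2df v = { lo, hi };
      return v;
    }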