X-Git-Url: http://git.sourceforge.jp/view?p=pf3gnuchains%2Fgcc-fork.git;a=blobdiff_plain;f=gcc%2Fbuiltins.c;h=04980ccad55ac59ae2ec30116645d5287190389a;hp=7b24a0ce70395fac8d23069dbdb8ddc231fc5645;hb=760ff94e3788e67aab56bcc7b3dd3507ddda6aca;hpb=0810ff17f49369f44d8739a426fdb77a90ee83f0 diff --git a/gcc/builtins.c b/gcc/builtins.c index 7b24a0ce703..04980ccad55 100644 --- a/gcc/builtins.c +++ b/gcc/builtins.c @@ -1,7 +1,7 @@ /* Expand builtin functions. Copyright (C) 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, - 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 - Free Software Foundation, Inc. + 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, + 2012 Free Software Foundation, Inc. This file is part of GCC. @@ -75,11 +75,7 @@ const char * built_in_names[(int) END_BUILTINS] = /* Setup an array of _DECL trees, make sure each element is initialized to NULL_TREE. */ -tree built_in_decls[(int) END_BUILTINS]; -/* Declarations used when constructing the builtin implicitly in the compiler. - It may be NULL_TREE when this is invalid (for instance runtime is not - required to implement the function call in all cases). */ -tree implicit_built_in_decls[(int) END_BUILTINS]; +builtin_info_type builtin_info; static const char *c_getstr (tree); static rtx c_readstr (const char *, enum machine_mode); @@ -227,16 +223,19 @@ static tree do_mpfr_bessel_n (tree, tree, tree, const REAL_VALUE_TYPE *, bool); static tree do_mpfr_remquo (tree, tree, tree); static tree do_mpfr_lgamma_r (tree, tree, tree); +static void expand_builtin_sync_synchronize (void); /* Return true if NAME starts with __builtin_ or __sync_. */ -bool +static bool is_builtin_name (const char *name) { if (strncmp (name, "__builtin_", 10) == 0) return true; if (strncmp (name, "__sync_", 7) == 0) return true; + if (strncmp (name, "__atomic_", 9) == 0) + return true; return false; } @@ -264,8 +263,15 @@ called_as_built_in (tree node) return is_builtin_name (name); } -/* Return the alignment in bits of EXP, an object. - Don't return more than MAX_ALIGN no matter what. */ +/* Compute values M and N such that M divides (address of EXP - N) and + such that N < M. Store N in *BITPOSP and return M. + + Note that the address (and thus the alignment) computed here is based + on the address to which a symbol resolves, whereas DECL_ALIGN is based + on the address at which an object is actually located. These two + addresses are not always the same. For example, on ARM targets, + the address &foo of a Thumb function foo() has the lowest bit set, + whereas foo() itself starts on an even address. */ unsigned int get_object_alignment_1 (tree exp, unsigned HOST_WIDE_INT *bitposp) @@ -287,7 +293,21 @@ get_object_alignment_1 (tree exp, unsigned HOST_WIDE_INT *bitposp) exp = DECL_INITIAL (exp); if (DECL_P (exp) && TREE_CODE (exp) != LABEL_DECL) - align = DECL_ALIGN (exp); + { + if (TREE_CODE (exp) == FUNCTION_DECL) + { + /* Function addresses can encode extra information besides their + alignment. However, if TARGET_PTRMEMFUNC_VBIT_LOCATION + allows the low bit to be used as a virtual bit, we know + that the address itself must be 2-byte aligned. 
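+	 For example (illustrative): if a Thumb function foo () is placed
+	 at address 0x8000, &foo evaluates to 0x8001, so DECL_ALIGN would
+	 overstate the alignment of the pointer value; under
+	 ptrmemfunc_vbit_in_pfn we may still assume the underlying code
+	 address is 2-byte aligned, and no more.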
*/ + if (TARGET_PTRMEMFUNC_VBIT_LOCATION == ptrmemfunc_vbit_in_pfn) + align = 2 * BITS_PER_UNIT; + else + align = BITS_PER_UNIT; + } + else + align = DECL_ALIGN (exp); + } else if (CONSTANT_CLASS_P (exp)) { align = TYPE_ALIGN (TREE_TYPE (exp)); @@ -320,7 +340,7 @@ get_object_alignment_1 (tree exp, unsigned HOST_WIDE_INT *bitposp) align = MAX (pi->align * BITS_PER_UNIT, align); } else if (TREE_CODE (addr) == ADDR_EXPR) - align = MAX (align, get_object_alignment (TREE_OPERAND (addr, 0), ~0U)); + align = MAX (align, get_object_alignment (TREE_OPERAND (addr, 0))); bitpos += mem_ref_offset (exp).low * BITS_PER_UNIT; } else if (TREE_CODE (exp) == TARGET_MEM_REF) @@ -344,7 +364,7 @@ get_object_alignment_1 (tree exp, unsigned HOST_WIDE_INT *bitposp) align = MAX (pi->align * BITS_PER_UNIT, align); } else if (TREE_CODE (addr) == ADDR_EXPR) - align = MAX (align, get_object_alignment (TREE_OPERAND (addr, 0), ~0U)); + align = MAX (align, get_object_alignment (TREE_OPERAND (addr, 0))); if (TMR_OFFSET (exp)) bitpos += TREE_INT_CST_LOW (TMR_OFFSET (exp)) * BITS_PER_UNIT; if (TMR_INDEX (exp) && TMR_STEP (exp)) @@ -413,11 +433,10 @@ get_object_alignment_1 (tree exp, unsigned HOST_WIDE_INT *bitposp) return align; } -/* Return the alignment in bits of EXP, an object. - Don't return more than MAX_ALIGN no matter what. */ +/* Return the alignment in bits of EXP, an object. */ unsigned int -get_object_alignment (tree exp, unsigned int max_align) +get_object_alignment (tree exp) { unsigned HOST_WIDE_INT bitpos = 0; unsigned int align; @@ -430,51 +449,87 @@ get_object_alignment (tree exp, unsigned int max_align) if (bitpos != 0) align = (bitpos & -bitpos); - return MIN (align, max_align); + return align; } -/* Returns true iff we can trust that alignment information has been - calculated properly. */ +/* Return the alignment of object EXP, also considering its type when we do + not know of explicit misalignment. Only handle MEM_REF and TARGET_MEM_REF. -bool -can_trust_pointer_alignment (void) + ??? Note that, in the general case, the type of an expression is not kept + consistent with misalignment information by the front-end, for example when + taking the address of a member of a packed structure. However, in most of + the cases, expressions have the alignment of their type so we optimistically + fall back to this alignment when we cannot compute a misalignment. */ + +unsigned int +get_object_or_type_alignment (tree exp) { - /* We rely on TER to compute accurate alignment information. */ - return (optimize && flag_tree_ter); + unsigned HOST_WIDE_INT misalign; + unsigned int align = get_object_alignment_1 (exp, &misalign); + + gcc_assert (TREE_CODE (exp) == MEM_REF || TREE_CODE (exp) == TARGET_MEM_REF); + + if (misalign != 0) + align = (misalign & -misalign); + else + align = MAX (TYPE_ALIGN (TREE_TYPE (exp)), align); + + return align; } -/* Return the alignment in bits of EXP, a pointer valued expression. - But don't return more than MAX_ALIGN no matter what. - The alignment returned is, by default, the alignment of the thing that - EXP points to. If it is not a POINTER_TYPE, 0 is returned. +/* For a pointer valued expression EXP compute values M and N such that + M divides (EXP - N) and such that N < M. Store N in *BITPOSP and return M. - Otherwise, look at the expression to see if we can do better, i.e., if the - expression is actually pointing at an object whose alignment is tighter. */ + If EXP is not a pointer, 0 is returned. 
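+   For example (illustrative, assuming BITS_PER_UNIT == 8): if
+   SSA_NAME_PTR_INFO for EXP records align == 8 and misalign == 4, then
+
+     unsigned HOST_WIDE_INT bitpos;
+     unsigned int align = get_pointer_alignment_1 (exp, &bitpos);
+
+   yields align == 64 and bitpos == 32; that is, EXP - 4 is divisible
+   by 8 bytes.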
*/ unsigned int -get_pointer_alignment (tree exp, unsigned int max_align) +get_pointer_alignment_1 (tree exp, unsigned HOST_WIDE_INT *bitposp) { STRIP_NOPS (exp); if (TREE_CODE (exp) == ADDR_EXPR) - return get_object_alignment (TREE_OPERAND (exp, 0), max_align); + return get_object_alignment_1 (TREE_OPERAND (exp, 0), bitposp); else if (TREE_CODE (exp) == SSA_NAME && POINTER_TYPE_P (TREE_TYPE (exp))) { struct ptr_info_def *pi = SSA_NAME_PTR_INFO (exp); - unsigned align; if (!pi) - return BITS_PER_UNIT; - if (pi->misalign != 0) - align = (pi->misalign & -pi->misalign); - else - align = pi->align; - return MIN (max_align, align * BITS_PER_UNIT); + { + *bitposp = 0; + return BITS_PER_UNIT; + } + *bitposp = pi->misalign * BITS_PER_UNIT; + return pi->align * BITS_PER_UNIT; } + *bitposp = 0; return POINTER_TYPE_P (TREE_TYPE (exp)) ? BITS_PER_UNIT : 0; } +/* Return the alignment in bits of EXP, a pointer valued expression. + The alignment returned is, by default, the alignment of the thing that + EXP points to. If it is not a POINTER_TYPE, 0 is returned. + + Otherwise, look at the expression to see if we can do better, i.e., if the + expression is actually pointing at an object whose alignment is tighter. */ + +unsigned int +get_pointer_alignment (tree exp) +{ + unsigned HOST_WIDE_INT bitpos = 0; + unsigned int align; + + align = get_pointer_alignment_1 (exp, &bitpos); + + /* align and bitpos now specify known low bits of the pointer. + ptr & (align - 1) == bitpos. */ + + if (bitpos != 0) + align = (bitpos & -bitpos); + + return align; +} + /* Compute the length of a C string. TREE_STRING_LENGTH is not the right way, because it could contain a zero byte in the middle. TREE_STRING_LENGTH is the size of the character array, not the string. @@ -1217,9 +1272,8 @@ get_memory_rtx (tree exp, tree len) gcc_assert (TREE_CODE (inner) == COMPONENT_REF); - if (MEM_OFFSET (mem) - && CONST_INT_P (MEM_OFFSET (mem))) - offset = INTVAL (MEM_OFFSET (mem)); + if (MEM_OFFSET_KNOWN_P (mem)) + offset = MEM_OFFSET (mem); if (offset >= 0 && len && host_integerp (len, 0)) length = tree_low_cst (len, 0); @@ -1274,11 +1328,14 @@ get_memory_rtx (tree exp, tree len) if (mem_expr != MEM_EXPR (mem)) { set_mem_expr (mem, mem_expr); - set_mem_offset (mem, offset >= 0 ? GEN_INT (offset) : NULL_RTX); + if (offset >= 0) + set_mem_offset (mem, offset); + else + clear_mem_offset (mem); } } set_mem_alias_set (mem, 0); - set_mem_size (mem, NULL_RTX); + clear_mem_size (mem); } return mem; @@ -1669,6 +1726,7 @@ expand_builtin_apply (rtx function, rtx arguments, rtx argsize) else #endif emit_stack_restore (SAVE_BLOCK, old_stack_level); + fixup_args_size_notes (call_insn, get_last_insn(), 0); OK_DEFER_POP; @@ -1779,17 +1837,15 @@ expand_builtin_classify_type (tree exp) fcode = BUILT_IN_MATHFN##_R; fcodef = BUILT_IN_MATHFN##F_R ; \ fcodel = BUILT_IN_MATHFN##L_R ; break; -/* Return mathematic function equivalent to FN but operating directly - on TYPE, if available. If IMPLICIT is true find the function in - implicit_built_in_decls[], otherwise use built_in_decls[]. If we - can't do the conversion, return zero. */ +/* Return mathematic function equivalent to FN but operating directly on TYPE, + if available. If IMPLICIT is true use the implicit builtin declaration, + otherwise use the explicit declaration. If we can't do the conversion, + return zero. 
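+   For example (illustrative): mathfn_built_in_1 (float_type_node,
+   BUILT_IN_SIN, true) maps BUILT_IN_SIN to BUILT_IN_SINF and returns
+   builtin_decl_explicit (BUILT_IN_SINF), but only if
+   builtin_decl_implicit_p (BUILT_IN_SINF) holds; otherwise it returns
+   NULL_TREE.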
*/ static tree -mathfn_built_in_1 (tree type, enum built_in_function fn, bool implicit) +mathfn_built_in_1 (tree type, enum built_in_function fn, bool implicit_p) { - tree const *const fn_arr - = implicit ? implicit_built_in_decls : built_in_decls; - enum built_in_function fcode, fcodef, fcodel; + enum built_in_function fcode, fcodef, fcodel, fcode2; switch (fn) { @@ -1826,7 +1882,11 @@ mathfn_built_in_1 (tree type, enum built_in_function fn, bool implicit) CASE_MATHFN (BUILT_IN_HUGE_VAL) CASE_MATHFN (BUILT_IN_HYPOT) CASE_MATHFN (BUILT_IN_ILOGB) + CASE_MATHFN (BUILT_IN_ICEIL) + CASE_MATHFN (BUILT_IN_IFLOOR) CASE_MATHFN (BUILT_IN_INF) + CASE_MATHFN (BUILT_IN_IRINT) + CASE_MATHFN (BUILT_IN_IROUND) CASE_MATHFN (BUILT_IN_ISINF) CASE_MATHFN (BUILT_IN_J0) CASE_MATHFN (BUILT_IN_J1) @@ -1882,13 +1942,18 @@ mathfn_built_in_1 (tree type, enum built_in_function fn, bool implicit) } if (TYPE_MAIN_VARIANT (type) == double_type_node) - return fn_arr[fcode]; + fcode2 = fcode; else if (TYPE_MAIN_VARIANT (type) == float_type_node) - return fn_arr[fcodef]; + fcode2 = fcodef; else if (TYPE_MAIN_VARIANT (type) == long_double_type_node) - return fn_arr[fcodel]; + fcode2 = fcodel; else return NULL_TREE; + + if (implicit_p && !builtin_decl_implicit_p (fcode2)) + return NULL_TREE; + + return builtin_decl_explicit (fcode2); } /* Like mathfn_built_in_1(), but always use the implicit array. */ @@ -2538,11 +2603,11 @@ expand_builtin_cexpi (tree exp, rtx target) rtx op1a, op2a; if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CEXPIF) - fn = built_in_decls[BUILT_IN_SINCOSF]; + fn = builtin_decl_explicit (BUILT_IN_SINCOSF); else if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CEXPI) - fn = built_in_decls[BUILT_IN_SINCOS]; + fn = builtin_decl_explicit (BUILT_IN_SINCOS); else if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CEXPIL) - fn = built_in_decls[BUILT_IN_SINCOSL]; + fn = builtin_decl_explicit (BUILT_IN_SINCOSL); else gcc_unreachable (); @@ -2564,11 +2629,11 @@ expand_builtin_cexpi (tree exp, rtx target) tree ctype = build_complex_type (type); if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CEXPIF) - fn = built_in_decls[BUILT_IN_CEXPF]; + fn = builtin_decl_explicit (BUILT_IN_CEXPF); else if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CEXPI) - fn = built_in_decls[BUILT_IN_CEXP]; + fn = builtin_decl_explicit (BUILT_IN_CEXP); else if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CEXPIL) - fn = built_in_decls[BUILT_IN_CEXPL]; + fn = builtin_decl_explicit (BUILT_IN_CEXPL); else gcc_unreachable (); @@ -2651,12 +2716,14 @@ expand_builtin_int_roundingfn (tree exp, rtx target) switch (DECL_FUNCTION_CODE (fndecl)) { + CASE_FLT_FN (BUILT_IN_ICEIL): CASE_FLT_FN (BUILT_IN_LCEIL): CASE_FLT_FN (BUILT_IN_LLCEIL): builtin_optab = lceil_optab; fallback_fn = BUILT_IN_CEIL; break; + CASE_FLT_FN (BUILT_IN_IFLOOR): CASE_FLT_FN (BUILT_IN_LFLOOR): CASE_FLT_FN (BUILT_IN_LLFLOOR): builtin_optab = lfloor_optab; @@ -2709,26 +2776,32 @@ expand_builtin_int_roundingfn (tree exp, rtx target) switch (DECL_FUNCTION_CODE (fndecl)) { + case BUILT_IN_ICEIL: case BUILT_IN_LCEIL: case BUILT_IN_LLCEIL: name = "ceil"; break; + case BUILT_IN_ICEILF: case BUILT_IN_LCEILF: case BUILT_IN_LLCEILF: name = "ceilf"; break; + case BUILT_IN_ICEILL: case BUILT_IN_LCEILL: case BUILT_IN_LLCEILL: name = "ceill"; break; + case BUILT_IN_IFLOOR: case BUILT_IN_LFLOOR: case BUILT_IN_LLFLOOR: name = "floor"; break; + case BUILT_IN_IFLOORF: case BUILT_IN_LFLOORF: case BUILT_IN_LLFLOORF: name = "floorf"; break; + case BUILT_IN_IFLOORL: case BUILT_IN_LFLOORL: case BUILT_IN_LLFLOORL: name = "floorl"; @@ 
-2768,10 +2841,7 @@ expand_builtin_int_roundingfn_2 (tree exp, rtx target) tree fndecl = get_callee_fndecl (exp); tree arg; enum machine_mode mode; - - /* There's no easy way to detect the case we need to set EDOM. */ - if (flag_errno_math) - return NULL_RTX; + enum built_in_function fallback_fn = BUILT_IN_NONE; if (!validate_arglist (exp, REAL_TYPE, VOID_TYPE)) gcc_unreachable (); @@ -2780,43 +2850,79 @@ expand_builtin_int_roundingfn_2 (tree exp, rtx target) switch (DECL_FUNCTION_CODE (fndecl)) { + CASE_FLT_FN (BUILT_IN_IRINT): + fallback_fn = BUILT_IN_LRINT; + /* FALLTHRU */ CASE_FLT_FN (BUILT_IN_LRINT): CASE_FLT_FN (BUILT_IN_LLRINT): - builtin_optab = lrint_optab; break; + builtin_optab = lrint_optab; + break; + + CASE_FLT_FN (BUILT_IN_IROUND): + fallback_fn = BUILT_IN_LROUND; + /* FALLTHRU */ CASE_FLT_FN (BUILT_IN_LROUND): CASE_FLT_FN (BUILT_IN_LLROUND): - builtin_optab = lround_optab; break; + builtin_optab = lround_optab; + break; + default: gcc_unreachable (); } + /* There's no easy way to detect the case we need to set EDOM. */ + if (flag_errno_math && fallback_fn == BUILT_IN_NONE) + return NULL_RTX; + /* Make a suitable register to place result in. */ mode = TYPE_MODE (TREE_TYPE (exp)); - target = gen_reg_rtx (mode); + /* There's no easy way to detect the case we need to set EDOM. */ + if (!flag_errno_math) + { + target = gen_reg_rtx (mode); - /* Wrap the computation of the argument in a SAVE_EXPR, as we may - need to expand the argument again. This way, we will not perform - side-effects more the once. */ - CALL_EXPR_ARG (exp, 0) = arg = builtin_save_expr (arg); + /* Wrap the computation of the argument in a SAVE_EXPR, as we may + need to expand the argument again. This way, we will not perform + side-effects more the once. */ + CALL_EXPR_ARG (exp, 0) = arg = builtin_save_expr (arg); - op0 = expand_expr (arg, NULL, VOIDmode, EXPAND_NORMAL); + op0 = expand_expr (arg, NULL, VOIDmode, EXPAND_NORMAL); - start_sequence (); + start_sequence (); - if (expand_sfix_optab (target, op0, builtin_optab)) - { - /* Output the entire sequence. */ - insns = get_insns (); + if (expand_sfix_optab (target, op0, builtin_optab)) + { + /* Output the entire sequence. */ + insns = get_insns (); + end_sequence (); + emit_insn (insns); + return target; + } + + /* If we were unable to expand via the builtin, stop the sequence + (without outputting the insns) and call to the library function + with the stabilized argument list. */ end_sequence (); - emit_insn (insns); - return target; } - /* If we were unable to expand via the builtin, stop the sequence - (without outputting the insns) and call to the library function - with the stabilized argument list. */ - end_sequence (); + if (fallback_fn != BUILT_IN_NONE) + { + /* Fall back to rounding to long int. Use implicit_p 0 - for non-C99 + targets, (int) round (x) should never be transformed into + BUILT_IN_IROUND and if __builtin_iround is called directly, emit + a call to lround in the hope that the target provides at least some + C99 functions. This should result in the best user experience for + not full C99 targets. 
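+	 E.g. (illustrative): on a target where the lround optab cannot
+	 expand, a direct call to __builtin_iround (x) is emitted as the
+	 library call lround (x) with the result converted back to int,
+	 instead of failing outright.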
*/ + tree fallback_fndecl = mathfn_built_in_1 (TREE_TYPE (arg), + fallback_fn, 0); + + exp = build_call_nofold_loc (EXPR_LOCATION (exp), + fallback_fndecl, 1, arg); + + target = expand_call (exp, NULL_RTX, target == const0_rtx); + return convert_to_mode (mode, target, 0); + } target = expand_call (exp, target, target == const0_rtx); @@ -2903,7 +3009,7 @@ expand_builtin_strlen (tree exp, rtx target, return expand_expr (len, target, target_mode, EXPAND_NORMAL); } - align = get_pointer_alignment (src, BIGGEST_ALIGNMENT) / BITS_PER_UNIT; + align = get_pointer_alignment (src) / BITS_PER_UNIT; /* If SRC is not a pointer type, don't do this operation inline. */ if (align == 0) @@ -2939,9 +3045,16 @@ expand_builtin_strlen (tree exp, rtx target, /* Now that we are assured of success, expand the source. */ start_sequence (); - pat = expand_expr (src, src_reg, ptr_mode, EXPAND_NORMAL); + pat = expand_expr (src, src_reg, Pmode, EXPAND_NORMAL); if (pat != src_reg) - emit_move_insn (src_reg, pat); + { +#ifdef POINTERS_EXTEND_UNSIGNED + if (GET_MODE (pat) != Pmode) + pat = convert_to_mode (Pmode, pat, + POINTERS_EXTEND_UNSIGNED); +#endif + emit_move_insn (src_reg, pat); + } pat = get_insns (); end_sequence (); @@ -2996,9 +3109,8 @@ expand_builtin_memcpy (tree exp, rtx target) tree src = CALL_EXPR_ARG (exp, 1); tree len = CALL_EXPR_ARG (exp, 2); const char *src_str; - unsigned int src_align = get_pointer_alignment (src, BIGGEST_ALIGNMENT); - unsigned int dest_align - = get_pointer_alignment (dest, BIGGEST_ALIGNMENT); + unsigned int src_align = get_pointer_alignment (src); + unsigned int dest_align = get_pointer_alignment (dest); rtx dest_mem, src_mem, dest_addr, len_rtx; HOST_WIDE_INT expected_size = -1; unsigned int expected_align = 0; @@ -3095,9 +3207,9 @@ expand_builtin_mempcpy_args (tree dest, tree src, tree len, rtx target, enum machine_mode mode, int endp) { /* If return value is ignored, transform mempcpy into memcpy. */ - if (target == const0_rtx && implicit_built_in_decls[BUILT_IN_MEMCPY]) + if (target == const0_rtx && builtin_decl_implicit_p (BUILT_IN_MEMCPY)) { - tree fn = implicit_built_in_decls[BUILT_IN_MEMCPY]; + tree fn = builtin_decl_implicit (BUILT_IN_MEMCPY); tree result = build_call_nofold_loc (UNKNOWN_LOCATION, fn, 3, dest, src, len); return expand_expr (result, target, mode, EXPAND_NORMAL); @@ -3105,9 +3217,8 @@ expand_builtin_mempcpy_args (tree dest, tree src, tree len, else { const char *src_str; - unsigned int src_align = get_pointer_alignment (src, BIGGEST_ALIGNMENT); - unsigned int dest_align - = get_pointer_alignment (dest, BIGGEST_ALIGNMENT); + unsigned int src_align = get_pointer_alignment (src); + unsigned int dest_align = get_pointer_alignment (dest); rtx dest_mem, src_mem, len_rtx; /* If either SRC or DEST is not a pointer type, don't do this @@ -3259,9 +3370,9 @@ expand_builtin_stpcpy (tree exp, rtx target, enum machine_mode mode) src = CALL_EXPR_ARG (exp, 1); /* If return value is ignored, transform stpcpy into strcpy. */ - if (target == const0_rtx && implicit_built_in_decls[BUILT_IN_STRCPY]) + if (target == const0_rtx && builtin_decl_implicit (BUILT_IN_STRCPY)) { - tree fn = implicit_built_in_decls[BUILT_IN_STRCPY]; + tree fn = builtin_decl_implicit (BUILT_IN_STRCPY); tree result = build_call_nofold_loc (loc, fn, 2, dst, src); return expand_expr (result, target, mode, EXPAND_NORMAL); } @@ -3360,8 +3471,7 @@ expand_builtin_strncpy (tree exp, rtx target) use store_by_pieces, if it fails, punt. 
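     E.g. (illustrative): for char buf[6], __builtin_strncpy (buf, "ab", 5)
     has SLEN < LEN, so the expansion must also zero-pad, writing the five
     bytes "ab\0\0\0" via store_by_pieces.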
*/ if (tree_int_cst_lt (slen, len)) { - unsigned int dest_align - = get_pointer_alignment (dest, BIGGEST_ALIGNMENT); + unsigned int dest_align = get_pointer_alignment (dest); const char *p = c_getstr (src); rtx dest_mem; @@ -3465,7 +3575,7 @@ expand_builtin_memset_args (tree dest, tree val, tree len, HOST_WIDE_INT expected_size = -1; unsigned int expected_align = 0; - dest_align = get_pointer_alignment (dest, BIGGEST_ALIGNMENT); + dest_align = get_pointer_alignment (dest); /* If DEST is not a pointer type, don't do this operation in-line. */ if (dest_align == 0) @@ -3599,14 +3709,15 @@ expand_builtin_bzero (tree exp) calling bzero instead of memset. */ return expand_builtin_memset_args (dest, integer_zero_node, - fold_convert_loc (loc, sizetype, size), + fold_convert_loc (loc, + size_type_node, size), const0_rtx, VOIDmode, exp); } /* Expand expression EXP, which is a call to the memcmp built-in function. - Return NULL_RTX if we failed and the - caller should emit a normal call, otherwise try to get the result in - TARGET, if convenient (and in mode MODE, if that's convenient). */ + Return NULL_RTX if we failed and the caller should emit a normal call, + otherwise try to get the result in TARGET, if convenient (and in mode + MODE, if that's convenient). */ static rtx expand_builtin_memcmp (tree exp, ATTRIBUTE_UNUSED rtx target, @@ -3618,7 +3729,10 @@ expand_builtin_memcmp (tree exp, ATTRIBUTE_UNUSED rtx target, POINTER_TYPE, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) return NULL_RTX; -#if defined HAVE_cmpmemsi || defined HAVE_cmpstrnsi + /* Note: The cmpstrnsi pattern, if it exists, is not suitable for + implementing memcmp because it will stop if it encounters two + zero bytes. */ +#if defined HAVE_cmpmemsi { rtx arg1_rtx, arg2_rtx, arg3_rtx; rtx result; @@ -3627,22 +3741,13 @@ expand_builtin_memcmp (tree exp, ATTRIBUTE_UNUSED rtx target, tree arg2 = CALL_EXPR_ARG (exp, 1); tree len = CALL_EXPR_ARG (exp, 2); - unsigned int arg1_align - = get_pointer_alignment (arg1, BIGGEST_ALIGNMENT) / BITS_PER_UNIT; - unsigned int arg2_align - = get_pointer_alignment (arg2, BIGGEST_ALIGNMENT) / BITS_PER_UNIT; + unsigned int arg1_align = get_pointer_alignment (arg1) / BITS_PER_UNIT; + unsigned int arg2_align = get_pointer_alignment (arg2) / BITS_PER_UNIT; enum machine_mode insn_mode; -#ifdef HAVE_cmpmemsi if (HAVE_cmpmemsi) insn_mode = insn_data[(int) CODE_FOR_cmpmemsi].operand[0].mode; else -#endif -#ifdef HAVE_cmpstrnsi - if (HAVE_cmpstrnsi) - insn_mode = insn_data[(int) CODE_FOR_cmpstrnsi].operand[0].mode; - else -#endif return NULL_RTX; /* If we don't have POINTER_TYPE, call the function. */ @@ -3663,22 +3768,14 @@ expand_builtin_memcmp (tree exp, ATTRIBUTE_UNUSED rtx target, /* Set MEM_SIZE as appropriate. */ if (CONST_INT_P (arg3_rtx)) { - set_mem_size (arg1_rtx, arg3_rtx); - set_mem_size (arg2_rtx, arg3_rtx); + set_mem_size (arg1_rtx, INTVAL (arg3_rtx)); + set_mem_size (arg2_rtx, INTVAL (arg3_rtx)); } -#ifdef HAVE_cmpmemsi if (HAVE_cmpmemsi) insn = gen_cmpmemsi (result, arg1_rtx, arg2_rtx, arg3_rtx, GEN_INT (MIN (arg1_align, arg2_align))); else -#endif -#ifdef HAVE_cmpstrnsi - if (HAVE_cmpstrnsi) - insn = gen_cmpstrnsi (result, arg1_rtx, arg2_rtx, arg3_rtx, - GEN_INT (MIN (arg1_align, arg2_align))); - else -#endif gcc_unreachable (); if (insn) @@ -3704,7 +3801,7 @@ expand_builtin_memcmp (tree exp, ATTRIBUTE_UNUSED rtx target, else return convert_to_mode (mode, result, 0); } -#endif +#endif /* HAVE_cmpmemsi. 
*/ return NULL_RTX; } @@ -3729,10 +3826,8 @@ expand_builtin_strcmp (tree exp, ATTRIBUTE_UNUSED rtx target) tree arg1 = CALL_EXPR_ARG (exp, 0); tree arg2 = CALL_EXPR_ARG (exp, 1); - unsigned int arg1_align - = get_pointer_alignment (arg1, BIGGEST_ALIGNMENT) / BITS_PER_UNIT; - unsigned int arg2_align - = get_pointer_alignment (arg2, BIGGEST_ALIGNMENT) / BITS_PER_UNIT; + unsigned int arg1_align = get_pointer_alignment (arg1) / BITS_PER_UNIT; + unsigned int arg2_align = get_pointer_alignment (arg2) / BITS_PER_UNIT; /* If we don't have POINTER_TYPE, call the function. */ if (arg1_align == 0 || arg2_align == 0) @@ -3880,10 +3975,8 @@ expand_builtin_strncmp (tree exp, ATTRIBUTE_UNUSED rtx target, tree arg2 = CALL_EXPR_ARG (exp, 1); tree arg3 = CALL_EXPR_ARG (exp, 2); - unsigned int arg1_align - = get_pointer_alignment (arg1, BIGGEST_ALIGNMENT) / BITS_PER_UNIT; - unsigned int arg2_align - = get_pointer_alignment (arg2, BIGGEST_ALIGNMENT) / BITS_PER_UNIT; + unsigned int arg1_align = get_pointer_alignment (arg1) / BITS_PER_UNIT; + unsigned int arg2_align = get_pointer_alignment (arg2) / BITS_PER_UNIT; enum machine_mode insn_mode = insn_data[(int) CODE_FOR_cmpstrnsi].operand[0].mode; @@ -4208,16 +4301,13 @@ std_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p, && !integer_zerop (TYPE_SIZE (type))) { t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp, - fold_build2 (POINTER_PLUS_EXPR, - TREE_TYPE (valist), - valist_tmp, size_int (boundary - 1))); + fold_build_pointer_plus_hwi (valist_tmp, boundary - 1)); gimplify_and_add (t, pre_p); - t = fold_convert (sizetype, valist_tmp); t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp, - fold_convert (TREE_TYPE (valist), - fold_build2 (BIT_AND_EXPR, sizetype, t, - size_int (-boundary)))); + fold_build2 (BIT_AND_EXPR, TREE_TYPE (valist), + valist_tmp, + build_int_cst (TREE_TYPE (valist), -boundary))); gimplify_and_add (t, pre_p); } else @@ -4249,12 +4339,11 @@ std_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p, rounded_size, size_int (align)); t = fold_build3 (COND_EXPR, sizetype, t, size_zero_node, size_binop (MINUS_EXPR, rounded_size, type_size)); - addr = fold_build2 (POINTER_PLUS_EXPR, - TREE_TYPE (addr), addr, t); + addr = fold_build_pointer_plus (addr, t); } /* Compute new value for AP. */ - t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (valist), valist_tmp, rounded_size); + t = fold_build_pointer_plus (valist_tmp, rounded_size); t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t); gimplify_and_add (t, pre_p); @@ -4342,7 +4431,7 @@ gimplify_va_arg_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p) expression to exit or longjmp. */ gimplify_and_add (valist, pre_p); t = build_call_expr_loc (loc, - implicit_built_in_decls[BUILT_IN_TRAP], 0); + builtin_decl_implicit (BUILT_IN_TRAP), 0); gimplify_and_add (t, pre_p); /* This is dead code, but go ahead and finish so that the @@ -4505,20 +4594,33 @@ expand_builtin_alloca (tree exp, bool cannot_accumulate) { rtx op0; rtx result; + bool valid_arglist; + unsigned int align; + bool alloca_with_align = (DECL_FUNCTION_CODE (get_callee_fndecl (exp)) + == BUILT_IN_ALLOCA_WITH_ALIGN); - /* Emit normal call if marked not-inlineable. */ - if (CALL_CANNOT_INLINE_P (exp)) + /* Emit normal call if we use mudflap. */ + if (flag_mudflap) return NULL_RTX; - if (!validate_arglist (exp, INTEGER_TYPE, VOID_TYPE)) + valid_arglist + = (alloca_with_align + ? 
validate_arglist (exp, INTEGER_TYPE, INTEGER_TYPE, VOID_TYPE) + : validate_arglist (exp, INTEGER_TYPE, VOID_TYPE)); + + if (!valid_arglist) return NULL_RTX; /* Compute the argument. */ op0 = expand_normal (CALL_EXPR_ARG (exp, 0)); + /* Compute the alignment. */ + align = (alloca_with_align + ? TREE_INT_CST_LOW (CALL_EXPR_ARG (exp, 1)) + : BIGGEST_ALIGNMENT); + /* Allocate the desired space. */ - result = allocate_dynamic_stack_space (op0, 0, BIGGEST_ALIGNMENT, - cannot_accumulate); + result = allocate_dynamic_stack_space (op0, 0, align, cannot_accumulate); result = convert_memory_address (ptr_mode, result); return result; @@ -4571,7 +4673,7 @@ expand_builtin_unop (enum machine_mode target_mode, tree exp, rtx target, /* Compute op, into TARGET if possible. Set TARGET to wherever the result comes back. */ target = expand_unop (TYPE_MODE (TREE_TYPE (CALL_EXPR_ARG (exp, 0))), - op_optab, op0, target, 1); + op_optab, op0, target, op_optab != clrsb_optab); gcc_assert (target); return convert_to_mode (target_mode, target, 0); @@ -4597,6 +4699,23 @@ expand_builtin_expect (tree exp, rtx target) return target; } +/* Expand a call to __builtin_assume_aligned. We just return our first + argument as the builtin_assume_aligned semantic should've been already + executed by CCP. */ + +static rtx +expand_builtin_assume_aligned (tree exp, rtx target) +{ + if (call_expr_nargs (exp) < 2) + return const0_rtx; + target = expand_expr (CALL_EXPR_ARG (exp, 0), target, VOIDmode, + EXPAND_NORMAL); + gcc_assert (!TREE_SIDE_EFFECTS (CALL_EXPR_ARG (exp, 1)) + && (call_expr_nargs (exp) < 3 + || !TREE_SIDE_EFFECTS (CALL_EXPR_ARG (exp, 2)))); + return target; +} + void expand_builtin_trap (void) { @@ -4764,7 +4883,7 @@ round_trampoline_addr (rtx tramp) } static rtx -expand_builtin_init_trampoline (tree exp) +expand_builtin_init_trampoline (tree exp, bool onstack) { tree t_tramp, t_func, t_chain; rtx m_tramp, r_tramp, r_chain, tmp; @@ -4781,19 +4900,22 @@ expand_builtin_init_trampoline (tree exp) m_tramp = gen_rtx_MEM (BLKmode, r_tramp); MEM_NOTRAP_P (m_tramp) = 1; - /* The TRAMP argument should be the address of a field within the - local function's FRAME decl. Let's see if we can fill in the - to fill in the MEM_ATTRs for this memory. */ + /* If ONSTACK, the TRAMP argument should be the address of a field + within the local function's FRAME decl. Either way, let's see if + we can fill in the MEM_ATTRs for this memory. */ if (TREE_CODE (t_tramp) == ADDR_EXPR) set_mem_attributes_minus_bitpos (m_tramp, TREE_OPERAND (t_tramp, 0), true, 0); + /* Creator of a heap trampoline is responsible for making sure the + address is aligned to at least STACK_BOUNDARY. Normally malloc + will ensure this anyhow. */ tmp = round_trampoline_addr (r_tramp); if (tmp != r_tramp) { m_tramp = change_address (m_tramp, BLKmode, tmp); set_mem_align (m_tramp, TRAMPOLINE_ALIGNMENT); - set_mem_size (m_tramp, GEN_INT (TRAMPOLINE_SIZE)); + set_mem_size (m_tramp, TRAMPOLINE_SIZE); } /* The FUNC argument should be the address of the nested function. @@ -4807,10 +4929,13 @@ expand_builtin_init_trampoline (tree exp) /* Generate insns to initialize the trampoline. 
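   E.g. (illustrative): a nested function whose address escapes, as in

     void f (void) { int i = 0; int g (void) { return i; } use_fn (g); }

   needs a trampoline so calls through the pointer can reach f's frame
   (use_fn is a stand-in here).  For the heap variant the allocator, not
   the compiler, must provide the STACK_BOUNDARY alignment noted above.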
*/ targetm.calls.trampoline_init (m_tramp, t_func, r_chain); - trampolines_created = 1; + if (onstack) + { + trampolines_created = 1; - warning_at (DECL_SOURCE_LOCATION (t_func), OPT_Wtrampolines, - "trampoline generated for nested function %qD", t_func); + warning_at (DECL_SOURCE_LOCATION (t_func), OPT_Wtrampolines, + "trampoline generated for nested function %qD", t_func); + } return const0_rtx; } @@ -5043,28 +5168,48 @@ get_builtin_sync_mem (tree loc, enum machine_mode mode) /* The alignment needs to be at least according to that of the mode. */ set_mem_align (mem, MAX (GET_MODE_ALIGNMENT (mode), - get_pointer_alignment (loc, BIGGEST_ALIGNMENT))); + get_pointer_alignment (loc))); set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER); MEM_VOLATILE_P (mem) = 1; return mem; } +/* Make sure an argument is in the right mode. + EXP is the tree argument. + MODE is the mode it should be in. */ + +static rtx +expand_expr_force_mode (tree exp, enum machine_mode mode) +{ + rtx val; + enum machine_mode old_mode; + + val = expand_expr (exp, NULL_RTX, mode, EXPAND_NORMAL); + /* If VAL is promoted to a wider mode, convert it back to MODE. Take care + of CONST_INTs, where we know the old_mode only from the call argument. */ + + old_mode = GET_MODE (val); + if (old_mode == VOIDmode) + old_mode = TYPE_MODE (TREE_TYPE (exp)); + val = convert_modes (mode, old_mode, val, 1); + return val; +} + + /* Expand the __sync_xxx_and_fetch and __sync_fetch_and_xxx intrinsics. EXP is the CALL_EXPR. CODE is the rtx code that corresponds to the arithmetic or logical operation from the name; an exception here is that NOT actually means NAND. TARGET is an optional place for us to store the results; AFTER is true if this is the - fetch_and_xxx form. IGNORE is true if we don't actually care about - the result of the operation at all. */ + fetch_and_xxx form. */ static rtx expand_builtin_sync_operation (enum machine_mode mode, tree exp, enum rtx_code code, bool after, - rtx target, bool ignore) + rtx target) { rtx val, mem; - enum machine_mode old_mode; location_t loc = EXPR_LOCATION (exp); if (code == NOT && warn_sync_nand) @@ -5076,181 +5221,567 @@ expand_builtin_sync_operation (enum machine_mode mode, tree exp, switch (fcode) { - case BUILT_IN_FETCH_AND_NAND_1: - case BUILT_IN_FETCH_AND_NAND_2: - case BUILT_IN_FETCH_AND_NAND_4: - case BUILT_IN_FETCH_AND_NAND_8: - case BUILT_IN_FETCH_AND_NAND_16: - + case BUILT_IN_SYNC_FETCH_AND_NAND_1: + case BUILT_IN_SYNC_FETCH_AND_NAND_2: + case BUILT_IN_SYNC_FETCH_AND_NAND_4: + case BUILT_IN_SYNC_FETCH_AND_NAND_8: + case BUILT_IN_SYNC_FETCH_AND_NAND_16: if (warned_f_a_n) break; - fndecl = implicit_built_in_decls[BUILT_IN_FETCH_AND_NAND_N]; + fndecl = builtin_decl_implicit (BUILT_IN_SYNC_FETCH_AND_NAND_N); inform (loc, "%qD changed semantics in GCC 4.4", fndecl); warned_f_a_n = true; break; - case BUILT_IN_NAND_AND_FETCH_1: - case BUILT_IN_NAND_AND_FETCH_2: - case BUILT_IN_NAND_AND_FETCH_4: - case BUILT_IN_NAND_AND_FETCH_8: - case BUILT_IN_NAND_AND_FETCH_16: + case BUILT_IN_SYNC_NAND_AND_FETCH_1: + case BUILT_IN_SYNC_NAND_AND_FETCH_2: + case BUILT_IN_SYNC_NAND_AND_FETCH_4: + case BUILT_IN_SYNC_NAND_AND_FETCH_8: + case BUILT_IN_SYNC_NAND_AND_FETCH_16: + if (warned_n_a_f) + break; + + fndecl = builtin_decl_implicit (BUILT_IN_SYNC_NAND_AND_FETCH_N); + inform (loc, "%qD changed semantics in GCC 4.4", fndecl); + warned_n_a_f = true; + break; + + default: + gcc_unreachable (); + } + } + + /* Expand the operands. 
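+     E.g. (illustrative): __sync_fetch_and_add (&x, 1) reaches here with
+     CODE == PLUS and AFTER == false, so expand_atomic_fetch_op emits a
+     seq-cst read-modify-write and returns the value x had before the
+     addition.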
*/ + mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode); + val = expand_expr_force_mode (CALL_EXPR_ARG (exp, 1), mode); + + return expand_atomic_fetch_op (target, mem, val, code, MEMMODEL_SEQ_CST, + after); +} + +/* Expand the __sync_val_compare_and_swap and __sync_bool_compare_and_swap + intrinsics. EXP is the CALL_EXPR. IS_BOOL is + true if this is the boolean form. TARGET is a place for us to store the + results; this is NOT optional if IS_BOOL is true. */ + +static rtx +expand_builtin_compare_and_swap (enum machine_mode mode, tree exp, + bool is_bool, rtx target) +{ + rtx old_val, new_val, mem; + rtx *pbool, *poval; + + /* Expand the operands. */ + mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode); + old_val = expand_expr_force_mode (CALL_EXPR_ARG (exp, 1), mode); + new_val = expand_expr_force_mode (CALL_EXPR_ARG (exp, 2), mode); + + pbool = poval = NULL; + if (target != const0_rtx) + { + if (is_bool) + pbool = ⌖ + else + poval = ⌖ + } + if (!expand_atomic_compare_and_swap (pbool, poval, mem, old_val, new_val, + false, MEMMODEL_SEQ_CST, + MEMMODEL_SEQ_CST)) + return NULL_RTX; + + return target; +} + +/* Expand the __sync_lock_test_and_set intrinsic. Note that the most + general form is actually an atomic exchange, and some targets only + support a reduced form with the second argument being a constant 1. + EXP is the CALL_EXPR; TARGET is an optional place for us to store + the results. */ + +static rtx +expand_builtin_sync_lock_test_and_set (enum machine_mode mode, tree exp, + rtx target) +{ + rtx val, mem; + + /* Expand the operands. */ + mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode); + val = expand_expr_force_mode (CALL_EXPR_ARG (exp, 1), mode); + + return expand_sync_lock_test_and_set (target, mem, val); +} + +/* Expand the __sync_lock_release intrinsic. EXP is the CALL_EXPR. */ + +static void +expand_builtin_sync_lock_release (enum machine_mode mode, tree exp) +{ + rtx mem; + + /* Expand the operands. */ + mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode); + + expand_atomic_store (mem, const0_rtx, MEMMODEL_RELEASE, true); +} + +/* Given an integer representing an ``enum memmodel'', verify its + correctness and return the memory model enum. */ + +static enum memmodel +get_memmodel (tree exp) +{ + rtx op; + + /* If the parameter is not a constant, it's a run time value so we'll just + convert it to MEMMODEL_SEQ_CST to avoid annoying runtime checking. */ + if (TREE_CODE (exp) != INTEGER_CST) + return MEMMODEL_SEQ_CST; + + op = expand_normal (exp); + if (INTVAL (op) < 0 || INTVAL (op) >= MEMMODEL_LAST) + { + warning (OPT_Winvalid_memory_model, + "invalid memory model argument to builtin"); + return MEMMODEL_SEQ_CST; + } + return (enum memmodel) INTVAL (op); +} + +/* Expand the __atomic_exchange intrinsic: + TYPE __atomic_exchange (TYPE *object, TYPE desired, enum memmodel) + EXP is the CALL_EXPR. + TARGET is an optional place for us to store the results. */ + +static rtx +expand_builtin_atomic_exchange (enum machine_mode mode, tree exp, rtx target) +{ + rtx val, mem; + enum memmodel model; + + model = get_memmodel (CALL_EXPR_ARG (exp, 2)); + if (model == MEMMODEL_CONSUME) + { + error ("invalid memory model for %<__atomic_exchange%>"); + return NULL_RTX; + } + + if (!flag_inline_atomics) + return NULL_RTX; + + /* Expand the operands. 
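+     E.g. (illustrative, assuming a 32-bit int so MODE is SImode):
+     int old = __atomic_exchange_n (&x, 1, __ATOMIC_ACQ_REL) arrives
+     here with MODEL == MEMMODEL_ACQ_REL; MEMMODEL_CONSUME was already
+     rejected above.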
*/ + mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode); + val = expand_expr_force_mode (CALL_EXPR_ARG (exp, 1), mode); + + return expand_atomic_exchange (target, mem, val, model); +} + +/* Expand the __atomic_compare_exchange intrinsic: + bool __atomic_compare_exchange (TYPE *object, TYPE *expect, + TYPE desired, BOOL weak, + enum memmodel success, + enum memmodel failure) + EXP is the CALL_EXPR. + TARGET is an optional place for us to store the results. */ + +static rtx +expand_builtin_atomic_compare_exchange (enum machine_mode mode, tree exp, + rtx target) +{ + rtx expect, desired, mem, oldval; + enum memmodel success, failure; + tree weak; + bool is_weak; + + success = get_memmodel (CALL_EXPR_ARG (exp, 4)); + failure = get_memmodel (CALL_EXPR_ARG (exp, 5)); + + if (failure == MEMMODEL_RELEASE || failure == MEMMODEL_ACQ_REL) + { + error ("invalid failure memory model for %<__atomic_compare_exchange%>"); + return NULL_RTX; + } + + if (failure > success) + { + error ("failure memory model cannot be stronger than success " + "memory model for %<__atomic_compare_exchange%>"); + return NULL_RTX; + } + + if (!flag_inline_atomics) + return NULL_RTX; + + /* Expand the operands. */ + mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode); + + expect = expand_normal (CALL_EXPR_ARG (exp, 1)); + expect = convert_memory_address (Pmode, expect); + desired = expand_expr_force_mode (CALL_EXPR_ARG (exp, 2), mode); + + weak = CALL_EXPR_ARG (exp, 3); + is_weak = false; + if (host_integerp (weak, 0) && tree_low_cst (weak, 0) != 0) + is_weak = true; + + oldval = copy_to_reg (gen_rtx_MEM (mode, expect)); + + if (!expand_atomic_compare_and_swap ((target == const0_rtx ? NULL : &target), + &oldval, mem, oldval, desired, + is_weak, success, failure)) + return NULL_RTX; + + emit_move_insn (gen_rtx_MEM (mode, expect), oldval); + return target; +} + +/* Expand the __atomic_load intrinsic: + TYPE __atomic_load (TYPE *object, enum memmodel) + EXP is the CALL_EXPR. + TARGET is an optional place for us to store the results. */ + +static rtx +expand_builtin_atomic_load (enum machine_mode mode, tree exp, rtx target) +{ + rtx mem; + enum memmodel model; + + model = get_memmodel (CALL_EXPR_ARG (exp, 1)); + if (model == MEMMODEL_RELEASE + || model == MEMMODEL_ACQ_REL) + { + error ("invalid memory model for %<__atomic_load%>"); + return NULL_RTX; + } + + if (!flag_inline_atomics) + return NULL_RTX; + + /* Expand the operand. */ + mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode); + + return expand_atomic_load (target, mem, model); +} + + +/* Expand the __atomic_store intrinsic: + void __atomic_store (TYPE *object, TYPE desired, enum memmodel) + EXP is the CALL_EXPR. + TARGET is an optional place for us to store the results. */ + +static rtx +expand_builtin_atomic_store (enum machine_mode mode, tree exp) +{ + rtx mem, val; + enum memmodel model; + + model = get_memmodel (CALL_EXPR_ARG (exp, 2)); + if (model != MEMMODEL_RELAXED + && model != MEMMODEL_SEQ_CST + && model != MEMMODEL_RELEASE) + { + error ("invalid memory model for %<__atomic_store%>"); + return NULL_RTX; + } + + if (!flag_inline_atomics) + return NULL_RTX; + + /* Expand the operands. */ + mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode); + val = expand_expr_force_mode (CALL_EXPR_ARG (exp, 1), mode); + + return expand_atomic_store (mem, val, model, false); +} + +/* Expand the __atomic_fetch_XXX intrinsic: + TYPE __atomic_fetch_XXX (TYPE *object, TYPE val, enum memmodel) + EXP is the CALL_EXPR. 
+ TARGET is an optional place for us to store the results. + CODE is the operation, PLUS, MINUS, ADD, XOR, or IOR. + FETCH_AFTER is true if returning the result of the operation. + FETCH_AFTER is false if returning the value before the operation. + IGNORE is true if the result is not used. + EXT_CALL is the correct builtin for an external call if this cannot be + resolved to an instruction sequence. */ + +static rtx +expand_builtin_atomic_fetch_op (enum machine_mode mode, tree exp, rtx target, + enum rtx_code code, bool fetch_after, + bool ignore, enum built_in_function ext_call) +{ + rtx val, mem, ret; + enum memmodel model; + tree fndecl; + tree addr; + + model = get_memmodel (CALL_EXPR_ARG (exp, 2)); + + /* Expand the operands. */ + mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode); + val = expand_expr_force_mode (CALL_EXPR_ARG (exp, 1), mode); + + /* Only try generating instructions if inlining is turned on. */ + if (flag_inline_atomics) + { + ret = expand_atomic_fetch_op (target, mem, val, code, model, fetch_after); + if (ret) + return ret; + } + + /* Return if a different routine isn't needed for the library call. */ + if (ext_call == BUILT_IN_NONE) + return NULL_RTX; + + /* Change the call to the specified function. */ + fndecl = get_callee_fndecl (exp); + addr = CALL_EXPR_FN (exp); + STRIP_NOPS (addr); + + gcc_assert (TREE_OPERAND (addr, 0) == fndecl); + TREE_OPERAND (addr, 0) = builtin_decl_explicit(ext_call); + + /* Expand the call here so we can emit trailing code. */ + ret = expand_call (exp, target, ignore); + + /* Replace the original function just in case it matters. */ + TREE_OPERAND (addr, 0) = fndecl; + + /* Then issue the arithmetic correction to return the right result. */ + if (!ignore) + { + if (code == NOT) + { + ret = expand_simple_binop (mode, AND, ret, val, NULL_RTX, true, + OPTAB_LIB_WIDEN); + ret = expand_simple_unop (mode, NOT, ret, target, true); + } + else + ret = expand_simple_binop (mode, code, ret, val, target, true, + OPTAB_LIB_WIDEN); + } + return ret; +} + - if (warned_n_a_f) - break; +#ifndef HAVE_atomic_clear +# define HAVE_atomic_clear 0 +# define gen_atomic_clear(x,y) (gcc_unreachable (), NULL_RTX) +#endif - fndecl = implicit_built_in_decls[BUILT_IN_NAND_AND_FETCH_N]; - inform (loc, "%qD changed semantics in GCC 4.4", fndecl); - warned_n_a_f = true; - break; +/* Expand an atomic clear operation. + void _atomic_clear (BOOL *obj, enum memmodel) + EXP is the call expression. */ - default: - gcc_unreachable (); - } - } +static rtx +expand_builtin_atomic_clear (tree exp) +{ + enum machine_mode mode; + rtx mem, ret; + enum memmodel model; - /* Expand the operands. */ + mode = mode_for_size (BOOL_TYPE_SIZE, MODE_INT, 0); mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode); + model = get_memmodel (CALL_EXPR_ARG (exp, 1)); - val = expand_expr (CALL_EXPR_ARG (exp, 1), NULL_RTX, mode, EXPAND_NORMAL); - /* If VAL is promoted to a wider mode, convert it back to MODE. Take care - of CONST_INTs, where we know the old_mode only from the call argument. 
*/ - old_mode = GET_MODE (val); - if (old_mode == VOIDmode) - old_mode = TYPE_MODE (TREE_TYPE (CALL_EXPR_ARG (exp, 1))); - val = convert_modes (mode, old_mode, val, 1); + if (model == MEMMODEL_ACQUIRE || model == MEMMODEL_ACQ_REL) + { + error ("invalid memory model for %<__atomic_store%>"); + return const0_rtx; + } - if (ignore) - return expand_sync_operation (mem, val, code); - else - return expand_sync_fetch_operation (mem, val, code, after, target); + if (HAVE_atomic_clear) + { + emit_insn (gen_atomic_clear (mem, model)); + return const0_rtx; + } + + /* Try issuing an __atomic_store, and allow fallback to __sync_lock_release. + Failing that, a store is issued by __atomic_store. The only way this can + fail is if the bool type is larger than a word size. Unlikely, but + handle it anyway for completeness. Assume a single threaded model since + there is no atomic support in this case, and no barriers are required. */ + ret = expand_atomic_store (mem, const0_rtx, model, true); + if (!ret) + emit_move_insn (mem, const0_rtx); + return const0_rtx; } -/* Expand the __sync_val_compare_and_swap and __sync_bool_compare_and_swap - intrinsics. EXP is the CALL_EXPR. IS_BOOL is - true if this is the boolean form. TARGET is a place for us to store the - results; this is NOT optional if IS_BOOL is true. */ +/* Expand an atomic test_and_set operation. + bool _atomic_test_and_set (BOOL *obj, enum memmodel) + EXP is the call expression. */ static rtx -expand_builtin_compare_and_swap (enum machine_mode mode, tree exp, - bool is_bool, rtx target) +expand_builtin_atomic_test_and_set (tree exp, rtx target) { - rtx old_val, new_val, mem; - enum machine_mode old_mode; + rtx mem; + enum memmodel model; + enum machine_mode mode; - /* Expand the operands. */ + mode = mode_for_size (BOOL_TYPE_SIZE, MODE_INT, 0); mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode); + model = get_memmodel (CALL_EXPR_ARG (exp, 1)); + return expand_atomic_test_and_set (target, mem, model); +} - old_val = expand_expr (CALL_EXPR_ARG (exp, 1), NULL_RTX, - mode, EXPAND_NORMAL); - /* If VAL is promoted to a wider mode, convert it back to MODE. Take care - of CONST_INTs, where we know the old_mode only from the call argument. */ - old_mode = GET_MODE (old_val); - if (old_mode == VOIDmode) - old_mode = TYPE_MODE (TREE_TYPE (CALL_EXPR_ARG (exp, 1))); - old_val = convert_modes (mode, old_mode, old_val, 1); - new_val = expand_expr (CALL_EXPR_ARG (exp, 2), NULL_RTX, - mode, EXPAND_NORMAL); - /* If VAL is promoted to a wider mode, convert it back to MODE. Take care - of CONST_INTs, where we know the old_mode only from the call argument. */ - old_mode = GET_MODE (new_val); - if (old_mode == VOIDmode) - old_mode = TYPE_MODE (TREE_TYPE (CALL_EXPR_ARG (exp, 2))); - new_val = convert_modes (mode, old_mode, new_val, 1); +/* Return true if (optional) argument ARG1 of size ARG0 is always lock free on + this architecture. If ARG1 is NULL, use typical alignment for size ARG0. 
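+   For example (illustrative): __atomic_always_lock_free (4, 0) folds to
+   true iff a 4-byte compare-and-swap pattern exists, since the null
+   object pointer lets us assume the mode's natural alignment; with a
+   pointer to an underaligned (e.g. packed) 4-byte field it folds to
+   false even when the pattern exists.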
*/ + +static tree +fold_builtin_atomic_always_lock_free (tree arg0, tree arg1) +{ + int size; + enum machine_mode mode; + unsigned int mode_align, type_align; + + if (TREE_CODE (arg0) != INTEGER_CST) + return NULL_TREE; + + size = INTVAL (expand_normal (arg0)) * BITS_PER_UNIT; + mode = mode_for_size (size, MODE_INT, 0); + mode_align = GET_MODE_ALIGNMENT (mode); + + if (TREE_CODE (arg1) == INTEGER_CST && INTVAL (expand_normal (arg1)) == 0) + type_align = mode_align; + else + { + tree ttype = TREE_TYPE (arg1); + + /* This function is usually invoked and folded immediately by the front + end before anything else has a chance to look at it. The pointer + parameter at this point is usually cast to a void *, so check for that + and look past the cast. */ + if (TREE_CODE (arg1) == NOP_EXPR && POINTER_TYPE_P (ttype) + && VOID_TYPE_P (TREE_TYPE (ttype))) + arg1 = TREE_OPERAND (arg1, 0); + + ttype = TREE_TYPE (arg1); + gcc_assert (POINTER_TYPE_P (ttype)); + + /* Get the underlying type of the object. */ + ttype = TREE_TYPE (ttype); + type_align = TYPE_ALIGN (ttype); + } + + /* If the object has smaller alignment, the the lock free routines cannot + be used. */ + if (type_align < mode_align) + return boolean_false_node; - if (is_bool) - return expand_bool_compare_and_swap (mem, old_val, new_val, target); + /* Check if a compare_and_swap pattern exists for the mode which represents + the required size. The pattern is not allowed to fail, so the existence + of the pattern indicates support is present. */ + if (can_compare_and_swap_p (mode, true)) + return boolean_true_node; else - return expand_val_compare_and_swap (mem, old_val, new_val, target); + return boolean_false_node; } -/* Expand the __sync_lock_test_and_set intrinsic. Note that the most - general form is actually an atomic exchange, and some targets only - support a reduced form with the second argument being a constant 1. - EXP is the CALL_EXPR; TARGET is an optional place for us to store - the results. */ +/* Return true if the parameters to call EXP represent an object which will + always generate lock free instructions. The first argument represents the + size of the object, and the second parameter is a pointer to the object + itself. If NULL is passed for the object, then the result is based on + typical alignment for an object of the specified size. Otherwise return + false. */ static rtx -expand_builtin_lock_test_and_set (enum machine_mode mode, tree exp, - rtx target) +expand_builtin_atomic_always_lock_free (tree exp) { - rtx val, mem; - enum machine_mode old_mode; + tree size; + tree arg0 = CALL_EXPR_ARG (exp, 0); + tree arg1 = CALL_EXPR_ARG (exp, 1); - /* Expand the operands. */ - mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode); - val = expand_expr (CALL_EXPR_ARG (exp, 1), NULL_RTX, mode, EXPAND_NORMAL); - /* If VAL is promoted to a wider mode, convert it back to MODE. Take care - of CONST_INTs, where we know the old_mode only from the call argument. */ - old_mode = GET_MODE (val); - if (old_mode == VOIDmode) - old_mode = TYPE_MODE (TREE_TYPE (CALL_EXPR_ARG (exp, 1))); - val = convert_modes (mode, old_mode, val, 1); + if (TREE_CODE (arg0) != INTEGER_CST) + { + error ("non-constant argument 1 to __atomic_always_lock_free"); + return const0_rtx; + } - return expand_sync_lock_test_and_set (mem, val, target); + size = fold_builtin_atomic_always_lock_free (arg0, arg1); + if (size == boolean_true_node) + return const1_rtx; + return const0_rtx; } -/* Expand the __sync_synchronize intrinsic. 
*/ +/* Return a one or zero if it can be determined that object ARG1 of size ARG + is lock free on this architecture. */ -static void -expand_builtin_synchronize (void) +static tree +fold_builtin_atomic_is_lock_free (tree arg0, tree arg1) { - gimple x; - VEC (tree, gc) *v_clobbers; + if (!flag_inline_atomics) + return NULL_TREE; + + /* If it isn't always lock free, don't generate a result. */ + if (fold_builtin_atomic_always_lock_free (arg0, arg1) == boolean_true_node) + return boolean_true_node; -#ifdef HAVE_memory_barrier - if (HAVE_memory_barrier) - { - emit_insn (gen_memory_barrier ()); - return; - } -#endif + return NULL_TREE; +} + +/* Return true if the parameters to call EXP represent an object which will + always generate lock free instructions. The first argument represents the + size of the object, and the second parameter is a pointer to the object + itself. If NULL is passed for the object, then the result is based on + typical alignment for an object of the specified size. Otherwise return + NULL*/ - if (synchronize_libfunc != NULL_RTX) +static rtx +expand_builtin_atomic_is_lock_free (tree exp) +{ + tree size; + tree arg0 = CALL_EXPR_ARG (exp, 0); + tree arg1 = CALL_EXPR_ARG (exp, 1); + + if (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))) { - emit_library_call (synchronize_libfunc, LCT_NORMAL, VOIDmode, 0); - return; + error ("non-integer argument 1 to __atomic_is_lock_free"); + return NULL_RTX; } - /* If no explicit memory barrier instruction is available, create an - empty asm stmt with a memory clobber. */ - v_clobbers = VEC_alloc (tree, gc, 1); - VEC_quick_push (tree, v_clobbers, - tree_cons (NULL, build_string (6, "memory"), NULL)); - x = gimple_build_asm_vec ("", NULL, NULL, v_clobbers, NULL); - gimple_asm_set_volatile (x, true); - expand_asm_stmt (x); + if (!flag_inline_atomics) + return NULL_RTX; + + /* If the value is known at compile time, return the RTX for it. */ + size = fold_builtin_atomic_is_lock_free (arg0, arg1); + if (size == boolean_true_node) + return const1_rtx; + + return NULL_RTX; } -/* Expand the __sync_lock_release intrinsic. EXP is the CALL_EXPR. */ +/* Expand the __atomic_thread_fence intrinsic: + void __atomic_thread_fence (enum memmodel) + EXP is the CALL_EXPR. */ static void -expand_builtin_lock_release (enum machine_mode mode, tree exp) +expand_builtin_atomic_thread_fence (tree exp) { - struct expand_operand ops[2]; - enum insn_code icode; - rtx mem; + enum memmodel model = get_memmodel (CALL_EXPR_ARG (exp, 0)); + expand_mem_thread_fence (model); +} - /* Expand the operands. */ - mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode); +/* Expand the __atomic_signal_fence intrinsic: + void __atomic_signal_fence (enum memmodel) + EXP is the CALL_EXPR. */ - /* If there is an explicit operation in the md file, use it. */ - icode = direct_optab_handler (sync_lock_release_optab, mode); - if (icode != CODE_FOR_nothing) - { - create_fixed_operand (&ops[0], mem); - create_input_operand (&ops[1], const0_rtx, mode); - if (maybe_expand_insn (icode, 2, ops)) - return; - } +static void +expand_builtin_atomic_signal_fence (tree exp) +{ + enum memmodel model = get_memmodel (CALL_EXPR_ARG (exp, 0)); + expand_mem_signal_fence (model); +} + +/* Expand the __sync_synchronize intrinsic. */ - /* Otherwise we can implement this operation by emitting a barrier - followed by a store of zero. 
*/ - expand_builtin_synchronize (); - emit_move_insn (mem, const0_rtx); +static void +expand_builtin_sync_synchronize (void) +{ + expand_mem_thread_fence (MEMMODEL_SEQ_CST); } + /* Expand an expression EXP that calls a built-in function, with result going to TARGET if that's convenient @@ -5274,8 +5805,8 @@ expand_builtin (tree exp, rtx target, rtx subtarget, enum machine_mode mode, set of builtins. */ if (!optimize && !called_as_built_in (fndecl) - && DECL_ASSEMBLER_NAME_SET_P (fndecl) && fcode != BUILT_IN_ALLOCA + && fcode != BUILT_IN_ALLOCA_WITH_ALIGN && fcode != BUILT_IN_FREE) return expand_call (exp, target, ignore); @@ -5378,17 +5909,21 @@ expand_builtin (tree exp, rtx target, rtx subtarget, enum machine_mode mode, return target; break; + CASE_FLT_FN (BUILT_IN_ICEIL): CASE_FLT_FN (BUILT_IN_LCEIL): CASE_FLT_FN (BUILT_IN_LLCEIL): CASE_FLT_FN (BUILT_IN_LFLOOR): + CASE_FLT_FN (BUILT_IN_IFLOOR): CASE_FLT_FN (BUILT_IN_LLFLOOR): target = expand_builtin_int_roundingfn (exp, target); if (target) return target; break; + CASE_FLT_FN (BUILT_IN_IRINT): CASE_FLT_FN (BUILT_IN_LRINT): CASE_FLT_FN (BUILT_IN_LLRINT): + CASE_FLT_FN (BUILT_IN_IROUND): CASE_FLT_FN (BUILT_IN_LROUND): CASE_FLT_FN (BUILT_IN_LLROUND): target = expand_builtin_int_roundingfn_2 (exp, target); @@ -5527,6 +6062,7 @@ expand_builtin (tree exp, rtx target, rtx subtarget, enum machine_mode mode, return XEXP (DECL_RTL (DECL_RESULT (current_function_decl)), 0); case BUILT_IN_ALLOCA: + case BUILT_IN_ALLOCA_WITH_ALIGN: /* If the allocation stems from the declaration of a variable-sized object, it cannot accumulate. */ target = expand_builtin_alloca (exp, CALL_ALLOCA_FOR_VAR_P (exp)); @@ -5573,6 +6109,14 @@ expand_builtin (tree exp, rtx target, rtx subtarget, enum machine_mode mode, return target; break; + CASE_INT_FN (BUILT_IN_CLRSB): + case BUILT_IN_CLRSBIMAX: + target = expand_builtin_unop (target_mode, exp, target, + subtarget, clrsb_optab); + if (target) + return target; + break; + CASE_INT_FN (BUILT_IN_POPCOUNT): case BUILT_IN_POPCOUNTIMAX: target = expand_builtin_unop (target_mode, exp, target, @@ -5808,12 +6352,16 @@ expand_builtin (tree exp, rtx target, rtx subtarget, enum machine_mode mode, return expand_builtin_va_copy (exp); case BUILT_IN_EXPECT: return expand_builtin_expect (exp, target); + case BUILT_IN_ASSUME_ALIGNED: + return expand_builtin_assume_aligned (exp, target); case BUILT_IN_PREFETCH: expand_builtin_prefetch (exp); return const0_rtx; case BUILT_IN_INIT_TRAMPOLINE: - return expand_builtin_init_trampoline (exp); + return expand_builtin_init_trampoline (exp, true); + case BUILT_IN_INIT_HEAP_TRAMPOLINE: + return expand_builtin_init_trampoline (exp, false); case BUILT_IN_ADJUST_TRAMPOLINE: return expand_builtin_adjust_trampoline (exp); @@ -5829,199 +6377,441 @@ expand_builtin (tree exp, rtx target, rtx subtarget, enum machine_mode mode, return target; break; - case BUILT_IN_FETCH_AND_ADD_1: - case BUILT_IN_FETCH_AND_ADD_2: - case BUILT_IN_FETCH_AND_ADD_4: - case BUILT_IN_FETCH_AND_ADD_8: - case BUILT_IN_FETCH_AND_ADD_16: - mode = get_builtin_sync_mode (fcode - BUILT_IN_FETCH_AND_ADD_1); - target = expand_builtin_sync_operation (mode, exp, PLUS, - false, target, ignore); + case BUILT_IN_SYNC_FETCH_AND_ADD_1: + case BUILT_IN_SYNC_FETCH_AND_ADD_2: + case BUILT_IN_SYNC_FETCH_AND_ADD_4: + case BUILT_IN_SYNC_FETCH_AND_ADD_8: + case BUILT_IN_SYNC_FETCH_AND_ADD_16: + mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_FETCH_AND_ADD_1); + target = expand_builtin_sync_operation (mode, exp, PLUS, false, target); if (target) 
return target; break; - case BUILT_IN_FETCH_AND_SUB_1: - case BUILT_IN_FETCH_AND_SUB_2: - case BUILT_IN_FETCH_AND_SUB_4: - case BUILT_IN_FETCH_AND_SUB_8: - case BUILT_IN_FETCH_AND_SUB_16: - mode = get_builtin_sync_mode (fcode - BUILT_IN_FETCH_AND_SUB_1); - target = expand_builtin_sync_operation (mode, exp, MINUS, - false, target, ignore); + case BUILT_IN_SYNC_FETCH_AND_SUB_1: + case BUILT_IN_SYNC_FETCH_AND_SUB_2: + case BUILT_IN_SYNC_FETCH_AND_SUB_4: + case BUILT_IN_SYNC_FETCH_AND_SUB_8: + case BUILT_IN_SYNC_FETCH_AND_SUB_16: + mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_FETCH_AND_SUB_1); + target = expand_builtin_sync_operation (mode, exp, MINUS, false, target); if (target) return target; break; - case BUILT_IN_FETCH_AND_OR_1: - case BUILT_IN_FETCH_AND_OR_2: - case BUILT_IN_FETCH_AND_OR_4: - case BUILT_IN_FETCH_AND_OR_8: - case BUILT_IN_FETCH_AND_OR_16: - mode = get_builtin_sync_mode (fcode - BUILT_IN_FETCH_AND_OR_1); - target = expand_builtin_sync_operation (mode, exp, IOR, - false, target, ignore); + case BUILT_IN_SYNC_FETCH_AND_OR_1: + case BUILT_IN_SYNC_FETCH_AND_OR_2: + case BUILT_IN_SYNC_FETCH_AND_OR_4: + case BUILT_IN_SYNC_FETCH_AND_OR_8: + case BUILT_IN_SYNC_FETCH_AND_OR_16: + mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_FETCH_AND_OR_1); + target = expand_builtin_sync_operation (mode, exp, IOR, false, target); if (target) return target; break; - case BUILT_IN_FETCH_AND_AND_1: - case BUILT_IN_FETCH_AND_AND_2: - case BUILT_IN_FETCH_AND_AND_4: - case BUILT_IN_FETCH_AND_AND_8: - case BUILT_IN_FETCH_AND_AND_16: - mode = get_builtin_sync_mode (fcode - BUILT_IN_FETCH_AND_AND_1); - target = expand_builtin_sync_operation (mode, exp, AND, - false, target, ignore); + case BUILT_IN_SYNC_FETCH_AND_AND_1: + case BUILT_IN_SYNC_FETCH_AND_AND_2: + case BUILT_IN_SYNC_FETCH_AND_AND_4: + case BUILT_IN_SYNC_FETCH_AND_AND_8: + case BUILT_IN_SYNC_FETCH_AND_AND_16: + mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_FETCH_AND_AND_1); + target = expand_builtin_sync_operation (mode, exp, AND, false, target); if (target) return target; break; - case BUILT_IN_FETCH_AND_XOR_1: - case BUILT_IN_FETCH_AND_XOR_2: - case BUILT_IN_FETCH_AND_XOR_4: - case BUILT_IN_FETCH_AND_XOR_8: - case BUILT_IN_FETCH_AND_XOR_16: - mode = get_builtin_sync_mode (fcode - BUILT_IN_FETCH_AND_XOR_1); - target = expand_builtin_sync_operation (mode, exp, XOR, - false, target, ignore); + case BUILT_IN_SYNC_FETCH_AND_XOR_1: + case BUILT_IN_SYNC_FETCH_AND_XOR_2: + case BUILT_IN_SYNC_FETCH_AND_XOR_4: + case BUILT_IN_SYNC_FETCH_AND_XOR_8: + case BUILT_IN_SYNC_FETCH_AND_XOR_16: + mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_FETCH_AND_XOR_1); + target = expand_builtin_sync_operation (mode, exp, XOR, false, target); if (target) return target; break; - case BUILT_IN_FETCH_AND_NAND_1: - case BUILT_IN_FETCH_AND_NAND_2: - case BUILT_IN_FETCH_AND_NAND_4: - case BUILT_IN_FETCH_AND_NAND_8: - case BUILT_IN_FETCH_AND_NAND_16: - mode = get_builtin_sync_mode (fcode - BUILT_IN_FETCH_AND_NAND_1); - target = expand_builtin_sync_operation (mode, exp, NOT, - false, target, ignore); + case BUILT_IN_SYNC_FETCH_AND_NAND_1: + case BUILT_IN_SYNC_FETCH_AND_NAND_2: + case BUILT_IN_SYNC_FETCH_AND_NAND_4: + case BUILT_IN_SYNC_FETCH_AND_NAND_8: + case BUILT_IN_SYNC_FETCH_AND_NAND_16: + mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_FETCH_AND_NAND_1); + target = expand_builtin_sync_operation (mode, exp, NOT, false, target); if (target) return target; break; - case BUILT_IN_ADD_AND_FETCH_1: - case BUILT_IN_ADD_AND_FETCH_2: - case 
BUILT_IN_ADD_AND_FETCH_4: - case BUILT_IN_ADD_AND_FETCH_8: - case BUILT_IN_ADD_AND_FETCH_16: - mode = get_builtin_sync_mode (fcode - BUILT_IN_ADD_AND_FETCH_1); - target = expand_builtin_sync_operation (mode, exp, PLUS, - true, target, ignore); + case BUILT_IN_SYNC_ADD_AND_FETCH_1: + case BUILT_IN_SYNC_ADD_AND_FETCH_2: + case BUILT_IN_SYNC_ADD_AND_FETCH_4: + case BUILT_IN_SYNC_ADD_AND_FETCH_8: + case BUILT_IN_SYNC_ADD_AND_FETCH_16: + mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_ADD_AND_FETCH_1); + target = expand_builtin_sync_operation (mode, exp, PLUS, true, target); if (target) return target; break; - case BUILT_IN_SUB_AND_FETCH_1: - case BUILT_IN_SUB_AND_FETCH_2: - case BUILT_IN_SUB_AND_FETCH_4: - case BUILT_IN_SUB_AND_FETCH_8: - case BUILT_IN_SUB_AND_FETCH_16: - mode = get_builtin_sync_mode (fcode - BUILT_IN_SUB_AND_FETCH_1); - target = expand_builtin_sync_operation (mode, exp, MINUS, - true, target, ignore); + case BUILT_IN_SYNC_SUB_AND_FETCH_1: + case BUILT_IN_SYNC_SUB_AND_FETCH_2: + case BUILT_IN_SYNC_SUB_AND_FETCH_4: + case BUILT_IN_SYNC_SUB_AND_FETCH_8: + case BUILT_IN_SYNC_SUB_AND_FETCH_16: + mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_SUB_AND_FETCH_1); + target = expand_builtin_sync_operation (mode, exp, MINUS, true, target); if (target) return target; break; - case BUILT_IN_OR_AND_FETCH_1: - case BUILT_IN_OR_AND_FETCH_2: - case BUILT_IN_OR_AND_FETCH_4: - case BUILT_IN_OR_AND_FETCH_8: - case BUILT_IN_OR_AND_FETCH_16: - mode = get_builtin_sync_mode (fcode - BUILT_IN_OR_AND_FETCH_1); - target = expand_builtin_sync_operation (mode, exp, IOR, - true, target, ignore); + case BUILT_IN_SYNC_OR_AND_FETCH_1: + case BUILT_IN_SYNC_OR_AND_FETCH_2: + case BUILT_IN_SYNC_OR_AND_FETCH_4: + case BUILT_IN_SYNC_OR_AND_FETCH_8: + case BUILT_IN_SYNC_OR_AND_FETCH_16: + mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_OR_AND_FETCH_1); + target = expand_builtin_sync_operation (mode, exp, IOR, true, target); if (target) return target; break; - case BUILT_IN_AND_AND_FETCH_1: - case BUILT_IN_AND_AND_FETCH_2: - case BUILT_IN_AND_AND_FETCH_4: - case BUILT_IN_AND_AND_FETCH_8: - case BUILT_IN_AND_AND_FETCH_16: - mode = get_builtin_sync_mode (fcode - BUILT_IN_AND_AND_FETCH_1); - target = expand_builtin_sync_operation (mode, exp, AND, - true, target, ignore); + case BUILT_IN_SYNC_AND_AND_FETCH_1: + case BUILT_IN_SYNC_AND_AND_FETCH_2: + case BUILT_IN_SYNC_AND_AND_FETCH_4: + case BUILT_IN_SYNC_AND_AND_FETCH_8: + case BUILT_IN_SYNC_AND_AND_FETCH_16: + mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_AND_AND_FETCH_1); + target = expand_builtin_sync_operation (mode, exp, AND, true, target); if (target) return target; break; - case BUILT_IN_XOR_AND_FETCH_1: - case BUILT_IN_XOR_AND_FETCH_2: - case BUILT_IN_XOR_AND_FETCH_4: - case BUILT_IN_XOR_AND_FETCH_8: - case BUILT_IN_XOR_AND_FETCH_16: - mode = get_builtin_sync_mode (fcode - BUILT_IN_XOR_AND_FETCH_1); - target = expand_builtin_sync_operation (mode, exp, XOR, - true, target, ignore); + case BUILT_IN_SYNC_XOR_AND_FETCH_1: + case BUILT_IN_SYNC_XOR_AND_FETCH_2: + case BUILT_IN_SYNC_XOR_AND_FETCH_4: + case BUILT_IN_SYNC_XOR_AND_FETCH_8: + case BUILT_IN_SYNC_XOR_AND_FETCH_16: + mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_XOR_AND_FETCH_1); + target = expand_builtin_sync_operation (mode, exp, XOR, true, target); if (target) return target; break; - case BUILT_IN_NAND_AND_FETCH_1: - case BUILT_IN_NAND_AND_FETCH_2: - case BUILT_IN_NAND_AND_FETCH_4: - case BUILT_IN_NAND_AND_FETCH_8: - case BUILT_IN_NAND_AND_FETCH_16: - mode = get_builtin_sync_mode 
(fcode - BUILT_IN_NAND_AND_FETCH_1); - target = expand_builtin_sync_operation (mode, exp, NOT, - true, target, ignore); + case BUILT_IN_SYNC_NAND_AND_FETCH_1: + case BUILT_IN_SYNC_NAND_AND_FETCH_2: + case BUILT_IN_SYNC_NAND_AND_FETCH_4: + case BUILT_IN_SYNC_NAND_AND_FETCH_8: + case BUILT_IN_SYNC_NAND_AND_FETCH_16: + mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_NAND_AND_FETCH_1); + target = expand_builtin_sync_operation (mode, exp, NOT, true, target); if (target) return target; break; - case BUILT_IN_BOOL_COMPARE_AND_SWAP_1: - case BUILT_IN_BOOL_COMPARE_AND_SWAP_2: - case BUILT_IN_BOOL_COMPARE_AND_SWAP_4: - case BUILT_IN_BOOL_COMPARE_AND_SWAP_8: - case BUILT_IN_BOOL_COMPARE_AND_SWAP_16: + case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_1: + case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_2: + case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_4: + case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_8: + case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_16: if (mode == VOIDmode) mode = TYPE_MODE (boolean_type_node); if (!target || !register_operand (target, mode)) target = gen_reg_rtx (mode); - mode = get_builtin_sync_mode (fcode - BUILT_IN_BOOL_COMPARE_AND_SWAP_1); + mode = get_builtin_sync_mode + (fcode - BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_1); target = expand_builtin_compare_and_swap (mode, exp, true, target); if (target) return target; break; - case BUILT_IN_VAL_COMPARE_AND_SWAP_1: - case BUILT_IN_VAL_COMPARE_AND_SWAP_2: - case BUILT_IN_VAL_COMPARE_AND_SWAP_4: - case BUILT_IN_VAL_COMPARE_AND_SWAP_8: - case BUILT_IN_VAL_COMPARE_AND_SWAP_16: - mode = get_builtin_sync_mode (fcode - BUILT_IN_VAL_COMPARE_AND_SWAP_1); + case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_1: + case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_2: + case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_4: + case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_8: + case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_16: + mode = get_builtin_sync_mode + (fcode - BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_1); target = expand_builtin_compare_and_swap (mode, exp, false, target); if (target) return target; break; - case BUILT_IN_LOCK_TEST_AND_SET_1: - case BUILT_IN_LOCK_TEST_AND_SET_2: - case BUILT_IN_LOCK_TEST_AND_SET_4: - case BUILT_IN_LOCK_TEST_AND_SET_8: - case BUILT_IN_LOCK_TEST_AND_SET_16: - mode = get_builtin_sync_mode (fcode - BUILT_IN_LOCK_TEST_AND_SET_1); - target = expand_builtin_lock_test_and_set (mode, exp, target); + case BUILT_IN_SYNC_LOCK_TEST_AND_SET_1: + case BUILT_IN_SYNC_LOCK_TEST_AND_SET_2: + case BUILT_IN_SYNC_LOCK_TEST_AND_SET_4: + case BUILT_IN_SYNC_LOCK_TEST_AND_SET_8: + case BUILT_IN_SYNC_LOCK_TEST_AND_SET_16: + mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_LOCK_TEST_AND_SET_1); + target = expand_builtin_sync_lock_test_and_set (mode, exp, target); + if (target) + return target; + break; + + case BUILT_IN_SYNC_LOCK_RELEASE_1: + case BUILT_IN_SYNC_LOCK_RELEASE_2: + case BUILT_IN_SYNC_LOCK_RELEASE_4: + case BUILT_IN_SYNC_LOCK_RELEASE_8: + case BUILT_IN_SYNC_LOCK_RELEASE_16: + mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_LOCK_RELEASE_1); + expand_builtin_sync_lock_release (mode, exp); + return const0_rtx; + + case BUILT_IN_SYNC_SYNCHRONIZE: + expand_builtin_sync_synchronize (); + return const0_rtx; + + case BUILT_IN_ATOMIC_EXCHANGE_1: + case BUILT_IN_ATOMIC_EXCHANGE_2: + case BUILT_IN_ATOMIC_EXCHANGE_4: + case BUILT_IN_ATOMIC_EXCHANGE_8: + case BUILT_IN_ATOMIC_EXCHANGE_16: + mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_EXCHANGE_1); + target = expand_builtin_atomic_exchange (mode, exp, target); + if (target) + return target; + break; + + case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_1: + 
case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_2: + case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_4: + case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_8: + case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_16: + { + unsigned int nargs, z; + VEC(tree,gc) *vec; + + mode = + get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_COMPARE_EXCHANGE_1); + target = expand_builtin_atomic_compare_exchange (mode, exp, target); + if (target) + return target; + + /* If this is turned into an external library call, the weak parameter + must be dropped to match the expected parameter list. */ + nargs = call_expr_nargs (exp); + vec = VEC_alloc (tree, gc, nargs - 1); + for (z = 0; z < 3; z++) + VEC_quick_push (tree, vec, CALL_EXPR_ARG (exp, z)); + /* Skip the boolean weak parameter. */ + for (z = 4; z < 6; z++) + VEC_quick_push (tree, vec, CALL_EXPR_ARG (exp, z)); + exp = build_call_vec (TREE_TYPE (exp), CALL_EXPR_FN (exp), vec); + break; + } + + case BUILT_IN_ATOMIC_LOAD_1: + case BUILT_IN_ATOMIC_LOAD_2: + case BUILT_IN_ATOMIC_LOAD_4: + case BUILT_IN_ATOMIC_LOAD_8: + case BUILT_IN_ATOMIC_LOAD_16: + mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_LOAD_1); + target = expand_builtin_atomic_load (mode, exp, target); + if (target) + return target; + break; + + case BUILT_IN_ATOMIC_STORE_1: + case BUILT_IN_ATOMIC_STORE_2: + case BUILT_IN_ATOMIC_STORE_4: + case BUILT_IN_ATOMIC_STORE_8: + case BUILT_IN_ATOMIC_STORE_16: + mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_STORE_1); + target = expand_builtin_atomic_store (mode, exp); + if (target) + return const0_rtx; + break; + + case BUILT_IN_ATOMIC_ADD_FETCH_1: + case BUILT_IN_ATOMIC_ADD_FETCH_2: + case BUILT_IN_ATOMIC_ADD_FETCH_4: + case BUILT_IN_ATOMIC_ADD_FETCH_8: + case BUILT_IN_ATOMIC_ADD_FETCH_16: + { + enum built_in_function lib; + mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_ADD_FETCH_1); + lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_ADD_1 + + (fcode - BUILT_IN_ATOMIC_ADD_FETCH_1)); + target = expand_builtin_atomic_fetch_op (mode, exp, target, PLUS, true, + ignore, lib); + if (target) + return target; + break; + } + case BUILT_IN_ATOMIC_SUB_FETCH_1: + case BUILT_IN_ATOMIC_SUB_FETCH_2: + case BUILT_IN_ATOMIC_SUB_FETCH_4: + case BUILT_IN_ATOMIC_SUB_FETCH_8: + case BUILT_IN_ATOMIC_SUB_FETCH_16: + { + enum built_in_function lib; + mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_SUB_FETCH_1); + lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_SUB_1 + + (fcode - BUILT_IN_ATOMIC_SUB_FETCH_1)); + target = expand_builtin_atomic_fetch_op (mode, exp, target, MINUS, true, + ignore, lib); + if (target) + return target; + break; + } + case BUILT_IN_ATOMIC_AND_FETCH_1: + case BUILT_IN_ATOMIC_AND_FETCH_2: + case BUILT_IN_ATOMIC_AND_FETCH_4: + case BUILT_IN_ATOMIC_AND_FETCH_8: + case BUILT_IN_ATOMIC_AND_FETCH_16: + { + enum built_in_function lib; + mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_AND_FETCH_1); + lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_AND_1 + + (fcode - BUILT_IN_ATOMIC_AND_FETCH_1)); + target = expand_builtin_atomic_fetch_op (mode, exp, target, AND, true, + ignore, lib); + if (target) + return target; + break; + } + case BUILT_IN_ATOMIC_NAND_FETCH_1: + case BUILT_IN_ATOMIC_NAND_FETCH_2: + case BUILT_IN_ATOMIC_NAND_FETCH_4: + case BUILT_IN_ATOMIC_NAND_FETCH_8: + case BUILT_IN_ATOMIC_NAND_FETCH_16: + { + enum built_in_function lib; + mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_NAND_FETCH_1); + lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_NAND_1 + + (fcode - BUILT_IN_ATOMIC_NAND_FETCH_1)); + target = 
expand_builtin_atomic_fetch_op (mode, exp, target, NOT, true, + ignore, lib); + if (target) + return target; + break; + } + case BUILT_IN_ATOMIC_XOR_FETCH_1: + case BUILT_IN_ATOMIC_XOR_FETCH_2: + case BUILT_IN_ATOMIC_XOR_FETCH_4: + case BUILT_IN_ATOMIC_XOR_FETCH_8: + case BUILT_IN_ATOMIC_XOR_FETCH_16: + { + enum built_in_function lib; + mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_XOR_FETCH_1); + lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_XOR_1 + + (fcode - BUILT_IN_ATOMIC_XOR_FETCH_1)); + target = expand_builtin_atomic_fetch_op (mode, exp, target, XOR, true, + ignore, lib); + if (target) + return target; + break; + } + case BUILT_IN_ATOMIC_OR_FETCH_1: + case BUILT_IN_ATOMIC_OR_FETCH_2: + case BUILT_IN_ATOMIC_OR_FETCH_4: + case BUILT_IN_ATOMIC_OR_FETCH_8: + case BUILT_IN_ATOMIC_OR_FETCH_16: + { + enum built_in_function lib; + mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_OR_FETCH_1); + lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_OR_1 + + (fcode - BUILT_IN_ATOMIC_OR_FETCH_1)); + target = expand_builtin_atomic_fetch_op (mode, exp, target, IOR, true, + ignore, lib); + if (target) + return target; + break; + } + case BUILT_IN_ATOMIC_FETCH_ADD_1: + case BUILT_IN_ATOMIC_FETCH_ADD_2: + case BUILT_IN_ATOMIC_FETCH_ADD_4: + case BUILT_IN_ATOMIC_FETCH_ADD_8: + case BUILT_IN_ATOMIC_FETCH_ADD_16: + mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_FETCH_ADD_1); + target = expand_builtin_atomic_fetch_op (mode, exp, target, PLUS, false, + ignore, BUILT_IN_NONE); + if (target) + return target; + break; + + case BUILT_IN_ATOMIC_FETCH_SUB_1: + case BUILT_IN_ATOMIC_FETCH_SUB_2: + case BUILT_IN_ATOMIC_FETCH_SUB_4: + case BUILT_IN_ATOMIC_FETCH_SUB_8: + case BUILT_IN_ATOMIC_FETCH_SUB_16: + mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_FETCH_SUB_1); + target = expand_builtin_atomic_fetch_op (mode, exp, target, MINUS, false, + ignore, BUILT_IN_NONE); + if (target) + return target; + break; + + case BUILT_IN_ATOMIC_FETCH_AND_1: + case BUILT_IN_ATOMIC_FETCH_AND_2: + case BUILT_IN_ATOMIC_FETCH_AND_4: + case BUILT_IN_ATOMIC_FETCH_AND_8: + case BUILT_IN_ATOMIC_FETCH_AND_16: + mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_FETCH_AND_1); + target = expand_builtin_atomic_fetch_op (mode, exp, target, AND, false, + ignore, BUILT_IN_NONE); + if (target) + return target; + break; + + case BUILT_IN_ATOMIC_FETCH_NAND_1: + case BUILT_IN_ATOMIC_FETCH_NAND_2: + case BUILT_IN_ATOMIC_FETCH_NAND_4: + case BUILT_IN_ATOMIC_FETCH_NAND_8: + case BUILT_IN_ATOMIC_FETCH_NAND_16: + mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_FETCH_NAND_1); + target = expand_builtin_atomic_fetch_op (mode, exp, target, NOT, false, + ignore, BUILT_IN_NONE); + if (target) + return target; + break; + + case BUILT_IN_ATOMIC_FETCH_XOR_1: + case BUILT_IN_ATOMIC_FETCH_XOR_2: + case BUILT_IN_ATOMIC_FETCH_XOR_4: + case BUILT_IN_ATOMIC_FETCH_XOR_8: + case BUILT_IN_ATOMIC_FETCH_XOR_16: + mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_FETCH_XOR_1); + target = expand_builtin_atomic_fetch_op (mode, exp, target, XOR, false, + ignore, BUILT_IN_NONE); + if (target) + return target; + break; + + case BUILT_IN_ATOMIC_FETCH_OR_1: + case BUILT_IN_ATOMIC_FETCH_OR_2: + case BUILT_IN_ATOMIC_FETCH_OR_4: + case BUILT_IN_ATOMIC_FETCH_OR_8: + case BUILT_IN_ATOMIC_FETCH_OR_16: + mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_FETCH_OR_1); + target = expand_builtin_atomic_fetch_op (mode, exp, target, IOR, false, + ignore, BUILT_IN_NONE); if (target) return target; break; - case BUILT_IN_LOCK_RELEASE_1: 
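
When no inline pattern exists for one of the OP_FETCH forms above, the expander falls back to the corresponding FETCH_OP library function and fixes up the fetched value by re-applying the operation; NAND is the only non-obvious fixup. A source-level sketch of that identity, with hypothetical helper names:

static inline int
add_fetch_via_fetch_add (int *p, int v)
{
  /* __atomic_add_fetch recovered from __atomic_fetch_add.  */
  return __atomic_fetch_add (p, v, __ATOMIC_SEQ_CST) + v;
}

static inline int
nand_fetch_via_fetch_nand (int *p, int v)
{
  /* For NAND the fixup is ~(old & v), matching the AND-then-NOT
     pairing used by expand_builtin_atomic_fetch_op.  */
  return ~(__atomic_fetch_nand (p, v, __ATOMIC_SEQ_CST) & v);
}
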
- case BUILT_IN_LOCK_RELEASE_2: - case BUILT_IN_LOCK_RELEASE_4: - case BUILT_IN_LOCK_RELEASE_8: - case BUILT_IN_LOCK_RELEASE_16: - mode = get_builtin_sync_mode (fcode - BUILT_IN_LOCK_RELEASE_1); - expand_builtin_lock_release (mode, exp); + case BUILT_IN_ATOMIC_TEST_AND_SET: + return expand_builtin_atomic_test_and_set (exp, target); + + case BUILT_IN_ATOMIC_CLEAR: + return expand_builtin_atomic_clear (exp); + + case BUILT_IN_ATOMIC_ALWAYS_LOCK_FREE: + return expand_builtin_atomic_always_lock_free (exp); + + case BUILT_IN_ATOMIC_IS_LOCK_FREE: + target = expand_builtin_atomic_is_lock_free (exp); + if (target) + return target; + break; + + case BUILT_IN_ATOMIC_THREAD_FENCE: + expand_builtin_atomic_thread_fence (exp); return const0_rtx; - case BUILT_IN_SYNCHRONIZE: - expand_builtin_synchronize (); + case BUILT_IN_ATOMIC_SIGNAL_FENCE: + expand_builtin_atomic_signal_fence (exp); return const0_rtx; case BUILT_IN_OBJECT_SIZE: @@ -6039,6 +6829,7 @@ expand_builtin (tree exp, rtx target, rtx subtarget, enum machine_mode mode, case BUILT_IN_STRCPY_CHK: case BUILT_IN_STPCPY_CHK: case BUILT_IN_STRNCPY_CHK: + case BUILT_IN_STPNCPY_CHK: case BUILT_IN_STRCAT_CHK: case BUILT_IN_STRNCAT_CHK: case BUILT_IN_SNPRINTF_CHK: @@ -6052,7 +6843,8 @@ expand_builtin (tree exp, rtx target, rtx subtarget, enum machine_mode mode, break; case BUILT_IN_FREE: - maybe_emit_free_warning (exp); + if (warn_free_nonheap_object) + maybe_emit_free_warning (exp); break; default: /* just do library call, if unknown builtin */ @@ -6187,7 +6979,7 @@ build_builtin_expect_predicate (location_t loc, tree pred, tree expected) { tree fn, arg_types, pred_type, expected_type, call_expr, ret_type; - fn = built_in_decls[BUILT_IN_EXPECT]; + fn = builtin_decl_explicit (BUILT_IN_EXPECT); arg_types = TYPE_ARG_TYPES (TREE_TYPE (fn)); ret_type = TREE_TYPE (TREE_TYPE (fn)); pred_type = TREE_VALUE (arg_types); @@ -6207,13 +6999,22 @@ build_builtin_expect_predicate (location_t loc, tree pred, tree expected) static tree fold_builtin_expect (location_t loc, tree arg0, tree arg1) { - tree inner, fndecl; + tree inner, fndecl, inner_arg0; enum tree_code code; + /* Distribute the expected value over short-circuiting operators. + See through the cast from truthvalue_type_node to long. */ + inner_arg0 = arg0; + while (TREE_CODE (inner_arg0) == NOP_EXPR + && INTEGRAL_TYPE_P (TREE_TYPE (inner_arg0)) + && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (inner_arg0, 0)))) + inner_arg0 = TREE_OPERAND (inner_arg0, 0); + /* If this is a builtin_expect within a builtin_expect keep the inner one. See through a comparison against a constant. It might have been added to create a thruthvalue. */ - inner = arg0; + inner = inner_arg0; + if (COMPARISON_CLASS_P (inner) && TREE_CODE (TREE_OPERAND (inner, 1)) == INTEGER_CST) inner = TREE_OPERAND (inner, 0); @@ -6224,14 +7025,7 @@ fold_builtin_expect (location_t loc, tree arg0, tree arg1) && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_EXPECT) return arg0; - /* Distribute the expected value over short-circuiting operators. - See through the cast from truthvalue_type_node to long. */ - inner = arg0; - while (TREE_CODE (inner) == NOP_EXPR - && INTEGRAL_TYPE_P (TREE_TYPE (inner)) - && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (inner, 0)))) - inner = TREE_OPERAND (inner, 0); - + inner = inner_arg0; code = TREE_CODE (inner); if (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR) { @@ -6246,13 +7040,13 @@ fold_builtin_expect (location_t loc, tree arg0, tree arg1) } /* If the argument isn't invariant then there's nothing else we can do. 
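
The reworked fold_builtin_expect above first looks through the integral cast wrapped around the condition (inner_arg0) and then distributes the expected value across short-circuit operators. In source terms the distribution behaves roughly like the rewrite below, a sketch rather than literal GCC output:

long
before (int a, int b)
{
  return __builtin_expect (a && b, 1);
}

/* ...is treated as if it had been written:  */
long
after (int a, int b)
{
  return (long) (__builtin_expect ((long) a, 1)
                 && __builtin_expect ((long) b, 1));
}
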
*/ - if (!TREE_CONSTANT (arg0)) + if (!TREE_CONSTANT (inner_arg0)) return NULL_TREE; /* If we expect that a comparison against the argument will fold to a constant return the constant. In practice, this means a true constant or the address of a non-weak symbol. */ - inner = arg0; + inner = inner_arg0; STRIP_NOPS (inner); if (TREE_CODE (inner) == ADDR_EXPR) { @@ -6484,6 +7278,42 @@ fold_fixed_mathfn (location_t loc, tree fndecl, tree arg) fold_convert_loc (loc, newtype, arg0)); } + /* Canonicalize iround (x) to lround (x) on ILP32 targets where + sizeof (int) == sizeof (long). */ + if (TYPE_PRECISION (integer_type_node) + == TYPE_PRECISION (long_integer_type_node)) + { + tree newfn = NULL_TREE; + switch (fcode) + { + CASE_FLT_FN (BUILT_IN_ICEIL): + newfn = mathfn_built_in (TREE_TYPE (arg), BUILT_IN_LCEIL); + break; + + CASE_FLT_FN (BUILT_IN_IFLOOR): + newfn = mathfn_built_in (TREE_TYPE (arg), BUILT_IN_LFLOOR); + break; + + CASE_FLT_FN (BUILT_IN_IROUND): + newfn = mathfn_built_in (TREE_TYPE (arg), BUILT_IN_LROUND); + break; + + CASE_FLT_FN (BUILT_IN_IRINT): + newfn = mathfn_built_in (TREE_TYPE (arg), BUILT_IN_LRINT); + break; + + default: + break; + } + + if (newfn) + { + tree newcall = build_call_expr_loc (loc, newfn, 1, arg); + return fold_convert_loc (loc, + TREE_TYPE (TREE_TYPE (fndecl)), newcall); + } + } + /* Canonicalize llround (x) to lround (x) on LP64 targets where sizeof (long long) == sizeof (long). */ if (TYPE_PRECISION (long_long_integer_type_node) @@ -7183,16 +8013,19 @@ fold_builtin_int_roundingfn (location_t loc, tree fndecl, tree arg) switch (DECL_FUNCTION_CODE (fndecl)) { + CASE_FLT_FN (BUILT_IN_IFLOOR): CASE_FLT_FN (BUILT_IN_LFLOOR): CASE_FLT_FN (BUILT_IN_LLFLOOR): real_floor (&r, TYPE_MODE (ftype), &x); break; + CASE_FLT_FN (BUILT_IN_ICEIL): CASE_FLT_FN (BUILT_IN_LCEIL): CASE_FLT_FN (BUILT_IN_LLCEIL): real_ceil (&r, TYPE_MODE (ftype), &x); break; + CASE_FLT_FN (BUILT_IN_IROUND): CASE_FLT_FN (BUILT_IN_LROUND): CASE_FLT_FN (BUILT_IN_LLROUND): real_round (&r, TYPE_MODE (ftype), &x); @@ -7250,7 +8083,8 @@ fold_builtin_bitop (tree fndecl, tree arg) { hi = TREE_INT_CST_HIGH (arg); if (width < 2 * HOST_BITS_PER_WIDE_INT) - hi &= ~((HOST_WIDE_INT) (-1) >> (width - HOST_BITS_PER_WIDE_INT)); + hi &= ~((unsigned HOST_WIDE_INT) (-1) + << (width - HOST_BITS_PER_WIDE_INT)); } else { @@ -7288,6 +8122,26 @@ fold_builtin_bitop (tree fndecl, tree arg) result = width; break; + CASE_INT_FN (BUILT_IN_CLRSB): + if (width > HOST_BITS_PER_WIDE_INT + && (hi & ((unsigned HOST_WIDE_INT) 1 + << (width - HOST_BITS_PER_WIDE_INT - 1))) != 0) + { + hi = ~hi & ~((unsigned HOST_WIDE_INT) (-1) + << (width - HOST_BITS_PER_WIDE_INT - 1)); + lo = ~lo; + } + else if (width <= HOST_BITS_PER_WIDE_INT + && (lo & ((unsigned HOST_WIDE_INT) 1 << (width - 1))) != 0) + lo = ~lo & ~((unsigned HOST_WIDE_INT) (-1) << (width - 1)); + if (hi != 0) + result = width - floor_log2 (hi) - 2 - HOST_BITS_PER_WIDE_INT; + else if (lo != 0) + result = width - floor_log2 (lo) - 2; + else + result = width - 1; + break; + CASE_INT_FN (BUILT_IN_POPCOUNT): result = 0; while (lo) @@ -7812,8 +8666,7 @@ fold_builtin_memset (location_t loc, tree dest, tree c, tree len, length = tree_low_cst (len, 1); if (GET_MODE_SIZE (TYPE_MODE (etype)) != length - || get_pointer_alignment (dest, BIGGEST_ALIGNMENT) / BITS_PER_UNIT - < length) + || get_pointer_alignment (dest) / BITS_PER_UNIT < length) return NULL_TREE; if (length > HOST_BITS_PER_WIDE_INT / BITS_PER_UNIT) @@ -7864,7 +8717,7 @@ fold_builtin_bzero (location_t loc, tree dest, tree size, bool 
ignore) calling bzero instead of memset. */ return fold_builtin_memset (loc, dest, integer_zero_node, - fold_convert_loc (loc, sizetype, size), + fold_convert_loc (loc, size_type_node, size), void_type_node, ignore); } @@ -7903,8 +8756,8 @@ fold_builtin_memory_op (location_t loc, tree dest, tree src, if (endp == 3) { - src_align = get_pointer_alignment (src, BIGGEST_ALIGNMENT); - dest_align = get_pointer_alignment (dest, BIGGEST_ALIGNMENT); + src_align = get_pointer_alignment (src); + dest_align = get_pointer_alignment (dest); /* Both DEST and SRC must be pointer types. ??? This is what old code did. Is the testing for pointer types @@ -7918,7 +8771,7 @@ fold_builtin_memory_op (location_t loc, tree dest, tree src, && (MIN (src_align, dest_align) / BITS_PER_UNIT >= (unsigned HOST_WIDE_INT) tree_low_cst (len, 1)))) { - tree fn = implicit_built_in_decls[BUILT_IN_MEMCPY]; + tree fn = builtin_decl_implicit (BUILT_IN_MEMCPY); if (!fn) return NULL_TREE; return build_call_expr_loc (loc, fn, 3, dest, src, len); @@ -7977,7 +8830,7 @@ fold_builtin_memory_op (location_t loc, tree dest, tree src, else return NULL_TREE; - fn = implicit_built_in_decls[BUILT_IN_MEMCPY]; + fn = builtin_decl_implicit (BUILT_IN_MEMCPY); if (!fn) return NULL_TREE; return build_call_expr_loc (loc, fn, 3, dest, src, len); @@ -7996,7 +8849,7 @@ fold_builtin_memory_op (location_t loc, tree dest, tree src, if (!refs_may_alias_p_1 (&destr, &srcr, false)) { tree fn; - fn = implicit_built_in_decls[BUILT_IN_MEMCPY]; + fn = builtin_decl_implicit (BUILT_IN_MEMCPY); if (!fn) return NULL_TREE; return build_call_expr_loc (loc, fn, 3, dest, src, len); @@ -8052,8 +8905,8 @@ fold_builtin_memory_op (location_t loc, tree dest, tree src, || TREE_ADDRESSABLE (desttype)) return NULL_TREE; - src_align = get_pointer_alignment (src, BIGGEST_ALIGNMENT); - dest_align = get_pointer_alignment (dest, BIGGEST_ALIGNMENT); + src_align = get_pointer_alignment (src); + dest_align = get_pointer_alignment (dest); if (dest_align < TYPE_ALIGN (desttype) || src_align < TYPE_ALIGN (srctype)) return NULL_TREE; @@ -8144,8 +8997,7 @@ fold_builtin_memory_op (location_t loc, tree dest, tree src, len = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (len), len, ssize_int (1)); - len = fold_convert_loc (loc, sizetype, len); - dest = fold_build2_loc (loc, POINTER_PLUS_EXPR, TREE_TYPE (dest), dest, len); + dest = fold_build_pointer_plus_loc (loc, dest, len); dest = fold_convert_loc (loc, type, dest); if (expr) dest = omit_one_operand_loc (loc, type, dest, expr); @@ -8172,7 +9024,7 @@ fold_builtin_strcpy (location_t loc, tree fndecl, tree dest, tree src, tree len) if (optimize_function_for_size_p (cfun)) return NULL_TREE; - fn = implicit_built_in_decls[BUILT_IN_MEMCPY]; + fn = builtin_decl_implicit (BUILT_IN_MEMCPY); if (!fn) return NULL_TREE; @@ -8183,7 +9035,8 @@ fold_builtin_strcpy (location_t loc, tree fndecl, tree dest, tree src, tree len) return NULL_TREE; } - len = size_binop_loc (loc, PLUS_EXPR, len, ssize_int (1)); + len = fold_convert_loc (loc, size_type_node, len); + len = size_binop_loc (loc, PLUS_EXPR, len, build_int_cst (size_type_node, 1)); return fold_convert_loc (loc, TREE_TYPE (TREE_TYPE (fndecl)), build_call_expr_loc (loc, fn, 3, dest, src, len)); } @@ -8210,19 +9063,20 @@ fold_builtin_stpcpy (location_t loc, tree fndecl, tree dest, tree src) && !integer_zerop (len)) return NULL_TREE; - fn = implicit_built_in_decls[BUILT_IN_MEMCPY]; + fn = builtin_decl_implicit (BUILT_IN_MEMCPY); if (!fn) return NULL_TREE; - lenp1 = size_binop_loc (loc, PLUS_EXPR, len, 
ssize_int (1)); + lenp1 = size_binop_loc (loc, PLUS_EXPR, + fold_convert_loc (loc, size_type_node, len), + build_int_cst (size_type_node, 1)); /* We use dest twice in building our expression. Save it from multiple expansions. */ dest = builtin_save_expr (dest); call = build_call_expr_loc (loc, fn, 3, dest, src, lenp1); type = TREE_TYPE (TREE_TYPE (fndecl)); - len = fold_convert_loc (loc, sizetype, len); - dest = fold_build2_loc (loc, POINTER_PLUS_EXPR, TREE_TYPE (dest), dest, len); + dest = fold_build_pointer_plus_loc (loc, dest, len); dest = fold_convert_loc (loc, type, dest); dest = omit_one_operand_loc (loc, type, dest, call); return dest; @@ -8268,9 +9122,11 @@ fold_builtin_strncpy (location_t loc, tree fndecl, tree dest, return NULL_TREE; /* OK transform into builtin memcpy. */ - fn = implicit_built_in_decls[BUILT_IN_MEMCPY]; + fn = builtin_decl_implicit (BUILT_IN_MEMCPY); if (!fn) return NULL_TREE; + + len = fold_convert_loc (loc, size_type_node, len); return fold_convert_loc (loc, TREE_TYPE (TREE_TYPE (fndecl)), build_call_expr_loc (loc, fn, 3, dest, src, len)); } @@ -8304,13 +9160,12 @@ fold_builtin_memchr (location_t loc, tree arg1, tree arg2, tree len, tree type) if (target_char_cast (arg2, &c)) return NULL_TREE; - r = (char *) memchr (p1, c, tree_low_cst (len, 1)); + r = (const char *) memchr (p1, c, tree_low_cst (len, 1)); if (r == NULL) return build_int_cst (TREE_TYPE (arg1), 0); - tem = fold_build2_loc (loc, POINTER_PLUS_EXPR, TREE_TYPE (arg1), arg1, - size_int (r - p1)); + tem = fold_build_pointer_plus_hwi_loc (loc, arg1, r - p1); return fold_convert_loc (loc, type, tem); } return NULL_TREE; @@ -8569,8 +9424,9 @@ fold_builtin_signbit (location_t loc, tree arg, tree type) /* If ARG's format doesn't have signed zeros, return "arg < 0.0". */ if (!HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg)))) - return fold_build2_loc (loc, LT_EXPR, type, arg, - build_real (TREE_TYPE (arg), dconst0)); + return fold_convert (type, + fold_build2_loc (loc, LT_EXPR, boolean_type_node, arg, + build_real (TREE_TYPE (arg), dconst0))); return NULL_TREE; } @@ -9089,7 +9945,7 @@ fold_builtin_interclass_mathfn (location_t loc, tree fndecl, tree arg) CASE_FLT_FN (BUILT_IN_ISINF): { /* isinf(x) -> isgreater(fabs(x),DBL_MAX). */ - tree const isgr_fn = built_in_decls[BUILT_IN_ISGREATER]; + tree const isgr_fn = builtin_decl_explicit (BUILT_IN_ISGREATER); tree const type = TREE_TYPE (arg); REAL_VALUE_TYPE r; char buf[128]; @@ -9105,7 +9961,7 @@ fold_builtin_interclass_mathfn (location_t loc, tree fndecl, tree arg) case BUILT_IN_ISFINITE: { /* isfinite(x) -> islessequal(fabs(x),DBL_MAX). */ - tree const isle_fn = built_in_decls[BUILT_IN_ISLESSEQUAL]; + tree const isle_fn = builtin_decl_explicit (BUILT_IN_ISLESSEQUAL); tree const type = TREE_TYPE (arg); REAL_VALUE_TYPE r; char buf[128]; @@ -9128,8 +9984,8 @@ fold_builtin_interclass_mathfn (location_t loc, tree fndecl, tree arg) { /* isnormal(x) -> isgreaterequal(fabs(x),DBL_MIN) & islessequal(fabs(x),DBL_MAX). */ - tree const isle_fn = built_in_decls[BUILT_IN_ISLESSEQUAL]; - tree const isge_fn = built_in_decls[BUILT_IN_ISGREATEREQUAL]; + tree const isle_fn = builtin_decl_explicit (BUILT_IN_ISLESSEQUAL); + tree const isge_fn = builtin_decl_explicit (BUILT_IN_ISGREATEREQUAL); tree const type = TREE_TYPE (arg); REAL_VALUE_TYPE rmax, rmin; char buf[128]; @@ -9190,7 +10046,7 @@ fold_builtin_classify (location_t loc, tree fndecl, tree arg, int builtin_index) 1. So e.g. "if (isinf_sign(x))" would be folded to just "if (isinf(x) ? 1 : 0)" which becomes "if (isinf(x))". 
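
The interclass folds above replace classification builtins with comparisons against the type's extrema; isgreater/islessequal are used rather than plain comparisons so that quiet NaNs raise no spurious exceptions. A source-level sketch for double, with illustrative helper names:

#include <float.h>
#include <math.h>

static inline int
isinf_fold (double x)
{
  /* isinf (x) -> isgreater (fabs (x), DBL_MAX).  */
  return isgreater (fabs (x), DBL_MAX);
}

static inline int
isfinite_fold (double x)
{
  /* isfinite (x) -> islessequal (fabs (x), DBL_MAX).  */
  return islessequal (fabs (x), DBL_MAX);
}
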
*/ tree signbit_fn = mathfn_built_in_1 (TREE_TYPE (arg), BUILT_IN_SIGNBIT, 0); - tree isinf_fn = built_in_decls[BUILT_IN_ISINF]; + tree isinf_fn = builtin_decl_explicit (BUILT_IN_ISINF); tree tmp = NULL_TREE; arg = builtin_save_expr (arg); @@ -9703,14 +10559,18 @@ fold_builtin_1 (location_t loc, tree fndecl, tree arg0, bool ignore) CASE_FLT_FN (BUILT_IN_RINT): return fold_trunc_transparent_mathfn (loc, fndecl, arg0); + CASE_FLT_FN (BUILT_IN_ICEIL): CASE_FLT_FN (BUILT_IN_LCEIL): CASE_FLT_FN (BUILT_IN_LLCEIL): CASE_FLT_FN (BUILT_IN_LFLOOR): + CASE_FLT_FN (BUILT_IN_IFLOOR): CASE_FLT_FN (BUILT_IN_LLFLOOR): + CASE_FLT_FN (BUILT_IN_IROUND): CASE_FLT_FN (BUILT_IN_LROUND): CASE_FLT_FN (BUILT_IN_LLROUND): return fold_builtin_int_roundingfn (loc, fndecl, arg0); + CASE_FLT_FN (BUILT_IN_IRINT): CASE_FLT_FN (BUILT_IN_LRINT): CASE_FLT_FN (BUILT_IN_LLRINT): return fold_fixed_mathfn (loc, fndecl, arg0); @@ -9722,6 +10582,7 @@ fold_builtin_1 (location_t loc, tree fndecl, tree arg0, bool ignore) CASE_INT_FN (BUILT_IN_FFS): CASE_INT_FN (BUILT_IN_CLZ): CASE_INT_FN (BUILT_IN_CTZ): + CASE_INT_FN (BUILT_IN_CLRSB): CASE_INT_FN (BUILT_IN_POPCOUNT): CASE_INT_FN (BUILT_IN_PARITY): return fold_builtin_bitop (fndecl, arg0); @@ -9908,7 +10769,7 @@ fold_builtin_2 (location_t loc, tree fndecl, tree arg0, tree arg1, bool ignore) case BUILT_IN_STPCPY: if (ignore) { - tree fn = implicit_built_in_decls[BUILT_IN_STRCPY]; + tree fn = builtin_decl_implicit (BUILT_IN_STRCPY); if (!fn) break; @@ -9993,6 +10854,12 @@ fold_builtin_2 (location_t loc, tree fndecl, tree arg0, tree arg1, bool ignore) return fold_builtin_fprintf (loc, fndecl, arg0, arg1, NULL_TREE, ignore, fcode); + case BUILT_IN_ATOMIC_ALWAYS_LOCK_FREE: + return fold_builtin_atomic_always_lock_free (arg0, arg1); + + case BUILT_IN_ATOMIC_IS_LOCK_FREE: + return fold_builtin_atomic_is_lock_free (arg0, arg1); + default: break; } @@ -10127,7 +10994,9 @@ fold_builtin_4 (location_t loc, tree fndecl, DECL_FUNCTION_CODE (fndecl)); case BUILT_IN_STRNCPY_CHK: - return fold_builtin_strncpy_chk (loc, arg0, arg1, arg2, arg3, NULL_TREE); + case BUILT_IN_STPNCPY_CHK: + return fold_builtin_stxncpy_chk (loc, arg0, arg1, arg2, arg3, NULL_TREE, + ignore, fcode); case BUILT_IN_STRNCAT_CHK: return fold_builtin_strncat_chk (loc, fndecl, arg0, arg1, arg2, arg3); @@ -10246,7 +11115,7 @@ fold_builtin_varargs (location_t loc, tree fndecl, tree exp, been inlined, otherwise e.g. -D_FORTIFY_SOURCE checking might not be performed. */ -static bool +bool avoid_folding_inline_builtin (tree fndecl) { return (DECL_DECLARED_INLINE_P (fndecl) @@ -10696,8 +11565,7 @@ fold_builtin_strstr (location_t loc, tree s1, tree s2, tree type) return build_int_cst (TREE_TYPE (s1), 0); /* Return an offset into the constant string argument. */ - tem = fold_build2_loc (loc, POINTER_PLUS_EXPR, TREE_TYPE (s1), - s1, size_int (r - p1)); + tem = fold_build_pointer_plus_hwi_loc (loc, s1, r - p1); return fold_convert_loc (loc, type, tem); } @@ -10709,7 +11577,7 @@ fold_builtin_strstr (location_t loc, tree s1, tree s2, tree type) if (p2[1] != '\0') return NULL_TREE; - fn = implicit_built_in_decls[BUILT_IN_STRCHR]; + fn = builtin_decl_implicit (BUILT_IN_STRCHR); if (!fn) return NULL_TREE; @@ -10767,8 +11635,7 @@ fold_builtin_strchr (location_t loc, tree s1, tree s2, tree type) return build_int_cst (TREE_TYPE (s1), 0); /* Return an offset into the constant string argument. 
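
fold_builtin_strstr and fold_builtin_strchr above resolve searches in constant strings at compile time, and a one-character strstr needle is shrunk into a strchr call. Illustrative examples (hypothetical function names):

const char *
find_in_constant (void)
{
  /* Folds to an offset into the literal, i.e. "tins".  */
  return __builtin_strstr ("builtins", "tin");
}

const char *
find_single_char (const char *s)
{
  /* One-character needle: becomes strchr (s, 'x').  */
  return __builtin_strstr (s, "x");
}
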
*/ - tem = fold_build2_loc (loc, POINTER_PLUS_EXPR, TREE_TYPE (s1), - s1, size_int (r - p1)); + tem = fold_build_pointer_plus_hwi_loc (loc, s1, r - p1); return fold_convert_loc (loc, type, tem); } return NULL_TREE; @@ -10823,15 +11690,14 @@ fold_builtin_strrchr (location_t loc, tree s1, tree s2, tree type) return build_int_cst (TREE_TYPE (s1), 0); /* Return an offset into the constant string argument. */ - tem = fold_build2_loc (loc, POINTER_PLUS_EXPR, TREE_TYPE (s1), - s1, size_int (r - p1)); + tem = fold_build_pointer_plus_hwi_loc (loc, s1, r - p1); return fold_convert_loc (loc, type, tem); } if (! integer_zerop (s2)) return NULL_TREE; - fn = implicit_built_in_decls[BUILT_IN_STRCHR]; + fn = builtin_decl_implicit (BUILT_IN_STRCHR); if (!fn) return NULL_TREE; @@ -10883,8 +11749,7 @@ fold_builtin_strpbrk (location_t loc, tree s1, tree s2, tree type) return build_int_cst (TREE_TYPE (s1), 0); /* Return an offset into the constant string argument. */ - tem = fold_build2_loc (loc, POINTER_PLUS_EXPR, TREE_TYPE (s1), - s1, size_int (r - p1)); + tem = fold_build_pointer_plus_hwi_loc (loc, s1, r - p1); return fold_convert_loc (loc, type, tem); } @@ -10896,7 +11761,7 @@ fold_builtin_strpbrk (location_t loc, tree s1, tree s2, tree type) if (p2[1] != '\0') return NULL_TREE; /* Really call strpbrk. */ - fn = implicit_built_in_decls[BUILT_IN_STRCHR]; + fn = builtin_decl_implicit (BUILT_IN_STRCHR); if (!fn) return NULL_TREE; @@ -10943,8 +11808,8 @@ fold_builtin_strcat (location_t loc ATTRIBUTE_UNUSED, tree dst, tree src) { /* See if we can store by pieces into (dst + strlen(dst)). */ tree newdst, call; - tree strlen_fn = implicit_built_in_decls[BUILT_IN_STRLEN]; - tree strcpy_fn = implicit_built_in_decls[BUILT_IN_STRCPY]; + tree strlen_fn = builtin_decl_implicit (BUILT_IN_STRLEN); + tree strcpy_fn = builtin_decl_implicit (BUILT_IN_STRCPY); if (!strlen_fn || !strcpy_fn) return NULL_TREE; @@ -10968,8 +11833,7 @@ fold_builtin_strcat (location_t loc ATTRIBUTE_UNUSED, tree dst, tree src) newdst = build_call_expr_loc (loc, strlen_fn, 1, dst); /* Create (dst p+ strlen (dst)). */ - newdst = fold_build2_loc (loc, POINTER_PLUS_EXPR, - TREE_TYPE (dst), dst, newdst); + newdst = fold_build_pointer_plus_loc (loc, dst, newdst); newdst = builtin_save_expr (newdst); call = build_call_expr_loc (loc, strcpy_fn, 2, newdst, src); @@ -11018,7 +11882,7 @@ fold_builtin_strncat (location_t loc, tree dst, tree src, tree len) if (TREE_CODE (len) == INTEGER_CST && p && compare_tree_int (len, strlen (p)) >= 0) { - tree fn = implicit_built_in_decls[BUILT_IN_STRCAT]; + tree fn = builtin_decl_implicit (BUILT_IN_STRCAT); /* If the replacement _DECL isn't initialized, don't do the transformation. */ @@ -11063,7 +11927,7 @@ fold_builtin_strspn (location_t loc, tree s1, tree s2) if (p1 && p2) { const size_t r = strspn (p1, p2); - return size_int (r); + return build_int_cst (size_type_node, r); } /* If either argument is "", return NULL_TREE. */ @@ -11108,7 +11972,7 @@ fold_builtin_strcspn (location_t loc, tree s1, tree s2) if (p1 && p2) { const size_t r = strcspn (p1, p2); - return size_int (r); + return build_int_cst (size_type_node, r); } /* If the first argument is "", return NULL_TREE. */ @@ -11123,7 +11987,7 @@ fold_builtin_strcspn (location_t loc, tree s1, tree s2) /* If the second argument is "", return __builtin_strlen(s1). */ if (p2 && *p2 == '\0') { - tree fn = implicit_built_in_decls[BUILT_IN_STRLEN]; + tree fn = builtin_decl_implicit (BUILT_IN_STRLEN); /* If the replacement _DECL isn't initialized, don't do the transformation. 
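
fold_builtin_strcat above rewrites an append of a known constant string so that the bytes can be stored by pieces: the end of the destination is located with strlen and the copy is done with strcpy. The transformation in source form, as a sketch:

char *
strcat_fold (char *dst)
{
  /* strcat (dst, "abc") -> strcpy (dst + strlen (dst), "abc"),
     i.e. dst p+ strlen (dst) in tree terms; the fold only fires when
     the source string is a compile-time constant.  */
  __builtin_strcpy (dst + __builtin_strlen (dst), "abc");
  return dst;
}
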
*/ @@ -11149,10 +12013,12 @@ fold_builtin_fputs (location_t loc, tree arg0, tree arg1, { /* If we're using an unlocked function, assume the other unlocked functions exist explicitly. */ - tree const fn_fputc = unlocked ? built_in_decls[BUILT_IN_FPUTC_UNLOCKED] - : implicit_built_in_decls[BUILT_IN_FPUTC]; - tree const fn_fwrite = unlocked ? built_in_decls[BUILT_IN_FWRITE_UNLOCKED] - : implicit_built_in_decls[BUILT_IN_FWRITE]; + tree const fn_fputc = (unlocked + ? builtin_decl_explicit (BUILT_IN_FPUTC_UNLOCKED) + : builtin_decl_implicit (BUILT_IN_FPUTC)); + tree const fn_fwrite = (unlocked + ? builtin_decl_explicit (BUILT_IN_FWRITE_UNLOCKED) + : builtin_decl_implicit (BUILT_IN_FWRITE)); /* If the return value is used, don't do the transformation. */ if (!ignore) @@ -11346,7 +12212,7 @@ fold_builtin_sprintf (location_t loc, tree dest, tree fmt, /* If the format doesn't contain % args or %%, use strcpy. */ if (strchr (fmt_str, target_percent) == NULL) { - tree fn = implicit_built_in_decls[BUILT_IN_STRCPY]; + tree fn = builtin_decl_implicit (BUILT_IN_STRCPY); if (!fn) return NULL_TREE; @@ -11366,7 +12232,7 @@ fold_builtin_sprintf (location_t loc, tree dest, tree fmt, else if (fmt_str && strcmp (fmt_str, target_percent_s) == 0) { tree fn; - fn = implicit_built_in_decls[BUILT_IN_STRCPY]; + fn = builtin_decl_implicit (BUILT_IN_STRCPY); if (!fn) return NULL_TREE; @@ -11388,7 +12254,7 @@ fold_builtin_sprintf (location_t loc, tree dest, tree fmt, if (call && retval) { retval = fold_convert_loc - (loc, TREE_TYPE (TREE_TYPE (implicit_built_in_decls[BUILT_IN_SPRINTF])), + (loc, TREE_TYPE (TREE_TYPE (builtin_decl_implicit (BUILT_IN_SPRINTF))), retval); return build2 (COMPOUND_EXPR, TREE_TYPE (retval), call, retval); } @@ -11441,7 +12307,7 @@ fold_builtin_snprintf (location_t loc, tree dest, tree destsize, tree fmt, /* If the format doesn't contain % args or %%, use strcpy. */ if (strchr (fmt_str, target_percent) == NULL) { - tree fn = implicit_built_in_decls[BUILT_IN_STRCPY]; + tree fn = builtin_decl_implicit (BUILT_IN_STRCPY); size_t len = strlen (fmt_str); /* Don't optimize snprintf (buf, 4, "abc", ptr++). */ @@ -11473,7 +12339,7 @@ fold_builtin_snprintf (location_t loc, tree dest, tree destsize, tree fmt, /* If the format is "%s", use strcpy if the result isn't used. */ else if (fmt_str && strcmp (fmt_str, target_percent_s) == 0) { - tree fn = implicit_built_in_decls[BUILT_IN_STRCPY]; + tree fn = builtin_decl_implicit (BUILT_IN_STRCPY); unsigned HOST_WIDE_INT origlen; /* Don't crash on snprintf (str1, cst, "%s"). 
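
fold_builtin_sprintf and fold_builtin_snprintf above recognise two degenerate formats: a string containing no % directives becomes a plain copy, and a bare "%s" copies the argument directly (for snprintf only when the destination size is known to be large enough). Roughly, as a sketch:

void
sprintf_folds (char *buf, const char *name)
{
  __builtin_sprintf (buf, "hello world"); /* -> strcpy (buf, "hello world") */
  __builtin_sprintf (buf, "%s", name);    /* -> strcpy (buf, name)          */
}
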
*/ @@ -11508,7 +12374,7 @@ fold_builtin_snprintf (location_t loc, tree dest, tree destsize, tree fmt, if (call && retval) { - tree fn = built_in_decls[BUILT_IN_SNPRINTF]; + tree fn = builtin_decl_explicit (BUILT_IN_SNPRINTF); retval = fold_convert_loc (loc, TREE_TYPE (TREE_TYPE (fn)), retval); return build2 (COMPOUND_EXPR, TREE_TYPE (retval), call, retval); } @@ -11596,16 +12462,16 @@ expand_builtin_memory_chk (tree exp, rtx target, enum machine_mode mode, switch (fcode) { case BUILT_IN_MEMCPY_CHK: - fn = built_in_decls[BUILT_IN_MEMCPY]; + fn = builtin_decl_explicit (BUILT_IN_MEMCPY); break; case BUILT_IN_MEMPCPY_CHK: - fn = built_in_decls[BUILT_IN_MEMPCPY]; + fn = builtin_decl_explicit (BUILT_IN_MEMPCPY); break; case BUILT_IN_MEMMOVE_CHK: - fn = built_in_decls[BUILT_IN_MEMMOVE]; + fn = builtin_decl_explicit (BUILT_IN_MEMMOVE); break; case BUILT_IN_MEMSET_CHK: - fn = built_in_decls[BUILT_IN_MEMSET]; + fn = builtin_decl_explicit (BUILT_IN_MEMSET); break; default: break; @@ -11623,8 +12489,7 @@ expand_builtin_memory_chk (tree exp, rtx target, enum machine_mode mode, return NULL_RTX; else { - unsigned int dest_align - = get_pointer_alignment (dest, BIGGEST_ALIGNMENT); + unsigned int dest_align = get_pointer_alignment (dest); /* If DEST is not a pointer type, call the normal function. */ if (dest_align == 0) @@ -11642,15 +12507,14 @@ expand_builtin_memory_chk (tree exp, rtx target, enum machine_mode mode, return expand_expr (dest, target, mode, EXPAND_NORMAL); } - expr = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (dest), dest, len); + expr = fold_build_pointer_plus (dest, len); return expand_expr (expr, target, mode, EXPAND_NORMAL); } /* __memmove_chk special case. */ if (fcode == BUILT_IN_MEMMOVE_CHK) { - unsigned int src_align - = get_pointer_alignment (src, BIGGEST_ALIGNMENT); + unsigned int src_align = get_pointer_alignment (src); if (src_align == 0) return NULL_RTX; @@ -11659,7 +12523,7 @@ expand_builtin_memory_chk (tree exp, rtx target, enum machine_mode mode, normal __memcpy_chk. 
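
The object-size checking expanders above are what _FORTIFY_SOURCE lowers to: the front end passes __builtin_object_size of the destination as an extra argument, and when that size is unknown (-1) or provably sufficient, the checked call decays to the ordinary routine. A sketch of the checked form:

char buf[8];

void
copy4 (const char *s)
{
  /* With a known 8-byte destination and a 4-byte copy, this folds to a
     plain memcpy; an undersized destination keeps the runtime check.  */
  __builtin___memcpy_chk (buf, s, 4, __builtin_object_size (buf, 0));
}
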
*/ if (readonly_data_expr (src)) { - tree fn = built_in_decls[BUILT_IN_MEMCPY_CHK]; + tree fn = builtin_decl_explicit (BUILT_IN_MEMCPY_CHK); if (!fn) return NULL_RTX; fn = build_call_nofold_loc (EXPR_LOCATION (exp), fn, 4, @@ -11695,6 +12559,7 @@ maybe_emit_chk_warning (tree exp, enum built_in_function fcode) break; case BUILT_IN_STRNCAT_CHK: case BUILT_IN_STRNCPY_CHK: + case BUILT_IN_STPNCPY_CHK: len = CALL_EXPR_ARG (exp, 2); size = CALL_EXPR_ARG (exp, 3); break; @@ -11814,11 +12679,11 @@ maybe_emit_free_warning (tree exp) return; if (SSA_VAR_P (arg)) - warning_at (tree_nonartificial_location (exp), - 0, "%Kattempt to free a non-heap object %qD", exp, arg); + warning_at (tree_nonartificial_location (exp), OPT_Wfree_nonheap_object, + "%Kattempt to free a non-heap object %qD", exp, arg); else - warning_at (tree_nonartificial_location (exp), - 0, "%Kattempt to free a non-heap object", exp); + warning_at (tree_nonartificial_location (exp), OPT_Wfree_nonheap_object, + "%Kattempt to free a non-heap object", exp); } /* Fold a call to __builtin_object_size with arguments PTR and OST, @@ -11902,8 +12767,7 @@ fold_builtin_memory_chk (location_t loc, tree fndecl, dest, len); else { - tree temp = fold_build2_loc (loc, POINTER_PLUS_EXPR, TREE_TYPE (dest), - dest, len); + tree temp = fold_build_pointer_plus_loc (loc, dest, len); return fold_convert_loc (loc, TREE_TYPE (TREE_TYPE (fndecl)), temp); } } @@ -11924,7 +12788,7 @@ fold_builtin_memory_chk (location_t loc, tree fndecl, { /* (void) __mempcpy_chk () can be optimized into (void) __memcpy_chk (). */ - fn = built_in_decls[BUILT_IN_MEMCPY_CHK]; + fn = builtin_decl_explicit (BUILT_IN_MEMCPY_CHK); if (!fn) return NULL_TREE; @@ -11946,16 +12810,16 @@ fold_builtin_memory_chk (location_t loc, tree fndecl, switch (fcode) { case BUILT_IN_MEMCPY_CHK: - fn = built_in_decls[BUILT_IN_MEMCPY]; + fn = builtin_decl_explicit (BUILT_IN_MEMCPY); break; case BUILT_IN_MEMPCPY_CHK: - fn = built_in_decls[BUILT_IN_MEMPCPY]; + fn = builtin_decl_explicit (BUILT_IN_MEMPCPY); break; case BUILT_IN_MEMMOVE_CHK: - fn = built_in_decls[BUILT_IN_MEMMOVE]; + fn = builtin_decl_explicit (BUILT_IN_MEMMOVE); break; case BUILT_IN_MEMSET_CHK: - fn = built_in_decls[BUILT_IN_MEMSET]; + fn = builtin_decl_explicit (BUILT_IN_MEMSET); break; default: break; @@ -12010,7 +12874,7 @@ fold_builtin_stxcpy_chk (location_t loc, tree fndecl, tree dest, /* If return value of __stpcpy_chk is ignored, optimize into __strcpy_chk. */ - fn = built_in_decls[BUILT_IN_STRCPY_CHK]; + fn = builtin_decl_explicit (BUILT_IN_STRCPY_CHK); if (!fn) return NULL_TREE; @@ -12022,11 +12886,13 @@ fold_builtin_stxcpy_chk (location_t loc, tree fndecl, tree dest, /* If c_strlen returned something, but not a constant, transform __strcpy_chk into __memcpy_chk. */ - fn = built_in_decls[BUILT_IN_MEMCPY_CHK]; + fn = builtin_decl_explicit (BUILT_IN_MEMCPY_CHK); if (!fn) return NULL_TREE; - len = size_binop_loc (loc, PLUS_EXPR, len, ssize_int (1)); + len = fold_convert_loc (loc, size_type_node, len); + len = size_binop_loc (loc, PLUS_EXPR, len, + build_int_cst (size_type_node, 1)); return fold_convert_loc (loc, TREE_TYPE (TREE_TYPE (fndecl)), build_call_expr_loc (loc, fn, 4, dest, src, len, size)); @@ -12040,21 +12906,23 @@ fold_builtin_stxcpy_chk (location_t loc, tree fndecl, tree dest, } /* If __builtin_st{r,p}cpy_chk is used, assume st{r,p}cpy is available. */ - fn = built_in_decls[fcode == BUILT_IN_STPCPY_CHK - ? BUILT_IN_STPCPY : BUILT_IN_STRCPY]; + fn = builtin_decl_explicit (fcode == BUILT_IN_STPCPY_CHK + ? 
BUILT_IN_STPCPY : BUILT_IN_STRCPY); if (!fn) return NULL_TREE; return build_call_expr_loc (loc, fn, 2, dest, src); } -/* Fold a call to the __strncpy_chk builtin. DEST, SRC, LEN, and SIZE +/* Fold a call to the __st{r,p}ncpy_chk builtin. DEST, SRC, LEN, and SIZE are the arguments to the call. If MAXLEN is not NULL, it is maximum - length passed as third argument. */ + length passed as third argument. IGNORE is true if return value can be + ignored. FCODE is the BUILT_IN_* code of the builtin. */ tree -fold_builtin_strncpy_chk (location_t loc, tree dest, tree src, - tree len, tree size, tree maxlen) +fold_builtin_stxncpy_chk (location_t loc, tree dest, tree src, + tree len, tree size, tree maxlen, bool ignore, + enum built_in_function fcode) { tree fn; @@ -12064,6 +12932,15 @@ fold_builtin_strncpy_chk (location_t loc, tree dest, tree src, || !validate_arg (size, INTEGER_TYPE)) return NULL_TREE; + if (fcode == BUILT_IN_STPNCPY_CHK && ignore) + { + /* If return value of __stpncpy_chk is ignored, + optimize into __strncpy_chk. */ + fn = builtin_decl_explicit (BUILT_IN_STRNCPY_CHK); + if (fn) + return build_call_expr_loc (loc, fn, 4, dest, src, len, size); + } + if (! host_integerp (size, 1)) return NULL_TREE; @@ -12084,8 +12961,9 @@ fold_builtin_strncpy_chk (location_t loc, tree dest, tree src, return NULL_TREE; } - /* If __builtin_strncpy_chk is used, assume strncpy is available. */ - fn = built_in_decls[BUILT_IN_STRNCPY]; + /* If __builtin_st{r,p}ncpy_chk is used, assume st{r,p}ncpy is available. */ + fn = builtin_decl_explicit (fcode == BUILT_IN_STPNCPY_CHK + ? BUILT_IN_STPNCPY : BUILT_IN_STRNCPY); if (!fn) return NULL_TREE; @@ -12116,7 +12994,7 @@ fold_builtin_strcat_chk (location_t loc, tree fndecl, tree dest, return NULL_TREE; /* If __builtin_strcat_chk is used, assume strcat is available. */ - fn = built_in_decls[BUILT_IN_STRCAT]; + fn = builtin_decl_explicit (BUILT_IN_STRCAT); if (!fn) return NULL_TREE; @@ -12158,7 +13036,7 @@ fold_builtin_strncat_chk (location_t loc, tree fndecl, && ! tree_int_cst_lt (len, src_len)) { /* If LEN >= strlen (SRC), optimize into __strcat_chk. */ - fn = built_in_decls[BUILT_IN_STRCAT_CHK]; + fn = builtin_decl_explicit (BUILT_IN_STRCAT_CHK); if (!fn) return NULL_TREE; @@ -12168,7 +13046,7 @@ fold_builtin_strncat_chk (location_t loc, tree fndecl, } /* If __builtin_strncat_chk is used, assume strncat is available. */ - fn = built_in_decls[BUILT_IN_STRNCAT]; + fn = builtin_decl_explicit (BUILT_IN_STRNCAT); if (!fn) return NULL_TREE; @@ -12259,8 +13137,8 @@ fold_builtin_sprintf_chk_1 (location_t loc, int nargs, tree *args, } /* If __builtin_{,v}sprintf_chk is used, assume {,v}sprintf is available. */ - fn = built_in_decls[fcode == BUILT_IN_VSPRINTF_CHK - ? BUILT_IN_VSPRINTF : BUILT_IN_SPRINTF]; + fn = builtin_decl_explicit (fcode == BUILT_IN_VSPRINTF_CHK + ? BUILT_IN_VSPRINTF : BUILT_IN_SPRINTF); if (!fn) return NULL_TREE; @@ -12348,8 +13226,8 @@ fold_builtin_snprintf_chk_1 (location_t loc, int nargs, tree *args, /* If __builtin_{,v}snprintf_chk is used, assume {,v}snprintf is available. */ - fn = built_in_decls[fcode == BUILT_IN_VSNPRINTF_CHK - ? BUILT_IN_VSNPRINTF : BUILT_IN_SNPRINTF]; + fn = builtin_decl_explicit (fcode == BUILT_IN_VSNPRINTF_CHK + ? BUILT_IN_VSNPRINTF : BUILT_IN_SNPRINTF); if (!fn) return NULL_TREE; @@ -12403,13 +13281,13 @@ fold_builtin_printf (location_t loc, tree fndecl, tree fmt, { /* If we're using an unlocked function, assume the other unlocked functions exist explicitly. 
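
fold_builtin_printf, which begins in the hunk above, maps trivial formats onto cheaper calls, choosing the _unlocked variants when the original call was already unlocked. The classic folds, in source terms (sketch):

void
printf_folds (int c, const char *s)
{
  __builtin_printf ("hello\n"); /* -> puts ("hello") */
  __builtin_printf ("%c", c);   /* -> putchar (c)    */
  __builtin_printf ("%s\n", s); /* -> puts (s)       */
}
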
*/ - fn_putchar = built_in_decls[BUILT_IN_PUTCHAR_UNLOCKED]; - fn_puts = built_in_decls[BUILT_IN_PUTS_UNLOCKED]; + fn_putchar = builtin_decl_explicit (BUILT_IN_PUTCHAR_UNLOCKED); + fn_puts = builtin_decl_explicit (BUILT_IN_PUTS_UNLOCKED); } else { - fn_putchar = implicit_built_in_decls[BUILT_IN_PUTCHAR]; - fn_puts = implicit_built_in_decls[BUILT_IN_PUTS]; + fn_putchar = builtin_decl_implicit (BUILT_IN_PUTCHAR); + fn_puts = builtin_decl_implicit (BUILT_IN_PUTS); } if (!init_target_chars ()) @@ -12554,13 +13432,13 @@ fold_builtin_fprintf (location_t loc, tree fndecl, tree fp, { /* If we're using an unlocked function, assume the other unlocked functions exist explicitly. */ - fn_fputc = built_in_decls[BUILT_IN_FPUTC_UNLOCKED]; - fn_fputs = built_in_decls[BUILT_IN_FPUTS_UNLOCKED]; + fn_fputc = builtin_decl_explicit (BUILT_IN_FPUTC_UNLOCKED); + fn_fputs = builtin_decl_explicit (BUILT_IN_FPUTS_UNLOCKED); } else { - fn_fputc = implicit_built_in_decls[BUILT_IN_FPUTC]; - fn_fputs = implicit_built_in_decls[BUILT_IN_FPUTS]; + fn_fputc = builtin_decl_implicit (BUILT_IN_FPUTC); + fn_fputs = builtin_decl_implicit (BUILT_IN_FPUTS); } if (!init_target_chars ()) @@ -13362,7 +14240,7 @@ fold_call_stmt (gimple stmt, bool ignore) return NULL_TREE; } -/* Look up the function in built_in_decls that corresponds to DECL +/* Look up the function in builtin_decl that corresponds to DECL and set ASMSPEC as its user assembler name. DECL must be a function decl that declares a builtin. */ @@ -13374,7 +14252,7 @@ set_builtin_user_assembler_name (tree decl, const char *asmspec) && DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL && asmspec != 0); - builtin = built_in_decls [DECL_FUNCTION_CODE (decl)]; + builtin = builtin_decl_explicit (DECL_FUNCTION_CODE (decl)); set_user_assembler_name (builtin, asmspec); switch (DECL_FUNCTION_CODE (decl)) { @@ -13422,6 +14300,7 @@ is_simple_builtin (tree decl) case BUILT_IN_OBJECT_SIZE: case BUILT_IN_UNREACHABLE: /* Simple register moves or loads from stack. */ + case BUILT_IN_ASSUME_ALIGNED: case BUILT_IN_RETURN_ADDRESS: case BUILT_IN_EXTRACT_RETURN_ADDR: case BUILT_IN_FROB_RETURN_ADDR: @@ -13459,6 +14338,7 @@ is_inexpensive_builtin (tree decl) { case BUILT_IN_ABS: case BUILT_IN_ALLOCA: + case BUILT_IN_ALLOCA_WITH_ALIGN: case BUILT_IN_BSWAP32: case BUILT_IN_BSWAP64: case BUILT_IN_CLZ: