#include "tree.h"
#include "cp-tree.h"
#include "flags.h"
-#include "real.h"
-#include "rtl.h"
#include "toplev.h"
-#include "insn-config.h"
-#include "integrate.h"
#include "tree-inline.h"
#include "debug.h"
-#include "target.h"
#include "convert.h"
-#include "tree-flow.h"
+#include "cgraph.h"
+#include "splay-tree.h"
+#include "gimple.h" /* gimple_has_body_p */
static tree bot_manip (tree *, int *, void *);
static tree bot_replace (tree *, int *, void *);
-static tree build_cplus_array_type_1 (tree, tree);
static int list_hash_eq (const void *, const void *);
static hashval_t list_hash_pieces (tree, tree, tree);
static hashval_t list_hash (const void *);
-static cp_lvalue_kind lvalue_p_1 (const_tree);
static tree build_target_expr (tree, tree);
static tree count_trees_r (tree *, int *, void *);
static tree verify_stmt_tree_r (tree *, int *, void *);
/* If REF is an lvalue, returns the kind of lvalue that REF is.
Otherwise, returns clk_none. */
-static cp_lvalue_kind
-lvalue_p_1 (const_tree ref)
+cp_lvalue_kind
+lvalue_kind (const_tree ref)
{
cp_lvalue_kind op1_lvalue_kind = clk_none;
cp_lvalue_kind op2_lvalue_kind = clk_none;
if (TREE_CODE (ref) == INDIRECT_REF
&& TREE_CODE (TREE_TYPE (TREE_OPERAND (ref, 0)))
== REFERENCE_TYPE)
- return lvalue_p_1 (TREE_OPERAND (ref, 0));
+ return lvalue_kind (TREE_OPERAND (ref, 0));
if (TREE_CODE (TREE_TYPE (ref)) == REFERENCE_TYPE)
{
case WITH_CLEANUP_EXPR:
case REALPART_EXPR:
case IMAGPART_EXPR:
- return lvalue_p_1 (TREE_OPERAND (ref, 0));
+ return lvalue_kind (TREE_OPERAND (ref, 0));
case COMPONENT_REF:
- op1_lvalue_kind = lvalue_p_1 (TREE_OPERAND (ref, 0));
+ op1_lvalue_kind = lvalue_kind (TREE_OPERAND (ref, 0));
/* Look at the member designator. */
if (!op1_lvalue_kind)
;
if (TREE_SIDE_EFFECTS (TREE_OPERAND (ref, 0))
|| TREE_SIDE_EFFECTS (TREE_OPERAND (ref, 1)))
return clk_none;
- op1_lvalue_kind = lvalue_p_1 (TREE_OPERAND (ref, 0));
- op2_lvalue_kind = lvalue_p_1 (TREE_OPERAND (ref, 1));
+ op1_lvalue_kind = lvalue_kind (TREE_OPERAND (ref, 0));
+ op2_lvalue_kind = lvalue_kind (TREE_OPERAND (ref, 1));
break;
case COND_EXPR:
- op1_lvalue_kind = lvalue_p_1 (TREE_OPERAND (ref, 1)
+ op1_lvalue_kind = lvalue_kind (TREE_OPERAND (ref, 1)
? TREE_OPERAND (ref, 1)
: TREE_OPERAND (ref, 0));
- op2_lvalue_kind = lvalue_p_1 (TREE_OPERAND (ref, 2));
+ op2_lvalue_kind = lvalue_kind (TREE_OPERAND (ref, 2));
break;
case MODIFY_EXPR:
return clk_ordinary;
case COMPOUND_EXPR:
- return lvalue_p_1 (TREE_OPERAND (ref, 1));
+ return lvalue_kind (TREE_OPERAND (ref, 1));
case TARGET_EXPR:
return clk_class;
with a BASELINK. */
/* This CONST_CAST is okay because BASELINK_FUNCTIONS returns
its argument unmodified and we assign it to a const_tree. */
- return lvalue_p_1 (BASELINK_FUNCTIONS (CONST_CAST_TREE (ref)));
+ return lvalue_kind (BASELINK_FUNCTIONS (CONST_CAST_TREE (ref)));
case NON_DEPENDENT_EXPR:
/* We must consider NON_DEPENDENT_EXPRs to be lvalues so that
computes the C++ definition of lvalue. */
cp_lvalue_kind
-real_lvalue_p (tree ref)
+real_lvalue_p (const_tree ref)
{
- cp_lvalue_kind kind = lvalue_p_1 (ref);
+ cp_lvalue_kind kind = lvalue_kind (ref);
if (kind & (clk_rvalueref|clk_class))
return clk_none;
else
bool
lvalue_p (const_tree ref)
{
- return (lvalue_p_1 (ref) != clk_none);
+ return (lvalue_kind (ref) != clk_none);
}
/* This differs from real_lvalue_p in that rvalues formed by dereferencing
bool
lvalue_or_rvalue_with_address_p (const_tree ref)
{
- cp_lvalue_kind kind = lvalue_p_1 (ref);
+ cp_lvalue_kind kind = lvalue_kind (ref);
if (kind & clk_class)
return false;
else
AGGR_INIT_EXPR_ARGP (init));
TREE_SIDE_EFFECTS (rval) = 1;
AGGR_INIT_VIA_CTOR_P (rval) = is_ctor;
+ TREE_NOTHROW (rval) = TREE_NOTHROW (init);
}
else
rval = init;
if (TREE_CODE (rval) == AGGR_INIT_EXPR)
slot = AGGR_INIT_EXPR_SLOT (rval);
- else if (TREE_CODE (rval) == CALL_EXPR)
+ else if (TREE_CODE (rval) == CALL_EXPR
+ || TREE_CODE (rval) == CONSTRUCTOR)
slot = build_local_temp (type);
else
return rval;
return rval;
}
+/* Return a TARGET_EXPR which expresses the initialization of an array to
+ be named later, either default-initialization or copy-initialization
+ from another array of the same type. */
+
+tree
+build_vec_init_expr (tree type, tree init)
+{
+ tree slot;
+ tree inner_type = strip_array_types (type);
+
+ /* INIT is either absent (default-init) or an array whose type matches
+ TYPE modulo top-level cv-qualifiers (copy-init). */
+ gcc_assert (init == NULL_TREE
+ || (same_type_ignoring_top_level_qualifiers_p
+ (type, TREE_TYPE (init))));
+
+ /* Since we're deferring building the actual constructor calls until
+ gimplification time, we need to build one now and throw it away so
+ that the relevant constructor gets mark_used before cgraph decides
+ what functions are needed. Here we assume that init is either
+ NULL_TREE or another array to copy. */
+ if (CLASS_TYPE_P (inner_type))
+ {
+ VEC(tree,gc) *argvec = make_tree_vector ();
+ if (init)
+ {
+ /* A dummy object stands in for an element of the source array;
+ if the source is an rvalue, cast the dummy to an rvalue
+ reference so overload resolution finds a move constructor. */
+ tree dummy = build_dummy_object (inner_type);
+ if (!real_lvalue_p (init))
+ dummy = move (dummy);
+ VEC_quick_push (tree, argvec, dummy);
+ }
+ /* Result discarded: called only for its mark_used side effect. */
+ build_special_member_call (NULL_TREE, complete_ctor_identifier,
+ &argvec, inner_type, LOOKUP_NORMAL,
+ tf_warning_or_error);
+ }
+
+ slot = build_local_temp (type);
+ init = build2 (VEC_INIT_EXPR, type, slot, init);
+ SET_EXPR_LOCATION (init, input_location);
+ init = build_target_expr (slot, init);
+ TARGET_EXPR_IMPLICIT_P (init) = 1;
+
+ return init;
+}
+
+/* Return a TARGET_EXPR expressing copy-initialization of a new array
+ from the array INIT; convenience wrapper around build_vec_init_expr. */
+
+tree
+build_array_copy (tree init)
+{
+ return build_vec_init_expr (TREE_TYPE (init), init);
+}
+
/* Build a TARGET_EXPR using INIT to initialize a new temporary of the
indicated TYPE. */
{
gcc_assert (!VOID_TYPE_P (type));
- if (TREE_CODE (init) == TARGET_EXPR)
+ if (TREE_CODE (init) == TARGET_EXPR
+ || init == error_mark_node)
return init;
- else if (CLASS_TYPE_P (type) && !TYPE_HAS_TRIVIAL_INIT_REF (type)
+ else if (CLASS_TYPE_P (type) && type_has_nontrivial_copy_init (type)
&& !VOID_TYPE_P (TREE_TYPE (init))
&& TREE_CODE (init) != COND_EXPR
&& TREE_CODE (init) != CONSTRUCTOR
/* Like the above function, but without the checking. This function should
only be used by code which is deliberately trying to subvert the type
- system, such as call_builtin_trap. */
+ system, such as call_builtin_trap. Or build_over_call, to avoid
+ infinite recursion. */
tree
force_target_expr (tree type, tree init)
if (error_operand_p (expr))
return expr;
+ expr = mark_rvalue_use (expr);
+
/* [basic.lval]
Non-class rvalues always have cv-unqualified types. */
type = TREE_TYPE (expr);
- if (!CLASS_TYPE_P (type) && cp_type_quals (type))
- type = cp_build_qualified_type (type, TYPE_UNQUALIFIED);
+ if (!CLASS_TYPE_P (type) && cv_qualified_p (type))
+ type = cv_unqualified (type);
/* We need to do this for rvalue refs as well to get the right answer
from decltype; see c++/36628. */
return (TREE_TYPE (t1) == t2->type && TYPE_DOMAIN (t1) == t2->domain);
}
-/* Hash table containing all of the C++ array types, including
- dependent array types and array types whose element type is
- cv-qualified. */
+/* Hash table containing dependent array types, which are unsuitable for
+ the language-independent type hash table. */
static GTY ((param_is (union tree_node))) htab_t cplus_array_htab;
+/* Like build_array_type, but handle special C++ semantics. */
-static tree
-build_cplus_array_type_1 (tree elt_type, tree index_type)
+tree
+build_cplus_array_type (tree elt_type, tree index_type)
{
tree t;
else
{
/* Build a new array type. */
- t = make_node (ARRAY_TYPE);
+ t = cxx_make_type (ARRAY_TYPE);
TREE_TYPE (t) = elt_type;
TYPE_DOMAIN (t) = index_type;
else
t = build_array_type (elt_type, index_type);
+ /* We want TYPE_MAIN_VARIANT of an array to strip cv-quals from the
+ element type as well, so fix it up if needed. */
+ if (elt_type != TYPE_MAIN_VARIANT (elt_type))
+ {
+ tree m = build_cplus_array_type (TYPE_MAIN_VARIANT (elt_type),
+ index_type);
+ if (TYPE_MAIN_VARIANT (t) != m)
+ {
+ TYPE_MAIN_VARIANT (t) = m;
+ TYPE_NEXT_VARIANT (t) = TYPE_NEXT_VARIANT (m);
+ TYPE_NEXT_VARIANT (m) = t;
+ }
+ }
+
/* Push these needs up so that initialization takes place
more easily. */
TYPE_NEEDS_CONSTRUCTING (t)
return t;
}
-tree
-build_cplus_array_type (tree elt_type, tree index_type)
-{
- tree t;
- int type_quals = cp_type_quals (elt_type);
-
- if (type_quals != TYPE_UNQUALIFIED)
- elt_type = cp_build_qualified_type (elt_type, TYPE_UNQUALIFIED);
-
- t = build_cplus_array_type_1 (elt_type, index_type);
-
- if (type_quals != TYPE_UNQUALIFIED)
- t = cp_build_qualified_type (t, type_quals);
-
- return t;
-}
-
/* Return an ARRAY_TYPE with element type ELT and length N. */
tree
}
+/* Returns EXPR cast to rvalue reference type, like std::move. */
+
+tree
+move (tree expr)
+{
+ tree type = TREE_TYPE (expr);
+ /* Expression trees should not carry REFERENCE_TYPE directly; the
+ reference type is built here from the underlying object type. */
+ gcc_assert (TREE_CODE (type) != REFERENCE_TYPE);
+ type = cp_build_reference_type (type, /*rval*/true);
+ return build_static_cast (type, expr, tf_warning_or_error);
+}
+
/* Used by the C++ front end to build qualified array types. However,
the C version of this function does not properly maintain canonical
types (which are not used in C). */
if (element_type == error_mark_node)
return error_mark_node;
- /* See if we already have an identically qualified type. */
+ /* See if we already have an identically qualified type. Tests
+ should be equivalent to those in check_qualified_type. */
for (t = TYPE_MAIN_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t))
- if (cp_type_quals (t) == type_quals
+ if (TREE_TYPE (t) == element_type
&& TYPE_NAME (t) == TYPE_NAME (type)
- && TYPE_CONTEXT (t) == TYPE_CONTEXT (type))
+ && TYPE_CONTEXT (t) == TYPE_CONTEXT (type)
+ && attribute_list_equal (TYPE_ATTRIBUTES (t),
+ TYPE_ATTRIBUTES (type)))
break;
if (!t)
- {
- t = build_cplus_array_type_1 (element_type, TYPE_DOMAIN (type));
+ {
+ t = build_cplus_array_type (element_type, TYPE_DOMAIN (type));
- if (TYPE_MAIN_VARIANT (t) != TYPE_MAIN_VARIANT (type))
- {
- /* Set the main variant of the newly-created ARRAY_TYPE
- (with cv-qualified element type) to the main variant of
- the unqualified ARRAY_TYPE we started with. */
- tree last_variant = t;
- tree m = TYPE_MAIN_VARIANT (type);
-
- /* Find the last variant on the new ARRAY_TYPEs list of
- variants, setting the main variant of each of the other
- types to the main variant of our unqualified
- ARRAY_TYPE. */
- while (TYPE_NEXT_VARIANT (last_variant))
- {
- TYPE_MAIN_VARIANT (last_variant) = m;
- last_variant = TYPE_NEXT_VARIANT (last_variant);
- }
-
- /* Splice in the newly-created variants. */
- TYPE_NEXT_VARIANT (last_variant) = TYPE_NEXT_VARIANT (m);
- TYPE_NEXT_VARIANT (m) = t;
- TYPE_MAIN_VARIANT (last_variant) = m;
- }
- }
+ /* Keep the typedef name. */
+ if (TYPE_NAME (t) != TYPE_NAME (type))
+ {
+ t = build_variant_type_copy (t);
+ TYPE_NAME (t) = TYPE_NAME (type);
+ }
+ }
/* Even if we already had this variant, we update
TYPE_NEEDS_CONSTRUCTING and TYPE_HAS_NONTRIVIAL_DESTRUCTOR in case
}
/* A reference or method type shall not be cv-qualified.
- [dcl.ref], [dcl.fct] */
+ [dcl.ref], [dcl.fct]. This used to be an error, but as of DR 295
+ (in CD1) we always ignore extra cv-quals on functions. */
if (type_quals & (TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE)
&& (TREE_CODE (type) == REFERENCE_TYPE
+ || TREE_CODE (type) == FUNCTION_TYPE
|| TREE_CODE (type) == METHOD_TYPE))
{
- bad_quals |= type_quals & (TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE);
+ if (TREE_CODE (type) == REFERENCE_TYPE)
+ bad_quals |= type_quals & (TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE);
type_quals &= ~(TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE);
}
+ /* But preserve any function-cv-quals on a FUNCTION_TYPE. */
+ if (TREE_CODE (type) == FUNCTION_TYPE)
+ type_quals |= type_memfn_quals (type);
+
/* A restrict-qualified type must be a pointer (or reference)
to object or incomplete type. */
if ((type_quals & TYPE_QUAL_RESTRICT)
type_quals &= ~TYPE_QUAL_RESTRICT;
}
- if (bad_quals == TYPE_UNQUALIFIED)
+ if (bad_quals == TYPE_UNQUALIFIED
+ || (complain & tf_ignore_bad_quals))
/*OK*/;
- else if (!(complain & (tf_error | tf_ignore_bad_quals)))
+ else if (!(complain & tf_error))
return error_mark_node;
else
{
- if (complain & tf_ignore_bad_quals)
- /* We're not going to warn about constifying things that can't
- be constified. */
- bad_quals &= ~TYPE_QUAL_CONST;
- if (bad_quals)
- {
- tree bad_type = build_qualified_type (ptr_type_node, bad_quals);
-
- if (!(complain & tf_ignore_bad_quals))
- error ("%qV qualifiers cannot be applied to %qT",
- bad_type, type);
- }
+ tree bad_type = build_qualified_type (ptr_type_node, bad_quals);
+ error ("%qV qualifiers cannot be applied to %qT",
+ bad_type, type);
}
/* Retrieve (or create) the appropriately qualified variant. */
&& (TYPE_LANG_SPECIFIC (TYPE_CANONICAL (result))
== TYPE_LANG_SPECIFIC (TYPE_CANONICAL (type))))
TYPE_LANG_SPECIFIC (TYPE_CANONICAL (result)) = NULL;
-
return result;
}
+/* Return TYPE with const and volatile removed. */
+
+tree
+cv_unqualified (tree type)
+{
+ int quals;
+
+ /* Pass errors through untouched. */
+ if (type == error_mark_node)
+ return type;
+
+ /* Strip only const and volatile; any other qualifiers (e.g. restrict)
+ present on TYPE are preserved. */
+ quals = cp_type_quals (type);
+ quals &= ~(TYPE_QUAL_CONST|TYPE_QUAL_VOLATILE);
+ return cp_build_qualified_type (type, quals);
+}
+
/* Builds a qualified variant of T that is not a typedef variant.
E.g. consider the following declarations:
typedef const int ConstInt;
TREE_CHAIN (arg_types));
}
else
+ {
result = build_function_type (type,
arg_types);
+ result = apply_memfn_quals (result, type_memfn_quals (t));
+ }
+
+ if (TYPE_RAISES_EXCEPTIONS (t))
+ result = build_exception_variant (result,
+ TYPE_RAISES_EXCEPTIONS (t));
}
break;
+ case TYPENAME_TYPE:
+ result = make_typename_type (strip_typedefs (TYPE_CONTEXT (t)),
+ TYPENAME_TYPE_FULLNAME (t),
+ typename_type, tf_none);
+ break;
default:
break;
}
if (!result)
result = TYPE_MAIN_VARIANT (t);
+ if (TYPE_ATTRIBUTES (t))
+ result = cp_build_type_attribute_variant (result, TYPE_ATTRIBUTES (t));
return cp_build_qualified_type (result, cp_type_quals (t));
}
-\f
/* Makes a copy of BINFO and TYPE, which is to be inherited into a
graph dominated by T. If BINFO is NULL, TYPE is a dependent base,
and we do a shallow copy. If BINFO is non-NULL, we do a deep copy.
return error_mark_node;
t = build2 (SCOPE_REF, type, scope, name);
QUALIFIED_NAME_IS_TEMPLATE (t) = template_p;
+ if (type)
+ t = convert_from_reference (t);
return t;
}
}
tree
-get_first_fn (tree from)
+get_fns (tree from)
{
gcc_assert (is_overloaded_fn (from));
/* A baselink is also considered an overloaded function. */
from = BASELINK_FUNCTIONS (from);
if (TREE_CODE (from) == TEMPLATE_ID_EXPR)
from = TREE_OPERAND (from, 0);
- return OVL_CURRENT (from);
+ return from;
+}
+
+tree
+get_first_fn (tree from)
+{
+ return OVL_CURRENT (get_fns (from));
}
/* Return a new OVL node, concatenating it with the old one. */
tree
build_exception_variant (tree type, tree raises)
{
- tree v = TYPE_MAIN_VARIANT (type);
- int type_quals = TYPE_QUALS (type);
+ tree v;
+ int type_quals;
- for (; v; v = TYPE_NEXT_VARIANT (v))
+ if (comp_except_specs (raises, TYPE_RAISES_EXCEPTIONS (type), ce_exact))
+ return type;
+
+ type_quals = TYPE_QUALS (type);
+ for (v = TYPE_MAIN_VARIANT (type); v; v = TYPE_NEXT_VARIANT (v))
if (check_qualified_type (v, type, type_quals)
- && comp_except_specs (raises, TYPE_RAISES_EXCEPTIONS (v), 1))
+ && comp_except_specs (raises, TYPE_RAISES_EXCEPTIONS (v), ce_exact))
return v;
/* Need to build a new variant. */
TEMPLATE_TYPE_PARM_INDEX (t2) = copy_node (TEMPLATE_TYPE_PARM_INDEX (t));
TEMPLATE_PARM_DECL (TEMPLATE_TYPE_PARM_INDEX (t2)) = decl;
TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO (t2)
- = tree_cons (TEMPLATE_TEMPLATE_PARM_TEMPLATE_DECL (t),
- newargs, NULL_TREE);
+ = build_template_info (TEMPLATE_TEMPLATE_PARM_TEMPLATE_DECL (t), newargs);
TREE_TYPE (decl) = t2;
TYPE_NAME (t2) = decl;
case RECORD_TYPE:
if (TYPE_PTRMEMFUNC_P (t))
goto ptrmem;
+ /* Lambda types that don't have mangling scope have no linkage. We
+ check CLASSTYPE_LAMBDA_EXPR here rather than LAMBDA_TYPE_P because
+ when we get here from pushtag none of the lambda information is
+ set up yet, so we want to assume that the lambda has linkage and
+ fix it up later if not. */
+ if (CLASSTYPE_LAMBDA_EXPR (t)
+ && LAMBDA_TYPE_EXTRA_SCOPE (t) == NULL_TREE)
+ return t;
/* Fall through. */
case UNION_TYPE:
if (!CLASS_TYPE_P (t))
return NULL_TREE;
-
- /* Check template type-arguments. I think that types with no linkage
- can't occur in non-type arguments, though that might change with
- constexpr. */
- r = CLASSTYPE_TEMPLATE_INFO (t);
- if (r)
- {
- tree args = INNERMOST_TEMPLATE_ARGS (TI_ARGS (r));
- int i;
-
- for (i = TREE_VEC_LENGTH (args); i-- > 0; )
- {
- tree elt = TREE_VEC_ELT (args, i);
- if (TYPE_P (elt)
- && (r = no_linkage_check (elt, relaxed_p), r))
- return r;
- }
- }
/* Fall through. */
case ENUMERAL_TYPE:
/* Only treat anonymous types as having no linkage if they're at
- namespace scope. This doesn't have a core issue number yet. */
+ namespace scope. This is core issue 966. */
if (TYPE_ANONYMOUS_P (t) && TYPE_NAMESPACE_SCOPE_P (t))
return t;
- r = CP_TYPE_CONTEXT (t);
- if (TYPE_P (r))
- return no_linkage_check (TYPE_CONTEXT (t), relaxed_p);
- else if (TREE_CODE (r) == FUNCTION_DECL)
+ for (r = CP_TYPE_CONTEXT (t); ; )
{
- if (!relaxed_p || !TREE_PUBLIC (r) || !vague_linkage_fn_p (r))
- return t;
+ /* If we're a nested type of a !TREE_PUBLIC class, we might not
+ have linkage, or we might just be in an anonymous namespace.
+ If we're in a TREE_PUBLIC class, we have linkage. */
+ if (TYPE_P (r) && !TREE_PUBLIC (TYPE_NAME (r)))
+ return no_linkage_check (TYPE_CONTEXT (t), relaxed_p);
+ else if (TREE_CODE (r) == FUNCTION_DECL)
+ {
+ if (!relaxed_p || !vague_linkage_p (r))
+ return t;
+ else
+ r = CP_DECL_CONTEXT (r);
+ }
else
- return no_linkage_check (CP_DECL_CONTEXT (r), relaxed_p);
+ break;
}
return NULL_TREE;
{
print_search_statistics ();
print_class_statistics ();
+ print_template_statistics ();
#ifdef GATHER_STATISTICS
fprintf (stderr, "maximum template instantiation depth reached: %d\n",
depth_reached);
return t;
}
-/* Similar to `build_call_list', but for template definitions of non-dependent
- expressions. NON_DEP is the non-dependent expression that has been
- built. */
+/* Similar to `build_nt_call_vec', but for template definitions of
+ non-dependent expressions. NON_DEP is the non-dependent expression
+ that has been built. */
tree
build_min_non_dep_call_vec (tree non_dep, tree fn, VEC(tree,gc) *argvec)
/* We need to do this when determining whether or not two
non-type pointer to member function template arguments
are the same. */
- if (!(same_type_p (TREE_TYPE (t1), TREE_TYPE (t2))
- /* The first operand is RTL. */
- && TREE_OPERAND (t1, 0) == TREE_OPERAND (t2, 0)))
+ if (!same_type_p (TREE_TYPE (t1), TREE_TYPE (t2))
+ || CONSTRUCTOR_NELTS (t1) != CONSTRUCTOR_NELTS (t2))
return false;
- return cp_tree_equal (TREE_OPERAND (t1, 1), TREE_OPERAND (t2, 1));
+ {
+ tree field, value;
+ unsigned int i;
+ FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (t1), i, field, value)
+ {
+ constructor_elt *elt2 = CONSTRUCTOR_ELT (t2, i);
+ if (!cp_tree_equal (field, elt2->index)
+ || !cp_tree_equal (value, elt2->value))
+ return false;
+ }
+ }
+ return true;
case TREE_LIST:
if (!cp_tree_equal (TREE_PURPOSE (t1), TREE_PURPOSE (t2)))
arg2 = next_call_expr_arg (&iter2))
if (!cp_tree_equal (arg1, arg2))
return false;
- return (arg1 || arg2);
+ if (arg1 || arg2)
+ return false;
+ return true;
}
case TARGET_EXPR:
case TEMPLATE_PARM_INDEX:
return (TEMPLATE_PARM_IDX (t1) == TEMPLATE_PARM_IDX (t2)
&& TEMPLATE_PARM_LEVEL (t1) == TEMPLATE_PARM_LEVEL (t2)
+ && (TEMPLATE_PARM_PARAMETER_PACK (t1)
+ == TEMPLATE_PARM_PARAMETER_PACK (t2))
&& same_type_p (TREE_TYPE (TEMPLATE_PARM_DECL (t1)),
TREE_TYPE (TEMPLATE_PARM_DECL (t2))));
return same_type_p (TRAIT_EXPR_TYPE1 (t1), TRAIT_EXPR_TYPE1 (t2))
&& same_type_p (TRAIT_EXPR_TYPE2 (t1), TRAIT_EXPR_TYPE2 (t2));
+ case CAST_EXPR:
+ case STATIC_CAST_EXPR:
+ case REINTERPRET_CAST_EXPR:
+ case CONST_CAST_EXPR:
+ case DYNAMIC_CAST_EXPR:
+ case NEW_EXPR:
+ if (!same_type_p (TREE_TYPE (t1), TREE_TYPE (t2)))
+ return false;
+ /* Now compare operands as usual. */
+ break;
+
default:
break;
}
int
varargs_function_p (const_tree function)
{
- const_tree parm = TYPE_ARG_TYPES (TREE_TYPE (function));
- for (; parm; parm = TREE_CHAIN (parm))
- if (TREE_VALUE (parm) == void_type_node)
- return 0;
- return 1;
+ return stdarg_p (TREE_TYPE (function));
}
/* Returns 1 if decl is a member of a class. */
build_dummy_object (tree type)
{
tree decl = build1 (NOP_EXPR, build_pointer_type (type), void_zero_node);
- return cp_build_indirect_ref (decl, NULL, tf_warning_or_error);
+ return cp_build_indirect_ref (decl, RO_NULL, tf_warning_or_error);
}
/* We've gotten a reference to a member of TYPE. Return *this if appropriate,
{
tree decl, context;
tree binfo;
+ tree current = current_nonlambda_class_type ();
- if (current_class_type
- && (binfo = lookup_base (current_class_type, type,
- ba_unique | ba_quiet, NULL)))
- context = current_class_type;
+ if (current
+ && (binfo = lookup_base (current, type, ba_any, NULL)))
+ context = current;
else
{
/* Reference from a nested class member function. */
&& same_type_p (TYPE_MAIN_VARIANT (TREE_TYPE (current_class_ref)),
current_class_type))
decl = current_class_ref;
+ else if (current != current_class_type
+ && context == nonlambda_method_basetype ())
+ /* In a lambda, need to go through 'this' capture. */
+ decl = (cp_build_indirect_ref
+ ((lambda_expr_this_capture
+ (CLASSTYPE_LAMBDA_EXPR (current_class_type))),
+ RO_NULL, tf_warning_or_error));
else
decl = build_dummy_object (context);
return 0;
}
-/* Returns true iff copying an object of type T is non-trivial. */
+/* Returns true iff copying an object of type T (including via move
+ constructor) is non-trivial. That is, T has no non-trivial copy
+ constructors and no non-trivial move constructors. */
bool
type_has_nontrivial_copy_init (const_tree t)
t = strip_array_types (CONST_CAST_TREE (t));
if (CLASS_TYPE_P (t))
- return TYPE_HAS_COMPLEX_INIT_REF (t);
+ {
+ gcc_assert (COMPLETE_TYPE_P (t));
+ return ((TYPE_HAS_COPY_CTOR (t)
+ && TYPE_HAS_COMPLEX_COPY_CTOR (t))
+ || TYPE_HAS_COMPLEX_MOVE_CTOR (t));
+ }
else
return 0;
}
-/* Returns 1 iff type T is a trivial type, as defined in [basic.types]. */
+/* Returns 1 iff type T is a trivially copyable type, as defined in
+ [basic.types] and [class]. */
+
+bool
+trivially_copyable_p (const_tree t)
+{
+ /* An array is trivially copyable iff its element type is. */
+ t = strip_array_types (CONST_CAST_TREE (t));
+
+ if (CLASS_TYPE_P (t))
+ /* Each special member (copy ctor, move ctor, copy assign, move
+ assign) must be trivial if declared, and the destructor must be
+ trivial. */
+ return ((!TYPE_HAS_COPY_CTOR (t)
+ || !TYPE_HAS_COMPLEX_COPY_CTOR (t))
+ && !TYPE_HAS_COMPLEX_MOVE_CTOR (t)
+ && (!TYPE_HAS_COPY_ASSIGN (t)
+ || !TYPE_HAS_COMPLEX_COPY_ASSIGN (t))
+ && !TYPE_HAS_COMPLEX_MOVE_ASSIGN (t)
+ && TYPE_HAS_TRIVIAL_DESTRUCTOR (t));
+ else
+ /* Non-class, non-array types reduce to the scalar-ish check. */
+ return scalarish_type_p (t);
+}
+
+/* Returns 1 iff type T is a trivial type, as defined in [basic.types] and
+ [class]. */
bool
trivial_type_p (const_tree t)
if (CLASS_TYPE_P (t))
return (TYPE_HAS_TRIVIAL_DFLT (t)
- && TYPE_HAS_TRIVIAL_INIT_REF (t)
- && TYPE_HAS_TRIVIAL_ASSIGN_REF (t)
- && TYPE_HAS_TRIVIAL_DESTRUCTOR (t));
+ && trivially_copyable_p (t));
else
return scalarish_type_p (t);
}
argument unmodified and we assign it to a const_tree. */
t = strip_array_types (CONST_CAST_TREE(t));
- if (CLASS_TYPE_P (t))
+ if (!CLASS_TYPE_P (t))
+ return scalarish_type_p (t);
+ else if (cxx_dialect > cxx98)
/* [class]/10: A POD struct is a class that is both a trivial class and a
standard-layout class, and has no non-static data members of type
non-POD struct, non-POD union (or array of such types).
non-std-layout or non-trivial, the class will be too. */
return (std_layout_type_p (t) && trivial_type_p (t));
else
- return scalarish_type_p (t);
+ /* The C++98 definition of POD is different. */
+ return !CLASSTYPE_NON_LAYOUT_POD_P (t);
}
/* Returns true iff T is POD for the purpose of layout, as defined in the
new_type = build_type_attribute_variant (type, attributes);
if (TREE_CODE (new_type) == FUNCTION_TYPE
- && (TYPE_RAISES_EXCEPTIONS (new_type)
- != TYPE_RAISES_EXCEPTIONS (type)))
+ || TREE_CODE (new_type) == METHOD_TYPE)
new_type = build_exception_variant (new_type,
TYPE_RAISES_EXCEPTIONS (type));
gcc_assert (TREE_CODE (typea) == FUNCTION_TYPE);
return comp_except_specs (TYPE_RAISES_EXCEPTIONS (typea),
- TYPE_RAISES_EXCEPTIONS (typeb), 1);
+ TYPE_RAISES_EXCEPTIONS (typeb), ce_exact);
}
/* Apply FUNC to all language-specific sub-trees of TP in a pre-order
DECL_LANG_SPECIFIC. */
if (DECL_COPY_CONSTRUCTOR_P (decl))
return sfk_copy_constructor;
+ if (DECL_MOVE_CONSTRUCTOR_P (decl))
+ return sfk_move_constructor;
if (DECL_CONSTRUCTOR_P (decl))
return sfk_constructor;
if (DECL_OVERLOADED_OPERATOR_P (decl) == NOP_EXPR)
- return sfk_assignment_operator;
+ {
+ if (copy_fn_p (decl))
+ return sfk_copy_assignment;
+ if (move_fn_p (decl))
+ return sfk_move_assignment;
+ }
if (DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P (decl))
return sfk_destructor;
if (DECL_COMPLETE_DESTRUCTOR_P (decl))
/* Everything else has internal linkage. */
return lk_internal;
}
+
+/* Returns the storage duration of the object or reference associated with
+ the indicated DECL, which should be a VAR_DECL or PARM_DECL. */
+
+duration_kind
+decl_storage_duration (tree decl)
+{
+ /* Parameters always have automatic storage duration. */
+ if (TREE_CODE (decl) == PARM_DECL)
+ return dk_auto;
+ /* FUNCTION_DECLs are tolerated here and treated as static, although
+ the comment above only promises VAR_DECL or PARM_DECL. */
+ if (TREE_CODE (decl) == FUNCTION_DECL)
+ return dk_static;
+ gcc_assert (TREE_CODE (decl) == VAR_DECL);
+ /* A variable that is neither static nor external is a local. */
+ if (!TREE_STATIC (decl)
+ && !DECL_EXTERNAL (decl))
+ return dk_auto;
+ if (DECL_THREAD_LOCAL_P (decl))
+ return dk_thread;
+ return dk_static;
+}
\f
/* EXP is an expression that we want to pre-evaluate. Returns (in
*INITP) an expression that will perform the pre-evaluation. The
if (!TREE_SIDE_EFFECTS (exp))
init_expr = NULL_TREE;
- else if (!real_lvalue_p (exp)
- || !TYPE_NEEDS_CONSTRUCTING (TREE_TYPE (exp)))
+ /* There are no expressions with REFERENCE_TYPE, but there can be call
+ arguments with such a type; just treat it as a pointer. */
+ else if (TREE_CODE (TREE_TYPE (exp)) == REFERENCE_TYPE
+ || !lvalue_or_rvalue_with_address_p (exp))
{
init_expr = get_target_expr (exp);
exp = TARGET_EXPR_SLOT (init_expr);
}
else
{
- exp = cp_build_unary_op (ADDR_EXPR, exp, 1, tf_warning_or_error);
+ bool xval = !real_lvalue_p (exp);
+ exp = cp_build_addr_expr (exp, tf_warning_or_error);
init_expr = get_target_expr (exp);
exp = TARGET_EXPR_SLOT (init_expr);
- exp = cp_build_indirect_ref (exp, 0, tf_warning_or_error);
+ exp = cp_build_indirect_ref (exp, RO_NULL, tf_warning_or_error);
+ if (xval)
+ exp = move (exp);
}
*initp = init_expr;
cast_valid_in_integral_constant_expression_p (tree type)
{
return (INTEGRAL_OR_ENUMERATION_TYPE_P (type)
+ || cxx_dialect >= cxx0x
|| dependent_type_p (type)
|| type == error_mark_node);
}
if (!gimple_has_body_p (decl)
&& !DECL_THUNK_P (decl)
&& !DECL_EXTERNAL (decl))
- return true;
+ {
+ struct cgraph_node *node = cgraph_get_node (decl);
+
+ /* Don't fix same_body aliases. Although they don't have their own
+ CFG, they share it with what they alias to. */
+ if (!node
+ || node->decl == decl
+ || !node->same_body)
+ return true;
+ }
return false;
}
DECL_EXTERNAL (t) = 1;
TREE_STATIC (t) = 0;
}
+ if (CP_AGGREGATE_TYPE_P (t)
+ && TYPE_NAME (t))
+ {
+ tree name = TYPE_NAME (t);
+ if (TREE_CODE (name) == TYPE_DECL)
+ name = DECL_NAME (name);
+ /* Drop anonymous names. */
+ if (name != NULL_TREE
+ && ANON_AGGRNAME_P (name))
+ TYPE_NAME (t) = NULL_TREE;
+ }
+ if (TREE_CODE (t) == NAMESPACE_DECL)
+ {
+ /* The list of users of a namespace isn't useful for the middle-end
+ or debug generators. */
+ DECL_NAMESPACE_USERS (t) = NULL_TREE;
+ /* Neither do we need the leftover chaining of namespaces
+ from the binding level. */
+ DECL_CHAIN (t) = NULL_TREE;
+ }
+}
+
+/* Stub for c-common. Please keep in sync with c-decl.c.
+ FIXME: If address space support is target specific, then this
+ should be a C target hook. But currently this is not possible,
+ because this function is called via REGISTER_TARGET_PRAGMAS. */
+void
+c_register_addr_space (const char *word ATTRIBUTE_UNUSED,
+ addr_space_t as ATTRIBUTE_UNUSED)
+{
+ /* Deliberately empty: the C++ front end ignores named address
+ space registrations. */
+}
\f