+ gcc_checking_assert (is_gimple_ip_invariant (input));
+ if (jfunc->value.pass_through.operation == NOP_EXPR)
+ return input;
+
+ if (TREE_CODE_CLASS (jfunc->value.pass_through.operation)
+ == tcc_comparison)
+ restype = boolean_type_node;
+ else
+ restype = TREE_TYPE (input);
+ res = fold_binary (jfunc->value.pass_through.operation, restype,
+ input, jfunc->value.pass_through.operand);
+
+ if (res && !is_gimple_ip_invariant (res))
+ return NULL_TREE;
+
+ return res;
+}
+
+/* Evaluate an ancestor jump function JFUNC on the constant value INPUT and
+   return the result.  Return NULL_TREE when it cannot be determined.  */
+
+static tree
+ipa_get_jf_ancestor_result (struct ipa_jump_func *jfunc, tree input)
+{
+  tree ref;
+
+  /* Only the address of an object can be adjusted by an ancestor jump
+     function.  */
+  if (TREE_CODE (input) != ADDR_EXPR)
+    return NULL_TREE;
+
+  ref = build_ref_for_offset (EXPR_LOCATION (TREE_OPERAND (input, 0)),
+                              TREE_OPERAND (input, 0),
+                              jfunc->value.ancestor.offset,
+                              jfunc->value.ancestor.type, NULL, false);
+  return build_fold_addr_expr (ref);
+}
+
+/* Extract the actual BINFO being described by JFUNC, which must be a known
+   type jump function.  Return NULL_TREE if the base type has no binfo.  */
+
+static tree
+ipa_value_from_known_type_jfunc (struct ipa_jump_func *jfunc)
+{
+  tree binfo = TYPE_BINFO (jfunc->value.known_type.base_type);
+
+  return binfo ? get_binfo_at_offset (binfo,
+                                      jfunc->value.known_type.offset,
+                                      jfunc->value.known_type.component_type)
+               : NULL_TREE;
+}
+
+/* Determine whether JFUNC evaluates to a known value (that is either a
+   constant or a binfo) and if so, return it.  Otherwise return NULL.  INFO
+   describes the caller node so that pass-through jump functions can be
+   evaluated.  */
+
+static tree
+ipa_value_from_jfunc (struct ipa_node_params *info, struct ipa_jump_func *jfunc)
+{
+  if (jfunc->type == IPA_JF_CONST)
+    return jfunc->value.constant;
+  else if (jfunc->type == IPA_JF_KNOWN_TYPE)
+    return ipa_value_from_known_type_jfunc (jfunc);
+  else if (jfunc->type == IPA_JF_PASS_THROUGH
+           || jfunc->type == IPA_JF_ANCESTOR)
+    {
+      tree input;
+      int idx;
+
+      /* Both kinds of jump function take their input from a formal
+         parameter of the caller.  */
+      if (jfunc->type == IPA_JF_PASS_THROUGH)
+        idx = jfunc->value.pass_through.formal_id;
+      else
+        idx = jfunc->value.ancestor.formal_id;
+
+      if (info->ipcp_orig_node)
+        /* The caller is itself an IPA-CP clone; the known value of the
+           parameter was recorded when the clone was created.  */
+        input = VEC_index (tree, info->known_vals, idx);
+      else
+        {
+          struct ipcp_lattice *lat;
+
+          if (!info->lattices)
+            {
+              gcc_checking_assert (!flag_ipa_cp);
+              return NULL_TREE;
+            }
+          lat = ipa_get_lattice (info, idx);
+          if (!ipa_lat_is_single_const (lat))
+            return NULL_TREE;
+          input = lat->values->value;
+        }
+
+      if (!input)
+        return NULL_TREE;
+
+      if (jfunc->type == IPA_JF_PASS_THROUGH)
+        {
+          if (jfunc->value.pass_through.operation == NOP_EXPR)
+            return input;
+          else if (TREE_CODE (input) == TREE_BINFO)
+            /* Arithmetic cannot be performed on a binfo.  */
+            return NULL_TREE;
+          else
+            return ipa_get_jf_pass_through_result (jfunc, input);
+        }
+      else
+        {
+          /* A binfo input is adjusted directly; constants go through the
+             ADDR_EXPR-based path.  */
+          if (TREE_CODE (input) == TREE_BINFO)
+            return get_binfo_at_offset (input, jfunc->value.ancestor.offset,
+                                        jfunc->value.ancestor.type);
+          else
+            return ipa_get_jf_ancestor_result (jfunc, input);
+        }
+    }
+  else
+    return NULL_TREE;
+}
+
+/* Determine whether JFUNC evaluates to a constant and if so, return it.
+   Otherwise return NULL.  INFO describes the caller node so that pass-through
+   jump functions can be evaluated.  */
+
+tree
+ipa_cst_from_jfunc (struct ipa_node_params *info, struct ipa_jump_func *jfunc)
+{
+  tree val = ipa_value_from_jfunc (info, jfunc);
+
+  /* A binfo describes a known type, not a constant.  */
+  if (val && TREE_CODE (val) == TREE_BINFO)
+    return NULL_TREE;
+  return val;
+}
+
+
+/* If checking is enabled, verify that no lattice is in the TOP state, i.e. not
+   bottom, not containing a variable component and without any known value at
+   the same time.  A TOP lattice after propagation means propagation never
+   reached it, which is a bug.  */
+
+DEBUG_FUNCTION void
+ipcp_verify_propagated_values (void)
+{
+  struct cgraph_node *node;
+
+  FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
+    {
+      struct ipa_node_params *info = IPA_NODE_REF (node);
+      int i, count = ipa_get_param_count (info);
+
+      for (i = 0; i < count; i++)
+        {
+          struct ipcp_lattice *lat = ipa_get_lattice (info, i);
+
+          if (!lat->bottom
+              && !lat->contains_variable
+              && lat->values_count == 0)
+            {
+              /* Dump the offending lattices before aborting to aid
+                 debugging.  */
+              if (dump_file)
+                {
+                  fprintf (dump_file, "\nIPA lattices after constant "
+                           "propagation:\n");
+                  print_all_lattices (dump_file, true, false);
+                }
+
+              gcc_unreachable ();
+            }
+        }
+    }
+}
+
+/* Return true iff X and Y should be considered equal values by IPA-CP.  */
+
+static bool
+values_equal_for_ipcp_p (tree x, tree y)
+{
+  gcc_checking_assert (x != NULL_TREE && y != NULL_TREE);
+
+  if (x == y)
+    return true;
+
+  /* Binfos are only considered equal when they are pointer-identical, which
+     the test above already covered.  */
+  if (TREE_CODE (x) == TREE_BINFO || TREE_CODE (y) == TREE_BINFO)
+    return false;
+
+  /* Addresses of CONST_DECLs are compared by the initializers of the decls,
+     so that identical constants from different translation units match.  */
+  if (TREE_CODE (x) == ADDR_EXPR
+      && TREE_CODE (y) == ADDR_EXPR
+      && TREE_CODE (TREE_OPERAND (x, 0)) == CONST_DECL
+      && TREE_CODE (TREE_OPERAND (y, 0)) == CONST_DECL)
+    return operand_equal_p (DECL_INITIAL (TREE_OPERAND (x, 0)),
+                            DECL_INITIAL (TREE_OPERAND (y, 0)), 0);
+  else
+    return operand_equal_p (x, y, 0);
+}
+
+/* Record that value VAL arrives along edge CS and (when the underlying jump
+   function is a pass-through or an ancestor one) originates from the caller
+   value SRC_VAL of the caller parameter with index SRC_IDX.  */
+
+static void
+add_value_source (struct ipcp_value *val, struct cgraph_edge *cs,
+                  struct ipcp_value *src_val, int src_idx)
+{
+  struct ipcp_value_source *new_src;
+
+  new_src = (struct ipcp_value_source *) pool_alloc (ipcp_sources_pool);
+  new_src->index = src_idx;
+  new_src->val = src_val;
+  new_src->cs = cs;
+
+  /* Push the new source onto the head of VAL's source list.  */
+  new_src->next = val->sources;
+  val->sources = new_src;
+}
+
+
+/* Try to add NEWVAL to LAT, potentially creating a new struct ipcp_value for
+   it.  CS, SRC_VAL and SRC_INDEX are meant for add_value_source and have the
+   same meaning.  Return true if the lattice changed.  */
+
+static bool
+add_value_to_lattice (struct ipcp_lattice *lat, tree newval,
+                      struct cgraph_edge *cs, struct ipcp_value *src_val,
+                      int src_idx)
+{
+  struct ipcp_value *val;
+
+  if (lat->bottom)
+    return false;
+
+
+  for (val = lat->values; val; val = val->next)
+    if (values_equal_for_ipcp_p (val->value, newval))
+      {
+        /* Within an SCC, do not record the same edge as a source twice,
+           otherwise repeated propagation rounds would grow the source list
+           without bound.  */
+        if (edge_within_scc (cs))
+          {
+            struct ipcp_value_source *s;
+            for (s = val->sources; s ; s = s->next)
+              if (s->cs == cs)
+                break;
+            if (s)
+              return false;
+          }
+
+        add_value_source (val, cs, src_val, src_idx);
+        return false;
+      }
+
+  /* Too many distinct values: give up and drop the lattice to bottom.  */
+  if (lat->values_count == PARAM_VALUE (PARAM_IPA_CP_VALUE_LIST_SIZE))
+    {
+      /* We can only free sources, not the values themselves, because sources
+         of other values in this SCC might point to them.  */
+      for (val = lat->values; val; val = val->next)
+        {
+          while (val->sources)
+            {
+              struct ipcp_value_source *src = val->sources;
+              val->sources = src->next;
+              pool_free (ipcp_sources_pool, src);
+            }
+        }
+
+      lat->values = NULL;
+      return set_lattice_to_bottom (lat);
+    }
+
+  lat->values_count++;
+  val = (struct ipcp_value *) pool_alloc (ipcp_values_pool);
+  memset (val, 0, sizeof (*val));
+
+  add_value_source (val, cs, src_val, src_idx);
+  val->value = newval;
+  val->next = lat->values;
+  lat->values = val;
+  return true;
+}
+
+/* Propagate values through a pass-through jump function JFUNC associated with
+   edge CS, taking values from SRC_LAT and putting them into DEST_LAT.  SRC_IDX
+   is the index of the source parameter.  Return true if DEST_LAT changed.  */
+
+static bool
+propagate_vals_accross_pass_through (struct cgraph_edge *cs,
+                                     struct ipa_jump_func *jfunc,
+                                     struct ipcp_lattice *src_lat,
+                                     struct ipcp_lattice *dest_lat,
+                                     int src_idx)
+{
+  struct ipcp_value *src_val;
+  bool ret = false;
+
+  /* A simple pass-through copies every source value unchanged.  */
+  if (jfunc->value.pass_through.operation == NOP_EXPR)
+    for (src_val = src_lat->values; src_val; src_val = src_val->next)
+      ret |= add_value_to_lattice (dest_lat, src_val->value, cs,
+                                   src_val, src_idx);
+  /* Do not create new values when propagating within an SCC because if there
+     are arithmetic functions with circular dependencies, there is an infinite
+     number of them and we would just make lattices bottom.  */
+  else if (edge_within_scc (cs))
+    ret = set_lattice_contains_variable (dest_lat);
+  else
+    for (src_val = src_lat->values; src_val; src_val = src_val->next)
+      {
+        tree cstval = src_val->value;
+
+        /* Arithmetic cannot be performed on binfos.  */
+        if (TREE_CODE (cstval) == TREE_BINFO)
+          {
+            ret |= set_lattice_contains_variable (dest_lat);
+            continue;
+          }
+        cstval = ipa_get_jf_pass_through_result (jfunc, cstval);
+
+        if (cstval)
+          ret |= add_value_to_lattice (dest_lat, cstval, cs, src_val, src_idx);
+        else
+          ret |= set_lattice_contains_variable (dest_lat);
+      }
+
+  return ret;
+}
+
+/* Propagate values through an ancestor jump function JFUNC associated with
+   edge CS, taking values from SRC_LAT and putting them into DEST_LAT.  SRC_IDX
+   is the index of the source parameter.  Return true if DEST_LAT changed.  */
+
+static bool
+propagate_vals_accross_ancestor (struct cgraph_edge *cs,
+                                 struct ipa_jump_func *jfunc,
+                                 struct ipcp_lattice *src_lat,
+                                 struct ipcp_lattice *dest_lat,
+                                 int src_idx)
+{
+  struct ipcp_value *src_val;
+  bool ret = false;
+
+  /* As with arithmetic pass-throughs, creating new values within an SCC
+     could go on forever, so only mark the destination as variable.  */
+  if (edge_within_scc (cs))
+    return set_lattice_contains_variable (dest_lat);
+
+  for (src_val = src_lat->values; src_val; src_val = src_val->next)
+    {
+      tree t = src_val->value;
+
+      if (TREE_CODE (t) == TREE_BINFO)
+        t = get_binfo_at_offset (t, jfunc->value.ancestor.offset,
+                                 jfunc->value.ancestor.type);
+      else
+        t = ipa_get_jf_ancestor_result (jfunc, t);
+
+      if (t)
+        ret |= add_value_to_lattice (dest_lat, t, cs, src_val, src_idx);
+      else
+        ret |= set_lattice_contains_variable (dest_lat);
+    }
+
+  return ret;
+}
+
+/* Propagate values across jump function JFUNC that is associated with edge CS
+   and put the values into DEST_LAT.  Return true if DEST_LAT changed.  */
+
+static bool
+propagate_accross_jump_function (struct cgraph_edge *cs,
+                                 struct ipa_jump_func *jfunc,
+                                 struct ipcp_lattice *dest_lat)
+{
+  if (dest_lat->bottom)
+    return false;
+
+  if (jfunc->type == IPA_JF_CONST
+      || jfunc->type == IPA_JF_KNOWN_TYPE)
+    {
+      tree val;
+
+      if (jfunc->type == IPA_JF_KNOWN_TYPE)
+        {
+          val = ipa_value_from_known_type_jfunc (jfunc);
+          if (!val)
+            return set_lattice_contains_variable (dest_lat);
+        }
+      else
+        val = jfunc->value.constant;
+      /* Context-independent values have no source value or parameter.  */
+      return add_value_to_lattice (dest_lat, val, cs, NULL, 0);
+    }
+  else if (jfunc->type == IPA_JF_PASS_THROUGH
+           || jfunc->type == IPA_JF_ANCESTOR)
+    {
+      struct ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
+      struct ipcp_lattice *src_lat;
+      int src_idx;
+      bool ret;
+
+      if (jfunc->type == IPA_JF_PASS_THROUGH)
+        src_idx = jfunc->value.pass_through.formal_id;
+      else
+        src_idx = jfunc->value.ancestor.formal_id;
+
+      src_lat = ipa_get_lattice (caller_info, src_idx);
+      if (src_lat->bottom)
+        return set_lattice_contains_variable (dest_lat);
+
+      /* If we would need to clone the caller and cannot, do not propagate.  */
+      if (!ipcp_versionable_function_p (cs->caller)
+          && (src_lat->contains_variable
+              || (src_lat->values_count > 1)))
+        return set_lattice_contains_variable (dest_lat);
+
+      if (jfunc->type == IPA_JF_PASS_THROUGH)
+        ret = propagate_vals_accross_pass_through (cs, jfunc, src_lat,
+                                                   dest_lat, src_idx);
+      else
+        ret = propagate_vals_accross_ancestor (cs, jfunc, src_lat, dest_lat,
+                                               src_idx);
+
+      /* A variable component in the source always propagates to the
+         destination.  */
+      if (src_lat->contains_variable)
+        ret |= set_lattice_contains_variable (dest_lat);
+
+      return ret;
+    }
+
+  /* TODO: We currently do not handle member method pointers in IPA-CP (we only
+     use it for indirect inlining), we should propagate them too.  */
+  return set_lattice_contains_variable (dest_lat);
+}
+
+/* Propagate constants from the caller to the callee of CS.  Return true if
+   any callee lattice changed.  */
+
+static bool
+propagate_constants_accross_call (struct cgraph_edge *cs)
+{
+  struct ipa_node_params *callee_info;
+  enum availability availability;
+  struct cgraph_node *callee, *alias_or_thunk;
+  struct ipa_edge_args *args;
+  bool ret = false;
+  int i, args_count, parms_count;
+
+  callee = cgraph_function_node (cs->callee, &availability);
+  if (!callee->analyzed)
+    return false;
+  gcc_checking_assert (cgraph_function_with_gimple_body_p (callee));
+  callee_info = IPA_NODE_REF (callee);
+
+  args = IPA_EDGE_REF (cs);
+  args_count = ipa_get_cs_argument_count (args);
+  parms_count = ipa_get_param_count (callee_info);
+
+  /* If this call goes through a thunk we must not propagate to the first (0th)
+     parameter.  However, we might need to uncover a thunk from below a series
+     of aliases first.  */
+  alias_or_thunk = cs->callee;
+  while (alias_or_thunk->alias)
+    alias_or_thunk = cgraph_alias_aliased_node (alias_or_thunk);
+  if (alias_or_thunk->thunk.thunk_p)
+    {
+      ret |= set_lattice_contains_variable (ipa_get_lattice (callee_info, 0));
+      i = 1;
+    }
+  else
+    i = 0;
+
+  for (; (i < args_count) && (i < parms_count); i++)
+    {
+      struct ipa_jump_func *jump_func = ipa_get_ith_jump_func (args, i);
+      struct ipcp_lattice *dest_lat = ipa_get_lattice (callee_info, i);
+
+      /* An overwritable callee might be replaced by a different body at link
+         or run time, so nothing can be assumed about its parameters.  */
+      if (availability == AVAIL_OVERWRITABLE)
+        ret |= set_lattice_contains_variable (dest_lat);
+      else
+        ret |= propagate_accross_jump_function (cs, jump_func, dest_lat);
+    }
+  /* Parameters with no corresponding argument become variable.  */
+  for (; i < parms_count; i++)
+    ret |= set_lattice_contains_variable (ipa_get_lattice (callee_info, i));
+
+  return ret;
+}
+
+/* If an indirect edge IE can be turned into a direct one based on KNOWN_VALS
+   (which can contain both constants and binfos) or KNOWN_BINFOS (which can be
+   NULL), return the destination.  Return NULL_TREE when the target cannot be
+   determined.  */
+
+static tree
+get_indirect_edge_target (struct cgraph_edge *ie,
+                          VEC (tree, heap) *known_vals,
+                          VEC (tree, heap) *known_binfos)
+{
+  int param_index = ie->indirect_info->param_index;
+  HOST_WIDE_INT token, anc_offset;
+  tree otr_type;
+  tree t, binfo;
+
+  if (param_index == -1)
+    return NULL_TREE;
+
+  if (!ie->indirect_info->polymorphic)
+    {
+      /* A simple indirect call: the target is known when the parameter is
+         the address of a function.  */
+      t = VEC_index (tree, known_vals, param_index);
+      if (t
+          && TREE_CODE (t) == ADDR_EXPR
+          && TREE_CODE (TREE_OPERAND (t, 0)) == FUNCTION_DECL)
+        return TREE_OPERAND (t, 0);
+      else
+        return NULL_TREE;
+    }
+
+  /* A polymorphic call: devirtualize through the binfo of the object.  */
+  token = ie->indirect_info->otr_token;
+  anc_offset = ie->indirect_info->anc_offset;
+  otr_type = ie->indirect_info->otr_type;
+
+  t = VEC_index (tree, known_vals, param_index);
+  if (!t && known_binfos)
+    t = VEC_index (tree, known_binfos, param_index);
+  if (!t)
+    return NULL_TREE;
+
+  /* A constant must first be converted to the binfo it describes; a binfo
+     can be used directly.  Both cases then share the virtual method
+     lookup.  */
+  if (TREE_CODE (t) != TREE_BINFO)
+    {
+      t = gimple_extract_devirt_binfo_from_cst (t);
+      if (!t)
+        return NULL_TREE;
+    }
+
+  binfo = get_binfo_at_offset (t, anc_offset, otr_type);
+  if (!binfo)
+    return NULL_TREE;
+  return gimple_get_virt_method_for_binfo (token, binfo);
+}
+
+/* Calculate devirtualization time bonus for NODE, assuming we know KNOWN_CSTS
+   and KNOWN_BINFOS.  The bonus grows with the inlinability and small size of
+   the devirtualized targets.  */
+
+static int
+devirtualization_time_bonus (struct cgraph_node *node,
+                             VEC (tree, heap) *known_csts,
+                             VEC (tree, heap) *known_binfos)
+{
+  struct cgraph_edge *ie;
+  int res = 0;
+
+  for (ie = node->indirect_calls; ie; ie = ie->next_callee)
+    {
+      struct cgraph_node *callee;
+      struct inline_summary *isummary;
+      tree target;
+
+      target = get_indirect_edge_target (ie, known_csts, known_binfos);
+      if (!target)
+        continue;
+
+      /* Only bare minimum benefit for clearly un-inlineable targets.  */
+      res += 1;
+      callee = cgraph_get_node (target);
+      if (!callee || !callee->analyzed)
+        continue;
+      isummary = inline_summary (callee);
+      if (!isummary->inlinable)
+        continue;
+
+      /* FIXME: The values below need re-considering and perhaps also
+         integrating into the cost metrics, at least in some very basic way.  */
+      if (isummary->size <= MAX_INLINE_INSNS_AUTO / 4)
+        res += 31;
+      else if (isummary->size <= MAX_INLINE_INSNS_AUTO / 2)
+        res += 15;
+      else if (isummary->size <= MAX_INLINE_INSNS_AUTO
+               || DECL_DECLARED_INLINE_P (callee->decl))
+        res += 7;
+    }
+
+  return res;
+}
+
+/* Return true if cloning NODE is a good idea, given the estimated TIME_BENEFIT
+   and SIZE_COST and with the sum of frequencies of incoming edges to the
+   potential new clone in FREQ_SUM and the sum of their profile counts in
+   COUNT_SUM.  */
+
+static bool
+good_cloning_opportunity_p (struct cgraph_node *node, int time_benefit,
+                            int freq_sum, gcov_type count_sum, int size_cost)
+{
+  if (time_benefit == 0
+      || !flag_ipa_cp_clone
+      || !optimize_function_for_speed_p (DECL_STRUCT_FUNCTION (node->decl)))
+    return false;
+
+  gcc_checking_assert (size_cost >= 0);
+  /* Guard the divisions below against a zero size cost; a clone that costs
+     nothing is at least as good as one of unit cost.  */
+  if (size_cost == 0)
+    size_cost = 1;
+
+  /* FIXME: These decisions need tuning.  */
+  if (max_count)
+    {
+      int evaluation, factor = (count_sum * 1000) / max_count;
+
+      evaluation = (time_benefit * factor) / size_cost;
+
+      if (dump_file && (dump_flags & TDF_DETAILS))
+        fprintf (dump_file, " good_cloning_opportunity_p (time: %i, "
+                 "size: %i, count_sum: " HOST_WIDE_INT_PRINT_DEC
+                 ") -> evaluation: %i, threshold: %i\n",
+                 time_benefit, size_cost, (HOST_WIDE_INT) count_sum,
+                 evaluation, PARAM_VALUE (PARAM_IPA_CP_EVAL_THRESHOLD));
+
+      return evaluation >= PARAM_VALUE (PARAM_IPA_CP_EVAL_THRESHOLD);
+    }
+  else
+    {
+      int evaluation = (time_benefit * freq_sum) / size_cost;
+
+      if (dump_file && (dump_flags & TDF_DETAILS))
+        fprintf (dump_file, " good_cloning_opportunity_p (time: %i, "
+                 "size: %i, freq_sum: %i) -> evaluation: %i, threshold: %i\n",
+                 time_benefit, size_cost, freq_sum, evaluation,
+                 PARAM_VALUE (PARAM_IPA_CP_EVAL_THRESHOLD));
+
+      return evaluation >= PARAM_VALUE (PARAM_IPA_CP_EVAL_THRESHOLD);
+    }
+}
+
+
+/* Allocate KNOWN_CSTS and KNOWN_BINFOS and populate them with values of
+   parameters that are known independent of the context.  INFO describes the
+   function.  If REMOVABLE_PARAMS_COST is non-NULL, the movement cost of all
+   removable parameters will be stored in it.  Return true if any
+   context-independent value was found.  */
+
+static bool
+gather_context_independent_values (struct ipa_node_params *info,
+                                   VEC (tree, heap) **known_csts,
+                                   VEC (tree, heap) **known_binfos,
+                                   int *removable_params_cost)
+{
+  int i, count = ipa_get_param_count (info);
+  bool ret = false;
+
+  *known_csts = NULL;
+  *known_binfos = NULL;
+  VEC_safe_grow_cleared (tree, heap, *known_csts, count);
+  VEC_safe_grow_cleared (tree, heap, *known_binfos, count);
+
+  if (removable_params_cost)
+    *removable_params_cost = 0;
+
+  for (i = 0; i < count ; i++)
+    {
+      struct ipcp_lattice *lat = ipa_get_lattice (info, i);
+
+      if (ipa_lat_is_single_const (lat))
+        {
+          struct ipcp_value *val = lat->values;
+          if (TREE_CODE (val->value) != TREE_BINFO)
+            {
+              VEC_replace (tree, *known_csts, i, val->value);
+              if (removable_params_cost)
+                *removable_params_cost
+                  += estimate_move_cost (TREE_TYPE (val->value));
+              ret = true;
+            }
+          else if (lat->virt_call)
+            {
+              VEC_replace (tree, *known_binfos, i, val->value);
+              ret = true;
+            }
+          /* A binfo of a parameter not used for virtual calls is no use; if
+             the parameter is entirely unused, count its removal savings.  */
+          else if (removable_params_cost
+                   && !ipa_is_param_used (info, i))
+            *removable_params_cost
+              += estimate_move_cost (TREE_TYPE (ipa_get_param (info, i)));
+        }
+      else if (removable_params_cost
+               && !ipa_is_param_used (info, i))
+        *removable_params_cost
+          += estimate_move_cost (TREE_TYPE (ipa_get_param (info, i)));
+    }
+
+  return ret;
+}
+
+/* Iterate over known values of parameters of NODE and estimate the local
+   effects in terms of time and size they have.  */
+
+static void
+estimate_local_effects (struct cgraph_node *node)
+{
+  struct ipa_node_params *info = IPA_NODE_REF (node);
+  int i, count = ipa_get_param_count (info);
+  VEC (tree, heap) *known_csts, *known_binfos;
+  bool always_const;
+  int base_time = inline_summary (node)->time;
+  int removable_params_cost;
+
+  if (!count || !ipcp_versionable_function_p (node))
+    return;
+
+  if (dump_file && (dump_flags & TDF_DETAILS))
+    fprintf (dump_file, "\nEstimating effects for %s/%i, base_time: %i.\n",
+             cgraph_node_name (node), node->uid, base_time);
+
+  always_const = gather_context_independent_values (info, &known_csts,
+                                                    &known_binfos,
+                                                    &removable_params_cost);
+  /* If some parameters are known in every context, consider cloning for all
+     contexts at once.  */
+  if (always_const)
+    {
+      struct caller_statistics stats;
+      int time, size;
+
+      init_caller_stats (&stats);
+      cgraph_for_node_and_aliases (node, gather_caller_stats, &stats, false);
+      estimate_ipcp_clone_size_and_time (node, known_csts, &size, &time);
+      time -= devirtualization_time_bonus (node, known_csts, known_binfos);
+      time -= removable_params_cost;
+      size -= stats.n_calls * removable_params_cost;
+
+      if (dump_file)
+        fprintf (dump_file, " - context independent values, size: %i, "
+                 "time_benefit: %i\n", size, base_time - time);
+
+      if (size <= 0
+          || cgraph_will_be_removed_from_program_if_no_direct_calls (node))
+        {
+          info->clone_for_all_contexts = true;
+          base_time = time;
+
+          if (dump_file)
+            fprintf (dump_file, " Decided to specialize for all "
+                     "known contexts, code not going to grow.\n");
+        }
+      else if (good_cloning_opportunity_p (node, base_time - time,
+                                           stats.freq_sum, stats.count_sum,
+                                           size))
+        {
+          if (size + overall_size <= max_new_size)
+            {
+              info->clone_for_all_contexts = true;
+              base_time = time;
+              overall_size += size;
+
+              if (dump_file)
+                fprintf (dump_file, " Decided to specialize for all "
+                         "known contexts, growth deemed beneficial.\n");
+            }
+          else if (dump_file && (dump_flags & TDF_DETAILS))
+            fprintf (dump_file, " Not cloning for all contexts because "
+                     "max_new_size would be reached with %li.\n",
+                     size + overall_size);
+        }
+    }
+
+  /* Estimate the effect of each individual candidate value of every
+     parameter that is not already context-independent.  */
+  for (i = 0; i < count ; i++)
+    {
+      struct ipcp_lattice *lat = ipa_get_lattice (info, i);
+      struct ipcp_value *val;
+      int emc;
+
+      if (lat->bottom
+          || !lat->values
+          || VEC_index (tree, known_csts, i)
+          || VEC_index (tree, known_binfos, i))
+        continue;
+
+      for (val = lat->values; val; val = val->next)
+        {
+          int time, size, time_benefit;
+
+          /* Temporarily plug VAL into the known vectors so the estimate
+             below sees it.  The slots are cleared again by the next value
+             or left stale behind the `continue' because the (i-indexed)
+             slot is never read for a different parameter.  */
+          if (TREE_CODE (val->value) != TREE_BINFO)
+            {
+              VEC_replace (tree, known_csts, i, val->value);
+              VEC_replace (tree, known_binfos, i, NULL_TREE);
+              emc = estimate_move_cost (TREE_TYPE (val->value));
+            }
+          else if (lat->virt_call)
+            {
+              VEC_replace (tree, known_csts, i, NULL_TREE);
+              VEC_replace (tree, known_binfos, i, val->value);
+              emc = 0;
+            }
+          else
+            continue;
+
+          estimate_ipcp_clone_size_and_time (node, known_csts, &size, &time);
+          time_benefit = base_time - time
+            + devirtualization_time_bonus (node, known_csts, known_binfos)
+            + removable_params_cost + emc;
+
+          if (dump_file && (dump_flags & TDF_DETAILS))
+            {
+              fprintf (dump_file, " - estimates for value ");
+              print_ipcp_constant_value (dump_file, val->value);
+              fprintf (dump_file, " for parameter ");
+              print_generic_expr (dump_file, ipa_get_param (info, i), 0);
+              fprintf (dump_file, ": time_benefit: %i, size: %i\n",
+                       time_benefit, size);
+            }
+
+          val->local_time_benefit = time_benefit;
+          val->local_size_cost = size;
+        }
+    }
+
+  VEC_free (tree, heap, known_csts);
+  VEC_free (tree, heap, known_binfos);
+}
+
+
+/* Add value CUR_VAL and all yet-unsorted values it is dependent on to the
+   topological sort of values.  This is Tarjan's strongly connected component
+   algorithm: dfs numbers, low links and an explicit value stack detect SCCs,
+   which are then chained onto the global VALUES_TOPO list.  */
+
+static void
+add_val_to_toposort (struct ipcp_value *cur_val)
+{
+  static int dfs_counter = 0;
+  static struct ipcp_value *stack;
+  struct ipcp_value_source *src;
+
+  /* Already visited.  */
+  if (cur_val->dfs)
+    return;
+
+  dfs_counter++;
+  cur_val->dfs = dfs_counter;
+  cur_val->low_link = dfs_counter;
+
+  cur_val->topo_next = stack;
+  stack = cur_val;
+  cur_val->on_stack = true;
+
+  for (src = cur_val->sources; src; src = src->next)
+    if (src->val)
+      {
+        if (src->val->dfs == 0)
+          {
+            add_val_to_toposort (src->val);
+            if (src->val->low_link < cur_val->low_link)
+              cur_val->low_link = src->val->low_link;
+          }
+        else if (src->val->on_stack
+                 && src->val->dfs < cur_val->low_link)
+          cur_val->low_link = src->val->dfs;
+      }
+
+  /* CUR_VAL is the root of an SCC: pop its members off the stack and chain
+     them via scc_next, then put the SCC onto the topological order.  */
+  if (cur_val->dfs == cur_val->low_link)
+    {
+      struct ipcp_value *v, *scc_list = NULL;
+
+      do
+        {
+          v = stack;
+          stack = v->topo_next;
+          v->on_stack = false;
+
+          v->scc_next = scc_list;
+          scc_list = v;
+        }
+      while (v != cur_val);
+
+      cur_val->topo_next = values_topo;
+      values_topo = cur_val;
+    }
+}
+
+/* Enter every value in the lattices associated with NODE into the
+   topological sort, unless it is already there.  */
+
+static void
+add_all_node_vals_to_toposort (struct cgraph_node *node)
+{
+  struct ipa_node_params *info = IPA_NODE_REF (node);
+  int i, count = ipa_get_param_count (info);
+
+  for (i = 0; i < count; i++)
+    {
+      struct ipcp_lattice *lat = ipa_get_lattice (info, i);
+      struct ipcp_value *val;
+
+      if (!lat->bottom && lat->values)
+        for (val = lat->values; val; val = val->next)
+          add_val_to_toposort (val);
+    }
+}
+
+/* One pass of constants propagation along the call graph edges, from callers
+   to callees (requires topological ordering in TOPO), iterate over strongly
+   connected components.  */
+
+static void
+propagate_constants_topo (struct topo_info *topo)
+{
+  int i;
+
+  for (i = topo->nnodes - 1; i >= 0; i--)
+    {
+      struct cgraph_node *v, *node = topo->order[i];
+      struct ipa_dfs_info *node_dfs_info;
+
+      if (!cgraph_function_with_gimple_body_p (node))
+        continue;
+
+      node_dfs_info = (struct ipa_dfs_info *) node->aux;
+      /* First, iteratively propagate within the strongly connected component
+         until all lattices stabilize.  */
+      v = node_dfs_info->next_cycle;
+      while (v)
+        {
+          push_node_to_stack (topo, v);
+          v = ((struct ipa_dfs_info *) v->aux)->next_cycle;
+        }
+
+      /* Worklist iteration: re-push a callee whenever propagation changed
+         one of its lattices, until a fixed point is reached.  */
+      v = node;
+      while (v)
+        {
+          struct cgraph_edge *cs;
+
+          for (cs = v->callees; cs; cs = cs->next_callee)
+            if (edge_within_scc (cs)
+                && propagate_constants_accross_call (cs))
+              push_node_to_stack (topo, cs->callee);
+          v = pop_node_from_stack (topo);
+        }
+
+      /* Afterwards, propagate along edges leading out of the SCC, calculate
+         the local effects of the discovered constants and add all valid
+         values to their topological sort.  */
+      v = node;
+      while (v)
+        {
+          struct cgraph_edge *cs;
+
+          estimate_local_effects (v);
+          add_all_node_vals_to_toposort (v);
+          for (cs = v->callees; cs; cs = cs->next_callee)
+            if (!edge_within_scc (cs))
+              propagate_constants_accross_call (cs);
+
+          v = ((struct ipa_dfs_info *) v->aux)->next_cycle;
+        }
+    }
+}
+
+/* Propagate the estimated effects of individual values along the topological
+   order, from the dependent values to those they depend on.  */
+
+static void
+propagate_effects (void)
+{
+  struct ipcp_value *base;
+
+  for (base = values_topo; base; base = base->topo_next)
+    {
+      struct ipcp_value_source *src;
+      struct ipcp_value *val;
+      int time = 0, size = 0;
+
+      /* Accumulate the effects of the whole SCC this value heads.  */
+      for (val = base; val; val = val->scc_next)
+        {
+          time += val->local_time_benefit + val->prop_time_benefit;
+          size += val->local_size_cost + val->prop_size_cost;
+        }
+
+      /* Attribute the accumulated effects to every source value reached over
+         a maybe-hot edge.  */
+      for (val = base; val; val = val->scc_next)
+        for (src = val->sources; src; src = src->next)
+          if (src->val
+              && cgraph_maybe_hot_edge_p (src->cs))
+            {
+              src->val->prop_time_benefit += time;
+              src->val->prop_size_cost += size;
+            }
+    }
+}
+
+
+/* Propagate constants, binfos and their effects from the summaries
+   interprocedurally.  */
+
+static void
+ipcp_propagate_stage (struct topo_info *topo)
+{
+  struct cgraph_node *node;
+
+  if (dump_file)
+    fprintf (dump_file, "\n Propagating constants:\n\n");
+
+  if (in_lto_p)
+    ipa_update_after_lto_read ();
+
+
+  /* Set up lattices for every analyzable function and gather the unit-wide
+     totals used to cap clone-induced growth.  */
+  FOR_EACH_DEFINED_FUNCTION (node)
+  {
+    struct ipa_node_params *info = IPA_NODE_REF (node);
+
+    determine_versionability (node);
+    if (cgraph_function_with_gimple_body_p (node))
+      {
+        info->lattices = XCNEWVEC (struct ipcp_lattice,
+                                   ipa_get_param_count (info));
+        initialize_node_lattices (node);
+      }
+    if (node->count > max_count)
+      max_count = node->count;
+    overall_size += inline_summary (node)->self_size;
+  }
+
+  max_new_size = overall_size;
+  if (max_new_size < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
+    max_new_size = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
+  max_new_size += max_new_size * PARAM_VALUE (PARAM_IPCP_UNIT_GROWTH) / 100 + 1;
+
+  if (dump_file)
+    fprintf (dump_file, "\noverall_size: %li, max_new_size: %li\n",
+             overall_size, max_new_size);
+
+  propagate_constants_topo (topo);
+#ifdef ENABLE_CHECKING
+  ipcp_verify_propagated_values ();
+#endif
+  propagate_effects ();
+
+  if (dump_file)
+    {
+      fprintf (dump_file, "\nIPA lattices after all propagation:\n");
+      print_all_lattices (dump_file, (dump_flags & TDF_DETAILS), true);
+    }
+}
+
+/* Discover newly direct outgoing edges from NODE, which is a new clone with
+   known KNOWN_VALS, and make them direct.  */
+
+static void
+ipcp_discover_new_direct_edges (struct cgraph_node *node,
+                                VEC (tree, heap) *known_vals)
+{
+  struct cgraph_edge *ie, *next_ie;
+
+  /* Fetch the successor before possibly redirecting IE, which can unlink it
+     from the indirect call list.  */
+  for (ie = node->indirect_calls; ie; ie = next_ie)
+    {
+      tree target = get_indirect_edge_target (ie, known_vals, NULL);
+
+      next_ie = ie->next_callee;
+      if (target)
+        ipa_make_edge_direct_to_target (ie, target);
+    }
+}
+
+/* Vector of pointers which form linked lists of clones of an original cgraph
+   edge.  Indexed by edge uid; maintained by the edge duplication hook
+   below.  */
+
+static VEC (cgraph_edge_p, heap) *next_edge_clone;
+
+/* Make sure NEXT_EDGE_CLONE is large enough to be indexed by any edge uid
+   currently in use.  */
+
+static inline void
+grow_next_edge_clone_vector (void)
+{
+  unsigned needed = (unsigned) cgraph_edge_max_uid + 1;
+
+  if (VEC_length (cgraph_edge_p, next_edge_clone) < needed)
+    VEC_safe_grow_cleared (cgraph_edge_p, heap, next_edge_clone, needed);
+}
+
+/* Edge duplication hook to grow the appropriate linked list in
+   next_edge_clone.  Inserts DST into the clone list right after SRC.  */
+
+static void
+ipcp_edge_duplication_hook (struct cgraph_edge *src, struct cgraph_edge *dst,
+                            __attribute__((unused)) void *data)
+{
+  grow_next_edge_clone_vector ();
+  /* DST inherits SRC's successor first; only then is SRC pointed at DST, so
+     the order of these two statements matters.  */
+  VEC_replace (cgraph_edge_p, next_edge_clone, dst->uid,
+               VEC_index (cgraph_edge_p, next_edge_clone, src->uid));
+  VEC_replace (cgraph_edge_p, next_edge_clone, src->uid, dst);
+}
+
+/* Get the next clone in the linked list of clones of edge CS, or NULL when
+   there is none.  */
+
+static inline struct cgraph_edge *
+get_next_cgraph_edge_clone (struct cgraph_edge *cs)
+{
+  return VEC_index (cgraph_edge_p, next_edge_clone, cs->uid);
+}
+
+/* Return true if edge CS does bring about the value described by SRC.  */
+
+static bool
+cgraph_edge_brings_value_p (struct cgraph_edge *cs,
+                            struct ipcp_value_source *src)
+{
+  struct ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
+
+  /* Edges into already-specialized callees or from dead callers do not
+     count.  */
+  if (IPA_NODE_REF (cs->callee)->ipcp_orig_node
+      || caller_info->node_dead)
+    return false;
+  /* A source without a value represents a context-independent constant,
+     which every live edge brings.  */
+  if (!src->val)
+    return true;
+
+  if (caller_info->ipcp_orig_node)
+    {
+      /* The caller is a clone: check the value recorded at cloning time.  */
+      tree t = VEC_index (tree, caller_info->known_vals, src->index);
+      return (t != NULL_TREE
+              && values_equal_for_ipcp_p (src->val->value, t));
+    }
+  else
+    {
+      /* Otherwise consult the caller's lattice for the parameter.  */
+      struct ipcp_lattice *lat = ipa_get_lattice (caller_info, src->index);
+      if (ipa_lat_is_single_const (lat)
+          && values_equal_for_ipcp_p (src->val->value, lat->values->value))
+        return true;
+      else
+        return false;
+    }
+}
+
+/* Given VAL, iterate over all its sources and if they still hold, add their
+ edge frequency and their number into *FREQUENCY and *CALLER_COUNT
+ respectively. */
+
+static bool
+get_info_about_necessary_edges (struct ipcp_value *val, int *freq_sum,
+ gcov_type *count_sum, int *caller_count)
+{
+ struct ipcp_value_source *src;
+ int freq = 0, count = 0;
+ gcov_type cnt = 0;
+ bool hot = false;
+
+ for (src = val->sources; src; src = src->next)
+ {
+ struct cgraph_edge *cs = src->cs;
+ while (cs)
+ {
+ if (cgraph_edge_brings_value_p (cs, src))