+
+ lat->values_count++;
+ val = (struct ipcp_value *) pool_alloc (ipcp_values_pool);
+ memset (val, 0, sizeof (*val));
+
+ add_value_source (val, cs, src_val, src_idx);
+ val->value = newval;
+ val->next = lat->values;
+ lat->values = val;
+ return true;
+}
+
+/* Propagate values through a pass-through jump function JFUNC associated with
+   edge CS, taking values from SRC_LAT and putting them into DEST_LAT.  SRC_IDX
+   is the index of the source parameter.  Returns the OR of the results of the
+   lattice-update helpers (presumably true when DEST_LAT changed -- confirm
+   against add_value_to_lattice / set_lattice_contains_variable).  */
+
+static bool
+propagate_vals_accross_pass_through (struct cgraph_edge *cs,
+ struct ipa_jump_func *jfunc,
+ struct ipcp_lattice *src_lat,
+ struct ipcp_lattice *dest_lat,
+ int src_idx)
+{
+ struct ipcp_value *src_val;
+ bool ret = false;
+
+ /* A NOP_EXPR pass-through copies the value unchanged, so every source
+ value can be added to the destination lattice directly.  */
+ if (jfunc->value.pass_through.operation == NOP_EXPR)
+ for (src_val = src_lat->values; src_val; src_val = src_val->next)
+ ret |= add_value_to_lattice (dest_lat, src_val->value, cs,
+ src_val, src_idx);
+ /* Do not create new values when propagating within an SCC because if there
+ are arithmetic functions with circular dependencies, there is an infinite
+ number of them and we would just make lattices bottom.  */
+ else if (edge_within_scc (cs))
+ ret = set_lattice_contains_variable (dest_lat);
+ else
+ for (src_val = src_lat->values; src_val; src_val = src_val->next)
+ {
+ tree cstval = src_val->value;
+
+ /* BINFO values are not run through the arithmetic pass-through
+ computation; the destination lattice just becomes variable.  */
+ if (TREE_CODE (cstval) == TREE_BINFO)
+ {
+ ret |= set_lattice_contains_variable (dest_lat);
+ continue;
+ }
+ cstval = ipa_get_jf_pass_through_result (jfunc, cstval);
+
+ if (cstval)
+ ret |= add_value_to_lattice (dest_lat, cstval, cs, src_val, src_idx);
+ else
+ ret |= set_lattice_contains_variable (dest_lat);
+ }
+
+ return ret;
+}
+
+/* Propagate values through an ancestor jump function JFUNC associated with
+   edge CS, taking values from SRC_LAT and putting them into DEST_LAT.  SRC_IDX
+   is the index of the source parameter.  Returns the OR of the results of the
+   lattice-update helpers.  */
+
+static bool
+propagate_vals_accross_ancestor (struct cgraph_edge *cs,
+ struct ipa_jump_func *jfunc,
+ struct ipcp_lattice *src_lat,
+ struct ipcp_lattice *dest_lat,
+ int src_idx)
+{
+ struct ipcp_value *src_val;
+ bool ret = false;
+
+ /* As in the pass-through case, do not create new values within an SCC;
+ mark the destination lattice as containing a variable instead.  */
+ if (edge_within_scc (cs))
+ return set_lattice_contains_variable (dest_lat);
+
+ for (src_val = src_lat->values; src_val; src_val = src_val->next)
+ {
+ tree t = src_val->value;
+
+ /* BINFOs are translated by looking up the sub-BINFO at the ancestor
+ offset and type; all other values go through the generic ancestor
+ computation.  Either helper may fail and return NULL.  */
+ if (TREE_CODE (t) == TREE_BINFO)
+ t = get_binfo_at_offset (t, jfunc->value.ancestor.offset,
+ jfunc->value.ancestor.type);
+ else
+ t = ipa_get_jf_ancestor_result (jfunc, t);
+
+ if (t)
+ ret |= add_value_to_lattice (dest_lat, t, cs, src_val, src_idx);
+ else
+ ret |= set_lattice_contains_variable (dest_lat);
+ }
+
+ return ret;
+}
+
+/* Propagate values across jump function JFUNC that is associated with edge CS
+   and put the values into DEST_LAT.  Returns the OR of the results of the
+   lattice-update helpers.  */
+
+static bool
+propagate_accross_jump_function (struct cgraph_edge *cs,
+ struct ipa_jump_func *jfunc,
+ struct ipcp_lattice *dest_lat)
+{
+ /* A bottom lattice can never change again, so nothing to do.  */
+ if (dest_lat->bottom)
+ return false;
+
+ if (jfunc->type == IPA_JF_CONST
+ || jfunc->type == IPA_JF_KNOWN_TYPE)
+ {
+ tree val;
+
+ /* Known-type jump functions may fail to yield a usable value, in
+ which case the destination lattice becomes variable.  */
+ if (jfunc->type == IPA_JF_KNOWN_TYPE)
+ {
+ val = ipa_value_from_known_type_jfunc (jfunc);
+ if (!val)
+ return set_lattice_contains_variable (dest_lat);
+ }
+ else
+ val = jfunc->value.constant;
+ return add_value_to_lattice (dest_lat, val, cs, NULL, 0);
+ }
+ else if (jfunc->type == IPA_JF_PASS_THROUGH
+ || jfunc->type == IPA_JF_ANCESTOR)
+ {
+ struct ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
+ struct ipcp_lattice *src_lat;
+ int src_idx;
+ bool ret;
+
+ /* Both jump function kinds reference a formal parameter of the
+ caller; fetch its lattice.  */
+ if (jfunc->type == IPA_JF_PASS_THROUGH)
+ src_idx = jfunc->value.pass_through.formal_id;
+ else
+ src_idx = jfunc->value.ancestor.formal_id;
+
+ src_lat = ipa_get_lattice (caller_info, src_idx);
+ if (src_lat->bottom)
+ return set_lattice_contains_variable (dest_lat);
+
+ /* If we would need to clone the caller and cannot, do not propagate.  */
+ if (!ipcp_versionable_function_p (cs->caller)
+ && (src_lat->contains_variable
+ || (src_lat->values_count > 1)))
+ return set_lattice_contains_variable (dest_lat);
+
+ if (jfunc->type == IPA_JF_PASS_THROUGH)
+ ret = propagate_vals_accross_pass_through (cs, jfunc, src_lat,
+ dest_lat, src_idx);
+ else
+ ret = propagate_vals_accross_ancestor (cs, jfunc, src_lat, dest_lat,
+ src_idx);
+
+ /* Variability in the source always carries over to the destination.  */
+ if (src_lat->contains_variable)
+ ret |= set_lattice_contains_variable (dest_lat);
+
+ return ret;
+ }
+
+ /* TODO: We currently do not handle member method pointers in IPA-CP (we only
+ use it for indirect inlining), we should propagate them too.  */
+ return set_lattice_contains_variable (dest_lat);
+}
+
+/* Propagate constants from the caller to the callee of CS.  Returns the OR of
+   the results of the per-parameter lattice updates.  (The previous comment
+   mentioned an INFO parameter that does not exist; the callee's
+   ipa_node_params is looked up locally instead.)  */
+
+static bool
+propagate_constants_accross_call (struct cgraph_edge *cs)
+{
+ struct ipa_node_params *callee_info;
+ enum availability availability;
+ struct cgraph_node *callee, *alias_or_thunk;
+ struct ipa_edge_args *args;
+ bool ret = false;
+ int i, args_count, parms_count;
+
+ callee = cgraph_function_node (cs->callee, &availability);
+ if (!callee->analyzed)
+ return false;
+ gcc_checking_assert (cgraph_function_with_gimple_body_p (callee));
+ callee_info = IPA_NODE_REF (callee);
+
+ args = IPA_EDGE_REF (cs);
+ args_count = ipa_get_cs_argument_count (args);
+ parms_count = ipa_get_param_count (callee_info);
+
+ /* If this call goes through a thunk we must not propagate to the first (0th)
+ parameter.  However, we might need to uncover a thunk from below a series
+ of aliases first.  */
+ alias_or_thunk = cs->callee;
+ while (alias_or_thunk->alias)
+ alias_or_thunk = cgraph_alias_aliased_node (alias_or_thunk);
+ if (alias_or_thunk->thunk.thunk_p)
+ {
+ ret |= set_lattice_contains_variable (ipa_get_lattice (callee_info, 0));
+ i = 1;
+ }
+ else
+ i = 0;
+
+ /* Propagate one jump function per actual argument that has a matching
+ formal parameter.  Overwritable callees get only variable lattices.  */
+ for (; (i < args_count) && (i < parms_count); i++)
+ {
+ struct ipa_jump_func *jump_func = ipa_get_ith_jump_func (args, i);
+ struct ipcp_lattice *dest_lat = ipa_get_lattice (callee_info, i);
+
+ if (availability == AVAIL_OVERWRITABLE)
+ ret |= set_lattice_contains_variable (dest_lat);
+ else
+ ret |= propagate_accross_jump_function (cs, jump_func, dest_lat);
+ }
+ /* Formal parameters with no corresponding argument become variable.  */
+ for (; i < parms_count; i++)
+ ret |= set_lattice_contains_variable (ipa_get_lattice (callee_info, i));
+
+ return ret;
+}
+
+/* If an indirect edge IE can be turned into a direct one based on KNOWN_VALS
+   (which can contain both constants and binfos) or KNOWN_BINFOS (which can be
+   NULL) return the destination.  Return NULL_TREE when the target cannot be
+   determined.  */
+
+tree
+ipa_get_indirect_edge_target (struct cgraph_edge *ie,
+ VEC (tree, heap) *known_vals,
+ VEC (tree, heap) *known_binfos)
+{
+  int param_index = ie->indirect_info->param_index;
+  HOST_WIDE_INT token, anc_offset;
+  tree otr_type;
+  tree t;
+
+  /* The call target is not based on any formal parameter.  */
+  if (param_index == -1)
+    return NULL_TREE;
+
+  if (!ie->indirect_info->polymorphic)
+    {
+      /* A simple indirect call is direct iff the known value is the address
+	 of a function.  Use the outer T rather than shadowing it with a
+	 second local declaration.  */
+      t = (VEC_length (tree, known_vals) > (unsigned int) param_index
+	   ? VEC_index (tree, known_vals, param_index) : NULL);
+      if (t
+	  && TREE_CODE (t) == ADDR_EXPR
+	  && TREE_CODE (TREE_OPERAND (t, 0)) == FUNCTION_DECL)
+	return TREE_OPERAND (t, 0);
+      else
+	return NULL_TREE;
+    }
+
+  /* A polymorphic call: find the virtual method in the BINFO describing the
+     known dynamic type, adjusted to the ancestor offset.  */
+  token = ie->indirect_info->otr_token;
+  anc_offset = ie->indirect_info->anc_offset;
+  otr_type = ie->indirect_info->otr_type;
+
+  /* NOTE(review): unlike the non-polymorphic path above, KNOWN_VALS is
+     indexed here without a length check -- presumably callers guarantee it
+     covers all parameters on this path; confirm.  */
+  t = VEC_index (tree, known_vals, param_index);
+  if (!t && known_binfos
+      && VEC_length (tree, known_binfos) > (unsigned int) param_index)
+    t = VEC_index (tree, known_binfos, param_index);
+  if (!t)
+    return NULL_TREE;
+
+  /* A constant must first be converted to a BINFO; after that both cases
+     proceed identically, so the common tail is shared instead of being
+     duplicated in two branches.  */
+  if (TREE_CODE (t) != TREE_BINFO)
+    {
+      t = gimple_extract_devirt_binfo_from_cst (t);
+      if (!t)
+	return NULL_TREE;
+    }
+
+  t = get_binfo_at_offset (t, anc_offset, otr_type);
+  if (!t)
+    return NULL_TREE;
+  return gimple_get_virt_method_for_binfo (token, t);
+}
+
+/* Calculate devirtualization time bonus for NODE, assuming we know KNOWN_CSTS
+ and KNOWN_BINFOS. */
+
+static int
+devirtualization_time_bonus (struct cgraph_node *node,
+ VEC (tree, heap) *known_csts,
+ VEC (tree, heap) *known_binfos)
+{
+ struct cgraph_edge *ie;
+ int res = 0;
+
+ for (ie = node->indirect_calls; ie; ie = ie->next_callee)
+ {
+ struct cgraph_node *callee;
+ struct inline_summary *isummary;
+ tree target;
+
+ target = ipa_get_indirect_edge_target (ie, known_csts, known_binfos);
+ if (!target)
+ continue;
+
+ /* Only bare minimum benefit for clearly un-inlineable targets. */
+ res += 1;
+ callee = cgraph_get_node (target);
+ if (!callee || !callee->analyzed)
+ continue;
+ isummary = inline_summary (callee);
+ if (!isummary->inlinable)
+ continue;
+
+ /* FIXME: The values below need re-considering and perhaps also
+ integrating into the cost metrics, at least in some very basic way.  */
+ if (isummary->size <= MAX_INLINE_INSNS_AUTO / 4)
+ res += 31;
+ else if (isummary->size <= MAX_INLINE_INSNS_AUTO / 2)
+ res += 15;
+ else if (isummary->size <= MAX_INLINE_INSNS_AUTO
+ || DECL_DECLARED_INLINE_P (callee->decl))
+ res += 7;
+ }
+