Do not FAIL in miscompiled runtime tests.
[pf3gnuchains/gcc-fork.git] gcc/ipa-inline.c
index 662e13f..916c2a7 100644
@@ -207,6 +207,29 @@ cgraph_estimate_size_after_inlining (int times, struct cgraph_node *to,
   return size;
 }
 
+/* Scale frequency of NODE edges by FREQ_SCALE and increase loop nest
+   by NEST.  */
+
+static void
+update_noncloned_frequencies (struct cgraph_node *node,
+                             int freq_scale, int nest)
+{
+  struct cgraph_edge *e;
+
+  /* We do not want to ignore high loop nest after freq drops to 0.  */
+  if (!freq_scale)
+    freq_scale = 1;
+  for (e = node->callees; e; e = e->next_callee)
+    {
+      e->loop_nest += nest;
+      e->frequency = e->frequency * (gcov_type) freq_scale / CGRAPH_FREQ_BASE;
+      if (e->frequency > CGRAPH_FREQ_MAX)
+        e->frequency = CGRAPH_FREQ_MAX;
+      if (!e->inline_failed)
+        update_noncloned_frequencies (e->callee, freq_scale, nest);
+    }
+}
+
 /* E is expected to be an edge being inlined.  Clone destination node of
    the edge and redirect it to the new clone.
    DUPLICATE is used for bookkeeping on whether we are actually creating new
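
The helper added above rescales edge frequencies in fixed point: each callee edge is multiplied by FREQ_SCALE and divided by CGRAPH_FREQ_BASE, widened through gcov_type so the multiply cannot overflow, and clamped at CGRAPH_FREQ_MAX; a zero scale is bumped to 1 so that loop-nest depth still accumulates on paths the profile says never execute. A minimal standalone sketch of the same arithmetic (FREQ_BASE and FREQ_MAX are hypothetical stand-ins for the cgraph constants):

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical stand-ins for CGRAPH_FREQ_BASE / CGRAPH_FREQ_MAX.  */
    #define FREQ_BASE 1000
    #define FREQ_MAX  100000

    static int
    scale_frequency (int edge_freq, int freq_scale)
    {
      /* Do not let a zero scale erase the edge entirely; the patch bumps
         it to 1 so loop nest information still propagates.  */
      if (freq_scale == 0)
        freq_scale = 1;
      /* Widen before multiplying, as the gcov_type cast does, then clamp.  */
      int64_t f = (int64_t) edge_freq * freq_scale / FREQ_BASE;
      return f > FREQ_MAX ? FREQ_MAX : (int) f;
    }

    int
    main (void)
    {
      /* Edge at 2x base frequency inlined at a 3x call site: 6x base.  */
      printf ("%d\n", scale_frequency (2 * FREQ_BASE, 3 * FREQ_BASE));
      /* A very hot combination saturates at the clamp.  */
      printf ("%d\n", scale_frequency (FREQ_MAX, 50 * FREQ_BASE));
      return 0;
    }
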
@@ -223,7 +246,11 @@ cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
       /* We may eliminate the need for out-of-line copy to be output.
         In that case just go ahead and re-use it.  */
       if (!e->callee->callers->next_caller
-         && !e->callee->needed
+         && cgraph_can_remove_if_no_direct_calls_p (e->callee)
+         /* Don't reuse if more than one function shares a comdat group.
+            If the other function(s) are needed, we need to emit even
+            this function out of line.  */
+         && !e->callee->same_comdat_group
          && !cgraph_new_nodes)
        {
          gcc_assert (!e->callee->global.inlined_to);
@@ -233,12 +260,14 @@ cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
              nfunctions_inlined++;
            }
          duplicate = false;
+         e->callee->local.externally_visible = false;
+          update_noncloned_frequencies (e->callee, e->frequency, e->loop_nest);
        }
       else
        {
          struct cgraph_node *n;
-         n = cgraph_clone_node (e->callee, e->count, e->frequency, e->loop_nest, 
-                                update_original);
+         n = cgraph_clone_node (e->callee, e->count, e->frequency, e->loop_nest,
+                                update_original, NULL);
          cgraph_redirect_edge_callee (e, n);
        }
     }
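
This hunk (and the mirrored test in cgraph_mark_inline_edge below) replaces the old `needed` flag with cgraph_can_remove_if_no_direct_calls_p and additionally refuses to reuse a body that shares a comdat group: if another group member is needed, this function must be emitted out of line even after its last direct call is inlined. When reuse does happen, the body stops being externally visible and its edge frequencies are rescaled by the new helper. The reuse decision reduces to a predicate like the following sketch, where the structs are hypothetical miniatures of struct cgraph_node and struct cgraph_edge:

    #include <stdbool.h>
    #include <stdio.h>

    struct caller_edge { struct caller_edge *next_caller; };

    struct node
    {
      struct caller_edge *callers;        /* incoming call edges */
      bool removable_if_no_direct_calls;  /* cgraph_can_remove_if_no_direct_calls_p */
      bool in_comdat_group;               /* same_comdat_group != NULL */
    };

    /* True when inlining the one remaining call may take over the existing
       body instead of cloning it.  A comdat sibling vetoes reuse: if the
       sibling is needed, this body must stay available out of line too.  */
    static bool
    can_reuse_body (struct node *callee, bool have_new_cgraph_nodes)
    {
      return callee->callers
             && !callee->callers->next_caller  /* exactly one caller */
             && callee->removable_if_no_direct_calls
             && !callee->in_comdat_group
             && !have_new_cgraph_nodes;
    }

    int
    main (void)
    {
      struct caller_edge only = { 0 };
      struct node f = { &only, true, false };
      struct node g = { &only, true, true };  /* comdat group member */
      printf ("%d %d\n", can_reuse_body (&f, false), can_reuse_body (&g, false));
      return 0;
    }
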
@@ -286,7 +315,8 @@ cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original,
   e->callee->global.inlined = true;
 
   if (e->callee->callers->next_caller
-      || e->callee->needed)
+      || !cgraph_can_remove_if_no_direct_calls_p (e->callee)
+      || e->callee->same_comdat_group)
     duplicate = true;
   cgraph_clone_inlined_nodes (e, true, update_original);
 
@@ -326,7 +356,7 @@ cgraph_mark_inline (struct cgraph_edge *edge)
   struct cgraph_node *what = edge->callee;
   struct cgraph_edge *e, *next;
 
-  gcc_assert (!gimple_call_cannot_inline_p (edge->call_stmt));
+  gcc_assert (!edge->call_stmt_cannot_inline_p);
   /* Look for all calls, mark them inline and clone recursively
      all inlined functions.  */
   for (e = what->callers; e; e = next)
@@ -368,7 +398,8 @@ cgraph_estimate_growth (struct cgraph_node *node)
      we decide to not inline for different reasons, but it is not big deal
      as in that case we will keep the body around, but we will also avoid
      some inlining.  */
-  if (!node->needed && !DECL_EXTERNAL (node->decl) && !self_recursive)
+  if (cgraph_only_called_directly_p (node)
+      && !DECL_EXTERNAL (node->decl) && !self_recursive)
     growth -= node->global.size;
 
   node->global.estimated_growth = growth;
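
cgraph_estimate_growth now credits back the whole body size when cgraph_only_called_directly_p holds (rather than the old !node->needed test), the body is not DECL_EXTERNAL, and the function is not self-recursive, because inlining into every direct caller lets the offline copy be dropped. A simplified worked example of that accounting, with made-up sizes (the real code sums per-edge estimates from cgraph_estimate_size_after_inlining):

    #include <stdio.h>

    int
    main (void)
    {
      int body_size = 40;     /* size of the callee body */
      int call_overhead = 5;  /* size of the call that inlining replaces */
      int nsites = 3;         /* direct call sites */

      /* Each site grows by the body minus the call it replaces.  */
      int growth = nsites * (body_size - call_overhead);

      /* Only called directly, not DECL_EXTERNAL, not self-recursive:
         the offline copy disappears, so credit back its size.  */
      growth -= body_size;

      printf ("estimated growth: %+i\n", growth);  /* prints +65 */
      return 0;
    }
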
@@ -376,7 +407,7 @@ cgraph_estimate_growth (struct cgraph_node *node)
 }
 
 /* Return false when inlining WHAT into TO is not good idea
-   as it would cause too large growth of function bodies.  
+   as it would cause too large growth of function bodies.
    When ONE_ONLY is true, assume that only one call site is going
    to be inlined, otherwise figure out how many call sites in
    TO calls WHAT and verify that all can be inlined.
@@ -545,7 +576,7 @@ cgraph_edge_badness (struct cgraph_edge *edge)
       badness = growth * 10000;
       div *= MIN (100 * inline_summary (edge->callee)->time_inlining_benefit
                  / (edge->callee->global.time + 1) + 1, 100);
-      
+
 
       /* Decrease badness if call is nested.  */
       /* Compress the range so we don't overflow.  */
@@ -569,7 +600,7 @@ cgraph_edge_badness (struct cgraph_edge *edge)
       badness = cgraph_estimate_growth (edge->callee) * 256;
 
       /* Decrease badness if call is nested.  */
-      if (badness > 0)    
+      if (badness > 0)
        badness >>= nest;
       else
         {
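
Both hunks above are whitespace cleanups, but the surrounding logic is worth spelling out: without profile feedback the badness key is the estimated growth times 256, then shifted right once per loop-nest level, so calls buried in loops get a smaller key and are pulled from the fibheap earlier. A tiny sketch of that shift, with an assumed growth value:

    #include <stdio.h>

    int
    main (void)
    {
      /* Smaller badness = more attractive candidate.  Halving once per
         nest level favors calls inside loops.  */
      int growth = 30;
      int badness = growth * 256;  /* 7680 */
      for (int nest = 0; nest <= 3; nest++)
        printf ("nest %d -> badness %d\n", nest, badness >> nest);
      return 0;
    }
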
@@ -718,12 +749,13 @@ cgraph_decide_recursive_inlining (struct cgraph_node *node,
     }
 
   if (dump_file)
-    fprintf (dump_file, 
+    fprintf (dump_file,
             "  Performing recursive inlining on %s\n",
             cgraph_node_name (node));
 
   /* We need original clone to copy around.  */
-  master_clone = cgraph_clone_node (node, node->count, CGRAPH_FREQ_BASE, 1, false);
+  master_clone = cgraph_clone_node (node, node->count, CGRAPH_FREQ_BASE, 1,
+                                   false, NULL);
   master_clone->needed = true;
   for (e = master_clone->callees; e; e = e->next_callee)
     if (!e->inline_failed)
@@ -746,7 +778,7 @@ cgraph_decide_recursive_inlining (struct cgraph_node *node,
       if (depth > max_depth)
        {
           if (dump_file)
-           fprintf (dump_file, 
+           fprintf (dump_file,
                     "   maximal depth reached\n");
          continue;
        }
@@ -762,7 +794,7 @@ cgraph_decide_recursive_inlining (struct cgraph_node *node,
           if (curr->count * 100 / node->count < probability)
            {
              if (dump_file)
-               fprintf (dump_file, 
+               fprintf (dump_file,
                         "   Probability of edge is too small\n");
              continue;
            }
@@ -770,7 +802,7 @@ cgraph_decide_recursive_inlining (struct cgraph_node *node,
 
       if (dump_file)
        {
-         fprintf (dump_file, 
+         fprintf (dump_file,
                   "   Inlining call of depth %i", depth);
          if (node->count)
            {
@@ -789,7 +821,7 @@ cgraph_decide_recursive_inlining (struct cgraph_node *node,
 
   fibheap_delete (heap);
   if (dump_file)
-    fprintf (dump_file, 
+    fprintf (dump_file,
             "\n   Inlined %i times, body grown from size %i to %i, time %i to %i\n", n,
             master_clone->global.size, node->global.size,
             master_clone->global.time, node->global.time);
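
The recursive inliner filters edges by relative hotness: an edge whose count is less than `probability` percent of the function's total count is skipped, and the integer division floors, so 9.9% counts as 9%. A minimal sketch of the test (names here are hypothetical; in GCC the threshold comes from a tunable --param):

    #include <stdio.h>

    /* Keep only edges whose execution count is at least PROBABILITY
       percent of the function's own count.  Integer division floors.  */
    static int
    edge_is_hot_enough (long long edge_count, long long node_count,
                        int probability)
    {
      return edge_count * 100 / node_count >= probability;
    }

    int
    main (void)
    {
      printf ("%d\n", edge_is_hot_enough (99, 1000, 10));   /* 0: 9% < 10% */
      printf ("%d\n", edge_is_hot_enough (100, 1000, 10));  /* 1: exactly 10% */
      return 0;
    }
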
@@ -920,11 +952,11 @@ cgraph_decide_inlining_of_small_functions (void)
 
       if (dump_file)
        {
-         fprintf (dump_file, 
+         fprintf (dump_file,
                   "\nConsidering %s with %i size\n",
                   cgraph_node_name (edge->callee),
                   edge->callee->global.size);
-         fprintf (dump_file, 
+         fprintf (dump_file,
                   " to be inlined into %s in %s:%i\n"
                   " Estimated growth after inlined into all callees is %+i insns.\n"
                   " Estimated badness is %i, frequency %.2f.\n",
@@ -1030,7 +1062,7 @@ cgraph_decide_inlining_of_small_functions (void)
       else
        {
          struct cgraph_node *callee;
-         if (gimple_call_cannot_inline_p (edge->call_stmt)
+         if (edge->call_stmt_cannot_inline_p
              || !cgraph_check_inline_limits (edge->caller, edge->callee,
                                              &edge->inline_failed, true))
            {
@@ -1062,7 +1094,7 @@ cgraph_decide_inlining_of_small_functions (void)
 
       if (dump_file)
        {
-         fprintf (dump_file, 
+         fprintf (dump_file,
                   " Inlined into %s which now has size %i and self time %i,"
                   "net change of %+i.\n",
                   cgraph_node_name (edge->caller),
@@ -1111,6 +1143,8 @@ cgraph_decide_inlining (void)
   int initial_size = 0;
 
   cgraph_remove_function_insertion_hook (function_insertion_hook_holder);
+  if (in_lto_p && flag_indirect_inlining)
+    ipa_update_after_lto_read ();
 
   max_count = 0;
   max_benefit = 0;
@@ -1120,7 +1154,6 @@ cgraph_decide_inlining (void)
        struct cgraph_edge *e;
 
        gcc_assert (inline_summary (node)->self_size == node->global.size);
-       gcc_assert (node->needed || node->reachable);
        initial_size += node->global.size;
        for (e = node->callees; e; e = e->next_callee)
          if (max_count < e->count)
@@ -1128,7 +1161,9 @@ cgraph_decide_inlining (void)
        if (max_benefit < inline_summary (node)->time_inlining_benefit)
          max_benefit = inline_summary (node)->time_inlining_benefit;
       }
-  gcc_assert (!max_count || (profile_info && flag_branch_probabilities));
+  gcc_assert (in_lto_p
+             || !max_count
+             || (profile_info && flag_branch_probabilities));
   overall_size = initial_size;
 
   nnodes = cgraph_postorder (order);
@@ -1176,8 +1211,7 @@ cgraph_decide_inlining (void)
          for (e = node->callers; e; e = next)
            {
              next = e->next_caller;
-             if (!e->inline_failed
-                 || gimple_call_cannot_inline_p (e->call_stmt))
+             if (!e->inline_failed || e->call_stmt_cannot_inline_p)
                continue;
              if (cgraph_recursive_inlining_p (e->caller, e->callee,
                                               &e->inline_failed))
@@ -1199,7 +1233,7 @@ cgraph_decide_inlining (void)
            if (e->inline_failed)
              e->inline_failed = CIF_RECURSIVE_INLINING;
          if (dump_file)
-           fprintf (dump_file, 
+           fprintf (dump_file,
                     " Inlined for a net change of %+i size.\n",
                     overall_size - old_size);
        }
@@ -1219,15 +1253,16 @@ cgraph_decide_inlining (void)
 
          if (node->callers
              && !node->callers->next_caller
-             && !node->needed
+             && cgraph_only_called_directly_p (node)
              && node->local.inlinable
              && node->callers->inline_failed
              && node->callers->caller != node
              && node->callers->caller->global.inlined_to != node
-             && !gimple_call_cannot_inline_p (node->callers->call_stmt)
+             && !node->callers->call_stmt_cannot_inline_p
              && !DECL_EXTERNAL (node->decl)
              && !DECL_COMDAT (node->decl))
            {
+             cgraph_inline_failed_t reason;
              old_size = overall_size;
              if (dump_file)
                {
@@ -1241,7 +1276,7 @@ cgraph_decide_inlining (void)
                }
 
              if (cgraph_check_inline_limits (node->callers->caller, node,
-                                             NULL, false))
+                                             &reason, false))
                {
                  cgraph_mark_inline (node->callers);
                  if (dump_file)
@@ -1256,7 +1291,8 @@ cgraph_decide_inlining (void)
                {
                  if (dump_file)
                    fprintf (dump_file,
-                            " Inline limit reached, not inlined.\n");
+                            " Not inlining: %s.\n",
+                             cgraph_inline_failed_string (reason));
                }
            }
        }
@@ -1329,7 +1365,7 @@ try_inline (struct cgraph_edge *e, enum inlining_mode mode, int depth)
           return false;
        }
     }
-      
+
   callee->aux = (void *)(size_t) mode;
   if (dump_file)
     {
@@ -1344,7 +1380,7 @@ try_inline (struct cgraph_edge *e, enum inlining_mode mode, int depth)
 
       /* In order to fully inline always_inline functions, we need to
         recurse here, since the inlined functions might not be processed by
-        incremental inlining at all yet.  
+        incremental inlining at all yet.
 
         Also flattening needs to be done recursively.  */
 
@@ -1371,7 +1407,7 @@ leaf_node_p (struct cgraph_node *n)
 }
 
 /* Decide on the inlining.  We do so in the topological order to avoid
-   expenses on updating data structures.  
+   expenses on updating data structures.
    DEPTH is depth of recursion, used only for debug output.  */
 
 static bool
@@ -1410,7 +1446,7 @@ cgraph_decide_inlining_incrementally (struct cgraph_node *node,
        if (!e->callee->local.disregard_inline_limits
            && (mode != INLINE_ALL || !e->callee->local.inlinable))
          continue;
-       if (gimple_call_cannot_inline_p (e->call_stmt))
+       if (e->call_stmt_cannot_inline_p)
          continue;
        /* When the edge is already inlined, we just need to recurse into
           it in order to fully flatten the leaves.  */
@@ -1470,98 +1506,110 @@ cgraph_decide_inlining_incrementally (struct cgraph_node *node,
       }
 
   /* Now do the automatic inlining.  */
-  if (mode != INLINE_ALL && mode != INLINE_ALWAYS_INLINE)
-    for (e = node->callees; e; e = e->next_callee)
-      {
-        int allowed_growth = 0;
-       if (!e->callee->local.inlinable
-           || !e->inline_failed
-           || e->callee->local.disregard_inline_limits)
-         continue;
-       if (dump_file)
-         fprintf (dump_file, "Considering inline candidate %s.\n",
-                  cgraph_node_name (e->callee));
-       if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
-         {
-           if (dump_file)
-             {
-               indent_to (dump_file, depth);
-               fprintf (dump_file, "Not inlining: recursive call.\n");
-             }
+  if (mode != INLINE_ALL && mode != INLINE_ALWAYS_INLINE
+      /* Never inline regular functions into always-inline functions
+        during incremental inlining.  */
+      && !node->local.disregard_inline_limits)
+    {
+      bitmap visited = BITMAP_ALLOC (NULL);
+      for (e = node->callees; e; e = e->next_callee)
+       {
+         int allowed_growth = 0;
+         if (!e->callee->local.inlinable
+             || !e->inline_failed
+             || e->callee->local.disregard_inline_limits)
            continue;
-         }
-       if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
-           != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
-         {
-           if (dump_file)
-             {
-               indent_to (dump_file, depth);
-               fprintf (dump_file, "Not inlining: SSA form does not match.\n");
-             }
+         /* We are inlining a function to all call-sites in node
+            or to none.  So visit each candidate only once.  */
+         if (!bitmap_set_bit (visited, e->callee->uid))
            continue;
-         }
-
-       if (cgraph_maybe_hot_edge_p (e) && leaf_node_p (e->callee)
-           && optimize_function_for_speed_p (cfun))
-         allowed_growth = PARAM_VALUE (PARAM_EARLY_INLINING_INSNS);
+         if (dump_file)
+           fprintf (dump_file, "Considering inline candidate %s.\n",
+                    cgraph_node_name (e->callee));
+         if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
+           {
+             if (dump_file)
+               {
+                 indent_to (dump_file, depth);
+                 fprintf (dump_file, "Not inlining: recursive call.\n");
+               }
+             continue;
+           }
+         if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
+             != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
+           {
+             if (dump_file)
+               {
+                 indent_to (dump_file, depth);
+                 fprintf (dump_file,
+                          "Not inlining: SSA form does not match.\n");
+               }
+             continue;
+           }
 
-       /* When the function body would grow and inlining the function won't
-          eliminate the need for offline copy of the function, don't inline.
-        */
-       if (((mode == INLINE_SIZE || mode == INLINE_SIZE_NORECURSIVE)
-            || (!flag_inline_functions
-                && !DECL_DECLARED_INLINE_P (e->callee->decl)))
-           && (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
-               > e->caller->global.size + allowed_growth)
-           && cgraph_estimate_growth (e->callee) > allowed_growth)
-         {
-           if (dump_file)
-             {
-               indent_to (dump_file, depth);
-               fprintf (dump_file,
-                        "Not inlining: code size would grow by %i.\n",
-                        cgraph_estimate_size_after_inlining (1, e->caller,
-                                                             e->callee)
-                        - e->caller->global.size);
-             }
-           continue;
-         }
-       if (!cgraph_check_inline_limits (node, e->callee, &e->inline_failed,
-                                        false)
-           || gimple_call_cannot_inline_p (e->call_stmt))
-         {
-           if (dump_file)
-             {
-               indent_to (dump_file, depth);
-               fprintf (dump_file, "Not inlining: %s.\n",
-                        cgraph_inline_failed_string (e->inline_failed));
-             }
-           continue;
-         }
-       if (!e->callee->analyzed)
-         {
-           if (dump_file)
-             {
-               indent_to (dump_file, depth);
-               fprintf (dump_file,
-                        "Not inlining: Function body no longer available.\n");
-             }
-           continue;
-         }
-       if (!tree_can_inline_p (e))
-         {
-           if (dump_file)
-             {
-               indent_to (dump_file, depth);
-               fprintf (dump_file,
-                        "Not inlining: %s.",
-                         cgraph_inline_failed_string (e->inline_failed));
-             }
-           continue;
-         }
-       if (cgraph_default_inline_p (e->callee, &failed_reason))
-         inlined |= try_inline (e, mode, depth);
-      }
+         if (cgraph_maybe_hot_edge_p (e) && leaf_node_p (e->callee)
+             && optimize_function_for_speed_p (cfun))
+           allowed_growth = PARAM_VALUE (PARAM_EARLY_INLINING_INSNS);
+
+         /* When the function body would grow and inlining the function
+            won't eliminate the need for offline copy of the function,
+            don't inline.  */
+         if (((mode == INLINE_SIZE || mode == INLINE_SIZE_NORECURSIVE)
+              || (!flag_inline_functions
+                  && !DECL_DECLARED_INLINE_P (e->callee->decl)))
+             && (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
+                 > e->caller->global.size + allowed_growth)
+             && cgraph_estimate_growth (e->callee) > allowed_growth)
+           {
+             if (dump_file)
+               {
+                 indent_to (dump_file, depth);
+                 fprintf (dump_file,
+                          "Not inlining: code size would grow by %i.\n",
+                          cgraph_estimate_size_after_inlining (1, e->caller,
+                                                               e->callee)
+                          - e->caller->global.size);
+               }
+             continue;
+           }
+         if (!cgraph_check_inline_limits (node, e->callee, &e->inline_failed,
+                                          false)
+             || e->call_stmt_cannot_inline_p)
+           {
+             if (dump_file)
+               {
+                 indent_to (dump_file, depth);
+                 fprintf (dump_file, "Not inlining: %s.\n",
+                          cgraph_inline_failed_string (e->inline_failed));
+               }
+             continue;
+           }
+         if (!e->callee->analyzed)
+           {
+             if (dump_file)
+               {
+                 indent_to (dump_file, depth);
+                 fprintf (dump_file,
+                          "Not inlining: Function body no longer available.\n");
+               }
+             continue;
+           }
+         if (!tree_can_inline_p (e))
+           {
+             if (dump_file)
+               {
+                 indent_to (dump_file, depth);
+                 fprintf (dump_file,
+                          "Not inlining: %s.",
+                          cgraph_inline_failed_string (e->inline_failed));
+               }
+             continue;
+           }
+         if (cgraph_default_inline_p (e->callee, &failed_reason))
+           inlined |= try_inline (e, mode, depth);
+       }
+      BITMAP_FREE (visited);
+    }
   node->aux = (void *)(size_t) old_mode;
   return inlined;
 }
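
The restructured loop above adds a `visited` bitmap: incremental inlining either inlines a callee at every call site in `node` or at none of them, so each distinct callee only needs to be judged once, and the return value of bitmap_set_bit (true only when the bit was previously clear) doubles as the first-time test. The same pattern in freestanding C, with a plain word-array bitset standing in for GCC's sparse bitmaps:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_UID 1024
    #define WORD_BITS (8 * sizeof (unsigned long))

    /* Set bit UID; return true only if it was previously clear.  This is
       the contract the patch relies on from GCC's bitmap_set_bit.  */
    static bool
    set_bit_first_time (unsigned long *words, unsigned uid)
    {
      unsigned long mask = 1ul << (uid % WORD_BITS);
      unsigned long *w = &words[uid / WORD_BITS];
      if (*w & mask)
        return false;
      *w |= mask;
      return true;
    }

    int
    main (void)
    {
      unsigned long visited[MAX_UID / WORD_BITS];
      memset (visited, 0, sizeof visited);

      /* Edges to callees with uids 7, 3, 7: the duplicate is skipped.  */
      unsigned uids[] = { 7, 3, 7 };
      for (unsigned i = 0; i < 3; i++)
        {
          if (!set_bit_first_time (visited, uids[i]))
            continue;  /* already considered this callee */
          printf ("considering callee uid %u\n", uids[i]);
        }
      return 0;
    }
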
@@ -1584,10 +1632,10 @@ cgraph_early_inlining (void)
 
   if (sorrycount || errorcount)
     return 0;
-  while (cgraph_decide_inlining_incrementally (node,
-                                              iterations
-                                              ? INLINE_SIZE_NORECURSIVE : INLINE_SIZE, 0)
-        && iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS))
+  while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
+         && cgraph_decide_inlining_incrementally (node,
+                                                 iterations
+                                                 ? INLINE_SIZE_NORECURSIVE : INLINE_SIZE, 0))
     {
       timevar_push (TV_INTEGRATION);
       todo |= optimize_inline_calls (current_function_decl);
@@ -1607,7 +1655,7 @@ cgraph_gate_early_inlining (void)
   return flag_early_inlining;
 }
 
-struct gimple_opt_pass pass_early_inline = 
+struct gimple_opt_pass pass_early_inline =
 {
  {
   GIMPLE_PASS,
@@ -1631,13 +1679,14 @@ static bool
 cgraph_gate_ipa_early_inlining (void)
 {
   return (flag_early_inlining
+         && !in_lto_p
          && (flag_branch_probabilities || flag_test_coverage
              || profile_arc_flag));
 }
 
 /* IPA pass wrapper for early inlining pass.  We need to run early inlining
    before tree profiling so we have stand alone IPA pass for doing so.  */
-struct simple_ipa_opt_pass pass_ipa_early_inline = 
+struct simple_ipa_opt_pass pass_ipa_early_inline =
 {
  {
   SIMPLE_IPA_PASS,
@@ -1691,7 +1740,7 @@ likely_eliminated_by_inlining_p (gimple stmt)
            while (handled_component_p (inner_rhs)
                   || TREE_CODE (inner_rhs) == ADDR_EXPR || TREE_CODE (inner_rhs) == INDIRECT_REF)
              inner_rhs = TREE_OPERAND (inner_rhs, 0);
-               
+
 
            if (TREE_CODE (inner_rhs) == PARM_DECL
                || (TREE_CODE (inner_rhs) == SSA_NAME
@@ -1822,10 +1871,10 @@ compute_inline_parameters (struct cgraph_node *node)
   node->global.stack_frame_offset = 0;
 
   /* Can this function be inlined at all?  */
-  node->local.inlinable = tree_inlinable_function_p (current_function_decl);
+  node->local.inlinable = tree_inlinable_function_p (node->decl);
   if (node->local.inlinable && !node->local.disregard_inline_limits)
     node->local.disregard_inline_limits
-      = DECL_DISREGARD_INLINE_LIMITS (current_function_decl);
+      = DECL_DISREGARD_INLINE_LIMITS (node->decl);
   estimate_function_body_sizes (node);
   /* Inlining characteristics are maintained by the cgraph_mark_inline.  */
   node->global.time = inline_summary (node)->self_time;
@@ -1843,7 +1892,7 @@ compute_inline_parameters_for_current (void)
   return 0;
 }
 
-struct gimple_opt_pass pass_inline_parameters = 
+struct gimple_opt_pass pass_inline_parameters =
 {
  {
   GIMPLE_PASS,
@@ -1931,7 +1980,7 @@ inline_generate_summary (void)
   for (node = cgraph_nodes; node; node = node->next)
     if (node->analyzed)
       analyze_function (node);
-  
+
   return;
 }
 
@@ -1942,6 +1991,11 @@ inline_transform (struct cgraph_node *node)
   unsigned int todo = 0;
   struct cgraph_edge *e;
 
+  /* FIXME: Currently the passmanager is adding inline transform more than once to some
+     clones.  This needs revisiting after WPA cleanups.  */
+  if (cfun->after_inlining)
+    return 0;
+
   /* We might need the body of this function so that we can expand
      it inline somewhere else.  */
   if (cgraph_preserve_function_body_p (node->decl))
@@ -1962,6 +2016,34 @@ inline_transform (struct cgraph_node *node)
   return todo | execute_fixup_cfg ();
 }
 
+/* Read inline summary.  Jump functions are shared among ipa-cp
+   and inliner, so when ipa-cp is active, we don't need to write them
+   twice.  */
+
+static void
+inline_read_summary (void)
+{
+  if (flag_indirect_inlining)
+    {
+      ipa_register_cgraph_hooks ();
+      if (!flag_ipa_cp)
+        ipa_prop_read_jump_functions ();
+    }
+  function_insertion_hook_holder =
+      cgraph_add_function_insertion_hook (&add_new_function, NULL);
+}
+
+/* Write inline summary for node in SET.
+   Jump functions are shared among ipa-cp and inliner, so when ipa-cp is
+   active, we don't need to write them twice.  */
+
+static void
+inline_write_summary (cgraph_node_set set)
+{
+  if (flag_indirect_inlining && !flag_ipa_cp)
+    ipa_prop_write_jump_functions (set);
+}
+
 struct ipa_opt_pass_d pass_ipa_inline =
 {
  {
@@ -1981,9 +2063,10 @@ struct ipa_opt_pass_d pass_ipa_inline =
   | TODO_remove_functions              /* todo_flags_finish */
  },
  inline_generate_summary,              /* generate_summary */
- NULL,                                 /* write_summary */
- NULL,                                 /* read_summary */
+ inline_write_summary,                 /* write_summary */
+ inline_read_summary,                  /* read_summary */
  NULL,                                 /* function_read_summary */
+ lto_ipa_fixup_call_notes,             /* stmt_fixup */
  0,                                    /* TODOs */
  inline_transform,                     /* function_transform */
  NULL,                                 /* variable_transform */