2009-05-12 Paolo Bonzini <bonzini@gnu.org>
[pf3gnuchains/gcc-fork.git] gcc/ipa-inline.c
index 4c8096a..99640bf 100644
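
The pervasive change in the diff below is the replacement of free-form string reasons stored in edge->inline_failed (e.g. N_("recursive inlining")) with cgraph_inline_failed_t codes such as CIF_RECURSIVE_INLINING, translated back to text via cgraph_inline_failed_string () only at dump sites. The fragment that follows is a minimal standalone sketch of that error-code-plus-string-table pattern, not GCC's implementation; every demo_* name is a hypothetical stand-in.

    #include <stdio.h>

    /* Hypothetical stand-ins for cgraph_inline_failed_t and
       cgraph_inline_failed_string; real GCC names are not used here.  */
    typedef enum
    {
      DEMO_CIF_OK = 0,
      DEMO_CIF_UNSPECIFIED,
      DEMO_CIF_BODY_NOT_AVAILABLE,
      DEMO_CIF_RECURSIVE_INLINING,
      DEMO_CIF_INLINE_UNIT_GROWTH_LIMIT
    } demo_inline_failed_t;

    static const char *
    demo_inline_failed_string (demo_inline_failed_t reason)
    {
      /* Keep the table in the same order as the enum above.  */
      static const char *const messages[] =
        {
          "ok",
          "unspecified reason",
          "function body not available",
          "recursive inlining",
          "--param inline-unit-growth limit reached"
        };
      return messages[reason];
    }

    int
    main (void)
    {
      /* Comparisons use cheap integer codes; strings appear only in dumps.  */
      demo_inline_failed_t inline_failed = DEMO_CIF_RECURSIVE_INLINING;
      if (inline_failed != DEMO_CIF_OK)
        printf ("Inlining failed: %s\n",
                demo_inline_failed_string (inline_failed));
      return 0;
    }

One effect visible in the hunks below is that dump output goes through a single translation point (cgraph_inline_failed_string) instead of storing translatable strings on every edge.
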
@@ -1,5 +1,5 @@
 /* Inlining decision heuristics.
-   Copyright (C) 2003, 2004, 2007, 2008 Free Software Foundation, Inc.
+   Copyright (C) 2003, 2004, 2007, 2008, 2009 Free Software Foundation, Inc.
    Contributed by Jan Hubicka
 
 This file is part of GCC.
@@ -212,7 +212,7 @@ cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
          && !cgraph_new_nodes)
        {
          gcc_assert (!e->callee->global.inlined_to);
-         if (gimple_body (e->callee->decl))
+         if (e->callee->analyzed)
            overall_insns -= e->callee->global.insns, nfunctions_inlined++;
          duplicate = false;
        }
@@ -243,21 +243,25 @@ cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
       cgraph_clone_inlined_nodes (e, duplicate, update_original);
 }
 
-/* Mark edge E as inlined and update callgraph accordingly. 
-   UPDATE_ORIGINAL specify whether profile of original function should be
-   updated. */
+/* Mark edge E as inlined and update callgraph accordingly.  UPDATE_ORIGINAL
+   specify whether profile of original function should be updated.  If any new
+   indirect edges are discovered in the process, add them to NEW_EDGES, unless
+   it is NULL.  Return true iff any new callgraph edges were discovered as a
+   result of inlining.  */
 
-void
-cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original)
+static bool
+cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original,
+                        VEC (cgraph_edge_p, heap) **new_edges)
 {
   int old_insns = 0, new_insns = 0;
   struct cgraph_node *to = NULL, *what;
+  struct cgraph_edge *curr = e;
 
   if (e->callee->inline_decl)
     cgraph_redirect_edge_callee (e, cgraph_node (e->callee->inline_decl));
 
   gcc_assert (e->inline_failed);
-  e->inline_failed = NULL;
+  e->inline_failed = CIF_OK;
 
   if (!e->callee->global.inlined)
     DECL_POSSIBLY_INLINED (e->callee->decl) = true;
@@ -281,6 +285,11 @@ cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original)
   if (new_insns > old_insns)
     overall_insns += new_insns - old_insns;
   ncalls_inlined++;
+
+  if (flag_indirect_inlining)
+    return ipa_propagate_indirect_call_infos (curr, new_edges);
+  else
+    return false;
 }
 
 /* Mark all calls of EDGE->CALLEE inlined into EDGE->CALLER.
@@ -302,7 +311,7 @@ cgraph_mark_inline (struct cgraph_edge *edge)
       next = e->next_caller;
       if (e->caller == to && e->inline_failed)
        {
-          cgraph_mark_inline_edge (e, true);
+          cgraph_mark_inline_edge (e, true, NULL);
          if (e == edge)
            edge = next;
        }
@@ -352,7 +361,7 @@ cgraph_estimate_growth (struct cgraph_node *node)
 
 static bool
 cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
-                           const char **reason, bool one_only)
+                           cgraph_inline_failed_t *reason, bool one_only)
 {
   int times = 0;
   struct cgraph_edge *e;
@@ -387,7 +396,7 @@ cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
       && newsize > limit)
     {
       if (reason)
-        *reason = N_("--param large-function-growth limit reached");
+        *reason = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
       return false;
     }
 
@@ -402,7 +411,7 @@ cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
       && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
     {
       if (reason)
-        *reason = N_("--param large-stack-frame-growth limit reached");
+        *reason = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
       return false;
     }
   return true;
@@ -410,8 +419,8 @@ cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
 
 /* Return true when function N is small enough to be inlined.  */
 
-bool
-cgraph_default_inline_p (struct cgraph_node *n, const char **reason)
+static bool
+cgraph_default_inline_p (struct cgraph_node *n, cgraph_inline_failed_t *reason)
 {
   tree decl = n->decl;
 
@@ -420,14 +429,14 @@ cgraph_default_inline_p (struct cgraph_node *n, const char **reason)
   if (!flag_inline_small_functions && !DECL_DECLARED_INLINE_P (decl))
     {
       if (reason)
-       *reason = N_("function not inline candidate");
+       *reason = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
       return false;
     }
 
-  if (!DECL_STRUCT_FUNCTION (decl)->cfg)
+  if (!n->analyzed)
     {
       if (reason)
-       *reason = N_("function body not available");
+       *reason = CIF_BODY_NOT_AVAILABLE;
       return false;
     }
 
@@ -436,7 +445,7 @@ cgraph_default_inline_p (struct cgraph_node *n, const char **reason)
       if (n->global.insns >= MAX_INLINE_INSNS_SINGLE)
        {
          if (reason)
-           *reason = N_("--param max-inline-insns-single limit reached");
+           *reason = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
          return false;
        }
     }
@@ -445,7 +454,7 @@ cgraph_default_inline_p (struct cgraph_node *n, const char **reason)
       if (n->global.insns >= MAX_INLINE_INSNS_AUTO)
        {
          if (reason)
-           *reason = N_("--param max-inline-insns-auto limit reached");
+           *reason = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
          return false;
        }
     }
@@ -460,7 +469,7 @@ cgraph_default_inline_p (struct cgraph_node *n, const char **reason)
 static bool
 cgraph_recursive_inlining_p (struct cgraph_node *to,
                             struct cgraph_node *what,
-                            const char **reason)
+                            cgraph_inline_failed_t *reason)
 {
   bool recursive;
   if (to->global.inlined_to)
@@ -471,7 +480,7 @@ cgraph_recursive_inlining_p (struct cgraph_node *to,
      not warn on it.  */
   if (recursive && reason)
     *reason = (what->local.disregard_inline_limits
-              ? N_("recursive inlining") : "");
+              ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
   return recursive;
 }
 
@@ -557,7 +566,7 @@ update_caller_keys (fibheap_t heap, struct cgraph_node *node,
                    bitmap updated_nodes)
 {
   struct cgraph_edge *edge;
-  const char *failed_reason;
+  cgraph_inline_failed_t failed_reason;
 
   if (!node->local.inlinable || node->local.disregard_inline_limits
       || node->global.inlined_to)
@@ -646,11 +655,12 @@ lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
 
 /* Decide on recursive inlining: in the case function has recursive calls,
    inline until body size reaches given argument.  If any new indirect edges
-   are discovered in the process, add them to NEW_EDGES, unless it is NULL.  */
+   are discovered in the process, add them to *NEW_EDGES, unless NEW_EDGES
+   is NULL.  */
 
 static bool
 cgraph_decide_recursive_inlining (struct cgraph_node *node,
-                                 VEC (cgraph_edge_p, heap) *new_edges)
+                                 VEC (cgraph_edge_p, heap) **new_edges)
 {
   int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
   int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
@@ -746,9 +756,7 @@ cgraph_decide_recursive_inlining (struct cgraph_node *node,
          fprintf (dump_file, "\n");
        }
       cgraph_redirect_edge_callee (curr, master_clone);
-      cgraph_mark_inline_edge (curr, false);
-      if (flag_indirect_inlining)
-       ipa_propagate_indirect_call_infos (curr, new_edges);
+      cgraph_mark_inline_edge (curr, false, new_edges);
       lookup_recursive_calls (node, curr->callee, heap);
       n++;
     }
@@ -782,12 +790,14 @@ cgraph_decide_recursive_inlining (struct cgraph_node *node,
 /* Set inline_failed for all callers of given function to REASON.  */
 
 static void
-cgraph_set_inline_failed (struct cgraph_node *node, const char *reason)
+cgraph_set_inline_failed (struct cgraph_node *node,
+                         cgraph_inline_failed_t reason)
 {
   struct cgraph_edge *e;
 
   if (dump_file)
-    fprintf (dump_file, "Inlining failed: %s\n", reason);
+    fprintf (dump_file, "Inlining failed: %s\n",
+            cgraph_inline_failed_string (reason));
   for (e = node->callers; e; e = e->next_caller)
     if (e->inline_failed)
       e->inline_failed = reason;
@@ -832,7 +842,7 @@ cgraph_decide_inlining_of_small_functions (void)
 {
   struct cgraph_node *node;
   struct cgraph_edge *edge;
-  const char *failed_reason;
+  cgraph_inline_failed_t failed_reason;
   fibheap_t heap = fibheap_new ();
   bitmap updated_nodes = BITMAP_ALLOC (NULL);
   int min_insns, max_insns;
@@ -879,7 +889,7 @@ cgraph_decide_inlining_of_small_functions (void)
       struct cgraph_node *where;
       int growth =
        cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
-      const char *not_good = NULL;
+      cgraph_inline_failed_t not_good = CIF_OK;
 
       growth -= edge->caller->global.insns;
 
@@ -890,10 +900,12 @@ cgraph_decide_inlining_of_small_functions (void)
                   cgraph_node_name (edge->callee),
                   edge->callee->global.insns);
          fprintf (dump_file, 
-                  " to be inlined into %s\n"
+                  " to be inlined into %s in %s:%i\n"
                   " Estimated growth after inlined into all callees is %+i insns.\n"
                   " Estimated badness is %i, frequency %.2f.\n",
                   cgraph_node_name (edge->caller),
+                  gimple_filename ((const_gimple) edge->call_stmt),
+                  gimple_lineno ((const_gimple) edge->call_stmt),
                   cgraph_estimate_growth (edge->callee),
                   cgraph_edge_badness (edge),
                   edge->frequency / (double)CGRAPH_FREQ_BASE);
@@ -931,7 +943,8 @@ cgraph_decide_inlining_of_small_functions (void)
          if (where->global.inlined_to)
            {
              edge->inline_failed
-               = (edge->callee->local.disregard_inline_limits ? N_("recursive inlining") : "");
+               = (edge->callee->local.disregard_inline_limits
+                  ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
              if (dump_file)
                fprintf (dump_file, " inline_failed:Recursive inlining performed only for function itself.\n");
              continue;
@@ -939,12 +952,12 @@ cgraph_decide_inlining_of_small_functions (void)
        }
 
       if (!cgraph_maybe_hot_edge_p (edge))
-       not_good = N_("call is unlikely and code size would grow");
+       not_good = CIF_UNLIKELY_CALL;
       if (!flag_inline_functions
          && !DECL_DECLARED_INLINE_P (edge->callee->decl))
-       not_good = N_("function not declared inline and code size would grow");
+       not_good = CIF_NOT_DECLARED_INLINED;
       if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION(edge->caller->decl)))
-       not_good = N_("optimizing for size and code size would grow");
+       not_good = CIF_OPTIMIZING_FOR_SIZE;
       if (not_good && growth > 0 && cgraph_estimate_growth (edge->callee) > 0)
        {
           if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
@@ -952,7 +965,8 @@ cgraph_decide_inlining_of_small_functions (void)
            {
              edge->inline_failed = not_good;
              if (dump_file)
-               fprintf (dump_file, " inline_failed:%s.\n", edge->inline_failed);
+               fprintf (dump_file, " inline_failed:%s.\n",
+                        cgraph_inline_failed_string (edge->inline_failed));
            }
          continue;
        }
@@ -962,16 +976,18 @@ cgraph_decide_inlining_of_small_functions (void)
                                            &edge->inline_failed))
            {
              if (dump_file)
-               fprintf (dump_file, " inline_failed:%s.\n", edge->inline_failed);
+               fprintf (dump_file, " inline_failed:%s.\n",
+                        cgraph_inline_failed_string (edge->inline_failed));
            }
          continue;
        }
       if (!tree_can_inline_p (edge->caller->decl, edge->callee->decl))
        {
          gimple_call_set_cannot_inline (edge->call_stmt, true);
-         edge->inline_failed = N_("target specific option mismatch");
+         edge->inline_failed = CIF_TARGET_OPTION_MISMATCH;
          if (dump_file)
-           fprintf (dump_file, " inline_failed:%s.\n", edge->inline_failed);
+           fprintf (dump_file, " inline_failed:%s.\n",
+                    cgraph_inline_failed_string (edge->inline_failed));
          continue;
        }
       if (cgraph_recursive_inlining_p (edge->caller, edge->callee,
@@ -980,7 +996,9 @@ cgraph_decide_inlining_of_small_functions (void)
          where = edge->caller;
          if (where->global.inlined_to)
            where = where->global.inlined_to;
-         if (!cgraph_decide_recursive_inlining (where, new_indirect_edges))
+         if (!cgraph_decide_recursive_inlining (where,
+                                                flag_indirect_inlining
+                                                ? &new_indirect_edges : NULL))
            continue;
          if (flag_indirect_inlining)
            add_new_edges_to_heap (heap, new_indirect_edges);
@@ -995,16 +1013,15 @@ cgraph_decide_inlining_of_small_functions (void)
            {
              if (dump_file)
                fprintf (dump_file, " Not inlining into %s:%s.\n",
-                        cgraph_node_name (edge->caller), edge->inline_failed);
+                        cgraph_node_name (edge->caller),
+                        cgraph_inline_failed_string (edge->inline_failed));
              continue;
            }
          callee = edge->callee;
-         cgraph_mark_inline_edge (edge, true);
+         cgraph_mark_inline_edge (edge, true, &new_indirect_edges);
          if (flag_indirect_inlining)
-           {
-             ipa_propagate_indirect_call_infos (edge, new_indirect_edges);
-             add_new_edges_to_heap (heap, new_indirect_edges);
-           }
+           add_new_edges_to_heap (heap, new_indirect_edges);
+
          update_callee_keys (heap, callee, updated_nodes);
        }
       where = edge->caller;
@@ -1045,7 +1062,7 @@ cgraph_decide_inlining_of_small_functions (void)
       if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
           && !cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                           &edge->inline_failed))
-       edge->inline_failed = N_("--param inline-unit-growth limit reached");
+       edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
     }
 
   if (new_indirect_edges)
@@ -1067,6 +1084,7 @@ cgraph_decide_inlining (void)
   int old_insns = 0;
   int i;
   int initial_insns = 0;
+  bool redo_always_inline = true;
 
   cgraph_remove_function_insertion_hook (function_insertion_hook_holder);
 
@@ -1100,69 +1118,70 @@ cgraph_decide_inlining (void)
 
   /* In the first pass mark all always_inline edges.  Do this with a priority
      so none of our later choices will make this impossible.  */
-  for (i = nnodes - 1; i >= 0; i--)
+  while (redo_always_inline)
     {
-      struct cgraph_edge *e, *next;
+      redo_always_inline = false;
+      for (i = nnodes - 1; i >= 0; i--)
+       {
+         struct cgraph_edge *e, *next;
 
-      node = order[i];
+         node = order[i];
 
-      /* Handle nodes to be flattened, but don't update overall unit size.  */
-      if (lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
-        {
-         if (dump_file)
-           fprintf (dump_file,
-                    "Flattening %s\n", cgraph_node_name (node));
-         cgraph_decide_inlining_incrementally (node, INLINE_ALL, 0);
-        }
+         /* Handle nodes to be flattened, but don't update overall unit
+            size.  */
+         if (lookup_attribute ("flatten",
+                               DECL_ATTRIBUTES (node->decl)) != NULL)
+           {
+             if (dump_file)
+               fprintf (dump_file,
+                        "Flattening %s\n", cgraph_node_name (node));
+             cgraph_decide_inlining_incrementally (node, INLINE_ALL, 0);
+           }
 
-      if (!node->local.disregard_inline_limits)
-       continue;
-      if (dump_file)
-       fprintf (dump_file,
-                "\nConsidering %s %i insns (always inline)\n",
-                cgraph_node_name (node), node->global.insns);
-      old_insns = overall_insns;
-      for (e = node->callers; e; e = next)
-       {
-         next = e->next_caller;
-         if (!e->inline_failed || gimple_call_cannot_inline_p (e->call_stmt))
+         if (!node->local.disregard_inline_limits)
            continue;
-         if (cgraph_recursive_inlining_p (e->caller, e->callee,
-                                          &e->inline_failed))
-           continue;
-         if (!tree_can_inline_p (e->caller->decl, e->callee->decl))
+         if (dump_file)
+           fprintf (dump_file,
+                    "\nConsidering %s %i insns (always inline)\n",
+                    cgraph_node_name (node), node->global.insns);
+         old_insns = overall_insns;
+         for (e = node->callers; e; e = next)
            {
-             gimple_call_set_cannot_inline (e->call_stmt, true);
-             continue;
+             next = e->next_caller;
+             if (!e->inline_failed
+                 || gimple_call_cannot_inline_p (e->call_stmt))
+               continue;
+             if (cgraph_recursive_inlining_p (e->caller, e->callee,
+                                              &e->inline_failed))
+               continue;
+             if (!tree_can_inline_p (e->caller->decl, e->callee->decl))
+               {
+                 gimple_call_set_cannot_inline (e->call_stmt, true);
+                 continue;
+               }
+             if (cgraph_mark_inline_edge (e, true, NULL))
+               redo_always_inline = true;
+             if (dump_file)
+               fprintf (dump_file,
+                        " Inlined into %s which now has %i insns.\n",
+                        cgraph_node_name (e->caller),
+                        e->caller->global.insns);
            }
-         cgraph_mark_inline_edge (e, true);
-         if (flag_indirect_inlining)
-           ipa_propagate_indirect_call_infos (e, NULL);
+         /* Inlining self recursive function might introduce new calls to
+            themselves we didn't see in the loop above.  Fill in the proper
+            reason why inline failed.  */
+         for (e = node->callers; e; e = e->next_caller)
+           if (e->inline_failed)
+             e->inline_failed = CIF_RECURSIVE_INLINING;
          if (dump_file)
            fprintf (dump_file, 
-                    " Inlined into %s which now has %i insns.\n",
-                    cgraph_node_name (e->caller),
-                    e->caller->global.insns);
+                    " Inlined for a net change of %+i insns.\n",
+                    overall_insns - old_insns);
        }
-      /* Inlining self recursive function might introduce new calls to
-        themselves we didn't see in the loop above.  Fill in the proper
-        reason why inline failed.  */
-      for (e = node->callers; e; e = e->next_caller)
-       if (e->inline_failed)
-         e->inline_failed = N_("recursive inlining");
-      if (dump_file)
-       fprintf (dump_file, 
-                " Inlined for a net change of %+i insns.\n",
-                overall_insns - old_insns);
     }
 
   cgraph_decide_inlining_of_small_functions ();
 
-  /* After this point, any edge discovery performed by indirect inlining is no
-     good so let's give up. */
-  if (flag_indirect_inlining)
-    free_all_ipa_structures_after_iinln ();
-
   if (flag_inline_functions_called_once)
     {
       if (dump_file)
@@ -1217,6 +1236,10 @@ cgraph_decide_inlining (void)
        }
     }
 
+  /* Free ipa-prop structures if they are no longer needed.  */
+  if (flag_indirect_inlining)
+    free_all_ipa_structures_after_iinln ();
+
   if (dump_file)
     fprintf (dump_file,
             "\nInlined %i calls, eliminated %i functions, "
@@ -1275,7 +1298,7 @@ try_inline (struct cgraph_edge *e, enum inlining_mode mode, int depth)
                       cgraph_node_name (e->caller));
            }
          e->inline_failed = (e->callee->local.disregard_inline_limits
-                             ? N_("recursive inlining") : "");
+                             ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
           return false;
        }
     }
@@ -1289,16 +1312,18 @@ try_inline (struct cgraph_edge *e, enum inlining_mode mode, int depth)
               cgraph_node_name (e->caller));
     }
   if (e->inline_failed)
-    cgraph_mark_inline (e);
+    {
+      cgraph_mark_inline (e);
 
-  /* In order to fully inline always_inline functions, we need to
-     recurse here, since the inlined functions might not be processed by
-     incremental inlining at all yet.  
+      /* In order to fully inline always_inline functions, we need to
+        recurse here, since the inlined functions might not be processed by
+        incremental inlining at all yet.  
 
-     Also flattening needs to be done recursively.  */
+        Also flattening needs to be done recursively.  */
 
-  if (mode == INLINE_ALL || always_inline)
-    cgraph_decide_inlining_incrementally (e->callee, mode, depth + 1);
+      if (mode == INLINE_ALL || always_inline)
+       cgraph_decide_inlining_incrementally (e->callee, mode, depth + 1);
+    }
   callee->aux = (void *)(size_t) callee_mode;
   return true;
 }
@@ -1314,7 +1339,7 @@ cgraph_decide_inlining_incrementally (struct cgraph_node *node,
 {
   struct cgraph_edge *e;
   bool inlined = false;
-  const char *failed_reason;
+  cgraph_inline_failed_t failed_reason;
   enum inlining_mode old_mode;
 
 #ifdef ENABLE_CHECKING
@@ -1388,7 +1413,7 @@ cgraph_decide_inlining_incrementally (struct cgraph_node *node,
            }
          continue;
        }
-      if (!gimple_body (e->callee->decl) && !e->callee->inline_decl)
+      if (!e->callee->analyzed && !e->callee->inline_decl)
        {
          if (dump_file)
            {
@@ -1459,11 +1484,12 @@ cgraph_decide_inlining_incrementally (struct cgraph_node *node,
            if (dump_file)
              {
                indent_to (dump_file, depth);
-               fprintf (dump_file, "Not inlining: %s.\n", e->inline_failed);
+               fprintf (dump_file, "Not inlining: %s.\n",
+                        cgraph_inline_failed_string (e->inline_failed));
              }
            continue;
          }
-       if (!gimple_body (e->callee->decl) && !e->callee->inline_decl)
+       if (!e->callee->analyzed && !e->callee->inline_decl)
          {
            if (dump_file)
              {
@@ -1514,6 +1540,7 @@ cgraph_early_inlining (void)
       todo = optimize_inline_calls (current_function_decl);
       timevar_pop (TV_INTEGRATION);
     }
+  cfun->always_inline_functions_inlined = true;
   return todo;
 }
 
@@ -1536,7 +1563,7 @@ struct gimple_opt_pass pass_early_inline =
   0,                                   /* static_pass_number */
   TV_INLINE_HEURISTICS,                        /* tv_id */
   0,                                   /* properties_required */
-  PROP_cfg,                            /* properties_provided */
+  0,                                   /* properties_provided */
   0,                                   /* properties_destroyed */
   0,                                   /* todo_flags_start */
   TODO_dump_func                       /* todo_flags_finish */
@@ -1566,7 +1593,7 @@ struct simple_ipa_opt_pass pass_ipa_early_inline =
   0,                                   /* static_pass_number */
   TV_INLINE_HEURISTICS,                        /* tv_id */
   0,                                   /* properties_required */
-  PROP_cfg,                            /* properties_provided */
+  0,                                   /* properties_provided */
   0,                                   /* properties_destroyed */
   0,                                   /* todo_flags_start */
   TODO_dump_cgraph                     /* todo_flags_finish */
@@ -1577,18 +1604,30 @@ struct simple_ipa_opt_pass pass_ipa_early_inline =
 unsigned int
 compute_inline_parameters (struct cgraph_node *node)
 {
+  HOST_WIDE_INT self_stack_size;
+
   gcc_assert (!node->global.inlined_to);
-  inline_summary (node)->estimated_self_stack_size
-    = estimated_stack_frame_size ();
-  node->global.estimated_stack_size
-    = inline_summary (node)->estimated_self_stack_size;
+
+  /* Estimate the stack size for the function.  But not at -O0
+     because estimated_stack_frame_size is a quadratic problem.  */
+  self_stack_size = optimize ? estimated_stack_frame_size () : 0;
+  inline_summary (node)->estimated_self_stack_size = self_stack_size;
+  node->global.estimated_stack_size = self_stack_size;
   node->global.stack_frame_offset = 0;
+
+  /* Can this function be inlined at all?  */
   node->local.inlinable = tree_inlinable_function_p (current_function_decl);
+
+  /* Estimate the number of instructions for this function.
+     ??? At -O0 we don't use this information except for the dumps, and
+        even then only for always_inline functions.  But disabling this
+        causes ICEs in the inline heuristics...  */
   inline_summary (node)->self_insns
       = estimate_num_insns_fn (current_function_decl, &eni_inlining_weights);
   if (node->local.inlinable && !node->local.disregard_inline_limits)
     node->local.disregard_inline_limits
       = DECL_DISREGARD_INLINE_LIMITS (current_function_decl);
+
   /* Inlining characteristics are maintained by the cgraph_mark_inline.  */
   node->global.insns = inline_summary (node)->self_insns;
   return 0;
@@ -1616,7 +1655,7 @@ struct gimple_opt_pass pass_inline_parameters =
   0,                                   /* static_pass_number */
   TV_INLINE_HEURISTICS,                        /* tv_id */
   0,                                   /* properties_required */
-  PROP_cfg,                            /* properties_provided */
+  0,                                   /* properties_provided */
   0,                                   /* properties_destroyed */
   0,                                   /* todo_flags_start */
   0                                    /* todo_flags_finish */
@@ -1632,8 +1671,7 @@ inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
 
   if (!flag_ipa_cp)
     {
-      ipa_count_formal_params (node);
-      ipa_create_param_decls_array (node);
+      ipa_initialize_node_params (node);
       ipa_detect_param_modifications (node);
     }
   ipa_analyze_params_uses (node);
@@ -1706,7 +1744,7 @@ inline_transform (struct cgraph_node *node)
 
   /* We might need the body of this function so that we can expand
      it inline somewhere else.  */
-  if (cgraph_preserve_function_body_p (current_function_decl))
+  if (cgraph_preserve_function_body_p (node->decl))
     save_inline_function_body (node);
 
   for (e = node->callees; e; e = e->next_callee)
@@ -1719,6 +1757,8 @@ inline_transform (struct cgraph_node *node)
       todo = optimize_inline_calls (current_function_decl);
       timevar_pop (TV_INTEGRATION);
     }
+  cfun->always_inline_functions_inlined = true;
+  cfun->after_inlining = true;
   return todo | execute_fixup_cfg ();
 }
 
@@ -1734,7 +1774,7 @@ struct ipa_opt_pass pass_ipa_inline =
   0,                                   /* static_pass_number */
   TV_INLINE_HEURISTICS,                        /* tv_id */
   0,                                   /* properties_required */
-  PROP_cfg,                            /* properties_provided */
+  0,                                   /* properties_provided */
   0,                                   /* properties_destroyed */
   TODO_remove_functions,               /* todo_flags_finish */
   TODO_dump_cgraph | TODO_dump_func
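
The other recurring change is the new signature of cgraph_mark_inline_edge: it now takes an optional VEC (cgraph_edge_p, heap) ** and returns true iff new callgraph edges were discovered, while callers that do not care pass NULL, as in cgraph_mark_inline_edge (e, true, NULL). Below is a minimal standalone sketch of that optional out-parameter convention in plain C; the demo_* names and the small growable int vector are hypothetical stand-ins, not GCC's VEC API.

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical growable vector standing in for VEC (cgraph_edge_p, heap).  */
    typedef struct
    {
      int *data;
      size_t len, cap;
    } demo_edge_vec;

    static void
    demo_vec_push (demo_edge_vec *v, int edge_id)
    {
      if (v->len == v->cap)
        {
          v->cap = v->cap ? 2 * v->cap : 4;
          v->data = (int *) realloc (v->data, v->cap * sizeof *v->data);
        }
      v->data[v->len++] = edge_id;
    }

    /* Sketch of the new calling convention: NEW_EDGES may be NULL when the
       caller is not interested in newly discovered edges; the return value
       reports whether anything was added.  */
    static bool
    demo_mark_inline_edge (int edge_id, demo_edge_vec *new_edges)
    {
      bool discovered_indirect_call = (edge_id % 2 == 0);  /* stand-in check */
      if (!discovered_indirect_call || !new_edges)
        return false;
      demo_vec_push (new_edges, edge_id + 1);
      return true;
    }

    int
    main (void)
    {
      demo_edge_vec new_edges = { NULL, 0, 0 };

      /* A caller that collects the new edges, like the small-function
         heuristic feeding its fibheap.  */
      if (demo_mark_inline_edge (2, &new_edges))
        printf ("%zu new edge(s) queued\n", new_edges.len);

      /* A caller that does not, analogous to passing NULL in the patch.  */
      demo_mark_inline_edge (4, NULL);

      free (new_edges.data);
      return 0;
    }

Returning a "new edges appeared" flag is what lets the always-inline pass in the patch loop until nothing new shows up (the redo_always_inline flag), while passing NULL keeps the other call sites unchanged in behavior.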