+/* Return true when a builtin replacement can be generated for the
+   statements of PARTITION in RDG.  Only the "memset zero" pattern is
+   recognized for now: the partition must contain exactly one memory
+   write, that write must store zeros to adjacent locations, and the
+   partition must perform no memory reads.  */
+
+static bool
+can_generate_builtin (struct graph *rdg, bitmap partition)
+{
+  unsigned v;
+  bitmap_iterator iter;
+  int n_reads = 0;
+  int n_writes = 0;
+  int n_zero_stores = 0;
+
+  EXECUTE_IF_SET_IN_BITMAP (partition, 0, v, iter)
+    if (RDG_MEM_READS_STMT (rdg, v))
+      n_reads++;
+    else if (RDG_MEM_WRITE_STMT (rdg, v))
+      {
+	n_writes++;
+	if (stmt_with_adjacent_zero_store_dr_p (RDG_STMT (rdg, v)))
+	  n_zero_stores++;
+      }
+
+  return n_reads == 0 && n_writes == 1 && n_zero_stores == 1;
+}
+
+/* Return true when some memory access of PARTITION1 is similar to a
+   memory access of PARTITION2 in RDG, i.e. fusing the two partitions
+   could improve cache locality.  */
+
+static bool
+similar_memory_accesses (struct graph *rdg, bitmap partition1,
+			 bitmap partition2)
+{
+  unsigned v1, v2;
+  bitmap_iterator it1, it2;
+
+  /* Compare every memory-touching statement of the first partition
+     against every memory-touching statement of the second.  */
+  EXECUTE_IF_SET_IN_BITMAP (partition1, 0, v1, it1)
+    if (RDG_MEM_WRITE_STMT (rdg, v1)
+	|| RDG_MEM_READS_STMT (rdg, v1))
+      EXECUTE_IF_SET_IN_BITMAP (partition2, 0, v2, it2)
+	if (RDG_MEM_WRITE_STMT (rdg, v2)
+	    || RDG_MEM_READS_STMT (rdg, v2))
+	  if (rdg_has_similar_memory_accesses (rdg, v1, v2))
+	    return true;
+
+  return false;
+}
+
+/* Fuse all the partitions from PARTITIONS that contain similar memory
+ references, i.e., we're taking care of cache locality. This
+ function does not fuse those partitions that contain patterns that
+ can be code generated with builtins. */
+
+static void
+fuse_partitions_with_similar_memory_accesses (struct graph *rdg,
+ VEC (bitmap, heap) **partitions)
+{
+ int p1, p2;
+ bitmap partition1, partition2;
+
+ FOR_EACH_VEC_ELT (bitmap, *partitions, p1, partition1)
+ if (!can_generate_builtin (rdg, partition1))
+ FOR_EACH_VEC_ELT (bitmap, *partitions, p2, partition2)
+ if (p1 != p2
+ && !can_generate_builtin (rdg, partition2)
+ && similar_memory_accesses (rdg, partition1, partition2))
+ {
+ /* Absorb PARTITION2's statements into PARTITION1, drop
+    PARTITION2 from the vector, and step P2 back so the inner
+    walk revisits the element shifted into slot P2.  */
+ bitmap_ior_into (partition1, partition2);
+ VEC_ordered_remove (bitmap, *partitions, p2);
+ p2--;
+ /* NOTE(review): when p2 < p1, the ordered remove shifts
+    PARTITION1's index down by one while P1 is left unchanged,
+    so the outer iteration appears to skip one element —
+    presumably benign here, but verify.  */
+ }
+}
+
+/* Return true when STMT belongs to some partition of RDG other than
+   PART, and that other partition will not be replaced by a builtin,
+   so STMT will be emitted there as regular code.  */
+
+static bool
+stmt_generated_in_another_partition (struct graph *rdg, gimple stmt, int part,
+				     VEC (bitmap, heap) *partitions)
+{
+  int px;
+  bitmap other;
+  unsigned v;
+  bitmap_iterator iter;
+
+  FOR_EACH_VEC_ELT (bitmap, partitions, px, other)
+    if (px != part
+	&& !can_generate_builtin (rdg, other))
+      EXECUTE_IF_SET_IN_BITMAP (other, 0, v, iter)
+	if (stmt == RDG_STMT (rdg, v))
+	  return true;
+
+  return false;
+}
+
+/* For each partition in PARTITIONS that will be code generated using
+ a builtin, add its scalar computations used after the loop to
+ PARTITION. */
+
+static void
+add_scalar_computations_to_partition (struct graph *rdg,
+ VEC (bitmap, heap) *partitions,
+ bitmap partition)
+{
+ int p;
+ bitmap pp;
+ unsigned i;
+ bitmap_iterator bi;
+ /* Scratch bitmaps threaded through the flagging helpers below —
+    presumably L collects loops and PR the already-processed
+    vertices; confirm against rdg_flag_vertex_and_dependent.  */
+ bitmap l = BITMAP_ALLOC (NULL);
+ bitmap pr = BITMAP_ALLOC (NULL);
+ bool f = false;
+
+ FOR_EACH_VEC_ELT (bitmap, partitions, p, pp)
+ if (can_generate_builtin (rdg, pp))
+ EXECUTE_IF_SET_IN_BITMAP (pp, 0, i, bi)
+ /* Pull the statement and its dependences into PARTITION, but
+    only when its scalar results are live after the loop and no
+    other non-builtin partition already generates it.  */
+ if (stmt_has_scalar_dependences_outside_loop (RDG_STMT (rdg, i))
+ && !stmt_generated_in_another_partition (rdg, RDG_STMT (rdg, i), p,
+ partitions))
+ rdg_flag_vertex_and_dependent (rdg, i, partition, l, pr, &f);
+
+ rdg_flag_loop_exits (rdg, l, partition, pr, &f);
+
+ BITMAP_FREE (pr);
+ BITMAP_FREE (l);
+}
+