1 /* Mudflap: narrow-pointer bounds-checking by tree rewriting.
2 Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
3 Contributed by Frank Ch. Eigler <fche@redhat.com>
4 and Graydon Hoare <graydon@redhat.com>
5 Splay Tree code originally by Mark Mitchell <mark@markmitchell.com>,
6 adapted from libiberty.
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 2, or (at your option) any later
15 In addition to the permissions in the GNU General Public License, the
16 Free Software Foundation gives you unlimited permission to link the
17 compiled version of this file into combinations with other programs,
18 and to distribute those combinations without any restriction coming
19 from the use of this file. (The General Public License restrictions
20 do apply in other respects; for example, they cover modification of
21 the file, and distribution when not linked into a combine
24 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
25 WARRANTY; without even the implied warranty of MERCHANTABILITY or
26 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
29 You should have received a copy of the GNU General Public License
30 along with GCC; see the file COPYING. If not, write to the Free
31 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
36 /* These attempt to coax various unix flavours to declare all our
37 needed tidbits in the system headers. */
38 #if !defined(__FreeBSD__)
40 #endif /* Some BSDs break <sys/socket.h> if this is defined. */
44 #define __EXTENSIONS__
46 #define _LARGE_FILE_API
47 #define _XOPEN_SOURCE_EXTENDED 1
51 #include <sys/types.h>
55 #ifdef HAVE_EXECINFO_H
65 #include <sys/types.h>
70 #include "mf-runtime.h"
74 /* ------------------------------------------------------------------------ */
75 /* Splay-tree implementation. */
/* NOTE(review): this listing has dropped lines (gaps in the embedded line
   numbers, e.g. 87->91).  The struct bodies below are missing their opening
   braces, the node key field, and the closing "};" — restore from upstream
   GCC libmudflap mf-runtime.c before compiling. */
/* Keys are object base addresses; values are opaque pointers (here,
   __mf_object_t *). */
77 typedef uintptr_t mfsplay_tree_key;
78 typedef void *mfsplay_tree_value;
80 /* Forward declaration for a node in the tree. */
81 typedef struct mfsplay_tree_node_s *mfsplay_tree_node;
83 /* The type of a function used to iterate over the tree. */
84 typedef int (*mfsplay_tree_foreach_fn) (mfsplay_tree_node, void *);
86 /* The nodes in the splay tree. */
87 struct mfsplay_tree_node_s
91 mfsplay_tree_value value;
93 mfsplay_tree_node left;
94 mfsplay_tree_node right;
95 /* XXX: The addition of a parent pointer may eliminate some recursion. */
98 /* The splay tree itself. */
101 /* The root of the tree. */
102 mfsplay_tree_node root;
104 /* The last key value for which the tree has been splayed, but not
106 mfsplay_tree_key last_splayed_key;
107 int last_splayed_key_p;
112 /* Traversal recursion control flags. */
115 unsigned rebalance_p;
117 typedef struct mfsplay_tree_s *mfsplay_tree;
/* Forward declarations for the splay-tree operations defined later in this
   file (create / insert / remove / lookup / neighbor queries / iterate /
   rebalance). */
119 static mfsplay_tree mfsplay_tree_new (void);
120 static mfsplay_tree_node mfsplay_tree_insert (mfsplay_tree, mfsplay_tree_key, mfsplay_tree_value);
121 static void mfsplay_tree_remove (mfsplay_tree, mfsplay_tree_key);
122 static mfsplay_tree_node mfsplay_tree_lookup (mfsplay_tree, mfsplay_tree_key);
123 static mfsplay_tree_node mfsplay_tree_predecessor (mfsplay_tree, mfsplay_tree_key);
124 static mfsplay_tree_node mfsplay_tree_successor (mfsplay_tree, mfsplay_tree_key);
125 static int mfsplay_tree_foreach (mfsplay_tree, mfsplay_tree_foreach_fn, void *);
126 static void mfsplay_tree_rebalance (mfsplay_tree sp);
128 /* ------------------------------------------------------------------------ */
/* Constructor/destructor attributes: run __mf_init before main and
   __mf_fini after exit. */
131 #define CTOR __attribute__ ((constructor))
132 #define DTOR __attribute__ ((destructor))
135 /* Codes to describe the context in which a violation occurs. */
136 #define __MF_VIOL_UNKNOWN 0
137 #define __MF_VIOL_READ 1
138 #define __MF_VIOL_WRITE 2
139 #define __MF_VIOL_REGISTER 3
140 #define __MF_VIOL_UNREGISTER 4
141 #define __MF_VIOL_WATCH 5
143 /* Protect against recursive calls. */
/* NOTE(review): lines are missing from this listing (e.g. 149, 151-152,
   155+), so the macro bodies below lack their closing braces and
   "} while (0)" tails — restore from upstream before compiling.  The intent
   visible here: flag reentrancy to stderr via raw write(2) (safe inside the
   runtime), then mark __mf_state reentrant for the protected region and
   restore it to active on exit. */
144 #define BEGIN_RECURSION_PROTECT() do { \
145 if (UNLIKELY (__mf_state == reentrant)) { \
146 write (2, "mf: erroneous reentrancy detected in `", 38); \
147 write (2, __PRETTY_FUNCTION__, strlen(__PRETTY_FUNCTION__)); \
148 write (2, "'\n", 2); \
150 __mf_state = reentrant; \
153 #define END_RECURSION_PROTECT() do { \
154 __mf_state = active; \
159 /* ------------------------------------------------------------------------ */
160 /* Required globals. */
/* NOTE(review): embedded line numbers jump (e.g. 177->182, 186->190), so
   some #else/#endif lines and declarations are missing from this listing. */
162 #define LOOKUP_CACHE_MASK_DFL 1023
163 #define LOOKUP_CACHE_SIZE_MAX 4096 /* Allows max CACHE_MASK 0x0FFF */
164 #define LOOKUP_CACHE_SHIFT_DFL 2
/* Direct-mapped cache of recently-checked (low,high) object extents,
   indexed by __MF_CACHE_INDEX(ptr) using the mask/shift below. */
166 struct __mf_cache __mf_lookup_cache [LOOKUP_CACHE_SIZE_MAX];
167 uintptr_t __mf_lc_mask = LOOKUP_CACHE_MASK_DFL;
168 unsigned char __mf_lc_shift = LOOKUP_CACHE_SHIFT_DFL;
169 #define LOOKUP_CACHE_SIZE (__mf_lc_mask + 1)
171 struct __mf_options __mf_opts;
/* Nonzero while the runtime is bootstrapping (before __mf_init finishes). */
173 int __mf_starting_p = 1;
175 enum __mf_state_enum __mf_state = active;
177 /* See __mf_state_perthread() in mf-hooks.c. */
182 pthread_mutex_t __mf_biglock =
183 #ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
184 PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
186 PTHREAD_MUTEX_INITIALIZER;
190 /* Use HAVE_PTHREAD_H here instead of LIBMUDFLAPTH, so that even
191 the libmudflap.la (no threading support) can diagnose whether
192 the application is linked with -lpthread. See __mf_usage() below. */
194 #ifdef _POSIX_THREADS
195 #pragma weak pthread_join
197 #define pthread_join NULL
/* Non-NULL iff the application actually links the real pthread_join,
   i.e. iff it is multi-threaded (weak-symbol trick above). */
199 const void *threads_active_p = (void *) pthread_join;
203 /* ------------------------------------------------------------------------ */
204 /* stats-related globals. */
206 static unsigned long __mf_count_check;
207 static unsigned long __mf_lookup_cache_reusecount [LOOKUP_CACHE_SIZE_MAX];
208 static unsigned long __mf_count_register;
209 static unsigned long __mf_total_register_size [__MF_TYPE_MAX+1];
210 static unsigned long __mf_count_unregister;
211 static unsigned long __mf_total_unregister_size;
212 static unsigned long __mf_count_violation [__MF_VIOL_WATCH+1];
213 static unsigned long __mf_sigusr1_received;
214 static unsigned long __mf_sigusr1_handled;
215 /* not static */ unsigned long __mf_reentrancy;
217 /* not static */ unsigned long __mf_lock_contention;
221 /* ------------------------------------------------------------------------ */
222 /* mode-check-related globals. */
/* NOTE(review): this struct is incomplete in this listing — the opening
   brace, several fields (at least a name/alloc_pc field and the
   "} __mf_object_t;" tail, per the uses elsewhere in this file), and some
   #ifdef guards are missing.  Restore from upstream before compiling. */
/* Tracking data for a single registered memory region. */
224 typedef struct __mf_object
226 uintptr_t low, high; /* __mf_register parameters */
228 char type; /* __MF_TYPE_something */
229 char watching_p; /* Trigger a VIOL_WATCH on access? */
230 unsigned read_count; /* Number of times __mf_check/read was called on this object. */
231 unsigned write_count; /* Likewise for __mf_check/write. */
232 unsigned liveness; /* A measure of recent checking activity. */
233 unsigned description_epoch; /* Last epoch __mf_describe_object printed this. */
236 struct timeval alloc_time;
237 char **alloc_backtrace;
238 size_t alloc_backtrace_size;
240 pthread_t alloc_thread;
244 uintptr_t dealloc_pc;
245 struct timeval dealloc_time;
246 char **dealloc_backtrace;
247 size_t dealloc_backtrace_size;
249 pthread_t dealloc_thread;
253 /* Live objects: splay trees, separated by type, ordered on .low (base address). */
254 /* Actually stored as static vars within lookup function below. */
256 /* Dead objects: circular arrays; _MIN_CEM .. _MAX_CEM only */
257 static unsigned __mf_object_dead_head[__MF_TYPE_MAX_CEM+1]; /* next empty spot */
258 static __mf_object_t *__mf_object_cemetary[__MF_TYPE_MAX_CEM+1][__MF_PERSIST_MAX];
261 /* ------------------------------------------------------------------------ */
262 /* Forward function declarations */
/* __mf_init runs as a constructor before main (see CTOR above).  The
   __mf_find_objects* helpers return the number of registered objects
   overlapping [ptr_low, ptr_high], writing up to max_objs of them into
   objs. */
264 void __mf_init () CTOR;
265 static void __mf_sigusr1_respond ();
266 static unsigned __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
267 __mf_object_t **objs, unsigned max_objs);
268 static unsigned __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
269 __mf_object_t **objs, unsigned max_objs, int type);
270 static unsigned __mf_find_dead_objects (uintptr_t ptr_low, uintptr_t ptr_high,
271 __mf_object_t **objs, unsigned max_objs);
272 static void __mf_adapt_cache ();
273 static void __mf_describe_object (__mf_object_t *obj);
274 static unsigned __mf_watch_or_not (void *ptr, size_t sz, char flag);
275 static mfsplay_tree __mf_object_tree (int type);
276 static void __mf_link_object (__mf_object_t *node);
277 static void __mf_unlink_object (__mf_object_t *node);
280 /* ------------------------------------------------------------------------ */
281 /* Configuration engine */
/* Reset __mf_opts to its built-in defaults.  Called from __mf_init before
   MUDFLAP_OPTIONS is parsed.
   NOTE(review): the return-type line, opening brace, and closing brace are
   missing from this listing (numbering jumps 281->284->286) — restore from
   upstream before compiling. */
284 __mf_set_default_options ()
286 memset (& __mf_opts, 0, sizeof (__mf_opts));
288 __mf_opts.adapt_cache = 1000003;
289 __mf_opts.abbreviate = 1;
290 __mf_opts.verbose_violations = 1;
291 __mf_opts.free_queue_length = 4;
292 __mf_opts.persistent_count = 100;
293 __mf_opts.crumple_zone = 32;
294 __mf_opts.backtrace = 4;
295 __mf_opts.timestamps = 1;
296 __mf_opts.mudflap_mode = mode_check;
297 __mf_opts.violation_mode = viol_nop;
298 __mf_opts.heur_std_data = 1;
300 __mf_opts.thread_stack = 0;
/* Table describing every recognized MUDFLAP_OPTIONS keyword: description
   string, handler kind (set_option / read_integer_option), value, and the
   target field it writes.  Consumed by __mfu_set_options and __mf_usage.
   NOTE(review): this listing has dropped the table's declaration line and
   most entries' opening '{"name",' lines (numbering jumps 318->319, 321->322,
   etc.), so most rows below show only their description/handler lines.
   Restore from upstream GCC libmudflap mf-runtime.c before compiling. */
319 "mudflaps do nothing",
320 set_option, (int)mode_nop, (int *)&__mf_opts.mudflap_mode},
322 "mudflaps populate object tree",
323 set_option, (int)mode_populate, (int *)&__mf_opts.mudflap_mode},
325 "mudflaps check for memory violations",
326 set_option, (int)mode_check, (int *)&__mf_opts.mudflap_mode},
328 "mudflaps always cause violations (diagnostic)",
329 set_option, (int)mode_violate, (int *)&__mf_opts.mudflap_mode},
332 "violations do not change program execution",
333 set_option, (int)viol_nop, (int *)&__mf_opts.violation_mode},
335 "violations cause a call to abort()",
336 set_option, (int)viol_abort, (int *)&__mf_opts.violation_mode},
338 "violations are promoted to SIGSEGV signals",
339 set_option, (int)viol_segv, (int *)&__mf_opts.violation_mode},
341 "violations fork a gdb process attached to current program",
342 set_option, (int)viol_gdb, (int *)&__mf_opts.violation_mode},
344 "trace calls to mudflap runtime library",
345 set_option, 1, &__mf_opts.trace_mf_calls},
347 "trace internal events within mudflap runtime library",
348 set_option, 1, &__mf_opts.verbose_trace},
350 "collect statistics on mudflap's operation",
351 set_option, 1, &__mf_opts.collect_stats},
354 "print report upon SIGUSR1",
355 set_option, 1, &__mf_opts.sigusr1_report},
357 {"internal-checking",
358 "perform more expensive internal checking",
359 set_option, 1, &__mf_opts.internal_checking},
361 "print any memory leaks at program shutdown",
362 set_option, 1, &__mf_opts.print_leaks},
363 {"check-initialization",
364 "detect uninitialized object reads",
365 set_option, 1, &__mf_opts.check_initialization},
366 {"verbose-violations",
367 "print verbose messages when memory violations occur",
368 set_option, 1, &__mf_opts.verbose_violations},
370 "abbreviate repetitive listings",
371 set_option, 1, &__mf_opts.abbreviate},
373 "track object lifetime timestamps",
374 set_option, 1, &__mf_opts.timestamps},
376 "ignore read accesses - assume okay",
377 set_option, 1, &__mf_opts.ignore_reads},
379 "wipe stack objects at unwind",
380 set_option, 1, &__mf_opts.wipe_stack},
382 "wipe heap objects at free",
383 set_option, 1, &__mf_opts.wipe_heap},
385 "support /proc/self/map heuristics",
386 set_option, 1, &__mf_opts.heur_proc_map},
388 "enable a simple upper stack bound heuristic",
389 set_option, 1, &__mf_opts.heur_stack_bound},
391 "support _start.._end heuristics",
392 set_option, 1, &__mf_opts.heur_start_end},
394 "register standard library data (argv, errno, stdin, ...)",
395 set_option, 1, &__mf_opts.heur_std_data},
396 {"free-queue-length",
397 "queue N deferred free() calls before performing them",
398 read_integer_option, 0, &__mf_opts.free_queue_length},
400 "keep a history of N unregistered regions",
401 read_integer_option, 0, &__mf_opts.persistent_count},
403 "surround allocations with crumple zones of N bytes",
404 read_integer_option, 0, &__mf_opts.crumple_zone},
405 /* XXX: not type-safe.
407 "set lookup cache size mask to N (2**M - 1)",
408 read_integer_option, 0, (int *)(&__mf_lc_mask)},
410 "set lookup cache pointer shift",
411 read_integer_option, 0, (int *)(&__mf_lc_shift)},
414 "adapt mask/shift parameters after N cache misses",
415 read_integer_option, 1, &__mf_opts.adapt_cache},
417 "keep an N-level stack trace of each call context",
418 read_integer_option, 0, &__mf_opts.backtrace},
421 "override thread stacks allocation: N kB",
422 read_integer_option, 0, &__mf_opts.thread_stack},
424 {0, 0, set_option, 0, NULL}
/* Fragment of __mf_usage(): prints the MUDFLAP_OPTIONS help banner and then
   one line per entry of the options table above, showing boolean options'
   [active] state and integer options' current value.
   NOTE(review): the function header, switch statement, buffer declaration,
   and closing braces are all missing from this listing (numbering jumps
   424->433, 445->455, 475->481) — restore from upstream before compiling.
   The strncpy pair below builds "name=N" into buf; presumably buf is a
   local char[128+] declared on a dropped line — confirm upstream (snprintf
   would be the safer idiom if this is ever modernized). */
433 "This is a %s%sGCC \"mudflap\" memory-checked binary.\n"
434 "Mudflap is Copyright (C) 2002-2004 Free Software Foundation, Inc.\n"
436 "The mudflap code can be controlled by an environment variable:\n"
438 "$ export MUDFLAP_OPTIONS='<options>'\n"
439 "$ <mudflapped_program>\n"
441 "where <options> is a space-separated list of \n"
442 "any of the following options. Use `-no-OPTION' to disable options.\n"
445 (threads_active_p ? "multi-threaded " : "single-threaded "),
455 /* XXX: The multi-threaded thread-unaware combination is bad. */
457 for (opt = options; opt->name; opt++)
459 int default_p = (opt->value == * opt->target);
465 fprintf (stderr, "-%-23.23s %s", opt->name, opt->description);
467 fprintf (stderr, " [active]\n");
469 fprintf (stderr, "\n");
471 case read_integer_option:
472 strncpy (buf, opt->name, 128);
473 strncpy (buf + strlen (opt->name), "=N", 2);
474 fprintf (stderr, "-%-23.23s %s", buf, opt->description);
475 fprintf (stderr, " [%d]\n", * opt->target);
481 fprintf (stderr, "\n");
/* Public, lock-taking wrapper around __mfu_set_options: parse and apply a
   MUDFLAP_OPTIONS-style string at runtime.
   NOTE(review): the return-type line, braces, `int rc` declaration,
   LOCKTH/UNLOCKTH calls, and `return rc` are missing from this listing
   (numbering jumps 486->490, 495->502) — restore from upstream. */
486 __mf_set_options (const char *optstr)
490 BEGIN_RECURSION_PROTECT ();
491 rc = __mfu_set_options (optstr);
492 /* XXX: It's not really that easy. A change to a bunch of parameters
493 can require updating auxiliary state or risk crashing:
494 free_queue_length, crumple_zone ... */
495 END_RECURSION_PROTECT ();
/* Unlocked option-string parser: walks optstr token by token, matching each
   against the options[] table; "no-" prefix negates, "name=N" supplies an
   integer via strtol.  Returns nonzero (from a dropped line) when the caller
   should print help.  After parsing, clamps security-sensitive parameters
   and flushes the lookup cache.
   NOTE(review): roughly half this function's lines are missing from this
   listing (numbering jumps 504->508, 510->527, 562->576, etc.): the return
   type, braces, the whitespace-skipping loop, the switch over opts->type,
   and the negate/tmp/nxt declarations.  Restore from upstream before
   editing. */
502 __mfu_set_options (const char *optstr)
504 struct option *opts = 0;
508 const char *saved_optstr = optstr;
510 /* XXX: bounds-check for optstr! */
527 if (*optstr == '?' ||
528 strncmp (optstr, "help", 4) == 0)
530 /* Caller will print help and exit. */
534 if (strncmp (optstr, "no-", 3) == 0)
537 optstr = & optstr[3];
540 for (opts = options; opts->name; opts++)
542 if (strncmp (optstr, opts->name, strlen (opts->name)) == 0)
544 optstr += strlen (opts->name);
545 assert (opts->target);
552 *(opts->target) = opts->value;
554 case read_integer_option:
555 if (! negate && (*optstr == '=' && *(optstr+1)))
558 tmp = strtol (optstr, &nxt, 10);
559 if ((optstr != nxt) && (tmp != LONG_MAX))
562 *(opts->target) = (int)tmp;
576 "warning: unrecognized string '%s' in mudflap options\n",
578 optstr += strlen (optstr);
584 /* Special post-processing: bound __mf_lc_mask and free_queue_length for security. */
585 __mf_lc_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
586 __mf_opts.free_queue_length &= (__MF_FREEQ_MAX - 1);
588 /* Clear the lookup cache, in case the parameters got changed. */
590 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
592 __mf_lookup_cache[0].low = MAXPTR;
594 TRACE ("set options from `%s'\n", saved_optstr);
596 /* Call this unconditionally, in case -sigusr1-report was toggled. */
597 __mf_sigusr1_respond ();
/* Resolve one wrapped libc/pthread entry point to the *next* definition in
   link order (the real implementation) via dlsym/dlvsym(RTLD_NEXT),
   caching the result in e->pointer.  Idempotent: returns early if already
   resolved.
   NOTE(review): the function head, braces, dlerror() handling, and abort
   paths around the fprintf calls are missing from this listing (numbering
   jumps 606->611, 618->624) — restore from upstream. */
606 __mf_resolve_single_dynamic (struct __mf_dynamic_entry *e)
611 if (e->pointer) return;
614 if (e->version != NULL && e->version[0] != '\0') /* non-null/empty */
615 e->pointer = dlvsym (RTLD_NEXT, e->name, e->version);
618 e->pointer = dlsym (RTLD_NEXT, e->name);
624 fprintf (stderr, "mf: error in dlsym(\"%s\"): %s\n",
630 fprintf (stderr, "mf: dlsym(\"%s\") = NULL\n", e->name);
/* Resolve the initial batch of wrapped functions (indices below
   dyn_INITRESOLVE) at startup; pthread entries are resolved lazily later.
   NOTE(review): the function head/braces, the `int i` declaration, the
   #ifdef LIBMUDFLAPTH guards around the pthread entries, and the table's
   closing "};" are missing from this listing — restore from upstream. */
637 __mf_resolve_dynamics ()
640 for (i = 0; i < dyn_INITRESOLVE; i++)
641 __mf_resolve_single_dynamic (& __mf_dynamic[i]);
645 /* NB: order must match enums in mf-impl.h */
646 struct __mf_dynamic_entry __mf_dynamic [] =
648 {NULL, "calloc", NULL},
649 {NULL, "free", NULL},
650 {NULL, "malloc", NULL},
651 {NULL, "mmap", NULL},
652 {NULL, "munmap", NULL},
653 {NULL, "realloc", NULL},
654 {NULL, "DUMMY", NULL}, /* dyn_INITRESOLVE */
656 {NULL, "pthread_create", PTHREAD_CREATE_VERSION},
657 {NULL, "pthread_join", NULL},
658 {NULL, "pthread_exit", NULL}
666 /* ------------------------------------------------------------------------ */
668 /* Lookup & manage automatic initialization of the five or so splay trees. */
/* Return the lazily-created splay tree holding live objects of the given
   __MF_TYPE_* category.
   NOTE(review): the `static mfsplay_tree` head line, braces, and the
   `return trees[type];` tail are missing from this listing — restore from
   upstream. */
670 __mf_object_tree (int type)
672 static mfsplay_tree trees [__MF_TYPE_MAX+1];
673 assert (type >= 0 && type <= __MF_TYPE_MAX);
674 if (UNLIKELY (trees[type] == NULL))
675 trees[type] = mfsplay_tree_new ();
/* Body fragment of __mf_init (constructor): one-shot runtime bootstrap —
   resolve real libc entry points, load defaults, parse MUDFLAP_OPTIONS,
   then register the runtime's own statics and *NULL as NOACCESS regions
   so the instrumented program cannot touch them.
   NOTE(review): the function header, braces, the `char *ov` declaration,
   the error/usage handling after __mfu_set_options, and the __mf_starting_p
   reset are missing from this listing (numbering jumps 686->689, 700->708)
   — restore from upstream before editing. */
685 /* Return if initialization has already been done. */
686 if (LIKELY (__mf_starting_p == 0))
689 /* This initial bootstrap phase requires that __mf_starting_p = 1. */
691 __mf_resolve_dynamics ();
695 __mf_set_default_options ();
697 ov = getenv ("MUDFLAP_OPTIONS");
700 int rc = __mfu_set_options (ov);
708 /* Initialize to a non-zero description epoch. */
709 __mf_describe_object (NULL);
711 #define REG_RESERVED(obj) \
712 __mf_register (& obj, sizeof(obj), __MF_TYPE_NOACCESS, # obj)
714 REG_RESERVED (__mf_lookup_cache);
715 REG_RESERVED (__mf_lc_mask);
716 REG_RESERVED (__mf_lc_shift);
717 /* XXX: others of our statics? */
719 /* Prevent access to *NULL. */
720 __mf_register (MINPTR, 1, __MF_TYPE_NOACCESS, "NULL");
721 __mf_lookup_cache[0].low = (uintptr_t) -1;
/* Fragment of __wrap_main: linker-interposed main() wrapper that, once,
   registers argv[], each argv string, environ[], each environ string,
   errno, and the stdio FILE objects as STATIC regions before chaining to
   the real main.
   NOTE(review): many lines are missing from this listing (numbering jumps
   727->729, 742->747, 764->768): the return type, braces, the `int i` /
   `unsigned j` declarations, the been_here guard body, and the #ifdef that
   selects between `main` and `__real_main`.  Restore from upstream.
   The trailing two lines belong to __mf_fini (DTOR), whose body is also
   mostly missing here. */
727 __wrap_main (int argc, char* argv[])
729 extern char **environ;
731 static int been_here = 0;
733 if (__mf_opts.heur_std_data && ! been_here)
738 __mf_register (argv, sizeof(char *)*(argc+1), __MF_TYPE_STATIC, "argv[]");
739 for (i=0; i<argc; i++)
741 unsigned j = strlen (argv[i]);
742 __mf_register (argv[i], j+1, __MF_TYPE_STATIC, "argv element");
747 char *e = environ[i];
749 if (e == NULL) break;
750 j = strlen (environ[i]);
751 __mf_register (environ[i], j+1, __MF_TYPE_STATIC, "environ element");
753 __mf_register (environ, sizeof(char *)*(i+1), __MF_TYPE_STATIC, "environ[]");
755 __mf_register (& errno, sizeof (errno), __MF_TYPE_STATIC, "errno area");
757 __mf_register (stdin, sizeof (*stdin), __MF_TYPE_STATIC, "stdin");
758 __mf_register (stdout, sizeof (*stdout), __MF_TYPE_STATIC, "stdout");
759 __mf_register (stderr, sizeof (*stderr), __MF_TYPE_STATIC, "stderr");
761 /* Make some effort to register ctype.h static arrays. */
762 /* XXX: e.g., on Solaris, may need to register __ctype, _ctype, __ctype_mask, __toupper, etc. */
763 /* On modern Linux GLIBC, these are thread-specific and changeable, and are dealt
764 with in mf-hooks2.c. */
768 return main (argc, argv, environ);
770 return __real_main (argc, argv, environ);
776 extern void __mf_fini () DTOR;
779 TRACE ("__mf_fini\n");
785 /* ------------------------------------------------------------------------ */
/* Public access-check entry point: takes the big lock / reentrancy guard
   and delegates to the unlocked __mfu_check.
   NOTE(review): braces and the LOCKTH/UNLOCKTH calls are missing from this
   listing (numbering jumps 788->791) — restore from upstream. */
788 void __mf_check (void *ptr, size_t sz, int type, const char *location)
791 BEGIN_RECURSION_PROTECT ();
792 __mfu_check (ptr, sz, type, location);
793 END_RECURSION_PROTECT ();
/* Unlocked core of the access check.  Computes the accessed range
   [ptr_low, ptr_high] and its lookup-cache slot, then dispatches on
   __mf_opts.mudflap_mode: nop counts only; populate primes the cache;
   check searches the live-object trees for overlapping registrations,
   judging the access ok (>0), undecided (0), or a violation (<0), with up
   to two rounds of heuristic fallback; violate always fails.  A judged-ok,
   single-object access caches that object's extent for fast re-checks.
   NOTE(review): a large number of lines are missing from this listing
   (numbering jumps 814->817, 821->828, 829->835, 841->847, 868->869->873,
   882->889, 941->955, 968->...): the opening brace, all `case` labels and
   `break`s of the switch, the mode_populate label, several counter updates,
   the adapt-cache call, obj_count/i declarations, the violation-type tail,
   and the closing braces.  Restore from upstream GCC libmudflap
   mf-runtime.c before making any change here. */
798 void __mfu_check (void *ptr, size_t sz, int type, const char *location)
800 unsigned entry_idx = __MF_CACHE_INDEX (ptr);
801 struct __mf_cache *entry = & __mf_lookup_cache [entry_idx];
802 int judgement = 0; /* 0=undecided; <0=violation; >0=okay */
803 uintptr_t ptr_low = (uintptr_t) ptr;
804 uintptr_t ptr_high = CLAMPSZ (ptr, sz);
805 struct __mf_cache old_entry = *entry;
807 if (UNLIKELY (__mf_opts.sigusr1_report))
808 __mf_sigusr1_respond ();
810 TRACE ("check ptr=%p b=%u size=%lu %s location=`%s'\n",
811 ptr, entry_idx, (unsigned long)sz,
812 (type == 0 ? "read" : "write"), location);
814 switch (__mf_opts.mudflap_mode)
817 /* It is tempting to poison the cache here similarly to
818 mode_populate. However that eliminates a valuable
819 distinction between these two modes. mode_nop is useful to
820 let a user count & trace every single check / registration
821 call. mode_populate is useful to let a program run fast
828 entry->low = ptr_low;
829 entry->high = ptr_high;
835 unsigned heuristics = 0;
837 /* Advance aging/adaptation counters. */
838 static unsigned adapt_count;
840 if (UNLIKELY (__mf_opts.adapt_cache > 0 &&
841 adapt_count > __mf_opts.adapt_cache))
847 /* Looping only occurs if heuristics were triggered. */
848 while (judgement == 0)
850 DECLARE (void, free, void *p);
851 __mf_object_t* ovr_obj[1];
853 __mf_object_t** all_ovr_obj = NULL;
854 __mf_object_t** dealloc_me = NULL;
857 /* Find all overlapping objects. Be optimistic that there is just one. */
858 obj_count = __mf_find_objects (ptr_low, ptr_high, ovr_obj, 1);
859 if (UNLIKELY (obj_count > 1))
861 /* Allocate a real buffer and do the search again. */
862 DECLARE (void *, malloc, size_t c);
864 all_ovr_obj = CALL_REAL (malloc, (sizeof (__mf_object_t *) *
866 if (all_ovr_obj == NULL) abort ();
867 n = __mf_find_objects (ptr_low, ptr_high, all_ovr_obj, obj_count);
868 assert (n == obj_count);
869 dealloc_me = all_ovr_obj;
873 all_ovr_obj = ovr_obj;
877 /* Update object statistics. */
878 for (i = 0; i < obj_count; i++)
880 __mf_object_t *obj = all_ovr_obj[i];
881 assert (obj != NULL);
882 if (type == __MF_CHECK_READ)
889 /* Iterate over the various objects. There are a number of special cases. */
890 for (i = 0; i < obj_count; i++)
892 __mf_object_t *obj = all_ovr_obj[i];
894 /* Any __MF_TYPE_NOACCESS hit is bad. */
895 if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
898 /* Any object with a watch flag is bad. */
899 if (UNLIKELY (obj->watching_p))
900 judgement = -2; /* trigger VIOL_WATCH */
902 /* A read from an uninitialized object is bad. */
903 if (UNLIKELY (__mf_opts.check_initialization
905 && type == __MF_CHECK_READ
907 && obj->write_count == 0
908 /* uninitialized (heap) */
909 && obj->type == __MF_TYPE_HEAP))
913 /* We now know that the access spans one or more valid objects. */
914 if (LIKELY (judgement >= 0))
915 for (i = 0; i < obj_count; i++)
917 __mf_object_t *obj = all_ovr_obj[i];
919 /* Is this access entirely contained within this object? */
920 if (LIKELY (ptr_low >= obj->low && ptr_high <= obj->high))
923 entry->low = obj->low;
924 entry->high = obj->high;
928 /* XXX: Access runs off left or right side of this
929 object. That could be okay, if there are
930 other objects that fill in all the holes. */
933 if (dealloc_me != NULL)
934 CALL_REAL (free, dealloc_me);
936 /* If the judgment is still unknown at this stage, loop
937 around at most one more time. */
940 if (heuristics++ < 2) /* XXX parametrize this number? */
941 judgement = __mf_heuristic_check (ptr_low, ptr_high);
955 if (__mf_opts.collect_stats)
959 if (LIKELY (old_entry.low != entry->low || old_entry.high != entry->high))
960 /* && (old_entry.low != 0) && (old_entry.high != 0)) */
961 __mf_lookup_cache_reusecount [entry_idx] ++;
964 if (UNLIKELY (judgement < 0))
965 __mf_violation (ptr, sz,
966 (uintptr_t) __builtin_return_address (0), location,
968 (type == __MF_CHECK_READ ? __MF_VIOL_READ : __MF_VIOL_WRITE) :
/* Allocate (via the real calloc) and populate a new __mf_object_t for the
   region [low, high], stamping allocation time/thread/backtrace per the
   current options, then link it into the per-type splay tree.
   NOTE(review): lines are missing from this listing (numbering jumps
   975->977, 980->982): the opening brace, the `new_obj->low = low;`
   assignment (original line 981), the #endif/#ifdef LIBMUDFLAPTH guards,
   the backtrace count argument, and the `return new_obj;` tail.  Restore
   from upstream before editing — in particular do NOT assume .low is
   uninitialized upstream; only this listing dropped that line. */
973 static __mf_object_t *
974 __mf_insert_new_object (uintptr_t low, uintptr_t high, int type,
975 const char *name, uintptr_t pc)
977 DECLARE (void *, calloc, size_t c, size_t n);
979 __mf_object_t *new_obj;
980 new_obj = CALL_REAL (calloc, 1, sizeof(__mf_object_t));
982 new_obj->high = high;
983 new_obj->type = type;
984 new_obj->name = name;
985 new_obj->alloc_pc = pc;
986 #if HAVE_GETTIMEOFDAY
987 if (__mf_opts.timestamps)
988 gettimeofday (& new_obj->alloc_time, NULL);
991 new_obj->alloc_thread = pthread_self ();
994 if (__mf_opts.backtrace > 0 && (type == __MF_TYPE_HEAP || type == __MF_TYPE_HEAP_I))
995 new_obj->alloc_backtrace_size =
996 __mf_backtrace (& new_obj->alloc_backtrace,
999 __mf_link_object (new_obj);
/* Evict any lookup-cache entries that refer to old_obj's extent, by
   scanning the slot range its low..high addresses can map to and resetting
   matching entries to the impossible (MAXPTR, MINPTR) pair.  Skipped
   entirely if the object was never checked (cannot be cached).
   NOTE(review): the function head/braces, the `unsigned i` declaration,
   and inner closing braces are missing from this listing (numbering jumps
   1005->1007, 1015->1017, 1022->1024) — restore from upstream. */
1005 __mf_uncache_object (__mf_object_t *old_obj)
1007 /* Remove any low/high pointers for this object from the lookup cache. */
1009 /* Can it possibly exist in the cache? */
1010 if (LIKELY (old_obj->read_count + old_obj->write_count))
1012 uintptr_t low = old_obj->low;
1013 uintptr_t high = old_obj->high;
1014 unsigned idx_low = __MF_CACHE_INDEX (low);
1015 unsigned idx_high = __MF_CACHE_INDEX (high);
1017 for (i = idx_low; i <= idx_high; i++)
1019 struct __mf_cache *entry = & __mf_lookup_cache [i];
1020 /* NB: the "||" in the following test permits this code to
1021 tolerate the situation introduced by __mf_check over
1022 contiguous objects, where a cache entry spans several
1024 if (entry->low == low || entry->high == high)
1026 entry->low = MAXPTR;
1027 entry->high = MINPTR;
/* Public registration entry point: lock / reentrancy-guard wrapper around
   __mfu_register.
   NOTE(review): the return-type line, braces, and LOCKTH/UNLOCKTH calls
   are missing from this listing — restore from upstream. */
1035 __mf_register (void *ptr, size_t sz, int type, const char *name)
1038 BEGIN_RECURSION_PROTECT ();
1039 __mfu_register (ptr, sz, type, name);
1040 END_RECURSION_PROTECT ();
/* Unlocked core of object registration.  Depending on mudflap_mode:
   mode_violate reports a violation; mode_populate just flushes the cache;
   mode_check searches for same-type overlaps — an exact-duplicate
   STATIC/GUESS registration is tolerated (distinct compilation units),
   any other overlap is a __MF_VIOL_REGISTER, and a clean region gets a new
   object via __mf_insert_new_object.
   NOTE(review): lines are missing from this listing (numbering jumps
   1062->1068, 1069->1073, 1078->1083, 1112->1116, 1126->1128): the
   opening brace, the switch's `case` labels and `break`s, the stats-index
   tail, and the closing braces.  Restore from upstream before editing. */
1046 __mfu_register (void *ptr, size_t sz, int type, const char *name)
1048 TRACE ("register ptr=%p size=%lu type=%x name='%s'\n",
1049 ptr, (unsigned long) sz, type, name ? name : "");
1051 if (__mf_opts.collect_stats)
1053 __mf_count_register ++;
1054 __mf_total_register_size [(type < 0) ? 0 :
1055 (type > __MF_TYPE_MAX) ? 0 :
1059 if (UNLIKELY (__mf_opts.sigusr1_report))
1060 __mf_sigusr1_respond ();
1062 switch (__mf_opts.mudflap_mode)
1068 __mf_violation (ptr, sz, (uintptr_t) __builtin_return_address (0), NULL,
1069 __MF_VIOL_REGISTER);
1073 /* Clear the cache. */
1074 /* XXX: why the entire cache? */
1076 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1078 __mf_lookup_cache[0].low = MAXPTR;
1083 __mf_object_t *ovr_objs [1];
1084 unsigned num_overlapping_objs;
1085 uintptr_t low = (uintptr_t) ptr;
1086 uintptr_t high = CLAMPSZ (ptr, sz);
1087 uintptr_t pc = (uintptr_t) __builtin_return_address (0);
1089 /* Treat unknown size indication as 1. */
1090 if (UNLIKELY (sz == 0)) sz = 1;
1092 /* Look for objects only of the same type. This will e.g. permit a registration
1093 of a STATIC overlapping with a GUESS, and a HEAP with a NOACCESS. At
1094 __mf_check time however harmful overlaps will be detected. */
1095 num_overlapping_objs = __mf_find_objects2 (low, high, ovr_objs, 1, type);
1097 /* Handle overlaps. */
1098 if (UNLIKELY (num_overlapping_objs > 0))
1100 __mf_object_t *ovr_obj = ovr_objs[0];
1102 /* Accept certain specific duplication pairs. */
1103 if (((type == __MF_TYPE_STATIC) || (type == __MF_TYPE_GUESS))
1104 && ovr_obj->low == low
1105 && ovr_obj->high == high
1106 && ovr_obj->type == type)
1108 /* Duplicate registration for static objects may come
1109 from distinct compilation units. */
1110 VERBOSE_TRACE ("harmless duplicate reg %p-%p `%s'\n",
1111 (void *) low, (void *) high,
1112 (ovr_obj->name ? ovr_obj->name : ""));
1116 /* Alas, a genuine violation. */
1119 /* Two or more *real* mappings here. */
1120 __mf_violation ((void *) ptr, sz,
1121 (uintptr_t) __builtin_return_address (0), NULL,
1122 __MF_VIOL_REGISTER);
1125 else /* No overlapping objects: AOK. */
1126 __mf_insert_new_object (low, high, type, name, pc);
1128 /* We could conceivably call __mf_check() here to prime the cache,
1129 but then the read_count/write_count field is not reliable. */
1132 } /* end switch (__mf_opts.mudflap_mode) */
/* Public unregistration entry point: lock / reentrancy-guard wrapper
   around __mfu_unregister.
   NOTE(review): the return-type line, braces, and LOCKTH/UNLOCKTH calls
   are missing from this listing — restore from upstream. */
1137 __mf_unregister (void *ptr, size_t sz, int type)
1140 BEGIN_RECURSION_PROTECT ();
1141 __mfu_unregister (ptr, sz, type);
1142 END_RECURSION_PROTECT ();
/* Unlocked core of object unregistration.  In mode_check it locates the
   single same-type object matching (base, size) — retrying as HEAP when a
   HEAP_I lookup misses, since the free/realloc hooks cannot distinguish
   them — flags base/size mismatches as __MF_VIOL_UNREGISTER, unlinks and
   uncaches the object, optionally wipes its memory, moves it into the
   per-type circular "cemetery" for later leak/history reporting (evicting
   and freeing the previous occupant of the plot), and warns about
   never-accessed heap objects when print_leaks is set.
   NOTE(review): many lines are missing from this listing (numbering jumps
   1157->1163, 1184->1186, 1193->1197, 1203->1207, 1229->1232, 1237->1240,
   1253->1259, etc.): the opening brace, the switch's `case` labels and
   `break`s, the `old_obj = objs[0];` assignment that the mismatch test
   below depends on, the plot-increment line, #ifdef guards, and closing
   braces.  Restore from upstream GCC libmudflap mf-runtime.c before
   making any change here. */
1148 __mfu_unregister (void *ptr, size_t sz, int type)
1150 DECLARE (void, free, void *ptr);
1152 if (UNLIKELY (__mf_opts.sigusr1_report))
1153 __mf_sigusr1_respond ();
1155 TRACE ("unregister ptr=%p size=%lu type=%x\n", ptr, (unsigned long) sz, type);
1157 switch (__mf_opts.mudflap_mode)
1163 __mf_violation (ptr, sz,
1164 (uintptr_t) __builtin_return_address (0), NULL,
1165 __MF_VIOL_UNREGISTER);
1169 /* Clear the cache. */
1171 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1173 __mf_lookup_cache[0].low = MAXPTR;
1178 __mf_object_t *old_obj = NULL;
1179 __mf_object_t *del_obj = NULL; /* Object to actually delete. */
1180 __mf_object_t *objs[1] = {NULL};
1181 unsigned num_overlapping_objs;
1183 num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
1184 CLAMPSZ (ptr, sz), objs, 1, type);
1186 /* Special case for HEAP_I - see free & realloc hook. They don't
1187 know whether the input region was HEAP or HEAP_I before
1188 unmapping it. Here we give HEAP a try in case HEAP_I
1190 if ((type == __MF_TYPE_HEAP_I) && (num_overlapping_objs == 0))
1192 num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
1193 CLAMPSZ (ptr, sz), objs, 1, __MF_TYPE_HEAP);
1197 if (UNLIKELY ((num_overlapping_objs != 1) /* more than one overlap */
1198 || ((sz == 0) ? 0 : (sz != (old_obj->high - old_obj->low + 1))) /* size mismatch */
1199 || ((uintptr_t) ptr != old_obj->low))) /* base mismatch */
1201 __mf_violation (ptr, sz,
1202 (uintptr_t) __builtin_return_address (0), NULL,
1203 __MF_VIOL_UNREGISTER);
1207 __mf_unlink_object (old_obj);
1208 __mf_uncache_object (old_obj);
1210 /* Wipe buffer contents if desired. */
1211 if ((__mf_opts.wipe_stack && old_obj->type == __MF_TYPE_STACK)
1212 || (__mf_opts.wipe_heap && (old_obj->type == __MF_TYPE_HEAP
1213 || old_obj->type == __MF_TYPE_HEAP_I)))
1215 memset ((void *) old_obj->low,
1217 (size_t) (old_obj->high - old_obj->low + 1));
1220 /* Manage the object cemetary. */
1221 if (__mf_opts.persistent_count > 0 &&
1222 old_obj->type >= 0 &&
1223 old_obj->type <= __MF_TYPE_MAX_CEM)
1225 old_obj->deallocated_p = 1;
1226 old_obj->dealloc_pc = (uintptr_t) __builtin_return_address (0);
1227 #if HAVE_GETTIMEOFDAY
1228 if (__mf_opts.timestamps)
1229 gettimeofday (& old_obj->dealloc_time, NULL);
1232 old_obj->dealloc_thread = pthread_self ();
1235 if (__mf_opts.backtrace > 0 && old_obj->type == __MF_TYPE_HEAP)
1236 old_obj->dealloc_backtrace_size =
1237 __mf_backtrace (& old_obj->dealloc_backtrace,
1240 /* Encourage this object to be displayed again in current epoch. */
1241 old_obj->description_epoch --;
1243 /* Put this object into the cemetary. This may require this plot to
1244 be recycled, and the previous resident to be designated del_obj. */
1246 unsigned row = old_obj->type;
1247 unsigned plot = __mf_object_dead_head [row];
1249 del_obj = __mf_object_cemetary [row][plot];
1250 __mf_object_cemetary [row][plot] = old_obj;
1252 if (plot == __mf_opts.persistent_count) plot = 0;
1253 __mf_object_dead_head [row] = plot;
1259 if (__mf_opts.print_leaks)
1261 if ((old_obj->read_count + old_obj->write_count) == 0 &&
1262 (old_obj->type == __MF_TYPE_HEAP
1263 || old_obj->type == __MF_TYPE_HEAP_I))
1267 "mudflap warning: unaccessed registered object:\n");
1268 __mf_describe_object (old_obj);
1272 if (del_obj != NULL) /* May or may not equal old_obj. */
1274 if (__mf_opts.backtrace > 0)
1276 CALL_REAL(free, del_obj->alloc_backtrace);
1277 if (__mf_opts.persistent_count > 0)
1279 CALL_REAL(free, del_obj->dealloc_backtrace);
1282 CALL_REAL(free, del_obj);
1287 } /* end switch (__mf_opts.mudflap_mode) */
1290 if (__mf_opts.collect_stats)
1292 __mf_count_unregister ++;
1293 __mf_total_unregister_size += sz;
/* Accumulator fields for the cache-adaptation pass over the splay trees
   (the enclosing "struct tree_stats {" line and closing "};" are missing
   from this listing).  weighted_address_bits[i][b] sums liveness of
   objects whose address bit i equals b — used to pick a discriminating
   shift in __mf_adapt_cache. */
1302 unsigned long total_size;
1303 unsigned live_obj_count;
1304 double total_weight;
1305 double weighted_size;
1306 unsigned long weighted_address_bits [sizeof (uintptr_t) * 8][2];
/* Per-node callback for mfsplay_tree_foreach: folds one live object's
   size/liveness into the tree_stats accumulator and ages its liveness.
   NOTE(review): the `static int` head, braces, the obj_count update, the
   `uintptr_t addr` declaration and its per-bit shift, and the `return 0;`
   tail are missing from this listing (numbering jumps 1323->1330,
   1340->1342) — restore from upstream before editing. */
1312 __mf_adapt_cache_fn (mfsplay_tree_node n, void *param)
1314 __mf_object_t *obj = (__mf_object_t *) n->value;
1315 struct tree_stats *s = (struct tree_stats *) param;
1317 assert (obj != NULL && s != NULL);
1319 /* Exclude never-accessed objects. */
1320 if (obj->read_count + obj->write_count)
1323 s->total_size += (obj->high - obj->low + 1);
1330 /* VERBOSE_TRACE ("analyze low=%p live=%u name=`%s'\n",
1331 (void *) obj->low, obj->liveness, obj->name); */
1333 s->live_obj_count ++;
1334 s->total_weight += (double) obj->liveness;
1336 (double) (obj->high - obj->low + 1) *
1337 (double) obj->liveness;
1340 for (i=0; i<sizeof(uintptr_t) * 8; i++)
1342 unsigned bit = addr & 1;
1343 s->weighted_address_bits[i][bit] += obj->liveness;
1347 /* Age the liveness value. */
1348 obj->liveness >>= 1;
1359 struct tree_stats s;
1360 uintptr_t new_mask = 0;
1361 unsigned char new_shift;
1362 float cache_utilization;
1364 static float smoothed_new_shift = -1.0;
1367 memset (&s, 0, sizeof (s));
1369 mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP), __mf_adapt_cache_fn, (void *) & s);
1370 mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I), __mf_adapt_cache_fn, (void *) & s);
1371 mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STACK), __mf_adapt_cache_fn, (void *) & s);
1372 mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STATIC), __mf_adapt_cache_fn, (void *) & s);
1373 mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_GUESS), __mf_adapt_cache_fn, (void *) & s);
1375 /* Maybe we're dealing with funny aging/adaptation parameters, or an
1376 empty tree. Just leave the cache alone in such cases, rather
1377 than risk dying by division-by-zero. */
1378 if (! (s.obj_count > 0) && (s.live_obj_count > 0) && (s.total_weight > 0.0))
1381 /* Guess a good value for the shift parameter by finding an address bit that is a
1382 good discriminant of lively objects. */
1384 for (i=0; i<sizeof (uintptr_t)*8; i++)
1386 float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
1387 if (max_value < value) max_value = value;
1389 for (i=0; i<sizeof (uintptr_t)*8; i++)
1391 float shoulder_factor = 0.7; /* Include slightly less popular bits too. */
1392 float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
1393 if (value >= max_value * shoulder_factor)
1396 if (smoothed_new_shift < 0) smoothed_new_shift = __mf_lc_shift;
1397 /* Converge toward this slowly to reduce flapping. */
1398 smoothed_new_shift = 0.9*smoothed_new_shift + 0.1*i;
1399 new_shift = (unsigned) (smoothed_new_shift + 0.5);
1400 assert (new_shift < sizeof (uintptr_t)*8);
1402 /* Count number of used buckets. */
1403 cache_utilization = 0.0;
1404 for (i = 0; i < (1 + __mf_lc_mask); i++)
1405 if (__mf_lookup_cache[i].low != 0 || __mf_lookup_cache[i].high != 0)
1406 cache_utilization += 1.0;
1407 cache_utilization /= (1 + __mf_lc_mask);
1409 new_mask |= 0x3ff; /* XXX: force a large cache. */
1410 new_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
1412 VERBOSE_TRACE ("adapt cache obj=%u/%u sizes=%lu/%.0f/%.0f => "
1413 "util=%u%% m=%p s=%u\n",
1414 s.obj_count, s.live_obj_count, s.total_size, s.total_weight, s.weighted_size,
1415 (unsigned)(cache_utilization*100.0), (void *) new_mask, new_shift);
1417 /* We should reinitialize cache if its parameters have changed. */
1418 if (new_mask != __mf_lc_mask ||
1419 new_shift != __mf_lc_shift)
1421 __mf_lc_mask = new_mask;
1422 __mf_lc_shift = new_shift;
1424 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1426 __mf_lookup_cache[0].low = MAXPTR;
1432 /* __mf_find_object[s] */
1434 /* Find overlapping live objecs between [low,high]. Return up to
1435 max_objs of their pointers in objs[]. Return total count of
1436 overlaps (may exceed max_objs). */
1439 __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
1440 __mf_object_t **objs, unsigned max_objs, int type)
1443 mfsplay_tree t = __mf_object_tree (type);
1444 mfsplay_tree_key k = (mfsplay_tree_key) ptr_low;
1447 mfsplay_tree_node n = mfsplay_tree_lookup (t, k);
1448 /* An exact match for base address implies a hit. */
1451 if (count < max_objs)
1452 objs[count] = (__mf_object_t *) n->value;
1456 /* Iterate left then right near this key value to find all overlapping objects. */
1457 for (direction = 0; direction < 2; direction ++)
1459 /* Reset search origin. */
1460 k = (mfsplay_tree_key) ptr_low;
1466 n = (direction == 0 ? mfsplay_tree_successor (t, k) : mfsplay_tree_predecessor (t, k));
1467 if (n == NULL) break;
1468 obj = (__mf_object_t *) n->value;
1470 if (! (obj->low <= ptr_high && obj->high >= ptr_low)) /* No overlap? */
1473 if (count < max_objs)
1474 objs[count] = (__mf_object_t *) n->value;
1477 k = (mfsplay_tree_key) obj->low;
1486 __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
1487 __mf_object_t **objs, unsigned max_objs)
1492 /* Search each splay tree for overlaps. */
1493 for (type = __MF_TYPE_NOACCESS; type <= __MF_TYPE_GUESS; type++)
1495 unsigned c = __mf_find_objects2 (ptr_low, ptr_high, objs, max_objs, type);
1501 else /* NB: C may equal 0 */
1514 /* __mf_link_object */
1517 __mf_link_object (__mf_object_t *node)
1519 mfsplay_tree t = __mf_object_tree (node->type);
1520 mfsplay_tree_insert (t, (mfsplay_tree_key) node->low, (mfsplay_tree_value) node);
1523 /* __mf_unlink_object */
1526 __mf_unlink_object (__mf_object_t *node)
1528 mfsplay_tree t = __mf_object_tree (node->type);
1529 mfsplay_tree_remove (t, (mfsplay_tree_key) node->low);
1532 /* __mf_find_dead_objects */
1534 /* Find overlapping dead objecs between [low,high]. Return up to
1535 max_objs of their pointers in objs[]. Return total count of
1536 overlaps (may exceed max_objs). */
1539 __mf_find_dead_objects (uintptr_t low, uintptr_t high,
1540 __mf_object_t **objs, unsigned max_objs)
1542 if (__mf_opts.persistent_count > 0)
1545 unsigned recollection = 0;
1548 assert (low <= high);
1549 assert (max_objs == 0 || objs != NULL);
1551 /* Widen the search from the most recent plots in each row, looking
1552 backward in time. */
1554 while (recollection < __mf_opts.persistent_count)
1558 for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
1563 plot = __mf_object_dead_head [row];
1564 for (i = 0; i <= recollection; i ++)
1568 /* Look backward through row: it's a circular buffer. */
1569 if (plot > 0) plot --;
1570 else plot = __mf_opts.persistent_count - 1;
1572 obj = __mf_object_cemetary [row][plot];
1573 if (obj && obj->low <= high && obj->high >= low)
1575 /* Found an overlapping dead object! */
1576 if (count < max_objs)
1586 /* Look farther back in time. */
1587 recollection = (recollection * 2) + 1;
1596 /* __mf_describe_object */
1599 __mf_describe_object (__mf_object_t *obj)
1601 static unsigned epoch = 0;
1608 if (__mf_opts.abbreviate && obj->description_epoch == epoch)
1611 "mudflap object %p: name=`%s'\n",
1612 (void *) obj, (obj->name ? obj->name : ""));
1616 obj->description_epoch = epoch;
1619 "mudflap object %p: name=`%s'\n"
1620 "bounds=[%p,%p] size=%lu area=%s check=%ur/%uw liveness=%u%s\n"
1621 "alloc time=%lu.%06lu pc=%p"
1626 (void *) obj, (obj->name ? obj->name : ""),
1627 (void *) obj->low, (void *) obj->high,
1628 (unsigned long) (obj->high - obj->low + 1),
1629 (obj->type == __MF_TYPE_NOACCESS ? "no-access" :
1630 obj->type == __MF_TYPE_HEAP ? "heap" :
1631 obj->type == __MF_TYPE_HEAP_I ? "heap-init" :
1632 obj->type == __MF_TYPE_STACK ? "stack" :
1633 obj->type == __MF_TYPE_STATIC ? "static" :
1634 obj->type == __MF_TYPE_GUESS ? "guess" :
1636 obj->read_count, obj->write_count, obj->liveness,
1637 obj->watching_p ? " watching" : "",
1638 obj->alloc_time.tv_sec, obj->alloc_time.tv_usec,
1639 (void *) obj->alloc_pc
1641 , (unsigned) obj->alloc_thread
1645 if (__mf_opts.backtrace > 0)
1648 for (i=0; i<obj->alloc_backtrace_size; i++)
1649 fprintf (stderr, " %s\n", obj->alloc_backtrace[i]);
1652 if (__mf_opts.persistent_count > 0)
1654 if (obj->deallocated_p)
1656 fprintf (stderr, "dealloc time=%lu.%06lu pc=%p"
1661 obj->dealloc_time.tv_sec, obj->dealloc_time.tv_usec,
1662 (void *) obj->dealloc_pc
1664 , (unsigned) obj->dealloc_thread
1669 if (__mf_opts.backtrace > 0)
1672 for (i=0; i<obj->dealloc_backtrace_size; i++)
1673 fprintf (stderr, " %s\n", obj->dealloc_backtrace[i]);
1681 __mf_report_leaks_fn (mfsplay_tree_node n, void *param)
1683 __mf_object_t *node = (__mf_object_t *) n->value;
1684 unsigned *count = (unsigned *) param;
1689 fprintf (stderr, "Leaked object %u:\n", (*count));
1690 __mf_describe_object (node);
1697 __mf_report_leaks ()
1701 (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP),
1702 __mf_report_leaks_fn, & count);
1703 (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I),
1704 __mf_report_leaks_fn, & count);
1709 /* ------------------------------------------------------------------------ */
1716 BEGIN_RECURSION_PROTECT ();
1718 END_RECURSION_PROTECT ();
1725 if (__mf_opts.collect_stats)
1730 "calls to __mf_check: %lu\n"
1731 " __mf_register: %lu [%luB, %luB, %luB, %luB, %luB]\n"
1732 " __mf_unregister: %lu [%luB]\n"
1733 " __mf_violation: [%lu, %lu, %lu, %lu, %lu]\n",
1735 __mf_count_register,
1736 __mf_total_register_size[0], __mf_total_register_size[1],
1737 __mf_total_register_size[2], __mf_total_register_size[3],
1738 __mf_total_register_size[4], /* XXX */
1739 __mf_count_unregister, __mf_total_unregister_size,
1740 __mf_count_violation[0], __mf_count_violation[1],
1741 __mf_count_violation[2], __mf_count_violation[3],
1742 __mf_count_violation[4]);
1745 "calls with reentrancy: %lu\n", __mf_reentrancy);
1748 " lock contention: %lu\n", __mf_lock_contention);
1751 /* Lookup cache stats. */
1754 unsigned max_reuse = 0;
1755 unsigned num_used = 0;
1756 unsigned num_unused = 0;
1758 for (i = 0; i < LOOKUP_CACHE_SIZE; i++)
1760 if (__mf_lookup_cache_reusecount[i])
1764 if (max_reuse < __mf_lookup_cache_reusecount[i])
1765 max_reuse = __mf_lookup_cache_reusecount[i];
1767 fprintf (stderr, "lookup cache slots used: %u unused: %u peak-reuse: %u\n",
1768 num_used, num_unused, max_reuse);
1772 unsigned live_count;
1773 live_count = __mf_find_objects (MINPTR, MAXPTR, NULL, 0);
1774 fprintf (stderr, "number of live objects: %u\n", live_count);
1777 if (__mf_opts.persistent_count > 0)
1779 unsigned dead_count = 0;
1781 for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
1782 for (plot = 0 ; plot < __mf_opts.persistent_count; plot ++)
1783 if (__mf_object_cemetary [row][plot] != 0)
1785 fprintf (stderr, " zombie objects: %u\n", dead_count);
1788 if (__mf_opts.print_leaks && (__mf_opts.mudflap_mode == mode_check))
1791 extern void * __mf_wrap_alloca_indirect (size_t c);
1793 /* Free up any remaining alloca()'d blocks. */
1794 __mf_wrap_alloca_indirect (0);
1795 __mf_describe_object (NULL); /* Reset description epoch. */
1796 l = __mf_report_leaks ();
1797 fprintf (stderr, "number of leaked objects: %u\n", l);
1801 /* __mf_backtrace */
1804 __mf_backtrace (char ***symbols, void *guess_pc, unsigned guess_omit_levels)
1807 unsigned pc_array_size = __mf_opts.backtrace + guess_omit_levels;
1808 unsigned remaining_size;
1809 unsigned omitted_size = 0;
1811 DECLARE (void, free, void *ptr);
1812 DECLARE (void *, calloc, size_t c, size_t n);
1813 DECLARE (void *, malloc, size_t n);
1815 pc_array = CALL_REAL (calloc, pc_array_size, sizeof (void *) );
1816 #ifdef HAVE_BACKTRACE
1817 pc_array_size = backtrace (pc_array, pc_array_size);
1819 #define FETCH(n) do { if (pc_array_size >= n) { \
1820 pc_array[n] = __builtin_return_address(n); \
1821 if (pc_array[n] == 0) pc_array_size = n; } } while (0)
1823 /* Unroll some calls __builtin_return_address because this function
1824 only takes a literal integer parameter. */
1827 /* XXX: __builtin_return_address sometimes crashes (!) on >0 arguments,
1828 rather than simply returning 0. :-( */
1837 if (pc_array_size > 8) pc_array_size = 9;
1839 if (pc_array_size > 0) pc_array_size = 1;
1845 /* We want to trim the first few levels of the stack traceback,
1846 since they contain libmudflap wrappers and junk. If pc_array[]
1847 ends up containing a non-NULL guess_pc, then trim everything
1848 before that. Otherwise, omit the first guess_omit_levels
1851 if (guess_pc != NULL)
1852 for (i=0; i<pc_array_size; i++)
1853 if (pc_array [i] == guess_pc)
1856 if (omitted_size == 0) /* No match? */
1857 if (pc_array_size > guess_omit_levels)
1858 omitted_size = guess_omit_levels;
1860 remaining_size = pc_array_size - omitted_size;
1862 #ifdef HAVE_BACKTRACE_SYMBOLS
1863 *symbols = backtrace_symbols (pc_array + omitted_size, remaining_size);
1866 /* Let's construct a buffer by hand. It will have <remaining_size>
1867 char*'s at the front, pointing at individual strings immediately
1872 enum { perline = 30 };
1873 buffer = CALL_REAL (malloc, remaining_size * (perline + sizeof(char *)));
1874 pointers = (char **) buffer;
1875 chars = (char *)buffer + (remaining_size * sizeof (char *));
1876 for (i = 0; i < remaining_size; i++)
1878 pointers[i] = chars;
1879 sprintf (chars, "[0x%p]", pc_array [omitted_size + i]);
1880 chars = chars + perline;
1882 *symbols = pointers;
1885 CALL_REAL (free, pc_array);
1887 return remaining_size;
1890 /* ------------------------------------------------------------------------ */
1891 /* __mf_violation */
1894 __mf_violation (void *ptr, size_t sz, uintptr_t pc,
1895 const char *location, int type)
1898 static unsigned violation_number;
1899 DECLARE(void, free, void *ptr);
1901 TRACE ("violation pc=%p location=%s type=%d ptr=%p size=%lu\n",
1903 (location != NULL ? location : ""), type, ptr, (unsigned long) sz);
1905 if (__mf_opts.collect_stats)
1906 __mf_count_violation [(type < 0) ? 0 :
1907 (type > __MF_VIOL_WATCH) ? 0 :
1910 /* Print out a basic warning message. */
1911 if (__mf_opts.verbose_violations)
1914 unsigned num_helpful = 0;
1915 struct timeval now = { 0, 0 };
1916 #if HAVE_GETTIMEOFDAY
1917 gettimeofday (& now, NULL);
1920 violation_number ++;
1923 "mudflap violation %u (%s): time=%lu.%06lu "
1924 "ptr=%p size=%lu\npc=%p%s%s%s\n",
1926 ((type == __MF_VIOL_READ) ? "check/read" :
1927 (type == __MF_VIOL_WRITE) ? "check/write" :
1928 (type == __MF_VIOL_REGISTER) ? "register" :
1929 (type == __MF_VIOL_UNREGISTER) ? "unregister" :
1930 (type == __MF_VIOL_WATCH) ? "watch" : "unknown"),
1931 now.tv_sec, now.tv_usec,
1932 (void *) ptr, (unsigned long)sz, (void *) pc,
1933 (location != NULL ? " location=`" : ""),
1934 (location != NULL ? location : ""),
1935 (location != NULL ? "'" : ""));
1937 if (__mf_opts.backtrace > 0)
1942 num = __mf_backtrace (& symbols, (void *) pc, 2);
1943 /* Note: backtrace_symbols calls malloc(). But since we're in
1944 __mf_violation and presumably __mf_check, it'll detect
1945 recursion, and not put the new string into the database. */
1947 for (i=0; i<num; i++)
1948 fprintf (stderr, " %s\n", symbols[i]);
1950 /* Calling free() here would trigger a violation. */
1951 CALL_REAL(free, symbols);
1955 /* Look for nearby objects. For this, we start with s_low/s_high
1956 pointing to the given area, looking for overlapping objects.
1957 If none show up, widen the search area and keep looking. */
1959 if (sz == 0) sz = 1;
1961 for (dead_p = 0; dead_p <= 1; dead_p ++) /* for dead_p in 0 1 */
1963 enum {max_objs = 3}; /* magic */
1964 __mf_object_t *objs[max_objs];
1965 unsigned num_objs = 0;
1966 uintptr_t s_low, s_high;
1970 s_low = (uintptr_t) ptr;
1971 s_high = CLAMPSZ (ptr, sz);
1973 while (tries < 16) /* magic */
1976 num_objs = __mf_find_dead_objects (s_low, s_high, objs, max_objs);
1978 num_objs = __mf_find_objects (s_low, s_high, objs, max_objs);
1980 if (num_objs) /* good enough */
1985 /* XXX: tune this search strategy. It's too dependent on
1986 sz, which can vary from 1 to very big (when array index
1987 checking) numbers. */
1988 s_low = CLAMPSUB (s_low, (sz * tries * tries));
1989 s_high = CLAMPADD (s_high, (sz * tries * tries));
1992 for (i = 0; i < min (num_objs, max_objs); i++)
1994 __mf_object_t *obj = objs[i];
1995 uintptr_t low = (uintptr_t) ptr;
1996 uintptr_t high = CLAMPSZ (ptr, sz);
1997 unsigned before1 = (low < obj->low) ? obj->low - low : 0;
1998 unsigned after1 = (low > obj->high) ? low - obj->high : 0;
1999 unsigned into1 = (high >= obj->low && low <= obj->high) ? low - obj->low : 0;
2000 unsigned before2 = (high < obj->low) ? obj->low - high : 0;
2001 unsigned after2 = (high > obj->high) ? high - obj->high : 0;
2002 unsigned into2 = (high >= obj->low && low <= obj->high) ? high - obj->low : 0;
2004 fprintf (stderr, "Nearby object %u: checked region begins %uB %s and ends %uB %s\n",
2005 num_helpful + i + 1,
2006 (before1 ? before1 : after1 ? after1 : into1),
2007 (before1 ? "before" : after1 ? "after" : "into"),
2008 (before2 ? before2 : after2 ? after2 : into2),
2009 (before2 ? "before" : after2 ? "after" : "into"));
2010 __mf_describe_object (obj);
2012 num_helpful += num_objs;
2015 fprintf (stderr, "number of nearby objects: %u\n", num_helpful);
2018 /* How to finally handle this violation? */
2019 switch (__mf_opts.violation_mode)
2024 kill (getpid(), SIGSEGV);
2031 snprintf (buf, 128, "gdb --pid=%u", (unsigned) getpid ());
2033 /* XXX: should probably fork() && sleep(GDB_WAIT_PARAMETER)
2034 instead, and let the forked child execlp() gdb. That way, this
2035 subject process can be resumed under the supervision of gdb.
2036 This can't happen now, since system() only returns when gdb
2037 dies. In that case, we need to beware of starting a second
2038 concurrent gdb child upon the next violation. (But if the first
2039 gdb dies, then starting a new one is appropriate.) */
2044 /* ------------------------------------------------------------------------ */
2047 unsigned __mf_watch (void *ptr, size_t sz)
2051 BEGIN_RECURSION_PROTECT ();
2052 rc = __mf_watch_or_not (ptr, sz, 1);
2053 END_RECURSION_PROTECT ();
2058 unsigned __mf_unwatch (void *ptr, size_t sz)
2062 rc = __mf_watch_or_not (ptr, sz, 0);
2069 __mf_watch_or_not (void *ptr, size_t sz, char flag)
2071 uintptr_t ptr_high = CLAMPSZ (ptr, sz);
2072 uintptr_t ptr_low = (uintptr_t) ptr;
2075 TRACE ("%s ptr=%p size=%lu\n",
2076 (flag ? "watch" : "unwatch"), ptr, (unsigned long) sz);
2078 switch (__mf_opts.mudflap_mode)
2088 __mf_object_t **all_ovr_objs;
2091 DECLARE (void *, malloc, size_t c);
2092 DECLARE (void, free, void *p);
2094 obj_count = __mf_find_objects (ptr_low, ptr_high, NULL, 0);
2095 VERBOSE_TRACE (" %u:", obj_count);
2097 all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_t *) * obj_count));
2098 if (all_ovr_objs == NULL) abort ();
2099 n = __mf_find_objects (ptr_low, ptr_high, all_ovr_objs, obj_count);
2100 assert (n == obj_count);
2102 for (n = 0; n < obj_count; n ++)
2104 __mf_object_t *obj = all_ovr_objs[n];
2106 VERBOSE_TRACE (" [%p]", (void *) obj);
2107 if (obj->watching_p != flag)
2109 obj->watching_p = flag;
2112 /* Remove object from cache, to ensure next access
2113 goes through __mf_check(). */
2115 __mf_uncache_object (obj);
2118 CALL_REAL (free, all_ovr_objs);
2128 __mf_sigusr1_handler (int num)
2130 __mf_sigusr1_received ++;
2133 /* Install or remove SIGUSR1 handler as necessary.
2134 Also, respond to a received pending SIGUSR1. */
2136 __mf_sigusr1_respond ()
2138 static int handler_installed;
2141 /* Manage handler */
2142 if (__mf_opts.sigusr1_report && ! handler_installed)
2144 signal (SIGUSR1, __mf_sigusr1_handler);
2145 handler_installed = 1;
2147 else if(! __mf_opts.sigusr1_report && handler_installed)
2149 signal (SIGUSR1, SIG_DFL);
2150 handler_installed = 0;
2154 /* Manage enqueued signals */
2155 if (__mf_sigusr1_received > __mf_sigusr1_handled)
2157 __mf_sigusr1_handled ++;
2158 assert (__mf_state == reentrant);
2160 handler_installed = 0; /* We may need to re-enable signal; this might be a SysV library. */
2165 /* XXX: provide an alternative __assert_fail function that cannot
2166 fail due to libmudflap infinite recursion. */
2170 write_itoa (int fd, unsigned n)
2172 enum x { bufsize = sizeof(n)*4 };
2176 for (i=0; i<bufsize-1; i++)
2178 unsigned digit = n % 10;
2179 buf[bufsize-2-i] = digit + '0';
2183 char *m = & buf [bufsize-2-i];
2184 buf[bufsize-1] = '\0';
2185 write (fd, m, strlen(m));
2193 __assert_fail (const char *msg, const char *file, unsigned line, const char *func)
2195 #define write2(string) write (2, (string), strlen ((string)));
2199 write_itoa (2, (unsigned) pthread_self ());
2202 write2(": assertion failure: `");
2203 write (2, msg, strlen (msg));
2205 write (2, func, strlen (func));
2207 write (2, file, strlen (file));
2209 write_itoa (2, line);
2220 /* Adapted splay tree code, originally from libiberty. It has been
2221 specialized for libmudflap as requested by RMS. */
2224 mfsplay_tree_free (void *p)
2226 DECLARE (void, free, void *p);
2227 CALL_REAL (free, p);
2231 mfsplay_tree_xmalloc (size_t s)
2233 DECLARE (void *, malloc, size_t s);
2234 return CALL_REAL (malloc, s);
2238 static void mfsplay_tree_splay (mfsplay_tree, mfsplay_tree_key);
2239 static mfsplay_tree_node mfsplay_tree_splay_helper (mfsplay_tree,
2241 mfsplay_tree_node *,
2242 mfsplay_tree_node *,
2243 mfsplay_tree_node *);
2244 static void *mfsplay_tree_xmalloc (size_t size);
2245 static void mfsplay_tree_free (void *object);
2249 /* Inline comparison function specialized for libmudflap's key type. */
2251 compare_uintptr_t (mfsplay_tree_key k1, mfsplay_tree_key k2)
2253 if ((uintptr_t) k1 < (uintptr_t) k2)
2255 else if ((uintptr_t) k1 > (uintptr_t) k2)
2262 /* Help splay SP around KEY. PARENT and GRANDPARENT are the parent
2263 and grandparent, respectively, of NODE. */
2265 static mfsplay_tree_node
2266 mfsplay_tree_splay_helper (mfsplay_tree sp,
2267 mfsplay_tree_key key,
2268 mfsplay_tree_node * node,
2269 mfsplay_tree_node * parent,
2270 mfsplay_tree_node * grandparent)
2272 mfsplay_tree_node *next;
2273 mfsplay_tree_node n;
2281 comparison = compare_uintptr_t (key, n->key);
2283 if (comparison == 0)
2284 /* We've found the target. */
2286 else if (comparison < 0)
2287 /* The target is to the left. */
2290 /* The target is to the right. */
2295 /* Check whether our recursion depth is too high. Abort this search,
2296 and signal that a rebalance is required to continue. */
2297 if (sp->depth > sp->max_depth)
2299 sp->rebalance_p = 1;
2303 /* Continue down the tree. */
2305 n = mfsplay_tree_splay_helper (sp, key, next, node, parent);
2308 /* The recursive call will change the place to which NODE
2310 if (*node != n || sp->rebalance_p)
2315 /* NODE is the root. We are done. */
2318 /* First, handle the case where there is no grandparent (i.e.,
2319 *PARENT is the root of the tree.) */
2322 if (n == (*parent)->left)
2336 /* Next handle the cases where both N and *PARENT are left children,
2337 or where both are right children. */
2338 if (n == (*parent)->left && *parent == (*grandparent)->left)
2340 mfsplay_tree_node p = *parent;
2342 (*grandparent)->left = p->right;
2343 p->right = *grandparent;
2349 else if (n == (*parent)->right && *parent == (*grandparent)->right)
2351 mfsplay_tree_node p = *parent;
2353 (*grandparent)->right = p->left;
2354 p->left = *grandparent;
2361 /* Finally, deal with the case where N is a left child, but *PARENT
2362 is a right child, or vice versa. */
2363 if (n == (*parent)->left)
2365 (*parent)->left = n->right;
2367 (*grandparent)->right = n->left;
2368 n->left = *grandparent;
2374 (*parent)->right = n->left;
2376 (*grandparent)->left = n->right;
2377 n->right = *grandparent;
2386 mfsplay_tree_rebalance_helper1 (mfsplay_tree_node n, void *array_ptr)
2388 mfsplay_tree_node **p = array_ptr;
2395 static mfsplay_tree_node
2396 mfsplay_tree_rebalance_helper2 (mfsplay_tree_node * array, unsigned low,
2399 unsigned middle = low + (high - low) / 2;
2400 mfsplay_tree_node n = array[middle];
2402 /* Note that since we're producing a balanced binary tree, it is not a problem
2403 that this function is recursive. */
2404 if (low + 1 <= middle)
2405 n->left = mfsplay_tree_rebalance_helper2 (array, low, middle - 1);
2409 if (middle + 1 <= high)
2410 n->right = mfsplay_tree_rebalance_helper2 (array, middle + 1, high);
2418 /* Rebalance the entire tree. Do this by copying all the node
2419 pointers into an array, then cleverly re-linking them. */
2421 mfsplay_tree_rebalance (mfsplay_tree sp)
2423 mfsplay_tree_node *all_nodes, *all_nodes_1;
2425 if (sp->num_keys <= 2)
2428 all_nodes = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * sp->num_keys);
2430 /* Traverse all nodes to copy their addresses into this array. */
2431 all_nodes_1 = all_nodes;
2432 mfsplay_tree_foreach (sp, mfsplay_tree_rebalance_helper1,
2433 (void *) &all_nodes_1);
2435 /* Relink all the nodes. */
2436 sp->root = mfsplay_tree_rebalance_helper2 (all_nodes, 0, sp->num_keys - 1);
2438 mfsplay_tree_free (all_nodes);
2442 /* Splay SP around KEY. */
2444 mfsplay_tree_splay (mfsplay_tree sp, mfsplay_tree_key key)
2449 /* If we just splayed the tree with the same key, do nothing. */
2450 if (sp->last_splayed_key_p &&
2451 compare_uintptr_t (sp->last_splayed_key, key) == 0)
2454 /* Compute a maximum recursion depth for a splay tree with NUM nodes.
2455 The idea is to limit excessive stack usage if we're facing
2456 degenerate access patterns. Unfortunately such patterns can occur
2457 e.g. during static initialization, where many static objects might
2458 be registered in increasing address sequence, or during a case where
2459 large tree-like heap data structures are allocated quickly.
2461 On x86, this corresponds to roughly 200K of stack usage.
2462 XXX: For libmudflapth, this could be a function of __mf_opts.thread_stack. */
2463 sp->max_depth = 2500;
2464 sp->rebalance_p = sp->depth = 0;
2466 mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);
2467 if (sp->rebalance_p)
2469 mfsplay_tree_rebalance (sp);
2471 sp->rebalance_p = sp->depth = 0;
2472 mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);
2474 if (sp->rebalance_p)
2479 /* Cache this splay key. */
2480 sp->last_splayed_key = key;
2481 sp->last_splayed_key_p = 1;
2486 /* Allocate a new splay tree. */
2490 mfsplay_tree sp = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_s));
2492 sp->last_splayed_key_p = 0;
2500 /* Insert a new node (associating KEY with DATA) into SP. If a
2501 previous node with the indicated KEY exists, its data is replaced
2502 with the new value. Returns the new node. */
2503 static mfsplay_tree_node
2504 mfsplay_tree_insert (mfsplay_tree sp, mfsplay_tree_key key, mfsplay_tree_value value)
2508 mfsplay_tree_splay (sp, key);
2511 comparison = compare_uintptr_t (sp->root->key, key);
2513 if (sp->root && comparison == 0)
2515 /* If the root of the tree already has the indicated KEY, just
2516 replace the value with VALUE. */
2517 sp->root->value = value;
2521 /* Create a new node, and insert it at the root. */
2522 mfsplay_tree_node node;
2524 node = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_node_s));
2526 node->value = value;
2529 node->left = node->right = 0;
2530 else if (comparison < 0)
2532 node->left = sp->root;
2533 node->right = node->left->right;
2534 node->left->right = 0;
2538 node->right = sp->root;
2539 node->left = node->right->left;
2540 node->right->left = 0;
2544 sp->last_splayed_key_p = 0;
2550 /* Remove KEY from SP. It is not an error if it did not exist. */
2553 mfsplay_tree_remove (mfsplay_tree sp, mfsplay_tree_key key)
2555 mfsplay_tree_splay (sp, key);
2556 sp->last_splayed_key_p = 0;
2557 if (sp->root && compare_uintptr_t (sp->root->key, key) == 0)
2559 mfsplay_tree_node left, right;
2560 left = sp->root->left;
2561 right = sp->root->right;
2562 /* Delete the root node itself. */
2563 mfsplay_tree_free (sp->root);
2565 /* One of the children is now the root. Doesn't matter much
2566 which, so long as we preserve the properties of the tree. */
2570 /* If there was a right child as well, hang it off the
2571 right-most leaf of the left child. */
2576 left->right = right;
2584 /* Lookup KEY in SP, returning VALUE if present, and NULL
2587 static mfsplay_tree_node
2588 mfsplay_tree_lookup (mfsplay_tree sp, mfsplay_tree_key key)
2590 mfsplay_tree_splay (sp, key);
2591 if (sp->root && compare_uintptr_t (sp->root->key, key) == 0)
2598 /* Return the immediate predecessor KEY, or NULL if there is no
2599 predecessor. KEY need not be present in the tree. */
2601 static mfsplay_tree_node
2602 mfsplay_tree_predecessor (mfsplay_tree sp, mfsplay_tree_key key)
2605 mfsplay_tree_node node;
2606 /* If the tree is empty, there is certainly no predecessor. */
2609 /* Splay the tree around KEY. That will leave either the KEY
2610 itself, its predecessor, or its successor at the root. */
2611 mfsplay_tree_splay (sp, key);
2612 comparison = compare_uintptr_t (sp->root->key, key);
2613 /* If the predecessor is at the root, just return it. */
2616 /* Otherwise, find the rightmost element of the left subtree. */
2617 node = sp->root->left;
2624 /* Return the immediate successor KEY, or NULL if there is no
2625 successor. KEY need not be present in the tree. */
2627 static mfsplay_tree_node
2628 mfsplay_tree_successor (mfsplay_tree sp, mfsplay_tree_key key)
2631 mfsplay_tree_node node;
2632 /* If the tree is empty, there is certainly no successor. */
2635 /* Splay the tree around KEY. That will leave either the KEY
2636 itself, its predecessor, or its successor at the root. */
2637 mfsplay_tree_splay (sp, key);
2638 comparison = compare_uintptr_t (sp->root->key, key);
2639 /* If the successor is at the root, just return it. */
2642 /* Otherwise, find the leftmost element of the right subtree. */
2643 node = sp->root->right;
2650 /* Call FN, passing it the DATA, for every node in SP, following an
2651 in-order traversal. If FN every returns a non-zero value, the
2652 iteration ceases immediately, and the value is returned.
2653 Otherwise, this function returns 0.
2655 This function simulates recursion using dynamically allocated
2656 arrays, since it may be called from mfsplay_tree_rebalance(), which
2657 in turn means that the tree is already uncomfortably deep for stack
2660 mfsplay_tree_foreach (mfsplay_tree st, mfsplay_tree_foreach_fn fn, void *data)
2662 mfsplay_tree_node *stack1;
2666 enum s { s_left, s_here, s_right, s_up };
2668 if (st->root == NULL) /* => num_keys == 0 */
2671 stack1 = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * st->num_keys);
2672 stack2 = mfsplay_tree_xmalloc (sizeof (char) * st->num_keys);
2675 stack1 [sp] = st->root;
2676 stack2 [sp] = s_left;
2680 mfsplay_tree_node n;
2686 /* Handle each of the four possible states separately. */
2688 /* 1: We're here to traverse the left subtree (if any). */
2691 stack2 [sp] = s_here;
2692 if (n->left != NULL)
2695 stack1 [sp] = n->left;
2696 stack2 [sp] = s_left;
2700 /* 2: We're here to traverse this node. */
2701 else if (s == s_here)
2703 stack2 [sp] = s_right;
2704 val = (*fn) (n, data);
2708 /* 3: We're here to traverse the right subtree (if any). */
2709 else if (s == s_right)
2712 if (n->right != NULL)
2715 stack1 [sp] = n->right;
2716 stack2 [sp] = s_left;
2720 /* 4: We're here after both subtrees (if any) have been traversed. */
2723 /* Pop the stack. */
2724 if (sp == 0) break; /* Popping off the root note: we're finished! */
2732 mfsplay_tree_free (stack1);
2733 mfsplay_tree_free (stack2);