1 /* Mudflap: narrow-pointer bounds-checking by tree rewriting.
2 Copyright (C) 2002, 2003, 2004, 2005, 2006, 2008, 2009
3 Free Software Foundation, Inc.
4 Contributed by Frank Ch. Eigler <fche@redhat.com>
5 and Graydon Hoare <graydon@redhat.com>
6 Splay Tree code originally by Mark Mitchell <mark@markmitchell.com>,
7 adapted from libiberty.
9 This file is part of GCC.
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
21 Under Section 7 of GPL version 3, you are granted additional
22 permissions described in the GCC Runtime Library Exception, version
23 3.1, as published by the Free Software Foundation.
25 You should have received a copy of the GNU General Public License and
26 a copy of the GCC Runtime Library Exception along with this program;
27 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
28 <http://www.gnu.org/licenses/>. */
32 /* These attempt to coax various unix flavours to declare all our
33 needed tidbits in the system headers. */
34 #if !defined(__FreeBSD__) && !defined(__APPLE__)
36 #endif /* Some BSDs break <sys/socket.h> if this is defined. */
40 #define __EXTENSIONS__
42 #define _LARGE_FILE_API
43 #define _XOPEN_SOURCE_EXTENDED 1
47 #include <sys/types.h>
51 #ifdef HAVE_EXECINFO_H
61 #include <sys/types.h>
66 #include "mf-runtime.h"
70 /* ------------------------------------------------------------------------ */
71 /* Splay-tree implementation. */
73 typedef uintptr_t mfsplay_tree_key;
74 typedef void *mfsplay_tree_value;
76 /* Forward declaration for a node in the tree. */
77 typedef struct mfsplay_tree_node_s *mfsplay_tree_node;
79 /* The type of a function used to iterate over the tree. */
80 typedef int (*mfsplay_tree_foreach_fn) (mfsplay_tree_node, void *);
82 /* The nodes in the splay tree. */
83 struct mfsplay_tree_node_s
87 mfsplay_tree_value value;
89 mfsplay_tree_node left;
90 mfsplay_tree_node right;
91 /* XXX: The addition of a parent pointer may eliminate some recursion. */
94 /* The splay tree itself. */
97 /* The root of the tree. */
98 mfsplay_tree_node root;
100 /* The last key value for which the tree has been splayed, but not
102 mfsplay_tree_key last_splayed_key;
103 int last_splayed_key_p;
108 /* Traversal recursion control flags. */
111 unsigned rebalance_p;
113 typedef struct mfsplay_tree_s *mfsplay_tree;
115 static mfsplay_tree mfsplay_tree_new (void);
116 static mfsplay_tree_node mfsplay_tree_insert (mfsplay_tree, mfsplay_tree_key, mfsplay_tree_value);
117 static void mfsplay_tree_remove (mfsplay_tree, mfsplay_tree_key);
118 static mfsplay_tree_node mfsplay_tree_lookup (mfsplay_tree, mfsplay_tree_key);
119 static mfsplay_tree_node mfsplay_tree_predecessor (mfsplay_tree, mfsplay_tree_key);
120 static mfsplay_tree_node mfsplay_tree_successor (mfsplay_tree, mfsplay_tree_key);
121 static int mfsplay_tree_foreach (mfsplay_tree, mfsplay_tree_foreach_fn, void *);
122 static void mfsplay_tree_rebalance (mfsplay_tree sp);
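/* Usage sketch (illustrative; cf. __mf_link_object and
   __mf_find_objects2 below): the object registry keeps one splay tree
   per object type, keyed on each object's low address:

     mfsplay_tree t = mfsplay_tree_new ();
     mfsplay_tree_insert (t, (mfsplay_tree_key) obj->low,
                          (mfsplay_tree_value) obj);
     mfsplay_tree_node n = mfsplay_tree_lookup (t, key);
     if (n == NULL)              /* no exact base-address match */
       n = mfsplay_tree_successor (t, key);
*/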
124 /* ------------------------------------------------------------------------ */
127 #define CTOR __attribute__ ((constructor))
128 #define DTOR __attribute__ ((destructor))
131 /* Codes to describe the context in which a violation occurs. */
132 #define __MF_VIOL_UNKNOWN 0
133 #define __MF_VIOL_READ 1
134 #define __MF_VIOL_WRITE 2
135 #define __MF_VIOL_REGISTER 3
136 #define __MF_VIOL_UNREGISTER 4
137 #define __MF_VIOL_WATCH 5
139 /* Protect against recursive calls. */
142 begin_recursion_protect1 (const char *pf)
144 if (__mf_get_state () == reentrant)
146 write (2, "mf: erroneous reentrancy detected in `", 38);
147 write (2, pf, strlen(pf));
148 write (2, "'\n", 2);
151 __mf_set_state (reentrant);
154 #define BEGIN_RECURSION_PROTECT() \
155 begin_recursion_protect1 (__PRETTY_FUNCTION__)
157 #define END_RECURSION_PROTECT() \
158 __mf_set_state (active)
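/* Sketch of the resulting idiom (see e.g. __mf_check below): each
   locked public entry point brackets its unlocked __mfu_* worker,

     BEGIN_RECURSION_PROTECT ();
     __mfu_check (ptr, sz, type, location);
     END_RECURSION_PROTECT ();

   so any nested call back into the runtime observes the reentrant
   state and is flagged by the diagnostic above instead of recursing. */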
160 /* ------------------------------------------------------------------------ */
161 /* Required globals. */
163 #define LOOKUP_CACHE_MASK_DFL 1023
164 #define LOOKUP_CACHE_SIZE_MAX 65536 /* Allows max CACHE_MASK 0xFFFF */
165 #define LOOKUP_CACHE_SHIFT_DFL 2
167 struct __mf_cache __mf_lookup_cache [LOOKUP_CACHE_SIZE_MAX];
168 uintptr_t __mf_lc_mask = LOOKUP_CACHE_MASK_DFL;
169 unsigned char __mf_lc_shift = LOOKUP_CACHE_SHIFT_DFL;
170 #define LOOKUP_CACHE_SIZE (__mf_lc_mask + 1)
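/* Cache-indexing sketch: __MF_CACHE_INDEX (declared in mf-runtime.h)
   hashes a pointer as roughly

     idx = (((uintptr_t) ptr) >> __mf_lc_shift) & __mf_lc_mask;

   e.g. with the defaults above (shift 2, mask 1023), the address
   0x804a010 lands in entry (0x804a010 >> 2) & 1023 == 4. */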
172 struct __mf_options __mf_opts;
173 int __mf_starting_p = 1;
176 #if defined(HAVE_TLS) && !defined(USE_EMUTLS)
177 __thread enum __mf_state_enum __mf_state_1 = reentrant;
180 enum __mf_state_enum __mf_state_1 = reentrant;
184 pthread_mutex_t __mf_biglock =
185 #ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
186 PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
188 PTHREAD_MUTEX_INITIALIZER;
192 /* Use HAVE_PTHREAD_H here instead of LIBMUDFLAPTH, so that even
193 even a libmudflap.la built without threading support can diagnose whether
194 the application is linked with -lpthread. See __mf_usage() below. */
196 #ifdef _POSIX_THREADS
197 #pragma weak pthread_join
199 #define pthread_join NULL
204 /* ------------------------------------------------------------------------ */
205 /* stats-related globals. */
207 static unsigned long __mf_count_check;
208 static unsigned long __mf_lookup_cache_reusecount [LOOKUP_CACHE_SIZE_MAX];
209 static unsigned long __mf_count_register;
210 static unsigned long __mf_total_register_size [__MF_TYPE_MAX+1];
211 static unsigned long __mf_count_unregister;
212 static unsigned long __mf_total_unregister_size;
213 static unsigned long __mf_count_violation [__MF_VIOL_WATCH+1];
214 static unsigned long __mf_sigusr1_received;
215 static unsigned long __mf_sigusr1_handled;
216 /* not static */ unsigned long __mf_reentrancy;
218 /* not static */ unsigned long __mf_lock_contention;
222 /* ------------------------------------------------------------------------ */
223 /* mode-check-related globals. */
225 typedef struct __mf_object
227 uintptr_t low, high; /* __mf_register parameters */
229 char type; /* __MF_TYPE_something */
230 char watching_p; /* Trigger a VIOL_WATCH on access? */
231 unsigned read_count; /* Number of times __mf_check/read was called on this object. */
232 unsigned write_count; /* Likewise for __mf_check/write. */
233 unsigned liveness; /* A measure of recent checking activity. */
234 unsigned description_epoch; /* Last epoch __mf_describe_object printed this. */
237 struct timeval alloc_time;
238 char **alloc_backtrace;
239 size_t alloc_backtrace_size;
241 pthread_t alloc_thread;
245 uintptr_t dealloc_pc;
246 struct timeval dealloc_time;
247 char **dealloc_backtrace;
248 size_t dealloc_backtrace_size;
250 pthread_t dealloc_thread;
254 /* Live objects: splay trees, separated by type, ordered on .low (base address). */
255 /* Actually stored as static vars within lookup function below. */
257 /* Dead objects: circular arrays; _MIN_CEM .. _MAX_CEM only */
258 static unsigned __mf_object_dead_head[__MF_TYPE_MAX_CEM+1]; /* next empty spot */
259 static __mf_object_t *__mf_object_cemetary[__MF_TYPE_MAX_CEM+1][__MF_PERSIST_MAX];
262 /* ------------------------------------------------------------------------ */
263 /* Forward function declarations */
265 void __mf_init () CTOR;
266 static void __mf_sigusr1_respond ();
267 static unsigned __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
268 __mf_object_t **objs, unsigned max_objs);
269 static unsigned __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
270 __mf_object_t **objs, unsigned max_objs, int type);
271 static unsigned __mf_find_dead_objects (uintptr_t ptr_low, uintptr_t ptr_high,
272 __mf_object_t **objs, unsigned max_objs);
273 static void __mf_adapt_cache ();
274 static void __mf_describe_object (__mf_object_t *obj);
275 static unsigned __mf_watch_or_not (void *ptr, size_t sz, char flag);
276 static mfsplay_tree __mf_object_tree (int type);
277 static void __mf_link_object (__mf_object_t *node);
278 static void __mf_unlink_object (__mf_object_t *node);
281 /* ------------------------------------------------------------------------ */
282 /* Configuration engine */
285 __mf_set_default_options ()
287 memset (& __mf_opts, 0, sizeof (__mf_opts));
289 __mf_opts.adapt_cache = 1000003;
290 __mf_opts.abbreviate = 1;
291 __mf_opts.verbose_violations = 1;
292 __mf_opts.free_queue_length = 4;
293 __mf_opts.persistent_count = 100;
294 __mf_opts.crumple_zone = 32;
295 __mf_opts.backtrace = 4;
296 __mf_opts.timestamps = 1;
297 __mf_opts.mudflap_mode = mode_check;
298 __mf_opts.violation_mode = viol_nop;
299 #ifdef HAVE___LIBC_FREERES
300 __mf_opts.call_libc_freeres = 1;
302 __mf_opts.heur_std_data = 1;
304 __mf_opts.thread_stack = 0;
308 static struct mudoption
323 "mudflaps do nothing",
324 set_option, (unsigned)mode_nop, (unsigned *)&__mf_opts.mudflap_mode},
326 "mudflaps populate object tree",
327 set_option, (unsigned)mode_populate, (unsigned *)&__mf_opts.mudflap_mode},
329 "mudflaps check for memory violations",
330 set_option, (unsigned)mode_check, (unsigned *)&__mf_opts.mudflap_mode},
332 "mudflaps always cause violations (diagnostic)",
333 set_option, (unsigned)mode_violate, (unsigned *)&__mf_opts.mudflap_mode},
336 "violations do not change program execution",
337 set_option, (unsigned)viol_nop, (unsigned *)&__mf_opts.violation_mode},
339 "violations cause a call to abort()",
340 set_option, (unsigned)viol_abort, (unsigned *)&__mf_opts.violation_mode},
342 "violations are promoted to SIGSEGV signals",
343 set_option, (unsigned)viol_segv, (unsigned *)&__mf_opts.violation_mode},
345 "violations fork a gdb process attached to current program",
346 set_option, (unsigned)viol_gdb, (unsigned *)&__mf_opts.violation_mode},
348 "trace calls to mudflap runtime library",
349 set_option, 1, &__mf_opts.trace_mf_calls},
351 "trace internal events within mudflap runtime library",
352 set_option, 1, &__mf_opts.verbose_trace},
354 "collect statistics on mudflap's operation",
355 set_option, 1, &__mf_opts.collect_stats},
358 "print report upon SIGUSR1",
359 set_option, 1, &__mf_opts.sigusr1_report},
361 {"internal-checking",
362 "perform more expensive internal checking",
363 set_option, 1, &__mf_opts.internal_checking},
365 "print any memory leaks at program shutdown",
366 set_option, 1, &__mf_opts.print_leaks},
367 #ifdef HAVE___LIBC_FREERES
369 "call glibc __libc_freeres at shutdown for better leak data",
370 set_option, 1, &__mf_opts.call_libc_freeres},
372 {"check-initialization",
373 "detect uninitialized object reads",
374 set_option, 1, &__mf_opts.check_initialization},
375 {"verbose-violations",
376 "print verbose messages when memory violations occur",
377 set_option, 1, &__mf_opts.verbose_violations},
379 "abbreviate repetitive listings",
380 set_option, 1, &__mf_opts.abbreviate},
382 "track object lifetime timestamps",
383 set_option, 1, &__mf_opts.timestamps},
385 "ignore read accesses - assume okay",
386 set_option, 1, &__mf_opts.ignore_reads},
388 "wipe stack objects at unwind",
389 set_option, 1, &__mf_opts.wipe_stack},
391 "wipe heap objects at free",
392 set_option, 1, &__mf_opts.wipe_heap},
394 "support /proc/self/map heuristics",
395 set_option, 1, &__mf_opts.heur_proc_map},
397 "enable a simple upper stack bound heuristic",
398 set_option, 1, &__mf_opts.heur_stack_bound},
400 "support _start.._end heuristics",
401 set_option, 1, &__mf_opts.heur_start_end},
403 "register standard library data (argv, errno, stdin, ...)",
404 set_option, 1, &__mf_opts.heur_std_data},
405 {"free-queue-length",
406 "queue N deferred free() calls before performing them",
407 read_integer_option, 0, &__mf_opts.free_queue_length},
409 "keep a history of N unregistered regions",
410 read_integer_option, 0, &__mf_opts.persistent_count},
412 "surround allocations with crumple zones of N bytes",
413 read_integer_option, 0, &__mf_opts.crumple_zone},
414 /* XXX: not type-safe.
416 "set lookup cache size mask to N (2**M - 1)",
417 read_integer_option, 0, (int *)(&__mf_lc_mask)},
419 "set lookup cache pointer shift",
420 read_integer_option, 0, (int *)(&__mf_lc_shift)},
423 "adapt mask/shift parameters after N cache misses",
424 read_integer_option, 1, &__mf_opts.adapt_cache},
426 "keep an N-level stack trace of each call context",
427 read_integer_option, 0, &__mf_opts.backtrace},
430 "override thread stacks allocation: N kB",
431 read_integer_option, 0, &__mf_opts.thread_stack},
433 {0, 0, set_option, 0, NULL}
439 struct mudoption *opt;
442 "This is a %s%sGCC \"mudflap\" memory-checked binary.\n"
443 "Mudflap is Copyright (C) 2002-2009 Free Software Foundation, Inc.\n"
445 "The mudflap code can be controlled by an environment variable:\n"
447 "$ export MUDFLAP_OPTIONS='<options>'\n"
448 "$ <mudflapped_program>\n"
450 "where <options> is a space-separated list of \n"
451 "any of the following options. Use `-no-OPTION' to disable options.\n"
454 (pthread_join ? "multi-threaded " : "single-threaded "),
464 /* XXX: The multi-threaded thread-unaware combination is bad. */
466 for (opt = options; opt->name; opt++)
468 int default_p = (opt->value == * opt->target);
474 fprintf (stderr, "-%-23.23s %s", opt->name, opt->description);
476 fprintf (stderr, " [active]\n");
478 fprintf (stderr, "\n");
480 case read_integer_option:
481 strncpy (buf, opt->name, 128);
482 strncpy (buf + strlen (opt->name), "=N", 2);
483 fprintf (stderr, "-%-23.23s %s", buf, opt->description);
484 fprintf (stderr, " [%d]\n", * opt->target);
490 fprintf (stderr, "\n");
495 __mf_set_options (const char *optstr)
499 BEGIN_RECURSION_PROTECT ();
500 rc = __mfu_set_options (optstr);
501 /* XXX: It's not really that easy. A change to a bunch of parameters
502 can require updating auxiliary state or risk crashing:
503 free_queue_length, crumple_zone ... */
504 END_RECURSION_PROTECT ();
511 __mfu_set_options (const char *optstr)
513 struct mudoption *opts = 0;
517 const char *saved_optstr = optstr;
519 /* XXX: bounds-check for optstr! */
536 if (*optstr == '?' ||
537 strncmp (optstr, "help", 4) == 0)
539 /* Caller will print help and exit. */
543 if (strncmp (optstr, "no-", 3) == 0)
546 optstr = & optstr[3];
549 for (opts = options; opts->name; opts++)
551 if (strncmp (optstr, opts->name, strlen (opts->name)) == 0)
553 optstr += strlen (opts->name);
554 assert (opts->target);
561 *(opts->target) = opts->value;
563 case read_integer_option:
564 if (! negate && (*optstr == '=' && *(optstr+1)))
567 tmp = strtol (optstr, &nxt, 10);
568 if ((optstr != nxt) && (tmp != LONG_MAX))
571 *(opts->target) = (int)tmp;
585 "warning: unrecognized string '%s' in mudflap options\n",
587 optstr += strlen (optstr);
593 /* Special post-processing: bound __mf_lc_mask and free_queue_length for security. */
594 __mf_lc_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
595 __mf_opts.free_queue_length &= (__MF_FREEQ_MAX - 1);
597 /* Clear the lookup cache, in case the parameters got changed. */
599 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
601 __mf_lookup_cache[0].low = MAXPTR;
603 TRACE ("set options from `%s'\n", saved_optstr);
605 /* Call this unconditionally, in case -sigusr1-report was toggled. */
606 __mf_sigusr1_respond ();
615 __mf_resolve_single_dynamic (struct __mf_dynamic_entry *e)
620 if (e->pointer) return;
623 if (e->version != NULL && e->version[0] != '\0') /* non-null/empty */
624 e->pointer = dlvsym (RTLD_NEXT, e->name, e->version);
627 e->pointer = dlsym (RTLD_NEXT, e->name);
633 fprintf (stderr, "mf: error in dlsym(\"%s\"): %s\n",
639 fprintf (stderr, "mf: dlsym(\"%s\") = NULL\n", e->name);
646 __mf_resolve_dynamics ()
649 for (i = 0; i < dyn_INITRESOLVE; i++)
650 __mf_resolve_single_dynamic (& __mf_dynamic[i]);
654 /* NB: order must match enums in mf-impl.h */
655 struct __mf_dynamic_entry __mf_dynamic [] =
657 {NULL, "calloc", NULL},
658 {NULL, "free", NULL},
659 {NULL, "malloc", NULL},
660 {NULL, "mmap", NULL},
661 {NULL, "munmap", NULL},
662 {NULL, "realloc", NULL},
663 {NULL, "DUMMY", NULL}, /* dyn_INITRESOLVE */
665 {NULL, "pthread_create", PTHREAD_CREATE_VERSION},
666 {NULL, "pthread_join", NULL},
667 {NULL, "pthread_exit", NULL}
675 /* ------------------------------------------------------------------------ */
677 /* Lookup & manage automatic initialization of the five or so splay trees. */
679 __mf_object_tree (int type)
681 static mfsplay_tree trees [__MF_TYPE_MAX+1];
682 assert (type >= 0 && type <= __MF_TYPE_MAX);
683 if (UNLIKELY (trees[type] == NULL))
684 trees[type] = mfsplay_tree_new ();
694 /* Return if initialization has already been done. */
695 if (LIKELY (__mf_starting_p == 0))
698 #if defined(__FreeBSD__) && defined(LIBMUDFLAPTH)
702 #endif /* Prime mutex which calls calloc upon first lock to avoid deadlock. */
704 /* This initial bootstrap phase requires that __mf_starting_p = 1. */
706 __mf_resolve_dynamics ();
710 __mf_set_state (active);
712 __mf_set_default_options ();
714 ov = getenv ("MUDFLAP_OPTIONS");
717 int rc = __mfu_set_options (ov);
725 /* Initialize to a non-zero description epoch. */
726 __mf_describe_object (NULL);
728 #define REG_RESERVED(obj) \
729 __mf_register (& obj, sizeof(obj), __MF_TYPE_NOACCESS, # obj)
731 REG_RESERVED (__mf_lookup_cache);
732 REG_RESERVED (__mf_lc_mask);
733 REG_RESERVED (__mf_lc_shift);
734 /* XXX: others of our statics? */
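/* For instance, REG_RESERVED (__mf_lc_mask) expands to
   __mf_register (& __mf_lc_mask, sizeof (__mf_lc_mask),
                  __MF_TYPE_NOACCESS, "__mf_lc_mask"),
   so stray accesses to the runtime's own control variables trap. */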
736 /* Prevent access to *NULL. */
737 __mf_register (MINPTR, 1, __MF_TYPE_NOACCESS, "NULL");
738 __mf_lookup_cache[0].low = (uintptr_t) -1;
744 __wrap_main (int argc, char* argv[])
746 extern char **environ;
748 extern int __real_main ();
749 static int been_here = 0;
751 if (__mf_opts.heur_std_data && ! been_here)
756 __mf_register (argv, sizeof(char *)*(argc+1), __MF_TYPE_STATIC, "argv[]");
757 for (i=0; i<argc; i++)
759 unsigned j = strlen (argv[i]);
760 __mf_register (argv[i], j+1, __MF_TYPE_STATIC, "argv element");
765 char *e = environ[i];
767 if (e == NULL) break;
768 j = strlen (environ[i]);
769 __mf_register (environ[i], j+1, __MF_TYPE_STATIC, "environ element");
771 __mf_register (environ, sizeof(char *)*(i+1), __MF_TYPE_STATIC, "environ[]");
773 __mf_register (& errno, sizeof (errno), __MF_TYPE_STATIC, "errno area");
775 __mf_register (stdin, sizeof (*stdin), __MF_TYPE_STATIC, "stdin");
776 __mf_register (stdout, sizeof (*stdout), __MF_TYPE_STATIC, "stdout");
777 __mf_register (stderr, sizeof (*stderr), __MF_TYPE_STATIC, "stderr");
779 /* Make some effort to register ctype.h static arrays. */
780 /* XXX: e.g., on Solaris, may need to register __ctype, _ctype, __ctype_mask, __toupper, etc. */
781 /* On modern Linux GLIBC, these are thread-specific and changeable, and are dealt
782 with in mf-hooks2.c. */
786 return main (argc, argv, environ);
788 return __real_main (argc, argv, environ);
794 extern void __mf_fini () DTOR;
797 TRACE ("__mf_fini\n");
801 /* Since we didn't populate the tree for allocations in constructors
802 before __mf_init, we cannot check destructors after __mf_fini. */
803 __mf_opts.mudflap_mode = mode_nop;
809 /* ------------------------------------------------------------------------ */
812 void __mf_check (void *ptr, size_t sz, int type, const char *location)
815 BEGIN_RECURSION_PROTECT ();
816 __mfu_check (ptr, sz, type, location);
817 END_RECURSION_PROTECT ();
822 void __mfu_check (void *ptr, size_t sz, int type, const char *location)
824 unsigned entry_idx = __MF_CACHE_INDEX (ptr);
825 struct __mf_cache *entry = & __mf_lookup_cache [entry_idx];
826 int judgement = 0; /* 0=undecided; <0=violation; >0=okay */
827 uintptr_t ptr_low = (uintptr_t) ptr;
828 uintptr_t ptr_high = CLAMPSZ (ptr, sz);
829 struct __mf_cache old_entry = *entry;
831 if (UNLIKELY (__mf_opts.sigusr1_report))
832 __mf_sigusr1_respond ();
833 if (UNLIKELY (__mf_opts.ignore_reads && type == 0))
836 TRACE ("check ptr=%p b=%u size=%lu %s location=`%s'\n",
837 ptr, entry_idx, (unsigned long)sz,
838 (type == 0 ? "read" : "write"), location);
840 switch (__mf_opts.mudflap_mode)
843 /* It is tempting to poison the cache here similarly to
844 mode_populate. However that eliminates a valuable
845 distinction between these two modes. mode_nop is useful to
846 let a user count & trace every single check / registration
847 call. mode_populate is useful to let a program run fast
854 entry->low = ptr_low;
855 entry->high = ptr_high;
861 unsigned heuristics = 0;
863 /* Advance aging/adaptation counters. */
864 static unsigned adapt_count;
866 if (UNLIKELY (__mf_opts.adapt_cache > 0 &&
867 adapt_count > __mf_opts.adapt_cache))
873 /* Looping only occurs if heuristics were triggered. */
874 while (judgement == 0)
876 DECLARE (void, free, void *p);
877 __mf_object_t* ovr_obj[1];
879 __mf_object_t** all_ovr_obj = NULL;
880 __mf_object_t** dealloc_me = NULL;
883 /* Find all overlapping objects. Be optimistic that there is just one. */
884 obj_count = __mf_find_objects (ptr_low, ptr_high, ovr_obj, 1);
885 if (UNLIKELY (obj_count > 1))
887 /* Allocate a real buffer and do the search again. */
888 DECLARE (void *, malloc, size_t c);
890 all_ovr_obj = CALL_REAL (malloc, (sizeof (__mf_object_t *) *
892 if (all_ovr_obj == NULL) abort ();
893 n = __mf_find_objects (ptr_low, ptr_high, all_ovr_obj, obj_count);
894 assert (n == obj_count);
895 dealloc_me = all_ovr_obj;
899 all_ovr_obj = ovr_obj;
903 /* Update object statistics. */
904 for (i = 0; i < obj_count; i++)
906 __mf_object_t *obj = all_ovr_obj[i];
907 assert (obj != NULL);
908 if (type == __MF_CHECK_READ)
915 /* Iterate over the various objects. There are a number of special cases. */
916 for (i = 0; i < obj_count; i++)
918 __mf_object_t *obj = all_ovr_obj[i];
920 /* Any __MF_TYPE_NOACCESS hit is bad. */
921 if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
924 /* Any object with a watch flag is bad. */
925 if (UNLIKELY (obj->watching_p))
926 judgement = -2; /* trigger VIOL_WATCH */
928 /* A read from an uninitialized object is bad. */
929 if (UNLIKELY (__mf_opts.check_initialization
931 && type == __MF_CHECK_READ
933 && obj->write_count == 0
934 /* uninitialized (heap) */
935 && obj->type == __MF_TYPE_HEAP))
939 /* We now know that the access spans no invalid objects. */
940 if (LIKELY (judgement >= 0))
941 for (i = 0; i < obj_count; i++)
943 __mf_object_t *obj = all_ovr_obj[i];
945 /* Is this access entirely contained within this object? */
946 if (LIKELY (ptr_low >= obj->low && ptr_high <= obj->high))
949 entry->low = obj->low;
950 entry->high = obj->high;
955 /* This access runs off the end of one valid object. That
956 could be okay, if other valid objects fill in all the
957 holes. We allow this only for HEAP and GUESS type
958 objects. Accesses to STATIC and STACK variables
959 should not be allowed to span. */
960 if (UNLIKELY ((judgement == 0) && (obj_count > 1)))
962 unsigned uncovered = 0;
963 for (i = 0; i < obj_count; i++)
965 __mf_object_t *obj = all_ovr_obj[i];
966 int j, uncovered_low_p, uncovered_high_p;
967 uintptr_t ptr_lower, ptr_higher;
969 uncovered_low_p = ptr_low < obj->low;
970 ptr_lower = CLAMPSUB (obj->low, 1);
971 uncovered_high_p = ptr_high > obj->high;
972 ptr_higher = CLAMPADD (obj->high, 1);
974 for (j = 0; j < obj_count; j++)
976 __mf_object_t *obj2 = all_ovr_obj[j];
978 if (i == j) continue;
980 /* Filter out objects that cannot be spanned across. */
981 if (obj2->type == __MF_TYPE_STACK
982 || obj2->type == __MF_TYPE_STATIC)
985 /* Consider a side "covered" if obj2 includes
986 the next byte on that side. */
988 && (ptr_lower >= obj2->low && ptr_lower <= obj2->high))
991 && (ptr_high >= obj2->low && ptr_higher <= obj2->high))
992 uncovered_high_p = 0;
995 if (uncovered_low_p || uncovered_high_p)
999 /* Success if no overlapping objects are uncovered. */
1005 if (dealloc_me != NULL)
1006 CALL_REAL (free, dealloc_me);
1008 /* If the judgment is still unknown at this stage, loop
1009 around at most one more time. */
1012 if (heuristics++ < 2) /* XXX parametrize this number? */
1013 judgement = __mf_heuristic_check (ptr_low, ptr_high);
1027 if (__mf_opts.collect_stats)
1029 __mf_count_check ++;
1031 if (LIKELY (old_entry.low != entry->low || old_entry.high != entry->high))
1032 /* && (old_entry.low != 0) && (old_entry.high != 0)) */
1033 __mf_lookup_cache_reusecount [entry_idx] ++;
1036 if (UNLIKELY (judgement < 0))
1037 __mf_violation (ptr, sz,
1038 (uintptr_t) __builtin_return_address (0), location,
1039 ((judgement == -1) ?
1040 (type == __MF_CHECK_READ ? __MF_VIOL_READ : __MF_VIOL_WRITE) :
1045 static __mf_object_t *
1046 __mf_insert_new_object (uintptr_t low, uintptr_t high, int type,
1047 const char *name, uintptr_t pc)
1049 DECLARE (void *, calloc, size_t c, size_t n);
1051 __mf_object_t *new_obj;
1052 new_obj = CALL_REAL (calloc, 1, sizeof(__mf_object_t));
1054 new_obj->high = high;
1055 new_obj->type = type;
1056 new_obj->name = name;
1057 new_obj->alloc_pc = pc;
1058 #if HAVE_GETTIMEOFDAY
1059 if (__mf_opts.timestamps)
1060 gettimeofday (& new_obj->alloc_time, NULL);
1063 new_obj->alloc_thread = pthread_self ();
1066 if (__mf_opts.backtrace > 0 && (type == __MF_TYPE_HEAP || type == __MF_TYPE_HEAP_I))
1067 new_obj->alloc_backtrace_size =
1068 __mf_backtrace (& new_obj->alloc_backtrace,
1071 __mf_link_object (new_obj);
1077 __mf_uncache_object (__mf_object_t *old_obj)
1079 /* Remove any low/high pointers for this object from the lookup cache. */
1081 /* Can it possibly exist in the cache? */
1082 if (LIKELY (old_obj->read_count + old_obj->write_count))
1084 uintptr_t low = old_obj->low;
1085 uintptr_t high = old_obj->high;
1086 struct __mf_cache *entry;
1088 if ((high - low) >= (__mf_lc_mask << __mf_lc_shift))
1090 /* For large objects (>= cache size - 1) check the whole cache. */
1091 entry = & __mf_lookup_cache [0];
1092 for (i = 0; i <= __mf_lc_mask; i++, entry++)
1094 /* NB: the "||" in the following test permits this code to
1095 tolerate the situation introduced by __mf_check over
1096 contiguous objects, where a cache entry spans several
1098 if (entry->low == low || entry->high == high)
1100 entry->low = MAXPTR;
1101 entry->high = MINPTR;
1107 /* Object is now smaller than the cache size. */
1108 unsigned entry_low_idx = __MF_CACHE_INDEX (low);
1109 unsigned entry_high_idx = __MF_CACHE_INDEX (high);
1110 if (entry_low_idx <= entry_high_idx)
1112 entry = & __mf_lookup_cache [entry_low_idx];
1113 for (i = entry_low_idx; i <= entry_high_idx; i++, entry++)
1115 /* NB: the "||" in the following test permits this code to
1116 tolerate the situation introduced by __mf_check over
1117 contiguous objects, where a cache entry spans several
1119 if (entry->low == low || entry->high == high)
1121 entry->low = MAXPTR;
1122 entry->high = MINPTR;
1128 /* Object wrapped around the end of the cache. First search
1129 from low to end of cache and then from 0 to high. */
1130 entry = & __mf_lookup_cache [entry_low_idx];
1131 for (i = entry_low_idx; i <= __mf_lc_mask; i++, entry++)
1133 /* NB: the "||" in the following test permits this code to
1134 tolerate the situation introduced by __mf_check over
1135 contiguous objects, where a cache entry spans several
1137 if (entry->low == low || entry->high == high)
1139 entry->low = MAXPTR;
1140 entry->high = MINPTR;
1143 entry = & __mf_lookup_cache [0];
1144 for (i = 0; i <= entry_high_idx; i++, entry++)
1146 /* NB: the "||" in the following test permits this code to
1147 tolerate the situation introduced by __mf_check over
1148 contiguous objects, where a cache entry spans several
1150 if (entry->low == low || entry->high == high)
1152 entry->low = MAXPTR;
1153 entry->high = MINPTR;
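/* Worked example for the wrap-around case above (illustrative): with
   __mf_lc_mask == 1023, an object whose low address hashes to index
   1020 and whose high address hashes to index 3 is flushed by
   scanning entries 1020..1023 and then 0..3. */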
1163 __mf_register (void *ptr, size_t sz, int type, const char *name)
1166 BEGIN_RECURSION_PROTECT ();
1167 __mfu_register (ptr, sz, type, name);
1168 END_RECURSION_PROTECT ();
1174 __mfu_register (void *ptr, size_t sz, int type, const char *name)
1176 TRACE ("register ptr=%p size=%lu type=%x name='%s'\n",
1177 ptr, (unsigned long) sz, type, name ? name : "");
1179 if (__mf_opts.collect_stats)
1181 __mf_count_register ++;
1182 __mf_total_register_size [(type < 0) ? 0 :
1183 (type > __MF_TYPE_MAX) ? 0 :
1187 if (UNLIKELY (__mf_opts.sigusr1_report))
1188 __mf_sigusr1_respond ();
1190 switch (__mf_opts.mudflap_mode)
1196 __mf_violation (ptr, sz, (uintptr_t) __builtin_return_address (0), NULL,
1197 __MF_VIOL_REGISTER);
1201 /* Clear the cache. */
1202 /* XXX: why the entire cache? */
1204 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1206 __mf_lookup_cache[0].low = MAXPTR;
1211 __mf_object_t *ovr_objs [1];
1212 unsigned num_overlapping_objs;
1213 uintptr_t low = (uintptr_t) ptr;
1214 uintptr_t high = CLAMPSZ (ptr, sz);
1215 uintptr_t pc = (uintptr_t) __builtin_return_address (0);
1217 /* Treat unknown size indication as 1. */
1218 if (UNLIKELY (sz == 0)) sz = 1;
1220 /* Look for objects only of the same type. This will e.g. permit a registration
1221 of a STATIC overlapping with a GUESS, and a HEAP with a NOACCESS.
1222 Harmful overlaps will, however, be detected at __mf_check time. */
1223 num_overlapping_objs = __mf_find_objects2 (low, high, ovr_objs, 1, type);
1225 /* Handle overlaps. */
1226 if (UNLIKELY (num_overlapping_objs > 0))
1228 __mf_object_t *ovr_obj = ovr_objs[0];
1230 /* Accept certain specific duplication pairs. */
1231 if (((type == __MF_TYPE_STATIC) || (type == __MF_TYPE_GUESS))
1232 && ovr_obj->low == low
1233 && ovr_obj->high == high
1234 && ovr_obj->type == type)
1236 /* Duplicate registration for static objects may come
1237 from distinct compilation units. */
1238 VERBOSE_TRACE ("harmless duplicate reg %p-%p `%s'\n",
1239 (void *) low, (void *) high,
1240 (ovr_obj->name ? ovr_obj->name : ""));
1244 /* Alas, a genuine violation. */
1247 /* Two or more *real* mappings here. */
1248 __mf_violation ((void *) ptr, sz,
1249 (uintptr_t) __builtin_return_address (0), NULL,
1250 __MF_VIOL_REGISTER);
1253 else /* No overlapping objects: AOK. */
1254 __mf_insert_new_object (low, high, type, name, pc);
1256 /* We could conceivably call __mf_check() here to prime the cache,
1257 but then the read_count/write_count field is not reliable. */
1260 } /* end switch (__mf_opts.mudflap_mode) */
1265 __mf_unregister (void *ptr, size_t sz, int type)
1268 BEGIN_RECURSION_PROTECT ();
1269 __mfu_unregister (ptr, sz, type);
1270 END_RECURSION_PROTECT ();
1276 __mfu_unregister (void *ptr, size_t sz, int type)
1278 DECLARE (void, free, void *ptr);
1280 if (UNLIKELY (__mf_opts.sigusr1_report))
1281 __mf_sigusr1_respond ();
1283 TRACE ("unregister ptr=%p size=%lu type=%x\n", ptr, (unsigned long) sz, type);
1285 switch (__mf_opts.mudflap_mode)
1291 __mf_violation (ptr, sz,
1292 (uintptr_t) __builtin_return_address (0), NULL,
1293 __MF_VIOL_UNREGISTER);
1297 /* Clear the cache. */
1299 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1301 __mf_lookup_cache[0].low = MAXPTR;
1306 __mf_object_t *old_obj = NULL;
1307 __mf_object_t *del_obj = NULL; /* Object to actually delete. */
1308 __mf_object_t *objs[1] = {NULL};
1309 unsigned num_overlapping_objs;
1311 num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
1312 CLAMPSZ (ptr, sz), objs, 1, type);
1314 /* Special case for HEAP_I - see free & realloc hook. They don't
1315 know whether the input region was HEAP or HEAP_I before
1316 unmapping it. Here we give HEAP a try in case HEAP_I
1318 if ((type == __MF_TYPE_HEAP_I) && (num_overlapping_objs == 0))
1320 num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
1321 CLAMPSZ (ptr, sz), objs, 1, __MF_TYPE_HEAP);
1325 if (UNLIKELY ((num_overlapping_objs != 1) /* more than one overlap */
1326 || ((sz == 0) ? 0 : (sz != (old_obj->high - old_obj->low + 1))) /* size mismatch */
1327 || ((uintptr_t) ptr != old_obj->low))) /* base mismatch */
1329 __mf_violation (ptr, sz,
1330 (uintptr_t) __builtin_return_address (0), NULL,
1331 __MF_VIOL_UNREGISTER);
1335 __mf_unlink_object (old_obj);
1336 __mf_uncache_object (old_obj);
1338 /* Wipe buffer contents if desired. */
1339 if ((__mf_opts.wipe_stack && old_obj->type == __MF_TYPE_STACK)
1340 || (__mf_opts.wipe_heap && (old_obj->type == __MF_TYPE_HEAP
1341 || old_obj->type == __MF_TYPE_HEAP_I)))
1343 memset ((void *) old_obj->low,
1345 (size_t) (old_obj->high - old_obj->low + 1));
1348 /* Manage the object cemetary. */
1349 if (__mf_opts.persistent_count > 0
1350 && (unsigned) old_obj->type <= __MF_TYPE_MAX_CEM)
1352 old_obj->deallocated_p = 1;
1353 old_obj->dealloc_pc = (uintptr_t) __builtin_return_address (0);
1354 #if HAVE_GETTIMEOFDAY
1355 if (__mf_opts.timestamps)
1356 gettimeofday (& old_obj->dealloc_time, NULL);
1359 old_obj->dealloc_thread = pthread_self ();
1362 if (__mf_opts.backtrace > 0 && old_obj->type == __MF_TYPE_HEAP)
1363 old_obj->dealloc_backtrace_size =
1364 __mf_backtrace (& old_obj->dealloc_backtrace,
1367 /* Encourage this object to be displayed again in current epoch. */
1368 old_obj->description_epoch --;
1370 /* Put this object into the cemetary. This may require this plot to
1371 be recycled, and the previous resident to be designated del_obj. */
1373 unsigned row = old_obj->type;
1374 unsigned plot = __mf_object_dead_head [row];
1376 del_obj = __mf_object_cemetary [row][plot];
1377 __mf_object_cemetary [row][plot] = old_obj;
1379 if (plot == __mf_opts.persistent_count) plot = 0;
1380 __mf_object_dead_head [row] = plot;
1386 if (__mf_opts.print_leaks)
1388 if ((old_obj->read_count + old_obj->write_count) == 0 &&
1389 (old_obj->type == __MF_TYPE_HEAP
1390 || old_obj->type == __MF_TYPE_HEAP_I))
1392 /* The problem with a warning message here is that we may not
1393 be privy to accesses to such objects that occur within
1394 uninstrumented libraries. */
1398 "mudflap warning: unaccessed registered object:\n");
1399 __mf_describe_object (old_obj);
1404 if (del_obj != NULL) /* May or may not equal old_obj. */
1406 if (__mf_opts.backtrace > 0)
1408 CALL_REAL(free, del_obj->alloc_backtrace);
1409 if (__mf_opts.persistent_count > 0)
1411 CALL_REAL(free, del_obj->dealloc_backtrace);
1414 CALL_REAL(free, del_obj);
1419 } /* end switch (__mf_opts.mudflap_mode) */
1422 if (__mf_opts.collect_stats)
1424 __mf_count_unregister ++;
1425 __mf_total_unregister_size += sz;
1434 unsigned long total_size;
1435 unsigned live_obj_count;
1436 double total_weight;
1437 double weighted_size;
1438 unsigned long weighted_address_bits [sizeof (uintptr_t) * 8][2];
1444 __mf_adapt_cache_fn (mfsplay_tree_node n, void *param)
1446 __mf_object_t *obj = (__mf_object_t *) n->value;
1447 struct tree_stats *s = (struct tree_stats *) param;
1449 assert (obj != NULL && s != NULL);
1451 /* Exclude never-accessed objects. */
1452 if (obj->read_count + obj->write_count)
1455 s->total_size += (obj->high - obj->low + 1);
1462 /* VERBOSE_TRACE ("analyze low=%p live=%u name=`%s'\n",
1463 (void *) obj->low, obj->liveness, obj->name); */
1465 s->live_obj_count ++;
1466 s->total_weight += (double) obj->liveness;
1468 (double) (obj->high - obj->low + 1) *
1469 (double) obj->liveness;
1472 for (i=0; i<sizeof(uintptr_t) * 8; i++)
1474 unsigned bit = addr & 1;
1475 s->weighted_address_bits[i][bit] += obj->liveness;
1479 /* Age the liveness value. */
1480 obj->liveness >>= 1;
1491 struct tree_stats s;
1492 uintptr_t new_mask = 0;
1493 unsigned char new_shift;
1494 float cache_utilization;
1496 static float smoothed_new_shift = -1.0;
1499 memset (&s, 0, sizeof (s));
1501 mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP), __mf_adapt_cache_fn, (void *) & s);
1502 mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I), __mf_adapt_cache_fn, (void *) & s);
1503 mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STACK), __mf_adapt_cache_fn, (void *) & s);
1504 mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STATIC), __mf_adapt_cache_fn, (void *) & s);
1505 mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_GUESS), __mf_adapt_cache_fn, (void *) & s);
1507 /* Maybe we're dealing with funny aging/adaptation parameters, or an
1508 empty tree. Just leave the cache alone in such cases, rather
1509 than risk dying by division-by-zero. */
1510 if (! (s.obj_count > 0 && s.live_obj_count > 0 && s.total_weight > 0.0))
1513 /* Guess a good value for the shift parameter by finding an address bit that is a
1514 good discriminant of lively objects. */
1516 for (i=0; i<sizeof (uintptr_t)*8; i++)
1518 float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
1519 if (max_value < value) max_value = value;
1521 for (i=0; i<sizeof (uintptr_t)*8; i++)
1523 float shoulder_factor = 0.7; /* Include slightly less popular bits too. */
1524 float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
1525 if (value >= max_value * shoulder_factor)
1528 if (smoothed_new_shift < 0) smoothed_new_shift = __mf_lc_shift;
1529 /* Converge toward this slowly to reduce flapping. */
1530 smoothed_new_shift = 0.9*smoothed_new_shift + 0.1*i;
1531 new_shift = (unsigned) (smoothed_new_shift + 0.5);
1532 assert (new_shift < sizeof (uintptr_t)*8);
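/* Worked example (illustrative): with __mf_lc_shift == 2 and the
   chosen discriminant bit at i == 4, the smoothed value becomes
   0.9*2.0 + 0.1*4.0 == 2.2, so new_shift rounds to 2; only repeated
   adaptation passes favoring bit 4 would pull the shift upward. */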
1534 /* Count number of used buckets. */
1535 cache_utilization = 0.0;
1536 for (i = 0; i < (1 + __mf_lc_mask); i++)
1537 if (__mf_lookup_cache[i].low != 0 || __mf_lookup_cache[i].high != 0)
1538 cache_utilization += 1.0;
1539 cache_utilization /= (1 + __mf_lc_mask);
1541 new_mask |= 0xffff; /* XXX: force a large cache. */
1542 new_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
1544 VERBOSE_TRACE ("adapt cache obj=%u/%u sizes=%lu/%.0f/%.0f => "
1545 "util=%u%% m=%p s=%u\n",
1546 s.obj_count, s.live_obj_count, s.total_size, s.total_weight, s.weighted_size,
1547 (unsigned)(cache_utilization*100.0), (void *) new_mask, new_shift);
1549 /* We should reinitialize cache if its parameters have changed. */
1550 if (new_mask != __mf_lc_mask ||
1551 new_shift != __mf_lc_shift)
1553 __mf_lc_mask = new_mask;
1554 __mf_lc_shift = new_shift;
1556 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1558 __mf_lookup_cache[0].low = MAXPTR;
1564 /* __mf_find_object[s] */
1566 /* Find overlapping live objects between [low,high]. Return up to
1567 max_objs of their pointers in objs[]. Return total count of
1568 overlaps (may exceed max_objs). */
1571 __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
1572 __mf_object_t **objs, unsigned max_objs, int type)
1575 mfsplay_tree t = __mf_object_tree (type);
1576 mfsplay_tree_key k = (mfsplay_tree_key) ptr_low;
1579 mfsplay_tree_node n = mfsplay_tree_lookup (t, k);
1580 /* An exact match for base address implies a hit. */
1583 if (count < max_objs)
1584 objs[count] = (__mf_object_t *) n->value;
1588 /* Iterate left then right near this key value to find all overlapping objects. */
1589 for (direction = 0; direction < 2; direction ++)
1591 /* Reset search origin. */
1592 k = (mfsplay_tree_key) ptr_low;
1598 n = (direction == 0 ? mfsplay_tree_successor (t, k) : mfsplay_tree_predecessor (t, k));
1599 if (n == NULL) break;
1600 obj = (__mf_object_t *) n->value;
1602 if (! (obj->low <= ptr_high && obj->high >= ptr_low)) /* No overlap? */
1605 if (count < max_objs)
1606 objs[count] = (__mf_object_t *) n->value;
1609 k = (mfsplay_tree_key) obj->low;
1618 __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
1619 __mf_object_t **objs, unsigned max_objs)
1624 /* Search each splay tree for overlaps. */
1625 for (type = __MF_TYPE_NOACCESS; type <= __MF_TYPE_GUESS; type++)
1627 unsigned c = __mf_find_objects2 (ptr_low, ptr_high, objs, max_objs, type);
1633 else /* NB: C may equal 0 */
1646 /* __mf_link_object */
1649 __mf_link_object (__mf_object_t *node)
1651 mfsplay_tree t = __mf_object_tree (node->type);
1652 mfsplay_tree_insert (t, (mfsplay_tree_key) node->low, (mfsplay_tree_value) node);
1655 /* __mf_unlink_object */
1658 __mf_unlink_object (__mf_object_t *node)
1660 mfsplay_tree t = __mf_object_tree (node->type);
1661 mfsplay_tree_remove (t, (mfsplay_tree_key) node->low);
1664 /* __mf_find_dead_objects */
1666 /* Find overlapping dead objects between [low,high]. Return up to
1667 max_objs of their pointers in objs[]. Return total count of
1668 overlaps (may exceed max_objs). */
1671 __mf_find_dead_objects (uintptr_t low, uintptr_t high,
1672 __mf_object_t **objs, unsigned max_objs)
1674 if (__mf_opts.persistent_count > 0)
1677 unsigned recollection = 0;
1680 assert (low <= high);
1681 assert (max_objs == 0 || objs != NULL);
1683 /* Widen the search from the most recent plots in each row, looking
1684 backward in time. */
1686 while (recollection < __mf_opts.persistent_count)
1690 for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
1695 plot = __mf_object_dead_head [row];
1696 for (i = 0; i <= recollection; i ++)
1700 /* Look backward through row: it's a circular buffer. */
1701 if (plot > 0) plot --;
1702 else plot = __mf_opts.persistent_count - 1;
1704 obj = __mf_object_cemetary [row][plot];
1705 if (obj && obj->low <= high && obj->high >= low)
1707 /* Found an overlapping dead object! */
1708 if (count < max_objs)
1718 /* Look farther back in time. */
1719 recollection = (recollection * 2) + 1;
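/* The search window thus grows geometrically: recollection takes the
   values 0, 1, 3, 7, 15, ..., so each pass scans roughly twice as
   many plots per row as the last, and the whole cemetery is covered
   within O(log2(persistent_count)) iterations of the outer loop. */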
1728 /* __mf_describe_object */
1731 __mf_describe_object (__mf_object_t *obj)
1733 static unsigned epoch = 0;
1740 if (__mf_opts.abbreviate && obj->description_epoch == epoch)
1743 "mudflap %sobject %p: name=`%s'\n",
1744 (obj->deallocated_p ? "dead " : ""),
1745 (void *) obj, (obj->name ? obj->name : ""));
1749 obj->description_epoch = epoch;
1752 "mudflap %sobject %p: name=`%s'\n"
1753 "bounds=[%p,%p] size=%lu area=%s check=%ur/%uw liveness=%u%s\n"
1754 "alloc time=%lu.%06lu pc=%p"
1759 (obj->deallocated_p ? "dead " : ""),
1760 (void *) obj, (obj->name ? obj->name : ""),
1761 (void *) obj->low, (void *) obj->high,
1762 (unsigned long) (obj->high - obj->low + 1),
1763 (obj->type == __MF_TYPE_NOACCESS ? "no-access" :
1764 obj->type == __MF_TYPE_HEAP ? "heap" :
1765 obj->type == __MF_TYPE_HEAP_I ? "heap-init" :
1766 obj->type == __MF_TYPE_STACK ? "stack" :
1767 obj->type == __MF_TYPE_STATIC ? "static" :
1768 obj->type == __MF_TYPE_GUESS ? "guess" :
1770 obj->read_count, obj->write_count, obj->liveness,
1771 obj->watching_p ? " watching" : "",
1772 obj->alloc_time.tv_sec, obj->alloc_time.tv_usec,
1773 (void *) obj->alloc_pc
1775 , (unsigned) obj->alloc_thread
1779 if (__mf_opts.backtrace > 0)
1782 for (i=0; i<obj->alloc_backtrace_size; i++)
1783 fprintf (stderr, " %s\n", obj->alloc_backtrace[i]);
1786 if (__mf_opts.persistent_count > 0)
1788 if (obj->deallocated_p)
1790 fprintf (stderr, "dealloc time=%lu.%06lu pc=%p"
1795 obj->dealloc_time.tv_sec, obj->dealloc_time.tv_usec,
1796 (void *) obj->dealloc_pc
1798 , (unsigned) obj->dealloc_thread
1803 if (__mf_opts.backtrace > 0)
1806 for (i=0; i<obj->dealloc_backtrace_size; i++)
1807 fprintf (stderr, " %s\n", obj->dealloc_backtrace[i]);
1815 __mf_report_leaks_fn (mfsplay_tree_node n, void *param)
1817 __mf_object_t *node = (__mf_object_t *) n->value;
1818 unsigned *count = (unsigned *) param;
1823 fprintf (stderr, "Leaked object %u:\n", (*count));
1824 __mf_describe_object (node);
1831 __mf_report_leaks ()
1835 (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP),
1836 __mf_report_leaks_fn, & count);
1837 (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I),
1838 __mf_report_leaks_fn, & count);
1843 /* ------------------------------------------------------------------------ */
1850 BEGIN_RECURSION_PROTECT ();
1852 END_RECURSION_PROTECT ();
1859 if (__mf_opts.collect_stats)
1864 "calls to __mf_check: %lu\n"
1865 " __mf_register: %lu [%luB, %luB, %luB, %luB, %luB]\n"
1866 " __mf_unregister: %lu [%luB]\n"
1867 " __mf_violation: [%lu, %lu, %lu, %lu, %lu]\n",
1869 __mf_count_register,
1870 __mf_total_register_size[0], __mf_total_register_size[1],
1871 __mf_total_register_size[2], __mf_total_register_size[3],
1872 __mf_total_register_size[4], /* XXX */
1873 __mf_count_unregister, __mf_total_unregister_size,
1874 __mf_count_violation[0], __mf_count_violation[1],
1875 __mf_count_violation[2], __mf_count_violation[3],
1876 __mf_count_violation[4]);
1879 "calls with reentrancy: %lu\n", __mf_reentrancy);
1882 " lock contention: %lu\n", __mf_lock_contention);
1885 /* Lookup cache stats. */
1888 unsigned max_reuse = 0;
1889 unsigned num_used = 0;
1890 unsigned num_unused = 0;
1892 for (i = 0; i < LOOKUP_CACHE_SIZE; i++)
1894 if (__mf_lookup_cache_reusecount[i])
1898 if (max_reuse < __mf_lookup_cache_reusecount[i])
1899 max_reuse = __mf_lookup_cache_reusecount[i];
1901 fprintf (stderr, "lookup cache slots used: %u unused: %u peak-reuse: %u\n",
1902 num_used, num_unused, max_reuse);
1906 unsigned live_count;
1907 live_count = __mf_find_objects (MINPTR, MAXPTR, NULL, 0);
1908 fprintf (stderr, "number of live objects: %u\n", live_count);
1911 if (__mf_opts.persistent_count > 0)
1913 unsigned dead_count = 0;
1915 for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
1916 for (plot = 0 ; plot < __mf_opts.persistent_count; plot ++)
1917 if (__mf_object_cemetary [row][plot] != 0)
1919 fprintf (stderr, " zombie objects: %u\n", dead_count);
1922 if (__mf_opts.print_leaks && (__mf_opts.mudflap_mode == mode_check))
1925 extern void * __mf_wrap_alloca_indirect (size_t c);
1927 /* Free up any remaining alloca()'d blocks. */
1928 __mf_wrap_alloca_indirect (0);
1929 #ifdef HAVE___LIBC_FREERES
1930 if (__mf_opts.call_libc_freeres)
1932 extern void __libc_freeres (void);
1937 __mf_describe_object (NULL); /* Reset description epoch. */
1938 l = __mf_report_leaks ();
1939 fprintf (stderr, "number of leaked objects: %u\n", l);
1943 /* __mf_backtrace */
1946 __mf_backtrace (char ***symbols, void *guess_pc, unsigned guess_omit_levels)
1949 unsigned pc_array_size = __mf_opts.backtrace + guess_omit_levels;
1950 unsigned remaining_size;
1951 unsigned omitted_size = 0;
1953 DECLARE (void, free, void *ptr);
1954 DECLARE (void *, calloc, size_t c, size_t n);
1955 DECLARE (void *, malloc, size_t n);
1957 pc_array = CALL_REAL (calloc, pc_array_size, sizeof (void *) );
1958 #ifdef HAVE_BACKTRACE
1959 pc_array_size = backtrace (pc_array, pc_array_size);
1961 #define FETCH(n) do { if (pc_array_size >= n) { \
1962 pc_array[n] = __builtin_return_address(n); \
1963 if (pc_array[n] == 0) pc_array_size = n; } } while (0)
1965 /* Unroll some calls __builtin_return_address because this function
1966 only takes a literal integer parameter. */
1969 /* XXX: __builtin_return_address sometimes crashes (!) on >0 arguments,
1970 rather than simply returning 0. :-( */
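/* The unrolled calls (reconstructed here; the clamp just below
   implies the original runs through FETCH(8)) follow the pattern:
     FETCH (0); FETCH (1); FETCH (2); FETCH (3); FETCH (4);
     FETCH (5); FETCH (6); FETCH (7); FETCH (8); */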
1979 if (pc_array_size > 8) pc_array_size = 9;
1981 if (pc_array_size > 0) pc_array_size = 1;
1987 /* We want to trim the first few levels of the stack traceback,
1988 since they contain libmudflap wrappers and junk. If pc_array[]
1989 ends up containing a non-NULL guess_pc, then trim everything
1990 before that. Otherwise, omit the first guess_omit_levels entries. */
1993 if (guess_pc != NULL)
1994 for (i=0; i<pc_array_size; i++)
1995 if (pc_array [i] == guess_pc)
1998 if (omitted_size == 0) /* No match? */
1999 if (pc_array_size > guess_omit_levels)
2000 omitted_size = guess_omit_levels;
2002 remaining_size = pc_array_size - omitted_size;
2004 #ifdef HAVE_BACKTRACE_SYMBOLS
2005 *symbols = backtrace_symbols (pc_array + omitted_size, remaining_size);
2008 /* Let's construct a buffer by hand. It will have <remaining_size>
2009 char*'s at the front, pointing at individual strings immediately
2014 enum { perline = 30 };
2015 buffer = CALL_REAL (malloc, remaining_size * (perline + sizeof(char *)));
2016 pointers = (char **) buffer;
2017 chars = (char *)buffer + (remaining_size * sizeof (char *));
2018 for (i = 0; i < remaining_size; i++)
2020 pointers[i] = chars;
2021 sprintf (chars, "[0x%p]", pc_array [omitted_size + i]);
2022 chars = chars + perline;
2024 *symbols = pointers;
2027 CALL_REAL (free, pc_array);
2029 return remaining_size;
2032 /* ------------------------------------------------------------------------ */
2033 /* __mf_violation */
2036 __mf_violation (void *ptr, size_t sz, uintptr_t pc,
2037 const char *location, int type)
2040 static unsigned violation_number;
2041 DECLARE(void, free, void *ptr);
2043 TRACE ("violation pc=%p location=%s type=%d ptr=%p size=%lu\n",
2045 (location != NULL ? location : ""), type, ptr, (unsigned long) sz);
2047 if (__mf_opts.collect_stats)
2048 __mf_count_violation [(type < 0) ? 0 :
2049 (type > __MF_VIOL_WATCH) ? 0 :
2052 /* Print out a basic warning message. */
2053 if (__mf_opts.verbose_violations)
2056 unsigned num_helpful = 0;
2057 struct timeval now = { 0, 0 };
2058 #if HAVE_GETTIMEOFDAY
2059 gettimeofday (& now, NULL);
2062 violation_number ++;
2065 "mudflap violation %u (%s): time=%lu.%06lu "
2066 "ptr=%p size=%lu\npc=%p%s%s%s\n",
2068 ((type == __MF_VIOL_READ) ? "check/read" :
2069 (type == __MF_VIOL_WRITE) ? "check/write" :
2070 (type == __MF_VIOL_REGISTER) ? "register" :
2071 (type == __MF_VIOL_UNREGISTER) ? "unregister" :
2072 (type == __MF_VIOL_WATCH) ? "watch" : "unknown"),
2073 now.tv_sec, now.tv_usec,
2074 (void *) ptr, (unsigned long)sz, (void *) pc,
2075 (location != NULL ? " location=`" : ""),
2076 (location != NULL ? location : ""),
2077 (location != NULL ? "'" : ""));
2079 if (__mf_opts.backtrace > 0)
2084 num = __mf_backtrace (& symbols, (void *) pc, 2);
2085 /* Note: backtrace_symbols calls malloc(). But since we're in
2086 __mf_violation and presumably __mf_check, it'll detect
2087 recursion, and not put the new string into the database. */
2089 for (i=0; i<num; i++)
2090 fprintf (stderr, " %s\n", symbols[i]);
2092 /* Calling free() here would trigger a violation. */
2093 CALL_REAL(free, symbols);
2097 /* Look for nearby objects. For this, we start with s_low/s_high
2098 pointing to the given area, looking for overlapping objects.
2099 If none show up, widen the search area and keep looking. */
2101 if (sz == 0) sz = 1;
2103 for (dead_p = 0; dead_p <= 1; dead_p ++) /* for dead_p in 0 1 */
2105 enum {max_objs = 3}; /* magic */
2106 __mf_object_t *objs[max_objs];
2107 unsigned num_objs = 0;
2108 uintptr_t s_low, s_high;
2112 s_low = (uintptr_t) ptr;
2113 s_high = CLAMPSZ (ptr, sz);
2115 while (tries < 16) /* magic */
2118 num_objs = __mf_find_dead_objects (s_low, s_high, objs, max_objs);
2120 num_objs = __mf_find_objects (s_low, s_high, objs, max_objs);
2122 if (num_objs) /* good enough */
2127 /* XXX: tune this search strategy. It's too dependent on
2128 sz, which can vary from 1 to very big (when array index
2129 checking) numbers. */
2130 s_low = CLAMPSUB (s_low, (sz * tries * tries));
2131 s_high = CLAMPADD (s_high, (sz * tries * tries));
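/* Example of the quadratic widening (illustrative, assuming tries
   counts 1, 2, 3, ...): with sz == 4 the window grows by 4, 16, 36,
   64, ... bytes on each side per iteration, so even small checked
   regions reach distant neighbors well within the 16-try budget. */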
2134 for (i = 0; i < min (num_objs, max_objs); i++)
2136 __mf_object_t *obj = objs[i];
2137 uintptr_t low = (uintptr_t) ptr;
2138 uintptr_t high = CLAMPSZ (ptr, sz);
2139 unsigned before1 = (low < obj->low) ? obj->low - low : 0;
2140 unsigned after1 = (low > obj->high) ? low - obj->high : 0;
2141 unsigned into1 = (high >= obj->low && low <= obj->high) ? low - obj->low : 0;
2142 unsigned before2 = (high < obj->low) ? obj->low - high : 0;
2143 unsigned after2 = (high > obj->high) ? high - obj->high : 0;
2144 unsigned into2 = (high >= obj->low && low <= obj->high) ? high - obj->low : 0;
2146 fprintf (stderr, "Nearby object %u: checked region begins %uB %s and ends %uB %s\n",
2147 num_helpful + i + 1,
2148 (before1 ? before1 : after1 ? after1 : into1),
2149 (before1 ? "before" : after1 ? "after" : "into"),
2150 (before2 ? before2 : after2 ? after2 : into2),
2151 (before2 ? "before" : after2 ? "after" : "into"));
2152 __mf_describe_object (obj);
2154 num_helpful += num_objs;
2157 fprintf (stderr, "number of nearby objects: %u\n", num_helpful);
2160 /* How to finally handle this violation? */
2161 switch (__mf_opts.violation_mode)
2166 kill (getpid(), SIGSEGV);
2173 snprintf (buf, 128, "gdb --pid=%u", (unsigned) getpid ());
2175 /* XXX: should probably fork() && sleep(GDB_WAIT_PARAMETER)
2176 instead, and let the forked child execlp() gdb. That way, this
2177 subject process can be resumed under the supervision of gdb.
2178 This can't happen now, since system() only returns when gdb
2179 dies. In that case, we need to beware of starting a second
2180 concurrent gdb child upon the next violation. (But if the first
2181 gdb dies, then starting a new one is appropriate.) */
2186 /* ------------------------------------------------------------------------ */
2189 unsigned __mf_watch (void *ptr, size_t sz)
2193 BEGIN_RECURSION_PROTECT ();
2194 rc = __mf_watch_or_not (ptr, sz, 1);
2195 END_RECURSION_PROTECT ();
2200 unsigned __mf_unwatch (void *ptr, size_t sz)
2204 rc = __mf_watch_or_not (ptr, sz, 0);
2211 __mf_watch_or_not (void *ptr, size_t sz, char flag)
2213 uintptr_t ptr_high = CLAMPSZ (ptr, sz);
2214 uintptr_t ptr_low = (uintptr_t) ptr;
2217 TRACE ("%s ptr=%p size=%lu\n",
2218 (flag ? "watch" : "unwatch"), ptr, (unsigned long) sz);
2220 switch (__mf_opts.mudflap_mode)
2230 __mf_object_t **all_ovr_objs;
2233 DECLARE (void *, malloc, size_t c);
2234 DECLARE (void, free, void *p);
2236 obj_count = __mf_find_objects (ptr_low, ptr_high, NULL, 0);
2237 VERBOSE_TRACE (" %u:", obj_count);
2239 all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_t *) * obj_count));
2240 if (all_ovr_objs == NULL) abort ();
2241 n = __mf_find_objects (ptr_low, ptr_high, all_ovr_objs, obj_count);
2242 assert (n == obj_count);
2244 for (n = 0; n < obj_count; n ++)
2246 __mf_object_t *obj = all_ovr_objs[n];
2248 VERBOSE_TRACE (" [%p]", (void *) obj);
2249 if (obj->watching_p != flag)
2251 obj->watching_p = flag;
2254 /* Remove object from cache, to ensure next access
2255 goes through __mf_check(). */
2257 __mf_uncache_object (obj);
2260 CALL_REAL (free, all_ovr_objs);
2270 __mf_sigusr1_handler (int num)
2272 __mf_sigusr1_received ++;
2275 /* Install or remove SIGUSR1 handler as necessary.
2276 Also, respond to a received pending SIGUSR1. */
2278 __mf_sigusr1_respond ()
2280 static int handler_installed;
2283 /* Manage handler */
2284 if (__mf_opts.sigusr1_report && ! handler_installed)
2286 signal (SIGUSR1, __mf_sigusr1_handler);
2287 handler_installed = 1;
2289 else if(! __mf_opts.sigusr1_report && handler_installed)
2291 signal (SIGUSR1, SIG_DFL);
2292 handler_installed = 0;
2296 /* Manage enqueued signals */
2297 if (__mf_sigusr1_received > __mf_sigusr1_handled)
2299 __mf_sigusr1_handled ++;
2300 assert (__mf_get_state () == reentrant);
2302 handler_installed = 0; /* We may need to re-install the handler: SysV signal() semantics reset it after each delivery. */
2307 /* XXX: provide an alternative __assert_fail function that cannot
2308 fail due to libmudflap infinite recursion. */
2312 write_itoa (int fd, unsigned n)
2314 enum x { bufsize = sizeof(n)*4 };
2318 for (i=0; i<bufsize-1; i++)
2320 unsigned digit = n % 10;
2321 buf[bufsize-2-i] = digit + '0';
2325 char *m = & buf [bufsize-2-i];
2326 buf[bufsize-1] = '\0';
2327 write (fd, m, strlen(m));
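/* Effect sketch (illustrative): write_itoa (2, 1234) emits the bytes
   "1234" to stderr using only write(), keeping this helper safe to
   call from the assertion path without re-entering instrumented
   malloc/stdio machinery. */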
2335 __assert_fail (const char *msg, const char *file, unsigned line, const char *func)
2337 #define write2(string) write (2, (string), strlen ((string)));
2341 write_itoa (2, (unsigned) pthread_self ());
2344 write2(": assertion failure: `");
2345 write (2, msg, strlen (msg));
2347 write (2, func, strlen (func));
2349 write (2, file, strlen (file));
2351 write_itoa (2, line);
2362 /* Adapted splay tree code, originally from libiberty. It has been
2363 specialized for libmudflap as requested by RMS. */
2366 mfsplay_tree_free (void *p)
2368 DECLARE (void, free, void *p);
2369 CALL_REAL (free, p);
2373 mfsplay_tree_xmalloc (size_t s)
2375 DECLARE (void *, malloc, size_t s);
2376 return CALL_REAL (malloc, s);
2380 static void mfsplay_tree_splay (mfsplay_tree, mfsplay_tree_key);
2381 static mfsplay_tree_node mfsplay_tree_splay_helper (mfsplay_tree,
2383 mfsplay_tree_node *,
2384 mfsplay_tree_node *,
2385 mfsplay_tree_node *);
2388 /* Help splay SP around KEY. PARENT and GRANDPARENT are the parent
2389 and grandparent, respectively, of NODE. */
2391 static mfsplay_tree_node
2392 mfsplay_tree_splay_helper (mfsplay_tree sp,
2393 mfsplay_tree_key key,
2394 mfsplay_tree_node * node,
2395 mfsplay_tree_node * parent,
2396 mfsplay_tree_node * grandparent)
2398 mfsplay_tree_node *next;
2399 mfsplay_tree_node n;
2407 comparison = ((key > n->key) ? 1 : ((key < n->key) ? -1 : 0));
2409 if (comparison == 0)
2410 /* We've found the target. */
2412 else if (comparison < 0)
2413 /* The target is to the left. */
2416 /* The target is to the right. */
2421 /* Check whether our recursion depth is too high. Abort this search,
2422 and signal that a rebalance is required to continue. */
2423 if (sp->depth > sp->max_depth)
2425 sp->rebalance_p = 1;
2429 /* Continue down the tree. */
2431 n = mfsplay_tree_splay_helper (sp, key, next, node, parent);
2434 /* The recursive call will change the place to which NODE
2436 if (*node != n || sp->rebalance_p)
2441 /* NODE is the root. We are done. */
2444 /* First, handle the case where there is no grandparent (i.e.,
2445 *PARENT is the root of the tree.) */
2448 if (n == (*parent)->left)
  /* Next handle the cases where both N and *PARENT are left children,
     or where both are right children.  */
  if (n == (*parent)->left && *parent == (*grandparent)->left)
    {
      mfsplay_tree_node p = *parent;

      (*grandparent)->left = p->right;
      p->right = *grandparent;
      p->left = n->right;
      n->right = p;
      *grandparent = n;
      return n;
    }
  else if (n == (*parent)->right && *parent == (*grandparent)->right)
    {
      mfsplay_tree_node p = *parent;

      (*grandparent)->right = p->left;
      p->left = *grandparent;
      p->right = n->left;
      n->left = p;
      *grandparent = n;
      return n;
    }
  /* Finally, deal with the case where N is a left child, but *PARENT
     is a right child, or vice versa.  */
  if (n == (*parent)->left)
    {
      (*parent)->left = n->right;
      n->right = *parent;
      (*grandparent)->right = n->left;
      n->left = *grandparent;
      *grandparent = n;
      return n;
    }
  else
    {
      (*parent)->right = n->left;
      n->left = *parent;
      (*grandparent)->left = n->right;
      n->right = *grandparent;
      *grandparent = n;
      return n;
    }
}
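
/* Illustration (not in the original source): the "both left children"
   branch above is the classic zig-zig step.  With g == *grandparent,
   p == *parent, and n the node being splayed:

	    g                 n
	   / \               / \
	  p   D             A   p
	 / \       ==>         / \
	n   C                 B   g
       / \                       / \
      A   B                     C   D

   The mirror-image branch handles the all-right-children case, and the
   final two branches are the corresponding zig-zag rotations.  */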
static int
mfsplay_tree_rebalance_helper1 (mfsplay_tree_node n, void *array_ptr)
{
  mfsplay_tree_node **p = array_ptr;
  *(*p) = n;
  (*p)++;
  return 0;
}


static mfsplay_tree_node
mfsplay_tree_rebalance_helper2 (mfsplay_tree_node * array, unsigned low,
				unsigned high)
{
  unsigned middle = low + (high - low) / 2;
  mfsplay_tree_node n = array[middle];

  /* Note that since we're producing a balanced binary tree, it is not a problem
     that this function is recursive.  */
  if (low + 1 <= middle)
    n->left = mfsplay_tree_rebalance_helper2 (array, low, middle - 1);
  else
    n->left = NULL;

  if (middle + 1 <= high)
    n->right = mfsplay_tree_rebalance_helper2 (array, middle + 1, high);
  else
    n->right = NULL;

  return n;
}
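
/* Worked example (illustrative, not in the original source): for a
   sorted array of seven node pointers, indices 0..6, helper2
   repeatedly picks the middle index, producing this balanced shape:

	     3
	   /   \
	  1     5
	 / \   / \
	0   2 4   6

   Its recursion depth is O(log n), so the stack stays shallow even for
   the very trees whose depth triggered the rebalance.  */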
/* Rebalance the entire tree.  Do this by copying all the node
   pointers into an array, then re-linking them into the balanced
   shape computed by mfsplay_tree_rebalance_helper2.  */
static void
mfsplay_tree_rebalance (mfsplay_tree sp)
{
  mfsplay_tree_node *all_nodes, *all_nodes_1;

  if (sp->num_keys <= 2)
    return;

  all_nodes = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * sp->num_keys);

  /* Traverse all nodes to copy their addresses into this array.  */
  all_nodes_1 = all_nodes;
  mfsplay_tree_foreach (sp, mfsplay_tree_rebalance_helper1,
			(void *) &all_nodes_1);

  /* Relink all the nodes.  */
  sp->root = mfsplay_tree_rebalance_helper2 (all_nodes, 0, sp->num_keys - 1);

  mfsplay_tree_free (all_nodes);
}
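
/* Illustration (not in the original source): a degenerate access
   pattern that makes this rebalancing necessary.  Splay-inserting keys
   in strictly increasing order leaves the tree as one long left spine,
   so a later splay of the smallest key would recurse num_keys deep
   were it not for the depth cap:

     mfsplay_tree t = mfsplay_tree_new ();
     uintptr_t i;
     for (i = 1; i <= 100000; i++)
       mfsplay_tree_insert (t, (mfsplay_tree_key) i, NULL);
     mfsplay_tree_lookup (t, (mfsplay_tree_key) 1);
       // deep splay: aborted at the cap, rebalanced, then retried
*/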
/* Splay SP around KEY.  */
static void
mfsplay_tree_splay (mfsplay_tree sp, mfsplay_tree_key key)
{
  if (sp->root == 0)
    return;

  /* If we just splayed the tree with the same key, do nothing.  */
  if (sp->last_splayed_key_p &&
      (sp->last_splayed_key == key))
    return;

  /* Compute a maximum recursion depth for a splay tree with NUM nodes.
     The idea is to limit excessive stack usage if we're facing
     degenerate access patterns.  Unfortunately such patterns can occur
     e.g. during static initialization, where many static objects might
     be registered in increasing address sequence, or during a case where
     large tree-like heap data structures are allocated quickly.

     On x86, a limit of 2500 frames corresponds to roughly 200K of stack
     usage (about 80 bytes per frame).
     XXX: For libmudflapth, this could be a function of __mf_opts.thread_stack.  */
  sp->max_depth = 2500;
  sp->rebalance_p = sp->depth = 0;

  mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);
  if (sp->rebalance_p)
    {
      /* The tree was too deep to splay within the depth budget:
	 rebalance it and retry once.  A balanced tree cannot exceed
	 the budget, so a second failure is a logic error.  */
      mfsplay_tree_rebalance (sp);

      sp->rebalance_p = sp->depth = 0;
      mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);

      if (sp->rebalance_p)
	abort ();
    }

  /* Cache this splay key.  */
  sp->last_splayed_key = key;
  sp->last_splayed_key_p = 1;
}
/* Allocate a new splay tree.  */
static mfsplay_tree
mfsplay_tree_new ()
{
  mfsplay_tree sp = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_s));
  sp->root = NULL;
  sp->last_splayed_key_p = 0;
  sp->num_keys = 0;

  return sp;
}
/* Insert a new node (associating KEY with VALUE) into SP.  If a
   previous node with the indicated KEY exists, its data is replaced
   with the new value.  Returns the new node.  */
static mfsplay_tree_node
mfsplay_tree_insert (mfsplay_tree sp, mfsplay_tree_key key, mfsplay_tree_value value)
{
  int comparison = 0;

  mfsplay_tree_splay (sp, key);

  if (sp->root)
    comparison = ((sp->root->key > key) ? 1 :
		  ((sp->root->key < key) ? -1 : 0));

  if (sp->root && comparison == 0)
    {
      /* If the root of the tree already has the indicated KEY, just
	 replace the value with VALUE.  */
      sp->root->value = value;
    }
  else
    {
      /* Create a new node, and insert it at the root.  */
      mfsplay_tree_node node;

      node = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_node_s));
      node->key = key;
      node->value = value;
      sp->num_keys ++;
      if (!sp->root)
	node->left = node->right = 0;
      else if (comparison < 0)
	{
	  node->left = sp->root;
	  node->right = node->left->right;
	  node->left->right = 0;
	}
      else
	{
	  node->right = sp->root;
	  node->left = node->right->left;
	  node->right->left = 0;
	}

      sp->root = node;
      sp->last_splayed_key_p = 0;
    }

  return sp->root;
}
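
/* Usage sketch (illustrative; obj and its type are hypothetical):

     mfsplay_tree t = mfsplay_tree_new ();
     mfsplay_tree_insert (t, (mfsplay_tree_key) 0x1000, obj);
     mfsplay_tree_node n = mfsplay_tree_lookup (t, (mfsplay_tree_key) 0x1000);
     if (n != NULL)
       obj = n->value;

   The key is a uintptr_t and the value an opaque pointer, which is how
   the runtime can index object records by address.  */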
/* Remove KEY from SP.  It is not an error if it did not exist.  */
static void
mfsplay_tree_remove (mfsplay_tree sp, mfsplay_tree_key key)
{
  mfsplay_tree_splay (sp, key);
  sp->last_splayed_key_p = 0;
  if (sp->root && (sp->root->key == key))
    {
      mfsplay_tree_node left, right;
      left = sp->root->left;
      right = sp->root->right;
      /* Delete the root node itself.  */
      mfsplay_tree_free (sp->root);
      sp->num_keys --;
      /* One of the children is now the root.  Doesn't matter much
	 which, so long as we preserve the properties of the tree.  */
      if (left)
	{
	  sp->root = left;
	  /* If there was a right child as well, hang it off the
	     right-most leaf of the left child.  */
	  if (right)
	    {
	      while (left->right)
		left = left->right;
	      left->right = right;
	    }
	}
      else
	sp->root = right;
    }
}
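
/* Illustration (not in the original source): removal when the root has
   two children.  The right subtree Rt is re-hung off the rightmost
   leaf of the left subtree:

	  R                  L
	 / \                / \
	L   Rt     ==>     A   B
       / \                      \
      A   B                      Rt

   No rotations are needed here because the preceding splay already
   brought the doomed key to the root.  */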
/* Look up KEY in SP, returning the matching node if present, and NULL
   otherwise.  */
static mfsplay_tree_node
mfsplay_tree_lookup (mfsplay_tree sp, mfsplay_tree_key key)
{
  mfsplay_tree_splay (sp, key);
  if (sp->root && (sp->root->key == key))
    return sp->root;
  else
    return 0;
}
/* Return the node whose key immediately precedes KEY, or NULL if there
   is no predecessor.  KEY need not be present in the tree.  */
static mfsplay_tree_node
mfsplay_tree_predecessor (mfsplay_tree sp, mfsplay_tree_key key)
{
  int comparison;
  mfsplay_tree_node node;

  /* If the tree is empty, there is certainly no predecessor.  */
  if (!sp->root)
    return NULL;

  /* Splay the tree around KEY.  That will leave either the KEY
     itself, its predecessor, or its successor at the root.  */
  mfsplay_tree_splay (sp, key);
  comparison = ((sp->root->key > key) ? 1 :
		((sp->root->key < key) ? -1 : 0));

  /* If the predecessor is at the root, just return it.  */
  if (comparison < 0)
    return sp->root;

  /* Otherwise, find the rightmost element of the left subtree.  */
  node = sp->root->left;
  if (node)
    while (node->right)
      node = node->right;

  return node;
}
/* Return the node whose key immediately follows KEY, or NULL if there
   is no successor.  KEY need not be present in the tree.  */
static mfsplay_tree_node
mfsplay_tree_successor (mfsplay_tree sp, mfsplay_tree_key key)
{
  int comparison;
  mfsplay_tree_node node;

  /* If the tree is empty, there is certainly no successor.  */
  if (!sp->root)
    return NULL;

  /* Splay the tree around KEY.  That will leave either the KEY
     itself, its predecessor, or its successor at the root.  */
  mfsplay_tree_splay (sp, key);
  comparison = ((sp->root->key > key) ? 1 :
		((sp->root->key < key) ? -1 : 0));

  /* If the successor is at the root, just return it.  */
  if (comparison > 0)
    return sp->root;

  /* Otherwise, find the leftmost element of the right subtree.  */
  node = sp->root->right;
  if (node)
    while (node->left)
      node = node->left;

  return node;
}
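
/* Sketch (illustrative, not in the original source): predecessor and
   successor support range scans over keys, e.g. visiting every key in
   [lo, hi]:

     mfsplay_tree_node n = mfsplay_tree_lookup (t, lo);
     if (n == NULL)
       n = mfsplay_tree_successor (t, lo);
     while (n != NULL && n->key <= hi)
       {
	 visit (n);                            // hypothetical callback
	 n = mfsplay_tree_successor (t, n->key);
       }
*/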
/* Call FN, passing it the DATA, for every node in SP, following an
   in-order traversal.  If FN ever returns a non-zero value, the
   iteration ceases immediately, and the value is returned.
   Otherwise, this function returns 0.

   This function simulates recursion using dynamically allocated
   arrays, since it may be called from mfsplay_tree_rebalance(), which
   in turn means that the tree is already uncomfortably deep for stack
   space limits.  */
static int
mfsplay_tree_foreach (mfsplay_tree st, mfsplay_tree_foreach_fn fn, void *data)
{
  mfsplay_tree_node *stack1;
  char *stack2;
  unsigned sp;
  int val = 0;
  enum s { s_left, s_here, s_right, s_up };

  if (st->root == NULL) /* => num_keys == 0 */
    return 0;

  stack1 = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * st->num_keys);
  stack2 = mfsplay_tree_xmalloc (sizeof (char) * st->num_keys);

  sp = 0;
  stack1 [sp] = st->root;
  stack2 [sp] = s_left;

  while (1)
    {
      mfsplay_tree_node n;
      enum s s;

      n = stack1 [sp];
      s = stack2 [sp];

      /* Handle each of the four possible states separately.  */

      /* 1: We're here to traverse the left subtree (if any).  */
      if (s == s_left)
	{
	  stack2 [sp] = s_here;
	  if (n->left != NULL)
	    {
	      sp ++;
	      stack1 [sp] = n->left;
	      stack2 [sp] = s_left;
	    }
	}

      /* 2: We're here to traverse this node.  */
      else if (s == s_here)
	{
	  stack2 [sp] = s_right;
	  val = (*fn) (n, data);
	  if (val)
	    break;
	}

      /* 3: We're here to traverse the right subtree (if any).  */
      else if (s == s_right)
	{
	  stack2 [sp] = s_up;
	  if (n->right != NULL)
	    {
	      sp ++;
	      stack1 [sp] = n->right;
	      stack2 [sp] = s_left;
	    }
	}

      /* 4: We're here after both subtrees (if any) have been traversed.  */
      else
	{
	  /* Pop the stack.  */
	  if (sp == 0)
	    break; /* Popping off the root node: we're finished!  */
	  sp --;
	}
    }

  mfsplay_tree_free (stack1);
  mfsplay_tree_free (stack2);

  return val;
}
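
/* Usage sketch (illustrative; count_cb is hypothetical): counting
   nodes with a foreach callback.

     static int
     count_cb (mfsplay_tree_node n, void *data)
     {
       (*(unsigned *) data) ++;
       return 0;   // zero means: keep iterating
     }

     unsigned count = 0;
     mfsplay_tree_foreach (t, count_cb, &count);

   mfsplay_tree_rebalance_helper1 above has exactly this shape: it
   appends each visited node to an array through the DATA pointer.  */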