1 /* Mudflap: narrow-pointer bounds-checking by tree rewriting.
2 Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
3 Contributed by Frank Ch. Eigler <fche@redhat.com>
4 and Graydon Hoare <graydon@redhat.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
13 In addition to the permissions in the GNU General Public License, the
14 Free Software Foundation gives you unlimited permission to link the
15 compiled version of this file into combinations with other programs,
16 and to distribute those combinations without any restriction coming
17 from the use of this file. (The General Public License restrictions
18 do apply in other respects; for example, they cover modification of
19 the file, and distribution when not linked into a combined executable.)
22 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
23 WARRANTY; without even the implied warranty of MERCHANTABILITY or
24 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
27 You should have received a copy of the GNU General Public License
28 along with GCC; see the file COPYING. If not, write to the Free
29 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
34 /* These attempt to coax various unix flavours to declare all our
35 needed tidbits in the system headers. */
36 #if !defined(__FreeBSD__)
38 #endif /* Some BSDs break <sys/socket.h> if this is defined. */
42 #define __EXTENSIONS__
44 #define _LARGE_FILE_API
45 #define _XOPEN_SOURCE_EXTENDED 1
49 #include <sys/types.h>
53 #ifdef HAVE_EXECINFO_H
63 #include <sys/types.h>
68 #include "mf-runtime.h"
70 #include "splay-tree.h"
73 /* ------------------------------------------------------------------------ */
/* NOTE(review): this chunk is a line-numbered listing with many source lines
   elided (the embedded numbering skips); macro bodies and functions below are
   visibly truncated and will not compile as shown.  Comments added here are
   based only on the visible text. */
/* GCC attributes: run at load time / at exit, respectively. */
76 #define CTOR __attribute__ ((constructor))
77 #define DTOR __attribute__ ((destructor))
80 /* Codes to describe the context in which a violation occurs. */
81 #define __MF_VIOL_UNKNOWN 0
82 #define __MF_VIOL_READ 1
83 #define __MF_VIOL_WRITE 2
84 #define __MF_VIOL_REGISTER 3
85 #define __MF_VIOL_UNREGISTER 4
86 #define __MF_VIOL_WATCH 5
88 /* Protect against recursive calls. */
/* Writes a raw diagnostic to fd 2 (stderr) if the runtime re-enters itself,
   then marks the state reentrant.  The closing "} while (0)" lines of both
   macros are elided in this listing — TODO confirm against the full file. */
89 #define BEGIN_RECURSION_PROTECT() do { \
90 if (UNLIKELY (__mf_state == reentrant)) { \
91 write (2, "mf: erroneous reentrancy detected in `", 38); \
92 write (2, __PRETTY_FUNCTION__, strlen(__PRETTY_FUNCTION__)); \
93 write (2, "'\n", 2); \
95 __mf_state = reentrant; \
98 #define END_RECURSION_PROTECT() do { \
99 __mf_state = active; \
104 /* ------------------------------------------------------------------------ */
105 /* Required globals. */
/* Direct-mapped lookup cache of recently-checked [low,high] ranges; the
   mask/shift pair maps a pointer to a cache index (see __MF_CACHE_INDEX). */
107 #define LOOKUP_CACHE_MASK_DFL 1023
108 #define LOOKUP_CACHE_SIZE_MAX 4096 /* Allows max CACHE_MASK 0x0FFF */
109 #define LOOKUP_CACHE_SHIFT_DFL 2
111 struct __mf_cache __mf_lookup_cache [LOOKUP_CACHE_SIZE_MAX];
112 uintptr_t __mf_lc_mask = LOOKUP_CACHE_MASK_DFL;
113 unsigned char __mf_lc_shift = LOOKUP_CACHE_SHIFT_DFL;
114 #define LOOKUP_CACHE_SIZE (__mf_lc_mask + 1)
/* Runtime-tunable options, parsed from MUDFLAP_OPTIONS in __mf_init. */
116 struct __mf_options __mf_opts;
/* Nonzero until __mf_init completes; gates bootstrap-phase behavior. */
118 int __mf_starting_p = 1;
120 enum __mf_state_enum __mf_state = active;
122 /* See __mf_state_perthread() in mf-hooks.c. */
/* Global lock serializing the runtime; the #else/#endif of this conditional
   initializer are elided in this listing — TODO confirm. */
127 pthread_mutex_t __mf_biglock =
128 #ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
129 PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
131 PTHREAD_MUTEX_INITIALIZER;
135 /* Use HAVE_PTHREAD_H here instead of LIBMUDFLAPTH, so that even
136 the libmudflap.la (no threading support) can diagnose whether
137 the application is linked with -lpthread. See __mf_usage() below. */
139 #ifdef _POSIX_THREADS
140 #pragma weak pthread_join
142 #define pthread_join NULL
/* Non-NULL iff pthread_join resolved, i.e. the app linked -lpthread. */
144 const void *threads_active_p = (void *) pthread_join;
148 /* ------------------------------------------------------------------------ */
149 /* stats-related globals. */
151 static unsigned long __mf_count_check;
152 static unsigned long __mf_lookup_cache_reusecount [LOOKUP_CACHE_SIZE_MAX];
153 static unsigned long __mf_count_register;
154 static unsigned long __mf_total_register_size [__MF_TYPE_MAX+1];
155 static unsigned long __mf_count_unregister;
156 static unsigned long __mf_total_unregister_size;
157 static unsigned long __mf_count_violation [__MF_VIOL_WATCH+1];
158 static unsigned long __mf_sigusr1_received;
159 static unsigned long __mf_sigusr1_handled;
160 /* not static */ unsigned long __mf_reentrancy;
162 /* not static */ unsigned long __mf_lock_contention;
166 /* ------------------------------------------------------------------------ */
167 /* mode-check-related globals. */
/* Per-registration bookkeeping record; tracks the registered range, its
   type, access counters, and (optionally) allocation/deallocation
   provenance.  Several fields (e.g. name, alloc_pc, deallocated_p — used
   later in this file) fall on lines elided from this listing. */
169 typedef struct __mf_object
171 uintptr_t low, high; /* __mf_register parameters */
173 char type; /* __MF_TYPE_something */
174 char watching_p; /* Trigger a VIOL_WATCH on access? */
175 unsigned read_count; /* Number of times __mf_check/read was called on this object. */
176 unsigned write_count; /* Likewise for __mf_check/write. */
177 unsigned liveness; /* A measure of recent checking activity. */
178 unsigned description_epoch; /* Last epoch __mf_describe_object printed this. */
/* Allocation provenance (guarded by HAVE_GETTIMEOFDAY / backtrace options). */
181 struct timeval alloc_time;
182 char **alloc_backtrace;
183 size_t alloc_backtrace_size;
185 pthread_t alloc_thread;
/* Deallocation provenance, filled in by __mfu_unregister. */
189 uintptr_t dealloc_pc;
190 struct timeval dealloc_time;
191 char **dealloc_backtrace;
192 size_t dealloc_backtrace_size;
194 pthread_t dealloc_thread;
198 /* Live objects: splay trees, separated by type, ordered on .low (base address). */
199 /* Actually stored as static vars within lookup function below. */
201 /* Dead objects: circular arrays; _MIN_CEM .. _MAX_CEM only */
202 static unsigned __mf_object_dead_head[__MF_TYPE_MAX_CEM+1]; /* next empty spot */
203 static __mf_object_t *__mf_object_cemetary[__MF_TYPE_MAX_CEM+1][__MF_PERSIST_MAX];
206 /* ------------------------------------------------------------------------ */
207 /* Forward function declarations */
/* Internal helpers defined later in this file; __mf_init runs automatically
   at program startup via the CTOR attribute. */
209 void __mf_init () CTOR;
210 static void __mf_sigusr1_respond ();
211 static unsigned __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
212 __mf_object_t **objs, unsigned max_objs);
213 static unsigned __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
214 __mf_object_t **objs, unsigned max_objs, int type);
215 static unsigned __mf_find_dead_objects (uintptr_t ptr_low, uintptr_t ptr_high,
216 __mf_object_t **objs, unsigned max_objs);
217 static void __mf_adapt_cache ();
218 static void __mf_describe_object (__mf_object_t *obj);
219 static unsigned __mf_watch_or_not (void *ptr, size_t sz, char flag);
220 static splay_tree __mf_object_tree (int type);
221 static void __mf_link_object (__mf_object_t *node);
222 static void __mf_unlink_object (__mf_object_t *node);
225 /* ------------------------------------------------------------------------ */
226 /* Configuration engine */
/* Reset __mf_opts to its built-in defaults: check mode, no-op violations,
   a 4-entry deferred-free queue, 32-byte crumple zones, 4-level backtraces,
   and periodic cache adaptation every ~1e6 misses (1000003 is presumably a
   prime to avoid resonance — not stated here). */
229 __mf_set_default_options ()
231 memset (& __mf_opts, 0, sizeof (__mf_opts));
233 __mf_opts.adapt_cache = 1000003;
234 __mf_opts.abbreviate = 1;
235 __mf_opts.verbose_violations = 1;
236 __mf_opts.free_queue_length = 4;
237 __mf_opts.persistent_count = 100;
238 __mf_opts.crumple_zone = 32;
239 __mf_opts.backtrace = 4;
240 __mf_opts.timestamps = 1;
241 __mf_opts.mudflap_mode = mode_check;
242 __mf_opts.violation_mode = viol_nop;
243 __mf_opts.heur_std_data = 1;
245 __mf_opts.thread_stack = 0;
/* Table of recognized option strings, consumed by __mfu_set_options and
   __mf_usage below.  The table declaration and the {"name", line of most
   entries are elided in this listing; each visible entry carries a help
   string, a handler kind (set_option / read_integer_option), a value, and
   a target field in __mf_opts. */
264 "mudflaps do nothing",
265 set_option, (int)mode_nop, (int *)&__mf_opts.mudflap_mode},
267 "mudflaps populate object tree",
268 set_option, (int)mode_populate, (int *)&__mf_opts.mudflap_mode},
270 "mudflaps check for memory violations",
271 set_option, (int)mode_check, (int *)&__mf_opts.mudflap_mode},
273 "mudflaps always cause violations (diagnostic)",
274 set_option, (int)mode_violate, (int *)&__mf_opts.mudflap_mode},
277 "violations do not change program execution",
278 set_option, (int)viol_nop, (int *)&__mf_opts.violation_mode},
280 "violations cause a call to abort()",
281 set_option, (int)viol_abort, (int *)&__mf_opts.violation_mode},
283 "violations are promoted to SIGSEGV signals",
284 set_option, (int)viol_segv, (int *)&__mf_opts.violation_mode},
286 "violations fork a gdb process attached to current program",
287 set_option, (int)viol_gdb, (int *)&__mf_opts.violation_mode},
289 "trace calls to mudflap runtime library",
290 set_option, 1, &__mf_opts.trace_mf_calls},
292 "trace internal events within mudflap runtime library",
293 set_option, 1, &__mf_opts.verbose_trace},
295 "collect statistics on mudflap's operation",
296 set_option, 1, &__mf_opts.collect_stats},
299 "print report upon SIGUSR1",
300 set_option, 1, &__mf_opts.sigusr1_report},
302 {"internal-checking",
303 "perform more expensive internal checking",
304 set_option, 1, &__mf_opts.internal_checking},
306 "print any memory leaks at program shutdown",
307 set_option, 1, &__mf_opts.print_leaks},
308 {"check-initialization",
309 "detect uninitialized object reads",
310 set_option, 1, &__mf_opts.check_initialization},
311 {"verbose-violations",
312 "print verbose messages when memory violations occur",
313 set_option, 1, &__mf_opts.verbose_violations},
315 "abbreviate repetitive listings",
316 set_option, 1, &__mf_opts.abbreviate},
318 "track object lifetime timestamps",
319 set_option, 1, &__mf_opts.timestamps},
321 "ignore read accesses - assume okay",
322 set_option, 1, &__mf_opts.ignore_reads},
324 "wipe stack objects at unwind",
325 set_option, 1, &__mf_opts.wipe_stack},
327 "wipe heap objects at free",
328 set_option, 1, &__mf_opts.wipe_heap},
330 "support /proc/self/map heuristics",
331 set_option, 1, &__mf_opts.heur_proc_map},
333 "enable a simple upper stack bound heuristic",
334 set_option, 1, &__mf_opts.heur_stack_bound},
336 "support _start.._end heuristics",
337 set_option, 1, &__mf_opts.heur_start_end},
339 "register standard library data (argv, errno, stdin, ...)",
340 set_option, 1, &__mf_opts.heur_std_data},
341 {"free-queue-length",
342 "queue N deferred free() calls before performing them",
343 read_integer_option, 0, &__mf_opts.free_queue_length},
345 "keep a history of N unregistered regions",
346 read_integer_option, 0, &__mf_opts.persistent_count},
348 "surround allocations with crumple zones of N bytes",
349 read_integer_option, 0, &__mf_opts.crumple_zone},
350 /* XXX: not type-safe.
352 "set lookup cache size mask to N (2**M - 1)",
353 read_integer_option, 0, (int *)(&__mf_lc_mask)},
355 "set lookup cache pointer shift",
356 read_integer_option, 0, (int *)(&__mf_lc_shift)},
359 "adapt mask/shift parameters after N cache misses",
360 read_integer_option, 1, &__mf_opts.adapt_cache},
362 "keep an N-level stack trace of each call context",
363 read_integer_option, 0, &__mf_opts.backtrace},
366 "override thread stacks allocation: N kB",
367 read_integer_option, 0, &__mf_opts.thread_stack},
/* Sentinel terminating the table (opts->name == NULL ends iteration). */
369 {0, 0, set_option, 0, NULL}
/* __mf_usage(): prints the banner, usage instructions, and one help line per
   option table entry to stderr.  The function header and the switch on the
   option kind are elided in this listing. */
378 "This is a %s%sGCC \"mudflap\" memory-checked binary.\n"
379 "Mudflap is Copyright (C) 2002-2004 Free Software Foundation, Inc.\n"
381 "The mudflap code can be controlled by an environment variable:\n"
383 "$ export MUDFLAP_OPTIONS='<options>'\n"
384 "$ <mudflapped_program>\n"
386 "where <options> is a space-separated list of \n"
387 "any of the following options. Use `-no-OPTION' to disable options.\n"
/* threads_active_p is set from the weak pthread_join symbol above. */
390 (threads_active_p ? "multi-threaded " : "single-threaded "),
400 /* XXX: The multi-threaded thread-unaware combination is bad. */
402 for (opt = options; opt->name; opt++)
404 int default_p = (opt->value == * opt->target);
410 fprintf (stderr, "-%-23.23s %s", opt->name, opt->description);
412 fprintf (stderr, " [active]\n");
414 fprintf (stderr, "\n");
416 case read_integer_option:
/* NOTE(review): this strncpy pair builds "name=N" into buf without a
   guaranteed NUL terminator (the "=N" copy writes only 2 bytes); safe only
   if option names stay well below the buffer size — consider snprintf. */
417 strncpy (buf, opt->name, 128);
418 strncpy (buf + strlen (opt->name), "=N", 2);
419 fprintf (stderr, "-%-23.23s %s", buf, opt->description);
420 fprintf (stderr, " [%d]\n", * opt->target);
426 fprintf (stderr, "\n");
/* Public, lock-protected wrapper around __mfu_set_options; the return type,
   locking lines, and "return rc" are elided in this listing. */
431 __mf_set_options (const char *optstr)
435 BEGIN_RECURSION_PROTECT ();
436 rc = __mfu_set_options (optstr);
437 /* XXX: It's not really that easy. A change to a bunch of parameters
438 can require updating auxiliary state or risk crashing:
439 free_queue_length, crumple_zone ... */
440 END_RECURSION_PROTECT ();
/* Unlocked option-string parser.  Walks a space-separated list of option
   words, matching each against the options[] table; supports a "no-" prefix
   (negation) and "name=N" integer values.  Unrecognized words produce a
   warning.  Large parts of the tokenizing loop are elided in this listing. */
447 __mfu_set_options (const char *optstr)
449 struct option *opts = 0;
453 const char *saved_optstr = optstr;
455 /* XXX: bounds-check for optstr! */
472 if (*optstr == '?' ||
473 strncmp (optstr, "help", 4) == 0)
475 /* Caller will print help and exit. */
479 if (strncmp (optstr, "no-", 3) == 0)
482 optstr = & optstr[3];
485 for (opts = options; opts->name; opts++)
487 if (strncmp (optstr, opts->name, strlen (opts->name)) == 0)
489 optstr += strlen (opts->name);
490 assert (opts->target);
497 *(opts->target) = opts->value;
499 case read_integer_option:
500 if (! negate && (*optstr == '=' && *(optstr+1)))
503 tmp = strtol (optstr, &nxt, 10);
504 if ((optstr != nxt) && (tmp != LONG_MAX))
507 *(opts->target) = (int)tmp;
521 "warning: unrecognized string '%s' in mudflap options\n",
523 optstr += strlen (optstr);
529 /* Special post-processing: bound __mf_lc_mask and free_queue_length for security. */
530 __mf_lc_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
531 __mf_opts.free_queue_length &= (__MF_FREEQ_MAX - 1);
533 /* Clear the lookup cache, in case the parameters got changed. */
535 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
/* Entry 0 is poisoned so a zeroed cache line never accidentally matches. */
537 __mf_lookup_cache[0].low = MAXPTR;
539 TRACE ("set options from `%s'\n", saved_optstr);
541 /* Call this unconditionally, in case -sigusr1-report was toggled. */
542 __mf_sigusr1_respond ();
/* Resolve one wrapped libc/pthread entry point to the *next* definition in
   library search order (RTLD_NEXT), i.e. the real implementation behind the
   mudflap wrapper.  Versioned symbols go through dlvsym.  Error paths call
   fprintf then (presumably, on elided lines) abort — TODO confirm. */
551 __mf_resolve_single_dynamic (struct __mf_dynamic_entry *e)
556 if (e->pointer) return;
559 if (e->version != NULL && e->version[0] != '\0') /* non-null/empty */
560 e->pointer = dlvsym (RTLD_NEXT, e->name, e->version);
563 e->pointer = dlsym (RTLD_NEXT, e->name);
569 fprintf (stderr, "mf: error in dlsym(\"%s\"): %s\n",
575 fprintf (stderr, "mf: dlsym(\"%s\") = NULL\n", e->name);
/* Resolve everything up to the dyn_INITRESOLVE marker at startup. */
582 __mf_resolve_dynamics ()
585 for (i = 0; i < dyn_INITRESOLVE; i++)
586 __mf_resolve_single_dynamic (& __mf_dynamic[i]);
590 /* NB: order must match enums in mf-impl.h */
591 struct __mf_dynamic_entry __mf_dynamic [] =
593 {NULL, "calloc", NULL},
594 {NULL, "free", NULL},
595 {NULL, "malloc", NULL},
596 {NULL, "mmap", NULL},
597 {NULL, "munmap", NULL},
598 {NULL, "realloc", NULL},
599 {NULL, "DUMMY", NULL}, /* dyn_INITRESOLVE */
601 {NULL, "pthread_create", PTHREAD_CREATE_VERSION},
602 {NULL, "pthread_join", NULL},
603 {NULL, "pthread_exit", NULL}
611 /* ------------------------------------------------------------------------ */
613 /* Lookup & manage automatic initialization of the five or so splay trees. */
/* Returns the (lazily created) splay tree holding live objects of the given
   type, keyed on object base address. */
615 __mf_object_tree (int type)
617 static splay_tree trees [__MF_TYPE_MAX+1];
618 assert (type >= 0 && type <= __MF_TYPE_MAX);
619 if (UNLIKELY (trees[type] == NULL))
620 trees[type] = splay_tree_new ();
/* __mf_init: runtime constructor (CTOR above).  Resolves real libc symbols,
   applies defaults then MUDFLAP_OPTIONS, and registers the runtime's own
   state plus page zero as NOACCESS.  Its header lines are elided here. */
630 /* Return if initialization has already been done. */
631 if (LIKELY (__mf_starting_p == 0))
634 /* This initial bootstrap phase requires that __mf_starting_p = 1. */
636 __mf_resolve_dynamics ();
640 __mf_set_default_options ();
642 ov = getenv ("MUDFLAP_OPTIONS");
645 int rc = __mfu_set_options (ov);
653 /* Initialize to a non-zero description epoch. */
654 __mf_describe_object (NULL);
/* Register a runtime-internal object as NOACCESS so the application cannot
   legally touch mudflap's own bookkeeping. */
656 #define REG_RESERVED(obj) \
657 __mf_register (& obj, sizeof(obj), __MF_TYPE_NOACCESS, # obj)
659 REG_RESERVED (__mf_lookup_cache);
660 REG_RESERVED (__mf_lc_mask);
661 REG_RESERVED (__mf_lc_shift);
662 /* XXX: others of our statics? */
664 /* Prevent access to *NULL. */
665 __mf_register (MINPTR, 1, __MF_TYPE_NOACCESS, "NULL");
666 __mf_lookup_cache[0].low = (uintptr_t) -1;
/* Linker-interposed main wrapper (--wrap main style).  On first entry, and
   only when -heur-std-data is enabled, registers argv, the environment, and
   libc standard data (errno, stdio streams) as STATIC objects so ordinary
   programs don't trip violations on them; then tail-calls the real main. */
672 __wrap_main (int argc, char* argv[])
674 extern char **environ;
676 static int been_here = 0;
678 if (__mf_opts.heur_std_data && ! been_here)
683 __mf_register (argv, sizeof(char *)*(argc+1), __MF_TYPE_STATIC, "argv[]");
684 for (i=0; i<argc; i++)
686 unsigned j = strlen (argv[i]);
687 __mf_register (argv[i], j+1, __MF_TYPE_STATIC, "argv element");
692 char *e = environ[i];
694 if (e == NULL) break;
695 j = strlen (environ[i]);
696 __mf_register (environ[i], j+1, __MF_TYPE_STATIC, "environ element");
698 __mf_register (environ, sizeof(char *)*(i+1), __MF_TYPE_STATIC, "environ[]");
700 __mf_register (& errno, sizeof (errno), __MF_TYPE_STATIC, "errno area");
702 __mf_register (stdin, sizeof (*stdin), __MF_TYPE_STATIC, "stdin");
703 __mf_register (stdout, sizeof (*stdout), __MF_TYPE_STATIC, "stdout");
704 __mf_register (stderr, sizeof (*stderr), __MF_TYPE_STATIC, "stderr");
706 /* Make some effort to register ctype.h static arrays. */
707 /* XXX: e.g., on Solaris, may need to register __ctype, _ctype, __ctype_mask, __toupper, etc. */
708 /* On modern Linux GLIBC, these are thread-specific and changeable, and are dealt
709 with in mf-hooks2.c. */
/* Two call forms — presumably selected by an elided #ifdef on how main
   wrapping is configured; TODO confirm against the full file. */
713 return main (argc, argv, environ);
715 return __real_main (argc, argv, environ);
/* Destructor: runs at exit (DTOR); body beyond the TRACE is elided here. */
721 extern void __mf_fini () DTOR;
724 TRACE ("__mf_fini\n");
730 /* ------------------------------------------------------------------------ */
/* Public, lock-protected access check entry point. */
733 void __mf_check (void *ptr, size_t sz, int type, const char *location)
736 BEGIN_RECURSION_PROTECT ();
737 __mfu_check (ptr, sz, type, location);
738 END_RECURSION_PROTECT ();
/* Core access checker: decide whether [ptr, ptr+sz) is a legal read/write.
   judgement: 0 = undecided, <0 = violation, >0 = okay.  On success the
   lookup cache entry for ptr is primed; on failure __mf_violation fires.
   Many lines (case labels, fallback heuristics plumbing) are elided. */
743 void __mfu_check (void *ptr, size_t sz, int type, const char *location)
745 unsigned entry_idx = __MF_CACHE_INDEX (ptr);
746 struct __mf_cache *entry = & __mf_lookup_cache [entry_idx];
747 int judgement = 0; /* 0=undecided; <0=violation; >0=okay */
748 uintptr_t ptr_low = (uintptr_t) ptr;
749 uintptr_t ptr_high = CLAMPSZ (ptr, sz);
750 struct __mf_cache old_entry = *entry;
752 if (UNLIKELY (__mf_opts.sigusr1_report))
753 __mf_sigusr1_respond ();
755 TRACE ("check ptr=%p b=%u size=%lu %s location=`%s'\n",
756 ptr, entry_idx, (unsigned long)sz,
757 (type == 0 ? "read" : "write"), location);
759 switch (__mf_opts.mudflap_mode)
762 /* It is tempting to poison the cache here similarly to
763 mode_populate. However that eliminates a valuable
764 distinction between these two modes. mode_nop is useful to
765 let a user count & trace every single check / registration
766 call. mode_populate is useful to let a program run fast
/* mode_populate: blindly cache the checked range as okay. */
773 entry->low = ptr_low;
774 entry->high = ptr_high;
/* mode_check proper begins here. */
780 unsigned heuristics = 0;
782 /* Advance aging/adaptation counters. */
783 static unsigned adapt_count;
785 if (UNLIKELY (__mf_opts.adapt_cache > 0 &&
786 adapt_count > __mf_opts.adapt_cache))
792 /* Looping only occurs if heuristics were triggered. */
793 while (judgement == 0)
795 DECLARE (void, free, void *p);
796 __mf_object_t* ovr_obj[1];
798 __mf_object_t** all_ovr_obj = NULL;
799 __mf_object_t** dealloc_me = NULL;
802 /* Find all overlapping objects. Be optimistic that there is just one. */
803 obj_count = __mf_find_objects (ptr_low, ptr_high, ovr_obj, 1);
804 if (UNLIKELY (obj_count > 1))
806 /* Allocate a real buffer and do the search again. */
807 DECLARE (void *, malloc, size_t c);
809 all_ovr_obj = CALL_REAL (malloc, (sizeof (__mf_object_t *) *
811 if (all_ovr_obj == NULL) abort ();
812 n = __mf_find_objects (ptr_low, ptr_high, all_ovr_obj, obj_count);
813 assert (n == obj_count);
814 dealloc_me = all_ovr_obj;
818 all_ovr_obj = ovr_obj;
822 /* Update object statistics. */
823 for (i = 0; i < obj_count; i++)
825 __mf_object_t *obj = all_ovr_obj[i];
826 assert (obj != NULL);
827 if (type == __MF_CHECK_READ)
834 /* Iterate over the various objects. There are a number of special cases. */
835 for (i = 0; i < obj_count; i++)
837 __mf_object_t *obj = all_ovr_obj[i];
839 /* Any __MF_TYPE_NOACCESS hit is bad. */
840 if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
843 /* Any object with a watch flag is bad. */
844 if (UNLIKELY (obj->watching_p))
845 judgement = -2; /* trigger VIOL_WATCH */
847 /* A read from an uninitialized object is bad. */
848 if (UNLIKELY (__mf_opts.check_initialization
850 && type == __MF_CHECK_READ
852 && obj->write_count == 0
853 /* uninitialized (heap) */
854 && obj->type == __MF_TYPE_HEAP))
858 /* We now know that the access spans one or more valid objects. */
859 if (LIKELY (judgement >= 0))
860 for (i = 0; i < obj_count; i++)
862 __mf_object_t *obj = all_ovr_obj[i];
864 /* Is this access entirely contained within this object? */
865 if (LIKELY (ptr_low >= obj->low && ptr_high <= obj->high))
/* Cache this object's full extent for fast future hits. */
868 entry->low = obj->low;
869 entry->high = obj->high;
873 /* XXX: Access runs off left or right side of this
874 object. That could be okay, if there are
875 other objects that fill in all the holes. */
878 if (dealloc_me != NULL)
879 CALL_REAL (free, dealloc_me);
881 /* If the judgment is still unknown at this stage, loop
882 around at most one more time. */
885 if (heuristics++ < 2) /* XXX parametrize this number? */
886 judgement = __mf_heuristic_check (ptr_low, ptr_high);
900 if (__mf_opts.collect_stats)
/* Count an entry as "reused" when this check overwrote a live cache slot. */
904 if (LIKELY (old_entry.low != entry->low || old_entry.high != entry->high))
905 /* && (old_entry.low != 0) && (old_entry.high != 0)) */
906 __mf_lookup_cache_reusecount [entry_idx] ++;
909 if (UNLIKELY (judgement < 0))
910 __mf_violation (ptr, sz,
911 (uintptr_t) __builtin_return_address (0), location,
913 (type == __MF_CHECK_READ ? __MF_VIOL_READ : __MF_VIOL_WRITE) :
/* Allocate (via the real calloc) and populate a new __mf_object_t for the
   range [low,high], record optional timestamp/thread/backtrace provenance,
   and link it into the per-type splay tree.  The "new_obj->low = low" line
   and the return statement are elided in this listing. */
918 static __mf_object_t *
919 __mf_insert_new_object (uintptr_t low, uintptr_t high, int type,
920 const char *name, uintptr_t pc)
922 DECLARE (void *, calloc, size_t c, size_t n);
924 __mf_object_t *new_obj;
925 new_obj = CALL_REAL (calloc, 1, sizeof(__mf_object_t));
927 new_obj->high = high;
928 new_obj->type = type;
929 new_obj->name = name;
930 new_obj->alloc_pc = pc;
931 #if HAVE_GETTIMEOFDAY
932 if (__mf_opts.timestamps)
933 gettimeofday (& new_obj->alloc_time, NULL);
936 new_obj->alloc_thread = pthread_self ();
939 if (__mf_opts.backtrace > 0 && (type == __MF_TYPE_HEAP || type == __MF_TYPE_HEAP_I))
940 new_obj->alloc_backtrace_size =
941 __mf_backtrace (& new_obj->alloc_backtrace,
944 __mf_link_object (new_obj);
/* Evict any lookup-cache entries that refer to old_obj's extent, so a
   stale hit cannot validate an access to a now-dead object. */
950 __mf_uncache_object (__mf_object_t *old_obj)
952 /* Remove any low/high pointers for this object from the lookup cache. */
954 /* Can it possibly exist in the cache? */
955 if (LIKELY (old_obj->read_count + old_obj->write_count))
957 uintptr_t low = old_obj->low;
958 uintptr_t high = old_obj->high;
959 unsigned idx_low = __MF_CACHE_INDEX (low);
960 unsigned idx_high = __MF_CACHE_INDEX (high);
962 for (i = idx_low; i <= idx_high; i++)
964 struct __mf_cache *entry = & __mf_lookup_cache [i];
965 /* NB: the "||" in the following test permits this code to
966 tolerate the situation introduced by __mf_check over
967 contiguous objects, where a cache entry spans several
969 if (entry->low == low || entry->high == high)
972 entry->high = MINPTR;
/* Public, lock-protected registration entry point. */
980 __mf_register (void *ptr, size_t sz, int type, const char *name)
983 BEGIN_RECURSION_PROTECT ();
984 __mfu_register (ptr, sz, type, name);
985 END_RECURSION_PROTECT ();
/* Unlocked object registration.  Behavior depends on mudflap_mode:
   mode_violate reports, mode_populate clears the cache, mode_check searches
   for same-type overlaps — tolerating exact duplicates of STATIC/GUESS
   registrations — and otherwise inserts a new object or reports a
   violation.  Case labels and some braces are elided in this listing. */
991 __mfu_register (void *ptr, size_t sz, int type, const char *name)
993 TRACE ("register ptr=%p size=%lu type=%x name='%s'\n",
994 ptr, (unsigned long) sz, type, name ? name : "");
996 if (__mf_opts.collect_stats)
998 __mf_count_register ++;
/* Out-of-range types are folded into bucket 0 for accounting. */
999 __mf_total_register_size [(type < 0) ? 0 :
1000 (type > __MF_TYPE_MAX) ? 0 :
1004 if (UNLIKELY (__mf_opts.sigusr1_report))
1005 __mf_sigusr1_respond ();
1007 switch (__mf_opts.mudflap_mode)
1013 __mf_violation (ptr, sz, (uintptr_t) __builtin_return_address (0), NULL,
1014 __MF_VIOL_REGISTER);
1018 /* Clear the cache. */
1019 /* XXX: why the entire cache? */
1021 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1023 __mf_lookup_cache[0].low = MAXPTR;
1028 __mf_object_t *ovr_objs [1];
1029 unsigned num_overlapping_objs;
1030 uintptr_t low = (uintptr_t) ptr;
1031 uintptr_t high = CLAMPSZ (ptr, sz);
1032 uintptr_t pc = (uintptr_t) __builtin_return_address (0);
1034 /* Treat unknown size indication as 1. */
1035 if (UNLIKELY (sz == 0)) sz = 1;
1037 /* Look for objects only of the same type. This will e.g. permit a registration
1038 of a STATIC overlapping with a GUESS, and a HEAP with a NOACCESS. At
1039 __mf_check time however harmful overlaps will be detected. */
1040 num_overlapping_objs = __mf_find_objects2 (low, high, ovr_objs, 1, type);
1042 /* Handle overlaps. */
1043 if (UNLIKELY (num_overlapping_objs > 0))
1045 __mf_object_t *ovr_obj = ovr_objs[0];
1047 /* Accept certain specific duplication pairs. */
1048 if (((type == __MF_TYPE_STATIC) || (type == __MF_TYPE_GUESS))
1049 && ovr_obj->low == low
1050 && ovr_obj->high == high
1051 && ovr_obj->type == type)
1053 /* Duplicate registration for static objects may come
1054 from distinct compilation units. */
1055 VERBOSE_TRACE ("harmless duplicate reg %p-%p `%s'\n",
1056 (void *) low, (void *) high,
1057 (ovr_obj->name ? ovr_obj->name : ""));
1061 /* Alas, a genuine violation. */
1064 /* Two or more *real* mappings here. */
1065 __mf_violation ((void *) ptr, sz,
1066 (uintptr_t) __builtin_return_address (0), NULL,
1067 __MF_VIOL_REGISTER);
1070 else /* No overlapping objects: AOK. */
1071 __mf_insert_new_object (low, high, type, name, pc);
1073 /* We could conceivably call __mf_check() here to prime the cache,
1074 but then the read_count/write_count field is not reliable. */
1077 } /* end switch (__mf_opts.mudflap_mode) */
/* Public, lock-protected unregistration entry point. */
1082 __mf_unregister (void *ptr, size_t sz, int type)
1085 BEGIN_RECURSION_PROTECT ();
1086 __mfu_unregister (ptr, sz, type);
1087 END_RECURSION_PROTECT ();
/* Unlocked unregistration: locate the exactly-matching live object, unlink
   and uncache it, optionally wipe its contents, and either retire it to the
   per-type cemetery ring (persistent_count > 0) or free it.  A mismatched
   base/size is an UNREGISTER violation.  Case labels and some assignments
   (e.g. old_obj = objs[0]) are elided in this listing. */
1093 __mfu_unregister (void *ptr, size_t sz, int type)
1095 DECLARE (void, free, void *ptr);
1097 if (UNLIKELY (__mf_opts.sigusr1_report))
1098 __mf_sigusr1_respond ();
1100 TRACE ("unregister ptr=%p size=%lu type=%x\n", ptr, (unsigned long) sz, type);
1102 switch (__mf_opts.mudflap_mode)
1108 __mf_violation (ptr, sz,
1109 (uintptr_t) __builtin_return_address (0), NULL,
1110 __MF_VIOL_UNREGISTER);
1114 /* Clear the cache. */
1116 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1118 __mf_lookup_cache[0].low = MAXPTR;
1123 __mf_object_t *old_obj = NULL;
1124 __mf_object_t *del_obj = NULL; /* Object to actually delete. */
1125 __mf_object_t *objs[1] = {NULL};
1126 unsigned num_overlapping_objs;
1128 num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
1129 CLAMPSZ (ptr, sz), objs, 1, type);
1131 /* Special case for HEAP_I - see free & realloc hook. They don't
1132 know whether the input region was HEAP or HEAP_I before
1133 unmapping it. Here we give HEAP a try in case HEAP_I
1135 if ((type == __MF_TYPE_HEAP_I) && (num_overlapping_objs == 0))
1137 num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
1138 CLAMPSZ (ptr, sz), objs, 1, __MF_TYPE_HEAP);
1142 if (UNLIKELY ((num_overlapping_objs != 1) /* more than one overlap */
1143 || ((sz == 0) ? 0 : (sz != (old_obj->high - old_obj->low + 1))) /* size mismatch */
1144 || ((uintptr_t) ptr != old_obj->low))) /* base mismatch */
1146 __mf_violation (ptr, sz,
1147 (uintptr_t) __builtin_return_address (0), NULL,
1148 __MF_VIOL_UNREGISTER);
1152 __mf_unlink_object (old_obj);
1153 __mf_uncache_object (old_obj);
1155 /* Wipe buffer contents if desired. */
1156 if ((__mf_opts.wipe_stack && old_obj->type == __MF_TYPE_STACK)
1157 || (__mf_opts.wipe_heap && (old_obj->type == __MF_TYPE_HEAP
1158 || old_obj->type == __MF_TYPE_HEAP_I)))
1160 memset ((void *) old_obj->low,
1162 (size_t) (old_obj->high - old_obj->low + 1));
1165 /* Manage the object cemetary. */
1166 if (__mf_opts.persistent_count > 0 &&
1167 old_obj->type >= 0 &&
1168 old_obj->type <= __MF_TYPE_MAX_CEM)
1170 old_obj->deallocated_p = 1;
1171 old_obj->dealloc_pc = (uintptr_t) __builtin_return_address (0);
1172 #if HAVE_GETTIMEOFDAY
1173 if (__mf_opts.timestamps)
1174 gettimeofday (& old_obj->dealloc_time, NULL);
1177 old_obj->dealloc_thread = pthread_self ();
1180 if (__mf_opts.backtrace > 0 && old_obj->type == __MF_TYPE_HEAP)
1181 old_obj->dealloc_backtrace_size =
1182 __mf_backtrace (& old_obj->dealloc_backtrace,
1185 /* Encourage this object to be displayed again in current epoch. */
1186 old_obj->description_epoch --;
1188 /* Put this object into the cemetary. This may require this plot to
1189 be recycled, and the previous resident to be designated del_obj. */
1191 unsigned row = old_obj->type;
1192 unsigned plot = __mf_object_dead_head [row];
1194 del_obj = __mf_object_cemetary [row][plot];
1195 __mf_object_cemetary [row][plot] = old_obj;
/* Wrap the ring index (the increment line is elided in this listing). */
1197 if (plot == __mf_opts.persistent_count) plot = 0;
1198 __mf_object_dead_head [row] = plot;
1204 if (__mf_opts.print_leaks)
1206 if ((old_obj->read_count + old_obj->write_count) == 0 &&
1207 (old_obj->type == __MF_TYPE_HEAP
1208 || old_obj->type == __MF_TYPE_HEAP_I))
1212 "mudflap warning: unaccessed registered object:\n");
1213 __mf_describe_object (old_obj);
1217 if (del_obj != NULL) /* May or may not equal old_obj. */
1219 if (__mf_opts.backtrace > 0)
1221 CALL_REAL(free, del_obj->alloc_backtrace);
1222 if (__mf_opts.persistent_count > 0)
1224 CALL_REAL(free, del_obj->dealloc_backtrace);
1227 CALL_REAL(free, del_obj);
1232 } /* end switch (__mf_opts.mudflap_mode) */
1235 if (__mf_opts.collect_stats)
1237 __mf_count_unregister ++;
1238 __mf_total_unregister_size += sz;
/* Accumulator for the cache-adaptation pass; the "struct tree_stats {"
   header and the obj_count field (referenced below) fall on elided lines. */
1247 unsigned long total_size;
1248 unsigned live_obj_count;
1249 double total_weight;
1250 double weighted_size;
/* Per-address-bit liveness histogram: [bit position][bit value 0/1]. */
1251 unsigned long weighted_address_bits [sizeof (uintptr_t) * 8][2];
/* splay_tree_foreach callback: fold one live object's size/liveness into
   the stats, tally which address bits discriminate lively objects, then
   age (halve) the object's liveness counter. */
1257 __mf_adapt_cache_fn (splay_tree_node n, void *param)
1259 __mf_object_t *obj = (__mf_object_t *) n->value;
1260 struct tree_stats *s = (struct tree_stats *) param;
1262 assert (obj != NULL && s != NULL);
1264 /* Exclude never-accessed objects. */
1265 if (obj->read_count + obj->write_count)
1268 s->total_size += (obj->high - obj->low + 1);
1275 /* VERBOSE_TRACE ("analyze low=%p live=%u name=`%s'\n",
1276 (void *) obj->low, obj->liveness, obj->name); */
1278 s->live_obj_count ++;
1279 s->total_weight += (double) obj->liveness;
1281 (double) (obj->high - obj->low + 1) *
1282 (double) obj->liveness;
1285 for (i=0; i<sizeof(uintptr_t) * 8; i++)
1287 unsigned bit = addr & 1;
1288 s->weighted_address_bits[i][bit] += obj->liveness;
1292 /* Age the liveness value. */
1293 obj->liveness >>= 1;
/* __mf_adapt_cache: periodically re-derive the lookup cache's mask and
   shift from observed access patterns, then flush the cache if either
   parameter changed.  The function header is elided in this listing. */
1304 struct tree_stats s;
1305 uintptr_t new_mask = 0;
1306 unsigned char new_shift;
1307 float cache_utilization;
1309 static float smoothed_new_shift = -1.0;
1312 memset (&s, 0, sizeof (s));
1314 splay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP), __mf_adapt_cache_fn, (void *) & s);
1315 splay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I), __mf_adapt_cache_fn, (void *) & s);
1316 splay_tree_foreach (__mf_object_tree (__MF_TYPE_STACK), __mf_adapt_cache_fn, (void *) & s);
1317 splay_tree_foreach (__mf_object_tree (__MF_TYPE_STATIC), __mf_adapt_cache_fn, (void *) & s);
1318 splay_tree_foreach (__mf_object_tree (__MF_TYPE_GUESS), __mf_adapt_cache_fn, (void *) & s);
1320 /* Maybe we're dealing with funny aging/adaptation parameters, or an
1321 empty tree. Just leave the cache alone in such cases, rather
1322 than risk dying by division-by-zero. */
/* NOTE(review): "! (s.obj_count > 0) && ..." looks inverted relative to the
   comment above (it proceeds only when obj_count is zero?) — cannot confirm
   from this truncated listing; check against the full source. */
1323 if (! (s.obj_count > 0) && (s.live_obj_count > 0) && (s.total_weight > 0.0))
1326 /* Guess a good value for the shift parameter by finding an address bit that is a
1327 good discriminant of lively objects. */
1329 for (i=0; i<sizeof (uintptr_t)*8; i++)
1331 float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
1332 if (max_value < value) max_value = value;
1334 for (i=0; i<sizeof (uintptr_t)*8; i++)
1336 float shoulder_factor = 0.7; /* Include slightly less popular bits too. */
1337 float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
1338 if (value >= max_value * shoulder_factor)
1341 if (smoothed_new_shift < 0) smoothed_new_shift = __mf_lc_shift;
1342 /* Converge toward this slowly to reduce flapping. */
1343 smoothed_new_shift = 0.9*smoothed_new_shift + 0.1*i;
1344 new_shift = (unsigned) (smoothed_new_shift + 0.5);
1345 assert (new_shift < sizeof (uintptr_t)*8);
1347 /* Count number of used buckets. */
1348 cache_utilization = 0.0;
1349 for (i = 0; i < (1 + __mf_lc_mask); i++)
1350 if (__mf_lookup_cache[i].low != 0 || __mf_lookup_cache[i].high != 0)
1351 cache_utilization += 1.0;
1352 cache_utilization /= (1 + __mf_lc_mask);
1354 new_mask |= 0x3ff; /* XXX: force a large cache. */
1355 new_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
1357 VERBOSE_TRACE ("adapt cache obj=%u/%u sizes=%lu/%.0f/%.0f => "
1358 "util=%u%% m=%p s=%u\n",
1359 s.obj_count, s.live_obj_count, s.total_size, s.total_weight, s.weighted_size,
1360 (unsigned)(cache_utilization*100.0), (void *) new_mask, new_shift);
1362 /* We should reinitialize cache if its parameters have changed. */
1363 if (new_mask != __mf_lc_mask ||
1364 new_shift != __mf_lc_shift)
1366 __mf_lc_mask = new_mask;
1367 __mf_lc_shift = new_shift;
1369 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1371 __mf_lookup_cache[0].low = MAXPTR;
1377 /* __mf_find_object[s] */
1379 /* Find overlapping live objects between [low,high]. Return up to
1380 max_objs of their pointers in objs[]. Return total count of
1381 overlaps (may exceed max_objs). */
/* Search the per-TYPE splay tree for live objects overlapping
   [ptr_low, ptr_high].  Stores up to max_objs hits into objs[] and,
   per the header comment above, returns the total overlap count.
   NOTE(review): this extraction is missing several lines (local
   declarations, braces, the return statement); comments below cover
   only the visible statements.  */
1384 __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
1385 __mf_object_t **objs, unsigned max_objs, int type)
1388 splay_tree t = __mf_object_tree (type);
1389 splay_tree_key k = (splay_tree_key) ptr_low;
/* First probe for an object whose base address equals ptr_low. */
1392 splay_tree_node n = splay_tree_lookup (t, k);
1393 /* An exact match for base address implies a hit. */
1396 if (count < max_objs)
1397 objs[count] = (__mf_object_t *) n->value;
1401 /* Iterate left then right near this key value to find all overlapping objects. */
1402 for (direction = 0; direction < 2; direction ++)
1404 /* Reset search origin. */
1405 k = (splay_tree_key) ptr_low;
/* direction 0 walks successors (rightward), direction 1 walks
   predecessors (leftward) from the search origin. */
1411 n = (direction == 0 ? splay_tree_successor (t, k) : splay_tree_predecessor (t, k));
1412 if (n == NULL) break;
1413 obj = (__mf_object_t *) n->value;
/* Objects are keyed by their low address; stop walking once the
   candidate no longer intersects the query range. */
1415 if (! (obj->low <= ptr_high && obj->high >= ptr_low)) /* No overlap? */
1418 if (count < max_objs)
1419 objs[count] = (__mf_object_t *) n->value;
/* Advance the walk origin to this object's base address. */
1422 k = (splay_tree_key) obj->low;
/* Search every object tree (one per object type) for live objects
   overlapping [ptr_low, ptr_high], accumulating results via
   __mf_find_objects2.  NOTE(review): the accumulation and return
   lines are missing from this extraction.  */
1431 __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
1432 __mf_object_t **objs, unsigned max_objs)
1437 /* Search each splay tree for overlaps. */
1438 for (type = __MF_TYPE_NOACCESS; type <= __MF_TYPE_GUESS; type++)
1440 unsigned c = __mf_find_objects2 (ptr_low, ptr_high, objs, max_objs, type);
1446 else /* NB: C may equal 0 */
1459 /* __mf_link_object */
/* Insert NODE into the splay tree that matches its type, keyed by
   the object's low address.  */
1462 __mf_link_object (__mf_object_t *node)
1464 splay_tree t = __mf_object_tree (node->type);
1465 splay_tree_insert (t, (splay_tree_key) node->low, (splay_tree_value) node);
1468 /* __mf_unlink_object */
/* Remove NODE from the splay tree that matches its type; the key is
   the object's low address, mirroring __mf_link_object.  */
1471 __mf_unlink_object (__mf_object_t *node)
1473 splay_tree t = __mf_object_tree (node->type);
1474 splay_tree_remove (t, (splay_tree_key) node->low);
1477 /* __mf_find_dead_objects */
1479 /* Find overlapping dead objects between [low,high]. Return up to
1480 max_objs of their pointers in objs[]. Return total count of
1481 overlaps (may exceed max_objs). */
/* Search the object cemetery (a per-type circular buffer of recently
   deallocated objects, kept only when persistent_count > 0) for dead
   objects overlapping [low, high].  NOTE(review): several lines
   (count bookkeeping, early exits, return) are missing from this
   extraction.  */
1484 __mf_find_dead_objects (uintptr_t low, uintptr_t high,
1485 __mf_object_t **objs, unsigned max_objs)
/* Cemetery bookkeeping only exists in persistent mode. */
1487 if (__mf_opts.persistent_count > 0)
1490 unsigned recollection = 0;
1493 assert (low <= high);
1494 assert (max_objs == 0 || objs != NULL);
1496 /* Widen the search from the most recent plots in each row, looking
1497 backward in time. */
1499 while (recollection < __mf_opts.persistent_count)
1503 for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
/* __mf_object_dead_head[row] indexes the most recent burial in
   this row's circular buffer. */
1508 plot = __mf_object_dead_head [row];
1509 for (i = 0; i <= recollection; i ++)
1513 /* Look backward through row: it's a circular buffer. */
1514 if (plot > 0) plot --;
1515 else plot = __mf_opts.persistent_count - 1;
1517 obj = __mf_object_cemetary [row][plot];
1518 if (obj && obj->low <= high && obj->high >= low)
1520 /* Found an overlapping dead object! */
1521 if (count < max_objs)
1531 /* Look farther back in time. */
/* Exponential widening: 0, 1, 3, 7, ... plots per row. */
1532 recollection = (recollection * 2) + 1;
1541 /* __mf_describe_object */
/* Print a human-readable description of OBJ to stderr: name, bounds,
   size, type, access counts, allocation (and, in persistent mode,
   deallocation) time/pc/backtrace.  A static epoch counter supports
   the `abbreviate' option: an object already described in the current
   epoch gets only a one-line summary.  NOTE(review): the extraction
   is missing lines, including the apparent NULL-OBJ epoch-reset path
   referenced by callers (`__mf_describe_object (NULL)').  */
1544 __mf_describe_object (__mf_object_t *obj)
1546 static unsigned epoch = 0;
/* Abbreviated form: object was already described this epoch. */
1553 if (__mf_opts.abbreviate && obj->description_epoch == epoch)
1556 "mudflap object %p: name=`%s'\n",
1557 (void *) obj, (obj->name ? obj->name : ""));
/* Remember that this object has been described in this epoch. */
1561 obj->description_epoch = epoch;
1564 "mudflap object %p: name=`%s'\n"
1565 "bounds=[%p,%p] size=%lu area=%s check=%ur/%uw liveness=%u%s\n"
1566 "alloc time=%lu.%06lu pc=%p"
1571 (void *) obj, (obj->name ? obj->name : ""),
1572 (void *) obj->low, (void *) obj->high,
1573 (unsigned long) (obj->high - obj->low + 1),
/* Map the object type code to a display string. */
1574 (obj->type == __MF_TYPE_NOACCESS ? "no-access" :
1575 obj->type == __MF_TYPE_HEAP ? "heap" :
1576 obj->type == __MF_TYPE_HEAP_I ? "heap-init" :
1577 obj->type == __MF_TYPE_STACK ? "stack" :
1578 obj->type == __MF_TYPE_STATIC ? "static" :
1579 obj->type == __MF_TYPE_GUESS ? "guess" :
1581 obj->read_count, obj->write_count, obj->liveness,
1582 obj->watching_p ? " watching" : "",
1583 obj->alloc_time.tv_sec, obj->alloc_time.tv_usec,
1584 (void *) obj->alloc_pc
/* Thread id printed only in threaded builds, presumably guarded by
   a preprocessor conditional lost in this extraction.  */
1586 , (unsigned) obj->alloc_thread
1590 if (__mf_opts.backtrace > 0)
1593 for (i=0; i<obj->alloc_backtrace_size; i++)
1594 fprintf (stderr, " %s\n", obj->alloc_backtrace[i]);
/* Deallocation details exist only in persistent mode. */
1597 if (__mf_opts.persistent_count > 0)
1599 if (obj->deallocated_p)
1601 fprintf (stderr, "dealloc time=%lu.%06lu pc=%p"
1606 obj->dealloc_time.tv_sec, obj->dealloc_time.tv_usec,
1607 (void *) obj->dealloc_pc
1609 , (unsigned) obj->dealloc_thread
1614 if (__mf_opts.backtrace > 0)
1617 for (i=0; i<obj->dealloc_backtrace_size; i++)
1618 fprintf (stderr, " %s\n", obj->dealloc_backtrace[i]);
/* splay_tree_foreach callback: describe one still-registered (leaked)
   heap object.  PARAM points to the caller's running leak counter.  */
1626 __mf_report_leaks_fn (splay_tree_node n, void *param)
1628 __mf_object_t *node = (__mf_object_t *) n->value;
1629 unsigned *count = (unsigned *) param;
1634 fprintf (stderr, "Leaked object %u:\n", (*count));
1635 __mf_describe_object (node);
/* Walk both heap object trees (HEAP and HEAP_I) reporting every
   object still registered at exit as a leak; callers use the
   accumulated count (declaration/return lines missing from this
   extraction).  */
1642 __mf_report_leaks ()
1646 (void) splay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP),
1647 __mf_report_leaks_fn, & count);
1648 (void) splay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I),
1649 __mf_report_leaks_fn, & count);
1654 /* ------------------------------------------------------------------------ */
/* NOTE(review): this region appears to span two routines whose
   headers are missing from the extraction: a locking wrapper (the
   BEGIN/END_RECURSION_PROTECT pair) and the unlocked report body that
   prints accumulated statistics, lookup-cache utilization, live/dead
   object counts, and (optionally) the leak report.  */
1661 BEGIN_RECURSION_PROTECT ();
1663 END_RECURSION_PROTECT ();
/* Aggregate call/violation statistics, printed only on request. */
1670 if (__mf_opts.collect_stats)
1675 "calls to __mf_check: %lu\n"
1676 " __mf_register: %lu [%luB, %luB, %luB, %luB, %luB]\n"
1677 " __mf_unregister: %lu [%luB]\n"
1678 " __mf_violation: [%lu, %lu, %lu, %lu, %lu]\n",
1680 __mf_count_register,
/* Per-type registered-byte totals, indexed by object type. */
1681 __mf_total_register_size[0], __mf_total_register_size[1],
1682 __mf_total_register_size[2], __mf_total_register_size[3],
1683 __mf_total_register_size[4], /* XXX */
1684 __mf_count_unregister, __mf_total_unregister_size,
1685 __mf_count_violation[0], __mf_count_violation[1],
1686 __mf_count_violation[2], __mf_count_violation[3],
1687 __mf_count_violation[4]);
1690 "calls with reentrancy: %lu\n", __mf_reentrancy);
1693 " lock contention: %lu\n", __mf_lock_contention);
1696 /* Lookup cache stats. */
1699 unsigned max_reuse = 0;
1700 unsigned num_used = 0;
1701 unsigned num_unused = 0;
1703 for (i = 0; i < LOOKUP_CACHE_SIZE; i++)
1705 if (__mf_lookup_cache_reusecount[i])
1709 if (max_reuse < __mf_lookup_cache_reusecount[i])
1710 max_reuse = __mf_lookup_cache_reusecount[i];
1712 fprintf (stderr, "lookup cache slots used: %u unused: %u peak-reuse: %u\n",
1713 num_used, num_unused, max_reuse);
/* Count live objects by querying the full address range. */
1717 unsigned live_count;
1718 live_count = __mf_find_objects (MINPTR, MAXPTR, NULL, 0);
1719 fprintf (stderr, "number of live objects: %u\n", live_count);
/* In persistent mode, also count zombies in the cemetery. */
1722 if (__mf_opts.persistent_count > 0)
1724 unsigned dead_count = 0;
1726 for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
1727 for (plot = 0 ; plot < __mf_opts.persistent_count; plot ++)
1728 if (__mf_object_cemetary [row][plot] != 0)
1730 fprintf (stderr, " zombie objects: %u\n", dead_count);
/* Leak reporting is only meaningful in full checking mode. */
1733 if (__mf_opts.print_leaks && (__mf_opts.mudflap_mode == mode_check))
1736 extern void * __mf_wrap_alloca_indirect (size_t c);
1738 /* Free up any remaining alloca()'d blocks. */
1739 __mf_wrap_alloca_indirect (0);
1740 __mf_describe_object (NULL); /* Reset description epoch. */
1741 l = __mf_report_leaks ();
1742 fprintf (stderr, "number of leaked objects: %u\n", l);
1746 /* __mf_backtrace */
/* Collect a stack traceback into a freshly allocated array of strings
   (returned via *SYMBOLS; caller owns it) and return the number of
   entries.  Uses backtrace()/backtrace_symbols() when available,
   otherwise falls back to unrolled __builtin_return_address calls and
   hand-formatted hex addresses.  Leading frames are trimmed: all
   frames up to GUESS_PC if it is found, else GUESS_OMIT_LEVELS
   frames.  */
1749 __mf_backtrace (char ***symbols, void *guess_pc, unsigned guess_omit_levels)
/* Over-allocate by the number of levels we intend to trim. */
1752 unsigned pc_array_size = __mf_opts.backtrace + guess_omit_levels;
1753 unsigned remaining_size;
1754 unsigned omitted_size = 0;
/* Use the real (unwrapped) allocator entry points. */
1756 DECLARE (void, free, void *ptr);
1757 DECLARE (void *, calloc, size_t c, size_t n);
1758 DECLARE (void *, malloc, size_t n);
1760 pc_array = CALL_REAL (calloc, pc_array_size, sizeof (void *) );
1761 #ifdef HAVE_BACKTRACE
1762 pc_array_size = backtrace (pc_array, pc_array_size);
/* Fallback: FETCH(n) grabs return address n, shrinking the count at
   the first NULL frame.  */
1764 #define FETCH(n) do { if (pc_array_size >= n) { \
1765 pc_array[n] = __builtin_return_address(n); \
1766 if (pc_array[n] == 0) pc_array_size = n; } } while (0)
1768 /* Unroll some calls __builtin_return_address because this function
1769 only takes a literal integer parameter. */
1772 /* XXX: __builtin_return_address sometimes crashes (!) on >0 arguments,
1773 rather than simply returning 0. :-( */
1782 if (pc_array_size > 8) pc_array_size = 9;
1784 if (pc_array_size > 0) pc_array_size = 1;
1790 /* We want to trim the first few levels of the stack traceback,
1791 since they contain libmudflap wrappers and junk. If pc_array[]
1792 ends up containing a non-NULL guess_pc, then trim everything
1793 before that. Otherwise, omit the first guess_omit_levels
1796 if (guess_pc != NULL)
1797 for (i=0; i<pc_array_size; i++)
1798 if (pc_array [i] == guess_pc)
1801 if (omitted_size == 0) /* No match? */
1802 if (pc_array_size > guess_omit_levels)
1803 omitted_size = guess_omit_levels;
1805 remaining_size = pc_array_size - omitted_size;
1807 #ifdef HAVE_BACKTRACE_SYMBOLS
1808 *symbols = backtrace_symbols (pc_array + omitted_size, remaining_size);
1811 /* Let's construct a buffer by hand. It will have <remaining_size>
1812 char*'s at the front, pointing at individual strings immediately
/* Each formatted address gets a fixed-size `perline' slot. */
1817 enum { perline = 30 };
1818 buffer = CALL_REAL (malloc, remaining_size * (perline + sizeof(char *)));
1819 pointers = (char **) buffer;
1820 chars = (char *)buffer + (remaining_size * sizeof (char *));
1821 for (i = 0; i < remaining_size; i++)
1823 pointers[i] = chars;
1824 sprintf (chars, "[0x%p]", pc_array [omitted_size + i]);
1825 chars = chars + perline;
1827 *symbols = pointers;
1830 CALL_REAL (free, pc_array);
1832 return remaining_size;
1835 /* ------------------------------------------------------------------------ */
1836 /* __mf_violation */
/* Report an access violation at PTR/SZ (program counter PC, source
   LOCATION, violation TYPE), then act per the configured
   violation_mode: print a message, optionally a backtrace, describe
   nearby live and dead objects, and finally nothing / SIGSEGV /
   attach gdb.  NOTE(review): braces and some statements are missing
   from this extraction.  */
1839 __mf_violation (void *ptr, size_t sz, uintptr_t pc,
1840 const char *location, int type)
1843 static unsigned violation_number;
1844 DECLARE(void, free, void *ptr);
1846 TRACE ("violation pc=%p location=%s type=%d ptr=%p size=%lu\n",
1848 (location != NULL ? location : ""), type, ptr, (unsigned long) sz);
/* Clamp out-of-range type codes into slot 0 of the stats array. */
1850 if (__mf_opts.collect_stats)
1851 __mf_count_violation [(type < 0) ? 0 :
1852 (type > __MF_VIOL_WATCH) ? 0 :
1855 /* Print out a basic warning message. */
1856 if (__mf_opts.verbose_violations)
1859 unsigned num_helpful = 0;
1860 struct timeval now = { 0, 0 };
1861 #if HAVE_GETTIMEOFDAY
1862 gettimeofday (& now, NULL);
1865 violation_number ++;
1868 "mudflap violation %u (%s): time=%lu.%06lu "
1869 "ptr=%p size=%lu\npc=%p%s%s%s\n",
/* Map the violation type code to a display string. */
1871 ((type == __MF_VIOL_READ) ? "check/read" :
1872 (type == __MF_VIOL_WRITE) ? "check/write" :
1873 (type == __MF_VIOL_REGISTER) ? "register" :
1874 (type == __MF_VIOL_UNREGISTER) ? "unregister" :
1875 (type == __MF_VIOL_WATCH) ? "watch" : "unknown"),
1876 now.tv_sec, now.tv_usec,
1877 (void *) ptr, (unsigned long)sz, (void *) pc,
1878 (location != NULL ? " location=`" : ""),
1879 (location != NULL ? location : ""),
1880 (location != NULL ? "'" : ""));
1882 if (__mf_opts.backtrace > 0)
/* Omit 2 frames: __mf_violation and its caller (__mf_check). */
1887 num = __mf_backtrace (& symbols, (void *) pc, 2);
1888 /* Note: backtrace_symbols calls malloc(). But since we're in
1889 __mf_violation and presumably __mf_check, it'll detect
1890 recursion, and not put the new string into the database. */
1892 for (i=0; i<num; i++)
1893 fprintf (stderr, " %s\n", symbols[i]);
1895 /* Calling free() here would trigger a violation. */
1896 CALL_REAL(free, symbols);
1900 /* Look for nearby objects. For this, we start with s_low/s_high
1901 pointing to the given area, looking for overlapping objects.
1902 If none show up, widen the search area and keep looking. */
1904 if (sz == 0) sz = 1;
/* Pass 0 searches live objects, pass 1 searches dead ones. */
1906 for (dead_p = 0; dead_p <= 1; dead_p ++) /* for dead_p in 0 1 */
1908 enum {max_objs = 3}; /* magic */
1909 __mf_object_t *objs[max_objs];
1910 unsigned num_objs = 0;
1911 uintptr_t s_low, s_high;
1915 s_low = (uintptr_t) ptr;
1916 s_high = CLAMPSZ (ptr, sz);
1918 while (tries < 16) /* magic */
1921 num_objs = __mf_find_dead_objects (s_low, s_high, objs, max_objs);
1923 num_objs = __mf_find_objects (s_low, s_high, objs, max_objs);
1925 if (num_objs) /* good enough */
1930 /* XXX: tune this search strategy. It's too dependent on
1931 sz, which can vary from 1 to very big (when array index
1932 checking) numbers. */
/* Quadratically widen the search window on each retry. */
1933 s_low = CLAMPSUB (s_low, (sz * tries * tries));
1934 s_high = CLAMPADD (s_high, (sz * tries * tries));
1937 for (i = 0; i < min (num_objs, max_objs); i++)
1939 __mf_object_t *obj = objs[i];
1940 uintptr_t low = (uintptr_t) ptr;
1941 uintptr_t high = CLAMPSZ (ptr, sz);
/* Distances of the checked region's two endpoints relative to the
   object: before, after, or how far into it. */
1942 unsigned before1 = (low < obj->low) ? obj->low - low : 0;
1943 unsigned after1 = (low > obj->high) ? low - obj->high : 0;
1944 unsigned into1 = (high >= obj->low && low <= obj->high) ? low - obj->low : 0;
1945 unsigned before2 = (high < obj->low) ? obj->low - high : 0;
1946 unsigned after2 = (high > obj->high) ? high - obj->high : 0;
1947 unsigned into2 = (high >= obj->low && low <= obj->high) ? high - obj->low : 0;
1949 fprintf (stderr, "Nearby object %u: checked region begins %uB %s and ends %uB %s\n",
1950 num_helpful + i + 1,
1951 (before1 ? before1 : after1 ? after1 : into1),
1952 (before1 ? "before" : after1 ? "after" : "into"),
1953 (before2 ? before2 : after2 ? after2 : into2),
1954 (before2 ? "before" : after2 ? "after" : "into"));
1955 __mf_describe_object (obj);
1957 num_helpful += num_objs;
1960 fprintf (stderr, "number of nearby objects: %u\n", num_helpful);
1963 /* How to finally handle this violation? */
1964 switch (__mf_opts.violation_mode)
1969 kill (getpid(), SIGSEGV);
/* viol_gdb mode: attach a debugger to this very process. */
1976 snprintf (buf, 128, "gdb --pid=%u", (unsigned) getpid ());
1978 /* XXX: should probably fork() && sleep(GDB_WAIT_PARAMETER)
1979 instead, and let the forked child execlp() gdb. That way, this
1980 subject process can be resumed under the supervision of gdb.
1981 This can't happen now, since system() only returns when gdb
1982 dies. In that case, we need to beware of starting a second
1983 concurrent gdb child upon the next violation. (But if the first
1984 gdb dies, then starting a new one is appropriate.) */
1989 /* ------------------------------------------------------------------------ */
/* Public entry point: arm a watchpoint on [ptr, ptr+sz), under the
   recursion guard.  Thin wrapper over __mf_watch_or_not(flag=1).  */
1992 unsigned __mf_watch (void *ptr, size_t sz)
1996 BEGIN_RECURSION_PROTECT ();
1997 rc = __mf_watch_or_not (ptr, sz, 1);
1998 END_RECURSION_PROTECT ();
/* Public entry point: disarm a watchpoint on [ptr, ptr+sz).  Thin
   wrapper over __mf_watch_or_not(flag=0); presumably also guarded by
   the recursion-protect macros on lines missing from this
   extraction, mirroring __mf_watch.  */
2003 unsigned __mf_unwatch (void *ptr, size_t sz)
2007 rc = __mf_watch_or_not (ptr, sz, 0);
/* Shared implementation for __mf_watch/__mf_unwatch: set each
   overlapping object's watching_p to FLAG and evict changed objects
   from the lookup cache so their next access re-enters __mf_check.
   NOTE(review): the mudflap_mode switch body and return value lines
   are missing from this extraction.  */
2014 __mf_watch_or_not (void *ptr, size_t sz, char flag)
2016 uintptr_t ptr_high = CLAMPSZ (ptr, sz);
2017 uintptr_t ptr_low = (uintptr_t) ptr;
2020 TRACE ("%s ptr=%p size=%lu\n",
2021 (flag ? "watch" : "unwatch"), ptr, (unsigned long) sz);
2023 switch (__mf_opts.mudflap_mode)
2033 __mf_object_t **all_ovr_objs;
2036 DECLARE (void *, malloc, size_t c);
2037 DECLARE (void, free, void *p);
/* First pass: count overlaps so we can size the array exactly. */
2039 obj_count = __mf_find_objects (ptr_low, ptr_high, NULL, 0);
2040 VERBOSE_TRACE (" %u:", obj_count);
2042 all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_t *) * obj_count));
2043 if (all_ovr_objs == NULL) abort ();
/* Second pass: collect the overlapping objects themselves. */
2044 n = __mf_find_objects (ptr_low, ptr_high, all_ovr_objs, obj_count);
2045 assert (n == obj_count);
2047 for (n = 0; n < obj_count; n ++)
2049 __mf_object_t *obj = all_ovr_objs[n];
2051 VERBOSE_TRACE (" [%p]", (void *) obj);
2052 if (obj->watching_p != flag)
2054 obj->watching_p = flag;
2057 /* Remove object from cache, to ensure next access
2058 goes through __mf_check(). */
2060 __mf_uncache_object (obj);
2063 CALL_REAL (free, all_ovr_objs);
/* SIGUSR1 handler: just count the signal; the real work happens
   later in __mf_sigusr1_respond, outside async-signal context.  */
2073 __mf_sigusr1_handler (int num)
2075 __mf_sigusr1_received ++;
2078 /* Install or remove SIGUSR1 handler as necessary.
2079 Also, respond to a received pending SIGUSR1. */
/* Install or remove the SIGUSR1 handler to match the current
   sigusr1_report option, then service any pending signals counted by
   the handler (presumably by emitting a report — the servicing lines
   are missing from this extraction).  */
2081 __mf_sigusr1_respond ()
2083 static int handler_installed;
2086 /* Manage handler */
2087 if (__mf_opts.sigusr1_report && ! handler_installed)
2089 signal (SIGUSR1, __mf_sigusr1_handler);
2090 handler_installed = 1;
2092 else if(! __mf_opts.sigusr1_report && handler_installed)
2094 signal (SIGUSR1, SIG_DFL);
2095 handler_installed = 0;
2099 /* Manage enqueued signals */
2100 if (__mf_sigusr1_received > __mf_sigusr1_handled)
2102 __mf_sigusr1_handled ++;
2103 assert (__mf_state == reentrant);
2105 handler_installed = 0; /* We may need to re-enable signal; this might be a SysV library. */
2110 /* XXX: provide an alternative __assert_fail function that cannot
2111 fail due to libmudflap infinite recursion. */
/* Write the decimal representation of N to file descriptor FD using
   only write(2) — no malloc, no stdio — so it is safe inside the
   recursion-proof __assert_fail below.  Digits are produced least
   significant first into the tail of a stack buffer.  */
2115 write_itoa (int fd, unsigned n)
/* 4 chars per byte of n comfortably bounds the decimal digit count. */
2117 enum x { bufsize = sizeof(n)*4 };
2121 for (i=0; i<bufsize-1; i++)
2123 unsigned digit = n % 10;
2124 buf[bufsize-2-i] = digit + '0';
/* m points at the most significant digit written. */
2128 char *m = & buf [bufsize-2-i];
2129 buf[bufsize-1] = '\0';
2130 write (fd, m, strlen(m));
/* Replacement for the C library's __assert_fail that cannot recurse
   into libmudflap: it reports MSG/FILE/LINE/FUNC with raw write(2)
   calls only (no stdio, no allocation).  NOTE(review): the abort/exit
   tail is missing from this extraction.  */
2138 __assert_fail (const char *msg, const char *file, unsigned line, const char *func)
2140 #define write2(string) write (2, (string), strlen ((string)));
/* Thread id first, presumably under a threads-enabled conditional
   lost in this extraction.  */
2144 write_itoa (2, (unsigned) pthread_self ());
2147 write2(": assertion failure: `");
2148 write (2, msg, strlen (msg));
2150 write (2, func, strlen (func));
2152 write (2, file, strlen (file));
2154 write_itoa (2, line);
2167 /* #include the generic splay tree implementation from libiberty here, to
2168 ensure that it uses our memory allocation primitives. */
/* Deallocation hook for the #included splay-tree implementation:
   route through the real (unwrapped) free so tree bookkeeping never
   re-enters mudflap's own wrappers.  */
2171 splay_tree_free (void *p)
2173 DECLARE (void, free, void *p);
2174 CALL_REAL (free, p);
/* Allocation hook for the #included splay-tree implementation:
   route through the real (unwrapped) malloc, mirroring
   splay_tree_free above.  */
2178 splay_tree_xmalloc (size_t s)
2180 DECLARE (void *, malloc, size_t s);
2181 return CALL_REAL (malloc, s);
2184 #define free(z) splay_tree_free(z)
2185 #define xmalloc(z) splay_tree_xmalloc(z)
2186 #include "splay-tree.c"