1 /* Mudflap: narrow-pointer bounds-checking by tree rewriting.
2 Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
3 Contributed by Frank Ch. Eigler <fche@redhat.com>
4 and Graydon Hoare <graydon@redhat.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
13 In addition to the permissions in the GNU General Public License, the
14 Free Software Foundation gives you unlimited permission to link the
15 compiled version of this file into combinations with other programs,
16 and to distribute those combinations without any restriction coming
17 from the use of this file. (The General Public License restrictions
18 do apply in other respects; for example, they cover modification of
19 the file, and distribution when not linked into a combine
22 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
23 WARRANTY; without even the implied warranty of MERCHANTABILITY or
24 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
27 You should have received a copy of the GNU General Public License
28 along with GCC; see the file COPYING. If not, write to the Free
29 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
34 /* These attempt to coax various unix flavours to declare all our
35 needed tidbits in the system headers. */
36 #if !defined(__FreeBSD__)
38 #endif /* Some BSDs break <sys/socket.h> if this is defined. */
42 #define __EXTENSIONS__
44 #define _LARGE_FILE_API
45 #define _XOPEN_SOURCE_EXTENDED 1
49 #include <sys/types.h>
53 #ifdef HAVE_EXECINFO_H
63 #include <sys/types.h>
68 #include "mf-runtime.h"
70 #include "splay-tree.h"
73 /* ------------------------------------------------------------------------ */
/* Constructor/destructor attributes: run __mf_init before main() and
   __mf_fini after exit.  */
76 #define CTOR __attribute__ ((constructor))
77 #define DTOR __attribute__ ((destructor))
80 /* Codes to describe the context in which a violation occurs. */
81 #define __MF_VIOL_UNKNOWN 0
82 #define __MF_VIOL_READ 1
83 #define __MF_VIOL_WRITE 2
84 #define __MF_VIOL_REGISTER 3
85 #define __MF_VIOL_UNREGISTER 4
86 #define __MF_VIOL_WATCH 5
88 /* Protect against recursive calls. */
/* NOTE(review): the extraction appears to have dropped lines from these
   macro bodies (the abort/exit after the reentrancy message and the
   closing `} while (0)' of each do-block) -- confirm against the
   original mf-runtime.c before building.  */
89 #define BEGIN_RECURSION_PROTECT() do { \
90 if (UNLIKELY (__mf_state == reentrant)) { \
91 write (2, "mf: erroneous reentrancy detected in `", 38); \
92 write (2, __PRETTY_FUNCTION__, strlen(__PRETTY_FUNCTION__)); \
93 write (2, "'\n", 2); \
95 __mf_state = reentrant; \
98 #define END_RECURSION_PROTECT() do { \
99 __mf_state = active; \
104 /* ------------------------------------------------------------------------ */
105 /* Required globals. */
107 #define LOOKUP_CACHE_MASK_DFL 1023
108 #define LOOKUP_CACHE_SIZE_MAX 4096 /* Allows max CACHE_MASK 0x0FFF */
109 #define LOOKUP_CACHE_SHIFT_DFL 2
/* The adaptive lookup cache: a direct-mapped table of recently-validated
   [low,high] address ranges, indexed by __MF_CACHE_INDEX.  */
111 struct __mf_cache __mf_lookup_cache [LOOKUP_CACHE_SIZE_MAX];
112 uintptr_t __mf_lc_mask = LOOKUP_CACHE_MASK_DFL;
113 unsigned char __mf_lc_shift = LOOKUP_CACHE_SHIFT_DFL;
114 #define LOOKUP_CACHE_SIZE (__mf_lc_mask + 1)
116 struct __mf_options __mf_opts;
118 int __mf_starting_p = 1;
120 enum __mf_state_enum __mf_state = active;
122 /* See __mf_state_perthread() in mf-hooks.c. */
/* NOTE(review): the `#else'/`#endif' pair of the initializer selection
   below appears elided in this extraction.  */
127 pthread_mutex_t __mf_biglock =
128 #ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
129 PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
131 PTHREAD_MUTEX_INITIALIZER;
135 /* Use HAVE_PTHREAD_H here instead of LIBMUDFLAPTH, so that even
136 the libmudflap.la (no threading support) can diagnose whether
137 the application is linked with -lpthread. See __mf_usage() below. */
/* Weak reference: non-NULL iff the application linked against pthreads. */
139 #pragma weak pthread_join
140 const void *threads_active_p = (void *) pthread_join;
144 /* ------------------------------------------------------------------------ */
145 /* stats-related globals. */
/* Counters updated when __mf_opts.collect_stats is enabled; dumped in the
   report routine (not visible in this chunk).  */
147 static unsigned long __mf_count_check;
148 static unsigned long __mf_lookup_cache_reusecount [LOOKUP_CACHE_SIZE_MAX];
149 static unsigned long __mf_count_register;
150 static unsigned long __mf_total_register_size [__MF_TYPE_MAX+1];
151 static unsigned long __mf_count_unregister;
152 static unsigned long __mf_total_unregister_size;
153 static unsigned long __mf_count_violation [__MF_VIOL_WATCH+1];
154 static unsigned long __mf_sigusr1_received;
155 static unsigned long __mf_sigusr1_handled;
/* Shared with other translation units (e.g. mf-hooks), hence not static. */
156 /* not static */ unsigned long __mf_reentrancy;
158 /* not static */ unsigned long __mf_lock_contention;
162 /* ------------------------------------------------------------------------ */
163 /* mode-check-related globals. */
/* NOTE(review): this struct definition appears truncated in extraction --
   the opening brace, several fields (e.g. the object name, alloc_pc, a
   deallocated_p flag referenced later in __mfu_unregister) and the
   closing `} __mf_object_t;' are missing from this view.  */
165 typedef struct __mf_object
167 uintptr_t low, high; /* __mf_register parameters */
169 char type; /* __MF_TYPE_something */
170 char watching_p; /* Trigger a VIOL_WATCH on access? */
171 unsigned read_count; /* Number of times __mf_check/read was called on this object. */
172 unsigned write_count; /* Likewise for __mf_check/write. */
173 unsigned liveness; /* A measure of recent checking activity. */
174 unsigned description_epoch; /* Last epoch __mf_describe_object printed this. */
/* Allocation provenance, recorded at __mf_register time. */
177 struct timeval alloc_time;
178 char **alloc_backtrace;
179 size_t alloc_backtrace_size;
181 pthread_t alloc_thread;
/* Deallocation provenance, recorded at __mf_unregister time. */
185 uintptr_t dealloc_pc;
186 struct timeval dealloc_time;
187 char **dealloc_backtrace;
188 size_t dealloc_backtrace_size;
190 pthread_t dealloc_thread;
194 /* Live objects: splay trees, separated by type, ordered on .low (base address). */
195 /* Actually stored as static vars within lookup function below. */
197 /* Dead objects: circular arrays; _MIN_CEM .. _MAX_CEM only */
198 static unsigned __mf_object_dead_head[__MF_TYPE_MAX_CEM+1]; /* next empty spot */
199 static __mf_object_t *__mf_object_cemetary[__MF_TYPE_MAX_CEM+1][__MF_PERSIST_MAX];
202 /* ------------------------------------------------------------------------ */
203 /* Forward function declarations */
/* Internal helpers; definitions appear later in this file.  */
205 static void __mf_init () CTOR;
206 static void __mf_sigusr1_respond ();
207 static unsigned __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
208 __mf_object_t **objs, unsigned max_objs);
209 static unsigned __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
210 __mf_object_t **objs, unsigned max_objs, int type);
211 static unsigned __mf_find_dead_objects (uintptr_t ptr_low, uintptr_t ptr_high,
212 __mf_object_t **objs, unsigned max_objs);
213 static void __mf_adapt_cache ();
214 static void __mf_describe_object (__mf_object_t *obj);
215 static unsigned __mf_watch_or_not (void *ptr, size_t sz, char flag);
216 static splay_tree __mf_object_tree (int type);
217 static void __mf_link_object (__mf_object_t *node);
218 static void __mf_unlink_object (__mf_object_t *node);
221 /* ------------------------------------------------------------------------ */
222 /* Configuration engine */
225 __mf_set_default_options ()
227 memset (& __mf_opts, 0, sizeof (__mf_opts));
229 __mf_opts.adapt_cache = 1000003;
230 __mf_opts.abbreviate = 1;
231 __mf_opts.verbose_violations = 1;
232 __mf_opts.free_queue_length = 4;
233 __mf_opts.persistent_count = 100;
234 __mf_opts.crumple_zone = 32;
235 __mf_opts.backtrace = 4;
236 __mf_opts.mudflap_mode = mode_check;
237 __mf_opts.violation_mode = viol_nop;
238 __mf_opts.heur_std_data = 1;
240 __mf_opts.thread_stack = 0;
/* NOTE(review): interior of the `options[]' table mapping MUDFLAP_OPTIONS
   keywords to a description, a handler kind (set_option /
   read_integer_option), a value, and a target field.  The extraction has
   dropped the struct/array header and most of the `{"name",' lines, so
   this fragment is not compilable as-is -- restore from the original
   mf-runtime.c.  */
259 "mudflaps do nothing",
260 set_option, (int)mode_nop, (int *)&__mf_opts.mudflap_mode},
262 "mudflaps populate object tree",
263 set_option, (int)mode_populate, (int *)&__mf_opts.mudflap_mode},
265 "mudflaps check for memory violations",
266 set_option, (int)mode_check, (int *)&__mf_opts.mudflap_mode},
268 "mudflaps always cause violations (diagnostic)",
269 set_option, (int)mode_violate, (int *)&__mf_opts.mudflap_mode},
272 "violations do not change program execution",
273 set_option, (int)viol_nop, (int *)&__mf_opts.violation_mode},
275 "violations cause a call to abort()",
276 set_option, (int)viol_abort, (int *)&__mf_opts.violation_mode},
278 "violations are promoted to SIGSEGV signals",
279 set_option, (int)viol_segv, (int *)&__mf_opts.violation_mode},
281 "violations fork a gdb process attached to current program",
282 set_option, (int)viol_gdb, (int *)&__mf_opts.violation_mode},
284 "trace calls to mudflap runtime library",
285 set_option, 1, &__mf_opts.trace_mf_calls},
287 "trace internal events within mudflap runtime library",
288 set_option, 1, &__mf_opts.verbose_trace},
290 "collect statistics on mudflap's operation",
291 set_option, 1, &__mf_opts.collect_stats},
294 "print report upon SIGUSR1",
295 set_option, 1, &__mf_opts.sigusr1_report},
297 {"internal-checking",
298 "perform more expensive internal checking",
299 set_option, 1, &__mf_opts.internal_checking},
301 "print any memory leaks at program shutdown",
302 set_option, 1, &__mf_opts.print_leaks},
303 {"check-initialization",
304 "detect uninitialized object reads",
305 set_option, 1, &__mf_opts.check_initialization},
306 {"verbose-violations",
307 "print verbose messages when memory violations occur",
308 set_option, 1, &__mf_opts.verbose_violations},
310 "abbreviate repetitive listings",
311 set_option, 1, &__mf_opts.abbreviate},
313 "wipe stack objects at unwind",
314 set_option, 1, &__mf_opts.wipe_stack},
316 "wipe heap objects at free",
317 set_option, 1, &__mf_opts.wipe_heap},
319 "support /proc/self/map heuristics",
320 set_option, 1, &__mf_opts.heur_proc_map},
322 "enable a simple upper stack bound heuristic",
323 set_option, 1, &__mf_opts.heur_stack_bound},
325 "support _start.._end heuristics",
326 set_option, 1, &__mf_opts.heur_start_end},
328 "register standard library data (argv, errno, stdin, ...)",
329 set_option, 1, &__mf_opts.heur_std_data},
330 {"free-queue-length",
331 "queue N deferred free() calls before performing them",
332 read_integer_option, 0, &__mf_opts.free_queue_length},
334 "keep a history of N unregistered regions",
335 read_integer_option, 0, &__mf_opts.persistent_count},
337 "surround allocations with crumple zones of N bytes",
338 read_integer_option, 0, &__mf_opts.crumple_zone},
/* NOTE(review): the two entries below write an int through a cast to
   uintptr_t* / unsigned char* targets -- the original flags this too.  */
339 /* XXX: not type-safe.
341 "set lookup cache size mask to N (2**M - 1)",
342 read_integer_option, 0, (int *)(&__mf_lc_mask)},
344 "set lookup cache pointer shift",
345 read_integer_option, 0, (int *)(&__mf_lc_shift)},
348 "adapt mask/shift parameters after N cache misses",
349 read_integer_option, 1, &__mf_opts.adapt_cache},
351 "keep an N-level stack trace of each call context",
352 read_integer_option, 0, &__mf_opts.backtrace},
355 "override thread stacks allocation: N kB",
356 read_integer_option, 0, &__mf_opts.thread_stack},
/* Sentinel entry terminating the table scan.  */
358 {0, 0, set_option, 0, NULL}
/* NOTE(review): interior of __mf_usage() -- the function header, the
   enclosing fprintf call, and the switch scaffolding around the
   per-option cases are missing from this extraction.  */
367 "This is a %s%sGCC \"mudflap\" memory-checked binary.\n"
368 "Mudflap is Copyright (C) 2002-2003 Free Software Foundation, Inc.\n"
370 "The mudflap code can be controlled by an environment variable:\n"
372 "$ export MUDFLAP_OPTIONS='<options>'\n"
373 "$ <mudflapped_program>\n"
375 "where <options> is a space-separated list of \n"
376 "any of the following options. Use `-no-OPTION' to disable options.\n"
379 (threads_active_p ? "multi-threaded " : "single-threaded "),
389 /* XXX: The multi-threaded thread-unaware combination is bad. */
391 for (opt = options; opt->name; opt++)
393 int default_p = (opt->value == * opt->target);
399 fprintf (stderr, "-%-23.23s %s", opt->name, opt->description);
401 fprintf (stderr, " [active]\n");
403 fprintf (stderr, "\n");
405 case read_integer_option:
/* NOTE(review): strncpy does not NUL-terminate when opt->name fills the
   buffer, and the "=N" copy below deliberately omits the terminator --
   pre-existing hazard relying on option names being short; left as-is.  */
406 strncpy (buf, opt->name, 128);
407 strncpy (buf + strlen (opt->name), "=N", 2);
408 fprintf (stderr, "-%-23.23s %s", buf, opt->description);
409 fprintf (stderr, " [%d]\n", * opt->target);
415 fprintf (stderr, "\n");
/* Public entry point: parse OPTSTR (MUDFLAP_OPTIONS syntax) while holding
   the big lock and the recursion guard.  Returns the result of
   __mfu_set_options (negative on parse failure).  */
int
__mf_set_options (const char *optstr)
{
  int rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mfu_set_options (optstr);
  /* XXX: It's not really that easy.  A change to a bunch of parameters
     can require updating auxiliary state or risk crashing:
     free_queue_length, crumple_zone ... */
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}
/* Unlocked option parser: walks OPTSTR token by token, matching each
   against the options[] table.  NOTE(review): the extraction has dropped
   many lines of the tokenizing loop (the `while (*optstr)' header,
   whitespace skipping, the `negate' declaration, switch/case scaffolding
   and closing braces), so this body is not compilable as shown.  */
436 __mfu_set_options (const char *optstr)
438 struct option *opts = 0;
442 const char *saved_optstr = optstr;
444 /* XXX: bounds-check for optstr! */
461 if (*optstr == '?' ||
462 strncmp (optstr, "help", 4) == 0)
464 /* Caller will print help and exit. */
468 if (strncmp (optstr, "no-", 3) == 0)
471 optstr = & optstr[3];
474 for (opts = options; opts->name; opts++)
476 if (strncmp (optstr, opts->name, strlen (opts->name)) == 0)
478 optstr += strlen (opts->name);
479 assert (opts->target);
486 *(opts->target) = opts->value;
488 case read_integer_option:
489 if (! negate && (*optstr == '=' && *(optstr+1)))
492 tmp = strtol (optstr, &nxt, 10);
493 if ((optstr != nxt) && (tmp != LONG_MAX))
496 *(opts->target) = (int)tmp;
510 "warning: unrecognized string '%s' in mudflap options\n",
512 optstr += strlen (optstr);
518 /* Special post-processing: bound __mf_lc_mask and free_queue_length for security. */
519 __mf_lc_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
520 __mf_opts.free_queue_length &= (__MF_FREEQ_MAX - 1);
522 /* Clear the lookup cache, in case the parameters got changed. */
524 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
/* Make the first cache entry never match a real address.  */
526 __mf_lookup_cache[0].low = MAXPTR;
528 TRACE ("set options from `%s'\n", saved_optstr);
530 /* Call this unconditionally, in case -sigusr1-report was toggled. */
531 __mf_sigusr1_respond ();
540 __mf_resolve_single_dynamic (struct __mf_dynamic_entry *e)
545 if (e->pointer) return;
548 if (e->version != NULL && e->version[0] != '\0') /* non-null/empty */
549 e->pointer = dlvsym (RTLD_NEXT, e->name, e->version);
552 e->pointer = dlsym (RTLD_NEXT, e->name);
558 fprintf (stderr, "mf: error in dlsym(\"%s\"): %s\n",
564 fprintf (stderr, "mf: dlsym(\"%s\") = NULL\n", e->name);
571 __mf_resolve_dynamics ()
574 for (i = 0; i < dyn_INITRESOLVE; i++)
575 __mf_resolve_single_dynamic (& __mf_dynamic[i]);
579 /* NB: order must match enums in mf-impl.h */
580 struct __mf_dynamic_entry __mf_dynamic [] =
582 {NULL, "calloc", NULL},
583 {NULL, "free", NULL},
584 {NULL, "malloc", NULL},
585 {NULL, "mmap", NULL},
586 {NULL, "munmap", NULL},
587 {NULL, "realloc", NULL},
588 {NULL, "DUMMY", NULL}, /* dyn_INITRESOLVE */
590 {NULL, "pthread_create", PTHREAD_CREATE_VERSION},
591 {NULL, "pthread_join", NULL},
592 {NULL, "pthread_exit", NULL}
600 /* ------------------------------------------------------------------------ */
602 /* Lookup & manage automatic initialization of the five or so splay trees. */
604 __mf_object_tree (int type)
606 static splay_tree trees [__MF_TYPE_MAX+1];
607 assert (type >= 0 && type <= __MF_TYPE_MAX);
608 if (UNLIKELY (trees[type] == NULL))
609 trees[type] = splay_tree_new (splay_tree_compare_pointers, NULL, NULL);
/* NOTE(review): interior of __mf_init() (CTOR) -- the function header,
   the `ov' declaration, and the error/usage handling around
   __mfu_set_options are missing from this extraction.  Bootstrap order:
   resolve real libc entry points, set defaults, apply MUDFLAP_OPTIONS,
   then register the runtime's own state as NOACCESS so the instrumented
   program cannot corrupt it.  */
619 /* This initial bootstrap phase requires that __mf_starting_p = 1. */
621 __mf_resolve_dynamics ();
625 __mf_set_default_options ();
627 ov = getenv ("MUDFLAP_OPTIONS");
630 int rc = __mfu_set_options (ov);
638 /* Initialize to a non-zero description epoch. */
639 __mf_describe_object (NULL);
641 #define REG_RESERVED(obj) \
642 __mf_register (& obj, sizeof(obj), __MF_TYPE_NOACCESS, # obj)
644 REG_RESERVED (__mf_lookup_cache);
645 REG_RESERVED (__mf_lc_mask);
646 REG_RESERVED (__mf_lc_shift);
647 /* XXX: others of our statics? */
649 /* Prevent access to *NULL. */
650 __mf_register (MINPTR, 1, __MF_TYPE_NOACCESS, "NULL");
/* Ensure cache entry 0 can never match a genuine address.  */
651 __mf_lookup_cache[0].low = (uintptr_t) -1;
/* NOTE(review): __wrap_main -- interposed entry point that registers
   argv[], environ[], errno and the stdio streams as STATIC objects
   before chaining to the real main.  The extraction has dropped the
   return-type line, the `i'/`j' declarations, the environ loop header,
   the been_here guard assignment, and the #if/#else selecting between
   calling `main' directly and `__real_main'.  */
657 __wrap_main (int argc, char* argv[])
659 extern char **environ;
661 static int been_here = 0;
663 if (__mf_opts.heur_std_data && ! been_here)
668 __mf_register (argv, sizeof(char *)*(argc+1), __MF_TYPE_STATIC, "argv[]");
669 for (i=0; i<argc; i++)
671 unsigned j = strlen (argv[i]);
672 __mf_register (argv[i], j+1, __MF_TYPE_STATIC, "argv element");
677 char *e = environ[i];
679 if (e == NULL) break;
680 j = strlen (environ[i]);
681 __mf_register (environ[i], j+1, __MF_TYPE_STATIC, "environ element");
683 __mf_register (environ, sizeof(char *)*(i+1), __MF_TYPE_STATIC, "environ[]");
685 __mf_register (& errno, sizeof (errno), __MF_TYPE_STATIC, "errno area");
687 __mf_register (stdin, sizeof (*stdin), __MF_TYPE_STATIC, "stdin");
688 __mf_register (stdout, sizeof (*stdout), __MF_TYPE_STATIC, "stdout");
689 __mf_register (stderr, sizeof (*stderr), __MF_TYPE_STATIC, "stderr");
691 /* Make some effort to register ctype.h static arrays. */
692 /* XXX: e.g., on Solaris, may need to register __ctype, _ctype, __ctype_mask, __toupper, etc. */
693 /* On modern Linux GLIBC, these are thread-specific and changeable, and are dealt
694 with in mf-hooks2.c. */
698 return main (argc, argv, environ);
700 return __real_main (argc, argv, environ);
/* Shutdown hook (DTOR): runs after exit.  NOTE(review): the body beyond
   the TRACE call (report emission, leak check) is elided in this view.  */
706 extern void __mf_fini () DTOR;
709 TRACE ("__mf_fini\n");
715 /* ------------------------------------------------------------------------ */
718 void __mf_check (void *ptr, size_t sz, int type, const char *location)
721 BEGIN_RECURSION_PROTECT ();
722 __mfu_check (ptr, sz, type, location);
723 END_RECURSION_PROTECT ();
/* Unlocked core of the access check: consult/refresh the lookup cache,
   find all registered objects overlapping [ptr, ptr+sz), classify the
   access, optionally run heuristics, and report a violation when
   judgement goes negative.  NOTE(review): many lines of this function
   (mode_nop/mode_populate case labels, `i'/`n'/`obj_count' declarations,
   several closing braces, the cache-miss counter update, and the tail of
   the final __mf_violation argument list) are elided in this extraction;
   restore from the original before compiling.  */
728 void __mfu_check (void *ptr, size_t sz, int type, const char *location)
730 unsigned entry_idx = __MF_CACHE_INDEX (ptr);
731 struct __mf_cache *entry = & __mf_lookup_cache [entry_idx];
732 int judgement = 0; /* 0=undecided; <0=violation; >0=okay */
733 uintptr_t ptr_low = (uintptr_t) ptr;
734 uintptr_t ptr_high = CLAMPSZ (ptr, sz);
735 struct __mf_cache old_entry = *entry;
737 if (UNLIKELY (__mf_opts.sigusr1_report))
738 __mf_sigusr1_respond ();
740 TRACE ("check ptr=%p b=%u size=%lu %s location=`%s'\n",
741 ptr, entry_idx, (unsigned long)sz,
742 (type == 0 ? "read" : "write"), location);
744 switch (__mf_opts.mudflap_mode)
/* mode_nop: mark the whole address space valid in this cache slot.  */
748 entry->high = MAXPTR;
/* mode_populate: cache exactly the checked range, no verdict.  */
753 entry->low = ptr_low;
754 entry->high = ptr_high;
760 unsigned heuristics = 0;
762 /* Advance aging/adaptation counters. */
763 static unsigned adapt_count;
765 if (UNLIKELY (__mf_opts.adapt_cache > 0 &&
766 adapt_count > __mf_opts.adapt_cache))
772 /* Looping only occurs if heuristics were triggered. */
773 while (judgement == 0)
775 DECLARE (void, free, void *p);
776 __mf_object_t* ovr_obj[1];
778 __mf_object_t** all_ovr_obj = NULL;
779 __mf_object_t** dealloc_me = NULL;
782 /* Find all overlapping objects. Be optimistic that there is just one. */
783 obj_count = __mf_find_objects (ptr_low, ptr_high, ovr_obj, 1);
784 if (UNLIKELY (obj_count > 1))
786 /* Allocate a real buffer and do the search again. */
787 DECLARE (void *, malloc, size_t c);
789 all_ovr_obj = CALL_REAL (malloc, (sizeof (__mf_object_t *) *
791 if (all_ovr_obj == NULL) abort ();
792 n = __mf_find_objects (ptr_low, ptr_high, all_ovr_obj, obj_count);
793 assert (n == obj_count);
794 dealloc_me = all_ovr_obj;
798 all_ovr_obj = ovr_obj;
802 /* Update object statistics. */
803 for (i = 0; i < obj_count; i++)
805 __mf_object_t *obj = all_ovr_obj[i];
806 assert (obj != NULL);
807 if (type == __MF_CHECK_READ)
814 /* Iterate over the various objects. There are a number of special cases. */
815 for (i = 0; i < obj_count; i++)
817 __mf_object_t *obj = all_ovr_obj[i];
819 /* Any __MF_TYPE_NOACCESS hit is bad. */
820 if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
823 /* Any object with a watch flag is bad. */
824 if (UNLIKELY (obj->watching_p))
825 judgement = -2; /* trigger VIOL_WATCH */
827 /* A read from an uninitialized object is bad. */
828 if (UNLIKELY (__mf_opts.check_initialization
830 && type == __MF_CHECK_READ
832 && obj->write_count == 0
833 /* uninitialized (heap) */
834 && obj->type == __MF_TYPE_HEAP))
838 /* We now know that the access spans one or more valid objects. */
839 if (LIKELY (judgement >= 0))
840 for (i = 0; i < obj_count; i++)
842 __mf_object_t *obj = all_ovr_obj[i];
844 /* Is this access entirely contained within this object? */
845 if (LIKELY (ptr_low >= obj->low && ptr_high <= obj->high))
/* Cache the containing object's bounds for fast re-checks.  */
848 entry->low = obj->low;
849 entry->high = obj->high;
853 /* XXX: Access runs off left or right side of this
854 object. That could be okay, if there are
855 other objects that fill in all the holes. */
858 if (dealloc_me != NULL)
859 CALL_REAL (free, dealloc_me);
861 /* If the judgment is still unknown at this stage, loop
862 around at most one more time. */
865 if (heuristics++ < 2) /* XXX parametrize this number? */
866 judgement = __mf_heuristic_check (ptr_low, ptr_high);
880 if (__mf_opts.collect_stats)
884 if (LIKELY (old_entry.low != entry->low || old_entry.high != entry->high))
885 /* && (old_entry.low != 0) && (old_entry.high != 0)) */
886 __mf_lookup_cache_reusecount [entry_idx] ++;
889 if (UNLIKELY (judgement < 0))
890 __mf_violation (ptr, sz,
891 (uintptr_t) __builtin_return_address (0), location,
893 (type == __MF_CHECK_READ ? __MF_VIOL_READ : __MF_VIOL_WRITE) :
/* Allocate (via the real calloc) and populate a tracking record for a
   newly registered region [low,high], then link it into the per-type
   splay tree.  NOTE(review): the extraction has dropped the assignment
   of new_obj->low, the #ifdef LIBMUDFLAPTH / #endif guards around the
   pthread_self call, the tail of the __mf_backtrace argument list, and
   the `return new_obj;` / closing brace -- confirm against the original.  */
898 static __mf_object_t *
899 __mf_insert_new_object (uintptr_t low, uintptr_t high, int type,
900 const char *name, uintptr_t pc)
902 DECLARE (void *, calloc, size_t c, size_t n);
904 __mf_object_t *new_obj;
905 new_obj = CALL_REAL (calloc, 1, sizeof(__mf_object_t));
907 new_obj->high = high;
908 new_obj->type = type;
909 new_obj->name = name;
910 new_obj->alloc_pc = pc;
911 #if HAVE_GETTIMEOFDAY
912 gettimeofday (& new_obj->alloc_time, NULL);
915 new_obj->alloc_thread = pthread_self ();
/* Record an allocation backtrace only for heap objects, where it is
   most useful for diagnosing use-after-free.  */
918 if (__mf_opts.backtrace > 0 && (type == __MF_TYPE_HEAP || type == __MF_TYPE_HEAP_I))
919 new_obj->alloc_backtrace_size =
920 __mf_backtrace (& new_obj->alloc_backtrace,
923 __mf_link_object (new_obj);
/* Purge every lookup-cache slot that refers to OLD_OBJ's address range,
   so a later access to a recycled region cannot hit a stale entry.
   NOTE(review): the `unsigned i' declaration, the entry->low reset line,
   and several braces are elided in this extraction.  */
929 __mf_uncache_object (__mf_object_t *old_obj)
931 /* Remove any low/high pointers for this object from the lookup cache. */
933 /* Can it possibly exist in the cache? */
934 if (LIKELY (old_obj->read_count + old_obj->write_count))
936 uintptr_t low = old_obj->low;
937 uintptr_t high = old_obj->high;
938 unsigned idx_low = __MF_CACHE_INDEX (low);
939 unsigned idx_high = __MF_CACHE_INDEX (high);
941 for (i = idx_low; i <= idx_high; i++)
943 struct __mf_cache *entry = & __mf_lookup_cache [i];
944 /* NB: the "||" in the following test permits this code to
945 tolerate the situation introduced by __mf_check over
946 contiguous objects, where a cache entry spans several
948 if (entry->low == low || entry->high == high)
951 entry->high = MINPTR;
959 __mf_register (void *ptr, size_t sz, int type, const char *name)
962 BEGIN_RECURSION_PROTECT ();
963 __mfu_register (ptr, sz, type, name);
964 END_RECURSION_PROTECT ();
/* Unlocked core of object registration: update statistics, then (in
   check mode) search the same-type splay tree for overlaps, tolerate
   benign duplicates, flag genuine overlaps as violations, and otherwise
   insert a new tracking object.  NOTE(review): the return-type line,
   the switch case labels, and several closing braces are elided in this
   extraction.  */
970 __mfu_register (void *ptr, size_t sz, int type, const char *name)
972 TRACE ("register ptr=%p size=%lu type=%x name='%s'\n",
973 ptr, (unsigned long) sz, type, name ? name : "");
975 if (__mf_opts.collect_stats)
977 __mf_count_register ++;
/* Clamp out-of-range type codes into bucket 0 of the size histogram.  */
978 __mf_total_register_size [(type < 0) ? 0 :
979 (type > __MF_TYPE_MAX) ? 0 :
983 if (UNLIKELY (__mf_opts.sigusr1_report))
984 __mf_sigusr1_respond ();
986 switch (__mf_opts.mudflap_mode)
/* mode_violate: every registration is itself reported.  */
992 __mf_violation (ptr, sz, (uintptr_t) __builtin_return_address (0), NULL,
997 /* Clear the cache. */
998 /* XXX: why the entire cache? */
1000 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1002 __mf_lookup_cache[0].low = MAXPTR;
1007 __mf_object_t *ovr_objs [1];
1008 unsigned num_overlapping_objs;
1009 uintptr_t low = (uintptr_t) ptr;
/* NOTE(review): `high' is computed from sz *before* the sz==0
   normalization below -- so a zero-size registration's range and its
   recorded size disagree; flagging, matches upstream behavior.  */
1010 uintptr_t high = CLAMPSZ (ptr, sz);
1011 uintptr_t pc = (uintptr_t) __builtin_return_address (0);
1013 /* Treat unknown size indication as 1. */
1014 if (UNLIKELY (sz == 0)) sz = 1;
1016 /* Look for objects only of the same type. This will e.g. permit a registration
1017 of a STATIC overlapping with a GUESS, and a HEAP with a NOACCESS. At
1018 __mf_check time however harmful overlaps will be detected. */
1019 num_overlapping_objs = __mf_find_objects2 (low, high, ovr_objs, 1, type);
1021 /* Handle overlaps. */
1022 if (UNLIKELY (num_overlapping_objs > 0))
1024 __mf_object_t *ovr_obj = ovr_objs[0];
1026 /* Accept certain specific duplication pairs. */
1027 if (((type == __MF_TYPE_STATIC) || (type == __MF_TYPE_GUESS))
1028 && ovr_obj->low == low
1029 && ovr_obj->high == high
1030 && ovr_obj->type == type)
1032 /* Duplicate registration for static objects may come
1033 from distinct compilation units. */
1034 VERBOSE_TRACE ("harmless duplicate reg %p-%p `%s'\n",
1035 (void *) low, (void *) high,
1036 (ovr_obj->name ? ovr_obj->name : ""));
1040 /* Alas, a genuine violation. */
1043 /* Two or more *real* mappings here. */
1044 __mf_violation ((void *) ptr, sz,
1045 (uintptr_t) __builtin_return_address (0), NULL,
1046 __MF_VIOL_REGISTER);
1049 else /* No overlapping objects: AOK. */
1050 __mf_insert_new_object (low, high, type, name, pc);
1052 /* We could conceivably call __mf_check() here to prime the cache,
1053 but then the read_count/write_count field is not reliable. */
1056 } /* end switch (__mf_opts.mudflap_mode) */
1061 __mf_unregister (void *ptr, size_t sz, int type)
1064 BEGIN_RECURSION_PROTECT ();
1065 __mfu_unregister (ptr, sz, type);
1066 END_RECURSION_PROTECT ();
/* Unlocked core of object unregistration: locate the matching live
   object, validate base/size, unlink and uncache it, optionally wipe the
   memory, then retire the record into the per-type cemetery ring (or
   free it outright).  NOTE(review): the return-type line, case labels,
   the assignment `old_obj = objs[0];' (old_obj is used at the checks
   below but its assignment is elided here), the `plot++' increment, and
   several braces are missing from this extraction.  */
1072 __mfu_unregister (void *ptr, size_t sz, int type)
1074 DECLARE (void, free, void *ptr);
1076 if (UNLIKELY (__mf_opts.sigusr1_report))
1077 __mf_sigusr1_respond ();
1079 TRACE ("unregister ptr=%p size=%lu type=%x\n", ptr, (unsigned long) sz, type);
1081 switch (__mf_opts.mudflap_mode)
/* mode_violate: every unregistration is itself reported.  */
1087 __mf_violation (ptr, sz,
1088 (uintptr_t) __builtin_return_address (0), NULL,
1089 __MF_VIOL_UNREGISTER);
1093 /* Clear the cache. */
1095 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1097 __mf_lookup_cache[0].low = MAXPTR;
1102 __mf_object_t *old_obj = NULL;
1103 __mf_object_t *del_obj = NULL; /* Object to actually delete. */
1104 __mf_object_t *objs[1] = {NULL};
1105 unsigned num_overlapping_objs;
1107 num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
1108 CLAMPSZ (ptr, sz), objs, 1, type);
1110 /* Special case for HEAP_I - see free & realloc hook. They don't
1111 know whether the input region was HEAP or HEAP_I before
1112 unmapping it. Here we give HEAP a try in case HEAP_I
1114 if ((type == __MF_TYPE_HEAP_I) && (num_overlapping_objs == 0))
1116 num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
1117 CLAMPSZ (ptr, sz), objs, 1, __MF_TYPE_HEAP);
1121 if (UNLIKELY ((num_overlapping_objs != 1) /* more than one overlap */
1122 || ((sz == 0) ? 0 : (sz != (old_obj->high - old_obj->low + 1))) /* size mismatch */
1123 || ((uintptr_t) ptr != old_obj->low))) /* base mismatch */
1125 __mf_violation (ptr, sz,
1126 (uintptr_t) __builtin_return_address (0), NULL,
1127 __MF_VIOL_UNREGISTER);
1131 __mf_unlink_object (old_obj);
1132 __mf_uncache_object (old_obj);
1134 /* Wipe buffer contents if desired. */
1135 if ((__mf_opts.wipe_stack && old_obj->type == __MF_TYPE_STACK)
1136 || (__mf_opts.wipe_heap && (old_obj->type == __MF_TYPE_HEAP
1137 || old_obj->type == __MF_TYPE_HEAP_I)))
1139 memset ((void *) old_obj->low,
1141 (size_t) (old_obj->high - old_obj->low + 1));
1144 /* Manage the object cemetary. */
1145 if (__mf_opts.persistent_count > 0 &&
1146 old_obj->type >= 0 &&
1147 old_obj->type <= __MF_TYPE_MAX_CEM)
1149 old_obj->deallocated_p = 1;
1150 old_obj->dealloc_pc = (uintptr_t) __builtin_return_address (0);
1151 #if HAVE_GETTIMEOFDAY
1152 gettimeofday (& old_obj->dealloc_time, NULL);
1155 old_obj->dealloc_thread = pthread_self ();
1158 if (__mf_opts.backtrace > 0 && old_obj->type == __MF_TYPE_HEAP)
1159 old_obj->dealloc_backtrace_size =
1160 __mf_backtrace (& old_obj->dealloc_backtrace,
1163 /* Encourage this object to be displayed again in current epoch. */
1164 old_obj->description_epoch --;
1166 /* Put this object into the cemetary. This may require this plot to
1167 be recycled, and the previous resident to be designated del_obj. */
1169 unsigned row = old_obj->type;
1170 unsigned plot = __mf_object_dead_head [row];
1172 del_obj = __mf_object_cemetary [row][plot];
1173 __mf_object_cemetary [row][plot] = old_obj;
/* Advance the ring head, wrapping at persistent_count.  */
1175 if (plot == __mf_opts.persistent_count) plot = 0;
1176 __mf_object_dead_head [row] = plot;
1182 if (__mf_opts.print_leaks)
1184 if ((old_obj->read_count + old_obj->write_count) == 0 &&
1185 (old_obj->type == __MF_TYPE_HEAP
1186 || old_obj->type == __MF_TYPE_HEAP_I))
1190 "mudflap warning: unaccessed registered object:\n");
1191 __mf_describe_object (old_obj);
1195 if (del_obj != NULL) /* May or may not equal old_obj. */
1197 if (__mf_opts.backtrace > 0)
1199 CALL_REAL(free, del_obj->alloc_backtrace);
1200 if (__mf_opts.persistent_count > 0)
1202 CALL_REAL(free, del_obj->dealloc_backtrace);
1205 CALL_REAL(free, del_obj);
1210 } /* end switch (__mf_opts.mudflap_mode) */
1213 if (__mf_opts.collect_stats)
1215 __mf_count_unregister ++;
1216 __mf_total_unregister_size += sz;
/* NOTE(review): fields of `struct tree_stats' (the struct header, an
   obj_count field referenced in __mf_adapt_cache, and the closing brace
   are elided in this extraction) -- accumulated per-object statistics
   used to re-tune the lookup cache geometry.  */
1225 unsigned long total_size;
1226 unsigned live_obj_count;
1227 double total_weight;
1228 double weighted_size;
/* Per-address-bit liveness tallies: [bit][bit-value] -> summed liveness.  */
1229 unsigned long weighted_address_bits [sizeof (uintptr_t) * 8][2];
/* Splay-tree visitor: fold one object's size/liveness into the stats,
   tally which address bits discriminate lively objects, then age the
   object's liveness counter.  NOTE(review): the return-type line, the
   `i'/`addr' declarations, the addr >>= 1 step, and the return value
   are elided here.  */
1235 __mf_adapt_cache_fn (splay_tree_node n, void *param)
1237 __mf_object_t *obj = (__mf_object_t *) n->value;
1238 struct tree_stats *s = (struct tree_stats *) param;
1240 assert (obj != NULL && s != NULL);
1242 /* Exclude never-accessed objects. */
1243 if (obj->read_count + obj->write_count)
1246 s->total_size += (obj->high - obj->low + 1);
1253 /* VERBOSE_TRACE ("analyze low=%p live=%u name=`%s'\n",
1254 (void *) obj->low, obj->liveness, obj->name); */
1256 s->live_obj_count ++;
1257 s->total_weight += (double) obj->liveness;
1259 (double) (obj->high - obj->low + 1) *
1260 (double) obj->liveness;
1263 for (i=0; i<sizeof(uintptr_t) * 8; i++)
1265 unsigned bit = addr & 1;
1266 s->weighted_address_bits[i][bit] += obj->liveness;
1270 /* Age the liveness value. */
1271 obj->liveness >>= 1;
/* NOTE(review): interior of __mf_adapt_cache() -- the function header,
   `i'/`max_value' declarations, the early `return', and several braces
   are elided in this extraction.  Gathers liveness stats over all five
   object trees and re-tunes the lookup cache's shift/mask.  */
1282 struct tree_stats s;
1283 uintptr_t new_mask = 0;
1284 unsigned char new_shift;
1285 float cache_utilization;
1287 static float smoothed_new_shift = -1.0;
1290 memset (&s, 0, sizeof (s));
1292 splay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP), __mf_adapt_cache_fn, (void *) & s);
1293 splay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I), __mf_adapt_cache_fn, (void *) & s);
1294 splay_tree_foreach (__mf_object_tree (__MF_TYPE_STACK), __mf_adapt_cache_fn, (void *) & s);
1295 splay_tree_foreach (__mf_object_tree (__MF_TYPE_STATIC), __mf_adapt_cache_fn, (void *) & s);
1296 splay_tree_foreach (__mf_object_tree (__MF_TYPE_GUESS), __mf_adapt_cache_fn, (void *) & s);
1298 /* Maybe we're dealing with funny aging/adaptation parameters, or an
1299 empty tree. Just leave the cache alone in such cases, rather
1300 than risk dying by division-by-zero. */
/* NOTE(review): suspicious parenthesization -- `!' binds only to the
   first comparison, so this reads (!(obj_count>0)) && ... rather than
   the comment's apparent intent !(a && b && c).  Present upstream as
   well; verify against the original before "fixing".  */
1301 if (! (s.obj_count > 0) && (s.live_obj_count > 0) && (s.total_weight > 0.0))
1304 /* Guess a good value for the shift parameter by finding an address bit that is a
1305 good discriminant of lively objects. */
1307 for (i=0; i<sizeof (uintptr_t)*8; i++)
1309 float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
1310 if (max_value < value) max_value = value;
1312 for (i=0; i<sizeof (uintptr_t)*8; i++)
1314 float shoulder_factor = 0.7; /* Include slightly less popular bits too. */
1315 float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
1316 if (value >= max_value * shoulder_factor)
1319 if (smoothed_new_shift < 0) smoothed_new_shift = __mf_lc_shift;
1320 /* Converge toward this slowly to reduce flapping. */
1321 smoothed_new_shift = 0.9*smoothed_new_shift + 0.1*i;
1322 new_shift = (unsigned) (smoothed_new_shift + 0.5);
1323 assert (new_shift < sizeof (uintptr_t)*8);
1325 /* Count number of used buckets. */
1326 cache_utilization = 0.0;
1327 for (i = 0; i < (1 + __mf_lc_mask); i++)
1328 if (__mf_lookup_cache[i].low != 0 || __mf_lookup_cache[i].high != 0)
1329 cache_utilization += 1.0;
1330 cache_utilization /= (1 + __mf_lc_mask);
1332 new_mask |= 0x3ff; /* XXX: force a large cache. */
1333 new_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
1335 VERBOSE_TRACE ("adapt cache obj=%u/%u sizes=%lu/%.0f/%.0f => "
1336 "util=%u%% m=%p s=%u\n",
1337 s.obj_count, s.live_obj_count, s.total_size, s.total_weight, s.weighted_size,
1338 (unsigned)(cache_utilization*100.0), (void *) new_mask, new_shift);
1340 /* We should reinitialize cache if its parameters have changed. */
1341 if (new_mask != __mf_lc_mask ||
1342 new_shift != __mf_lc_shift)
1344 __mf_lc_mask = new_mask;
1345 __mf_lc_shift = new_shift;
1347 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1349 __mf_lookup_cache[0].low = MAXPTR;
1355 /* __mf_find_object[s] */
1357 /* Find overlapping live objects between [low,high]. Return up to
1358 max_objs of their pointers in objs[]. Return total count of
1359 overlaps (may exceed max_objs). */
1362 __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
1363 __mf_object_t **objs, unsigned max_objs, int type)
/* Search the splay tree for the given object TYPE only; callers iterate
   over types via __mf_find_objects.  */
1366 splay_tree t = __mf_object_tree (type);
1367 splay_tree_key k = (splay_tree_key) ptr_low;
1370 splay_tree_node n = splay_tree_lookup (t, k);
1371 /* An exact match for base address implies a hit. */
1374 if (count < max_objs)
1375 objs[count] = (__mf_object_t *) n->value;
1379 /* Iterate left then right near this key value to find all overlapping objects. */
1380 for (direction = 0; direction < 2; direction ++)
1382 /* Reset search origin. */
1383 k = (splay_tree_key) ptr_low;
/* direction 0 walks toward lower keys, direction 1 toward higher keys.  */
1389 n = (direction == 0 ? splay_tree_predecessor (t, k) : splay_tree_successor (t, k));
1390 if (n == NULL) break;
1391 obj = (__mf_object_t *) n->value;
1393 if (! (obj->low <= ptr_high && obj->high >= ptr_low)) /* No overlap? */
1396 if (count < max_objs)
1397 objs[count] = (__mf_object_t *) n->value;
/* Advance the search key to this object's base so the next
   predecessor/successor call steps past it.  */
1400 k = (splay_tree_key) obj->low;
/* Find all live objects of any type overlapping [ptr_low,ptr_high];
   fill objs[] with up to max_objs pointers and return the total
   overlap count (which may exceed max_objs).  */
1409 __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
1410 __mf_object_t **objs, unsigned max_objs)
1415 /* Search each splay tree for overlaps. */
1416 for (type = __MF_TYPE_NOACCESS; type <= __MF_TYPE_GUESS; type++)
1418 unsigned c = __mf_find_objects2 (ptr_low, ptr_high, objs, max_objs, type);
1424 else /* NB: C may equal 0 */
1437 /* __mf_link_object */
/* Insert NODE into the splay tree for its type, keyed by its low
   (base) address.  */
1440 __mf_link_object (__mf_object_t *node)
1442 splay_tree t = __mf_object_tree (node->type);
1443 splay_tree_insert (t, (splay_tree_key) node->low, (splay_tree_value) node);
1446 /* __mf_unlink_object */
/* Remove NODE from the splay tree for its type; the key is its low
   (base) address, matching __mf_link_object.  */
1449 __mf_unlink_object (__mf_object_t *node)
1451 splay_tree t = __mf_object_tree (node->type);
1452 splay_tree_remove (t, (splay_tree_key) node->low);
1455 /* __mf_find_dead_objects */
1457 /* Find overlapping dead objects between [low,high]. Return up to
1458 max_objs of their pointers in objs[]. Return total count of
1459 overlaps (may exceed max_objs). */
1462 __mf_find_dead_objects (uintptr_t low, uintptr_t high,
1463 __mf_object_t **objs, unsigned max_objs)
/* Dead objects are only retained at all when persistent_count > 0.  */
1465 if (__mf_opts.persistent_count > 0)
1468 unsigned recollection = 0;
1471 assert (low <= high);
1472 assert (max_objs == 0 || objs != NULL);
1474 /* Widen the search from the most recent plots in each row, looking
1475 backward in time. */
1477 while (recollection < __mf_opts.persistent_count)
/* One cemetery row per object type (up to __MF_TYPE_MAX_CEM).  */
1481 for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
1486 plot = __mf_object_dead_head [row];
1487 for (i = 0; i <= recollection; i ++)
1491 /* Look backward through row: it's a circular buffer. */
1492 if (plot > 0) plot --;
1493 else plot = __mf_opts.persistent_count - 1;
1495 obj = __mf_object_cemetary [row][plot];
1496 if (obj && obj->low <= high && obj->high >= low)
1498 /* Found an overlapping dead object! */
1499 if (count < max_objs)
1509 /* Look farther back in time. */
/* Exponentially widen the window: 1, 3, 7, 15, ... plots.  */
1510 recollection = (recollection * 2) + 1;
1519 /* __mf_describe_object */
/* Print a human-readable description of OBJ to stderr: bounds, type,
   access counts, allocation (and, if retained, deallocation) context.
   Calling with OBJ == NULL bumps the description epoch, which resets
   the "already described" suppression used by abbreviate mode.  */
1522 __mf_describe_object (__mf_object_t *obj)
1524 static unsigned epoch = 0;
/* In abbreviate mode, an object already described this epoch gets only
   a one-line reminder.  */
1531 if (__mf_opts.abbreviate && obj->description_epoch == epoch)
1534 "mudflap object %p: name=`%s'\n",
1535 (void *) obj, (obj->name ? obj->name : ""));
1539 obj->description_epoch = epoch;
1542 "mudflap object %p: name=`%s'\n"
1543 "bounds=[%p,%p] size=%lu area=%s check=%ur/%uw liveness=%u%s\n"
1544 "alloc time=%lu.%06lu pc=%p"
1549 (void *) obj, (obj->name ? obj->name : ""),
1550 (void *) obj->low, (void *) obj->high,
1551 (unsigned long) (obj->high - obj->low + 1),
1552 (obj->type == __MF_TYPE_NOACCESS ? "no-access" :
1553 obj->type == __MF_TYPE_HEAP ? "heap" :
1554 obj->type == __MF_TYPE_HEAP_I ? "heap-init" :
1555 obj->type == __MF_TYPE_STACK ? "stack" :
1556 obj->type == __MF_TYPE_STATIC ? "static" :
1557 obj->type == __MF_TYPE_GUESS ? "guess" :
1559 obj->read_count, obj->write_count, obj->liveness,
1560 obj->watching_p ? " watching" : "",
1561 obj->alloc_time.tv_sec, obj->alloc_time.tv_usec,
1562 (void *) obj->alloc_pc
1564 , (unsigned) obj->alloc_thread
/* Optionally dump the captured allocation backtrace.  */
1568 if (__mf_opts.backtrace > 0)
1571 for (i=0; i<obj->alloc_backtrace_size; i++)
1572 fprintf (stderr, " %s\n", obj->alloc_backtrace[i]);
/* Deallocation details exist only when the cemetery is enabled.  */
1575 if (__mf_opts.persistent_count > 0)
1577 if (obj->deallocated_p)
1579 fprintf (stderr, "dealloc time=%lu.%06lu pc=%p"
1584 obj->dealloc_time.tv_sec, obj->dealloc_time.tv_usec,
1585 (void *) obj->dealloc_pc
1587 , (unsigned) obj->dealloc_thread
1592 if (__mf_opts.backtrace > 0)
1595 for (i=0; i<obj->dealloc_backtrace_size; i++)
1596 fprintf (stderr, " %s\n", obj->dealloc_backtrace[i]);
/* splay_tree_foreach callback: describe one leaked (still-registered
   heap) object and bump the running leak counter passed via PARAM.  */
1604 __mf_report_leaks_fn (splay_tree_node n, void *param)
1606 __mf_object_t *node = (__mf_object_t *) n->value;
1607 unsigned *count = (unsigned *) param;
1612 fprintf (stderr, "Leaked object %u:\n", (*count));
1613 __mf_describe_object (node);
/* Walk both heap object trees (HEAP and HEAP_I) describing every
   still-registered object as a leak; returns the total via COUNT
   (accumulated by __mf_report_leaks_fn).  */
1620 __mf_report_leaks ()
1624 (void) splay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP),
1625 __mf_report_leaks_fn, & count);
1626 (void) splay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I),
1627 __mf_report_leaks_fn, & count);
1632 /* ------------------------------------------------------------------------ */
/* Public entry: wrap the unlocked report routine in recursion
   protection so the reporting code's own allocations/checks don't
   recurse into mudflap.  */
1639 BEGIN_RECURSION_PROTECT ();
1641 END_RECURSION_PROTECT ();
/* Unlocked report body: dump call/violation statistics, lookup-cache
   utilization, live/zombie object counts, and (optionally) leaks.  */
1648 if (__mf_opts.collect_stats)
1653 "calls to __mf_check: %lu\n"
1654 " __mf_register: %lu [%luB, %luB, %luB, %luB, %luB]\n"
1655 " __mf_unregister: %lu [%luB]\n"
1656 " __mf_violation: [%lu, %lu, %lu, %lu, %lu]\n",
1658 __mf_count_register,
1659 __mf_total_register_size[0], __mf_total_register_size[1],
1660 __mf_total_register_size[2], __mf_total_register_size[3],
1661 __mf_total_register_size[4], /* XXX */
1662 __mf_count_unregister, __mf_total_unregister_size,
1663 __mf_count_violation[0], __mf_count_violation[1],
1664 __mf_count_violation[2], __mf_count_violation[3],
1665 __mf_count_violation[4]);
1668 "calls with reentrancy: %lu\n", __mf_reentrancy);
1671 " lock contention: %lu\n", __mf_lock_contention);
1674 /* Lookup cache stats. */
1677 unsigned max_reuse = 0;
1678 unsigned num_used = 0;
1679 unsigned num_unused = 0;
1681 for (i = 0; i < LOOKUP_CACHE_SIZE; i++)
1683 if (__mf_lookup_cache_reusecount[i])
1687 if (max_reuse < __mf_lookup_cache_reusecount[i])
1688 max_reuse = __mf_lookup_cache_reusecount[i];
1690 fprintf (stderr, "lookup cache slots used: %u unused: %u peak-reuse: %u\n",
1691 num_used, num_unused, max_reuse);
/* Count live objects across the whole address space.  */
1695 unsigned live_count;
1696 live_count = __mf_find_objects (MINPTR, MAXPTR, NULL, 0);
1697 fprintf (stderr, "number of live objects: %u\n", live_count);
1700 if (__mf_opts.persistent_count > 0)
1702 unsigned dead_count = 0;
1704 for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
1705 for (plot = 0 ; plot < __mf_opts.persistent_count; plot ++)
1706 if (__mf_object_cemetary [row][plot] != 0)
1708 fprintf (stderr, " zombie objects: %u\n", dead_count);
1711 if (__mf_opts.print_leaks && (__mf_opts.mudflap_mode == mode_check))
1714 extern void * __mf_wrap_alloca_indirect (size_t c);
1716 /* Free up any remaining alloca()'d blocks. */
1717 __mf_wrap_alloca_indirect (0);
1718 __mf_describe_object (NULL); /* Reset description epoch. */
1719 l = __mf_report_leaks ();
1720 fprintf (stderr, "number of leaked objects: %u\n", l);
1724 /* __mf_backtrace */
/* Capture a symbolized stack trace into *SYMBOLS and return the number
   of frames kept.  Frames up to GUESS_PC (if found), or the first
   GUESS_OMIT_LEVELS frames otherwise, are trimmed as libmudflap
   wrapper noise.  Caller owns the returned symbol buffer.  */
1727 __mf_backtrace (char ***symbols, void *guess_pc, unsigned guess_omit_levels)
1730 unsigned pc_array_size = __mf_opts.backtrace + guess_omit_levels;
1731 unsigned remaining_size;
1732 unsigned omitted_size = 0;
/* Use the real (unwrapped) allocator entry points to avoid recursing
   through mudflap's own malloc wrappers.  */
1734 DECLARE (void, free, void *ptr);
1735 DECLARE (void *, calloc, size_t c, size_t n);
1736 DECLARE (void *, malloc, size_t n);
1738 pc_array = CALL_REAL (calloc, pc_array_size, sizeof (void *) );
1739 #ifdef HAVE_BACKTRACE
1740 pc_array_size = backtrace (pc_array, pc_array_size);
/* Fallback when backtrace(3) is unavailable: collect frames via
   __builtin_return_address, which only accepts literal arguments.  */
1742 #define FETCH(n) do { if (pc_array_size >= n) { \
1743 pc_array[n] = __builtin_return_address(n); \
1744 if (pc_array[n] == 0) pc_array_size = n; } } while (0)
1746 /* Unroll some calls __builtin_return_address because this function
1747 only takes a literal integer parameter. */
1750 /* XXX: __builtin_return_address sometimes crashes (!) on >0 arguments,
1751 rather than simply returning 0. :-( */
1760 if (pc_array_size > 8) pc_array_size = 9;
1762 if (pc_array_size > 0) pc_array_size = 1;
1768 /* We want to trim the first few levels of the stack traceback,
1769 since they contain libmudflap wrappers and junk. If pc_array[]
1770 ends up containing a non-NULL guess_pc, then trim everything
1771 before that. Otherwise, omit the first guess_omit_levels
1774 if (guess_pc != NULL)
1775 for (i=0; i<pc_array_size; i++)
1776 if (pc_array [i] == guess_pc)
1779 if (omitted_size == 0) /* No match? */
1780 if (pc_array_size > guess_omit_levels)
1781 omitted_size = guess_omit_levels;
1783 remaining_size = pc_array_size - omitted_size;
1785 #ifdef HAVE_BACKTRACE_SYMBOLS
1786 *symbols = backtrace_symbols (pc_array + omitted_size, remaining_size);
1789 /* Let's construct a buffer by hand. It will have <remaining_size>
1790 char*'s at the front, pointing at individual strings immediately
/* Layout: [char *ptrs[remaining_size]][perline bytes per string].  */
1795 enum { perline = 30 };
1796 buffer = CALL_REAL (malloc, remaining_size * (perline + sizeof(char *)));
1797 pointers = (char **) buffer;
1798 chars = (char *)buffer + (remaining_size * sizeof (char *));
1799 for (i = 0; i < remaining_size; i++)
1801 pointers[i] = chars;
1802 sprintf (chars, "[0x%p]", pc_array [omitted_size + i]);
1803 chars = chars + perline;
1805 *symbols = pointers;
1808 CALL_REAL (free, pc_array);
1810 return remaining_size;
1813 /* ------------------------------------------------------------------------ */
1814 /* __mf_violation */
/* Report a bounds/registration violation of TYPE at PC for the region
   [ptr, ptr+sz): print a warning, an optional backtrace, and nearby
   live/dead objects, then act per __mf_opts.violation_mode (e.g.
   SIGSEGV or spawning gdb).  */
1817 __mf_violation (void *ptr, size_t sz, uintptr_t pc,
1818 const char *location, int type)
1821 static unsigned violation_number;
1822 DECLARE(void, free, void *ptr);
1824 TRACE ("violation pc=%p location=%s type=%d ptr=%p size=%lu\n",
1826 (location != NULL ? location : ""), type, ptr, (unsigned long) sz);
/* Clamp out-of-range TYPE values into bucket 0 of the stats array.  */
1828 if (__mf_opts.collect_stats)
1829 __mf_count_violation [(type < 0) ? 0 :
1830 (type > __MF_VIOL_WATCH) ? 0 :
1833 /* Print out a basic warning message. */
1834 if (__mf_opts.verbose_violations)
1837 unsigned num_helpful = 0;
1839 #if HAVE_GETTIMEOFDAY
1840 gettimeofday (& now, NULL);
1843 violation_number ++;
1846 "mudflap violation %u (%s): time=%lu.%06lu "
1847 "ptr=%p size=%lu\npc=%p%s%s%s\n",
1849 ((type == __MF_VIOL_READ) ? "check/read" :
1850 (type == __MF_VIOL_WRITE) ? "check/write" :
1851 (type == __MF_VIOL_REGISTER) ? "register" :
1852 (type == __MF_VIOL_UNREGISTER) ? "unregister" :
1853 (type == __MF_VIOL_WATCH) ? "watch" : "unknown"),
1854 now.tv_sec, now.tv_usec,
1855 (void *) ptr, (unsigned long)sz, (void *) pc,
1856 (location != NULL ? " location=`" : ""),
1857 (location != NULL ? location : ""),
1858 (location != NULL ? "'" : ""));
1860 if (__mf_opts.backtrace > 0)
/* Trim 2 levels of mudflap-internal frames from the trace.  */
1865 num = __mf_backtrace (& symbols, (void *) pc, 2);
1866 /* Note: backtrace_symbols calls malloc(). But since we're in
1867 __mf_violation and presumably __mf_check, it'll detect
1868 recursion, and not put the new string into the database. */
1870 for (i=0; i<num; i++)
1871 fprintf (stderr, " %s\n", symbols[i]);
1873 /* Calling free() here would trigger a violation. */
1874 CALL_REAL(free, symbols);
1878 /* Look for nearby objects. For this, we start with s_low/s_high
1879 pointing to the given area, looking for overlapping objects.
1880 If none show up, widen the search area and keep looking. */
1882 if (sz == 0) sz = 1;
1884 for (dead_p = 0; dead_p <= 1; dead_p ++) /* for dead_p in 0 1 */
1886 enum {max_objs = 3}; /* magic */
1887 __mf_object_t *objs[max_objs];
1888 unsigned num_objs = 0;
1889 uintptr_t s_low, s_high;
1893 s_low = (uintptr_t) ptr;
1894 s_high = CLAMPSZ (ptr, sz);
1896 while (tries < 16) /* magic */
1899 num_objs = __mf_find_dead_objects (s_low, s_high, objs, max_objs);
1901 num_objs = __mf_find_objects (s_low, s_high, objs, max_objs);
1903 if (num_objs) /* good enough */
1908 /* XXX: tune this search strategy. It's too dependent on
1909 sz, which can vary from 1 to very big (when array index
1910 checking) numbers. */
/* Quadratically widen the window on each failed try.  */
1911 s_low = CLAMPSUB (s_low, (sz * tries * tries));
1912 s_high = CLAMPADD (s_high, (sz * tries * tries));
/* Describe each nearby object relative to the checked region:
   how far before/after it the region begins and ends.  */
1915 for (i = 0; i < min (num_objs, max_objs); i++)
1917 __mf_object_t *obj = objs[i];
1918 uintptr_t low = (uintptr_t) ptr;
1919 uintptr_t high = CLAMPSZ (ptr, sz);
1920 unsigned before1 = (low < obj->low) ? obj->low - low : 0;
1921 unsigned after1 = (low > obj->high) ? low - obj->high : 0;
1922 unsigned into1 = (high >= obj->low && low <= obj->high) ? low - obj->low : 0;
1923 unsigned before2 = (high < obj->low) ? obj->low - high : 0;
1924 unsigned after2 = (high > obj->high) ? high - obj->high : 0;
1925 unsigned into2 = (high >= obj->low && low <= obj->high) ? high - obj->low : 0;
1927 fprintf (stderr, "Nearby object %u: checked region begins %uB %s and ends %uB %s\n",
1928 num_helpful + i + 1,
1929 (before1 ? before1 : after1 ? after1 : into1),
1930 (before1 ? "before" : after1 ? "after" : "into"),
1931 (before2 ? before2 : after2 ? after2 : into2),
1932 (before2 ? "before" : after2 ? "after" : "into"));
1933 __mf_describe_object (obj);
1935 num_helpful += num_objs;
1938 fprintf (stderr, "number of nearby objects: %u\n", num_helpful);
1941 /* How to finally handle this violation? */
1942 switch (__mf_opts.violation_mode)
1947 kill (getpid(), SIGSEGV);
1954 snprintf (buf, 128, "gdb --pid=%u", (unsigned) getpid ());
1956 /* XXX: should probably fork() && sleep(GDB_WAIT_PARAMETER)
1957 instead, and let the forked child execlp() gdb. That way, this
1958 subject process can be resumed under the supervision of gdb.
1959 This can't happen now, since system() only returns when gdb
1960 dies. In that case, we need to beware of starting a second
1961 concurrent gdb child upon the next violation. (But if the first
1962 gdb dies, then starting a new one is appropriate.) */
1967 /* ------------------------------------------------------------------------ */
/* Public entry: enable a watch-point on [ptr, ptr+sz), guarded against
   recursive re-entry into mudflap.  */
1970 unsigned __mf_watch (void *ptr, size_t sz)
1974 BEGIN_RECURSION_PROTECT ();
1975 rc = __mf_watch_or_not (ptr, sz, 1);
1976 END_RECURSION_PROTECT ();
/* Public entry: remove a watch-point from [ptr, ptr+sz).  */
1981 unsigned __mf_unwatch (void *ptr, size_t sz)
1985 rc = __mf_watch_or_not (ptr, sz, 0);
/* Shared implementation for __mf_watch/__mf_unwatch: set each
   overlapping object's watching_p to FLAG (1 = watch, 0 = unwatch) and
   evict changed objects from the lookup cache so the next access is
   forced through __mf_check.  */
1992 __mf_watch_or_not (void *ptr, size_t sz, char flag)
1994 uintptr_t ptr_high = CLAMPSZ (ptr, sz);
1995 uintptr_t ptr_low = (uintptr_t) ptr;
1998 TRACE ("%s ptr=%p size=%lu\n",
1999 (flag ? "watch" : "unwatch"), ptr, (unsigned long) sz);
2001 switch (__mf_opts.mudflap_mode)
2011 __mf_object_t **all_ovr_objs;
2014 DECLARE (void *, malloc, size_t c);
2015 DECLARE (void, free, void *p);
/* Two-pass query: first count overlaps, then fetch them all.  */
2017 obj_count = __mf_find_objects (ptr_low, ptr_high, NULL, 0);
2018 VERBOSE_TRACE (" %u:", obj_count);
2020 all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_t *) * obj_count));
2021 if (all_ovr_objs == NULL) abort ();
2022 n = __mf_find_objects (ptr_low, ptr_high, all_ovr_objs, obj_count);
2023 assert (n == obj_count);
2025 for (n = 0; n < obj_count; n ++)
2027 __mf_object_t *obj = all_ovr_objs[n];
2029 VERBOSE_TRACE (" [%p]", (void *) obj);
2030 if (obj->watching_p != flag)
2032 obj->watching_p = flag;
2035 /* Remove object from cache, to ensure next access
2036 goes through __mf_check(). */
2038 __mf_uncache_object (obj);
2041 CALL_REAL (free, all_ovr_objs);
/* SIGUSR1 handler: just record receipt; the real work happens later in
   __mf_sigusr1_respond, outside signal context.  */
2051 __mf_sigusr1_handler (int num)
2053 __mf_sigusr1_received ++;
2056 /* Install or remove SIGUSR1 handler as necessary.
2057 Also, respond to a received pending SIGUSR1. */
2059 __mf_sigusr1_respond ()
2061 static int handler_installed;
2064 /* Manage handler */
2065 if (__mf_opts.sigusr1_report && ! handler_installed)
2067 signal (SIGUSR1, __mf_sigusr1_handler);
2068 handler_installed = 1;
2070 else if(! __mf_opts.sigusr1_report && handler_installed)
2072 signal (SIGUSR1, SIG_DFL);
2073 handler_installed = 0;
2077 /* Manage enqueued signals */
/* A pending signal exists when received outruns handled.  */
2078 if (__mf_sigusr1_received > __mf_sigusr1_handled)
2080 __mf_sigusr1_handled ++;
2081 assert (__mf_state == reentrant);
2083 handler_installed = 0; /* We may need to re-enable signal; this might be a SysV library. */
2088 /* XXX: provide an alternative __assert_fail function that cannot
2089 fail due to libmudflap infinite recursion. */
/* Write the decimal representation of N to FD using only write(2) —
   no printf/malloc, so it is safe inside assertion failure paths.
   Digits are generated least-significant-first into the tail of a
   fixed buffer, then the used suffix is written out.  */
2093 write_itoa (int fd, unsigned n)
2095 enum x { bufsize = sizeof(n)*4 };
2099 for (i=0; i<bufsize-1; i++)
2101 unsigned digit = n % 10;
2102 buf[bufsize-2-i] = digit + '0';
2106 char *m = & buf [bufsize-2-i];
2107 buf[bufsize-1] = '\0';
2108 write (fd, m, strlen(m));
/* Replacement for libc's __assert_fail that performs only raw write(2)
   calls, so an assertion inside libmudflap cannot recurse back through
   wrapped allocation or stdio routines.  */
2116 __assert_fail (const char *msg, const char *file, unsigned line, const char *func)
2118 #define write2(string) write (2, (string), strlen ((string)));
2122 write_itoa (2, (unsigned) pthread_self ());
2125 write2(": assertion failure: `");
2126 write (2, msg, strlen (msg));
2128 write (2, func, strlen (func));
2130 write (2, file, strlen (file));
2132 write_itoa (2, line);
2145 /* #include the generic splay tree implementation from libiberty here, to
2146 ensure that it uses our memory allocation primitives. */
/* Deallocation hook for the embedded splay-tree code: route through the
   real free() to bypass mudflap's wrapper.  */
2149 splay_tree_free (void *p)
2151 DECLARE (void, free, void *p);
2152 CALL_REAL (free, p);
/* Allocation hook for the embedded splay-tree code: route through the
   real malloc() to bypass mudflap's wrapper.  */
2156 splay_tree_xmalloc (size_t s)
2158 DECLARE (void *, malloc, size_t s);
2159 return CALL_REAL (malloc, s);
/* Redirect the splay-tree implementation's allocator calls to the
   hooks above, then textually include it so it is compiled with our
   memory primitives.  */
2162 #define free(z) splay_tree_free(z)
2163 #define xmalloc(z) splay_tree_xmalloc(z)
2164 #include "splay-tree.c"