1 /* Mudflap: narrow-pointer bounds-checking by tree rewriting.
2 Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
3 Contributed by Frank Ch. Eigler <fche@redhat.com>
4 and Graydon Hoare <graydon@redhat.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later version.
13 In addition to the permissions in the GNU General Public License, the
14 Free Software Foundation gives you unlimited permission to link the
15 compiled version of this file into combinations with other programs,
16 and to distribute those combinations without any restriction coming
17 from the use of this file. (The General Public License restrictions
18 do apply in other respects; for example, they cover modification of
19 the file, and distribution when not linked into a combined executable.)
22 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
23 WARRANTY; without even the implied warranty of MERCHANTABILITY or
24 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
27 You should have received a copy of the GNU General Public License
28 along with GCC; see the file COPYING. If not, write to the Free
29 Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */
34 /* These attempt to coax various unix flavours to declare all our
35 needed tidbits in the system headers. */
36 #if !defined(__FreeBSD__)
38 #endif /* Some BSDs break <sys/socket.h> if this is defined. */
42 #define __EXTENSIONS__
44 #define _LARGE_FILE_API
45 #define _XOPEN_SOURCE_EXTENDED 1
49 #include <sys/types.h>
53 #ifdef HAVE_EXECINFO_H
63 #include <sys/types.h>
68 #include "mf-runtime.h"
70 #include "splay-tree.h"
73 /* ------------------------------------------------------------------------ */
76 #define CTOR __attribute__ ((constructor))
77 #define DTOR __attribute__ ((destructor))
80 /* Codes to describe the context in which a violation occurs. */
81 #define __MF_VIOL_UNKNOWN 0
82 #define __MF_VIOL_READ 1
83 #define __MF_VIOL_WRITE 2
84 #define __MF_VIOL_REGISTER 3
85 #define __MF_VIOL_UNREGISTER 4
86 #define __MF_VIOL_WATCH 5
88 /* Protect against recursive calls. */
89 #define BEGIN_RECURSION_PROTECT() do { \
90 if (UNLIKELY (__mf_state == reentrant)) { \
91 write (2, "mf: erroneous reentrancy detected in `", 38); \
92 write (2, __PRETTY_FUNCTION__, strlen(__PRETTY_FUNCTION__)); \
93 write (2, "'\n", 2); \
95 __mf_state = reentrant; \
98 #define END_RECURSION_PROTECT() do { \
99 __mf_state = active; \
104 /* ------------------------------------------------------------------------ */
105 /* Required globals. */
107 #define LOOKUP_CACHE_MASK_DFL 1023
108 #define LOOKUP_CACHE_SIZE_MAX 4096 /* Allows max CACHE_MASK 0x0FFF */
109 #define LOOKUP_CACHE_SHIFT_DFL 2
111 struct __mf_cache __mf_lookup_cache [LOOKUP_CACHE_SIZE_MAX];
112 uintptr_t __mf_lc_mask = LOOKUP_CACHE_MASK_DFL;
113 unsigned char __mf_lc_shift = LOOKUP_CACHE_SHIFT_DFL;
114 #define LOOKUP_CACHE_SIZE (__mf_lc_mask + 1)
116 struct __mf_options __mf_opts;
118 int __mf_starting_p = 1;
120 enum __mf_state_enum __mf_state = active;
122 /* See __mf_state_perthread() in mf-hooks.c. */
127 pthread_mutex_t __mf_biglock =
128 #ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
129 PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
131 PTHREAD_MUTEX_INITIALIZER;
135 /* Use HAVE_PTHREAD_H here instead of LIBMUDFLAPTH, so that even
136 the libmudflap.la (no threading support) can diagnose whether
137 the application is linked with -lpthread. See __mf_usage() below. */
139 #ifdef _POSIX_THREADS
140 #pragma weak pthread_join
142 #define pthread_join NULL
144 const void *threads_active_p = (void *) pthread_join;
148 /* ------------------------------------------------------------------------ */
149 /* stats-related globals. */
151 static unsigned long __mf_count_check;
152 static unsigned long __mf_lookup_cache_reusecount [LOOKUP_CACHE_SIZE_MAX];
153 static unsigned long __mf_count_register;
154 static unsigned long __mf_total_register_size [__MF_TYPE_MAX+1];
155 static unsigned long __mf_count_unregister;
156 static unsigned long __mf_total_unregister_size;
157 static unsigned long __mf_count_violation [__MF_VIOL_WATCH+1];
158 static unsigned long __mf_sigusr1_received;
159 static unsigned long __mf_sigusr1_handled;
160 /* not static */ unsigned long __mf_reentrancy;
162 /* not static */ unsigned long __mf_lock_contention;
166 /* ------------------------------------------------------------------------ */
167 /* mode-check-related globals. */
169 typedef struct __mf_object
171 uintptr_t low, high; /* __mf_register parameters */
173 char type; /* __MF_TYPE_something */
174 char watching_p; /* Trigger a VIOL_WATCH on access? */
175 unsigned read_count; /* Number of times __mf_check/read was called on this object. */
176 unsigned write_count; /* Likewise for __mf_check/write. */
177 unsigned liveness; /* A measure of recent checking activity. */
178 unsigned description_epoch; /* Last epoch __mf_describe_object printed this. */
181 struct timeval alloc_time;
182 char **alloc_backtrace;
183 size_t alloc_backtrace_size;
185 pthread_t alloc_thread;
189 uintptr_t dealloc_pc;
190 struct timeval dealloc_time;
191 char **dealloc_backtrace;
192 size_t dealloc_backtrace_size;
194 pthread_t dealloc_thread;
198 /* Live objects: splay trees, separated by type, ordered on .low (base address). */
199 /* Actually stored as static vars within lookup function below. */
201 /* Dead objects: circular arrays; _MIN_CEM .. _MAX_CEM only */
202 static unsigned __mf_object_dead_head[__MF_TYPE_MAX_CEM+1]; /* next empty spot */
203 static __mf_object_t *__mf_object_cemetary[__MF_TYPE_MAX_CEM+1][__MF_PERSIST_MAX];
206 /* ------------------------------------------------------------------------ */
207 /* Forward function declarations */
209 static void __mf_init () CTOR;
210 static void __mf_sigusr1_respond ();
211 static unsigned __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
212 __mf_object_t **objs, unsigned max_objs);
213 static unsigned __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
214 __mf_object_t **objs, unsigned max_objs, int type);
215 static unsigned __mf_find_dead_objects (uintptr_t ptr_low, uintptr_t ptr_high,
216 __mf_object_t **objs, unsigned max_objs);
217 static void __mf_adapt_cache ();
218 static void __mf_describe_object (__mf_object_t *obj);
219 static unsigned __mf_watch_or_not (void *ptr, size_t sz, char flag);
220 static splay_tree __mf_object_tree (int type);
221 static void __mf_link_object (__mf_object_t *node);
222 static void __mf_unlink_object (__mf_object_t *node);
225 /* ------------------------------------------------------------------------ */
226 /* Configuration engine */
229 __mf_set_default_options ()
231 memset (& __mf_opts, 0, sizeof (__mf_opts));
233 __mf_opts.adapt_cache = 1000003;
234 __mf_opts.abbreviate = 1;
235 __mf_opts.verbose_violations = 1;
236 __mf_opts.free_queue_length = 4;
237 __mf_opts.persistent_count = 100;
238 __mf_opts.crumple_zone = 32;
239 __mf_opts.backtrace = 4;
240 __mf_opts.mudflap_mode = mode_check;
241 __mf_opts.violation_mode = viol_nop;
242 __mf_opts.heur_std_data = 1;
244 __mf_opts.thread_stack = 0;
263 "mudflaps do nothing",
264 set_option, (int)mode_nop, (int *)&__mf_opts.mudflap_mode},
266 "mudflaps populate object tree",
267 set_option, (int)mode_populate, (int *)&__mf_opts.mudflap_mode},
269 "mudflaps check for memory violations",
270 set_option, (int)mode_check, (int *)&__mf_opts.mudflap_mode},
272 "mudflaps always cause violations (diagnostic)",
273 set_option, (int)mode_violate, (int *)&__mf_opts.mudflap_mode},
276 "violations do not change program execution",
277 set_option, (int)viol_nop, (int *)&__mf_opts.violation_mode},
279 "violations cause a call to abort()",
280 set_option, (int)viol_abort, (int *)&__mf_opts.violation_mode},
282 "violations are promoted to SIGSEGV signals",
283 set_option, (int)viol_segv, (int *)&__mf_opts.violation_mode},
285 "violations fork a gdb process attached to current program",
286 set_option, (int)viol_gdb, (int *)&__mf_opts.violation_mode},
288 "trace calls to mudflap runtime library",
289 set_option, 1, &__mf_opts.trace_mf_calls},
291 "trace internal events within mudflap runtime library",
292 set_option, 1, &__mf_opts.verbose_trace},
294 "collect statistics on mudflap's operation",
295 set_option, 1, &__mf_opts.collect_stats},
298 "print report upon SIGUSR1",
299 set_option, 1, &__mf_opts.sigusr1_report},
301 {"internal-checking",
302 "perform more expensive internal checking",
303 set_option, 1, &__mf_opts.internal_checking},
305 "print any memory leaks at program shutdown",
306 set_option, 1, &__mf_opts.print_leaks},
307 {"check-initialization",
308 "detect uninitialized object reads",
309 set_option, 1, &__mf_opts.check_initialization},
310 {"verbose-violations",
311 "print verbose messages when memory violations occur",
312 set_option, 1, &__mf_opts.verbose_violations},
314 "abbreviate repetitive listings",
315 set_option, 1, &__mf_opts.abbreviate},
317 "wipe stack objects at unwind",
318 set_option, 1, &__mf_opts.wipe_stack},
320 "wipe heap objects at free",
321 set_option, 1, &__mf_opts.wipe_heap},
323 "support /proc/self/map heuristics",
324 set_option, 1, &__mf_opts.heur_proc_map},
326 "enable a simple upper stack bound heuristic",
327 set_option, 1, &__mf_opts.heur_stack_bound},
329 "support _start.._end heuristics",
330 set_option, 1, &__mf_opts.heur_start_end},
332 "register standard library data (argv, errno, stdin, ...)",
333 set_option, 1, &__mf_opts.heur_std_data},
334 {"free-queue-length",
335 "queue N deferred free() calls before performing them",
336 read_integer_option, 0, &__mf_opts.free_queue_length},
338 "keep a history of N unregistered regions",
339 read_integer_option, 0, &__mf_opts.persistent_count},
341 "surround allocations with crumple zones of N bytes",
342 read_integer_option, 0, &__mf_opts.crumple_zone},
343 /* XXX: not type-safe.
345 "set lookup cache size mask to N (2**M - 1)",
346 read_integer_option, 0, (int *)(&__mf_lc_mask)},
348 "set lookup cache pointer shift",
349 read_integer_option, 0, (int *)(&__mf_lc_shift)},
352 "adapt mask/shift parameters after N cache misses",
353 read_integer_option, 1, &__mf_opts.adapt_cache},
355 "keep an N-level stack trace of each call context",
356 read_integer_option, 0, &__mf_opts.backtrace},
359 "override thread stacks allocation: N kB",
360 read_integer_option, 0, &__mf_opts.thread_stack},
362 {0, 0, set_option, 0, NULL}
371 "This is a %s%sGCC \"mudflap\" memory-checked binary.\n"
372 "Mudflap is Copyright (C) 2002-2003 Free Software Foundation, Inc.\n"
374 "The mudflap code can be controlled by an environment variable:\n"
376 "$ export MUDFLAP_OPTIONS='<options>'\n"
377 "$ <mudflapped_program>\n"
379 "where <options> is a space-separated list of \n"
380 "any of the following options. Use `-no-OPTION' to disable options.\n"
383 (threads_active_p ? "multi-threaded " : "single-threaded "),
393 /* XXX: The multi-threaded thread-unaware combination is bad. */
395 for (opt = options; opt->name; opt++)
397 int default_p = (opt->value == * opt->target);
403 fprintf (stderr, "-%-23.23s %s", opt->name, opt->description);
405 fprintf (stderr, " [active]\n");
407 fprintf (stderr, "\n");
409 case read_integer_option:
410 strncpy (buf, opt->name, 128);
411 strncpy (buf + strlen (opt->name), "=N", 2);
412 fprintf (stderr, "-%-23.23s %s", buf, opt->description);
413 fprintf (stderr, " [%d]\n", * opt->target);
419 fprintf (stderr, "\n");
/* Public, locking wrapper around __mfu_set_options: parse OPTSTR and
   apply the settings.  Returns the unlocked routine's result (negative
   on parse failure).  */
int
__mf_set_options (const char *optstr)
{
  int rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mfu_set_options (optstr);
  /* XXX: It's not really that easy.  A change to a bunch of parameters
     can require updating auxiliary state or risk crashing:
     free_queue_length, crumple_zone ... */
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}
440 __mfu_set_options (const char *optstr)
442 struct option *opts = 0;
446 const char *saved_optstr = optstr;
448 /* XXX: bounds-check for optstr! */
465 if (*optstr == '?' ||
466 strncmp (optstr, "help", 4) == 0)
468 /* Caller will print help and exit. */
472 if (strncmp (optstr, "no-", 3) == 0)
475 optstr = & optstr[3];
478 for (opts = options; opts->name; opts++)
480 if (strncmp (optstr, opts->name, strlen (opts->name)) == 0)
482 optstr += strlen (opts->name);
483 assert (opts->target);
490 *(opts->target) = opts->value;
492 case read_integer_option:
493 if (! negate && (*optstr == '=' && *(optstr+1)))
496 tmp = strtol (optstr, &nxt, 10);
497 if ((optstr != nxt) && (tmp != LONG_MAX))
500 *(opts->target) = (int)tmp;
514 "warning: unrecognized string '%s' in mudflap options\n",
516 optstr += strlen (optstr);
522 /* Special post-processing: bound __mf_lc_mask and free_queue_length for security. */
523 __mf_lc_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
524 __mf_opts.free_queue_length &= (__MF_FREEQ_MAX - 1);
526 /* Clear the lookup cache, in case the parameters got changed. */
528 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
530 __mf_lookup_cache[0].low = MAXPTR;
532 TRACE ("set options from `%s'\n", saved_optstr);
534 /* Call this unconditionally, in case -sigusr1-report was toggled. */
535 __mf_sigusr1_respond ();
544 __mf_resolve_single_dynamic (struct __mf_dynamic_entry *e)
549 if (e->pointer) return;
552 if (e->version != NULL && e->version[0] != '\0') /* non-null/empty */
553 e->pointer = dlvsym (RTLD_NEXT, e->name, e->version);
556 e->pointer = dlsym (RTLD_NEXT, e->name);
562 fprintf (stderr, "mf: error in dlsym(\"%s\"): %s\n",
568 fprintf (stderr, "mf: dlsym(\"%s\") = NULL\n", e->name);
575 __mf_resolve_dynamics ()
578 for (i = 0; i < dyn_INITRESOLVE; i++)
579 __mf_resolve_single_dynamic (& __mf_dynamic[i]);
583 /* NB: order must match enums in mf-impl.h */
584 struct __mf_dynamic_entry __mf_dynamic [] =
586 {NULL, "calloc", NULL},
587 {NULL, "free", NULL},
588 {NULL, "malloc", NULL},
589 {NULL, "mmap", NULL},
590 {NULL, "munmap", NULL},
591 {NULL, "realloc", NULL},
592 {NULL, "DUMMY", NULL}, /* dyn_INITRESOLVE */
594 {NULL, "pthread_create", PTHREAD_CREATE_VERSION},
595 {NULL, "pthread_join", NULL},
596 {NULL, "pthread_exit", NULL}
604 /* ------------------------------------------------------------------------ */
606 /* Lookup & manage automatic initialization of the five or so splay trees. */
608 __mf_object_tree (int type)
610 static splay_tree trees [__MF_TYPE_MAX+1];
611 assert (type >= 0 && type <= __MF_TYPE_MAX);
612 if (UNLIKELY (trees[type] == NULL))
613 trees[type] = splay_tree_new ();
623 /* This initial bootstrap phase requires that __mf_starting_p = 1. */
625 __mf_resolve_dynamics ();
629 __mf_set_default_options ();
631 ov = getenv ("MUDFLAP_OPTIONS");
634 int rc = __mfu_set_options (ov);
642 /* Initialize to a non-zero description epoch. */
643 __mf_describe_object (NULL);
645 #define REG_RESERVED(obj) \
646 __mf_register (& obj, sizeof(obj), __MF_TYPE_NOACCESS, # obj)
648 REG_RESERVED (__mf_lookup_cache);
649 REG_RESERVED (__mf_lc_mask);
650 REG_RESERVED (__mf_lc_shift);
651 /* XXX: others of our statics? */
653 /* Prevent access to *NULL. */
654 __mf_register (MINPTR, 1, __MF_TYPE_NOACCESS, "NULL");
655 __mf_lookup_cache[0].low = (uintptr_t) -1;
661 __wrap_main (int argc, char* argv[])
663 extern char **environ;
665 static int been_here = 0;
667 if (__mf_opts.heur_std_data && ! been_here)
672 __mf_register (argv, sizeof(char *)*(argc+1), __MF_TYPE_STATIC, "argv[]");
673 for (i=0; i<argc; i++)
675 unsigned j = strlen (argv[i]);
676 __mf_register (argv[i], j+1, __MF_TYPE_STATIC, "argv element");
681 char *e = environ[i];
683 if (e == NULL) break;
684 j = strlen (environ[i]);
685 __mf_register (environ[i], j+1, __MF_TYPE_STATIC, "environ element");
687 __mf_register (environ, sizeof(char *)*(i+1), __MF_TYPE_STATIC, "environ[]");
689 __mf_register (& errno, sizeof (errno), __MF_TYPE_STATIC, "errno area");
691 __mf_register (stdin, sizeof (*stdin), __MF_TYPE_STATIC, "stdin");
692 __mf_register (stdout, sizeof (*stdout), __MF_TYPE_STATIC, "stdout");
693 __mf_register (stderr, sizeof (*stderr), __MF_TYPE_STATIC, "stderr");
695 /* Make some effort to register ctype.h static arrays. */
696 /* XXX: e.g., on Solaris, may need to register __ctype, _ctype, __ctype_mask, __toupper, etc. */
697 /* On modern Linux GLIBC, these are thread-specific and changeable, and are dealt
698 with in mf-hooks2.c. */
702 return main (argc, argv, environ);
704 return __real_main (argc, argv, environ);
710 extern void __mf_fini () DTOR;
713 TRACE ("__mf_fini\n");
719 /* ------------------------------------------------------------------------ */
/* Public, locking wrapper around __mfu_check: validate an access of
   SZ bytes at PTR (TYPE: read or write) attributed to LOCATION.  */
void __mf_check (void *ptr, size_t sz, int type, const char *location)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_check (ptr, sz, type, location);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
732 void __mfu_check (void *ptr, size_t sz, int type, const char *location)
734 unsigned entry_idx = __MF_CACHE_INDEX (ptr);
735 struct __mf_cache *entry = & __mf_lookup_cache [entry_idx];
736 int judgement = 0; /* 0=undecided; <0=violation; >0=okay */
737 uintptr_t ptr_low = (uintptr_t) ptr;
738 uintptr_t ptr_high = CLAMPSZ (ptr, sz);
739 struct __mf_cache old_entry = *entry;
741 if (UNLIKELY (__mf_opts.sigusr1_report))
742 __mf_sigusr1_respond ();
744 TRACE ("check ptr=%p b=%u size=%lu %s location=`%s'\n",
745 ptr, entry_idx, (unsigned long)sz,
746 (type == 0 ? "read" : "write"), location);
748 switch (__mf_opts.mudflap_mode)
752 entry->high = MAXPTR;
757 entry->low = ptr_low;
758 entry->high = ptr_high;
764 unsigned heuristics = 0;
766 /* Advance aging/adaptation counters. */
767 static unsigned adapt_count;
769 if (UNLIKELY (__mf_opts.adapt_cache > 0 &&
770 adapt_count > __mf_opts.adapt_cache))
776 /* Looping only occurs if heuristics were triggered. */
777 while (judgement == 0)
779 DECLARE (void, free, void *p);
780 __mf_object_t* ovr_obj[1];
782 __mf_object_t** all_ovr_obj = NULL;
783 __mf_object_t** dealloc_me = NULL;
786 /* Find all overlapping objects. Be optimistic that there is just one. */
787 obj_count = __mf_find_objects (ptr_low, ptr_high, ovr_obj, 1);
788 if (UNLIKELY (obj_count > 1))
790 /* Allocate a real buffer and do the search again. */
791 DECLARE (void *, malloc, size_t c);
793 all_ovr_obj = CALL_REAL (malloc, (sizeof (__mf_object_t *) *
795 if (all_ovr_obj == NULL) abort ();
796 n = __mf_find_objects (ptr_low, ptr_high, all_ovr_obj, obj_count);
797 assert (n == obj_count);
798 dealloc_me = all_ovr_obj;
802 all_ovr_obj = ovr_obj;
806 /* Update object statistics. */
807 for (i = 0; i < obj_count; i++)
809 __mf_object_t *obj = all_ovr_obj[i];
810 assert (obj != NULL);
811 if (type == __MF_CHECK_READ)
818 /* Iterate over the various objects. There are a number of special cases. */
819 for (i = 0; i < obj_count; i++)
821 __mf_object_t *obj = all_ovr_obj[i];
823 /* Any __MF_TYPE_NOACCESS hit is bad. */
824 if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
827 /* Any object with a watch flag is bad. */
828 if (UNLIKELY (obj->watching_p))
829 judgement = -2; /* trigger VIOL_WATCH */
831 /* A read from an uninitialized object is bad. */
832 if (UNLIKELY (__mf_opts.check_initialization
834 && type == __MF_CHECK_READ
836 && obj->write_count == 0
837 /* uninitialized (heap) */
838 && obj->type == __MF_TYPE_HEAP))
842 /* We now know that the access spans one or more valid objects. */
843 if (LIKELY (judgement >= 0))
844 for (i = 0; i < obj_count; i++)
846 __mf_object_t *obj = all_ovr_obj[i];
848 /* Is this access entirely contained within this object? */
849 if (LIKELY (ptr_low >= obj->low && ptr_high <= obj->high))
852 entry->low = obj->low;
853 entry->high = obj->high;
857 /* XXX: Access runs off left or right side of this
858 object. That could be okay, if there are
859 other objects that fill in all the holes. */
862 if (dealloc_me != NULL)
863 CALL_REAL (free, dealloc_me);
865 /* If the judgment is still unknown at this stage, loop
866 around at most one more time. */
869 if (heuristics++ < 2) /* XXX parametrize this number? */
870 judgement = __mf_heuristic_check (ptr_low, ptr_high);
884 if (__mf_opts.collect_stats)
888 if (LIKELY (old_entry.low != entry->low || old_entry.high != entry->high))
889 /* && (old_entry.low != 0) && (old_entry.high != 0)) */
890 __mf_lookup_cache_reusecount [entry_idx] ++;
893 if (UNLIKELY (judgement < 0))
894 __mf_violation (ptr, sz,
895 (uintptr_t) __builtin_return_address (0), location,
897 (type == __MF_CHECK_READ ? __MF_VIOL_READ : __MF_VIOL_WRITE) :
902 static __mf_object_t *
903 __mf_insert_new_object (uintptr_t low, uintptr_t high, int type,
904 const char *name, uintptr_t pc)
906 DECLARE (void *, calloc, size_t c, size_t n);
908 __mf_object_t *new_obj;
909 new_obj = CALL_REAL (calloc, 1, sizeof(__mf_object_t));
911 new_obj->high = high;
912 new_obj->type = type;
913 new_obj->name = name;
914 new_obj->alloc_pc = pc;
915 #if HAVE_GETTIMEOFDAY
916 gettimeofday (& new_obj->alloc_time, NULL);
919 new_obj->alloc_thread = pthread_self ();
922 if (__mf_opts.backtrace > 0 && (type == __MF_TYPE_HEAP || type == __MF_TYPE_HEAP_I))
923 new_obj->alloc_backtrace_size =
924 __mf_backtrace (& new_obj->alloc_backtrace,
927 __mf_link_object (new_obj);
933 __mf_uncache_object (__mf_object_t *old_obj)
935 /* Remove any low/high pointers for this object from the lookup cache. */
937 /* Can it possibly exist in the cache? */
938 if (LIKELY (old_obj->read_count + old_obj->write_count))
940 uintptr_t low = old_obj->low;
941 uintptr_t high = old_obj->high;
942 unsigned idx_low = __MF_CACHE_INDEX (low);
943 unsigned idx_high = __MF_CACHE_INDEX (high);
945 for (i = idx_low; i <= idx_high; i++)
947 struct __mf_cache *entry = & __mf_lookup_cache [i];
948 /* NB: the "||" in the following test permits this code to
949 tolerate the situation introduced by __mf_check over
950 contiguous objects, where a cache entry spans several
952 if (entry->low == low || entry->high == high)
955 entry->high = MINPTR;
/* Public, locking wrapper around __mfu_register: start tracking the
   SZ-byte region at PTR as an object of TYPE labelled NAME.  */
void
__mf_register (void *ptr, size_t sz, int type, const char *name)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_register (ptr, sz, type, name);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
974 __mfu_register (void *ptr, size_t sz, int type, const char *name)
976 TRACE ("register ptr=%p size=%lu type=%x name='%s'\n",
977 ptr, (unsigned long) sz, type, name ? name : "");
979 if (__mf_opts.collect_stats)
981 __mf_count_register ++;
982 __mf_total_register_size [(type < 0) ? 0 :
983 (type > __MF_TYPE_MAX) ? 0 :
987 if (UNLIKELY (__mf_opts.sigusr1_report))
988 __mf_sigusr1_respond ();
990 switch (__mf_opts.mudflap_mode)
996 __mf_violation (ptr, sz, (uintptr_t) __builtin_return_address (0), NULL,
1001 /* Clear the cache. */
1002 /* XXX: why the entire cache? */
1004 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1006 __mf_lookup_cache[0].low = MAXPTR;
1011 __mf_object_t *ovr_objs [1];
1012 unsigned num_overlapping_objs;
1013 uintptr_t low = (uintptr_t) ptr;
1014 uintptr_t high = CLAMPSZ (ptr, sz);
1015 uintptr_t pc = (uintptr_t) __builtin_return_address (0);
1017 /* Treat unknown size indication as 1. */
1018 if (UNLIKELY (sz == 0)) sz = 1;
1020 /* Look for objects only of the same type. This will e.g. permit a registration
1021 of a STATIC overlapping with a GUESS, and a HEAP with a NOACCESS. At
1022 __mf_check time however harmful overlaps will be detected. */
1023 num_overlapping_objs = __mf_find_objects2 (low, high, ovr_objs, 1, type);
1025 /* Handle overlaps. */
1026 if (UNLIKELY (num_overlapping_objs > 0))
1028 __mf_object_t *ovr_obj = ovr_objs[0];
1030 /* Accept certain specific duplication pairs. */
1031 if (((type == __MF_TYPE_STATIC) || (type == __MF_TYPE_GUESS))
1032 && ovr_obj->low == low
1033 && ovr_obj->high == high
1034 && ovr_obj->type == type)
1036 /* Duplicate registration for static objects may come
1037 from distinct compilation units. */
1038 VERBOSE_TRACE ("harmless duplicate reg %p-%p `%s'\n",
1039 (void *) low, (void *) high,
1040 (ovr_obj->name ? ovr_obj->name : ""));
1044 /* Alas, a genuine violation. */
1047 /* Two or more *real* mappings here. */
1048 __mf_violation ((void *) ptr, sz,
1049 (uintptr_t) __builtin_return_address (0), NULL,
1050 __MF_VIOL_REGISTER);
1053 else /* No overlapping objects: AOK. */
1054 __mf_insert_new_object (low, high, type, name, pc);
1056 /* We could conceivably call __mf_check() here to prime the cache,
1057 but then the read_count/write_count field is not reliable. */
1060 } /* end switch (__mf_opts.mudflap_mode) */
/* Public, locking wrapper around __mfu_unregister: stop tracking the
   SZ-byte region of TYPE at PTR.  */
void
__mf_unregister (void *ptr, size_t sz, int type)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_unregister (ptr, sz, type);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
1076 __mfu_unregister (void *ptr, size_t sz, int type)
1078 DECLARE (void, free, void *ptr);
1080 if (UNLIKELY (__mf_opts.sigusr1_report))
1081 __mf_sigusr1_respond ();
1083 TRACE ("unregister ptr=%p size=%lu type=%x\n", ptr, (unsigned long) sz, type);
1085 switch (__mf_opts.mudflap_mode)
1091 __mf_violation (ptr, sz,
1092 (uintptr_t) __builtin_return_address (0), NULL,
1093 __MF_VIOL_UNREGISTER);
1097 /* Clear the cache. */
1099 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1101 __mf_lookup_cache[0].low = MAXPTR;
1106 __mf_object_t *old_obj = NULL;
1107 __mf_object_t *del_obj = NULL; /* Object to actually delete. */
1108 __mf_object_t *objs[1] = {NULL};
1109 unsigned num_overlapping_objs;
1111 num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
1112 CLAMPSZ (ptr, sz), objs, 1, type);
1114 /* Special case for HEAP_I - see free & realloc hook. They don't
1115 know whether the input region was HEAP or HEAP_I before
1116 unmapping it. Here we give HEAP a try in case HEAP_I
1118 if ((type == __MF_TYPE_HEAP_I) && (num_overlapping_objs == 0))
1120 num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
1121 CLAMPSZ (ptr, sz), objs, 1, __MF_TYPE_HEAP);
1125 if (UNLIKELY ((num_overlapping_objs != 1) /* more than one overlap */
1126 || ((sz == 0) ? 0 : (sz != (old_obj->high - old_obj->low + 1))) /* size mismatch */
1127 || ((uintptr_t) ptr != old_obj->low))) /* base mismatch */
1129 __mf_violation (ptr, sz,
1130 (uintptr_t) __builtin_return_address (0), NULL,
1131 __MF_VIOL_UNREGISTER);
1135 __mf_unlink_object (old_obj);
1136 __mf_uncache_object (old_obj);
1138 /* Wipe buffer contents if desired. */
1139 if ((__mf_opts.wipe_stack && old_obj->type == __MF_TYPE_STACK)
1140 || (__mf_opts.wipe_heap && (old_obj->type == __MF_TYPE_HEAP
1141 || old_obj->type == __MF_TYPE_HEAP_I)))
1143 memset ((void *) old_obj->low,
1145 (size_t) (old_obj->high - old_obj->low + 1));
1148 /* Manage the object cemetary. */
1149 if (__mf_opts.persistent_count > 0 &&
1150 old_obj->type >= 0 &&
1151 old_obj->type <= __MF_TYPE_MAX_CEM)
1153 old_obj->deallocated_p = 1;
1154 old_obj->dealloc_pc = (uintptr_t) __builtin_return_address (0);
1155 #if HAVE_GETTIMEOFDAY
1156 gettimeofday (& old_obj->dealloc_time, NULL);
1159 old_obj->dealloc_thread = pthread_self ();
1162 if (__mf_opts.backtrace > 0 && old_obj->type == __MF_TYPE_HEAP)
1163 old_obj->dealloc_backtrace_size =
1164 __mf_backtrace (& old_obj->dealloc_backtrace,
1167 /* Encourage this object to be displayed again in current epoch. */
1168 old_obj->description_epoch --;
1170 /* Put this object into the cemetary. This may require this plot to
1171 be recycled, and the previous resident to be designated del_obj. */
1173 unsigned row = old_obj->type;
1174 unsigned plot = __mf_object_dead_head [row];
1176 del_obj = __mf_object_cemetary [row][plot];
1177 __mf_object_cemetary [row][plot] = old_obj;
1179 if (plot == __mf_opts.persistent_count) plot = 0;
1180 __mf_object_dead_head [row] = plot;
1186 if (__mf_opts.print_leaks)
1188 if ((old_obj->read_count + old_obj->write_count) == 0 &&
1189 (old_obj->type == __MF_TYPE_HEAP
1190 || old_obj->type == __MF_TYPE_HEAP_I))
1194 "mudflap warning: unaccessed registered object:\n");
1195 __mf_describe_object (old_obj);
1199 if (del_obj != NULL) /* May or may not equal old_obj. */
1201 if (__mf_opts.backtrace > 0)
1203 CALL_REAL(free, del_obj->alloc_backtrace);
1204 if (__mf_opts.persistent_count > 0)
1206 CALL_REAL(free, del_obj->dealloc_backtrace);
1209 CALL_REAL(free, del_obj);
1214 } /* end switch (__mf_opts.mudflap_mode) */
1217 if (__mf_opts.collect_stats)
1219 __mf_count_unregister ++;
1220 __mf_total_unregister_size += sz;
1229 unsigned long total_size;
1230 unsigned live_obj_count;
1231 double total_weight;
1232 double weighted_size;
1233 unsigned long weighted_address_bits [sizeof (uintptr_t) * 8][2];
1239 __mf_adapt_cache_fn (splay_tree_node n, void *param)
1241 __mf_object_t *obj = (__mf_object_t *) n->value;
1242 struct tree_stats *s = (struct tree_stats *) param;
1244 assert (obj != NULL && s != NULL);
1246 /* Exclude never-accessed objects. */
1247 if (obj->read_count + obj->write_count)
1250 s->total_size += (obj->high - obj->low + 1);
1257 /* VERBOSE_TRACE ("analyze low=%p live=%u name=`%s'\n",
1258 (void *) obj->low, obj->liveness, obj->name); */
1260 s->live_obj_count ++;
1261 s->total_weight += (double) obj->liveness;
1263 (double) (obj->high - obj->low + 1) *
1264 (double) obj->liveness;
1267 for (i=0; i<sizeof(uintptr_t) * 8; i++)
1269 unsigned bit = addr & 1;
1270 s->weighted_address_bits[i][bit] += obj->liveness;
1274 /* Age the liveness value. */
1275 obj->liveness >>= 1;
1286 struct tree_stats s;
1287 uintptr_t new_mask = 0;
1288 unsigned char new_shift;
1289 float cache_utilization;
1291 static float smoothed_new_shift = -1.0;
1294 memset (&s, 0, sizeof (s));
1296 splay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP), __mf_adapt_cache_fn, (void *) & s);
1297 splay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I), __mf_adapt_cache_fn, (void *) & s);
1298 splay_tree_foreach (__mf_object_tree (__MF_TYPE_STACK), __mf_adapt_cache_fn, (void *) & s);
1299 splay_tree_foreach (__mf_object_tree (__MF_TYPE_STATIC), __mf_adapt_cache_fn, (void *) & s);
1300 splay_tree_foreach (__mf_object_tree (__MF_TYPE_GUESS), __mf_adapt_cache_fn, (void *) & s);
1302 /* Maybe we're dealing with funny aging/adaptation parameters, or an
1303 empty tree. Just leave the cache alone in such cases, rather
1304 than risk dying by division-by-zero. */
1305 if (! (s.obj_count > 0) && (s.live_obj_count > 0) && (s.total_weight > 0.0))
1308 /* Guess a good value for the shift parameter by finding an address bit that is a
1309 good discriminant of lively objects. */
1311 for (i=0; i<sizeof (uintptr_t)*8; i++)
1313 float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
1314 if (max_value < value) max_value = value;
1316 for (i=0; i<sizeof (uintptr_t)*8; i++)
1318 float shoulder_factor = 0.7; /* Include slightly less popular bits too. */
1319 float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
1320 if (value >= max_value * shoulder_factor)
1323 if (smoothed_new_shift < 0) smoothed_new_shift = __mf_lc_shift;
1324 /* Converge toward this slowly to reduce flapping. */
1325 smoothed_new_shift = 0.9*smoothed_new_shift + 0.1*i;
1326 new_shift = (unsigned) (smoothed_new_shift + 0.5);
1327 assert (new_shift < sizeof (uintptr_t)*8);
1329 /* Count number of used buckets. */
1330 cache_utilization = 0.0;
1331 for (i = 0; i < (1 + __mf_lc_mask); i++)
1332 if (__mf_lookup_cache[i].low != 0 || __mf_lookup_cache[i].high != 0)
1333 cache_utilization += 1.0;
1334 cache_utilization /= (1 + __mf_lc_mask);
1336 new_mask |= 0x3ff; /* XXX: force a large cache. */
1337 new_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
1339 VERBOSE_TRACE ("adapt cache obj=%u/%u sizes=%lu/%.0f/%.0f => "
1340 "util=%u%% m=%p s=%u\n",
1341 s.obj_count, s.live_obj_count, s.total_size, s.total_weight, s.weighted_size,
1342 (unsigned)(cache_utilization*100.0), (void *) new_mask, new_shift);
1344 /* We should reinitialize cache if its parameters have changed. */
1345 if (new_mask != __mf_lc_mask ||
1346 new_shift != __mf_lc_shift)
1348 __mf_lc_mask = new_mask;
1349 __mf_lc_shift = new_shift;
1351 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1353 __mf_lookup_cache[0].low = MAXPTR;
1359 /* __mf_find_object[s] */
1361 /* Find overlapping live objects between [low,high].  Return up to
1362 max_objs of their pointers in objs[]. Return total count of
1363 overlaps (may exceed max_objs). */
1366 __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
1367 __mf_object_t **objs, unsigned max_objs, int type)
/* Select the per-type splay tree and start the search keyed at the
   region's low address. */
1370 splay_tree t = __mf_object_tree (type);
1371 splay_tree_key k = (splay_tree_key) ptr_low;
1374 splay_tree_node n = splay_tree_lookup (t, k);
1375 /* An exact match for base address implies a hit. */
1378 if (count < max_objs)
1379 objs[count] = (__mf_object_t *) n->value;
1383 /* Iterate left then right near this key value to find all overlapping objects. */
1384 for (direction = 0; direction < 2; direction ++)
1386 /* Reset search origin. */
1387 k = (splay_tree_key) ptr_low;
/* Walk successors (direction 0) or predecessors (direction 1) away
   from the origin key until the neighbor no longer overlaps. */
1393 n = (direction == 0 ? splay_tree_successor (t, k) : splay_tree_predecessor (t, k));
1394 if (n == NULL) break;
1395 obj = (__mf_object_t *) n->value;
1397 if (! (obj->low <= ptr_high && obj->high >= ptr_low)) /* No overlap? */
1400 if (count < max_objs)
1401 objs[count] = (__mf_object_t *) n->value;
/* Advance the search key to this object's base so the next
   successor/predecessor query moves further along the tree. */
1404 k = (splay_tree_key) obj->low;
/* Find all live objects of any type overlapping [ptr_low,ptr_high],
   by querying each per-type splay tree in turn via __mf_find_objects2.
   NOTE(review): the accumulation of per-type counts is elided in this
   excerpt — confirm against the full source. */
1413 __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
1414 __mf_object_t **objs, unsigned max_objs)
1419 /* Search each splay tree for overlaps. */
1420 for (type = __MF_TYPE_NOACCESS; type <= __MF_TYPE_GUESS; type++)
1422 unsigned c = __mf_find_objects2 (ptr_low, ptr_high, objs, max_objs, type);
1428 else /* NB: C may equal 0 */
1441 /* __mf_link_object */
/* Insert NODE into the splay tree for its type, keyed by its low
   (base) address. */
1444 __mf_link_object (__mf_object_t *node)
1446 splay_tree t = __mf_object_tree (node->type);
1447 splay_tree_insert (t, (splay_tree_key) node->low, (splay_tree_value) node);
1450 /* __mf_unlink_object */
/* Remove NODE from the splay tree for its type; the tree is keyed by
   the object's low (base) address. */
1453 __mf_unlink_object (__mf_object_t *node)
1455 splay_tree t = __mf_object_tree (node->type);
1456 splay_tree_remove (t, (splay_tree_key) node->low);
1459 /* __mf_find_dead_objects */
1461 /* Find overlapping dead objects between [low,high].  Return up to
1462 max_objs of their pointers in objs[]. Return total count of
1463 overlaps (may exceed max_objs). */
1466 __mf_find_dead_objects (uintptr_t low, uintptr_t high,
1467 __mf_object_t **objs, unsigned max_objs)
/* The cemetery only exists when persistence is enabled. */
1469 if (__mf_opts.persistent_count > 0)
1472 unsigned recollection = 0;
1475 assert (low <= high);
1476 assert (max_objs == 0 || objs != NULL);
1478 /* Widen the search from the most recent plots in each row, looking
1479 backward in time. */
1481 while (recollection < __mf_opts.persistent_count)
/* One cemetery row exists per object type up to __MF_TYPE_MAX_CEM. */
1485 for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
1490 plot = __mf_object_dead_head [row];
1491 for (i = 0; i <= recollection; i ++)
1495 /* Look backward through row: it's a circular buffer. */
1496 if (plot > 0) plot --;
1497 else plot = __mf_opts.persistent_count - 1;
1499 obj = __mf_object_cemetary [row][plot];
1500 if (obj && obj->low <= high && obj->high >= low)
1502 /* Found an overlapping dead object! */
1503 if (count < max_objs)
1513 /* Look farther back in time. */
/* Exponentially widen the window (1, 3, 7, ...) until a match is
   found or the whole cemetery has been scanned. */
1514 recollection = (recollection * 2) + 1;
1523 /* __mf_describe_object */
/* Print a human-readable description of OBJ to stderr: name, bounds,
   size, area type, access counts, allocation (and, with persistence,
   deallocation) time/pc/backtrace.  With the `abbreviate' option, an
   object already described in the current epoch gets only a one-line
   summary.  NOTE(review): the NULL-obj "reset epoch" path is elided in
   this excerpt — confirm against the full source. */
1526 __mf_describe_object (__mf_object_t *obj)
1528 static unsigned epoch = 0;
/* Short form: this object was already described in this epoch. */
1535 if (__mf_opts.abbreviate && obj->description_epoch == epoch)
1538 "mudflap object %p: name=`%s'\n",
1539 (void *) obj, (obj->name ? obj->name : ""));
/* Mark the object as described for this epoch before the long form. */
1543 obj->description_epoch = epoch;
1546 "mudflap object %p: name=`%s'\n"
1547 "bounds=[%p,%p] size=%lu area=%s check=%ur/%uw liveness=%u%s\n"
1548 "alloc time=%lu.%06lu pc=%p"
1553 (void *) obj, (obj->name ? obj->name : ""),
1554 (void *) obj->low, (void *) obj->high,
1555 (unsigned long) (obj->high - obj->low + 1),
1556 (obj->type == __MF_TYPE_NOACCESS ? "no-access" :
1557 obj->type == __MF_TYPE_HEAP ? "heap" :
1558 obj->type == __MF_TYPE_HEAP_I ? "heap-init" :
1559 obj->type == __MF_TYPE_STACK ? "stack" :
1560 obj->type == __MF_TYPE_STATIC ? "static" :
1561 obj->type == __MF_TYPE_GUESS ? "guess" :
1563 obj->read_count, obj->write_count, obj->liveness,
1564 obj->watching_p ? " watching" : "",
1565 obj->alloc_time.tv_sec, obj->alloc_time.tv_usec,
1566 (void *) obj->alloc_pc
1568 , (unsigned) obj->alloc_thread
/* Optionally dump the allocation-time backtrace. */
1572 if (__mf_opts.backtrace > 0)
1575 for (i=0; i<obj->alloc_backtrace_size; i++)
1576 fprintf (stderr, " %s\n", obj->alloc_backtrace[i]);
/* With persistence enabled, a dead object also carries its
   deallocation record; report it too. */
1579 if (__mf_opts.persistent_count > 0)
1581 if (obj->deallocated_p)
1583 fprintf (stderr, "dealloc time=%lu.%06lu pc=%p"
1588 obj->dealloc_time.tv_sec, obj->dealloc_time.tv_usec,
1589 (void *) obj->dealloc_pc
1591 , (unsigned) obj->dealloc_thread
1596 if (__mf_opts.backtrace > 0)
1599 for (i=0; i<obj->dealloc_backtrace_size; i++)
1600 fprintf (stderr, " %s\n", obj->dealloc_backtrace[i]);
/* splay_tree_foreach callback: report one still-registered heap object
   as a leak.  PARAM points at a running leak counter shared by the
   caller. */
1608 __mf_report_leaks_fn (splay_tree_node n, void *param)
1610 __mf_object_t *node = (__mf_object_t *) n->value;
1611 unsigned *count = (unsigned *) param;
1616 fprintf (stderr, "Leaked object %u:\n", (*count));
1617 __mf_describe_object (node);
/* Walk both heap object trees (HEAP and HEAP_I) and report every
   remaining object as a leak via __mf_report_leaks_fn, accumulating
   the total in `count'. */
1624 __mf_report_leaks ()
1628 (void) splay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP),
1629 __mf_report_leaks_fn, & count);
1630 (void) splay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I),
1631 __mf_report_leaks_fn, & count);
1636 /* ------------------------------------------------------------------------ */
/* Report runtime statistics and (optionally) leaks to stderr.
   NOTE(review): the function header lines are elided in this excerpt;
   the first pair below appears to be the recursion-protected public
   wrapper, followed by the unlocked worker — confirm against the full
   source. */
1643 BEGIN_RECURSION_PROTECT ();
1645 END_RECURSION_PROTECT ();
/* Dump call/byte counters gathered when `collect_stats' is on. */
1652 if (__mf_opts.collect_stats)
1657 "calls to __mf_check: %lu\n"
1658 " __mf_register: %lu [%luB, %luB, %luB, %luB, %luB]\n"
1659 " __mf_unregister: %lu [%luB]\n"
1660 " __mf_violation: [%lu, %lu, %lu, %lu, %lu]\n",
1662 __mf_count_register,
1663 __mf_total_register_size[0], __mf_total_register_size[1],
1664 __mf_total_register_size[2], __mf_total_register_size[3],
1665 __mf_total_register_size[4], /* XXX */
1666 __mf_count_unregister, __mf_total_unregister_size,
1667 __mf_count_violation[0], __mf_count_violation[1],
1668 __mf_count_violation[2], __mf_count_violation[3],
1669 __mf_count_violation[4]);
1672 "calls with reentrancy: %lu\n", __mf_reentrancy);
1675 " lock contention: %lu\n", __mf_lock_contention);
1678 /* Lookup cache stats. */
1681 unsigned max_reuse = 0;
1682 unsigned num_used = 0;
1683 unsigned num_unused = 0;
1685 for (i = 0; i < LOOKUP_CACHE_SIZE; i++)
1687 if (__mf_lookup_cache_reusecount[i])
1691 if (max_reuse < __mf_lookup_cache_reusecount[i])
1692 max_reuse = __mf_lookup_cache_reusecount[i];
1694 fprintf (stderr, "lookup cache slots used: %u unused: %u peak-reuse: %u\n",
1695 num_used, num_unused, max_reuse);
/* Count live objects by searching the whole address range with a
   zero-length output array (count-only query). */
1699 unsigned live_count;
1700 live_count = __mf_find_objects (MINPTR, MAXPTR, NULL, 0);
1701 fprintf (stderr, "number of live objects: %u\n", live_count);
/* With persistence on, also count zombie objects in the cemetery. */
1704 if (__mf_opts.persistent_count > 0)
1706 unsigned dead_count = 0;
1708 for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
1709 for (plot = 0 ; plot < __mf_opts.persistent_count; plot ++)
1710 if (__mf_object_cemetary [row][plot] != 0)
1712 fprintf (stderr, " zombie objects: %u\n", dead_count);
1715 if (__mf_opts.print_leaks && (__mf_opts.mudflap_mode == mode_check))
1718 extern void * __mf_wrap_alloca_indirect (size_t c);
1720 /* Free up any remaining alloca()'d blocks. */
1721 __mf_wrap_alloca_indirect (0);
1722 __mf_describe_object (NULL); /* Reset description epoch. */
1723 l = __mf_report_leaks ();
1724 fprintf (stderr, "number of leaked objects: %u\n", l);
1728 /* __mf_backtrace */
/* Capture the current call stack into a freshly allocated array of
   strings, storing the array in *SYMBOLS and returning its length.
   Levels above the frame matching GUESS_PC (or, failing that, the
   first GUESS_OMIT_LEVELS frames) are trimmed off.  The caller owns
   the returned symbol array. */
1731 __mf_backtrace (char ***symbols, void *guess_pc, unsigned guess_omit_levels)
1734 unsigned pc_array_size = __mf_opts.backtrace + guess_omit_levels;
1735 unsigned remaining_size;
1736 unsigned omitted_size = 0;
/* Use the real (unwrapped) allocator so this routine does not recurse
   into mudflap's own hooks. */
1738 DECLARE (void, free, void *ptr);
1739 DECLARE (void *, calloc, size_t c, size_t n);
1740 DECLARE (void *, malloc, size_t n);
1742 pc_array = CALL_REAL (calloc, pc_array_size, sizeof (void *) );
1743 #ifdef HAVE_BACKTRACE
1744 pc_array_size = backtrace (pc_array, pc_array_size);
/* Fallback when backtrace() is unavailable: collect return addresses
   by hand, one literal level at a time. */
1746 #define FETCH(n) do { if (pc_array_size >= n) { \
1747 pc_array[n] = __builtin_return_address(n); \
1748 if (pc_array[n] == 0) pc_array_size = n; } } while (0)
1750 /* Unroll some calls to __builtin_return_address because this function
1751 only takes a literal integer parameter. */
1754 /* XXX: __builtin_return_address sometimes crashes (!) on >0 arguments,
1755 rather than simply returning 0. :-( */
1764 if (pc_array_size > 8) pc_array_size = 9;
1766 if (pc_array_size > 0) pc_array_size = 1;
1772 /* We want to trim the first few levels of the stack traceback,
1773 since they contain libmudflap wrappers and junk. If pc_array[]
1774 ends up containing a non-NULL guess_pc, then trim everything
1775 before that. Otherwise, omit the first guess_omit_levels
1778 if (guess_pc != NULL)
1779 for (i=0; i<pc_array_size; i++)
1780 if (pc_array [i] == guess_pc)
1783 if (omitted_size == 0) /* No match? */
1784 if (pc_array_size > guess_omit_levels)
1785 omitted_size = guess_omit_levels;
1787 remaining_size = pc_array_size - omitted_size;
1789 #ifdef HAVE_BACKTRACE_SYMBOLS
1790 *symbols = backtrace_symbols (pc_array + omitted_size, remaining_size);
1793 /* Let's construct a buffer by hand. It will have <remaining_size>
1794 char*'s at the front, pointing at individual strings immediately
/* Single allocation: an array of char* followed by fixed-width
   (perline-byte) string slots, one per frame. */
1799 enum { perline = 30 };
1800 buffer = CALL_REAL (malloc, remaining_size * (perline + sizeof(char *)));
1801 pointers = (char **) buffer;
1802 chars = (char *)buffer + (remaining_size * sizeof (char *));
1803 for (i = 0; i < remaining_size; i++)
1805 pointers[i] = chars;
1806 sprintf (chars, "[0x%p]", pc_array [omitted_size + i]);
1807 chars = chars + perline;
1809 *symbols = pointers;
/* The raw PC array is no longer needed once symbolized. */
1812 CALL_REAL (free, pc_array);
1814 return remaining_size;
1817 /* ------------------------------------------------------------------------ */
1818 /* __mf_violation */
/* Handle a mudflap access violation: update statistics, print a
   diagnostic (with optional backtrace), describe nearby live and dead
   objects to help the user locate the fault, and finally act per the
   configured violation_mode (e.g. raise SIGSEGV or attach gdb). */
1821 __mf_violation (void *ptr, size_t sz, uintptr_t pc,
1822 const char *location, int type)
1825 static unsigned violation_number;
1826 DECLARE(void, free, void *ptr);
1828 TRACE ("violation pc=%p location=%s type=%d ptr=%p size=%lu\n",
1830 (location != NULL ? location : ""), type, ptr, (unsigned long) sz);
/* Out-of-range TYPE values are folded into bucket 0. */
1832 if (__mf_opts.collect_stats)
1833 __mf_count_violation [(type < 0) ? 0 :
1834 (type > __MF_VIOL_WATCH) ? 0 :
1837 /* Print out a basic warning message. */
1838 if (__mf_opts.verbose_violations)
1841 unsigned num_helpful = 0;
1843 #if HAVE_GETTIMEOFDAY
1844 gettimeofday (& now, NULL);
1847 violation_number ++;
1850 "mudflap violation %u (%s): time=%lu.%06lu "
1851 "ptr=%p size=%lu\npc=%p%s%s%s\n",
1853 ((type == __MF_VIOL_READ) ? "check/read" :
1854 (type == __MF_VIOL_WRITE) ? "check/write" :
1855 (type == __MF_VIOL_REGISTER) ? "register" :
1856 (type == __MF_VIOL_UNREGISTER) ? "unregister" :
1857 (type == __MF_VIOL_WATCH) ? "watch" : "unknown"),
1858 now.tv_sec, now.tv_usec,
1859 (void *) ptr, (unsigned long)sz, (void *) pc,
1860 (location != NULL ? " location=`" : ""),
1861 (location != NULL ? location : ""),
1862 (location != NULL ? "'" : ""));
1864 if (__mf_opts.backtrace > 0)
/* Trim the top two frames: this function and its caller (the
   check wrapper). */
1869 num = __mf_backtrace (& symbols, (void *) pc, 2);
1870 /* Note: backtrace_symbols calls malloc(). But since we're in
1871 __mf_violation and presumably __mf_check, it'll detect
1872 recursion, and not put the new string into the database. */
1874 for (i=0; i<num; i++)
1875 fprintf (stderr, " %s\n", symbols[i]);
1877 /* Calling free() here would trigger a violation. */
1878 CALL_REAL(free, symbols);
1882 /* Look for nearby objects. For this, we start with s_low/s_high
1883 pointing to the given area, looking for overlapping objects.
1884 If none show up, widen the search area and keep looking. */
/* A zero-size access still describes a one-byte region. */
1886 if (sz == 0) sz = 1;
1888 for (dead_p = 0; dead_p <= 1; dead_p ++) /* for dead_p in 0 1 */
1890 enum {max_objs = 3}; /* magic */
1891 __mf_object_t *objs[max_objs];
1892 unsigned num_objs = 0;
1893 uintptr_t s_low, s_high;
1897 s_low = (uintptr_t) ptr;
1898 s_high = CLAMPSZ (ptr, sz);
1900 while (tries < 16) /* magic */
/* First pass searches live objects; second pass (dead_p) the
   cemetery of deallocated objects. */
1903 num_objs = __mf_find_dead_objects (s_low, s_high, objs, max_objs);
1905 num_objs = __mf_find_objects (s_low, s_high, objs, max_objs);
1907 if (num_objs) /* good enough */
1912 /* XXX: tune this search strategy. It's too dependent on
1913 sz, which can vary from 1 to very big (when array index
1914 checking) numbers. */
1915 s_low = CLAMPSUB (s_low, (sz * tries * tries));
1916 s_high = CLAMPADD (s_high, (sz * tries * tries));
/* Describe each found object relative to the checked region:
   how far before, after, or into the object each end lies. */
1919 for (i = 0; i < min (num_objs, max_objs); i++)
1921 __mf_object_t *obj = objs[i];
1922 uintptr_t low = (uintptr_t) ptr;
1923 uintptr_t high = CLAMPSZ (ptr, sz);
1924 unsigned before1 = (low < obj->low) ? obj->low - low : 0;
1925 unsigned after1 = (low > obj->high) ? low - obj->high : 0;
1926 unsigned into1 = (high >= obj->low && low <= obj->high) ? low - obj->low : 0;
1927 unsigned before2 = (high < obj->low) ? obj->low - high : 0;
1928 unsigned after2 = (high > obj->high) ? high - obj->high : 0;
1929 unsigned into2 = (high >= obj->low && low <= obj->high) ? high - obj->low : 0;
1931 fprintf (stderr, "Nearby object %u: checked region begins %uB %s and ends %uB %s\n",
1932 num_helpful + i + 1,
1933 (before1 ? before1 : after1 ? after1 : into1),
1934 (before1 ? "before" : after1 ? "after" : "into"),
1935 (before2 ? before2 : after2 ? after2 : into2),
1936 (before2 ? "before" : after2 ? "after" : "into"));
1937 __mf_describe_object (obj);
1939 num_helpful += num_objs;
1942 fprintf (stderr, "number of nearby objects: %u\n", num_helpful);
1945 /* How to finally handle this violation? */
1946 switch (__mf_opts.violation_mode)
1951 kill (getpid(), SIGSEGV);
1958 snprintf (buf, 128, "gdb --pid=%u", (unsigned) getpid ());
1960 /* XXX: should probably fork() && sleep(GDB_WAIT_PARAMETER)
1961 instead, and let the forked child execlp() gdb. That way, this
1962 subject process can be resumed under the supervision of gdb.
1963 This can't happen now, since system() only returns when gdb
1964 dies. In that case, we need to beware of starting a second
1965 concurrent gdb child upon the next violation. (But if the first
1966 gdb dies, then starting a new one is appropriate.) */
1971 /* ------------------------------------------------------------------------ */
/* Public entry point: set a watch on [ptr, ptr+sz), guarded against
   reentrant mudflap calls.  Returns the count from __mf_watch_or_not. */
1974 unsigned __mf_watch (void *ptr, size_t sz)
1978 BEGIN_RECURSION_PROTECT ();
1979 rc = __mf_watch_or_not (ptr, sz, 1);
1980 END_RECURSION_PROTECT ();
/* Public entry point: clear a watch on [ptr, ptr+sz) (flag==0 variant
   of __mf_watch). */
1985 unsigned __mf_unwatch (void *ptr, size_t sz)
1989 rc = __mf_watch_or_not (ptr, sz, 0);
/* Set (flag=1) or clear (flag=0) the watching_p bit on every object
   overlapping [ptr, ptr+sz), uncaching each object whose state changes
   so its next access is forced through __mf_check(). */
1996 __mf_watch_or_not (void *ptr, size_t sz, char flag)
1998 uintptr_t ptr_high = CLAMPSZ (ptr, sz);
1999 uintptr_t ptr_low = (uintptr_t) ptr;
2002 TRACE ("%s ptr=%p size=%lu\n",
2003 (flag ? "watch" : "unwatch"), ptr, (unsigned long) sz);
2005 switch (__mf_opts.mudflap_mode)
2015 __mf_object_t **all_ovr_objs;
/* Use the real allocator to avoid recursing into mudflap hooks. */
2018 DECLARE (void *, malloc, size_t c);
2019 DECLARE (void, free, void *p);
/* First query counts overlaps; second query fills the array. */
2021 obj_count = __mf_find_objects (ptr_low, ptr_high, NULL, 0);
2022 VERBOSE_TRACE (" %u:", obj_count);
2024 all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_t *) * obj_count));
2025 if (all_ovr_objs == NULL) abort ();
2026 n = __mf_find_objects (ptr_low, ptr_high, all_ovr_objs, obj_count);
2027 assert (n == obj_count);
2029 for (n = 0; n < obj_count; n ++)
2031 __mf_object_t *obj = all_ovr_objs[n];
2033 VERBOSE_TRACE (" [%p]", (void *) obj);
2034 if (obj->watching_p != flag)
2036 obj->watching_p = flag;
2039 /* Remove object from cache, to ensure next access
2040 goes through __mf_check(). */
2042 __mf_uncache_object (obj);
2045 CALL_REAL (free, all_ovr_objs);
/* SIGUSR1 handler: just record receipt; the signal is acted upon later
   by __mf_sigusr1_respond (async-signal-safe by doing nothing else). */
2055 __mf_sigusr1_handler (int num)
2057 __mf_sigusr1_received ++;
2060 /* Install or remove SIGUSR1 handler as necessary.
2061 Also, respond to a received pending SIGUSR1. */
2063 __mf_sigusr1_respond ()
2065 static int handler_installed;
2068 /* Manage handler */
2069 if (__mf_opts.sigusr1_report && ! handler_installed)
2071 signal (SIGUSR1, __mf_sigusr1_handler);
2072 handler_installed = 1;
2074 else if(! __mf_opts.sigusr1_report && handler_installed)
2076 signal (SIGUSR1, SIG_DFL);
2077 handler_installed = 0;
2081 /* Manage enqueued signals */
/* Consume one pending signal per call, comparing received vs.
   handled counters updated asynchronously by the handler. */
2082 if (__mf_sigusr1_received > __mf_sigusr1_handled)
2084 __mf_sigusr1_handled ++;
2085 assert (__mf_state == reentrant);
2087 handler_installed = 0; /* We may need to re-enable signal; this might be a SysV library. */
2092 /* XXX: provide an alternative __assert_fail function that cannot
2093 fail due to libmudflap infinite recursion. */
/* Write the decimal representation of N to file descriptor FD, using
   only a stack buffer and write(2) — no malloc, no stdio — so it is
   safe to call from the failing-assert path. */
2097 write_itoa (int fd, unsigned n)
/* sizeof(n)*4 chars comfortably holds all decimal digits of n plus
   the trailing NUL. */
2099 enum x { bufsize = sizeof(n)*4 };
/* Fill the buffer with digits from least to most significant,
   working backward from the end. */
2103 for (i=0; i<bufsize-1; i++)
2105 unsigned digit = n % 10;
2106 buf[bufsize-2-i] = digit + '0';
2110 char *m = & buf [bufsize-2-i];
2111 buf[bufsize-1] = '\0';
2112 write (fd, m, strlen(m));
/* Minimal replacement for libc's __assert_fail: emit the failure
   message (thread id, expression, function, file, line) to fd 2 using
   only raw write(2), avoiding any allocation or stdio that could
   recurse back into libmudflap. */
2120 __assert_fail (const char *msg, const char *file, unsigned line, const char *func)
2122 #define write2(string) write (2, (string), strlen ((string)));
2126 write_itoa (2, (unsigned) pthread_self ());
2129 write2(": assertion failure: `");
2130 write (2, msg, strlen (msg));
2132 write (2, func, strlen (func));
2134 write (2, file, strlen (file));
2136 write_itoa (2, line);
2149 /* #include the generic splay tree implementation from libiberty here, to
2150 ensure that it uses our memory allocation primitives. */
/* Free hook for the embedded splay tree: route through the real free
   so tree bookkeeping never re-enters mudflap's wrappers. */
2153 splay_tree_free (void *p)
2155 DECLARE (void, free, void *p);
2156 CALL_REAL (free, p);
/* Allocation hook for the embedded splay tree, likewise using the
   real malloc. */
2160 splay_tree_xmalloc (size_t s)
2162 DECLARE (void *, malloc, size_t s);
2163 return CALL_REAL (malloc, s);
/* Redirect the allocator names used inside splay-tree.c to the hooks
   above before textually including it. */
2166 #define free(z) splay_tree_free(z)
2167 #define xmalloc(z) splay_tree_xmalloc(z)
2168 #include "splay-tree.c"