/* Mudflap: narrow-pointer bounds-checking by tree rewriting.
   Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
   Contributed by Frank Ch. Eigler <fche@redhat.com>
   and Graydon Hoare <graydon@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combined
executable.)

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
/* These attempt to coax various unix flavours to declare all our
   needed tidbits in the system headers.  */
#if !defined(__FreeBSD__)
#define _POSIX_SOURCE
#endif /* Some BSDs break <sys/socket.h> if this is defined. */
#define _GNU_SOURCE
#define _XOPEN_SOURCE
#define _BSD_TYPES
#define __EXTENSIONS__
#define _ALL_SOURCE
#define _LARGE_FILE_API
#define _XOPEN_SOURCE_EXTENDED 1

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#ifdef HAVE_EXECINFO_H
#include <execinfo.h>
#endif
#include <assert.h>
#include <string.h>
#include <limits.h>
#include <sys/types.h>
#include <signal.h>
#include <errno.h>
#include <ctype.h>

#include "mf-runtime.h"
#include "mf-impl.h"
#include "splay-tree.h"
/* ------------------------------------------------------------------------ */
/* Utility macros */

#define CTOR  __attribute__ ((constructor))
#define DTOR  __attribute__ ((destructor))
/* Codes to describe the context in which a violation occurs. */
#define __MF_VIOL_UNKNOWN 0
#define __MF_VIOL_READ 1
#define __MF_VIOL_WRITE 2
#define __MF_VIOL_REGISTER 3
#define __MF_VIOL_UNREGISTER 4
#define __MF_VIOL_WATCH 5
/* Protect against recursive calls. */
#define BEGIN_RECURSION_PROTECT() do { \
  if (UNLIKELY (__mf_state == reentrant)) { \
    write (2, "mf: erroneous reentrancy detected in `", 38); \
    write (2, __PRETTY_FUNCTION__, strlen(__PRETTY_FUNCTION__)); \
    write (2, "'\n", 2); \
    abort (); } \
  __mf_state = reentrant; \
  } while (0)

#define END_RECURSION_PROTECT() do { \
  __mf_state = active; \
  } while (0)
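/* Illustrative note (added; not part of the original source): every
   locked entry point below brackets its unlocked `__mfu_' worker with
   this pair, e.g.:

     void __mf_check (void *ptr, size_t sz, int type, const char *location)
     {
       LOCKTH ();
       BEGIN_RECURSION_PROTECT ();
       __mfu_check (ptr, sz, type, location);
       END_RECURSION_PROTECT ();
       UNLOCKTH ();
     }

   so a mudflap routine reached recursively (say, via a hooked malloc
   that mudflap itself called) is caught by the __mf_state check.  */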
/* ------------------------------------------------------------------------ */
/* Required globals.  */

#define LOOKUP_CACHE_MASK_DFL 1023
#define LOOKUP_CACHE_SIZE_MAX 4096 /* Allows max CACHE_MASK 0x0FFF */
#define LOOKUP_CACHE_SHIFT_DFL 2

struct __mf_cache __mf_lookup_cache [LOOKUP_CACHE_SIZE_MAX];
uintptr_t __mf_lc_mask = LOOKUP_CACHE_MASK_DFL;
unsigned char __mf_lc_shift = LOOKUP_CACHE_SHIFT_DFL;
#define LOOKUP_CACHE_SIZE (__mf_lc_mask + 1)
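/* Illustrative note (added): instrumented code hashes a pointer into
   this direct-mapped cache; mf-runtime.h defines the index computation
   along these lines:

     #define __MF_CACHE_INDEX(ptr) \
       ((((uintptr_t) (ptr)) >> __mf_lc_shift) & __mf_lc_mask)

   A hit (entry->low <= ptr and ptr+sz-1 <= entry->high) lets the
   compiler-emitted fast path skip calling __mf_check entirely, which
   is why __mf_adapt_cache below tunes the shift/mask at runtime.  */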
struct __mf_options __mf_opts;

int __mf_starting_p = 1;
#ifndef LIBMUDFLAPTH
enum __mf_state_enum __mf_state = active;
#else
/* See __mf_state_perthread() in mf-hooks.c. */
#endif
#ifdef LIBMUDFLAPTH
pthread_mutex_t __mf_biglock =
#ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
       PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
#else
       PTHREAD_MUTEX_INITIALIZER;
#endif
#endif

/* Use HAVE_PTHREAD_H here instead of LIBMUDFLAPTH, so that even
   the non-threaded libmudflap.la can diagnose whether
   the application is linked with -lpthread.  See __mf_usage() below. */
#if HAVE_PTHREAD_H
#ifdef _POSIX_THREADS
#pragma weak pthread_join
#else
#define pthread_join NULL
#endif
const void *threads_active_p = (void *) pthread_join;
#endif
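/* Explanatory note (added): the weak reference above means
   pthread_join resolves to a real, non-NULL address only when the
   application itself pulled in libpthread; otherwise the weak symbol
   stays NULL.  threads_active_p thus doubles as a link-time probe
   that __mf_usage() can test at runtime.  */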
/* ------------------------------------------------------------------------ */
/* stats-related globals.  */

static unsigned long __mf_count_check;
static unsigned long __mf_lookup_cache_reusecount [LOOKUP_CACHE_SIZE_MAX];
static unsigned long __mf_count_register;
static unsigned long __mf_total_register_size [__MF_TYPE_MAX+1];
static unsigned long __mf_count_unregister;
static unsigned long __mf_total_unregister_size;
static unsigned long __mf_count_violation [__MF_VIOL_WATCH+1];
static unsigned long __mf_sigusr1_received;
static unsigned long __mf_sigusr1_handled;
/* not static */ unsigned long __mf_reentrancy;
#ifdef LIBMUDFLAPTH
/* not static */ unsigned long __mf_lock_contention;
#endif
/* ------------------------------------------------------------------------ */
/* mode-check-related globals.  */

typedef struct __mf_object
{
  uintptr_t low, high; /* __mf_register parameters */
  const char *name;
  char type; /* __MF_TYPE_something */
  char watching_p; /* Trigger a VIOL_WATCH on access? */
  unsigned read_count; /* Number of times __mf_check/read was called on this object.  */
  unsigned write_count; /* Likewise for __mf_check/write.  */
  unsigned liveness; /* A measure of recent checking activity.  */
  unsigned description_epoch; /* Last epoch __mf_describe_object printed this.  */

  uintptr_t alloc_pc;
  struct timeval alloc_time;
  char **alloc_backtrace;
  size_t alloc_backtrace_size;
#ifdef LIBMUDFLAPTH
  pthread_t alloc_thread;
#endif

  int deallocated_p;
  uintptr_t dealloc_pc;
  struct timeval dealloc_time;
  char **dealloc_backtrace;
  size_t dealloc_backtrace_size;
#ifdef LIBMUDFLAPTH
  pthread_t dealloc_thread;
#endif
} __mf_object_t;
/* Live objects: splay trees, separated by type, ordered on .low (base address).  */
/* Actually stored as static vars within lookup function below.  */

/* Dead objects: circular arrays; _MIN_CEM .. _MAX_CEM only */
static unsigned __mf_object_dead_head[__MF_TYPE_MAX_CEM+1]; /* next empty spot */
static __mf_object_t *__mf_object_cemetary[__MF_TYPE_MAX_CEM+1][__MF_PERSIST_MAX];
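/* Illustrative note (added): each cemetary row is a ring buffer of
   recently unregistered objects of one type, so a violation report can
   still describe a region freed a short while ago.
   __mf_object_dead_head[row] marks the next plot to recycle;
   __mfu_unregister below advances it modulo
   __mf_opts.persistent_count.  */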
/* ------------------------------------------------------------------------ */
/* Forward function declarations */

void __mf_init () CTOR;
static void __mf_sigusr1_respond ();
static unsigned __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                                   __mf_object_t **objs, unsigned max_objs);
static unsigned __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
                                    __mf_object_t **objs, unsigned max_objs, int type);
static unsigned __mf_find_dead_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                                        __mf_object_t **objs, unsigned max_objs);
static void __mf_adapt_cache ();
static void __mf_describe_object (__mf_object_t *obj);
static unsigned __mf_watch_or_not (void *ptr, size_t sz, char flag);
static splay_tree __mf_object_tree (int type);
static void __mf_link_object (__mf_object_t *node);
static void __mf_unlink_object (__mf_object_t *node);
/* ------------------------------------------------------------------------ */
/* Configuration engine */

static void
__mf_set_default_options ()
{
  memset (& __mf_opts, 0, sizeof (__mf_opts));

  __mf_opts.adapt_cache = 1000003;
  __mf_opts.abbreviate = 1;
  __mf_opts.verbose_violations = 1;
  __mf_opts.free_queue_length = 4;
  __mf_opts.persistent_count = 100;
  __mf_opts.crumple_zone = 32;
  __mf_opts.backtrace = 4;
  __mf_opts.mudflap_mode = mode_check;
  __mf_opts.violation_mode = viol_nop;
  __mf_opts.heur_std_data = 1;
#ifdef LIBMUDFLAPTH
  __mf_opts.thread_stack = 0;
#endif
}
static struct option
{
  char *name;
  char *description;
  enum
    {
      set_option,
      read_integer_option,
    } type;
  int value;
  int *target;
}
options [] =
  {
    {"mode-nop",
     "mudflaps do nothing",
     set_option, (int)mode_nop, (int *)&__mf_opts.mudflap_mode},
    {"mode-populate",
     "mudflaps populate object tree",
     set_option, (int)mode_populate, (int *)&__mf_opts.mudflap_mode},
    {"mode-check",
     "mudflaps check for memory violations",
     set_option, (int)mode_check, (int *)&__mf_opts.mudflap_mode},
    {"mode-violate",
     "mudflaps always cause violations (diagnostic)",
     set_option, (int)mode_violate, (int *)&__mf_opts.mudflap_mode},

    {"viol-nop",
     "violations do not change program execution",
     set_option, (int)viol_nop, (int *)&__mf_opts.violation_mode},
    {"viol-abort",
     "violations cause a call to abort()",
     set_option, (int)viol_abort, (int *)&__mf_opts.violation_mode},
    {"viol-segv",
     "violations are promoted to SIGSEGV signals",
     set_option, (int)viol_segv, (int *)&__mf_opts.violation_mode},
    {"viol-gdb",
     "violations fork a gdb process attached to current program",
     set_option, (int)viol_gdb, (int *)&__mf_opts.violation_mode},
    {"trace-calls",
     "trace calls to mudflap runtime library",
     set_option, 1, &__mf_opts.trace_mf_calls},
    {"verbose-trace",
     "trace internal events within mudflap runtime library",
     set_option, 1, &__mf_opts.verbose_trace},
    {"collect-stats",
     "collect statistics on mudflap's operation",
     set_option, 1, &__mf_opts.collect_stats},
    {"sigusr1-report",
     "print report upon SIGUSR1",
     set_option, 1, &__mf_opts.sigusr1_report},
    {"internal-checking",
     "perform more expensive internal checking",
     set_option, 1, &__mf_opts.internal_checking},
    {"print-leaks",
     "print any memory leaks at program shutdown",
     set_option, 1, &__mf_opts.print_leaks},
    {"check-initialization",
     "detect uninitialized object reads",
     set_option, 1, &__mf_opts.check_initialization},
    {"verbose-violations",
     "print verbose messages when memory violations occur",
     set_option, 1, &__mf_opts.verbose_violations},
    {"abbreviate",
     "abbreviate repetitive listings",
     set_option, 1, &__mf_opts.abbreviate},
    {"wipe-stack",
     "wipe stack objects at unwind",
     set_option, 1, &__mf_opts.wipe_stack},
    {"wipe-heap",
     "wipe heap objects at free",
     set_option, 1, &__mf_opts.wipe_heap},
    {"heur-proc-map",
     "support /proc/self/map heuristics",
     set_option, 1, &__mf_opts.heur_proc_map},
    {"heur-stack-bound",
     "enable a simple upper stack bound heuristic",
     set_option, 1, &__mf_opts.heur_stack_bound},
    {"heur-start-end",
     "support _start.._end heuristics",
     set_option, 1, &__mf_opts.heur_start_end},
    {"heur-stdlib",
     "register standard library data (argv, errno, stdin, ...)",
     set_option, 1, &__mf_opts.heur_std_data},
    {"free-queue-length",
     "queue N deferred free() calls before performing them",
     read_integer_option, 0, &__mf_opts.free_queue_length},
    {"persistent-count",
     "keep a history of N unregistered regions",
     read_integer_option, 0, &__mf_opts.persistent_count},
    {"crumple-zone",
     "surround allocations with crumple zones of N bytes",
     read_integer_option, 0, &__mf_opts.crumple_zone},
    /* XXX: not type-safe.  */
    {"lc-mask",
     "set lookup cache size mask to N (2**M - 1)",
     read_integer_option, 0, (int *)(&__mf_lc_mask)},
    {"lc-shift",
     "set lookup cache pointer shift",
     read_integer_option, 0, (int *)(&__mf_lc_shift)},
    {"lc-adapt",
     "adapt mask/shift parameters after N cache misses",
     read_integer_option, 1, &__mf_opts.adapt_cache},
    {"backtrace",
     "keep an N-level stack trace of each call context",
     read_integer_option, 0, &__mf_opts.backtrace},
#ifdef LIBMUDFLAPTH
    {"thread-stack",
     "override thread stacks allocation: N kB",
     read_integer_option, 0, &__mf_opts.thread_stack},
#endif
    {0, 0, set_option, 0, NULL}
  };
static void
__mf_usage ()
{
  struct option *opt;

  fprintf (stderr,
           "This is a %s%sGCC \"mudflap\" memory-checked binary.\n"
           "Mudflap is Copyright (C) 2002-2003 Free Software Foundation, Inc.\n"
           "\n"
           "The mudflap code can be controlled by an environment variable:\n"
           "\n"
           "$ export MUDFLAP_OPTIONS='<options>'\n"
           "$ <mudflapped_program>\n"
           "\n"
           "where <options> is a space-separated list of \n"
           "any of the following options.  Use `-no-OPTION' to disable options.\n"
           "\n",
#if HAVE_PTHREAD_H
           (threads_active_p ? "multi-threaded " : "single-threaded "),
#else
           "",
#endif
#if LIBMUDFLAPTH
           "thread-aware "
#else
           "thread-unaware "
#endif
            );
  /* XXX: The multi-threaded thread-unaware combination is bad.  */

  for (opt = options; opt->name; opt++)
    {
      int default_p = (opt->value == * opt->target);

      switch (opt->type)
        {
          char buf [128];
        case set_option:
          fprintf (stderr, "-%-23.23s %s", opt->name, opt->description);
          if (default_p)
            fprintf (stderr, " [active]\n");
          else
            fprintf (stderr, "\n");
          break;
        case read_integer_option:
          strncpy (buf, opt->name, 128);
          strncpy (buf + strlen (opt->name), "=N", 2);
          fprintf (stderr, "-%-23.23s %s", buf, opt->description);
          fprintf (stderr, " [%d]\n", * opt->target);
          break;
        default: abort();
        }
    }

  fprintf (stderr, "\n");
}
int
__mf_set_options (const char *optstr)
{
  int rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mfu_set_options (optstr);
  /* XXX: It's not really that easy.  A change to a bunch of parameters
     can require updating auxiliary state or risk crashing:
     free_queue_length, crumple_zone ... */
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}
int
__mfu_set_options (const char *optstr)
{
  struct option *opts = 0;
  char *nxt = 0;
  long tmp = 0;
  int rc = 0;
  const char *saved_optstr = optstr;

  /* XXX: bounds-check for optstr! */
  while (*optstr)
    {
      switch (*optstr) {
      case ' ':
      case '\t':
      case '\n':
        optstr++;
        break;

      case '-':
        if (*optstr+1)
          {
            int negate = 0;
            optstr++;

            if (*optstr == '?' ||
                strncmp (optstr, "help", 4) == 0)
              {
                /* Caller will print help and exit.  */
                return -1;
              }

            if (strncmp (optstr, "no-", 3) == 0)
              {
                negate = 1;
                optstr = & optstr[3];
              }

            for (opts = options; opts->name; opts++)
              {
                if (strncmp (optstr, opts->name, strlen (opts->name)) == 0)
                  {
                    optstr += strlen (opts->name);
                    assert (opts->target);
                    switch (opts->type)
                      {
                      case set_option:
                        if (negate)
                          *(opts->target) = 0;
                        else
                          *(opts->target) = opts->value;
                        break;
                      case read_integer_option:
                        if (! negate && (*optstr == '=' && *(optstr+1)))
                          {
                            optstr++;
                            tmp = strtol (optstr, &nxt, 10);
                            if ((optstr != nxt) && (tmp != LONG_MAX))
                              {
                                optstr = nxt;
                                *(opts->target) = (int)tmp;
                              }
                          }
                        else if (negate)
                          *(opts->target) = 0;
                        break;
                      }
                    break;
                  }
              }
          }
        break;

      default:
        fprintf (stderr,
                 "warning: unrecognized string '%s' in mudflap options\n",
                 optstr);
        optstr += strlen (optstr);
        rc = -1;
        break;
      }
    }

  /* Special post-processing: bound __mf_lc_mask and free_queue_length for security. */
  __mf_lc_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
  __mf_opts.free_queue_length &= (__MF_FREEQ_MAX - 1);

  /* Clear the lookup cache, in case the parameters got changed.  */
  memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
  /* void slot 0 */
  __mf_lookup_cache[0].low = MAXPTR;

  TRACE ("set options from `%s'\n", saved_optstr);

  /* Call this unconditionally, in case -sigusr1-report was toggled. */
  __mf_sigusr1_respond ();

  return rc;
}
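/* Example (added for illustration): the parser above accepts strings
   such as

     MUDFLAP_OPTIONS='-mode-check -viol-gdb -backtrace=16 -no-abbreviate'

   where `-NAME' sets a flag, `-no-NAME' clears it, and `-NAME=N'
   supplies an integer parameter from the options[] table.  */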
#ifdef PIC

void
__mf_resolve_single_dynamic (struct __mf_dynamic_entry *e)
{
  char *err;

  assert (e);
  if (e->pointer) return;

#if HAVE_DLVSYM
  if (e->version != NULL && e->version[0] != '\0') /* non-null/empty */
    e->pointer = dlvsym (RTLD_NEXT, e->name, e->version);
  else
#endif
    e->pointer = dlsym (RTLD_NEXT, e->name);

  err = dlerror ();
  if (err)
    {
      fprintf (stderr, "mf: error in dlsym(\"%s\"): %s\n",
               e->name, err);
      abort ();
    }
  if (! e->pointer)
    {
      fprintf (stderr, "mf: dlsym(\"%s\") = NULL\n", e->name);
      abort ();
    }
}


static void
__mf_resolve_dynamics ()
{
  int i;
  for (i = 0; i < dyn_INITRESOLVE; i++)
    __mf_resolve_single_dynamic (& __mf_dynamic[i]);
}
/* NB: order must match enums in mf-impl.h */
struct __mf_dynamic_entry __mf_dynamic [] =
{
  {NULL, "calloc", NULL},
  {NULL, "free", NULL},
  {NULL, "malloc", NULL},
  {NULL, "mmap", NULL},
  {NULL, "munmap", NULL},
  {NULL, "realloc", NULL},
  {NULL, "DUMMY", NULL}, /* dyn_INITRESOLVE */
#ifdef LIBMUDFLAPTH
  {NULL, "pthread_create", PTHREAD_CREATE_VERSION},
  {NULL, "pthread_join", NULL},
  {NULL, "pthread_exit", NULL}
#endif
};

#endif /* PIC */
/* ------------------------------------------------------------------------ */

/* Lookup & manage automatic initialization of the five or so splay trees.  */
static splay_tree
__mf_object_tree (int type)
{
  static splay_tree trees [__MF_TYPE_MAX+1];
  assert (type >= 0 && type <= __MF_TYPE_MAX);
  if (UNLIKELY (trees[type] == NULL))
    trees[type] = splay_tree_new ();
  return trees[type];
}
void
__mf_init ()
{
  char *ov = 0;

  /* Return if initialization has already been done.  */
  if (LIKELY (__mf_starting_p == 0))
    return;

  /* This initial bootstrap phase requires that __mf_starting_p = 1. */
#ifdef PIC
  __mf_resolve_dynamics ();
#endif
  __mf_starting_p = 0;

  __mf_set_default_options ();

  ov = getenv ("MUDFLAP_OPTIONS");
  if (ov)
    {
      int rc = __mfu_set_options (ov);
      if (rc < 0)
        {
          __mf_usage ();
          exit (1);
        }
    }

  /* Initialize to a non-zero description epoch.  */
  __mf_describe_object (NULL);

#define REG_RESERVED(obj) \
  __mf_register (& obj, sizeof(obj), __MF_TYPE_NOACCESS, # obj)
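/* Illustration (added): REG_RESERVED (__mf_lc_mask) expands to

     __mf_register (& __mf_lc_mask, sizeof(__mf_lc_mask),
                    __MF_TYPE_NOACCESS, "__mf_lc_mask");

   so the runtime's own control variables become no-access regions
   that instrumented application code cannot legally touch.  */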
  REG_RESERVED (__mf_lookup_cache);
  REG_RESERVED (__mf_lc_mask);
  REG_RESERVED (__mf_lc_shift);
  /* XXX: others of our statics?  */

  /* Prevent access to *NULL. */
  __mf_register (MINPTR, 1, __MF_TYPE_NOACCESS, "NULL");
  __mf_lookup_cache[0].low = (uintptr_t) -1;
}
int
__wrap_main (int argc, char* argv[])
{
  extern char **environ;
  extern int main ();
  static int been_here = 0;

  if (__mf_opts.heur_std_data && ! been_here)
    {
      unsigned i;

      been_here = 1;
      __mf_register (argv, sizeof(char *)*(argc+1), __MF_TYPE_STATIC, "argv[]");
      for (i=0; i<argc; i++)
        {
          unsigned j = strlen (argv[i]);
          __mf_register (argv[i], j+1, __MF_TYPE_STATIC, "argv element");
        }

      for (i=0; ; i++)
        {
          char *e = environ[i];
          unsigned j;
          if (e == NULL) break;
          j = strlen (environ[i]);
          __mf_register (environ[i], j+1, __MF_TYPE_STATIC, "environ element");
        }
      __mf_register (environ, sizeof(char *)*(i+1), __MF_TYPE_STATIC, "environ[]");

      __mf_register (& errno, sizeof (errno), __MF_TYPE_STATIC, "errno area");

      __mf_register (stdin,  sizeof (*stdin),  __MF_TYPE_STATIC, "stdin");
      __mf_register (stdout, sizeof (*stdout), __MF_TYPE_STATIC, "stdout");
      __mf_register (stderr, sizeof (*stderr), __MF_TYPE_STATIC, "stderr");

      /* Make some effort to register ctype.h static arrays.  */
      /* XXX: e.g., on Solaris, may need to register __ctype, _ctype, __ctype_mask, __toupper, etc. */
      /* On modern Linux GLIBC, these are thread-specific and changeable, and are dealt
         with in mf-hooks2.c. */
    }

#ifdef PIC
  return main (argc, argv, environ);
#else
  return __real_main (argc, argv, environ);
#endif
}
extern void __mf_fini () DTOR;
void __mf_fini ()
{
  TRACE ("__mf_fini\n");
  __mfu_report ();
}
/* ------------------------------------------------------------------------ */
/* __mf_check */

void __mf_check (void *ptr, size_t sz, int type, const char *location)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_check (ptr, sz, type, location);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
void __mfu_check (void *ptr, size_t sz, int type, const char *location)
{
  unsigned entry_idx = __MF_CACHE_INDEX (ptr);
  struct __mf_cache *entry = & __mf_lookup_cache [entry_idx];
  int judgement = 0; /* 0=undecided; <0=violation; >0=okay */
  uintptr_t ptr_low = (uintptr_t) ptr;
  uintptr_t ptr_high = CLAMPSZ (ptr, sz);
  struct __mf_cache old_entry = *entry;

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  TRACE ("check ptr=%p b=%u size=%lu %s location=`%s'\n",
         ptr, entry_idx, (unsigned long)sz,
         (type == 0 ? "read" : "write"), location);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      entry->low = MINPTR;
      entry->high = MAXPTR;
      judgement = 1;
      break;

    case mode_populate:
      entry->low = ptr_low;
      entry->high = ptr_high;
      judgement = 1;
      break;

    case mode_check:
      {
        unsigned heuristics = 0;

        /* Advance aging/adaptation counters.  */
        static unsigned adapt_count;
        adapt_count ++;
        if (UNLIKELY (__mf_opts.adapt_cache > 0 &&
                      adapt_count > __mf_opts.adapt_cache))
          {
            adapt_count = 0;
            __mf_adapt_cache ();
          }

        /* Looping only occurs if heuristics were triggered.  */
        while (judgement == 0)
          {
            DECLARE (void, free, void *p);
            __mf_object_t* ovr_obj[1];
            unsigned obj_count;
            __mf_object_t** all_ovr_obj = NULL;
            __mf_object_t** dealloc_me = NULL;
            unsigned i;

            /* Find all overlapping objects.  Be optimistic that there is just one.  */
            obj_count = __mf_find_objects (ptr_low, ptr_high, ovr_obj, 1);
            if (UNLIKELY (obj_count > 1))
              {
                /* Allocate a real buffer and do the search again.  */
                DECLARE (void *, malloc, size_t c);
                unsigned n;
                all_ovr_obj = CALL_REAL (malloc, (sizeof (__mf_object_t *) *
                                                  obj_count));
                if (all_ovr_obj == NULL) abort ();
                n = __mf_find_objects (ptr_low, ptr_high, all_ovr_obj, obj_count);
                assert (n == obj_count);
                dealloc_me = all_ovr_obj;
              }
            else
              {
                all_ovr_obj = ovr_obj;
                dealloc_me = NULL;
              }

            /* Update object statistics.  */
            for (i = 0; i < obj_count; i++)
              {
                __mf_object_t *obj = all_ovr_obj[i];
                assert (obj != NULL);
                if (type == __MF_CHECK_READ)
                  obj->read_count ++;
                else
                  obj->write_count ++;
                obj->liveness ++;
              }

            /* Iterate over the various objects.  There are a number of special cases.  */
            for (i = 0; i < obj_count; i++)
              {
                __mf_object_t *obj = all_ovr_obj[i];

                /* Any __MF_TYPE_NOACCESS hit is bad.  */
                if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
                  judgement = -1;

                /* Any object with a watch flag is bad.  */
                if (UNLIKELY (obj->watching_p))
                  judgement = -2; /* trigger VIOL_WATCH */

                /* A read from an uninitialized object is bad.  */
                if (UNLIKELY (__mf_opts.check_initialization
                              /* reading */
                              && type == __MF_CHECK_READ
                              /* not written */
                              && obj->write_count == 0
                              /* uninitialized (heap) */
                              && obj->type == __MF_TYPE_HEAP))
                  judgement = -1;
              }

            /* We now know that the access spans one or more valid objects.  */
            if (LIKELY (judgement >= 0))
              for (i = 0; i < obj_count; i++)
                {
                  __mf_object_t *obj = all_ovr_obj[i];

                  /* Is this access entirely contained within this object?  */
                  if (LIKELY (ptr_low >= obj->low && ptr_high <= obj->high))
                    {
                      /* Valid access.  */
                      entry->low = obj->low;
                      entry->high = obj->high;
                      judgement = 1;
                    }

                  /* XXX: Access runs off left or right side of this
                     object.  That could be okay, if there are
                     other objects that fill in all the holes.  */
                }

            if (dealloc_me != NULL)
              CALL_REAL (free, dealloc_me);

            /* If the judgement is still unknown at this stage, loop
               around at most one more time.  */
            if (judgement == 0)
              {
                if (heuristics++ < 2) /* XXX parametrize this number? */
                  judgement = __mf_heuristic_check (ptr_low, ptr_high);
                else
                  judgement = -1;
              }
          }
      }
      break;

    case mode_violate:
      judgement = -1;
      break;
    }

  if (__mf_opts.collect_stats)
    {
      __mf_count_check ++;

      if (LIKELY (old_entry.low != entry->low || old_entry.high != entry->high))
        /* && (old_entry.low != 0) && (old_entry.high != 0)) */
        __mf_lookup_cache_reusecount [entry_idx] ++;
    }

  if (UNLIKELY (judgement < 0))
    __mf_violation (ptr, sz,
                    (uintptr_t) __builtin_return_address (0), location,
                    ((judgement == -1) ?
                     (type == __MF_CHECK_READ ? __MF_VIOL_READ : __MF_VIOL_WRITE) :
                     __MF_VIOL_WATCH));
}
static __mf_object_t *
__mf_insert_new_object (uintptr_t low, uintptr_t high, int type,
                        const char *name, uintptr_t pc)
{
  DECLARE (void *, calloc, size_t c, size_t n);

  __mf_object_t *new_obj;
  new_obj = CALL_REAL (calloc, 1, sizeof(__mf_object_t));
  new_obj->low = low;
  new_obj->high = high;
  new_obj->type = type;
  new_obj->name = name;
  new_obj->alloc_pc = pc;
#if HAVE_GETTIMEOFDAY
  gettimeofday (& new_obj->alloc_time, NULL);
#endif
#if LIBMUDFLAPTH
  new_obj->alloc_thread = pthread_self ();
#endif

  if (__mf_opts.backtrace > 0 && (type == __MF_TYPE_HEAP || type == __MF_TYPE_HEAP_I))
    new_obj->alloc_backtrace_size =
      __mf_backtrace (& new_obj->alloc_backtrace,
                      (void *) pc, 2);

  __mf_link_object (new_obj);
  return new_obj;
}
static void
__mf_uncache_object (__mf_object_t *old_obj)
{
  /* Remove any low/high pointers for this object from the lookup cache.  */

  /* Can it possibly exist in the cache?  */
  if (LIKELY (old_obj->read_count + old_obj->write_count))
    {
      uintptr_t low = old_obj->low;
      uintptr_t high = old_obj->high;
      unsigned idx_low = __MF_CACHE_INDEX (low);
      unsigned idx_high = __MF_CACHE_INDEX (high);
      unsigned i;
      for (i = idx_low; i <= idx_high; i++)
        {
          struct __mf_cache *entry = & __mf_lookup_cache [i];
          /* NB: the "||" in the following test permits this code to
             tolerate the situation introduced by __mf_check over
             contiguous objects, where a cache entry spans several
             objects.  */
          if (entry->low == low || entry->high == high)
            {
              entry->low = MAXPTR;
              entry->high = MINPTR;
            }
        }
    }
}
void
__mf_register (void *ptr, size_t sz, int type, const char *name)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_register (ptr, sz, type, name);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
void
__mfu_register (void *ptr, size_t sz, int type, const char *name)
{
  TRACE ("register ptr=%p size=%lu type=%x name='%s'\n",
         ptr, (unsigned long) sz, type, name ? name : "");

  if (__mf_opts.collect_stats)
    {
      __mf_count_register ++;
      __mf_total_register_size [(type < 0) ? 0 :
                                (type > __MF_TYPE_MAX) ? 0 :
                                type] += sz;
    }

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      break;

    case mode_violate:
      __mf_violation (ptr, sz, (uintptr_t) __builtin_return_address (0), NULL,
                      __MF_VIOL_REGISTER);
      break;

    case mode_populate:
      /* Clear the cache.  */
      /* XXX: why the entire cache? */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
      break;

    case mode_check:
      {
        __mf_object_t *ovr_objs [1];
        unsigned num_overlapping_objs;
        uintptr_t low = (uintptr_t) ptr;
        uintptr_t high = CLAMPSZ (ptr, sz);
        uintptr_t pc = (uintptr_t) __builtin_return_address (0);

        /* Treat unknown size indication as 1.  */
        if (UNLIKELY (sz == 0)) sz = 1;

        /* Look for objects only of the same type.  This will e.g. permit a registration
           of a STATIC overlapping with a GUESS, and a HEAP with a NOACCESS.  At
           __mf_check time however harmful overlaps will be detected.  */
        num_overlapping_objs = __mf_find_objects2 (low, high, ovr_objs, 1, type);

        /* Handle overlaps.  */
        if (UNLIKELY (num_overlapping_objs > 0))
          {
            __mf_object_t *ovr_obj = ovr_objs[0];

            /* Accept certain specific duplication pairs.  */
            if (((type == __MF_TYPE_STATIC) || (type == __MF_TYPE_GUESS))
                && ovr_obj->low == low
                && ovr_obj->high == high
                && ovr_obj->type == type)
              {
                /* Duplicate registration for static objects may come
                   from distinct compilation units.  */
                VERBOSE_TRACE ("harmless duplicate reg %p-%p `%s'\n",
                               (void *) low, (void *) high,
                               (ovr_obj->name ? ovr_obj->name : ""));
                break;
              }

            /* Alas, a genuine violation.  */
            else
              {
                /* Two or more *real* mappings here.  */
                __mf_violation ((void *) ptr, sz,
                                (uintptr_t) __builtin_return_address (0), NULL,
                                __MF_VIOL_REGISTER);
              }
          }
        else /* No overlapping objects: AOK.  */
          __mf_insert_new_object (low, high, type, name, pc);

        /* We could conceivably call __mf_check() here to prime the cache,
           but then the read_count/write_count field is not reliable.  */
        break;
      }
    } /* end switch (__mf_opts.mudflap_mode) */
}
void
__mf_unregister (void *ptr, size_t sz, int type)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_unregister (ptr, sz, type);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
void
__mfu_unregister (void *ptr, size_t sz, int type)
{
  DECLARE (void, free, void *ptr);

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  TRACE ("unregister ptr=%p size=%lu type=%x\n", ptr, (unsigned long) sz, type);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      break;

    case mode_violate:
      __mf_violation (ptr, sz,
                      (uintptr_t) __builtin_return_address (0), NULL,
                      __MF_VIOL_UNREGISTER);
      break;

    case mode_populate:
      /* Clear the cache.  */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
      break;

    case mode_check:
      {
        __mf_object_t *old_obj = NULL;
        __mf_object_t *del_obj = NULL;  /* Object to actually delete. */
        __mf_object_t *objs[1] = {NULL};
        unsigned num_overlapping_objs;

        num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
                                                   CLAMPSZ (ptr, sz), objs, 1, type);

        /* Special case for HEAP_I - see free & realloc hook.  They don't
           know whether the input region was HEAP or HEAP_I before
           unmapping it.  Here we give HEAP a try in case HEAP_I
           failed.  */
        if ((type == __MF_TYPE_HEAP_I) && (num_overlapping_objs == 0))
          {
            num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
                                                       CLAMPSZ (ptr, sz), objs, 1, __MF_TYPE_HEAP);
          }

        old_obj = objs[0];
        if (UNLIKELY ((num_overlapping_objs != 1) /* more than one overlap */
                      || ((sz == 0) ? 0 : (sz != (old_obj->high - old_obj->low + 1))) /* size mismatch */
                      || ((uintptr_t) ptr != old_obj->low))) /* base mismatch */
          {
            __mf_violation (ptr, sz,
                            (uintptr_t) __builtin_return_address (0), NULL,
                            __MF_VIOL_UNREGISTER);
            break;
          }

        __mf_unlink_object (old_obj);
        __mf_uncache_object (old_obj);

        /* Wipe buffer contents if desired.  */
        if ((__mf_opts.wipe_stack && old_obj->type == __MF_TYPE_STACK)
            || (__mf_opts.wipe_heap && (old_obj->type == __MF_TYPE_HEAP
                                        || old_obj->type == __MF_TYPE_HEAP_I)))
          {
            memset ((void *) old_obj->low,
                    0,
                    (size_t) (old_obj->high - old_obj->low + 1));
          }

        /* Manage the object cemetary.  */
        if (__mf_opts.persistent_count > 0 &&
            old_obj->type >= 0 &&
            old_obj->type <= __MF_TYPE_MAX_CEM)
          {
            old_obj->deallocated_p = 1;
            old_obj->dealloc_pc = (uintptr_t) __builtin_return_address (0);
#if HAVE_GETTIMEOFDAY
            gettimeofday (& old_obj->dealloc_time, NULL);
#endif
#ifdef LIBMUDFLAPTH
            old_obj->dealloc_thread = pthread_self ();
#endif

            if (__mf_opts.backtrace > 0 && old_obj->type == __MF_TYPE_HEAP)
              old_obj->dealloc_backtrace_size =
                __mf_backtrace (& old_obj->dealloc_backtrace,
                                NULL, 2);

            /* Encourage this object to be displayed again in current epoch.  */
            old_obj->description_epoch --;

            /* Put this object into the cemetary.  This may require this plot to
               be recycled, and the previous resident to be designated del_obj.  */
            {
              unsigned row = old_obj->type;
              unsigned plot = __mf_object_dead_head [row];

              del_obj = __mf_object_cemetary [row][plot];
              __mf_object_cemetary [row][plot] = old_obj;
              plot ++;
              if (plot == __mf_opts.persistent_count) plot = 0;
              __mf_object_dead_head [row] = plot;
            }
          }
        else
          del_obj = old_obj;

        if (__mf_opts.print_leaks)
          {
            if ((old_obj->read_count + old_obj->write_count) == 0 &&
                (old_obj->type == __MF_TYPE_HEAP
                 || old_obj->type == __MF_TYPE_HEAP_I))
              {
                fprintf (stderr,
                         "*******\n"
                         "mudflap warning: unaccessed registered object:\n");
                __mf_describe_object (old_obj);
              }
          }

        if (del_obj != NULL) /* May or may not equal old_obj.  */
          {
            if (__mf_opts.backtrace > 0)
              {
                CALL_REAL(free, del_obj->alloc_backtrace);
                if (__mf_opts.persistent_count > 0)
                  {
                    CALL_REAL(free, del_obj->dealloc_backtrace);
                  }
              }
            CALL_REAL(free, del_obj);
          }

        break;
      }
    } /* end switch (__mf_opts.mudflap_mode) */

  if (__mf_opts.collect_stats)
    {
      __mf_count_unregister ++;
      __mf_total_unregister_size += sz;
    }
}
struct tree_stats
{
  unsigned obj_count;
  unsigned long total_size;
  unsigned live_obj_count;
  double total_weight;
  double weighted_size;
  unsigned long weighted_address_bits [sizeof (uintptr_t) * 8][2];
};


static int
__mf_adapt_cache_fn (splay_tree_node n, void *param)
{
  __mf_object_t *obj = (__mf_object_t *) n->value;
  struct tree_stats *s = (struct tree_stats *) param;

  assert (obj != NULL && s != NULL);

  /* Exclude never-accessed objects.  */
  if (obj->read_count + obj->write_count)
    {
      s->obj_count ++;
      s->total_size += (obj->high - obj->low + 1);

      if (obj->liveness)
        {
          unsigned i;
          uintptr_t addr;

          /* VERBOSE_TRACE ("analyze low=%p live=%u name=`%s'\n",
             (void *) obj->low, obj->liveness, obj->name); */

          s->live_obj_count ++;
          s->total_weight += (double) obj->liveness;
          s->weighted_size +=
            (double) (obj->high - obj->low + 1) *
            (double) obj->liveness;

          addr = obj->low;
          for (i=0; i<sizeof(uintptr_t) * 8; i++)
            {
              unsigned bit = addr & 1;
              s->weighted_address_bits[i][bit] += obj->liveness;
              addr = addr >> 1;
            }

          /* Age the liveness value.  */
          obj->liveness >>= 1;
        }
    }

  return 0;
}
static void
__mf_adapt_cache ()
{
  struct tree_stats s;
  uintptr_t new_mask = 0;
  unsigned char new_shift;
  float cache_utilization;
  unsigned i;
  static float smoothed_new_shift = -1.0;
  float max_value = 0.0;

  memset (&s, 0, sizeof (s));

  splay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP), __mf_adapt_cache_fn, (void *) & s);
  splay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I), __mf_adapt_cache_fn, (void *) & s);
  splay_tree_foreach (__mf_object_tree (__MF_TYPE_STACK), __mf_adapt_cache_fn, (void *) & s);
  splay_tree_foreach (__mf_object_tree (__MF_TYPE_STATIC), __mf_adapt_cache_fn, (void *) & s);
  splay_tree_foreach (__mf_object_tree (__MF_TYPE_GUESS), __mf_adapt_cache_fn, (void *) & s);

  /* Maybe we're dealing with funny aging/adaptation parameters, or an
     empty tree.  Just leave the cache alone in such cases, rather
     than risk dying by division-by-zero.  */
  if (! (s.obj_count > 0 && s.live_obj_count > 0 && s.total_weight > 0.0))
    return;

  /* Guess a good value for the shift parameter by finding an address bit that is a
     good discriminant of lively objects.  */
  for (i=0; i<sizeof (uintptr_t)*8; i++)
    {
      float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
      if (max_value < value) max_value = value;
    }
  for (i=0; i<sizeof (uintptr_t)*8; i++)
    {
      float shoulder_factor = 0.7;  /* Include slightly less popular bits too.  */
      float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
      if (value >= max_value * shoulder_factor)
        break;
    }
  if (smoothed_new_shift < 0) smoothed_new_shift = __mf_lc_shift;
  /* Converge toward this slowly to reduce flapping.  */
  smoothed_new_shift = 0.9*smoothed_new_shift + 0.1*i;
  new_shift = (unsigned) (smoothed_new_shift + 0.5);
  assert (new_shift < sizeof (uintptr_t)*8);

  /* Count number of used buckets.  */
  cache_utilization = 0.0;
  for (i = 0; i < (1 + __mf_lc_mask); i++)
    if (__mf_lookup_cache[i].low != 0 || __mf_lookup_cache[i].high != 0)
      cache_utilization += 1.0;
  cache_utilization /= (1 + __mf_lc_mask);

  new_mask |= 0x3ff; /* XXX: force a large cache.  */
  new_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);

  VERBOSE_TRACE ("adapt cache obj=%u/%u sizes=%lu/%.0f/%.0f => "
                 "util=%u%% m=%p s=%u\n",
                 s.obj_count, s.live_obj_count, s.total_size, s.total_weight, s.weighted_size,
                 (unsigned)(cache_utilization*100.0), (void *) new_mask, new_shift);

  /* We should reinitialize cache if its parameters have changed.  */
  if (new_mask != __mf_lc_mask ||
      new_shift != __mf_lc_shift)
    {
      __mf_lc_mask = new_mask;
      __mf_lc_shift = new_shift;
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
    }
}
/* __mf_find_object[s] */

/* Find overlapping live objects between [low,high].  Return up to
   max_objs of their pointers in objs[].  Return total count of
   overlaps (may exceed max_objs).  */

unsigned
__mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
                    __mf_object_t **objs, unsigned max_objs, int type)
{
  unsigned count = 0;
  unsigned direction;
  splay_tree t = __mf_object_tree (type);
  splay_tree_key k = (splay_tree_key) ptr_low;

  splay_tree_node n = splay_tree_lookup (t, k);
  /* An exact match for base address implies a hit.  */
  if (n != NULL)
    {
      if (count < max_objs)
        objs[count] = (__mf_object_t *) n->value;
      count ++;
    }

  /* Iterate left then right near this key value to find all overlapping objects. */
  for (direction = 0; direction < 2; direction ++)
    {
      /* Reset search origin.  */
      k = (splay_tree_key) ptr_low;

      while (1)
        {
          __mf_object_t *obj;

          n = (direction == 0 ? splay_tree_successor (t, k) : splay_tree_predecessor (t, k));
          if (n == NULL) break;
          obj = (__mf_object_t *) n->value;

          if (! (obj->low <= ptr_high && obj->high >= ptr_low)) /* No overlap? */
            break;

          if (count < max_objs)
            objs[count] = (__mf_object_t *) n->value;
          count ++;

          k = (splay_tree_key) obj->low;
        }
    }

  return count;
}
unsigned
__mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                   __mf_object_t **objs, unsigned max_objs)
{
  int type;
  unsigned count = 0;

  /* Search each splay tree for overlaps.  */
  for (type = __MF_TYPE_NOACCESS; type <= __MF_TYPE_GUESS; type++)
    {
      unsigned c = __mf_find_objects2 (ptr_low, ptr_high, objs, max_objs, type);
      if (c > max_objs)
        {
          max_objs = 0;
          objs = NULL;
        }
      else /* NB: c may equal 0 */
        {
          max_objs -= c;
          objs += c;
        }
      count += c;
    }

  return count;
}
/* __mf_link_object */

static void
__mf_link_object (__mf_object_t *node)
{
  splay_tree t = __mf_object_tree (node->type);
  splay_tree_insert (t, (splay_tree_key) node->low, (splay_tree_value) node);
}

/* __mf_unlink_object */

static void
__mf_unlink_object (__mf_object_t *node)
{
  splay_tree t = __mf_object_tree (node->type);
  splay_tree_remove (t, (splay_tree_key) node->low);
}
/* __mf_find_dead_objects */

/* Find overlapping dead objects between [low,high].  Return up to
   max_objs of their pointers in objs[].  Return total count of
   overlaps (may exceed max_objs).  */

static unsigned
__mf_find_dead_objects (uintptr_t low, uintptr_t high,
                        __mf_object_t **objs, unsigned max_objs)
{
  if (__mf_opts.persistent_count > 0)
    {
      unsigned count = 0;
      unsigned recollection = 0;
      unsigned row;

      assert (low <= high);
      assert (max_objs == 0 || objs != NULL);

      /* Widen the search from the most recent plots in each row, looking
         backward in time.  */
      while (recollection < __mf_opts.persistent_count)
        {
          count = 0;
          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
            {
              unsigned plot;
              unsigned i;

              plot = __mf_object_dead_head [row];
              for (i = 0; i <= recollection; i ++)
                {
                  __mf_object_t *obj;

                  /* Look backward through row: it's a circular buffer.  */
                  if (plot > 0) plot --;
                  else plot = __mf_opts.persistent_count - 1;

                  obj = __mf_object_cemetary [row][plot];
                  if (obj && obj->low <= high && obj->high >= low)
                    {
                      /* Found an overlapping dead object!  */
                      if (count < max_objs)
                        objs [count] = obj;
                      count ++;
                    }
                }
            }

          if (count)
            break;

          /* Look farther back in time.  */
          recollection = (recollection * 2) + 1;
        }

      return count;
    }
  else
    return 0;
}
/* __mf_describe_object */

static void
__mf_describe_object (__mf_object_t *obj)
{
  static unsigned epoch = 0;
  if (obj == NULL)
    {
      epoch ++;
      return;
    }

  if (__mf_opts.abbreviate && obj->description_epoch == epoch)
    {
      fprintf (stderr,
               "mudflap object %p: name=`%s'\n",
               (void *) obj, (obj->name ? obj->name : ""));
      return;
    }
  else
    obj->description_epoch = epoch;

  fprintf (stderr,
           "mudflap object %p: name=`%s'\n"
           "bounds=[%p,%p] size=%lu area=%s check=%ur/%uw liveness=%u%s\n"
           "alloc time=%lu.%06lu pc=%p"
#ifdef LIBMUDFLAPTH
           " thread=%u"
#endif
           "\n",
           (void *) obj, (obj->name ? obj->name : ""),
           (void *) obj->low, (void *) obj->high,
           (unsigned long) (obj->high - obj->low + 1),
           (obj->type == __MF_TYPE_NOACCESS ? "no-access" :
            obj->type == __MF_TYPE_HEAP ? "heap" :
            obj->type == __MF_TYPE_HEAP_I ? "heap-init" :
            obj->type == __MF_TYPE_STACK ? "stack" :
            obj->type == __MF_TYPE_STATIC ? "static" :
            obj->type == __MF_TYPE_GUESS ? "guess" :
            "unknown"),
           obj->read_count, obj->write_count, obj->liveness,
           obj->watching_p ? " watching" : "",
           obj->alloc_time.tv_sec, obj->alloc_time.tv_usec,
           (void *) obj->alloc_pc
#ifdef LIBMUDFLAPTH
           , (unsigned) obj->alloc_thread
#endif
           );

  if (__mf_opts.backtrace > 0)
    {
      unsigned i;
      for (i=0; i<obj->alloc_backtrace_size; i++)
        fprintf (stderr, "      %s\n", obj->alloc_backtrace[i]);
    }

  if (__mf_opts.persistent_count > 0)
    {
      if (obj->deallocated_p)
        {
          fprintf (stderr, "dealloc time=%lu.%06lu pc=%p"
#ifdef LIBMUDFLAPTH
                   " thread=%u"
#endif
                   "\n",
                   obj->dealloc_time.tv_sec, obj->dealloc_time.tv_usec,
                   (void *) obj->dealloc_pc
#ifdef LIBMUDFLAPTH
                   , (unsigned) obj->dealloc_thread
#endif
                   );

          if (__mf_opts.backtrace > 0)
            {
              unsigned i;
              for (i=0; i<obj->dealloc_backtrace_size; i++)
                fprintf (stderr, "      %s\n", obj->dealloc_backtrace[i]);
            }
        }
    }
}
static int
__mf_report_leaks_fn (splay_tree_node n, void *param)
{
  __mf_object_t *node = (__mf_object_t *) n->value;
  unsigned *count = (unsigned *) param;

  if (count != NULL)
    (*count) ++;

  fprintf (stderr, "Leaked object %u:\n", (*count));
  __mf_describe_object (node);

  return 0;
}


static unsigned
__mf_report_leaks ()
{
  unsigned count = 0;

  (void) splay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP),
                             __mf_report_leaks_fn, & count);
  (void) splay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I),
                             __mf_report_leaks_fn, & count);

  return count;
}
/* ------------------------------------------------------------------------ */
/* __mf_report */

void
__mf_report ()
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_report ();
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}

void
__mfu_report ()
{
  if (__mf_opts.collect_stats)
    {
      fprintf (stderr,
               "*******\n"
               "mudflap stats:\n"
               "calls to __mf_check: %lu\n"
               "         __mf_register: %lu [%luB, %luB, %luB, %luB, %luB]\n"
               "         __mf_unregister: %lu [%luB]\n"
               "         __mf_violation: [%lu, %lu, %lu, %lu, %lu]\n",
               __mf_count_check,
               __mf_count_register,
               __mf_total_register_size[0], __mf_total_register_size[1],
               __mf_total_register_size[2], __mf_total_register_size[3],
               __mf_total_register_size[4], /* XXX */
               __mf_count_unregister, __mf_total_unregister_size,
               __mf_count_violation[0], __mf_count_violation[1],
               __mf_count_violation[2], __mf_count_violation[3],
               __mf_count_violation[4]);

      fprintf (stderr,
               "calls with reentrancy: %lu\n", __mf_reentrancy);
#ifdef LIBMUDFLAPTH
      fprintf (stderr,
               "           lock contention: %lu\n", __mf_lock_contention);
#endif

      /* Lookup cache stats.  */
      {
        unsigned i;
        unsigned max_reuse = 0;
        unsigned num_used = 0;
        unsigned num_unused = 0;

        for (i = 0; i < LOOKUP_CACHE_SIZE; i++)
          {
            if (__mf_lookup_cache_reusecount[i])
              num_used ++;
            else
              num_unused ++;
            if (max_reuse < __mf_lookup_cache_reusecount[i])
              max_reuse = __mf_lookup_cache_reusecount[i];
          }
        fprintf (stderr, "lookup cache slots used: %u  unused: %u  peak-reuse: %u\n",
                 num_used, num_unused, max_reuse);
      }

      {
        unsigned live_count;
        live_count = __mf_find_objects (MINPTR, MAXPTR, NULL, 0);
        fprintf (stderr, "number of live objects: %u\n", live_count);
      }

      if (__mf_opts.persistent_count > 0)
        {
          unsigned dead_count = 0;
          unsigned row, plot;
          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
            for (plot = 0 ; plot < __mf_opts.persistent_count; plot ++)
              if (__mf_object_cemetary [row][plot] != 0)
                dead_count ++;
          fprintf (stderr, "          zombie objects: %u\n", dead_count);
        }
    }

  if (__mf_opts.print_leaks && (__mf_opts.mudflap_mode == mode_check))
    {
      unsigned l;
      extern void * __mf_wrap_alloca_indirect (size_t c);

      /* Free up any remaining alloca()'d blocks.  */
      __mf_wrap_alloca_indirect (0);
      __mf_describe_object (NULL); /* Reset description epoch.  */
      l = __mf_report_leaks ();
      fprintf (stderr, "number of leaked objects: %u\n", l);
    }
}
/* __mf_backtrace */

unsigned
__mf_backtrace (char ***symbols, void *guess_pc, unsigned guess_omit_levels)
{
  void ** pc_array;
  unsigned pc_array_size = __mf_opts.backtrace + guess_omit_levels;
  unsigned remaining_size;
  unsigned omitted_size = 0;
  unsigned i;
  DECLARE (void, free, void *ptr);
  DECLARE (void *, calloc, size_t c, size_t n);
  DECLARE (void *, malloc, size_t n);

  pc_array = CALL_REAL (calloc, pc_array_size, sizeof (void *) );
#ifdef HAVE_BACKTRACE
  pc_array_size = backtrace (pc_array, pc_array_size);
#else
#define FETCH(n) do { if (pc_array_size >= n) { \
                 pc_array[n] = __builtin_return_address(n); \
                 if (pc_array[n] == 0) pc_array_size = n; } } while (0)

  /* Unroll some calls to __builtin_return_address because this function
     only takes a literal integer parameter.  */
  FETCH (0);
#if 0
  /* XXX: __builtin_return_address sometimes crashes (!) on >0 arguments,
     rather than simply returning 0.  :-(  */
  FETCH (1);
  FETCH (2);
  FETCH (3);
  FETCH (4);
  FETCH (5);
  FETCH (6);
  FETCH (7);
  FETCH (8);
  if (pc_array_size > 8) pc_array_size = 9;
#else
  if (pc_array_size > 0) pc_array_size = 1;
#endif

#undef FETCH
#endif

  /* We want to trim the first few levels of the stack traceback,
     since they contain libmudflap wrappers and junk.  If pc_array[]
     ends up containing a non-NULL guess_pc, then trim everything
     before that.  Otherwise, omit the first guess_omit_levels
     entries. */

  if (guess_pc != NULL)
    for (i=0; i<pc_array_size; i++)
      if (pc_array [i] == guess_pc)
        omitted_size = i;

  if (omitted_size == 0) /* No match? */
    if (pc_array_size > guess_omit_levels)
      omitted_size = guess_omit_levels;

  remaining_size = pc_array_size - omitted_size;

#ifdef HAVE_BACKTRACE_SYMBOLS
  *symbols = backtrace_symbols (pc_array + omitted_size, remaining_size);
#else
  {
    /* Let's construct a buffer by hand.  It will have <remaining_size>
       char*'s at the front, pointing at individual strings immediately
       afterwards.  */
    void *buffer;
    char *chars;
    char **pointers;
    enum { perline = 30 };
    buffer = CALL_REAL (malloc, remaining_size * (perline + sizeof(char *)));
    pointers = (char **) buffer;
    chars = (char *)buffer + (remaining_size * sizeof (char *));
    for (i = 0; i < remaining_size; i++)
      {
        pointers[i] = chars;
        sprintf (chars, "[0x%p]", pc_array [omitted_size + i]);
        chars = chars + perline;
      }
    *symbols = pointers;
  }
#endif
  CALL_REAL (free, pc_array);

  return remaining_size;
}
/* ------------------------------------------------------------------------ */
/* __mf_violation */

void
__mf_violation (void *ptr, size_t sz, uintptr_t pc,
                const char *location, int type)
{
  char buf [128];
  static unsigned violation_number;
  DECLARE(void, free, void *ptr);

  TRACE ("violation pc=%p location=%s type=%d ptr=%p size=%lu\n",
         (void *) pc,
         (location != NULL ? location : ""), type, ptr, (unsigned long) sz);

  if (__mf_opts.collect_stats)
    __mf_count_violation [(type < 0) ? 0 :
                          (type > __MF_VIOL_WATCH) ? 0 :
                          type] ++;

  /* Print out a basic warning message.  */
  if (__mf_opts.verbose_violations)
    {
      unsigned dead_p;
      unsigned num_helpful = 0;
      struct timeval now;
#if HAVE_GETTIMEOFDAY
      gettimeofday (& now, NULL);
#endif

      violation_number ++;
      fprintf (stderr,
               "*******\n"
               "mudflap violation %u (%s): time=%lu.%06lu "
               "ptr=%p size=%lu\npc=%p%s%s%s\n",
               violation_number,
               ((type == __MF_VIOL_READ) ? "check/read" :
                (type == __MF_VIOL_WRITE) ? "check/write" :
                (type == __MF_VIOL_REGISTER) ? "register" :
                (type == __MF_VIOL_UNREGISTER) ? "unregister" :
                (type == __MF_VIOL_WATCH) ? "watch" : "unknown"),
               now.tv_sec, now.tv_usec,
               (void *) ptr, (unsigned long)sz, (void *) pc,
               (location != NULL ? " location=`" : ""),
               (location != NULL ? location : ""),
               (location != NULL ? "'" : ""));

      if (__mf_opts.backtrace > 0)
        {
          char ** symbols;
          unsigned i, num;

          num = __mf_backtrace (& symbols, (void *) pc, 2);
          /* Note: backtrace_symbols calls malloc().  But since we're in
             __mf_violation and presumably __mf_check, it'll detect
             recursion, and not put the new string into the database.  */

          for (i=0; i<num; i++)
            fprintf (stderr, "      %s\n", symbols[i]);

          /* Calling free() here would trigger a violation.  */
          CALL_REAL(free, symbols);
        }

      /* Look for nearby objects.  For this, we start with s_low/s_high
         pointing to the given area, looking for overlapping objects.
         If none show up, widen the search area and keep looking. */

      if (sz == 0) sz = 1;

      for (dead_p = 0; dead_p <= 1; dead_p ++) /* for dead_p in 0 1 */
        {
          enum {max_objs = 3}; /* magic */
          __mf_object_t *objs[max_objs];
          unsigned num_objs = 0;
          uintptr_t s_low, s_high;
          unsigned tries = 0;
          unsigned i;

          s_low = (uintptr_t) ptr;
          s_high = CLAMPSZ (ptr, sz);

          while (tries < 16) /* magic */
            {
              if (dead_p)
                num_objs = __mf_find_dead_objects (s_low, s_high, objs, max_objs);
              else
                num_objs = __mf_find_objects (s_low, s_high, objs, max_objs);

              if (num_objs) /* good enough */
                break;

              tries ++;

              /* XXX: tune this search strategy.  It's too dependent on
                 sz, which can vary from 1 to very big (when array index
                 checking) numbers. */
              s_low = CLAMPSUB (s_low, (sz * tries * tries));
              s_high = CLAMPADD (s_high, (sz * tries * tries));
            }

          for (i = 0; i < min (num_objs, max_objs); i++)
            {
              __mf_object_t *obj = objs[i];
              uintptr_t low = (uintptr_t) ptr;
              uintptr_t high = CLAMPSZ (ptr, sz);
              unsigned before1 = (low < obj->low) ? obj->low - low : 0;
              unsigned after1 = (low > obj->high) ? low - obj->high : 0;
              unsigned into1 = (high >= obj->low && low <= obj->high) ? low - obj->low : 0;
              unsigned before2 = (high < obj->low) ? obj->low - high : 0;
              unsigned after2 = (high > obj->high) ? high - obj->high : 0;
              unsigned into2 = (high >= obj->low && low <= obj->high) ? high - obj->low : 0;

              fprintf (stderr, "Nearby object %u: checked region begins %uB %s and ends %uB %s\n",
                       num_helpful + i + 1,
                       (before1 ? before1 : after1 ? after1 : into1),
                       (before1 ? "before" : after1 ? "after" : "into"),
                       (before2 ? before2 : after2 ? after2 : into2),
                       (before2 ? "before" : after2 ? "after" : "into"));
              __mf_describe_object (obj);
            }
          num_helpful += num_objs;
        }

      fprintf (stderr, "number of nearby objects: %u\n", num_helpful);
    }

  /* How to finally handle this violation?  */
  switch (__mf_opts.violation_mode)
    {
    case viol_nop:
      break;
    case viol_segv:
      kill (getpid(), SIGSEGV);
      break;
    case viol_abort:
      abort ();
      break;
    case viol_gdb:
      snprintf (buf, 128, "gdb --pid=%u", (unsigned) getpid ());
      system (buf);
      /* XXX: should probably fork() && sleep(GDB_WAIT_PARAMETER)
         instead, and let the forked child execlp() gdb.  That way, this
         subject process can be resumed under the supervision of gdb.
         This can't happen now, since system() only returns when gdb
         dies.  In that case, we need to beware of starting a second
         concurrent gdb child upon the next violation.  (But if the first
         gdb dies, then starting a new one is appropriate.)  */
      break;
    }
}
/* ------------------------------------------------------------------------ */
/* __mf_watch */

unsigned __mf_watch (void *ptr, size_t sz)
{
  unsigned rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mf_watch_or_not (ptr, sz, 1);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}

unsigned __mf_unwatch (void *ptr, size_t sz)
{
  unsigned rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mf_watch_or_not (ptr, sz, 0);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}
static unsigned
__mf_watch_or_not (void *ptr, size_t sz, char flag)
{
  uintptr_t ptr_high = CLAMPSZ (ptr, sz);
  uintptr_t ptr_low = (uintptr_t) ptr;
  unsigned count = 0;

  TRACE ("%s ptr=%p size=%lu\n",
         (flag ? "watch" : "unwatch"), ptr, (unsigned long) sz);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
    case mode_populate:
    case mode_violate:
      count = 0;
      break;

    case mode_check:
      {
        __mf_object_t **all_ovr_objs;
        unsigned obj_count;
        unsigned n;
        DECLARE (void *, malloc, size_t c);
        DECLARE (void, free, void *p);

        obj_count = __mf_find_objects (ptr_low, ptr_high, NULL, 0);
        VERBOSE_TRACE (" %u:", obj_count);

        all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_t *) * obj_count));
        if (all_ovr_objs == NULL) abort ();
        n = __mf_find_objects (ptr_low, ptr_high, all_ovr_objs, obj_count);
        assert (n == obj_count);

        for (n = 0; n < obj_count; n ++)
          {
            __mf_object_t *obj = all_ovr_objs[n];

            VERBOSE_TRACE (" [%p]", (void *) obj);
            if (obj->watching_p != flag)
              {
                obj->watching_p = flag;
                count ++;

                /* Remove object from cache, to ensure next access
                   goes through __mf_check().  */
                if (flag)
                  __mf_uncache_object (obj);
              }
          }
        CALL_REAL (free, all_ovr_objs);
      }
      break;
    }

  return count;
}
static void
__mf_sigusr1_handler (int num)
{
  __mf_sigusr1_received ++;
}

/* Install or remove SIGUSR1 handler as necessary.
   Also, respond to a received pending SIGUSR1.  */
static void
__mf_sigusr1_respond ()
{
  static int handler_installed;

#ifdef SIGUSR1
  /* Manage handler */
  if (__mf_opts.sigusr1_report && ! handler_installed)
    {
      signal (SIGUSR1, __mf_sigusr1_handler);
      handler_installed = 1;
    }
  else if (! __mf_opts.sigusr1_report && handler_installed)
    {
      signal (SIGUSR1, SIG_DFL);
      handler_installed = 0;
    }
#endif

  /* Manage enqueued signals */
  if (__mf_sigusr1_received > __mf_sigusr1_handled)
    {
      __mf_sigusr1_handled ++;
      assert (__mf_state == reentrant);
      __mfu_report ();
      handler_installed = 0; /* We may need to re-enable signal; this might be a SysV library. */
    }
}
/* XXX: provide an alternative __assert_fail function that cannot
   fail due to libmudflap infinite recursion.  */
#ifndef NDEBUG

static void
write_itoa (int fd, unsigned n)
{
  enum x { bufsize = sizeof(n)*4 };
  char buf [bufsize];
  unsigned i;

  for (i=0; i<bufsize-1; i++)
    {
      unsigned digit = n % 10;
      buf[bufsize-2-i] = digit + '0';
      n /= 10;
      if (n == 0)
        break;
    }

  {
    char *m = & buf [bufsize-2-i];
    buf[bufsize-1] = '\0';
    write (fd, m, strlen(m));
  }
}
void
__assert_fail (const char *msg, const char *file, unsigned line, const char *func)
{
#define write2(string) write (2, (string), strlen ((string)));
  write2("mf");
#ifdef LIBMUDFLAPTH
  write2("(");
  write_itoa (2, (unsigned) pthread_self ());
  write2(")");
#endif
  write2(": assertion failure: `");
  write (2, msg, strlen (msg));
  write2("' in ");
  write (2, func, strlen (func));
  write2(" at ");
  write (2, file, strlen (file));
  write2(":");
  write_itoa (2, line);
  write2("\n");
  abort ();
}

#endif /* NDEBUG */
/* #include the generic splay tree implementation from libiberty here, to
   ensure that it uses our memory allocation primitives.  */

static void
splay_tree_free (void *p)
{
  DECLARE (void, free, void *p);
  CALL_REAL (free, p);
}

static void *
splay_tree_xmalloc (size_t s)
{
  DECLARE (void *, malloc, size_t s);
  return CALL_REAL (malloc, s);
}

#define free(z) splay_tree_free(z)
#define xmalloc(z) splay_tree_xmalloc(z)
#include "splay-tree.c"