1 /* Mudflap: narrow-pointer bounds-checking by tree rewriting.
2 Copyright (C) 2002, 2003 Free Software Foundation, Inc.
3 Contributed by Frank Ch. Eigler <fche@redhat.com>
4 and Graydon Hoare <graydon@redhat.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
13 In addition to the permissions in the GNU General Public License, the
14 Free Software Foundation gives you unlimited permission to link the
15 compiled version of this file into combinations with other programs,
16 and to distribute those combinations without any restriction coming
17 from the use of this file. (The General Public License restrictions
18 do apply in other respects; for example, they cover modification of
19 the file, and distribution when not linked into a combine
22 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
23 WARRANTY; without even the implied warranty of MERCHANTABILITY or
24 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
27 You should have received a copy of the GNU General Public License
28 along with GCC; see the file COPYING. If not, write to the Free
29 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
34 /* These attempt to coax various unix flavours to declare all our
35 needed tidbits in the system headers. */
36 #if !defined(__FreeBSD__)
38 #endif /* Some BSDs break <sys/socket.h> if this is defined. */
42 #define __EXTENSIONS__
44 #define _LARGE_FILE_API
45 #define _XOPEN_SOURCE_EXTENDED 1
49 #include <sys/types.h>
53 #ifdef HAVE_EXECINFO_H
63 #include <sys/types.h>
67 #include "mf-runtime.h"
71 /* ------------------------------------------------------------------------ */
74 #define CTOR __attribute__ ((constructor))  /* Run before main() (GCC attribute). */
75 #define DTOR __attribute__ ((destructor))   /* Run after main() returns/exit(). */
78 /* Codes to describe the context in which a violation occurs. */
79 #define __MF_VIOL_UNKNOWN 0
80 #define __MF_VIOL_READ 1       /* Out-of-bounds/invalid read (__mf_check). */
81 #define __MF_VIOL_WRITE 2      /* Out-of-bounds/invalid write (__mf_check). */
82 #define __MF_VIOL_REGISTER 3   /* Illegal/overlapping object registration. */
83 #define __MF_VIOL_UNREGISTER 4 /* Unregistering an unknown or partial object. */
84 #define __MF_VIOL_WATCH 5      /* Access to an object marked watching_p. */
86 /* Protect against recursive calls: if a runtime entry point is entered
   while __mf_state is already `reentrant', report the reentrancy on
   stderr (fd 2, via raw write to avoid further recursion) rather than
   recurse.  END_RECURSION_PROTECT restores the `active' state. */
87 #define BEGIN_RECURSION_PROTECT() do { \
88 if (UNLIKELY (__mf_state == reentrant)) { \
89 write (2, "mf: erroneous reentrancy detected in `", 38); \
90 write (2, __PRETTY_FUNCTION__, strlen(__PRETTY_FUNCTION__)); \
91 write (2, "'\n", 2); \
93 __mf_state = reentrant; \
96 #define END_RECURSION_PROTECT() do { \
97 __mf_state = active; \
102 /* ------------------------------------------------------------------------ */
103 /* Required globals. */
105 #define LOOKUP_CACHE_MASK_DFL 1023
106 #define LOOKUP_CACHE_SIZE_MAX 4096 /* Allows max CACHE_MASK 0x0FFF */
107 #define LOOKUP_CACHE_SHIFT_DFL 2
109 struct __mf_cache __mf_lookup_cache [LOOKUP_CACHE_SIZE_MAX]; /* Fast-path cache consulted by __mf_check. */
110 uintptr_t __mf_lc_mask = LOOKUP_CACHE_MASK_DFL;        /* Current cache index mask (tunable at runtime). */
111 unsigned char __mf_lc_shift = LOOKUP_CACHE_SHIFT_DFL;  /* Pointer right-shift applied before masking. */
112 #define LOOKUP_CACHE_SIZE (__mf_lc_mask + 1)
114 struct __mf_options __mf_opts;  /* Runtime options; defaults set in __mf_set_default_options. */
116 int __mf_starting_p = 1;  /* Nonzero during bootstrap, before __mf_init finishes. */
118 enum __mf_state_enum __mf_state = active;
120 /* See __mf_state_perthread() in mf-hooks.c. */
125 pthread_mutex_t __mf_biglock =  /* Single big lock serializing runtime entry points. */
126 #ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
127 PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
129 PTHREAD_MUTEX_INITIALIZER;
133 /* Use HAVE_PTHREAD_H here instead of LIBMUDFLAPTH, so that even
134 the libmudflap.la (no threading support) can diagnose whether
135 the application is linked with -lpthread. See __mf_usage() below. */
137 #pragma weak pthread_join
138 const void *threads_active_p = (void *) pthread_join; /* Non-NULL iff -lpthread was linked (weak symbol resolved). */
142 /* ------------------------------------------------------------------------ */
143 /* stats-related globals. */
145 static unsigned long __mf_count_check;  /* Total __mfu_check invocations. */
146 static unsigned long __mf_lookup_cache_reusecount [LOOKUP_CACHE_SIZE_MAX]; /* Per-slot overwrite counts. */
147 static unsigned long __mf_treerot_left, __mf_treerot_right;  /* Splay/rotation counters. */
148 static unsigned long __mf_count_register;  /* Total __mfu_register invocations. */
149 static unsigned long __mf_total_register_size [__MF_TYPE_MAX+1]; /* Bytes registered, bucketed by object type. */
150 static unsigned long __mf_count_unregister;  /* Total __mfu_unregister invocations. */
151 static unsigned long __mf_total_unregister_size;
152 static unsigned long __mf_count_violation [__MF_VIOL_WATCH+1]; /* Violations, bucketed by __MF_VIOL_* code. */
153 static unsigned long __mf_sigusr1_received;
154 static unsigned long __mf_sigusr1_handled;
155 /* not static */ unsigned long __mf_reentrancy;
157 /* not static */ unsigned long __mf_lock_contention;
161 /* ------------------------------------------------------------------------ */
162 /* mode-check-related globals. */
164 typedef struct __mf_object
166 uintptr_t low, high; /* __mf_register parameters */
168 char type; /* __MF_TYPE_something */
169 char watching_p; /* Trigger a VIOL_WATCH on access? */
170 unsigned read_count; /* Number of times __mf_check/read was called on this object. */
171 unsigned write_count; /* Likewise for __mf_check/write. */
172 unsigned liveness; /* A measure of recent checking activity. */
173 unsigned description_epoch; /* Last epoch __mf_describe_object printed this. */
176 struct timeval alloc_time;          /* When the object was registered. */
177 char **alloc_backtrace;             /* Symbolized call stack at registration. */
178 size_t alloc_backtrace_size;
180 pthread_t alloc_thread;             /* Thread that registered the object. */
184 uintptr_t dealloc_pc;               /* PC of the unregistering caller. */
185 struct timeval dealloc_time;        /* When the object was unregistered. */
186 char **dealloc_backtrace;           /* Symbolized call stack at unregistration. */
187 size_t dealloc_backtrace_size;
189 pthread_t dealloc_thread;           /* Thread that unregistered the object. */
194 typedef struct __mf_object_tree
197 struct __mf_object_tree *left;
198 struct __mf_object_tree *right;
199 } __mf_object_tree_t;
201 /* Live objects: binary tree on __mf_object_t.low */
202 static __mf_object_tree_t *__mf_object_root;
204 /* Dead objects: circular arrays; _MIN_CEM .. _MAX_CEM only */
205 static unsigned __mf_object_dead_head[__MF_TYPE_MAX_CEM+1]; /* next empty spot */
206 static __mf_object_tree_t *__mf_object_cemetary[__MF_TYPE_MAX_CEM+1][__MF_PERSIST_MAX];
209 /* ------------------------------------------------------------------------ */
210 /* Forward function declarations */
212 static void __mf_init () CTOR;
213 static void __mf_sigusr1_respond ();
214 static unsigned __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
215 __mf_object_tree_t **objs, unsigned max_objs);
216 static unsigned __mf_find_dead_objects (uintptr_t ptr_low, uintptr_t ptr_high,
217 __mf_object_tree_t **objs, unsigned max_objs);
218 static void __mf_link_object (__mf_object_tree_t *obj);
219 static void __mf_age_tree (__mf_object_tree_t *obj);
220 static void __mf_adapt_cache ();
221 static void __mf_unlink_object (__mf_object_tree_t *obj);
222 static void __mf_describe_object (__mf_object_t *obj);
223 static unsigned __mf_watch_or_not (void *ptr, size_t sz, char flag);
227 /* ------------------------------------------------------------------------ */
228 /* Configuration engine */
231 __mf_set_default_options ()
233 memset (& __mf_opts, 0, sizeof (__mf_opts));
235 __mf_opts.tree_aging = 13037;       /* Halve liveness counters every N checks. */
236 __mf_opts.adapt_cache = 1000003;    /* Re-tune cache mask/shift every N misses (prime). */
237 __mf_opts.abbreviate = 1;
238 __mf_opts.verbose_violations = 1;
239 __mf_opts.free_queue_length = 4;    /* Defer this many free() calls. */
240 __mf_opts.persistent_count = 100;   /* Cemetery depth per object class. */
241 __mf_opts.crumple_zone = 32;        /* Red-zone bytes around heap allocations. */
242 __mf_opts.backtrace = 4;            /* Stack-trace depth kept per object. */
243 __mf_opts.mudflap_mode = mode_check;
244 __mf_opts.violation_mode = viol_nop;
245 __mf_opts.heur_std_data = 1;
247 __mf_opts.thread_stack = 0;
266 "mudflaps do nothing",
267 set_option, (int)mode_nop, (int *)&__mf_opts.mudflap_mode},
269 "mudflaps populate object tree",
270 set_option, (int)mode_populate, (int *)&__mf_opts.mudflap_mode},
272 "mudflaps check for memory violations",
273 set_option, (int)mode_check, (int *)&__mf_opts.mudflap_mode},
275 "mudflaps always cause violations (diagnostic)",
276 set_option, (int)mode_violate, (int *)&__mf_opts.mudflap_mode},
279 "violations do not change program execution",
280 set_option, (int)viol_nop, (int *)&__mf_opts.violation_mode},
282 "violations cause a call to abort()",
283 set_option, (int)viol_abort, (int *)&__mf_opts.violation_mode},
285 "violations are promoted to SIGSEGV signals",
286 set_option, (int)viol_segv, (int *)&__mf_opts.violation_mode},
288 "violations fork a gdb process attached to current program",
289 set_option, (int)viol_gdb, (int *)&__mf_opts.violation_mode},
291 "trace calls to mudflap runtime library",
292 set_option, 1, &__mf_opts.trace_mf_calls},
294 "trace internal events within mudflap runtime library",
295 set_option, 1, &__mf_opts.verbose_trace},
297 "collect statistics on mudflap's operation",
298 set_option, 1, &__mf_opts.collect_stats},
301 "print report upon SIGUSR1",
302 set_option, 1, &__mf_opts.sigusr1_report},
304 {"internal-checking",
305 "perform more expensive internal checking",
306 set_option, 1, &__mf_opts.internal_checking},
308 "age the object tree after N accesses for working set",
309 read_integer_option, 1000000, &__mf_opts.tree_aging},
311 "print any memory leaks at program shutdown",
312 set_option, 1, &__mf_opts.print_leaks},
313 {"check-initialization",
314 "detect uninitialized object reads",
315 set_option, 1, &__mf_opts.check_initialization},
316 {"verbose-violations",
317 "print verbose messages when memory violations occur",
318 set_option, 1, &__mf_opts.verbose_violations},
320 "abbreviate repetitive listings",
321 set_option, 1, &__mf_opts.abbreviate},
323 "wipe stack objects at unwind",
324 set_option, 1, &__mf_opts.wipe_stack},
326 "wipe heap objects at free",
327 set_option, 1, &__mf_opts.wipe_heap},
329 "support /proc/self/map heuristics",
330 set_option, 1, &__mf_opts.heur_proc_map},
332 "enable a simple upper stack bound heuristic",
333 set_option, 1, &__mf_opts.heur_stack_bound},
335 "support _start.._end heuristics",
336 set_option, 1, &__mf_opts.heur_start_end},
338 "register standard library data (argv, errno, stdin, ...)",
339 set_option, 1, &__mf_opts.heur_std_data},
340 {"free-queue-length",
341 "queue N deferred free() calls before performing them",
342 read_integer_option, 0, &__mf_opts.free_queue_length},
344 "keep a history of N unregistered regions",
345 read_integer_option, 0, &__mf_opts.persistent_count},
347 "surround allocations with crumple zones of N bytes",
348 read_integer_option, 0, &__mf_opts.crumple_zone},
349 /* XXX: not type-safe.
351 "set lookup cache size mask to N (2**M - 1)",
352 read_integer_option, 0, (int *)(&__mf_lc_mask)},
354 "set lookup cache pointer shift",
355 read_integer_option, 0, (int *)(&__mf_lc_shift)},
358 "adapt mask/shift parameters after N cache misses",
359 read_integer_option, 1, &__mf_opts.adapt_cache},
361 "keep an N-level stack trace of each call context",
362 read_integer_option, 0, &__mf_opts.backtrace},
365 "override thread stacks allocation: N kB",
366 read_integer_option, 0, &__mf_opts.thread_stack},
368 {0, 0, set_option, 0, NULL}  /* Sentinel: terminates table scans. */
377 "This is a %s%sGCC \"mudflap\" memory-checked binary.\n"
378 "Mudflap is Copyright (C) 2002-2003 Free Software Foundation, Inc.\n"
380 "The mudflap code can be controlled by an environment variable:\n"
382 "$ export MUDFLAP_OPTIONS='<options>'\n"
383 "$ <mudflapped_program>\n"
385 "where <options> is a space-separated list of \n"
386 "any of the following options. Use `-no-OPTION' to disable options.\n"
389 (threads_active_p ? "multi-threaded " : "single-threaded "),
399 /* XXX: The multi-threaded thread-unaware combination is bad. */
401 for (opt = options; opt->name; opt++)
403 int default_p = (opt->value == * opt->target);
409 fprintf (stderr, "-%-23.23s %s", opt->name, opt->description);
411 fprintf (stderr, " [active]\n");
413 fprintf (stderr, "\n");
415 case read_integer_option:
/* NOTE(review): strncpy does not NUL-terminate and assumes
   strlen(opt->name) fits in buf alongside "=N".  Option names here are
   short literals so this appears safe in practice, but snprintf(buf,
   sizeof buf, "%s=N", opt->name) would be the robust form — confirm
   buf's declared size (hidden above) before changing. */
416 strncpy (buf, opt->name, 128);
417 strncpy (buf + strlen (opt->name), "=N", 2);
418 fprintf (stderr, "-%-23.23s %s", buf, opt->description);
419 fprintf (stderr, " [%d]\n", * opt->target);
425 fprintf (stderr, "\n");
430 __mf_set_options (const char *optstr)
/* Locked public wrapper: delegates to __mfu_set_options under
   recursion protection. */
434 BEGIN_RECURSION_PROTECT ();
435 rc = __mfu_set_options (optstr);
436 /* XXX: It's not really that easy. A change to a bunch of parameters
437 can require updating auxiliary state or risk crashing:
438 free_queue_length, crumple_zone ... */
439 END_RECURSION_PROTECT ();
446 __mfu_set_options (const char *optstr)
/* Parse a space-separated MUDFLAP_OPTIONS-style string against the
   options[] table, writing matched values through each entry's target
   pointer.  Unlocked worker; callers hold the big lock. */
448 struct option *opts = 0;
452 const char *saved_optstr = optstr;
454 /* XXX: bounds-check for optstr! */
471 if (*optstr == '?' ||
472 strncmp (optstr, "help", 4) == 0)
474 /* Caller will print help and exit. */
478 if (strncmp (optstr, "no-", 3) == 0)
481 optstr = & optstr[3];  /* Skip the "no-" prefix; negate flag set elsewhere. */
484 for (opts = options; opts->name; opts++)
486 if (strncmp (optstr, opts->name, strlen (opts->name)) == 0)
488 optstr += strlen (opts->name);
489 assert (opts->target);
496 *(opts->target) = opts->value;
498 case read_integer_option:
499 if (! negate && (*optstr == '=' && *(optstr+1)))
502 tmp = strtol (optstr, &nxt, 10);
503 if ((optstr != nxt) && (tmp != LONG_MAX))
506 *(opts->target) = (int)tmp;
520 "warning: unrecognized string '%s' in mudflap options\n",
522 optstr += strlen (optstr);  /* Skip the rest of the string after a parse failure. */
528 /* Special post-processing: bound __mf_lc_mask and free_queue_length for security. */
529 __mf_lc_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
530 __mf_opts.free_queue_length &= (__MF_FREEQ_MAX - 1);
532 /* Clear the lookup cache, in case the parameters got changed. */
534 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
536 __mf_lookup_cache[0].low = MAXPTR;  /* Make the all-zero entry a guaranteed miss. */
538 TRACE ("set options from `%s'\n", saved_optstr);
540 /* Call this unconditionally, in case -sigusr1-report was toggled. */
541 __mf_sigusr1_respond ();
550 __mf_resolve_single_dynamic (struct __mf_dynamic_entry *e)
/* Resolve one wrapped libc/pthread symbol to the *next* definition in
   the link chain (RTLD_NEXT), so mudflap's interposing wrappers can
   call the real implementation.  Idempotent: no-op once resolved. */
555 if (e->pointer) return;
558 if (e->version != NULL && e->version[0] != '\0') /* non-null/empty */
559 e->pointer = dlvsym (RTLD_NEXT, e->name, e->version);
562 e->pointer = dlsym (RTLD_NEXT, e->name);
568 fprintf (stderr, "mf: error in dlsym(\"%s\"): %s\n",
574 fprintf (stderr, "mf: dlsym(\"%s\") = NULL\n", e->name);
581 __mf_resolve_dynamics ()
/* Resolve the initial batch of wrapped symbols (those needed during
   bootstrap, up to the dyn_INITRESOLVE marker in __mf_dynamic[]). */
584 for (i = 0; i < dyn_INITRESOLVE; i++)
585 __mf_resolve_single_dynamic (& __mf_dynamic[i]);
589 /* NB: order must match enums in mf-impl.h */
590 struct __mf_dynamic_entry __mf_dynamic [] =
592 {NULL, "calloc", NULL},
593 {NULL, "free", NULL},
594 {NULL, "malloc", NULL},
595 {NULL, "mmap", NULL},
596 {NULL, "munmap", NULL},
597 {NULL, "realloc", NULL},
598 {NULL, "DUMMY", NULL}, /* dyn_INITRESOLVE */
600 {NULL, "pthread_create", PTHREAD_CREATE_VERSION},
601 {NULL, "pthread_join", NULL},
602 {NULL, "pthread_exit", NULL}
610 /* ------------------------------------------------------------------------ */
617 /* This initial bootstrap phase requires that __mf_starting_p = 1. */
619 __mf_resolve_dynamics ();
623 __mf_set_default_options ();
625 ov = getenv ("MUDFLAP_OPTIONS");
628 int rc = __mfu_set_options (ov);
636 /* Initialize to a non-zero description epoch. */
637 __mf_describe_object (NULL);
639 #define REG_RESERVED(obj) \
640 __mf_register (& obj, sizeof(obj), __MF_TYPE_NOACCESS, # obj)
/* Mark the runtime's own key globals NOACCESS so an instrumented
   program cannot read or corrupt them. */
642 REG_RESERVED (__mf_lookup_cache);
643 REG_RESERVED (__mf_lc_mask);
644 REG_RESERVED (__mf_lc_shift);
645 /* XXX: others of our statics? */
647 /* Prevent access to *NULL. */
648 __mf_register (MINPTR, 1, __MF_TYPE_NOACCESS, "NULL");
649 __mf_lookup_cache[0].low = (uintptr_t) -1;
655 __wrap_main (int argc, char* argv[])
/* Interposed main(): on first entry, register the standard process
   data (argv and its strings, environ and its strings, errno, and the
   stdio FILE structs) as STATIC objects, then chain to the real main.
   Guarded by `been_here' so re-entry registers nothing twice. */
657 extern char **environ;
659 static int been_here = 0;
661 if (__mf_opts.heur_std_data && ! been_here)
666 __mf_register (argv, sizeof(char *)*(argc+1), __MF_TYPE_STATIC, "argv[]");
667 for (i=0; i<argc; i++)
669 unsigned j = strlen (argv[i]);
670 __mf_register (argv[i], j+1, __MF_TYPE_STATIC, "argv element");
675 char *e = environ[i];
677 if (e == NULL) break;
678 j = strlen (environ[i]);
679 __mf_register (environ[i], j+1, __MF_TYPE_STATIC, "environ element");
681 __mf_register (environ, sizeof(char *)*(i+1), __MF_TYPE_STATIC, "environ[]");
683 __mf_register (& errno, sizeof (errno), __MF_TYPE_STATIC, "errno area");
685 __mf_register (stdin, sizeof (*stdin), __MF_TYPE_STATIC, "stdin");
686 __mf_register (stdout, sizeof (*stdout), __MF_TYPE_STATIC, "stdout");
687 __mf_register (stderr, sizeof (*stderr), __MF_TYPE_STATIC, "stderr");
691 return main (argc, argv, environ);
693 return __real_main (argc, argv, environ);
699 extern void __mf_fini () DTOR;  /* Destructor: runs at program shutdown. */
702 TRACE ("__mf_fini\n");
708 /* ------------------------------------------------------------------------ */
/* Locked public wrapper around __mfu_check. */
711 void __mf_check (void *ptr, size_t sz, int type, const char *location)
714 BEGIN_RECURSION_PROTECT ();
715 __mfu_check (ptr, sz, type, location);
716 END_RECURSION_PROTECT ();
721 void __mfu_check (void *ptr, size_t sz, int type, const char *location)
/* Core access check (unlocked worker).  Decides whether the access
   [ptr, ptr+sz) of the given type (read/write) is legal, updates the
   lookup cache on success, and reports a violation otherwise.
   `judgement' drives the outcome: 0=undecided, <0=violation, >0=okay. */
723 unsigned entry_idx = __MF_CACHE_INDEX (ptr);
724 struct __mf_cache *entry = & __mf_lookup_cache [entry_idx];
725 int judgement = 0; /* 0=undecided; <0=violation; >0=okay */
726 uintptr_t ptr_low = (uintptr_t) ptr;
727 uintptr_t ptr_high = CLAMPSZ (ptr, sz);
728 struct __mf_cache old_entry = *entry;  /* Snapshot for reuse statistics below. */
730 if (UNLIKELY (__mf_opts.sigusr1_report))
731 __mf_sigusr1_respond ();
733 TRACE ("check ptr=%p b=%u size=%lu %s location=`%s'\n",
734 ptr, entry_idx, (unsigned long)sz,
735 (type == 0 ? "read" : "write"), location);
737 switch (__mf_opts.mudflap_mode)
741 entry->high = MAXPTR;
746 entry->low = ptr_low;
747 entry->high = ptr_high;
753 unsigned heuristics = 0;
755 /* Advance aging/adaptation counters. */
756 if (__mf_object_root)
758 static unsigned aging_count;
759 static unsigned adapt_count;
762 if (UNLIKELY (__mf_opts.tree_aging > 0 &&
763 aging_count > __mf_opts.tree_aging))
766 __mf_age_tree (__mf_object_root);
768 if (UNLIKELY (__mf_opts.adapt_cache > 0 &&
769 adapt_count > __mf_opts.adapt_cache))
776 /* Looping only occurs if heuristics were triggered. */
777 while (judgement == 0)
779 __mf_object_tree_t* ovr_obj[1];
782 obj_count = __mf_find_objects (ptr_low, ptr_high, ovr_obj, 1);
784 if (LIKELY (obj_count == 1)) /* A single hit! */
786 __mf_object_t *obj = & ovr_obj[0]->data;
787 assert (obj != NULL);
788 if (LIKELY (ptr_low >= obj->low && ptr_high <= obj->high))
790 /* XXX: hope for no overflow! */
791 if (type == __MF_CHECK_READ)
798 if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
800 else if (UNLIKELY (obj->watching_p))
801 judgement = -2; /* trigger VIOL_WATCH */
802 else if (UNLIKELY (__mf_opts.check_initialization
804 && type == __MF_CHECK_READ
806 && obj->write_count == 0
807 /* uninitialized (heap) */
808 && obj->type == __MF_TYPE_HEAP))
813 entry->low = obj->low;
814 entry->high = obj->high;
818 /* The object did not cover the entire accessed region. */
820 else if (LIKELY (obj_count > 1))
822 __mf_object_tree_t **all_ovr_objs;
824 DECLARE (void *, malloc, size_t c);
825 DECLARE (void, free, void *p);
827 all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_tree_t *) *
829 if (all_ovr_objs == NULL) abort ();
830 n = __mf_find_objects (ptr_low, ptr_high, all_ovr_objs, obj_count);
831 assert (n == obj_count);
833 /* Confirm that accessed range is covered by first/last object. */
834 if (LIKELY ((ptr_low >= all_ovr_objs[0]->data.low) &&
835 (ptr_high <= all_ovr_objs[obj_count-1]->data.high)))
837 /* Presume valid access. */
840 /* Confirm that intermediate objects are
841 contiguous and share a single name. Thus they
842 are likely split up GUESS regions, or mmap
843 pages. The idea of the name check is to
844 prevent an oversize access to a
845 stack-registered object (followed by some GUESS
846 type) from being accepted as a hit. */
847 for (n=0; n<obj_count-1; n++)
849 __mf_object_t *obj = & (all_ovr_objs[n]->data);
850 __mf_object_t *nextobj = & (all_ovr_objs[n+1]->data);
852 if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
853 judgement = -1; /* Force error. */
855 if (UNLIKELY (judgement == 1 &&
856 (obj->high + 1 != nextobj->low)))
857 judgement = 0; /* Cancel presumption. */
859 if (UNLIKELY (judgement == 1 &&
860 (obj->name != nextobj->name)))
861 judgement = 0; /* Cancel presumption. */
862 /* NB: strcmp above is not necessary since the
863 same literal string pointer is normally
864 used when creating regions. */
866 /* XXX: hope for no overflow! */
867 if (type == __MF_CHECK_READ)
874 /* If the access is otherwise successful, check whether
875 any of the covered objects are being watched. */
879 for (i=0; i<obj_count; i++)
880 if (all_ovr_objs[i]->data.watching_p)
881 judgement = -2; /* trigger VIOL_WATCH */
884 /* Check for uninitialized reads. */
886 __mf_opts.check_initialization &&
887 type == __MF_CHECK_READ)
890 unsigned written_count = 0;
892 for (i=0; i<obj_count; i++)
894 __mf_object_t *obj = & all_ovr_objs[i]->data;
897 || obj->type == __MF_TYPE_HEAP_I
898 || obj->type == __MF_TYPE_GUESS)
902 /* Check for ALL pieces having been written-to.
903 XXX: should this be ANY instead? */
904 if (written_count != obj_count)
908 /* Fill out the cache with the bounds of the first
909 object and the last object that covers this
910 cache line (== includes the same __MF_CACHE_INDEX).
911 This could let this cache line span *two* distinct
912 registered objects: a peculiar but reasonable
913 situation. The cache line may not include the
914 entire object though. */
918 entry->low = all_ovr_objs[0]->data.low;
919 for (i=0; i<obj_count; i++)
921 uintptr_t high = all_ovr_objs[i]->data.high;
922 if (__MF_CACHE_INDEX (high) == entry_idx)
928 CALL_REAL (free, all_ovr_objs);
933 if (heuristics++ < 2) /* XXX parametrize this number? */
934 judgement = __mf_heuristic_check (ptr_low, ptr_high);
948 if (__mf_opts.collect_stats)
952 if (LIKELY (old_entry.low != entry->low || old_entry.high != entry->high))
953 /* && (old_entry.low != 0) && (old_entry.high != 0)) */
954 __mf_lookup_cache_reusecount [entry_idx] ++;
957 if (UNLIKELY (judgement < 0))
958 __mf_violation (ptr, sz,
959 (uintptr_t) __builtin_return_address (0), location,
961 (type == __MF_CHECK_READ ? __MF_VIOL_READ : __MF_VIOL_WRITE) :
966 static __mf_object_tree_t *
/* Allocate (via the real calloc) and populate a new tree node for the
   object [low, high], record provenance (pc, time, thread, optional
   backtrace for heap objects), and link it into the live-object tree. */
967 __mf_insert_new_object (uintptr_t low, uintptr_t high, int type,
968 const char *name, uintptr_t pc)
970 DECLARE (void *, calloc, size_t c, size_t n);
972 __mf_object_tree_t *new_obj;
/* NOTE(review): CALL_REAL(calloc) result is dereferenced without a
   NULL check — an allocation failure here would crash the runtime.
   Confirm whether the hidden surroundings handle this. */
973 new_obj = CALL_REAL (calloc, 1, sizeof(__mf_object_tree_t));
974 new_obj->data.low = low;
975 new_obj->data.high = high;
976 new_obj->data.type = type;
977 new_obj->data.name = name;
978 new_obj->data.alloc_pc = pc;
979 #if HAVE_GETTIMEOFDAY
980 gettimeofday (& new_obj->data.alloc_time, NULL);
983 new_obj->data.alloc_thread = pthread_self ();
986 if (__mf_opts.backtrace > 0 && (type == __MF_TYPE_HEAP || type == __MF_TYPE_HEAP_I))
987 new_obj->data.alloc_backtrace_size =
988 __mf_backtrace (& new_obj->data.alloc_backtrace,
991 __mf_link_object (new_obj);
997 __mf_uncache_object (__mf_object_t *old_obj)
999 /* Remove any low/high pointers for this object from the lookup cache. */
1001 /* Can it possibly exist in the cache? */
1002 if (LIKELY (old_obj->read_count + old_obj->write_count))
1004 uintptr_t low = old_obj->low;
1005 uintptr_t high = old_obj->high;
1006 unsigned idx_low = __MF_CACHE_INDEX (low);
1007 unsigned idx_high = __MF_CACHE_INDEX (high);
1009 for (i = idx_low; i <= idx_high; i++)
1011 struct __mf_cache *entry = & __mf_lookup_cache [i];
1012 /* NB: the "||" in the following test permits this code to
1013 tolerate the situation introduced by __mf_check over
1014 contiguous objects, where a cache entry spans several
1016 if (entry->low == low || entry->high == high)
/* Reset to the "empty" sentinel: low > high never matches. */
1018 entry->low = MAXPTR;
1019 entry->high = MINPTR;
/* Locked public wrapper around __mfu_register. */
1027 __mf_register (void *ptr, size_t sz, int type, const char *name)
1030 BEGIN_RECURSION_PROTECT ();
1031 __mfu_register (ptr, sz, type, name);
1032 END_RECURSION_PROTECT ();
1038 __mfu_register (void *ptr, size_t sz, int type, const char *name)
/* Register the object [ptr, ptr+sz) of the given __MF_TYPE_* class
   (unlocked worker).  Duplicate STATIC/GUESS registrations are
   tolerated; GUESS regions are split around existing objects; any
   other overlap is a __MF_VIOL_REGISTER violation. */
1040 TRACE ("register ptr=%p size=%lu type=%x name='%s'\n",
1041 ptr, (unsigned long) sz, type, name ? name : "");
1043 if (__mf_opts.collect_stats)
1045 __mf_count_register ++;
1046 __mf_total_register_size [(type < 0) ? 0 :
1047 (type > __MF_TYPE_MAX) ? 0 :
1051 if (UNLIKELY (__mf_opts.sigusr1_report))
1052 __mf_sigusr1_respond ();
1054 switch (__mf_opts.mudflap_mode)
1060 __mf_violation (ptr, sz, (uintptr_t) __builtin_return_address (0), NULL,
1061 __MF_VIOL_REGISTER);
1065 /* Clear the cache. */
1066 /* XXX: why the entire cache? */
1068 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1070 __mf_lookup_cache[0].low = MAXPTR;  /* Keep the zeroed entry a guaranteed miss. */
1075 __mf_object_tree_t *ovr_objs [1];
1076 unsigned num_overlapping_objs;
1077 uintptr_t low = (uintptr_t) ptr;
1078 uintptr_t high = CLAMPSZ (ptr, sz);
1079 uintptr_t pc = (uintptr_t) __builtin_return_address (0);
1081 /* Treat unknown size indication as 1. */
1082 if (UNLIKELY (sz == 0)) sz = 1;
1084 num_overlapping_objs = __mf_find_objects (low, high, ovr_objs, 1);
1086 /* Handle overlaps. */
1087 if (UNLIKELY (num_overlapping_objs > 0))
1089 __mf_object_tree_t *ovr_obj = ovr_objs[0];
1091 /* Quietly accept a single duplicate registration for
1092 static objects, since these may come from distinct
1093 compilation units. */
1094 if (type == __MF_TYPE_STATIC
1095 && ovr_obj->data.type == __MF_TYPE_STATIC
1096 && ovr_obj->data.low == low
1097 && ovr_obj->data.high == high)
1100 VERBOSE_TRACE ("duplicate static reg %p-%p `%s'\n",
1101 (void *) low, (void *) high,
1102 (ovr_obj->data.name ? ovr_obj->data.name : ""));
1106 /* Quietly accept a single duplicate registration for
1107 guess objects too. */
1108 if (type == __MF_TYPE_GUESS &&
1109 ovr_obj->data.type == __MF_TYPE_GUESS &&
1110 ovr_obj->data.low == low &&
1111 ovr_obj->data.high == high)
1114 VERBOSE_TRACE ("duplicate guess reg %p-%p\n",
1115 (void *) low, (void *) high);
1119 /* Quietly accept new a guess registration that overlaps
1120 at least one existing object. Trim it down to size. */
1121 else if (type == __MF_TYPE_GUESS)
1123 /* We need to split this new GUESS region into some
1124 smaller ones. Or we may not need to insert it at
1125 all if it is covered by the overlapping region. */
1127 /* First, identify all the overlapping objects. */
1128 __mf_object_tree_t **all_ovr_objs;
1129 unsigned num_ovr_objs, n;
1131 DECLARE (void *, malloc, size_t c);
1132 DECLARE (void, free, void *p);
1134 all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_tree_t *) *
1135 num_overlapping_objs));
1136 if (all_ovr_objs == NULL) abort ();
1137 num_ovr_objs = __mf_find_objects (low, high, all_ovr_objs,
1138 num_overlapping_objs);
1139 assert (num_ovr_objs == num_overlapping_objs);
1141 VERBOSE_TRACE ("splitting guess %p-%p, # overlaps: %u\n",
1142 (void *) low, (void *) high, num_ovr_objs);
1144 /* Add GUESS regions between the holes: before each
1145 overlapping region. */
1148 /* This makes use of the assumption that __mf_find_objects() returns
1149 overlapping objects in an increasing sequence. */
1150 for (n=0; n < min (num_ovr_objs, num_overlapping_objs); n++)
1152 if (all_ovr_objs[n]->data.low > next_low) /* Gap? */
1154 uintptr_t next_high = CLAMPSUB (all_ovr_objs[n]->data.low, 1);
1155 __mfu_register ((void *) next_low, next_high-next_low+1,
1156 __MF_TYPE_GUESS, name);
1158 next_low = CLAMPADD (all_ovr_objs[n]->data.high, 1);
1160 /* Add in any leftover room at the top. */
1161 if (next_low <= high)
1162 __mfu_register ((void *) next_low, high-next_low+1,
1163 __MF_TYPE_GUESS, name);
1165 /* XXX: future optimization: allow consecutive GUESS regions to
1166 be glued together. */
1167 CALL_REAL (free, all_ovr_objs);
1171 /* Quietly accept a non-GUESS region overlaying a GUESS
1172 region. Handle it by removing the GUESS region
1173 temporarily, then recursively adding this new object,
1174 and then the GUESS back. The latter will be split up
1175 by the recursive process above. */
1176 else if (ovr_obj->data.type == __MF_TYPE_GUESS)
1178 uintptr_t old_low = ovr_obj->data.low;
1179 uintptr_t old_high = ovr_obj->data.high;
1180 const char* old_name = ovr_obj->data.name;
1182 /* Now to recursively remove the guess piece, and
1183 reinsert them in the opposite order. Recursion
1184 should bottom out if another non-GUESS overlapping
1185 region is found for this new object (resulting in a
1186 violation), or if no further overlap occurs. The
1187 located GUESS region should end up being split up
1189 __mfu_unregister ((void *) old_low, old_high-old_low+1);
1190 __mfu_register ((void *) low, sz, type, name);
1191 __mfu_register ((void *) old_low, old_high-old_low+1,
1192 __MF_TYPE_GUESS, old_name);
1196 /* Alas, a genuine violation. */
1199 /* Two or more *real* mappings here. */
1200 __mf_violation ((void *) ptr, sz,
1201 (uintptr_t) __builtin_return_address (0), NULL,
1202 __MF_VIOL_REGISTER);
1206 /* No overlapping objects: AOK. */
1209 __mf_insert_new_object (low, high, type, name, pc);
1212 /* We could conceivably call __mf_check() here to prime the cache,
1213 but then the read_count/write_count field is not reliable. */
1217 } /* end switch (__mf_opts.mudflap_mode) */
/* Locked public wrapper around __mfu_unregister. */
1222 __mf_unregister (void *ptr, size_t sz)
1225 BEGIN_RECURSION_PROTECT ();
1226 __mfu_unregister (ptr, sz);
1227 END_RECURSION_PROTECT ();
1233 __mfu_unregister (void *ptr, size_t sz)
/* Unregister the object starting at ptr (unlocked worker): unlink it
   from the live tree, purge its lookup-cache entries, optionally wipe
   its contents, and retire it to the per-type cemetery ring so recent
   frees can still be described in violation reports. */
1235 DECLARE (void, free, void *ptr);
1237 if (UNLIKELY (__mf_opts.sigusr1_report))
1238 __mf_sigusr1_respond ();
1240 TRACE ("unregister ptr=%p size=%lu\n", ptr, (unsigned long) sz);
1242 switch (__mf_opts.mudflap_mode)
1248 __mf_violation (ptr, sz,
1249 (uintptr_t) __builtin_return_address (0), NULL,
1250 __MF_VIOL_UNREGISTER);
1254 /* Clear the cache. */
1256 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1258 __mf_lookup_cache[0].low = MAXPTR;  /* Keep the zeroed entry a guaranteed miss. */
1263 __mf_object_tree_t *old_obj = NULL;
1264 __mf_object_tree_t *del_obj = NULL; /* Object to actually delete. */
1265 __mf_object_tree_t *objs[1] = {NULL};
1266 unsigned num_overlapping_objs;
1268 /* Treat unknown size indication as 1. */
1269 if (sz == 0) sz = 1;
1271 num_overlapping_objs = __mf_find_objects ((uintptr_t) ptr,
1272 CLAMPSZ (ptr, sz), objs, 1);
1274 /* XXX: handle unregistration of big old GUESS region, that has since
1278 if (UNLIKELY (num_overlapping_objs != 1 ||
1279 (uintptr_t)ptr != old_obj->data.low)) /* XXX: what about sz? */
1281 __mf_violation (ptr, sz,
1282 (uintptr_t) __builtin_return_address (0), NULL,
1283 __MF_VIOL_UNREGISTER);
1287 __mf_unlink_object (old_obj);
1288 __mf_uncache_object (& old_obj->data);
1290 /* Wipe buffer contents if desired. */
1291 if ((__mf_opts.wipe_stack && old_obj->data.type == __MF_TYPE_STACK)
1292 || (__mf_opts.wipe_heap && (old_obj->data.type == __MF_TYPE_HEAP
1293 || old_obj->data.type == __MF_TYPE_HEAP_I)))
1295 memset ((void *) old_obj->data.low,
1297 (size_t) (old_obj->data.high - old_obj->data.low + 1));
1300 /* Manage the object cemetary. */
1301 if (__mf_opts.persistent_count > 0 &&
1302 old_obj->data.type >= 0 &&
1303 old_obj->data.type <= __MF_TYPE_MAX_CEM)
1305 old_obj->data.deallocated_p = 1;
1306 old_obj->left = old_obj->right = NULL;  /* Detached from the tree now. */
1307 old_obj->data.dealloc_pc = (uintptr_t) __builtin_return_address (0);
1308 #if HAVE_GETTIMEOFDAY
1309 gettimeofday (& old_obj->data.dealloc_time, NULL);
1312 old_obj->data.dealloc_thread = pthread_self ();
1315 if (__mf_opts.backtrace > 0 && old_obj->data.type == __MF_TYPE_HEAP)
1316 old_obj->data.dealloc_backtrace_size =
1317 __mf_backtrace (& old_obj->data.dealloc_backtrace,
1320 /* Encourage this object to be displayed again in current epoch. */
1321 old_obj->data.description_epoch --;
1323 /* Put this object into the cemetary. This may require this plot to
1324 be recycled, and the previous resident to be designated del_obj. */
1326 unsigned row = old_obj->data.type;
1327 unsigned plot = __mf_object_dead_head [row];
1329 del_obj = __mf_object_cemetary [row][plot];  /* Evicted prior resident, may be NULL. */
1330 __mf_object_cemetary [row][plot] = old_obj;
1332 if (plot == __mf_opts.persistent_count) plot = 0;  /* Ring wrap-around. */
1333 __mf_object_dead_head [row] = plot;
1339 if (__mf_opts.print_leaks)
1341 if ((old_obj->data.read_count + old_obj->data.write_count) == 0 &&
1342 (old_obj->data.type == __MF_TYPE_HEAP
1343 || old_obj->data.type == __MF_TYPE_HEAP_I))
1347 "mudflap warning: unaccessed registered object:\n");
1348 __mf_describe_object (& old_obj->data);
1352 if (del_obj != NULL) /* May or may not equal old_obj. */
1354 if (__mf_opts.backtrace > 0)
1356 CALL_REAL(free, del_obj->data.alloc_backtrace);
1357 if (__mf_opts.persistent_count > 0)
1359 CALL_REAL(free, del_obj->data.dealloc_backtrace);
1362 CALL_REAL(free, del_obj);
1367 } /* end switch (__mf_opts.mudflap_mode) */
1370 if (__mf_opts.collect_stats)
1372 __mf_count_unregister ++;
1373 __mf_total_unregister_size += sz;
1377 /* ------------------------------------------------------------------------ */
1378 /* __mf_validate_live_object_tree, _object_cemetary */
1381 __mf_validate_live_object_tree (__mf_object_tree_t *obj)
1383 assert (obj != NULL);
1385 if (__mf_opts.persistent_count > 0)
1386 assert (! obj->data.deallocated_p);
1390 assert (obj->left->data.high < obj->data.low);
1391 __mf_validate_live_object_tree (obj->left);
1395 assert (obj->right->data.low > obj->data.high);
1396 __mf_validate_live_object_tree (obj->right);
1401 __mf_validate_object_cemetary ()
1406 for (cls = 0; cls <= __MF_TYPE_MAX_CEM; cls++)
1408 assert (__mf_object_dead_head [cls] >= 0 &&
1409 __mf_object_dead_head [cls] < __mf_opts.persistent_count);
1410 for (i = 0; i < __mf_opts.persistent_count; i++)
1412 __mf_object_tree_t *obj = __mf_object_cemetary [cls][i];
1415 assert (obj->data.deallocated_p);
1416 assert (obj->left == NULL);
1417 assert (obj->right == NULL);
1424 __mf_validate_objects ()
1426 if (__mf_object_root)
1427 __mf_validate_live_object_tree (__mf_object_root);
1429 if (__mf_opts.persistent_count > 0)
1430 __mf_validate_object_cemetary ();
1435 __mf_age_tree (__mf_object_tree_t *obj)
1437 assert (obj != NULL);
1438 obj->data.liveness = obj->data.liveness >> 1;
1441 __mf_age_tree (obj->left);
1443 __mf_age_tree (obj->right);
/* Accumulator for __mf_tree_analyze: aggregate liveness statistics
   over the live-object tree, consumed by __mf_adapt_cache when
   retuning the lookup cache.  NOTE(review): the struct header and the
   obj_count field were absent from this fragment; obj_count is
   restored because later code reads s.obj_count -- confirm against the
   master copy.  */
struct tree_stats
{
  unsigned obj_count;            /* Accessed objects visited.  */
  unsigned long total_size;      /* Sum of object sizes in bytes.  */
  unsigned live_obj_count;       /* Objects with nonzero liveness.  */
  double total_weight;           /* Sum of liveness values.  */
  double weighted_size;          /* Object size weighted by liveness.  */
  /* Per-bit histogram of object base addresses, weighted by liveness;
     second index is the bit value (0 or 1).  */
  unsigned long weighted_address_bits [sizeof (uintptr_t) * 8][2];
};
1460 __mf_tree_analyze (__mf_object_tree_t *obj, struct tree_stats* s)
1462 assert (obj != NULL);
1465 __mf_tree_analyze (obj->left, s);
1467 /* Exclude never-accessed objects. */
1468 if (obj->data.read_count + obj->data.write_count)
1471 s->total_size += (obj->data.high - obj->data.low + 1);
1473 if (obj->data.liveness)
1478 VERBOSE_TRACE ("analyze low=%p live=%u name=`%s'\n",
1479 (void *) obj->data.low, obj->data.liveness, obj->data.name);
1481 s->live_obj_count ++;
1482 s->total_weight += (double) obj->data.liveness;
1484 (double) (obj->data.high - obj->data.low + 1) *
1485 (double) obj->data.liveness;
1487 addr = obj->data.low;
1488 for (i=0; i<sizeof(uintptr_t) * 8; i++)
1490 unsigned bit = addr & 1;
1491 s->weighted_address_bits[i][bit] += obj->data.liveness;
1498 __mf_tree_analyze (obj->right, s);
1505 struct tree_stats s;
1506 uintptr_t new_mask = 0;
1507 unsigned char new_shift;
1508 float cache_utilization;
1510 static float smoothed_new_shift = -1.0;
1513 memset (&s, 0, sizeof (s));
1514 if (__mf_object_root)
1515 __mf_tree_analyze (__mf_object_root, & s);
1517 /* Maybe we're dealing with funny aging/adaptation parameters, or an
1518 empty tree. Just leave the cache alone in such cases, rather
1519 than risk dying by division-by-zero. */
1520 if (! (s.obj_count > 0) && (s.live_obj_count > 0) && (s.total_weight > 0.0))
1523 /* Guess a good value for the shift parameter by finding an address bit that is a
1524 good discriminant of lively objects. */
1526 for (i=0; i<sizeof (uintptr_t)*8; i++)
1528 float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
1529 if (max_value < value) max_value = value;
1531 for (i=0; i<sizeof (uintptr_t)*8; i++)
1533 float shoulder_factor = 0.7; /* Include slightly less popular bits too. */
1534 float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
1535 if (value >= max_value * shoulder_factor)
1538 if (smoothed_new_shift < 0) smoothed_new_shift = __mf_lc_shift;
1539 /* Converge toward this slowly to reduce flapping. */
1540 smoothed_new_shift = 0.9*smoothed_new_shift + 0.1*i;
1541 new_shift = (unsigned) (smoothed_new_shift + 0.5);
1542 assert (new_shift < sizeof (uintptr_t)*8);
1544 /* Count number of used buckets. */
1545 cache_utilization = 0.0;
1546 for (i = 0; i < (1 + __mf_lc_mask); i++)
1547 if (__mf_lookup_cache[i].low != 0 || __mf_lookup_cache[i].high != 0)
1548 cache_utilization += 1.0;
1549 cache_utilization /= (1 + __mf_lc_mask);
1551 new_mask |= 0x3ff; /* XXX: force a large cache. */
1552 new_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
1554 VERBOSE_TRACE ("adapt cache obj=%u/%u sizes=%lu/%.0f/%.0f => "
1555 "util=%u%% m=%p s=%u\n",
1556 s.obj_count, s.live_obj_count, s.total_size, s.total_weight, s.weighted_size,
1557 (unsigned)(cache_utilization*100.0), (void *) new_mask, new_shift);
1559 /* We should reinitialize cache if its parameters have changed. */
1560 if (new_mask != __mf_lc_mask ||
1561 new_shift != __mf_lc_shift)
1563 __mf_lc_mask = new_mask;
1564 __mf_lc_shift = new_shift;
1566 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1568 __mf_lookup_cache[0].low = MAXPTR;
1575 /* __mf_find_object[s] */
1577 /* Find overlapping live objects between [low,high]. Return up to
1578 max_objs of their pointers in objs[]. Return total count of
1579 overlaps (may exceed max_objs). */
1581 /* XXX: track traversal statistics, like average depth, balance. */
1584 __mf_find_objects_rec (uintptr_t low, uintptr_t high, __mf_object_tree_t **nodep,
1585 __mf_object_tree_t **objs, unsigned max_objs)
1588 __mf_object_tree_t *node = *nodep;
1590 assert (low <= high);
1591 assert (max_objs == 0 || objs != NULL);
1593 if (UNLIKELY (node == NULL)) return 0;
1595 /* Traverse down left subtree. */
1597 if (low < node->data.low)
1598 count += __mf_find_objects_rec (low, min(high, node->data.low),
1599 & node->left, objs, max_objs);
1601 /* Track the used slots of objs[]. */
1602 if (count < max_objs)
1612 /* Check for overlap with this node. */
1613 if (high >= node->data.low && low <= node->data.high)
1616 if (max_objs > 0) /* Any room left? */
1624 /* Traverse down right subtree. */
1625 if (high > node->data.high)
1626 count += __mf_find_objects_rec (max (low, node->data.high), high,
1627 & node->right, objs, max_objs);
1628 /* There is no need to manipulate objs/max_objs any further. */
1631 /* Rotate a child node up if its access count is higher. */
1632 if (UNLIKELY ((node->left && node->left->data.liveness > node->data.liveness) &&
1633 ((!node->right || (node->right &&
1634 node->left->data.liveness >
1635 node->right->data.liveness)))))
1637 __mf_object_tree_t *l = node->left;
1638 __mf_object_tree_t *l_r = l->right;
1643 __mf_treerot_left ++;
1645 else if (UNLIKELY ((node->right && node->right->data.liveness > node->data.liveness) &&
1646 ((!node->left || (node->left &&
1647 node->right->data.liveness >
1648 node->left->data.liveness)))))
1650 __mf_object_tree_t *r = node->right;
1651 __mf_object_tree_t *r_l = r->left;
1656 __mf_treerot_right ++;
1664 __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
1665 __mf_object_tree_t **objs, unsigned max_objs)
1667 if (UNLIKELY(__mf_opts.internal_checking))
1668 __mf_validate_objects ();
1670 return __mf_find_objects_rec (ptr_low, ptr_high, & __mf_object_root, objs, max_objs);
1673 /* __mf_link_object */
1676 __mf_link_object2 (__mf_object_tree_t *ptr, __mf_object_tree_t **link)
1678 __mf_object_tree_t *node = *link;
1680 assert (ptr != NULL);
1681 if (UNLIKELY(node == NULL))
1687 if (ptr->data.high < node->data.low)
1688 return __mf_link_object2 (ptr, & node->left);
1689 else if (ptr->data.low > node->data.high)
1690 return __mf_link_object2 (ptr, & node->right);
1692 abort (); /* XXX: duplicate object */
1697 __mf_link_object (__mf_object_tree_t *ptr)
1699 if (UNLIKELY(__mf_opts.internal_checking))
1700 __mf_validate_objects ();
1702 return __mf_link_object2 (ptr, & __mf_object_root);
1705 /* __mf_unlink_object */
1708 __mf_unlink_object2 (__mf_object_tree_t *ptr, __mf_object_tree_t **link)
1710 __mf_object_tree_t *node = *link;
1712 assert (ptr != NULL);
1713 if (UNLIKELY(node == ptr))
1715 static unsigned promote_left_p = 0;
1716 promote_left_p = 1 - promote_left_p;
1718 /* Alternate promoting the left & right subtrees. */
1722 if (ptr->right != NULL)
1723 __mf_link_object2 (ptr->right, link);
1728 if (ptr->left != NULL)
1729 __mf_link_object2 (ptr->left, link);
1735 if (ptr->data.high < node->data.low)
1736 return __mf_unlink_object2 (ptr, & node->left);
1737 else if (ptr->data.low > node->data.high)
1738 return __mf_unlink_object2 (ptr, & node->right);
1740 abort (); /* XXX: missing object; should fail more gracefully. */
1744 __mf_unlink_object (__mf_object_tree_t *node)
1746 __mf_unlink_object2 (node, & __mf_object_root);
1749 /* __mf_find_dead_objects */
1751 /* Find overlapping dead objects between [low,high]. Return up to
1752 max_objs of their pointers in objs[]. Return total count of
1753 overlaps (may exceed max_objs). */
1756 __mf_find_dead_objects (uintptr_t low, uintptr_t high,
1757 __mf_object_tree_t **objs, unsigned max_objs)
1759 if (__mf_opts.persistent_count > 0)
1762 unsigned recollection = 0;
1765 assert (low <= high);
1766 assert (max_objs == 0 || objs != NULL);
1768 /* Widen the search from the most recent plots in each row, looking
1769 backward in time. */
1771 while (recollection < __mf_opts.persistent_count)
1775 for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
1780 plot = __mf_object_dead_head [row];
1781 for (i = 0; i <= recollection; i ++)
1783 __mf_object_tree_t *obj;
1785 /* Look backward through row: it's a circular buffer. */
1786 if (plot > 0) plot --;
1787 else plot = __mf_opts.persistent_count - 1;
1789 obj = __mf_object_cemetary [row][plot];
1790 if (obj && obj->data.low <= high && obj->data.high >= low)
1792 /* Found an overlapping dead object! */
1793 if (count < max_objs)
1803 /* Look farther back in time. */
1804 recollection = (recollection * 2) + 1;
1813 /* __mf_describe_object */
/* Print a human-readable description of *OBJ to stderr: identity,
   bounds, type, access counts, and allocation (plus, when the cemetery
   is enabled and the object died, deallocation) context.
   NOTE(review): this fragment elides several interleaved lines
   (braces, fprintf continuations, the epoch increment), so the
   commentary below is hedged accordingly.  */
1816 __mf_describe_object (__mf_object_t *obj)
1818 static unsigned epoch = 0;
/* Abbreviated form: an object already described during the current
   epoch gets only its name reprinted.  */
1825 if (__mf_opts.abbreviate && obj->description_epoch == epoch)
1828 "mudflap object %p: name=`%s'\n",
1829 (void *) obj, (obj->name ? obj->name : ""));
/* Full form: record the epoch so later calls can abbreviate.  */
1833 obj->description_epoch = epoch;
1836 "mudflap object %p: name=`%s'\n"
1837 "bounds=[%p,%p] size=%lu area=%s check=%ur/%uw liveness=%u%s\n"
1838 "alloc time=%lu.%06lu pc=%p"
1843 (void *) obj, (obj->name ? obj->name : ""),
1844 (void *) obj->low, (void *) obj->high,
1845 (unsigned long) (obj->high - obj->low + 1),
/* Translate the __MF_TYPE_* code into a printable area name.  */
1846 (obj->type == __MF_TYPE_NOACCESS ? "no-access" :
1847 obj->type == __MF_TYPE_HEAP ? "heap" :
1848 obj->type == __MF_TYPE_HEAP_I ? "heap-init" :
1849 obj->type == __MF_TYPE_STACK ? "stack" :
1850 obj->type == __MF_TYPE_STATIC ? "static" :
1851 obj->type == __MF_TYPE_GUESS ? "guess" :
1853 obj->read_count, obj->write_count, obj->liveness,
1854 obj->watching_p ? " watching" : "",
1855 obj->alloc_time.tv_sec, obj->alloc_time.tv_usec,
1856 (void *) obj->alloc_pc
/* Thread id argument -- presumably inside a threading #ifdef whose
   lines are elided here; confirm against the master copy.  */
1858 , (unsigned) obj->alloc_thread
/* Allocation-time backtrace, when backtraces were captured.  */
1862 if (__mf_opts.backtrace > 0)
1865 for (i=0; i<obj->alloc_backtrace_size; i++)
1866 fprintf (stderr, " %s\n", obj->alloc_backtrace[i]);
/* Deallocation details exist only when the cemetery is enabled and
   the object has actually been deallocated.  */
1869 if (__mf_opts.persistent_count > 0)
1871 if (obj->deallocated_p)
1873 fprintf (stderr, "dealloc time=%lu.%06lu pc=%p"
1878 obj->dealloc_time.tv_sec, obj->dealloc_time.tv_usec,
1879 (void *) obj->dealloc_pc
/* Thread id argument -- see the hedge on the alloc side above.  */
1881 , (unsigned) obj->dealloc_thread
/* Deallocation-time backtrace, when captured.  */
1886 if (__mf_opts.backtrace > 0)
1889 for (i=0; i<obj->dealloc_backtrace_size; i++)
1890 fprintf (stderr, " %s\n", obj->dealloc_backtrace[i]);
1897 __mf_report_leaks (__mf_object_tree_t *node)
1899 /* The counter is amongst recursive calls, so
1900 that cumulative numbers are printed below. */
1901 static unsigned count = 0;
1903 if (node == NULL) /* Reset */
1909 /* Inorder traversal. */
1911 __mf_report_leaks (node->left);
1912 if (node->data.type == __MF_TYPE_HEAP
1913 || node->data.type == __MF_TYPE_HEAP_I)
1916 fprintf (stderr, "Leaked object %u:\n", count);
1917 __mf_describe_object (& node->data);
1920 __mf_report_leaks (node->right);
1925 /* ------------------------------------------------------------------------ */
1932 BEGIN_RECURSION_PROTECT ();
1934 END_RECURSION_PROTECT ();
1941 if (__mf_opts.collect_stats)
1946 "calls to __mf_check: %lu rot: %lu/%lu\n"
1947 " __mf_register: %lu [%luB, %luB, %luB, %luB, %luB]\n"
1948 " __mf_unregister: %lu [%luB]\n"
1949 " __mf_violation: [%lu, %lu, %lu, %lu, %lu]\n",
1950 __mf_count_check, __mf_treerot_left, __mf_treerot_right,
1951 __mf_count_register,
1952 __mf_total_register_size[0], __mf_total_register_size[1],
1953 __mf_total_register_size[2], __mf_total_register_size[3],
1954 __mf_total_register_size[4], /* XXX */
1955 __mf_count_unregister, __mf_total_unregister_size,
1956 __mf_count_violation[0], __mf_count_violation[1],
1957 __mf_count_violation[2], __mf_count_violation[3],
1958 __mf_count_violation[4]);
1961 "calls with reentrancy: %lu\n", __mf_reentrancy);
1964 " lock contention: %lu\n", __mf_lock_contention);
1967 /* Lookup cache stats. */
1970 unsigned max_reuse = 0;
1971 unsigned num_used = 0;
1972 unsigned num_unused = 0;
1974 for (i = 0; i < LOOKUP_CACHE_SIZE; i++)
1976 if (__mf_lookup_cache_reusecount[i])
1980 if (max_reuse < __mf_lookup_cache_reusecount[i])
1981 max_reuse = __mf_lookup_cache_reusecount[i];
1983 fprintf (stderr, "lookup cache slots used: %u unused: %u peak-reuse: %u\n",
1984 num_used, num_unused, max_reuse);
1988 unsigned live_count;
1989 live_count = __mf_find_objects (MINPTR, MAXPTR, NULL, 0);
1990 fprintf (stderr, "number of live objects: %u\n", live_count);
1993 if (__mf_opts.persistent_count > 0)
1995 unsigned dead_count = 0;
1997 for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
1998 for (plot = 0 ; plot < __mf_opts.persistent_count; plot ++)
1999 if (__mf_object_cemetary [row][plot] != 0)
2001 fprintf (stderr, " zombie objects: %u\n", dead_count);
2004 if (__mf_opts.print_leaks && (__mf_opts.mudflap_mode == mode_check))
2007 extern void * __mf_wrap_alloca_indirect (size_t c);
2009 /* Free up any remaining alloca()'d blocks. */
2010 __mf_wrap_alloca_indirect (0);
2011 __mf_describe_object (NULL); /* Reset description epoch. */
2012 __mf_report_leaks (NULL); /* Reset cumulative count. */
2013 l = __mf_report_leaks (__mf_object_root);
2014 fprintf (stderr, "number of leaked objects: %u\n", l);
2018 /* __mf_backtrace */
2021 __mf_backtrace (char ***symbols, void *guess_pc, unsigned guess_omit_levels)
2024 unsigned pc_array_size = __mf_opts.backtrace + guess_omit_levels;
2025 unsigned remaining_size;
2026 unsigned omitted_size = 0;
2028 DECLARE (void, free, void *ptr);
2029 DECLARE (void *, calloc, size_t c, size_t n);
2030 DECLARE (void *, malloc, size_t n);
2032 pc_array = CALL_REAL (calloc, pc_array_size, sizeof (void *) );
2033 #ifdef HAVE_BACKTRACE
2034 pc_array_size = backtrace (pc_array, pc_array_size);
2036 #define FETCH(n) do { if (pc_array_size >= n) { \
2037 pc_array[n] = __builtin_return_address(n); \
2038 if (pc_array[n] == 0) pc_array_size = n; } } while (0)
2040 /* Unroll some calls __builtin_return_address because this function
2041 only takes a literal integer parameter. */
2044 /* XXX: __builtin_return_address sometimes crashes (!) on >0 arguments,
2045 rather than simply returning 0. :-( */
2054 if (pc_array_size > 8) pc_array_size = 9;
2056 if (pc_array_size > 0) pc_array_size = 1;
2062 /* We want to trim the first few levels of the stack traceback,
2063 since they contain libmudflap wrappers and junk. If pc_array[]
2064 ends up containing a non-NULL guess_pc, then trim everything
2065 before that. Otherwise, omit the first guess_omit_levels
2068 if (guess_pc != NULL)
2069 for (i=0; i<pc_array_size; i++)
2070 if (pc_array [i] == guess_pc)
2073 if (omitted_size == 0) /* No match? */
2074 if (pc_array_size > guess_omit_levels)
2075 omitted_size = guess_omit_levels;
2077 remaining_size = pc_array_size - omitted_size;
2079 #ifdef HAVE_BACKTRACE_SYMBOLS
2080 *symbols = backtrace_symbols (pc_array + omitted_size, remaining_size);
2083 /* Let's construct a buffer by hand. It will have <remaining_size>
2084 char*'s at the front, pointing at individual strings immediately
2089 enum { perline = 30 };
2090 buffer = CALL_REAL (malloc, remaining_size * (perline + sizeof(char *)));
2091 pointers = (char **) buffer;
2092 chars = (char *)buffer + (remaining_size * sizeof (char *));
2093 for (i = 0; i < remaining_size; i++)
2095 pointers[i] = chars;
2096 sprintf (chars, "[0x%p]", pc_array [omitted_size + i]);
2097 chars = chars + perline;
2099 *symbols = pointers;
2102 CALL_REAL (free, pc_array);
2104 return remaining_size;
2107 /* ------------------------------------------------------------------------ */
2108 /* __mf_violation */
2111 __mf_violation (void *ptr, size_t sz, uintptr_t pc,
2112 const char *location, int type)
2115 static unsigned violation_number;
2116 DECLARE(void, free, void *ptr);
2118 TRACE ("violation pc=%p location=%s type=%d ptr=%p size=%lu\n",
2120 (location != NULL ? location : ""), type, ptr, (unsigned long) sz);
2122 if (__mf_opts.collect_stats)
2123 __mf_count_violation [(type < 0) ? 0 :
2124 (type > __MF_VIOL_WATCH) ? 0 :
2127 /* Print out a basic warning message. */
2128 if (__mf_opts.verbose_violations)
2131 unsigned num_helpful = 0;
2133 #if HAVE_GETTIMEOFDAY
2134 gettimeofday (& now, NULL);
2137 violation_number ++;
2140 "mudflap violation %u (%s): time=%lu.%06lu "
2141 "ptr=%p size=%lu\npc=%p%s%s%s\n",
2143 ((type == __MF_VIOL_READ) ? "check/read" :
2144 (type == __MF_VIOL_WRITE) ? "check/write" :
2145 (type == __MF_VIOL_REGISTER) ? "register" :
2146 (type == __MF_VIOL_UNREGISTER) ? "unregister" :
2147 (type == __MF_VIOL_WATCH) ? "watch" : "unknown"),
2148 now.tv_sec, now.tv_usec,
2149 (void *) ptr, (unsigned long)sz, (void *) pc,
2150 (location != NULL ? " location=`" : ""),
2151 (location != NULL ? location : ""),
2152 (location != NULL ? "'" : ""));
2154 if (__mf_opts.backtrace > 0)
2159 num = __mf_backtrace (& symbols, (void *) pc, 2);
2160 /* Note: backtrace_symbols calls malloc(). But since we're in
2161 __mf_violation and presumably __mf_check, it'll detect
2162 recursion, and not put the new string into the database. */
2164 for (i=0; i<num; i++)
2165 fprintf (stderr, " %s\n", symbols[i]);
2167 /* Calling free() here would trigger a violation. */
2168 CALL_REAL(free, symbols);
2172 /* Look for nearby objects. For this, we start with s_low/s_high
2173 pointing to the given area, looking for overlapping objects.
2174 If none show up, widen the search area and keep looking. */
2176 if (sz == 0) sz = 1;
2178 for (dead_p = 0; dead_p <= 1; dead_p ++) /* for dead_p in 0 1 */
2180 enum {max_objs = 3}; /* magic */
2181 __mf_object_tree_t *objs[max_objs];
2182 unsigned num_objs = 0;
2183 uintptr_t s_low, s_high;
2187 s_low = (uintptr_t) ptr;
2188 s_high = CLAMPSZ (ptr, sz);
2190 while (tries < 16) /* magic */
2193 num_objs = __mf_find_dead_objects (s_low, s_high, objs, max_objs);
2195 num_objs = __mf_find_objects (s_low, s_high, objs, max_objs);
2197 if (num_objs) /* good enough */
2202 /* XXX: tune this search strategy. It's too dependent on
2203 sz, which can vary from 1 to very big (when array index
2204 checking) numbers. */
2205 s_low = CLAMPSUB (s_low, (sz * tries * tries));
2206 s_high = CLAMPADD (s_high, (sz * tries * tries));
2209 for (i = 0; i < min (num_objs, max_objs); i++)
2211 __mf_object_t *obj = & objs[i]->data;
2212 uintptr_t low = (uintptr_t) ptr;
2213 uintptr_t high = CLAMPSZ (ptr, sz);
2214 unsigned before1 = (low < obj->low) ? obj->low - low : 0;
2215 unsigned after1 = (low > obj->high) ? low - obj->high : 0;
2216 unsigned into1 = (high >= obj->low && low <= obj->high) ? low - obj->low : 0;
2217 unsigned before2 = (high < obj->low) ? obj->low - high : 0;
2218 unsigned after2 = (high > obj->high) ? high - obj->high : 0;
2219 unsigned into2 = (high >= obj->low && low <= obj->high) ? high - obj->low : 0;
2221 fprintf (stderr, "Nearby object %u: checked region begins %uB %s and ends %uB %s\n",
2222 num_helpful + i + 1,
2223 (before1 ? before1 : after1 ? after1 : into1),
2224 (before1 ? "before" : after1 ? "after" : "into"),
2225 (before2 ? before2 : after2 ? after2 : into2),
2226 (before2 ? "before" : after2 ? "after" : "into"));
2227 __mf_describe_object (obj);
2229 num_helpful += num_objs;
2232 fprintf (stderr, "number of nearby objects: %u\n", num_helpful);
2235 /* How to finally handle this violation? */
2236 switch (__mf_opts.violation_mode)
2241 kill (getpid(), SIGSEGV);
2247 snprintf (buf, 128, "gdb --pid=%d", getpid ());
2249 /* XXX: should probably fork() && sleep(GDB_WAIT_PARAMETER)
2250 instead, and let the forked child execlp() gdb. That way, this
2251 subject process can be resumed under the supervision of gdb.
2252 This can't happen now, since system() only returns when gdb
2253 dies. In that case, we need to beware of starting a second
2254 concurrent gdb child upon the next violation. (But if the first
2255 gdb dies, then starting a new one is appropriate.) */
2260 /* ------------------------------------------------------------------------ */
/* Public entry point: begin watching the region [ptr, ptr+sz), so the
   next access through the check machinery trips a watch violation.
   Returns the result of __mf_watch_or_not.
   NOTE(review): the master copy likely brackets this body with the
   runtime lock (LOCKTH/UNLOCKTH); those lines are absent from this
   fragment -- confirm.  */
unsigned __mf_watch (void *ptr, size_t sz)
{
  unsigned rc;

  BEGIN_RECURSION_PROTECT ();
  rc = __mf_watch_or_not (ptr, sz, 1);
  END_RECURSION_PROTECT ();

  return rc;
}
/* Public entry point: stop watching the region [ptr, ptr+sz).
   Returns the result of __mf_watch_or_not.
   NOTE(review): recursion-protection/locking lines are elided in this
   fragment -- mirror whatever __mf_watch does in the master copy.  */
unsigned __mf_unwatch (void *ptr, size_t sz)
{
  unsigned rc;

  rc = __mf_watch_or_not (ptr, sz, 0);

  return rc;
}
/* Core of __mf_watch/__mf_unwatch: set or clear the watching_p bit on
   every registered object overlapping [ptr, ptr+sz); FLAG is 1 to
   watch, 0 to unwatch.  NOTE(review): case labels, brace lines and
   the declarations of obj_count/n are elided in this fragment; the
   visible body is the mode_check arm of the switch.  */
2285 __mf_watch_or_not (void *ptr, size_t sz, char flag)
2287 uintptr_t ptr_high = CLAMPSZ (ptr, sz);
2288 uintptr_t ptr_low = (uintptr_t) ptr;
2291 TRACE ("%s ptr=%p size=%lu\n",
2292 (flag ? "watch" : "unwatch"), ptr, (unsigned long) sz);
2294 switch (__mf_opts.mudflap_mode)
2304 __mf_object_tree_t **all_ovr_objs;
/* Resolve the real libc malloc/free, bypassing mudflap's wrappers.  */
2307 DECLARE (void *, malloc, size_t c);
2308 DECLARE (void, free, void *p);
/* Two-pass search: first count the overlapping objects, then fill an
   array sized to that count.  */
2310 obj_count = __mf_find_objects (ptr_low, ptr_high, NULL, 0);
2311 VERBOSE_TRACE (" %u:", obj_count);
2313 all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_tree_t *) *
2315 if (all_ovr_objs == NULL) abort ();
2316 n = __mf_find_objects (ptr_low, ptr_high, all_ovr_objs, obj_count);
2317 assert (n == obj_count);
2319 for (n = 0; n < obj_count; n ++)
2321 __mf_object_t *obj = & (all_ovr_objs[n]->data);
2323 VERBOSE_TRACE (" [%p]", (void *) obj);
/* Only touch objects whose watch state actually changes.  */
2324 if (obj->watching_p != flag)
2326 obj->watching_p = flag;
2329 /* Remove object from cache, to ensure next access
2330 goes through __mf_check(). */
2332 __mf_uncache_object (obj);
2335 CALL_REAL (free, all_ovr_objs);
2345 __mf_sigusr1_handler (int num)
2347 __mf_sigusr1_received ++;
2350 /* Install or remove SIGUSR1 handler as necessary.
2351 Also, respond to a received pending SIGUSR1. */
2353 __mf_sigusr1_respond ()
2355 static int handler_installed;
2358 /* Manage handler */
2359 if (__mf_opts.sigusr1_report && ! handler_installed)
2361 signal (SIGUSR1, __mf_sigusr1_handler);
2362 handler_installed = 1;
2364 else if(! __mf_opts.sigusr1_report && handler_installed)
2366 signal (SIGUSR1, SIG_DFL);
2367 handler_installed = 0;
2371 /* Manage enqueued signals */
2372 if (__mf_sigusr1_received > __mf_sigusr1_handled)
2374 __mf_sigusr1_handled ++;
2375 assert (__mf_state == reentrant);
2377 handler_installed = 0; /* We may need to re-enable signal; this might be a SysV library. */
2382 /* XXX: provide an alternative __assert_fail function that cannot
2383 fail due to libmudflap infinite recursion. */
/* Format the unsigned integer N in decimal and write it to FD.
   Deliberately avoids printf/malloc so it stays safe inside the
   low-level assertion-failure path.  */
static void
write_itoa (int fd, unsigned n)
{
  enum x { bufsize = sizeof(n)*4 };
  char buf [bufsize];
  unsigned i;

  /* Deposit digits from the tail of BUF toward the front; I ends up
     indexing the most significant digit written.  */
  for (i = 0; i < bufsize-1; i++)
    {
      unsigned digit = n % 10;
      buf[bufsize-2-i] = digit + '0';
      n /= 10;
      if (n == 0)
        break;
    }

  {
    char *m = & buf [bufsize-2-i];
    buf[bufsize-1] = '\0';
    write (fd, m, strlen(m));
  }
}
2410 __assert_fail (const char *msg, const char *file, unsigned line, const char *func)
2412 #define write2(string) write (2, (string), strlen ((string)));
2416 write_itoa (2, (unsigned) pthread_self ());
2419 write2(": assertion failure: `");
2420 write (2, msg, strlen (msg));
2422 write (2, func, strlen (func));
2424 write (2, file, strlen (file));
2426 write_itoa (2, line);