1 /* Mudflap: narrow-pointer bounds-checking by tree rewriting.
2 Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
3 Contributed by Frank Ch. Eigler <fche@redhat.com>
4 and Graydon Hoare <graydon@redhat.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
13 In addition to the permissions in the GNU General Public License, the
14 Free Software Foundation gives you unlimited permission to link the
15 compiled version of this file into combinations with other programs,
16 and to distribute those combinations without any restriction coming
17 from the use of this file. (The General Public License restrictions
18 do apply in other respects; for example, they cover modification of
19 the file, and distribution when not linked into a combine
22 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
23 WARRANTY; without even the implied warranty of MERCHANTABILITY or
24 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
27 You should have received a copy of the GNU General Public License
28 along with GCC; see the file COPYING. If not, write to the Free
29 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
34 /* These attempt to coax various unix flavours to declare all our
35 needed tidbits in the system headers. */
36 #if !defined(__FreeBSD__)
38 #endif /* Some BSDs break <sys/socket.h> if this is defined. */
42 #define __EXTENSIONS__
44 #define _LARGE_FILE_API
45 #define _XOPEN_SOURCE_EXTENDED 1
49 #include <sys/types.h>
53 #ifdef HAVE_EXECINFO_H
63 #include <sys/types.h>
68 #include "mf-runtime.h"
72 /* ------------------------------------------------------------------------ */
/* NOTE(review): this file is a numbered partial extraction of GCC
   libmudflap mf-runtime.c.  Gaps in the embedded line numbers mean
   source lines are missing throughout; annotations below flag where.  */
/* Constructor/destructor attributes: run __mf_init before main and
   __mf_fini after exit, respectively.  */
75 #define CTOR __attribute__ ((constructor))
76 #define DTOR __attribute__ ((destructor))
79 /* Codes to describe the context in which a violation occurs. */
80 #define __MF_VIOL_UNKNOWN 0
81 #define __MF_VIOL_READ 1
82 #define __MF_VIOL_WRITE 2
83 #define __MF_VIOL_REGISTER 3
84 #define __MF_VIOL_UNREGISTER 4
85 #define __MF_VIOL_WATCH 5
87 /* Protect against recursive calls. */
/* NOTE(review): lines 93, 95-96 and 99-101 are absent from this copy:
   the abort/return arm of BEGIN_RECURSION_PROTECT and the closing
   "} while (0)" of both macros are not visible -- confirm against
   upstream before editing.  The macros guard runtime entry points by
   toggling __mf_state between 'active' and 'reentrant'.  */
88 #define BEGIN_RECURSION_PROTECT() do { \
89 if (UNLIKELY (__mf_state == reentrant)) { \
90 write (2, "mf: erroneous reentrancy detected in `", 38); \
91 write (2, __PRETTY_FUNCTION__, strlen(__PRETTY_FUNCTION__)); \
92 write (2, "'\n", 2); \
94 __mf_state = reentrant; \
97 #define END_RECURSION_PROTECT() do { \
98 __mf_state = active; \
103 /* ------------------------------------------------------------------------ */
104 /* Required globals. */
106 #define LOOKUP_CACHE_MASK_DFL 1023
107 #define LOOKUP_CACHE_SIZE_MAX 4096 /* Allows max CACHE_MASK 0x0FFF */
108 #define LOOKUP_CACHE_SHIFT_DFL 2
/* Direct-mapped cache of recently checked object bounds, indexed by
   __MF_CACHE_INDEX (ptr); mask/shift are tunable at runtime via the
   lookup-cache-mask / lookup-cache-shift options below.  */
110 struct __mf_cache __mf_lookup_cache [LOOKUP_CACHE_SIZE_MAX];
111 uintptr_t __mf_lc_mask = LOOKUP_CACHE_MASK_DFL;
112 unsigned char __mf_lc_shift = LOOKUP_CACHE_SHIFT_DFL;
113 #define LOOKUP_CACHE_SIZE (__mf_lc_mask + 1)
115 struct __mf_options __mf_opts;
/* Nonzero until __mf_init has completed its bootstrap phase.  */
117 int __mf_starting_p = 1;
119 enum __mf_state_enum __mf_state = active;
121 /* See __mf_state_perthread() in mf-hooks.c. */
/* NOTE(review): the #else/#endif lines (129, 131-133) around this
   conditional initializer are missing from this copy.  */
126 pthread_mutex_t __mf_biglock =
127 #ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
128 PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
130 PTHREAD_MUTEX_INITIALIZER;
134 /* Use HAVE_PTHREAD_H here instead of LIBMUDFLAPTH, so that even
135 the libmudflap.la (no threading support) can diagnose whether
136 the application is linked with -lpthread. See __mf_usage() below. */
/* Weak reference: non-NULL iff the application linked -lpthread.  */
138 #pragma weak pthread_join
139 const void *threads_active_p = (void *) pthread_join;
143 /* ------------------------------------------------------------------------ */
144 /* stats-related globals. */
/* Counters accumulated only when the -collect-stats option is set;
   reported at shutdown / on SIGUSR1.  */
146 static unsigned long __mf_count_check;
147 static unsigned long __mf_lookup_cache_reusecount [LOOKUP_CACHE_SIZE_MAX];
148 static unsigned long __mf_treerot_left, __mf_treerot_right;
149 static unsigned long __mf_count_register;
150 static unsigned long __mf_total_register_size [__MF_TYPE_MAX+1];
151 static unsigned long __mf_count_unregister;
152 static unsigned long __mf_total_unregister_size;
153 static unsigned long __mf_count_violation [__MF_VIOL_WATCH+1];
154 static unsigned long __mf_sigusr1_received;
155 static unsigned long __mf_sigusr1_handled;
/* Exposed (non-static) so other translation units can report them.  */
156 /* not static */ unsigned long __mf_reentrancy;
158 /* not static */ unsigned long __mf_lock_contention;
162 /* ------------------------------------------------------------------------ */
163 /* mode-check-related globals. */
/* NOTE(review): several struct members are missing from this copy
   (numbering gaps): at least the object's name and alloc_pc fields,
   a deallocated_p flag (referenced by __mfu_unregister below), the
   closing "} __mf_object_t;", and the "data" member of
   __mf_object_tree -- confirm against upstream mf-runtime.c.  */
165 typedef struct __mf_object
167 uintptr_t low, high; /* __mf_register parameters */
169 char type; /* __MF_TYPE_something */
170 char watching_p; /* Trigger a VIOL_WATCH on access? */
171 unsigned read_count; /* Number of times __mf_check/read was called on this object. */
172 unsigned write_count; /* Likewise for __mf_check/write. */
173 unsigned liveness; /* A measure of recent checking activity. */
174 unsigned description_epoch; /* Last epoch __mf_describe_object printed this. */
177 struct timeval alloc_time;
178 char **alloc_backtrace;
179 size_t alloc_backtrace_size;
181 pthread_t alloc_thread;
185 uintptr_t dealloc_pc;
186 struct timeval dealloc_time;
187 char **dealloc_backtrace;
188 size_t dealloc_backtrace_size;
190 pthread_t dealloc_thread;
/* Tree node wrapping an __mf_object_t for the live-object BST.  */
195 typedef struct __mf_object_tree
198 struct __mf_object_tree *left;
199 struct __mf_object_tree *right;
200 } __mf_object_tree_t;
202 /* Live objects: binary tree on __mf_object_t.low */
203 static __mf_object_tree_t *__mf_object_root;
205 /* Dead objects: circular arrays; _MIN_CEM .. _MAX_CEM only */
206 static unsigned __mf_object_dead_head[__MF_TYPE_MAX_CEM+1]; /* next empty spot */
207 static __mf_object_tree_t *__mf_object_cemetary[__MF_TYPE_MAX_CEM+1][__MF_PERSIST_MAX];
210 /* ------------------------------------------------------------------------ */
211 /* Forward function declarations */
/* Internal helpers; definitions appear later in the file (some are
   beyond the end of this extraction).  */
213 static void __mf_init () CTOR;
214 static void __mf_sigusr1_respond ();
215 static unsigned __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
216 __mf_object_tree_t **objs, unsigned max_objs);
217 static unsigned __mf_find_dead_objects (uintptr_t ptr_low, uintptr_t ptr_high,
218 __mf_object_tree_t **objs, unsigned max_objs);
219 static void __mf_link_object (__mf_object_tree_t *obj);
220 static void __mf_age_tree (__mf_object_tree_t *obj);
221 static void __mf_adapt_cache ();
222 static void __mf_unlink_object (__mf_object_tree_t *obj);
223 static void __mf_describe_object (__mf_object_t *obj);
224 static unsigned __mf_watch_or_not (void *ptr, size_t sz, char flag);
228 /* ------------------------------------------------------------------------ */
229 /* Configuration engine */
/* Reset __mf_opts to its compiled-in defaults (zero everything, then
   set the non-zero tunables).  NOTE(review): the return-type line and
   the function's braces (lines 230-231, 233, 249) are missing from
   this copy.  */
232 __mf_set_default_options ()
234 memset (& __mf_opts, 0, sizeof (__mf_opts));
236 __mf_opts.tree_aging = 13037;
237 __mf_opts.adapt_cache = 1000003;
238 __mf_opts.abbreviate = 1;
239 __mf_opts.verbose_violations = 1;
240 __mf_opts.free_queue_length = 4;
241 __mf_opts.persistent_count = 100;
242 __mf_opts.crumple_zone = 32;
243 __mf_opts.backtrace = 4;
244 __mf_opts.mudflap_mode = mode_check;
245 __mf_opts.violation_mode = viol_nop;
246 __mf_opts.heur_std_data = 1;
248 __mf_opts.thread_stack = 0;
/* Table mapping MUDFLAP_OPTIONS keywords to either a boolean set_option
   or a read_integer_option target inside __mf_opts.  NOTE(review): this
   copy is missing the table's declaration head and many of the option
   NAME lines (e.g. the {"mode-nop", ... entries); only some entries
   ({"internal-checking", {"check-initialization", etc.) retain their
   name strings.  Confirm the full table against upstream.  */
267 "mudflaps do nothing",
268 set_option, (int)mode_nop, (int *)&__mf_opts.mudflap_mode},
270 "mudflaps populate object tree",
271 set_option, (int)mode_populate, (int *)&__mf_opts.mudflap_mode},
273 "mudflaps check for memory violations",
274 set_option, (int)mode_check, (int *)&__mf_opts.mudflap_mode},
276 "mudflaps always cause violations (diagnostic)",
277 set_option, (int)mode_violate, (int *)&__mf_opts.mudflap_mode},
280 "violations do not change program execution",
281 set_option, (int)viol_nop, (int *)&__mf_opts.violation_mode},
283 "violations cause a call to abort()",
284 set_option, (int)viol_abort, (int *)&__mf_opts.violation_mode},
286 "violations are promoted to SIGSEGV signals",
287 set_option, (int)viol_segv, (int *)&__mf_opts.violation_mode},
289 "violations fork a gdb process attached to current program",
290 set_option, (int)viol_gdb, (int *)&__mf_opts.violation_mode},
292 "trace calls to mudflap runtime library",
293 set_option, 1, &__mf_opts.trace_mf_calls},
295 "trace internal events within mudflap runtime library",
296 set_option, 1, &__mf_opts.verbose_trace},
298 "collect statistics on mudflap's operation",
299 set_option, 1, &__mf_opts.collect_stats},
302 "print report upon SIGUSR1",
303 set_option, 1, &__mf_opts.sigusr1_report},
305 {"internal-checking",
306 "perform more expensive internal checking",
307 set_option, 1, &__mf_opts.internal_checking},
309 "age the object tree after N accesses for working set",
310 read_integer_option, 1000000, &__mf_opts.tree_aging},
312 "print any memory leaks at program shutdown",
313 set_option, 1, &__mf_opts.print_leaks},
314 {"check-initialization",
315 "detect uninitialized object reads",
316 set_option, 1, &__mf_opts.check_initialization},
317 {"verbose-violations",
318 "print verbose messages when memory violations occur",
319 set_option, 1, &__mf_opts.verbose_violations},
321 "abbreviate repetitive listings",
322 set_option, 1, &__mf_opts.abbreviate},
324 "wipe stack objects at unwind",
325 set_option, 1, &__mf_opts.wipe_stack},
327 "wipe heap objects at free",
328 set_option, 1, &__mf_opts.wipe_heap},
330 "support /proc/self/map heuristics",
331 set_option, 1, &__mf_opts.heur_proc_map},
333 "enable a simple upper stack bound heuristic",
334 set_option, 1, &__mf_opts.heur_stack_bound},
336 "support _start.._end heuristics",
337 set_option, 1, &__mf_opts.heur_start_end},
339 "register standard library data (argv, errno, stdin, ...)",
340 set_option, 1, &__mf_opts.heur_std_data},
341 {"free-queue-length",
342 "queue N deferred free() calls before performing them",
343 read_integer_option, 0, &__mf_opts.free_queue_length},
345 "keep a history of N unregistered regions",
346 read_integer_option, 0, &__mf_opts.persistent_count},
348 "surround allocations with crumple zones of N bytes",
349 read_integer_option, 0, &__mf_opts.crumple_zone},
/* The two entries below cast non-int globals (__mf_lc_mask is
   uintptr_t, __mf_lc_shift is unsigned char) to int* -- acknowledged
   unsafe by the original author's XXX comment.  */
350 /* XXX: not type-safe.
352 "set lookup cache size mask to N (2**M - 1)",
353 read_integer_option, 0, (int *)(&__mf_lc_mask)},
355 "set lookup cache pointer shift",
356 read_integer_option, 0, (int *)(&__mf_lc_shift)},
359 "adapt mask/shift parameters after N cache misses",
360 read_integer_option, 1, &__mf_opts.adapt_cache},
362 "keep an N-level stack trace of each call context",
363 read_integer_option, 0, &__mf_opts.backtrace},
366 "override thread stacks allocation: N kB",
367 read_integer_option, 0, &__mf_opts.thread_stack},
/* Sentinel terminating the table (name == NULL).  */
369 {0, 0, set_option, 0, NULL}
/* Fragment of __mf_usage(): prints a banner plus one line per entry of
   the options[] table to stderr.  NOTE(review): the function signature
   and several surrounding lines (370-377, 380, 382, 385, 388-399,
   401, 403, 405-409, 411, 413, 415, 421-425) are missing from this
   copy, including the fputs/fprintf that consumes this banner string
   and the switch statement the case label below belongs to.  */
378 "This is a %s%sGCC \"mudflap\" memory-checked binary.\n"
379 "Mudflap is Copyright (C) 2002-2003 Free Software Foundation, Inc.\n"
381 "The mudflap code can be controlled by an environment variable:\n"
383 "$ export MUDFLAP_OPTIONS='<options>'\n"
384 "$ <mudflapped_program>\n"
386 "where <options> is a space-separated list of \n"
387 "any of the following options. Use `-no-OPTION' to disable options.\n"
390 (threads_active_p ? "multi-threaded " : "single-threaded "),
400 /* XXX: The multi-threaded thread-unaware combination is bad. */
402 for (opt = options; opt->name; opt++)
404 int default_p = (opt->value == * opt->target);
410 fprintf (stderr, "-%-23.23s %s", opt->name, opt->description);
412 fprintf (stderr, " [active]\n");
414 fprintf (stderr, "\n");
416 case read_integer_option:
/* NOTE(review): strncpy here does not guarantee NUL-termination for a
   24+ char option name; upstream relies on names being short.  */
417 strncpy (buf, opt->name, 128);
418 strncpy (buf + strlen (opt->name), "=N", 2);
419 fprintf (stderr, "-%-23.23s %s", buf, opt->description);
420 fprintf (stderr, " [%d]\n", * opt->target);
426 fprintf (stderr, "\n");
/* Locked public wrapper: serialize and recursion-protect a call to
   __mfu_set_options.  NOTE(review): return type, braces, LOCKTH/
   UNLOCKTH and the return statement are missing from this copy.  */
431 __mf_set_options (const char *optstr)
435 BEGIN_RECURSION_PROTECT ();
436 rc = __mfu_set_options (optstr);
437 /* XXX: It's not really that easy. A change to a bunch of parameters
438 can require updating auxiliary state or risk crashing:
439 free_queue_length, crumple_zone ... */
440 END_RECURSION_PROTECT ();
/* Parse a MUDFLAP_OPTIONS-style string: for each space-separated token,
   match it against the options[] table, honoring a "no-" prefix
   (negate) and an "=N" integer suffix.  After parsing, clamp the
   security-sensitive parameters and flush the lookup cache.
   NOTE(review): many lines are missing in this copy (outer while loop,
   switch header, negate handling, brace structure) -- the visible
   lines are a skeleton only.  */
447 __mfu_set_options (const char *optstr)
449 struct option *opts = 0;
453 const char *saved_optstr = optstr;
455 /* XXX: bounds-check for optstr! */
472 if (*optstr == '?' ||
473 strncmp (optstr, "help", 4) == 0)
475 /* Caller will print help and exit. */
479 if (strncmp (optstr, "no-", 3) == 0)
482 optstr = & optstr[3];
485 for (opts = options; opts->name; opts++)
487 if (strncmp (optstr, opts->name, strlen (opts->name)) == 0)
489 optstr += strlen (opts->name);
490 assert (opts->target);
497 *(opts->target) = opts->value;
499 case read_integer_option:
500 if (! negate && (*optstr == '=' && *(optstr+1)))
503 tmp = strtol (optstr, &nxt, 10);
504 if ((optstr != nxt) && (tmp != LONG_MAX))
507 *(opts->target) = (int)tmp;
521 "warning: unrecognized string '%s' in mudflap options\n",
/* Skip to end of string to abandon parsing after a bad token.  */
523 optstr += strlen (optstr);
529 /* Special post-processing: bound __mf_lc_mask and free_queue_length for security. */
530 __mf_lc_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
531 __mf_opts.free_queue_length &= (__MF_FREEQ_MAX - 1);
533 /* Clear the lookup cache, in case the parameters got changed. */
535 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
/* Poison entry 0 so a zeroed cache line cannot accidentally match.  */
537 __mf_lookup_cache[0].low = MAXPTR;
539 TRACE ("set options from `%s'\n", saved_optstr);
541 /* Call this unconditionally, in case -sigusr1-report was toggled. */
542 __mf_sigusr1_respond ();
/* Resolve one wrapped libc/pthread symbol via dlsym/dlvsym(RTLD_NEXT),
   caching the result in e->pointer; idempotent.  NOTE(review): return
   type, braces, the #ifdef around dlvsym, error-abort paths and the
   "else" joining the two lookups are missing from this copy.  */
551 __mf_resolve_single_dynamic (struct __mf_dynamic_entry *e)
556 if (e->pointer) return;
559 if (e->version != NULL && e->version[0] != '\0') /* non-null/empty */
560 e->pointer = dlvsym (RTLD_NEXT, e->name, e->version);
563 e->pointer = dlsym (RTLD_NEXT, e->name);
569 fprintf (stderr, "mf: error in dlsym(\"%s\"): %s\n",
575 fprintf (stderr, "mf: dlsym(\"%s\") = NULL\n", e->name);
/* Resolve the subset of __mf_dynamic[] needed during bootstrap
   (entries before dyn_INITRESOLVE); the rest resolve lazily.
   NOTE(review): return type and braces missing from this copy.  */
582 __mf_resolve_dynamics ()
585 for (i = 0; i < dyn_INITRESOLVE; i++)
586 __mf_resolve_single_dynamic (& __mf_dynamic[i]);
590 /* NB: order must match enums in mf-impl.h */
/* Table of real (next-in-link-order) functions the runtime must call
   to avoid recursing into its own wrappers.  The pthread entries are
   guarded by an #ifdef not visible in this copy.  */
591 struct __mf_dynamic_entry __mf_dynamic [] =
593 {NULL, "calloc", NULL},
594 {NULL, "free", NULL},
595 {NULL, "malloc", NULL},
596 {NULL, "mmap", NULL},
597 {NULL, "munmap", NULL},
598 {NULL, "realloc", NULL},
599 {NULL, "DUMMY", NULL}, /* dyn_INITRESOLVE */
601 {NULL, "pthread_create", PTHREAD_CREATE_VERSION},
602 {NULL, "pthread_join", NULL},
603 {NULL, "pthread_exit", NULL}
611 /* ------------------------------------------------------------------------ */
/* Body fragment of __mf_init (the CTOR): resolve real libc symbols,
   set default options, apply MUDFLAP_OPTIONS from the environment,
   and register the runtime's own globals plus page zero as NOACCESS.
   NOTE(review): the function signature line, __mf_starting_p
   manipulation, and the error path after __mfu_set_options are
   missing from this copy.  */
618 /* This initial bootstrap phase requires that __mf_starting_p = 1. */
620 __mf_resolve_dynamics ();
624 __mf_set_default_options ();
626 ov = getenv ("MUDFLAP_OPTIONS");
629 int rc = __mfu_set_options (ov);
637 /* Initialize to a non-zero description epoch. */
638 __mf_describe_object (NULL);
/* Register a runtime-internal object as NOACCESS so the application
   cannot read or write mudflap's own state.  */
640 #define REG_RESERVED(obj) \
641 __mf_register (& obj, sizeof(obj), __MF_TYPE_NOACCESS, # obj)
643 REG_RESERVED (__mf_lookup_cache);
644 REG_RESERVED (__mf_lc_mask);
645 REG_RESERVED (__mf_lc_shift);
646 /* XXX: others of our statics? */
648 /* Prevent access to *NULL. */
649 __mf_register (MINPTR, 1, __MF_TYPE_NOACCESS, "NULL");
650 __mf_lookup_cache[0].low = (uintptr_t) -1;
/* Interposed main(): on first entry (guarded by been_here), register
   argv, each argv element, environ, each environ element, errno and
   the three stdio streams as STATIC objects, then chain to the real
   main.  NOTE(review): return type, braces, the been_here set, the
   loop headers for environ, and the #if/#else structure selecting
   between the two return statements are missing from this copy.  */
656 __wrap_main (int argc, char* argv[])
658 extern char **environ;
660 static int been_here = 0;
662 if (__mf_opts.heur_std_data && ! been_here)
667 __mf_register (argv, sizeof(char *)*(argc+1), __MF_TYPE_STATIC, "argv[]");
668 for (i=0; i<argc; i++)
670 unsigned j = strlen (argv[i]);
671 __mf_register (argv[i], j+1, __MF_TYPE_STATIC, "argv element");
676 char *e = environ[i];
678 if (e == NULL) break;
679 j = strlen (environ[i]);
680 __mf_register (environ[i], j+1, __MF_TYPE_STATIC, "environ element");
682 __mf_register (environ, sizeof(char *)*(i+1), __MF_TYPE_STATIC, "environ[]");
684 __mf_register (& errno, sizeof (errno), __MF_TYPE_STATIC, "errno area");
686 __mf_register (stdin, sizeof (*stdin), __MF_TYPE_STATIC, "stdin");
687 __mf_register (stdout, sizeof (*stdout), __MF_TYPE_STATIC, "stdout");
688 __mf_register (stderr, sizeof (*stderr), __MF_TYPE_STATIC, "stderr");
690 /* Make some effort to register ctype.h static arrays. */
691 /* XXX: e.g., on Solaris, may need to register __ctype, _ctype, __ctype_mask, __toupper, etc. */
692 /* On modern Linux GLIBC, these are thread-specific and changeable, and are dealt
693 with in mf-hooks2.c. */
697 return main (argc, argv, environ);
699 return __real_main (argc, argv, environ);
/* Destructor: runs at process exit; body (report printing) is largely
   missing from this copy -- only the trace line is visible.  */
705 extern void __mf_fini () DTOR;
708 TRACE ("__mf_fini\n");
714 /* ------------------------------------------------------------------------ */
/* Locked public entry point for access checks: recursion-protect and
   delegate to __mfu_check.  NOTE(review): the opening brace, LOCKTH/
   UNLOCKTH and the closing brace are missing from this copy.  */
717 void __mf_check (void *ptr, size_t sz, int type, const char *location)
720 BEGIN_RECURSION_PROTECT ();
721 __mfu_check (ptr, sz, type, location);
722 END_RECURSION_PROTECT ();
/* Core access check: decide whether [ptr, ptr+sz) is a legal
   read/write.  judgement: 0 undecided, <0 violation, >0 okay.  Modes:
   nop/populate unconditionally prime the cache; check mode searches
   the object tree, handling single-object hits, multi-object spans
   (contiguous GUESS/mmap pieces), watch points, and uninitialized
   heap reads; heuristics get up to two retries.  On judgement < 0,
   __mf_violation is raised.  NOTE(review): this copy is missing many
   structural lines (case labels, braces, counter increments,
   judgement assignments at 805/815/etc.), so control flow below is a
   skeleton -- do not edit without the upstream source.  */
727 void __mfu_check (void *ptr, size_t sz, int type, const char *location)
729 unsigned entry_idx = __MF_CACHE_INDEX (ptr);
730 struct __mf_cache *entry = & __mf_lookup_cache [entry_idx];
731 int judgement = 0; /* 0=undecided; <0=violation; >0=okay */
732 uintptr_t ptr_low = (uintptr_t) ptr;
733 uintptr_t ptr_high = CLAMPSZ (ptr, sz);
734 struct __mf_cache old_entry = *entry;
736 if (UNLIKELY (__mf_opts.sigusr1_report))
737 __mf_sigusr1_respond ();
739 TRACE ("check ptr=%p b=%u size=%lu %s location=`%s'\n",
740 ptr, entry_idx, (unsigned long)sz,
741 (type == 0 ? "read" : "write"), location);
743 switch (__mf_opts.mudflap_mode)
747 entry->high = MAXPTR;
752 entry->low = ptr_low;
753 entry->high = ptr_high;
759 unsigned heuristics = 0;
761 /* Advance aging/adaptation counters. */
762 if (__mf_object_root)
764 static unsigned aging_count;
765 static unsigned adapt_count;
768 if (UNLIKELY (__mf_opts.tree_aging > 0 &&
769 aging_count > __mf_opts.tree_aging))
772 __mf_age_tree (__mf_object_root);
774 if (UNLIKELY (__mf_opts.adapt_cache > 0 &&
775 adapt_count > __mf_opts.adapt_cache))
782 /* Looping only occurs if heuristics were triggered. */
783 while (judgement == 0)
785 __mf_object_tree_t* ovr_obj[1];
788 obj_count = __mf_find_objects (ptr_low, ptr_high, ovr_obj, 1);
790 if (LIKELY (obj_count == 1)) /* A single hit! */
792 __mf_object_t *obj = & ovr_obj[0]->data;
793 assert (obj != NULL);
794 if (LIKELY (ptr_low >= obj->low && ptr_high <= obj->high))
796 /* XXX: hope for no overflow! */
797 if (type == __MF_CHECK_READ)
804 if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
806 else if (UNLIKELY (obj->watching_p))
807 judgement = -2; /* trigger VIOL_WATCH */
808 else if (UNLIKELY (__mf_opts.check_initialization
810 && type == __MF_CHECK_READ
812 && obj->write_count == 0
813 /* uninitialized (heap) */
814 && obj->type == __MF_TYPE_HEAP))
819 entry->low = obj->low;
820 entry->high = obj->high;
824 /* The object did not cover the entire accessed region. */
826 else if (LIKELY (obj_count > 1))
828 __mf_object_tree_t **all_ovr_objs;
830 DECLARE (void *, malloc, size_t c);
831 DECLARE (void, free, void *p);
833 all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_tree_t *) *
835 if (all_ovr_objs == NULL) abort ();
836 n = __mf_find_objects (ptr_low, ptr_high, all_ovr_objs, obj_count);
837 assert (n == obj_count);
839 /* Confirm that accessed range is covered by first/last object. */
840 if (LIKELY ((ptr_low >= all_ovr_objs[0]->data.low) &&
841 (ptr_high <= all_ovr_objs[obj_count-1]->data.high)))
843 /* Presume valid access. */
846 /* Confirm that intermediate objects are
847 contiguous and share a single name. Thus they
848 are likely split up GUESS regions, or mmap
849 pages. The idea of the name check is to
850 prevent an oversize access to a
851 stack-registered object (followed by some GUESS
852 type) from being accepted as a hit. */
853 for (n=0; n<obj_count-1; n++)
855 __mf_object_t *obj = & (all_ovr_objs[n]->data);
856 __mf_object_t *nextobj = & (all_ovr_objs[n+1]->data);
858 if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
859 judgement = -1; /* Force error. */
861 if (UNLIKELY (judgement == 1 &&
862 (obj->high + 1 != nextobj->low)))
863 judgement = 0; /* Cancel presumption. */
865 if (UNLIKELY (judgement == 1 &&
866 (obj->name != nextobj->name)))
867 judgement = 0; /* Cancel presumption. */
868 /* NB: strcmp above is not necessary since the
869 same literal string pointer is normally
870 used when creating regions. */
872 /* XXX: hope for no overflow! */
873 if (type == __MF_CHECK_READ)
880 /* If the access is otherwise successful, check whether
881 any of the covered objects are being watched. */
885 for (i=0; i<obj_count; i++)
886 if (all_ovr_objs[i]->data.watching_p)
887 judgement = -2; /* trigger VIOL_WATCH */
890 /* Check for uninitialized reads. */
892 __mf_opts.check_initialization &&
893 type == __MF_CHECK_READ)
896 unsigned written_count = 0;
898 for (i=0; i<obj_count; i++)
900 __mf_object_t *obj = & all_ovr_objs[i]->data;
903 || obj->type == __MF_TYPE_HEAP_I
904 || obj->type == __MF_TYPE_GUESS)
908 /* Check for ALL pieces having been written-to.
909 XXX: should this be ANY instead? */
910 if (written_count != obj_count)
914 /* Fill out the cache with the bounds of the first
915 object and the last object that covers this
916 cache line (== includes the same __MF_CACHE_INDEX).
917 This could let this cache line span *two* distinct
918 registered objects: a peculiar but reasonable
919 situation. The cache line may not include the
920 entire object though. */
924 entry->low = all_ovr_objs[0]->data.low;
925 for (i=0; i<obj_count; i++)
927 uintptr_t high = all_ovr_objs[i]->data.high;
928 if (__MF_CACHE_INDEX (high) == entry_idx)
934 CALL_REAL (free, all_ovr_objs);
939 if (heuristics++ < 2) /* XXX parametrize this number? */
940 judgement = __mf_heuristic_check (ptr_low, ptr_high);
954 if (__mf_opts.collect_stats)
958 if (LIKELY (old_entry.low != entry->low || old_entry.high != entry->high))
959 /* && (old_entry.low != 0) && (old_entry.high != 0)) */
960 __mf_lookup_cache_reusecount [entry_idx] ++;
963 if (UNLIKELY (judgement < 0))
964 __mf_violation (ptr, sz,
965 (uintptr_t) __builtin_return_address (0), location,
967 (type == __MF_CHECK_READ ? __MF_VIOL_READ : __MF_VIOL_WRITE) :
/* Allocate (via the real calloc) and populate a new object record for
   [low, high] of the given type/name/pc, stamp allocation time/thread
   and optionally a backtrace for heap objects, then link it into the
   live-object tree.  Returns the new node.  NOTE(review): the #endif
   lines closing the HAVE_GETTIMEOFDAY/pthread conditionals, the
   backtrace's pc argument, and the return statement are missing from
   this copy.  NOTE(review): the calloc result is not NULL-checked on
   the visible lines -- confirm upstream behavior.  */
972 static __mf_object_tree_t *
973 __mf_insert_new_object (uintptr_t low, uintptr_t high, int type,
974 const char *name, uintptr_t pc)
976 DECLARE (void *, calloc, size_t c, size_t n);
978 __mf_object_tree_t *new_obj;
979 new_obj = CALL_REAL (calloc, 1, sizeof(__mf_object_tree_t));
980 new_obj->data.low = low;
981 new_obj->data.high = high;
982 new_obj->data.type = type;
983 new_obj->data.name = name;
984 new_obj->data.alloc_pc = pc;
985 #if HAVE_GETTIMEOFDAY
986 gettimeofday (& new_obj->data.alloc_time, NULL);
989 new_obj->data.alloc_thread = pthread_self ();
992 if (__mf_opts.backtrace > 0 && (type == __MF_TYPE_HEAP || type == __MF_TYPE_HEAP_I))
993 new_obj->data.alloc_backtrace_size =
994 __mf_backtrace (& new_obj->data.alloc_backtrace,
997 __mf_link_object (new_obj);
/* Evict any lookup-cache entries referring to old_obj's bounds, so a
   freed object cannot keep satisfying checks.  Skipped entirely when
   the object was never checked (read_count + write_count == 0), since
   it then cannot be cached.  NOTE(review): return type, braces and
   the loop-variable declaration are missing from this copy; also note
   the index loop assumes idx_low <= idx_high, which holds only when
   the object does not wrap around the cache -- confirm upstream.  */
1003 __mf_uncache_object (__mf_object_t *old_obj)
1005 /* Remove any low/high pointers for this object from the lookup cache. */
1007 /* Can it possibly exist in the cache? */
1008 if (LIKELY (old_obj->read_count + old_obj->write_count))
1010 uintptr_t low = old_obj->low;
1011 uintptr_t high = old_obj->high;
1012 unsigned idx_low = __MF_CACHE_INDEX (low);
1013 unsigned idx_high = __MF_CACHE_INDEX (high);
1015 for (i = idx_low; i <= idx_high; i++)
1017 struct __mf_cache *entry = & __mf_lookup_cache [i];
1018 /* NB: the "||" in the following test permits this code to
1019 tolerate the situation introduced by __mf_check over
1020 contiguous objects, where a cache entry spans several
1022 if (entry->low == low || entry->high == high)
/* Poison the entry: low > high can never match a lookup.  */
1024 entry->low = MAXPTR;
1025 entry->high = MINPTR;
/* Locked public entry point for object registration: recursion-protect
   and delegate to __mfu_register.  NOTE(review): return type, braces
   and LOCKTH/UNLOCKTH lines are missing from this copy.  */
1033 __mf_register (void *ptr, size_t sz, int type, const char *name)
1036 BEGIN_RECURSION_PROTECT ();
1037 __mfu_register (ptr, sz, type, name);
1038 END_RECURSION_PROTECT ();
/* Core registration: record [ptr, ptr+sz) as a tracked object of the
   given type.  mode nop: flush the cache; mode violate: raise
   VIOL_REGISTER; mode populate/check: search for overlaps and either
   (a) quietly accept duplicate STATIC/GUESS registrations, (b) split
   a new GUESS region around existing objects, (c) displace an
   overlapped GUESS temporarily to admit a real object, or (d) raise a
   violation for genuine double registration; with no overlap, insert
   a fresh object node.  NOTE(review): case labels, braces, the
   next_low initialization before the splitting loop, and several
   closing lines are missing from this copy -- treat the visible
   control flow as a skeleton.  */
1044 __mfu_register (void *ptr, size_t sz, int type, const char *name)
1046 TRACE ("register ptr=%p size=%lu type=%x name='%s'\n",
1047 ptr, (unsigned long) sz, type, name ? name : "");
1049 if (__mf_opts.collect_stats)
1051 __mf_count_register ++;
1052 __mf_total_register_size [(type < 0) ? 0 :
1053 (type > __MF_TYPE_MAX) ? 0 :
1057 if (UNLIKELY (__mf_opts.sigusr1_report))
1058 __mf_sigusr1_respond ();
1060 switch (__mf_opts.mudflap_mode)
1066 __mf_violation (ptr, sz, (uintptr_t) __builtin_return_address (0), NULL,
1067 __MF_VIOL_REGISTER);
1071 /* Clear the cache. */
1072 /* XXX: why the entire cache? */
1074 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1076 __mf_lookup_cache[0].low = MAXPTR;
1081 __mf_object_tree_t *ovr_objs [1];
1082 unsigned num_overlapping_objs;
1083 uintptr_t low = (uintptr_t) ptr;
1084 uintptr_t high = CLAMPSZ (ptr, sz);
1085 uintptr_t pc = (uintptr_t) __builtin_return_address (0);
1087 /* Treat unknown size indication as 1. */
1088 if (UNLIKELY (sz == 0)) sz = 1;
1090 num_overlapping_objs = __mf_find_objects (low, high, ovr_objs, 1);
1092 /* Handle overlaps. */
1093 if (UNLIKELY (num_overlapping_objs > 0))
1095 __mf_object_tree_t *ovr_obj = ovr_objs[0];
1097 /* Quietly accept a single duplicate registration for
1098 static objects, since these may come from distinct
1099 compilation units. */
1100 if (type == __MF_TYPE_STATIC
1101 && ovr_obj->data.type == __MF_TYPE_STATIC
1102 && ovr_obj->data.low == low
1103 && ovr_obj->data.high == high)
1106 VERBOSE_TRACE ("duplicate static reg %p-%p `%s'\n",
1107 (void *) low, (void *) high,
1108 (ovr_obj->data.name ? ovr_obj->data.name : ""));
1112 /* Quietly accept a single duplicate registration for
1113 guess objects too. */
1114 if (type == __MF_TYPE_GUESS &&
1115 ovr_obj->data.type == __MF_TYPE_GUESS &&
1116 ovr_obj->data.low == low &&
1117 ovr_obj->data.high == high)
1120 VERBOSE_TRACE ("duplicate guess reg %p-%p\n",
1121 (void *) low, (void *) high);
1125 /* Quietly accept new a guess registration that overlaps
1126 at least one existing object. Trim it down to size. */
1127 else if (type == __MF_TYPE_GUESS)
1129 /* We need to split this new GUESS region into some
1130 smaller ones. Or we may not need to insert it at
1131 all if it is covered by the overlapping region. */
1133 /* First, identify all the overlapping objects. */
1134 __mf_object_tree_t **all_ovr_objs;
1135 unsigned num_ovr_objs, n;
1137 DECLARE (void *, malloc, size_t c);
1138 DECLARE (void, free, void *p);
1140 all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_tree_t *) *
1141 num_overlapping_objs));
1142 if (all_ovr_objs == NULL) abort ();
1143 num_ovr_objs = __mf_find_objects (low, high, all_ovr_objs,
1144 num_overlapping_objs);
1145 assert (num_ovr_objs == num_overlapping_objs);
1147 VERBOSE_TRACE ("splitting guess %p-%p, # overlaps: %u\n",
1148 (void *) low, (void *) high, num_ovr_objs);
1150 /* Add GUESS regions between the holes: before each
1151 overlapping region. */
/* NOTE(review): the declaration/initialization of next_low
   (original line ~1152-1153, presumably next_low = low) is
   missing from this copy.  */
1154 /* This makes use of the assumption that __mf_find_objects() returns
1155 overlapping objects in an increasing sequence. */
1156 for (n=0; n < min (num_ovr_objs, num_overlapping_objs); n++)
1158 if (all_ovr_objs[n]->data.low > next_low) /* Gap? */
1160 uintptr_t next_high = CLAMPSUB (all_ovr_objs[n]->data.low, 1);
1161 __mfu_register ((void *) next_low, next_high-next_low+1,
1162 __MF_TYPE_GUESS, name);
1164 next_low = CLAMPADD (all_ovr_objs[n]->data.high, 1);
1166 /* Add in any leftover room at the top. */
1167 if (next_low <= high)
1168 __mfu_register ((void *) next_low, high-next_low+1,
1169 __MF_TYPE_GUESS, name);
1171 /* XXX: future optimization: allow consecutive GUESS regions to
1172 be glued together. */
1173 CALL_REAL (free, all_ovr_objs);
1177 /* Quietly accept a non-GUESS region overlaying a GUESS
1178 region. Handle it by removing the GUESS region
1179 temporarily, then recursively adding this new object,
1180 and then the GUESS back. The latter will be split up
1181 by the recursive process above. */
1182 else if (ovr_obj->data.type == __MF_TYPE_GUESS)
1184 uintptr_t old_low = ovr_obj->data.low;
1185 uintptr_t old_high = ovr_obj->data.high;
1186 const char* old_name = ovr_obj->data.name;
1188 /* Now to recursively remove the guess piece, and
1189 reinsert them in the opposite order. Recursion
1190 should bottom out if another non-GUESS overlapping
1191 region is found for this new object (resulting in a
1192 violation), or if no further overlap occurs. The
1193 located GUESS region should end up being split up
1195 __mfu_unregister ((void *) old_low, old_high-old_low+1);
1196 __mfu_register ((void *) low, sz, type, name);
1197 __mfu_register ((void *) old_low, old_high-old_low+1,
1198 __MF_TYPE_GUESS, old_name);
1202 /* Alas, a genuine violation. */
1205 /* Two or more *real* mappings here. */
1206 __mf_violation ((void *) ptr, sz,
1207 (uintptr_t) __builtin_return_address (0), NULL,
1208 __MF_VIOL_REGISTER);
1212 /* No overlapping objects: AOK. */
1215 __mf_insert_new_object (low, high, type, name, pc);
1218 /* We could conceivably call __mf_check() here to prime the cache,
1219 but then the read_count/write_count field is not reliable. */
1223 } /* end switch (__mf_opts.mudflap_mode) */
/* Locked public entry point for object unregistration: recursion-
   protect and delegate to __mfu_unregister.  NOTE(review): return
   type, braces and LOCKTH/UNLOCKTH lines are missing from this copy.  */
1228 __mf_unregister (void *ptr, size_t sz)
1231 BEGIN_RECURSION_PROTECT ();
1232 __mfu_unregister (ptr, sz);
1233 END_RECURSION_PROTECT ();
/* Core unregistration: locate the single object whose low bound equals
   ptr, unlink it from the tree and the lookup cache, optionally wipe
   its memory (wipe_stack / wipe_heap), then either retire it into the
   per-type cemetery ring (persistent_count > 0) -- possibly evicting
   the previous occupant as del_obj -- or free it outright.  A leak
   warning is printed for never-accessed heap objects when print_leaks
   is set.  NOTE(review): case labels, braces, the assignment of
   old_obj from objs[0], the "plot++" advance, the free_queue logic,
   and #endif lines are missing from this copy -- skeleton only.
   NOTE(review): on the visible lines old_obj is initialized to NULL
   and dereferenced in the violation test at original line 1285 before
   any visible assignment; the assignment presumably sits in the
   missing lines 1281-1283 -- confirm upstream.  */
1239 __mfu_unregister (void *ptr, size_t sz)
1241 DECLARE (void, free, void *ptr);
1243 if (UNLIKELY (__mf_opts.sigusr1_report))
1244 __mf_sigusr1_respond ();
1246 TRACE ("unregister ptr=%p size=%lu\n", ptr, (unsigned long) sz);
1248 switch (__mf_opts.mudflap_mode)
1254 __mf_violation (ptr, sz,
1255 (uintptr_t) __builtin_return_address (0), NULL,
1256 __MF_VIOL_UNREGISTER);
1260 /* Clear the cache. */
1262 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1264 __mf_lookup_cache[0].low = MAXPTR;
1269 __mf_object_tree_t *old_obj = NULL;
1270 __mf_object_tree_t *del_obj = NULL; /* Object to actually delete. */
1271 __mf_object_tree_t *objs[1] = {NULL};
1272 unsigned num_overlapping_objs;
1274 /* Treat unknown size indication as 1. */
1275 if (sz == 0) sz = 1;
1277 num_overlapping_objs = __mf_find_objects ((uintptr_t) ptr,
1278 CLAMPSZ (ptr, sz), objs, 1);
1280 /* XXX: handle unregistration of big old GUESS region, that has since
1284 if (UNLIKELY (num_overlapping_objs != 1 ||
1285 (uintptr_t)ptr != old_obj->data.low)) /* XXX: what about sz? */
1287 __mf_violation (ptr, sz,
1288 (uintptr_t) __builtin_return_address (0), NULL,
1289 __MF_VIOL_UNREGISTER);
1293 __mf_unlink_object (old_obj);
1294 __mf_uncache_object (& old_obj->data);
1296 /* Wipe buffer contents if desired. */
1297 if ((__mf_opts.wipe_stack && old_obj->data.type == __MF_TYPE_STACK)
1298 || (__mf_opts.wipe_heap && (old_obj->data.type == __MF_TYPE_HEAP
1299 || old_obj->data.type == __MF_TYPE_HEAP_I)))
1301 memset ((void *) old_obj->data.low,
1303 (size_t) (old_obj->data.high - old_obj->data.low + 1));
1306 /* Manage the object cemetary. */
1307 if (__mf_opts.persistent_count > 0 &&
1308 old_obj->data.type >= 0 &&
1309 old_obj->data.type <= __MF_TYPE_MAX_CEM)
1311 old_obj->data.deallocated_p = 1;
1312 old_obj->left = old_obj->right = NULL;
1313 old_obj->data.dealloc_pc = (uintptr_t) __builtin_return_address (0);
1314 #if HAVE_GETTIMEOFDAY
1315 gettimeofday (& old_obj->data.dealloc_time, NULL);
1318 old_obj->data.dealloc_thread = pthread_self ();
1321 if (__mf_opts.backtrace > 0 && old_obj->data.type == __MF_TYPE_HEAP)
1322 old_obj->data.dealloc_backtrace_size =
1323 __mf_backtrace (& old_obj->data.dealloc_backtrace,
1326 /* Encourage this object to be displayed again in current epoch. */
1327 old_obj->data.description_epoch --;
1329 /* Put this object into the cemetary. This may require this plot to
1330 be recycled, and the previous resident to be designated del_obj. */
1332 unsigned row = old_obj->data.type;
1333 unsigned plot = __mf_object_dead_head [row];
1335 del_obj = __mf_object_cemetary [row][plot];
1336 __mf_object_cemetary [row][plot] = old_obj;
/* NOTE(review): the plot increment between lines 1336 and 1338
   is missing from this copy; the wrap test below implies it.  */
1338 if (plot == __mf_opts.persistent_count) plot = 0;
1339 __mf_object_dead_head [row] = plot;
1345 if (__mf_opts.print_leaks)
1347 if ((old_obj->data.read_count + old_obj->data.write_count) == 0 &&
1348 (old_obj->data.type == __MF_TYPE_HEAP
1349 || old_obj->data.type == __MF_TYPE_HEAP_I))
1353 "mudflap warning: unaccessed registered object:\n");
1354 __mf_describe_object (& old_obj->data);
1358 if (del_obj != NULL) /* May or may not equal old_obj. */
1360 if (__mf_opts.backtrace > 0)
1362 CALL_REAL(free, del_obj->data.alloc_backtrace);
1363 if (__mf_opts.persistent_count > 0)
1365 CALL_REAL(free, del_obj->data.dealloc_backtrace);
1368 CALL_REAL(free, del_obj);
1373 } /* end switch (__mf_opts.mudflap_mode) */
1376 if (__mf_opts.collect_stats)
1378 __mf_count_unregister ++;
1379 __mf_total_unregister_size += sz;
1383 /* ------------------------------------------------------------------------ */
1384 /* __mf_validate_live_object_tree, _object_cemetary */
/* Recursively verify the binary-search-tree invariant over the live-object
   tree: every node is non-NULL, not marked deallocated (when the cemetery
   is enabled), left children lie strictly below this node's [low,high]
   range and right children strictly above.
   NOTE(review): this extraction has dropped lines (braces and, presumably,
   the NULL guards around the child checks) -- confirm against upstream
   mf-runtime.c before editing.  */
1387 __mf_validate_live_object_tree (__mf_object_tree_t *obj)
1389 assert (obj != NULL);
/* A dead (cemetery) object must never remain linked into the live tree.  */
1391 if (__mf_opts.persistent_count > 0)
1392 assert (! obj->data.deallocated_p);
/* Left subtree must end strictly before this node's range begins.  */
1396 assert (obj->left->data.high < obj->data.low);
1397 __mf_validate_live_object_tree (obj->left);
/* Right subtree must begin strictly after this node's range ends.  */
1401 assert (obj->right->data.low > obj->data.high);
1402 __mf_validate_live_object_tree (obj->right);
/* Sanity-check the object cemetery: for each object class row, the
   dead-head index must be a valid plot number, and every buried object
   must be marked deallocated and fully unlinked (no tree children).  */
1407 __mf_validate_object_cemetary ()
1412 for (cls = 0; cls <= __MF_TYPE_MAX_CEM; cls++)
/* The circular-buffer head for this row must stay within bounds.  */
1414 assert (__mf_object_dead_head [cls] >= 0 &&
1415 __mf_object_dead_head [cls] < __mf_opts.persistent_count);
1416 for (i = 0; i < __mf_opts.persistent_count; i++)
1418 __mf_object_tree_t *obj = __mf_object_cemetary [cls][i];
/* NOTE(review): a NULL check on obj appears to have been dropped by the
   extraction; empty plots are expected in a partially filled row.  */
1421 assert (obj->data.deallocated_p);
1422 assert (obj->left == NULL);
1423 assert (obj->right == NULL);
/* Run both internal consistency checks: the live-object tree (if any
   objects are registered) and, when persistence is enabled, the cemetery.
   Called from lookup/link paths when __mf_opts.internal_checking is set.  */
1430 __mf_validate_objects ()
1432 if (__mf_object_root)
1433 __mf_validate_live_object_tree (__mf_object_root);
1435 if (__mf_opts.persistent_count > 0)
1436 __mf_validate_object_cemetary ();
/* Age every object in the subtree by halving its liveness counter
   (arithmetic decay), so recently accessed objects dominate the cache
   adaptation heuristics in __mf_adapt_cache / __mf_tree_analyze.
   NOTE(review): NULL guards around the recursive calls appear to have
   been dropped by this extraction -- confirm against upstream.  */
1441 __mf_age_tree (__mf_object_tree_t *obj)
1443 assert (obj != NULL);
/* Halve the access-recency weight; see liveness use in __mf_tree_analyze.  */
1444 obj->data.liveness = obj->data.liveness >> 1;
1447 __mf_age_tree (obj->left);
1449 __mf_age_tree (obj->right);
/* Accumulators filled by __mf_tree_analyze and consumed by the cache
   adaptation logic.  (The struct header and at least one field, obj_count,
   lie outside this extraction's view.)  */
1457 unsigned long total_size;
/* Number of objects with nonzero liveness (recently accessed).  */
1458 unsigned live_obj_count;
/* Sum of liveness over live objects.  */
1459 double total_weight;
/* Sum of (object size * liveness); a recency-weighted footprint.  */
1460 double weighted_size;
/* Per address bit: liveness mass of objects whose low address has that
   bit clear ([i][0]) vs. set ([i][1]); used to pick a shift that
   discriminates lively objects.  */
1461 unsigned long weighted_address_bits [sizeof (uintptr_t) * 8][2];
/* In-order walk of the live-object tree, accumulating size/liveness
   statistics into S for the lookup-cache adaptation heuristic.  Objects
   never read or written are skipped entirely.  */
1466 __mf_tree_analyze (__mf_object_tree_t *obj, struct tree_stats* s)
1468 assert (obj != NULL);
/* NOTE(review): NULL guard for obj->left seems stripped by extraction.  */
1471 __mf_tree_analyze (obj->left, s);
1473 /* Exclude never-accessed objects. */
1474 if (obj->data.read_count + obj->data.write_count)
1477 s->total_size += (obj->data.high - obj->data.low + 1);
/* Only objects with recent accesses (nonzero liveness) contribute to
   the weighted statistics below.  */
1479 if (obj->data.liveness)
1484 VERBOSE_TRACE ("analyze low=%p live=%u name=`%s'\n",
1485 (void *) obj->data.low, obj->data.liveness, obj->data.name);
1487 s->live_obj_count ++;
1488 s->total_weight += (double) obj->data.liveness;
1490 (double) (obj->data.high - obj->data.low + 1) *
1491 (double) obj->data.liveness;
/* Tally liveness per address bit of the object's low address; consumed
   by the shift-selection loop in the cache adaptation code.  */
1493 addr = obj->data.low;
1494 for (i=0; i<sizeof(uintptr_t) * 8; i++)
1496 unsigned bit = addr & 1;
1497 s->weighted_address_bits[i][bit] += obj->data.liveness;
/* NOTE(review): the addr >>= 1 step expected between iterations is not
   visible in this extraction -- confirm upstream.  */
1504 __mf_tree_analyze (obj->right, s);
1511 struct tree_stats s;
1512 uintptr_t new_mask = 0;
1513 unsigned char new_shift;
1514 float cache_utilization;
1516 static float smoothed_new_shift = -1.0;
1519 memset (&s, 0, sizeof (s));
1520 if (__mf_object_root)
1521 __mf_tree_analyze (__mf_object_root, & s);
1523 /* Maybe we're dealing with funny aging/adaptation parameters, or an
1524 empty tree. Just leave the cache alone in such cases, rather
1525 than risk dying by division-by-zero. */
1526 if (! (s.obj_count > 0) && (s.live_obj_count > 0) && (s.total_weight > 0.0))
1529 /* Guess a good value for the shift parameter by finding an address bit that is a
1530 good discriminant of lively objects. */
1532 for (i=0; i<sizeof (uintptr_t)*8; i++)
1534 float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
1535 if (max_value < value) max_value = value;
1537 for (i=0; i<sizeof (uintptr_t)*8; i++)
1539 float shoulder_factor = 0.7; /* Include slightly less popular bits too. */
1540 float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
1541 if (value >= max_value * shoulder_factor)
1544 if (smoothed_new_shift < 0) smoothed_new_shift = __mf_lc_shift;
1545 /* Converge toward this slowly to reduce flapping. */
1546 smoothed_new_shift = 0.9*smoothed_new_shift + 0.1*i;
1547 new_shift = (unsigned) (smoothed_new_shift + 0.5);
1548 assert (new_shift < sizeof (uintptr_t)*8);
1550 /* Count number of used buckets. */
1551 cache_utilization = 0.0;
1552 for (i = 0; i < (1 + __mf_lc_mask); i++)
1553 if (__mf_lookup_cache[i].low != 0 || __mf_lookup_cache[i].high != 0)
1554 cache_utilization += 1.0;
1555 cache_utilization /= (1 + __mf_lc_mask);
1557 new_mask |= 0x3ff; /* XXX: force a large cache. */
1558 new_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
1560 VERBOSE_TRACE ("adapt cache obj=%u/%u sizes=%lu/%.0f/%.0f => "
1561 "util=%u%% m=%p s=%u\n",
1562 s.obj_count, s.live_obj_count, s.total_size, s.total_weight, s.weighted_size,
1563 (unsigned)(cache_utilization*100.0), (void *) new_mask, new_shift);
1565 /* We should reinitialize cache if its parameters have changed. */
1566 if (new_mask != __mf_lc_mask ||
1567 new_shift != __mf_lc_shift)
1569 __mf_lc_mask = new_mask;
1570 __mf_lc_shift = new_shift;
1572 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1574 __mf_lookup_cache[0].low = MAXPTR;
1581 /* __mf_find_object[s] */
1583 /* Find overlapping live objects between [low,high]. Return up to
1584 max_objs of their pointers in objs[]. Return total count of
1585 overlaps (may exceed max_objs). */
1587 /* XXX: track traversal statistics, like average depth, balance. */
/* Recursive worker for live-object range queries: count (and, up to
   max_objs, record in objs[]) every node in *nodep whose [low,high] range
   overlaps the query range.  As a side effect, rotate a child up over its
   parent when the child's liveness is higher, keeping hot objects near
   the root (a simple self-adjusting-tree heuristic).
   NOTE(review): several lines (count initialization, slot-recording body,
   rotation pointer swaps) are missing from this extraction; comments
   describe only the visible structure.  */
1590 __mf_find_objects_rec (uintptr_t low, uintptr_t high, __mf_object_tree_t **nodep,
1591 __mf_object_tree_t **objs, unsigned max_objs)
1594 __mf_object_tree_t *node = *nodep;
1596 assert (low <= high);
1597 assert (max_objs == 0 || objs != NULL);
1599 if (UNLIKELY (node == NULL)) return 0;
1601 /* Traverse down left subtree. */
/* The left query range is clipped at this node's low bound.  */
1603 if (low < node->data.low)
1604 count += __mf_find_objects_rec (low, min(high, node->data.low),
1605 & node->left, objs, max_objs);
1607 /* Track the used slots of objs[]. */
1608 if (count < max_objs)
1618 /* Check for overlap with this node. */
1619 if (high >= node->data.low && low <= node->data.high)
1622 if (max_objs > 0) /* Any room left? */
1630 /* Traverse down right subtree. */
1631 if (high > node->data.high)
1632 count += __mf_find_objects_rec (max (low, node->data.high), high,
1633 & node->right, objs, max_objs);
1634 /* There is no need to manipulate objs/max_objs any further. */
1637 /* Rotate a child node up if its access count is higher. */
/* Left rotation: left child is livelier than this node AND livelier than
   the right child (if any).  */
1638 if (UNLIKELY ((node->left && node->left->data.liveness > node->data.liveness) &&
1639 ((!node->right || (node->right &&
1640 node->left->data.liveness >
1641 node->right->data.liveness)))))
1643 __mf_object_tree_t *l = node->left;
1644 __mf_object_tree_t *l_r = l->right;
1649 __mf_treerot_left ++;
/* Mirror image: promote the right child.  */
1651 else if (UNLIKELY ((node->right && node->right->data.liveness > node->data.liveness) &&
1652 ((!node->left || (node->left &&
1653 node->right->data.liveness >
1654 node->left->data.liveness)))))
1656 __mf_object_tree_t *r = node->right;
1657 __mf_object_tree_t *r_l = r->left;
1662 __mf_treerot_right ++;
/* Public entry point for live-object range queries: returns the total
   number of live objects overlapping [ptr_low, ptr_high], storing up to
   max_objs of them in objs[].  Optionally runs the internal consistency
   checks first.  */
1670 __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
1671 __mf_object_tree_t **objs, unsigned max_objs)
1673 if (UNLIKELY(__mf_opts.internal_checking))
1674 __mf_validate_objects ();
1676 return __mf_find_objects_rec (ptr_low, ptr_high, & __mf_object_root, objs, max_objs);
1679 /* __mf_link_object */
/* Insert PTR into the subtree rooted at *LINK, descending by strict
   range comparison.  Overlapping (duplicate) registrations are fatal.
   NOTE(review): the body of the node==NULL branch (the actual insertion
   at *link) is missing from this extraction.  */
1682 __mf_link_object2 (__mf_object_tree_t *ptr, __mf_object_tree_t **link)
1684 __mf_object_tree_t *node = *link;
1686 assert (ptr != NULL);
1687 if (UNLIKELY(node == NULL))
/* Entirely below this node: descend left.  */
1693 if (ptr->data.high < node->data.low)
1694 return __mf_link_object2 (ptr, & node->left);
/* Entirely above this node: descend right.  */
1695 else if (ptr->data.low > node->data.high)
1696 return __mf_link_object2 (ptr, & node->right);
/* Ranges overlap an existing object: the tree invariant forbids this.  */
1698 abort (); /* XXX: duplicate object */
/* Insert PTR into the live-object tree at the root, after optionally
   running the internal consistency checks.  */
1703 __mf_link_object (__mf_object_tree_t *ptr)
1705 if (UNLIKELY(__mf_opts.internal_checking))
1706 __mf_validate_objects ();
1708 return __mf_link_object2 (ptr, & __mf_object_root);
1711 /* __mf_unlink_object */
/* Remove PTR from the subtree rooted at *LINK.  When the node is found,
   one of its subtrees is promoted into its place and the other is
   re-linked beneath it; which side is promoted alternates across calls
   to keep the tree roughly balanced.  A miss is fatal.
   NOTE(review): the lines that actually reassign *link before the
   re-link calls are missing from this extraction.  */
1714 __mf_unlink_object2 (__mf_object_tree_t *ptr, __mf_object_tree_t **link)
1716 __mf_object_tree_t *node = *link;
1718 assert (ptr != NULL);
1719 if (UNLIKELY(node == ptr))
/* Static toggle: deliberately persists across calls to alternate sides.  */
1721 static unsigned promote_left_p = 0;
1722 promote_left_p = 1 - promote_left_p;
1724 /* Alternate promoting the left & right subtrees. */
1728 if (ptr->right != NULL)
1729 __mf_link_object2 (ptr->right, link);
1734 if (ptr->left != NULL)
1735 __mf_link_object2 (ptr->left, link);
/* Not this node: descend by strict range comparison, as in link.  */
1741 if (ptr->data.high < node->data.low)
1742 return __mf_unlink_object2 (ptr, & node->left);
1743 else if (ptr->data.low > node->data.high)
1744 return __mf_unlink_object2 (ptr, & node->right);
1746 abort (); /* XXX: missing object; should fail more gracefully. */
/* Remove NODE from the live-object tree, starting at the root.  */
1750 __mf_unlink_object (__mf_object_tree_t *node)
1752 __mf_unlink_object2 (node, & __mf_object_root);
1755 /* __mf_find_dead_objects */
1757 /* Find overlapping dead objects between [low,high]. Return up to
1758 max_objs of their pointers in objs[]. Return total count of
1759 overlaps (may exceed max_objs). */
/* Search the cemetery for dead objects overlapping [low,high]; return the
   overlap count and record up to max_objs pointers in objs[].  Only
   meaningful when persistence is enabled.  The search starts at the most
   recent burials and widens exponentially backward in time.
   NOTE(review): loop-variable declarations, the early-exit on a nonzero
   count, and the final return are missing from this extraction.  */
1762 __mf_find_dead_objects (uintptr_t low, uintptr_t high,
1763 __mf_object_tree_t **objs, unsigned max_objs)
1765 if (__mf_opts.persistent_count > 0)
1768 unsigned recollection = 0;
1771 assert (low <= high);
1772 assert (max_objs == 0 || objs != NULL);
1774 /* Widen the search from the most recent plots in each row, looking
1775 backward in time. */
1777 while (recollection < __mf_opts.persistent_count)
1781 for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
/* Start at the head (most recent burial) of this row.  */
1786 plot = __mf_object_dead_head [row];
1787 for (i = 0; i <= recollection; i ++)
1789 __mf_object_tree_t *obj;
1791 /* Look backward through row: it's a circular buffer. */
1792 if (plot > 0) plot --;
1793 else plot = __mf_opts.persistent_count - 1;
1795 obj = __mf_object_cemetary [row][plot];
1796 if (obj && obj->data.low <= high && obj->data.high >= low)
1798 /* Found an overlapping dead object! */
1799 if (count < max_objs)
1809 /* Look farther back in time. */
/* Exponential widening: 1, 3, 7, ... plots per row per pass.  */
1810 recollection = (recollection * 2) + 1;
1819 /* __mf_describe_object */
/* Print a human-readable description of OBJ to stderr: name, bounds,
   size, type, access counts, allocation (and, for cemetery objects,
   deallocation) time/pc/thread, and saved backtraces.  In abbreviated
   mode, an object already described in the current epoch gets only a
   one-line summary.  Calling with state that resets the epoch is handled
   by the caller (see __mf_describe_object (NULL) in the report path).
   NOTE(review): several fprintf call-heads, braces, and #ifdef lines are
   missing from this extraction.  */
1822 __mf_describe_object (__mf_object_t *obj)
/* Epoch counter shared across calls; used to suppress repeats.  */
1824 static unsigned epoch = 0;
1831 if (__mf_opts.abbreviate && obj->description_epoch == epoch)
/* Abbreviated form: one line, then return (return not visible here).  */
1834 "mudflap object %p: name=`%s'\n",
1835 (void *) obj, (obj->name ? obj->name : ""));
1839 obj->description_epoch = epoch;
/* Full form: multi-line description.  */
1842 "mudflap object %p: name=`%s'\n"
1843 "bounds=[%p,%p] size=%lu area=%s check=%ur/%uw liveness=%u%s\n"
1844 "alloc time=%lu.%06lu pc=%p"
1849 (void *) obj, (obj->name ? obj->name : ""),
1850 (void *) obj->low, (void *) obj->high,
1851 (unsigned long) (obj->high - obj->low + 1),
1852 (obj->type == __MF_TYPE_NOACCESS ? "no-access" :
1853 obj->type == __MF_TYPE_HEAP ? "heap" :
1854 obj->type == __MF_TYPE_HEAP_I ? "heap-init" :
1855 obj->type == __MF_TYPE_STACK ? "stack" :
1856 obj->type == __MF_TYPE_STATIC ? "static" :
1857 obj->type == __MF_TYPE_GUESS ? "guess" :
1859 obj->read_count, obj->write_count, obj->liveness,
1860 obj->watching_p ? " watching" : "",
1861 obj->alloc_time.tv_sec, obj->alloc_time.tv_usec,
1862 (void *) obj->alloc_pc
/* Thread id printed only under the threads build (#ifdef not visible).  */
1864 , (unsigned) obj->alloc_thread
1868 if (__mf_opts.backtrace > 0)
1871 for (i=0; i<obj->alloc_backtrace_size; i++)
1872 fprintf (stderr, " %s\n", obj->alloc_backtrace[i]);
/* Deallocation details exist only for cemetery (persistent) objects.  */
1875 if (__mf_opts.persistent_count > 0)
1877 if (obj->deallocated_p)
1879 fprintf (stderr, "dealloc time=%lu.%06lu pc=%p"
1884 obj->dealloc_time.tv_sec, obj->dealloc_time.tv_usec,
1885 (void *) obj->dealloc_pc
1887 , (unsigned) obj->dealloc_thread
1892 if (__mf_opts.backtrace > 0)
1895 for (i=0; i<obj->dealloc_backtrace_size; i++)
1896 fprintf (stderr, " %s\n", obj->dealloc_backtrace[i]);
/* In-order walk of the live tree printing every remaining heap object as
   a leak; returns a cumulative count via a static counter.  Call with
   NULL first to reset the counter between reports.  */
1903 __mf_report_leaks (__mf_object_tree_t *node)
1905 /* The counter is amongst recursive calls, so
1906 that cumulative numbers are printed below. */
1907 static unsigned count = 0;
1909 if (node == NULL) /* Reset */
1915 /* Inorder traversal. */
1917 __mf_report_leaks (node->left);
/* Only heap allocations count as leaks; stack/static/guess do not.  */
1918 if (node->data.type == __MF_TYPE_HEAP
1919 || node->data.type == __MF_TYPE_HEAP_I)
1922 fprintf (stderr, "Leaked object %u:\n", count);
1923 __mf_describe_object (& node->data);
1926 __mf_report_leaks (node->right);
1931 /* ------------------------------------------------------------------------ */
/* Body of the end-of-run report routine (its signature lies in a gap of
   this extraction): under recursion protection, dump call-count
   statistics, lookup-cache utilization, live/zombie object counts, and
   (in check mode with print_leaks) a leak report.
   NOTE(review): many fprintf call-heads, braces, and local declarations
   are missing from this view.  */
1938 BEGIN_RECURSION_PROTECT ();
1940 END_RECURSION_PROTECT ();
1947 if (__mf_opts.collect_stats)
/* Aggregate counters accumulated by the check/register/unregister paths.  */
1952 "calls to __mf_check: %lu rot: %lu/%lu\n"
1953 " __mf_register: %lu [%luB, %luB, %luB, %luB, %luB]\n"
1954 " __mf_unregister: %lu [%luB]\n"
1955 " __mf_violation: [%lu, %lu, %lu, %lu, %lu]\n",
1956 __mf_count_check, __mf_treerot_left, __mf_treerot_right,
1957 __mf_count_register,
1958 __mf_total_register_size[0], __mf_total_register_size[1],
1959 __mf_total_register_size[2], __mf_total_register_size[3],
1960 __mf_total_register_size[4], /* XXX */
1961 __mf_count_unregister, __mf_total_unregister_size,
1962 __mf_count_violation[0], __mf_count_violation[1],
1963 __mf_count_violation[2], __mf_count_violation[3],
1964 __mf_count_violation[4]);
1967 "calls with reentrancy: %lu\n", __mf_reentrancy);
1970 " lock contention: %lu\n", __mf_lock_contention);
1973 /* Lookup cache stats. */
1976 unsigned max_reuse = 0;
1977 unsigned num_used = 0;
1978 unsigned num_unused = 0;
1980 for (i = 0; i < LOOKUP_CACHE_SIZE; i++)
1982 if (__mf_lookup_cache_reusecount[i])
1986 if (max_reuse < __mf_lookup_cache_reusecount[i])
1987 max_reuse = __mf_lookup_cache_reusecount[i];
1989 fprintf (stderr, "lookup cache slots used: %u unused: %u peak-reuse: %u\n",
1990 num_used, num_unused, max_reuse);
/* A zero-max_objs query returns only the overlap count: cheap census.  */
1994 unsigned live_count;
1995 live_count = __mf_find_objects (MINPTR, MAXPTR, NULL, 0);
1996 fprintf (stderr, "number of live objects: %u\n", live_count);
1999 if (__mf_opts.persistent_count > 0)
2001 unsigned dead_count = 0;
2003 for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
2004 for (plot = 0 ; plot < __mf_opts.persistent_count; plot ++)
2005 if (__mf_object_cemetary [row][plot] != 0)
2007 fprintf (stderr, " zombie objects: %u\n", dead_count);
2010 if (__mf_opts.print_leaks && (__mf_opts.mudflap_mode == mode_check))
2013 extern void * __mf_wrap_alloca_indirect (size_t c);
2015 /* Free up any remaining alloca()'d blocks. */
2016 __mf_wrap_alloca_indirect (0);
2017 __mf_describe_object (NULL); /* Reset description epoch. */
2018 __mf_report_leaks (NULL); /* Reset cumulative count. */
2019 l = __mf_report_leaks (__mf_object_root);
2020 fprintf (stderr, "number of leaked objects: %u\n", l);
2024 /* __mf_backtrace */
/* Capture a stack traceback, trim libmudflap-internal frames, and return
   the remaining frame count with symbolized strings in *SYMBOLS.
   Uses glibc backtrace()/backtrace_symbols() when available; otherwise
   unrolls __builtin_return_address and formats raw addresses by hand.
   Caller owns *SYMBOLS (a single allocation: pointer table followed by
   string storage in the hand-rolled path).
   NOTE(review): #else/#endif lines, the FETCH(n) invocations, and some
   declarations are missing from this extraction.  */
2027 __mf_backtrace (char ***symbols, void *guess_pc, unsigned guess_omit_levels)
/* Over-allocate by guess_omit_levels so trimming still leaves
   __mf_opts.backtrace usable frames.  */
2030 unsigned pc_array_size = __mf_opts.backtrace + guess_omit_levels;
2031 unsigned remaining_size;
2032 unsigned omitted_size = 0;
/* Use the real allocator entry points: the wrapped ones would recurse.  */
2034 DECLARE (void, free, void *ptr);
2035 DECLARE (void *, calloc, size_t c, size_t n);
2036 DECLARE (void *, malloc, size_t n);
2038 pc_array = CALL_REAL (calloc, pc_array_size, sizeof (void *) );
2039 #ifdef HAVE_BACKTRACE
2040 pc_array_size = backtrace (pc_array, pc_array_size);
2042 #define FETCH(n) do { if (pc_array_size >= n) { \
2043 pc_array[n] = __builtin_return_address(n); \
2044 if (pc_array[n] == 0) pc_array_size = n; } } while (0)
2046 /* Unroll some calls __builtin_return_address because this function
2047 only takes a literal integer parameter. */
2050 /* XXX: __builtin_return_address sometimes crashes (!) on >0 arguments,
2051 rather than simply returning 0. :-( */
2060 if (pc_array_size > 8) pc_array_size = 9;
2062 if (pc_array_size > 0) pc_array_size = 1;
2068 /* We want to trim the first few levels of the stack traceback,
2069 since they contain libmudflap wrappers and junk. If pc_array[]
2070 ends up containing a non-NULL guess_pc, then trim everything
2071 before that. Otherwise, omit the first guess_omit_levels
2074 if (guess_pc != NULL)
2075 for (i=0; i<pc_array_size; i++)
2076 if (pc_array [i] == guess_pc)
2079 if (omitted_size == 0) /* No match? */
2080 if (pc_array_size > guess_omit_levels)
2081 omitted_size = guess_omit_levels;
2083 remaining_size = pc_array_size - omitted_size;
2085 #ifdef HAVE_BACKTRACE_SYMBOLS
2086 *symbols = backtrace_symbols (pc_array + omitted_size, remaining_size);
2089 /* Let's construct a buffer by hand. It will have <remaining_size>
2090 char*'s at the front, pointing at individual strings immediately
2095 enum { perline = 30 };
2096 buffer = CALL_REAL (malloc, remaining_size * (perline + sizeof(char *)));
2097 pointers = (char **) buffer;
2098 chars = (char *)buffer + (remaining_size * sizeof (char *));
2099 for (i = 0; i < remaining_size; i++)
2101 pointers[i] = chars;
/* Each formatted "[0xADDR]" must fit within perline bytes.  */
2102 sprintf (chars, "[0x%p]", pc_array [omitted_size + i]);
2103 chars = chars + perline;
2105 *symbols = pointers;
2108 CALL_REAL (free, pc_array);
2110 return remaining_size;
2113 /* ------------------------------------------------------------------------ */
2114 /* __mf_violation */
/* Report and act on an access violation at PTR/SZ, detected at program
   counter PC with optional source LOCATION and violation TYPE
   (__MF_VIOL_*).  In verbose mode prints a banner, a backtrace, and a
   description of nearby live and dead objects (widening the search
   geometrically until something is found or tries run out), then applies
   the configured violation_mode (e.g. raise SIGSEGV, spawn gdb).
   NOTE(review): braces, some declarations (now, symbols, tries, i,
   dead_p, buf), and #ifdef/case labels are missing from this extraction.  */
2117 __mf_violation (void *ptr, size_t sz, uintptr_t pc,
2118 const char *location, int type)
/* Monotonic id across violations in this run.  */
2121 static unsigned violation_number;
2122 DECLARE(void, free, void *ptr);
2124 TRACE ("violation pc=%p location=%s type=%d ptr=%p size=%lu\n",
2126 (location != NULL ? location : ""), type, ptr, (unsigned long) sz);
/* Clamp out-of-range types into bucket 0 of the violation counters.  */
2128 if (__mf_opts.collect_stats)
2129 __mf_count_violation [(type < 0) ? 0 :
2130 (type > __MF_VIOL_WATCH) ? 0 :
2133 /* Print out a basic warning message. */
2134 if (__mf_opts.verbose_violations)
2137 unsigned num_helpful = 0;
2139 #if HAVE_GETTIMEOFDAY
2140 gettimeofday (& now, NULL);
2143 violation_number ++;
2146 "mudflap violation %u (%s): time=%lu.%06lu "
2147 "ptr=%p size=%lu\npc=%p%s%s%s\n",
2149 ((type == __MF_VIOL_READ) ? "check/read" :
2150 (type == __MF_VIOL_WRITE) ? "check/write" :
2151 (type == __MF_VIOL_REGISTER) ? "register" :
2152 (type == __MF_VIOL_UNREGISTER) ? "unregister" :
2153 (type == __MF_VIOL_WATCH) ? "watch" : "unknown"),
2154 now.tv_sec, now.tv_usec,
2155 (void *) ptr, (unsigned long)sz, (void *) pc,
2156 (location != NULL ? " location=`" : ""),
2157 (location != NULL ? location : ""),
2158 (location != NULL ? "'" : ""));
2160 if (__mf_opts.backtrace > 0)
/* Trim frames up to PC (or 2 wrapper levels) from the traceback.  */
2165 num = __mf_backtrace (& symbols, (void *) pc, 2);
2166 /* Note: backtrace_symbols calls malloc(). But since we're in
2167 __mf_violation and presumably __mf_check, it'll detect
2168 recursion, and not put the new string into the database. */
2170 for (i=0; i<num; i++)
2171 fprintf (stderr, " %s\n", symbols[i]);
2173 /* Calling free() here would trigger a violation. */
2174 CALL_REAL(free, symbols);
2178 /* Look for nearby objects. For this, we start with s_low/s_high
2179 pointing to the given area, looking for overlapping objects.
2180 If none show up, widen the search area and keep looking. */
/* Ensure a non-degenerate search range even for zero-size checks.  */
2182 if (sz == 0) sz = 1;
2184 for (dead_p = 0; dead_p <= 1; dead_p ++) /* for dead_p in 0 1 */
2186 enum {max_objs = 3}; /* magic */
2187 __mf_object_tree_t *objs[max_objs];
2188 unsigned num_objs = 0;
2189 uintptr_t s_low, s_high;
2193 s_low = (uintptr_t) ptr;
2194 s_high = CLAMPSZ (ptr, sz);
2196 while (tries < 16) /* magic */
/* First pass searches the live tree, second pass the cemetery.  */
2199 num_objs = __mf_find_dead_objects (s_low, s_high, objs, max_objs);
2201 num_objs = __mf_find_objects (s_low, s_high, objs, max_objs);
2203 if (num_objs) /* good enough */
2208 /* XXX: tune this search strategy. It's too dependent on
2209 sz, which can vary from 1 to very big (when array index
2210 checking) numbers. */
/* Quadratic widening, clamped to the address-space bounds.  */
2211 s_low = CLAMPSUB (s_low, (sz * tries * tries));
2212 s_high = CLAMPADD (s_high, (sz * tries * tries));
2215 for (i = 0; i < min (num_objs, max_objs); i++)
2217 __mf_object_t *obj = & objs[i]->data;
2218 uintptr_t low = (uintptr_t) ptr;
2219 uintptr_t high = CLAMPSZ (ptr, sz);
/* Distances of the checked region's endpoints relative to the object:
   before/after the object, or offset into it.  */
2220 unsigned before1 = (low < obj->low) ? obj->low - low : 0;
2221 unsigned after1 = (low > obj->high) ? low - obj->high : 0;
2222 unsigned into1 = (high >= obj->low && low <= obj->high) ? low - obj->low : 0;
2223 unsigned before2 = (high < obj->low) ? obj->low - high : 0;
2224 unsigned after2 = (high > obj->high) ? high - obj->high : 0;
2225 unsigned into2 = (high >= obj->low && low <= obj->high) ? high - obj->low : 0;
2227 fprintf (stderr, "Nearby object %u: checked region begins %uB %s and ends %uB %s\n",
2228 num_helpful + i + 1,
2229 (before1 ? before1 : after1 ? after1 : into1),
2230 (before1 ? "before" : after1 ? "after" : "into"),
2231 (before2 ? before2 : after2 ? after2 : into2),
2232 (before2 ? "before" : after2 ? "after" : "into"));
2233 __mf_describe_object (obj);
2235 num_helpful += num_objs;
2238 fprintf (stderr, "number of nearby objects: %u\n", num_helpful);
2241 /* How to finally handle this violation? */
2242 switch (__mf_opts.violation_mode)
2247 kill (getpid(), SIGSEGV);
2254 snprintf (buf, 128, "gdb --pid=%u", (unsigned) getpid ());
2256 /* XXX: should probably fork() && sleep(GDB_WAIT_PARAMETER)
2257 instead, and let the forked child execlp() gdb. That way, this
2258 subject process can be resumed under the supervision of gdb.
2259 This can't happen now, since system() only returns when gdb
2260 dies. In that case, we need to beware of starting a second
2261 concurrent gdb child upon the next violation. (But if the first
2262 gdb dies, then starting a new one is appropriate.) */
2267 /* ------------------------------------------------------------------------ */
/* Public API: mark all objects overlapping [ptr, ptr+sz) as watched
   (flag=1) under recursion protection; returns the changed-object count
   from __mf_watch_or_not.  NOTE(review): the rc declaration, locking
   lines, and return are missing from this extraction.  */
2270 unsigned __mf_watch (void *ptr, size_t sz)
2274 BEGIN_RECURSION_PROTECT ();
2275 rc = __mf_watch_or_not (ptr, sz, 1);
2276 END_RECURSION_PROTECT ();
/* Public API: clear the watch flag (flag=0) on all objects overlapping
   [ptr, ptr+sz); mirror of __mf_watch.  NOTE(review): surrounding
   declaration/protection/return lines are missing from this extraction.  */
2281 unsigned __mf_unwatch (void *ptr, size_t sz)
2285 rc = __mf_watch_or_not (ptr, sz, 0);
/* Worker for __mf_watch/__mf_unwatch: find every live object overlapping
   [ptr, ptr+sz), set its watching_p to FLAG, and evict changed objects
   from the lookup cache so the next access re-enters __mf_check().
   Presumably returns the number of objects whose flag changed (the
   counter increment and return are not visible in this extraction).  */
2292 __mf_watch_or_not (void *ptr, size_t sz, char flag)
2294 uintptr_t ptr_high = CLAMPSZ (ptr, sz);
2295 uintptr_t ptr_low = (uintptr_t) ptr;
2298 TRACE ("%s ptr=%p size=%lu\n",
2299 (flag ? "watch" : "unwatch"), ptr, (unsigned long) sz);
2301 switch (__mf_opts.mudflap_mode)
2311 __mf_object_tree_t **all_ovr_objs;
/* Real allocator entry points to avoid recursing into the wrappers.  */
2314 DECLARE (void *, malloc, size_t c);
2315 DECLARE (void, free, void *p);
/* Two-pass query: first count overlaps, then collect exactly that many.  */
2317 obj_count = __mf_find_objects (ptr_low, ptr_high, NULL, 0);
2318 VERBOSE_TRACE (" %u:", obj_count);
2320 all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_tree_t *) *
2322 if (all_ovr_objs == NULL) abort ();
2323 n = __mf_find_objects (ptr_low, ptr_high, all_ovr_objs, obj_count);
2324 assert (n == obj_count);
2326 for (n = 0; n < obj_count; n ++)
2328 __mf_object_t *obj = & (all_ovr_objs[n]->data);
2330 VERBOSE_TRACE (" [%p]", (void *) obj);
2331 if (obj->watching_p != flag)
2333 obj->watching_p = flag;
2336 /* Remove object from cache, to ensure next access
2337 goes through __mf_check(). */
2339 __mf_uncache_object (obj);
2342 CALL_REAL (free, all_ovr_objs);
/* SIGUSR1 handler: just record the signal; the real work happens later
   in __mf_sigusr1_respond, outside async-signal context.  */
2352 __mf_sigusr1_handler (int num)
2354 __mf_sigusr1_received ++;
2357 /* Install or remove SIGUSR1 handler as necessary.
2358 Also, respond to a received pending SIGUSR1. */
/* Install or remove the SIGUSR1 handler to track the sigusr1_report
   option, and service any signals queued since the last call (which,
   per the visible code, triggers while in the reentrant state).
   NOTE(review): braces and the action taken for a pending signal
   (presumably the report call) are missing from this extraction.  */
2360 __mf_sigusr1_respond ()
2362 static int handler_installed;
2365 /* Manage handler */
2366 if (__mf_opts.sigusr1_report && ! handler_installed)
2368 signal (SIGUSR1, __mf_sigusr1_handler);
2369 handler_installed = 1;
2371 else if(! __mf_opts.sigusr1_report && handler_installed)
2373 signal (SIGUSR1, SIG_DFL);
2374 handler_installed = 0;
2378 /* Manage enqueued signals */
2379 if (__mf_sigusr1_received > __mf_sigusr1_handled)
2381 __mf_sigusr1_handled ++;
2382 assert (__mf_state == reentrant);
2384 handler_installed = 0; /* We may need to re-enable signal; this might be a SysV library. */
2389 /* XXX: provide an alternative __assert_fail function that cannot
2390 fail due to libmudflap infinite recursion. */
/* Minimal async-signal-safe unsigned-to-decimal conversion written to FD
   via write(2); used by the custom __assert_fail below, which must not
   call into stdio/malloc (libmudflap could recurse).  Digits are built
   from the rightmost buffer position backward.
   NOTE(review): the buffer/index declarations and the n /= 10 +
   termination step are missing from this extraction.  */
2394 write_itoa (int fd, unsigned n)
/* 4 chars per byte of n comfortably bounds the decimal digit count.  */
2396 enum x { bufsize = sizeof(n)*4 };
2400 for (i=0; i<bufsize-1; i++)
2402 unsigned digit = n % 10;
2403 buf[bufsize-2-i] = digit + '0';
/* m points at the most significant digit written.  */
2407 char *m = & buf [bufsize-2-i];
2408 buf[bufsize-1] = '\0';
2409 write (fd, m, strlen(m));
2417 __assert_fail (const char *msg, const char *file, unsigned line, const char *func)
2419 #define write2(string) write (2, (string), strlen ((string)));
2423 write_itoa (2, (unsigned) pthread_self ());
2426 write2(": assertion failure: `");
2427 write (2, msg, strlen (msg));
2429 write (2, func, strlen (func));
2431 write (2, file, strlen (file));
2433 write_itoa (2, line);