diff --git a/libmudflap/mf-runtime.c b/libmudflap/mf-runtime.c
index 176898830e1..5d3e3a2275f 100644
--- a/libmudflap/mf-runtime.c
+++ b/libmudflap/mf-runtime.c
@@ -1,5 +1,6 @@
 /* Mudflap: narrow-pointer bounds-checking by tree rewriting.
-   Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+   Copyright (C) 2002, 2003, 2004, 2005, 2006, 2008, 2009, 2010, 2011, 2012
+   Free Software Foundation, Inc.
    Contributed by Frank Ch. Eigler <fche@redhat.com>
    and Graydon Hoare <graydon@redhat.com>
    Splay Tree code originally by Mark Mitchell <mark@markmitchell.com>,
@@ -9,36 +10,31 @@
 This file is part of GCC.
 
 GCC is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
-Software Foundation; either version 2, or (at your option) any later
+Software Foundation; either version 3, or (at your option) any later
 version.
 
-In addition to the permissions in the GNU General Public License, the
-Free Software Foundation gives you unlimited permission to link the
-compiled version of this file into combinations with other programs,
-and to distribute those combinations without any restriction coming
-from the use of this file.  (The General Public License restrictions
-do apply in other respects; for example, they cover modification of
-the file, and distribution when not linked into a combine
-executable.)
-
 GCC is distributed in the hope that it will be useful, but WITHOUT
 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
 License for more details.
 
-You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING.  If not, write to the Free
-Software Foundation, 59 Temple Place - Suite 330, Boston, MA
-02111-1307, USA.  */
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+<http://www.gnu.org/licenses/>.  */
 
 #include "config.h"
 
 /* These attempt to coax various unix flavours to declare all our
    needed tidbits in the system headers.  */
-#if !defined(__FreeBSD__)
+#if !defined(__FreeBSD__) && !defined(__APPLE__)
 #define _POSIX_SOURCE
 #endif /* Some BSDs break <sys/socket.h> if this is defined. */
-#define _GNU_SOURCE 
+#define _GNU_SOURCE
 #define _XOPEN_SOURCE
 #define _BSD_TYPES
 #define __EXTENSIONS__
@@ -141,26 +137,31 @@ static void mfsplay_tree_rebalance (mfsplay_tree sp);
 #define __MF_VIOL_WATCH 5
 
 /* Protect against recursive calls.  */
-#define BEGIN_RECURSION_PROTECT() do { \
-  if (UNLIKELY (__mf_state == reentrant)) { \
-  write (2, "mf: erroneous reentrancy detected in `", 38); \
-  write (2, __PRETTY_FUNCTION__, strlen(__PRETTY_FUNCTION__)); \
-  write (2, "'\n", 2); \
-  abort (); } \
-  __mf_state = reentrant; \
-  } while (0)
-#define END_RECURSION_PROTECT() do { \
-  __mf_state = active; \
-  } while (0)
+static void
+begin_recursion_protect1 (const char *pf)
+{
+  if (__mf_get_state () == reentrant)
+    {
+      write (2, "mf: erroneous reentrancy detected in `", 38);
+      write (2, pf, strlen(pf));
+      write (2, "'\n", 2);
+      abort ();
+    }
+  __mf_set_state (reentrant);
+}
+#define BEGIN_RECURSION_PROTECT() \
+  begin_recursion_protect1 (__PRETTY_FUNCTION__)
+#define END_RECURSION_PROTECT() \
+  __mf_set_state (active)
 
 /* ------------------------------------------------------------------------ */
 /* Required globals.  */
 
 #define LOOKUP_CACHE_MASK_DFL 1023
-#define LOOKUP_CACHE_SIZE_MAX 4096 /* Allows max CACHE_MASK 0x0FFF */
+#define LOOKUP_CACHE_SIZE_MAX 65536 /* Allows max CACHE_MASK 0xFFFF */
 #define LOOKUP_CACHE_SHIFT_DFL 2
 
 struct __mf_cache __mf_lookup_cache [LOOKUP_CACHE_SIZE_MAX];
@@ -169,15 +170,16 @@ unsigned char __mf_lc_shift = LOOKUP_CACHE_SHIFT_DFL;
 #define LOOKUP_CACHE_SIZE (__mf_lc_mask + 1)
 
 struct __mf_options __mf_opts;
-
 int __mf_starting_p = 1;
-#ifndef LIBMUDFLAPTH
-enum __mf_state_enum __mf_state = active;
+
+#ifdef LIBMUDFLAPTH
+#if defined(HAVE_TLS) && !defined(USE_EMUTLS)
+__thread enum __mf_state_enum __mf_state_1 = reentrant;
+#endif
 #else
-/* See __mf_state_perthread() in mf-hooks.c. */
+enum __mf_state_enum __mf_state_1 = reentrant;
 #endif
-
+
 #ifdef LIBMUDFLAPTH
 pthread_mutex_t __mf_biglock =
 #ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
@@ -196,7 +198,6 @@ pthread_mutex_t __mf_biglock =
 #else
 #define pthread_join NULL
 #endif
-const void *threads_active_p = (void *) pthread_join;
 #endif
 
 
@@ -263,11 +264,11 @@ static __mf_object_t *__mf_object_cemetary[__MF_TYPE_MAX_CEM+1][__MF_PERSIST_MAX
 void __mf_init () CTOR;
 static void __mf_sigusr1_respond ();
-static unsigned __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high, 
+static unsigned __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                                    __mf_object_t **objs, unsigned max_objs);
-static unsigned __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high, 
+static unsigned __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
                                     __mf_object_t **objs, unsigned max_objs, int type);
-static unsigned __mf_find_dead_objects (uintptr_t ptr_low, uintptr_t ptr_high, 
+static unsigned __mf_find_dead_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                                         __mf_object_t **objs, unsigned max_objs);
 static void __mf_adapt_cache ();
 static void __mf_describe_object (__mf_object_t *obj);
@@ -295,13 +296,24 @@ __mf_set_default_options ()
   __mf_opts.timestamps = 1;
   __mf_opts.mudflap_mode = mode_check;
   __mf_opts.violation_mode = viol_nop;
+#ifdef HAVE___LIBC_FREERES
+  __mf_opts.call_libc_freeres = 1;
+#endif
   __mf_opts.heur_std_data = 1;
 #ifdef LIBMUDFLAPTH
   __mf_opts.thread_stack = 0;
 #endif
+
+  /* PR41443: Beware that the above flags will be applied to
+     setuid/setgid binaries, and cannot be overridden with
+     $MUDFLAP_OPTIONS.  So the defaults must be non-exploitable.
+
+     Should we consider making the default violation_mode something
+     harsher than viol_nop?  OTOH, glibc's MALLOC_CHECK_ is disabled
+     by default for these same programs. 
*/ } -static struct option +static struct mudoption { char *name; char *description; @@ -310,43 +322,43 @@ static struct option set_option, read_integer_option, } type; - int value; - int *target; -} + unsigned value; + unsigned *target; +} options [] = { - {"mode-nop", - "mudflaps do nothing", - set_option, (int)mode_nop, (int *)&__mf_opts.mudflap_mode}, - {"mode-populate", - "mudflaps populate object tree", - set_option, (int)mode_populate, (int *)&__mf_opts.mudflap_mode}, - {"mode-check", + {"mode-nop", + "mudflaps do nothing", + set_option, (unsigned)mode_nop, (unsigned *)&__mf_opts.mudflap_mode}, + {"mode-populate", + "mudflaps populate object tree", + set_option, (unsigned)mode_populate, (unsigned *)&__mf_opts.mudflap_mode}, + {"mode-check", "mudflaps check for memory violations", - set_option, (int)mode_check, (int *)&__mf_opts.mudflap_mode}, - {"mode-violate", + set_option, (unsigned)mode_check, (unsigned *)&__mf_opts.mudflap_mode}, + {"mode-violate", "mudflaps always cause violations (diagnostic)", - set_option, (int)mode_violate, (int *)&__mf_opts.mudflap_mode}, - - {"viol-nop", + set_option, (unsigned)mode_violate, (unsigned *)&__mf_opts.mudflap_mode}, + + {"viol-nop", "violations do not change program execution", - set_option, (int)viol_nop, (int *)&__mf_opts.violation_mode}, - {"viol-abort", + set_option, (unsigned)viol_nop, (unsigned *)&__mf_opts.violation_mode}, + {"viol-abort", "violations cause a call to abort()", - set_option, (int)viol_abort, (int *)&__mf_opts.violation_mode}, - {"viol-segv", + set_option, (unsigned)viol_abort, (unsigned *)&__mf_opts.violation_mode}, + {"viol-segv", "violations are promoted to SIGSEGV signals", - set_option, (int)viol_segv, (int *)&__mf_opts.violation_mode}, - {"viol-gdb", + set_option, (unsigned)viol_segv, (unsigned *)&__mf_opts.violation_mode}, + {"viol-gdb", "violations fork a gdb process attached to current program", - set_option, (int)viol_gdb, (int *)&__mf_opts.violation_mode}, - {"trace-calls", + set_option, (unsigned)viol_gdb, (unsigned *)&__mf_opts.violation_mode}, + {"trace-calls", "trace calls to mudflap runtime library", set_option, 1, &__mf_opts.trace_mf_calls}, - {"verbose-trace", + {"verbose-trace", "trace internal events within mudflap runtime library", set_option, 1, &__mf_opts.verbose_trace}, - {"collect-stats", + {"collect-stats", "collect statistics on mudflap's operation", set_option, 1, &__mf_opts.collect_stats}, #ifdef SIGUSR1 @@ -354,25 +366,30 @@ options [] = "print report upon SIGUSR1", set_option, 1, &__mf_opts.sigusr1_report}, #endif - {"internal-checking", + {"internal-checking", "perform more expensive internal checking", set_option, 1, &__mf_opts.internal_checking}, - {"print-leaks", + {"print-leaks", "print any memory leaks at program shutdown", set_option, 1, &__mf_opts.print_leaks}, - {"check-initialization", +#ifdef HAVE___LIBC_FREERES + {"libc-freeres", + "call glibc __libc_freeres at shutdown for better leak data", + set_option, 1, &__mf_opts.call_libc_freeres}, +#endif + {"check-initialization", "detect uninitialized object reads", set_option, 1, &__mf_opts.check_initialization}, - {"verbose-violations", + {"verbose-violations", "print verbose messages when memory violations occur", set_option, 1, &__mf_opts.verbose_violations}, - {"abbreviate", + {"abbreviate", "abbreviate repetitive listings", set_option, 1, &__mf_opts.abbreviate}, - {"timestamps", + {"timestamps", "track object lifetime timestamps", set_option, 1, &__mf_opts.timestamps}, - {"ignore-reads", + {"ignore-reads", "ignore read accesses 
- assume okay",
     set_option, 1, &__mf_opts.ignore_reads},
    {"wipe-stack",
     "wipe stack objects at unwind",
     set_option, 1, &__mf_opts.wipe_stack},
@@ -381,43 +398,43 @@ options [] =
    {"wipe-heap",
     "wipe heap objects at free",
     set_option, 1, &__mf_opts.wipe_heap},
-    {"heur-proc-map", 
+    {"heur-proc-map",
     "support /proc/self/map heuristics",
     set_option, 1, &__mf_opts.heur_proc_map},
    {"heur-stack-bound",
     "enable a simple upper stack bound heuristic",
     set_option, 1, &__mf_opts.heur_stack_bound},
-    {"heur-start-end", 
+    {"heur-start-end",
     "support _start.._end heuristics",
     set_option, 1, &__mf_opts.heur_start_end},
-    {"heur-stdlib", 
+    {"heur-stdlib",
     "register standard library data (argv, errno, stdin, ...)",
     set_option, 1, &__mf_opts.heur_std_data},
-    {"free-queue-length", 
+    {"free-queue-length",
     "queue N deferred free() calls before performing them",
     read_integer_option, 0, &__mf_opts.free_queue_length},
-    {"persistent-count", 
+    {"persistent-count",
     "keep a history of N unregistered regions",
     read_integer_option, 0, &__mf_opts.persistent_count},
-    {"crumple-zone", 
+    {"crumple-zone",
     "surround allocations with crumple zones of N bytes",
     read_integer_option, 0, &__mf_opts.crumple_zone},
    /* XXX: not type-safe.
-    {"lc-mask", 
+    {"lc-mask",
     "set lookup cache size mask to N (2**M - 1)",
     read_integer_option, 0, (int *)(&__mf_lc_mask)},
-    {"lc-shift", 
+    {"lc-shift",
     "set lookup cache pointer shift",
     read_integer_option, 0, (int *)(&__mf_lc_shift)},
    */
-    {"lc-adapt", 
+    {"lc-adapt",
     "adapt mask/shift parameters after N cache misses",
     read_integer_option, 1, &__mf_opts.adapt_cache},
-    {"backtrace", 
+    {"backtrace",
     "keep an N-level stack trace of each call context",
     read_integer_option, 0, &__mf_opts.backtrace},
 #ifdef LIBMUDFLAPTH
-    {"thread-stack", 
+    {"thread-stack",
     "override thread stacks allocation: N kB",
     read_integer_option, 0, &__mf_opts.thread_stack},
 #endif
@@ -427,13 +444,13 @@ options [] =
 static void
 __mf_usage ()
 {
-  struct option *opt;
+  struct mudoption *opt;
 
-  fprintf (stderr, 
+  fprintf (stderr,
           "This is a %s%sGCC \"mudflap\" memory-checked binary.\n"
-          "Mudflap is Copyright (C) 2002-2004 Free Software Foundation, Inc.\n"
+          "Mudflap is Copyright (C) 2002-2012 Free Software Foundation, Inc.\n"
           "\n"
-          "The mudflap code can be controlled by an environment variable:\n"
+          "Unless setuid, a program's mudflap options can be set by an environment variable:\n"
           "\n"
           "$ export MUDFLAP_OPTIONS='<options>'\n"
           "$ <mudflapped_program>\n"
           "\n"
@@ -442,7 +459,7 @@ __mf_usage ()
           "any of the following options.  Use `-no-OPTION' to disable options.\n"
           "\n",
 #if HAVE_PTHREAD_H
-          (threads_active_p ? "multi-threaded " : "single-threaded "),
+          (pthread_join ? "multi-threaded " : "single-threaded "),
 #else
           "",
 #endif
@@ -473,7 +490,7 @@ __mf_usage ()
            strncpy (buf + strlen (opt->name), "=N", 2);
            fprintf (stderr, "-%-23.23s %s", buf, opt->description);
            fprintf (stderr, " [%d]\n", * opt->target);
-           break; 
+           break;
         default: abort();
         }
     }
@@ -482,7 +499,7 @@ __mf_usage ()
 }
 
 
-int 
+int
 __mf_set_options (const char *optstr)
 {
   int rc;
@@ -490,7 +507,7 @@ __mf_set_options (const char *optstr)
   BEGIN_RECURSION_PROTECT ();
   rc = __mfu_set_options (optstr);
   /* XXX: It's not really that easy.  A change to a bunch of parameters
-     can require updating auxiliary state or risk crashing: 
+     can require updating auxiliary state or risk crashing:
      free_queue_length, crumple_zone ... 
*/ END_RECURSION_PROTECT (); UNLOCKTH (); @@ -498,10 +515,10 @@ __mf_set_options (const char *optstr) } -int +int __mfu_set_options (const char *optstr) { - struct option *opts = 0; + struct mudoption *opts = 0; char *nxt = 0; long tmp = 0; int rc = 0; @@ -520,30 +537,30 @@ __mfu_set_options (const char *optstr) case '-': if (*optstr+1) - { + { int negate = 0; optstr++; - if (*optstr == '?' || + if (*optstr == '?' || strncmp (optstr, "help", 4) == 0) { /* Caller will print help and exit. */ return -1; } - + if (strncmp (optstr, "no-", 3) == 0) { negate = 1; optstr = & optstr[3]; } - + for (opts = options; opts->name; opts++) { if (strncmp (optstr, opts->name, strlen (opts->name)) == 0) { optstr += strlen (opts->name); assert (opts->target); - switch (opts->type) + switch (opts->type) { case set_option: if (negate) @@ -558,7 +575,7 @@ __mfu_set_options (const char *optstr) tmp = strtol (optstr, &nxt, 10); if ((optstr != nxt) && (tmp != LONG_MAX)) { - optstr = nxt; + optstr = nxt; *(opts->target) = (int)tmp; } } @@ -570,9 +587,9 @@ __mfu_set_options (const char *optstr) } } break; - + default: - fprintf (stderr, + fprintf (stderr, "warning: unrecognized string '%s' in mudflap options\n", optstr); optstr += strlen (optstr); @@ -602,7 +619,7 @@ __mfu_set_options (const char *optstr) #ifdef PIC -void +void __mf_resolve_single_dynamic (struct __mf_dynamic_entry *e) { char *err; @@ -616,7 +633,7 @@ __mf_resolve_single_dynamic (struct __mf_dynamic_entry *e) else #endif e->pointer = dlsym (RTLD_NEXT, e->name); - + err = dlerror (); if (err) @@ -624,7 +641,7 @@ __mf_resolve_single_dynamic (struct __mf_dynamic_entry *e) fprintf (stderr, "mf: error in dlsym(\"%s\"): %s\n", e->name, err); abort (); - } + } if (! e->pointer) { fprintf (stderr, "mf: dlsym(\"%s\") = NULL\n", e->name); @@ -633,8 +650,8 @@ __mf_resolve_single_dynamic (struct __mf_dynamic_entry *e) } -static void -__mf_resolve_dynamics () +static void +__mf_resolve_dynamics () { int i; for (i = 0; i < dyn_INITRESOLVE; i++) @@ -649,6 +666,9 @@ struct __mf_dynamic_entry __mf_dynamic [] = {NULL, "free", NULL}, {NULL, "malloc", NULL}, {NULL, "mmap", NULL}, +#ifdef HAVE_MMAP64 + {NULL, "mmap64", NULL}, +#endif {NULL, "munmap", NULL}, {NULL, "realloc", NULL}, {NULL, "DUMMY", NULL}, /* dyn_INITRESOLVE */ @@ -686,15 +706,24 @@ __mf_init () if (LIKELY (__mf_starting_p == 0)) return; +#if defined(__FreeBSD__) && defined(LIBMUDFLAPTH) + pthread_self(); + LOCKTH (); + UNLOCKTH (); +#endif /* Prime mutex which calls calloc upon first lock to avoid deadlock. */ + /* This initial bootstrap phase requires that __mf_starting_p = 1. */ #ifdef PIC __mf_resolve_dynamics (); #endif __mf_starting_p = 0; + __mf_set_state (active); + __mf_set_default_options (); - ov = getenv ("MUDFLAP_OPTIONS"); + if (getuid () == geteuid () && getgid () == getegid ()) /* PR41433, not setuid */ + ov = getenv ("MUDFLAP_OPTIONS"); if (ov) { int rc = __mfu_set_options (ov); @@ -728,6 +757,7 @@ __wrap_main (int argc, char* argv[]) { extern char **environ; extern int main (); + extern int __real_main (); static int been_here = 0; if (__mf_opts.heur_std_data && ! been_here) @@ -754,12 +784,22 @@ __wrap_main (int argc, char* argv[]) __mf_register (& errno, sizeof (errno), __MF_TYPE_STATIC, "errno area"); +#if !(defined(__sun__) && defined(__svr4__)) + /* Conflicts with the automatic registration of __iob[]. 
*/
       __mf_register (stdin,  sizeof (*stdin),  __MF_TYPE_STATIC, "stdin");
       __mf_register (stdout, sizeof (*stdout), __MF_TYPE_STATIC, "stdout");
       __mf_register (stderr, sizeof (*stderr), __MF_TYPE_STATIC, "stderr");
+#endif
 
       /* Make some effort to register ctype.h static arrays.  */
-      /* XXX: e.g., on Solaris, may need to register __ctype, _ctype, __ctype_mask, __toupper, etc. */
+#if defined(__sun__) && defined(__svr4__)
+      /* __ctype[] is declared without size, but MB_CUR_MAX is the last
+	 member.  There seems to be no proper way to determine the size.  */
+      __mf_register (__ctype, &MB_CUR_MAX - &__ctype[0] + 1, __MF_TYPE_STATIC, "__ctype");
+      /* __ctype_mask points at _C_masks[1].  The size can only be determined
+	 using nm on libc.so.1.  */
+      __mf_register (__ctype_mask - 1, 1028, __MF_TYPE_STATIC, "_C_masks");
+#endif
       /* On modern Linux GLIBC, these are thread-specific and changeable, and
          are dealt with in mf-hooks2.c. */
     }
@@ -778,6 +818,12 @@ void __mf_fini ()
 {
   TRACE ("__mf_fini\n");
   __mfu_report ();
+
+#ifndef PIC
+/* Since we didn't populate the tree for allocations in constructors
+   before __mf_init, we cannot check destructors after __mf_fini.  */
+  __mf_opts.mudflap_mode = mode_nop;
+#endif
 }
 
 
@@ -806,11 +852,13 @@ void __mfu_check (void *ptr, size_t sz, int type, const char *location)
   if (UNLIKELY (__mf_opts.sigusr1_report))
     __mf_sigusr1_respond ();
+  if (UNLIKELY (__mf_opts.ignore_reads && type == 0))
+    return;
 
   TRACE ("check ptr=%p b=%u size=%lu %s location=`%s'\n",
	 ptr, entry_idx, (unsigned long)sz,
	 (type == 0 ? "read" : "write"), location);
-  
+
   switch (__mf_opts.mudflap_mode)
     {
     case mode_nop:
@@ -833,7 +881,7 @@ void __mfu_check (void *ptr, size_t sz, int type, const char *location)
     case mode_check:
       {
	unsigned heuristics = 0;
-	
+
	/* Advance aging/adaptation counters.  */
	static unsigned adapt_count;
	adapt_count ++;
@@ -843,7 +891,7 @@ void __mfu_check (void *ptr, size_t sz, int type, const char *location)
	    adapt_count = 0;
	    __mf_adapt_cache ();
	  }
-	
+
	/* Looping only occurs if heuristics were triggered.  */
	while (judgement == 0)
	  {
@@ -868,7 +916,7 @@ void __mfu_check (void *ptr, size_t sz, int type, const char *location)
		    assert (n == obj_count);
		    dealloc_me = all_ovr_obj;
		  }
-		else 
+		else
		  {
		    all_ovr_obj = ovr_obj;
		    dealloc_me = NULL;
@@ -885,7 +933,7 @@ void __mfu_check (void *ptr, size_t sz, int type, const char *location)
		    obj->write_count ++;
		    obj->liveness ++;
		  }
-	      
+
	      /* Iterate over the various objects.  There are a number of special cases.  */
	      for (i = 0; i < obj_count; i++)
		{
@@ -898,7 +946,7 @@ void __mfu_check (void *ptr, size_t sz, int type, const char *location)
		  /* Any object with a watch flag is bad.  */
		  if (UNLIKELY (obj->watching_p))
		    judgement = -2; /* trigger VIOL_WATCH */
-	    
+
		  /* A read from an uninitialized object is bad. */
		  if (UNLIKELY (__mf_opts.check_initialization
				/* reading */
@@ -910,12 +958,12 @@ void __mfu_check (void *ptr, size_t sz, int type, const char *location)
		    judgement = -1;
		}
 
-	      /* We now know that the access spans one or more valid objects.  */
+	      /* We now know that the access spans no invalid objects.  */
	      if (LIKELY (judgement >= 0))
		for (i = 0; i < obj_count; i++)
		  {
		    __mf_object_t *obj = all_ovr_obj[i];
-		    
+
		    /* Is this access entirely contained within this object?  */
		    if (LIKELY (ptr_low >= obj->low && ptr_high <= obj->high))
		      {
			entry->low = obj->low;
			entry->high = obj->high;
			judgement = 1;
		      }
+		  }
 
-	      /* XXX: Access runs off left or right side of this
-		 object. 
That could be okay, if there are - other objects that fill in all the holes. */ + /* This access runs off the end of one valid object. That + could be okay, if other valid objects fill in all the + holes. We allow this only for HEAP and GUESS type + objects. Accesses to STATIC and STACK variables + should not be allowed to span. */ + if (UNLIKELY ((judgement == 0) && (obj_count > 1))) + { + unsigned uncovered = 0; + for (i = 0; i < obj_count; i++) + { + __mf_object_t *obj = all_ovr_obj[i]; + int j, uncovered_low_p, uncovered_high_p; + uintptr_t ptr_lower, ptr_higher; + + uncovered_low_p = ptr_low < obj->low; + ptr_lower = CLAMPSUB (obj->low, 1); + uncovered_high_p = ptr_high > obj->high; + ptr_higher = CLAMPADD (obj->high, 1); + + for (j = 0; j < obj_count; j++) + { + __mf_object_t *obj2 = all_ovr_obj[j]; + + if (i == j) continue; + + /* Filter out objects that cannot be spanned across. */ + if (obj2->type == __MF_TYPE_STACK + || obj2->type == __MF_TYPE_STATIC) + continue; + + /* Consider a side "covered" if obj2 includes + the next byte on that side. */ + if (uncovered_low_p + && (ptr_lower >= obj2->low && ptr_lower <= obj2->high)) + uncovered_low_p = 0; + if (uncovered_high_p + && (ptr_high >= obj2->low && ptr_higher <= obj2->high)) + uncovered_high_p = 0; + } + + if (uncovered_low_p || uncovered_high_p) + uncovered ++; + } + + /* Success if no overlapping objects are uncovered. */ + if (uncovered == 0) + judgement = 1; } + if (dealloc_me != NULL) CALL_REAL (free, dealloc_me); @@ -955,23 +1049,23 @@ void __mfu_check (void *ptr, size_t sz, int type, const char *location) if (__mf_opts.collect_stats) { __mf_count_check ++; - + if (LIKELY (old_entry.low != entry->low || old_entry.high != entry->high)) /* && (old_entry.low != 0) && (old_entry.high != 0)) */ - __mf_lookup_cache_reusecount [entry_idx] ++; + __mf_lookup_cache_reusecount [entry_idx] ++; } - + if (UNLIKELY (judgement < 0)) __mf_violation (ptr, sz, (uintptr_t) __builtin_return_address (0), location, - ((judgement == -1) ? + ((judgement == -1) ? (type == __MF_CHECK_READ ? __MF_VIOL_READ : __MF_VIOL_WRITE) : __MF_VIOL_WATCH)); } static __mf_object_t * -__mf_insert_new_object (uintptr_t low, uintptr_t high, int type, +__mf_insert_new_object (uintptr_t low, uintptr_t high, int type, const char *name, uintptr_t pc) { DECLARE (void *, calloc, size_t c, size_t n); @@ -992,41 +1086,97 @@ __mf_insert_new_object (uintptr_t low, uintptr_t high, int type, #endif if (__mf_opts.backtrace > 0 && (type == __MF_TYPE_HEAP || type == __MF_TYPE_HEAP_I)) - new_obj->alloc_backtrace_size = + new_obj->alloc_backtrace_size = __mf_backtrace (& new_obj->alloc_backtrace, (void *) pc, 2); - + __mf_link_object (new_obj); return new_obj; } -static void +static void __mf_uncache_object (__mf_object_t *old_obj) { /* Remove any low/high pointers for this object from the lookup cache. */ - + /* Can it possibly exist in the cache? */ if (LIKELY (old_obj->read_count + old_obj->write_count)) { uintptr_t low = old_obj->low; uintptr_t high = old_obj->high; - unsigned idx_low = __MF_CACHE_INDEX (low); - unsigned idx_high = __MF_CACHE_INDEX (high); + struct __mf_cache *entry; unsigned i; - for (i = idx_low; i <= idx_high; i++) - { - struct __mf_cache *entry = & __mf_lookup_cache [i]; - /* NB: the "||" in the following test permits this code to - tolerate the situation introduced by __mf_check over - contiguous objects, where a cache entry spans several - objects. 
*/
-	  if (entry->low == low || entry->high == high)
+      if ((high - low) >= (__mf_lc_mask << __mf_lc_shift))
+	{
+	  /* For large objects (>= cache size - 1) check the whole cache.  */
+	  entry = & __mf_lookup_cache [0];
+	  for (i = 0; i <= __mf_lc_mask; i++, entry++)
 	    {
+	      /* NB: the "||" in the following test permits this code to
+		 tolerate the situation introduced by __mf_check over
+		 contiguous objects, where a cache entry spans several
+		 objects.  */
+	      if (entry->low == low || entry->high == high)
 		{
-	      entry->low = MAXPTR;
-	      entry->high = MINPTR;
+		  entry->low = MAXPTR;
+		  entry->high = MINPTR;
+		}
 	    }
 	}
+      else
+	{
+	  /* Object is now smaller than cache size.  */
+	  unsigned entry_low_idx = __MF_CACHE_INDEX (low);
+	  unsigned entry_high_idx = __MF_CACHE_INDEX (high);
+	  if (entry_low_idx <= entry_high_idx)
+	    {
+	      entry = & __mf_lookup_cache [entry_low_idx];
+	      for (i = entry_low_idx; i <= entry_high_idx; i++, entry++)
+		{
+		  /* NB: the "||" in the following test permits this code to
+		     tolerate the situation introduced by __mf_check over
+		     contiguous objects, where a cache entry spans several
+		     objects.  */
+		  if (entry->low == low || entry->high == high)
+		    {
+		      entry->low = MAXPTR;
+		      entry->high = MINPTR;
+		    }
+		}
+	    }
+	  else
+	    {
+	      /* Object wrapped around the end of the cache. First search
+		 from low to end of cache and then from 0 to high.  */
+	      entry = & __mf_lookup_cache [entry_low_idx];
+	      for (i = entry_low_idx; i <= __mf_lc_mask; i++, entry++)
+		{
+		  /* NB: the "||" in the following test permits this code to
+		     tolerate the situation introduced by __mf_check over
+		     contiguous objects, where a cache entry spans several
+		     objects.  */
+		  if (entry->low == low || entry->high == high)
+		    {
+		      entry->low = MAXPTR;
+		      entry->high = MINPTR;
+		    }
+		}
+	      entry = & __mf_lookup_cache [0];
+	      for (i = 0; i <= entry_high_idx; i++, entry++)
+		{
+		  /* NB: the "||" in the following test permits this code to
+		     tolerate the situation introduced by __mf_check over
+		     contiguous objects, where a cache entry spans several
+		     objects.  */
+		  if (entry->low == low || entry->high == high)
+		    {
+		      entry->low = MAXPTR;
+		      entry->high = MINPTR;
+		    }
+		}
+	    }
+	}
     }
 }
@@ -1045,14 +1195,14 @@ __mf_register (void *ptr, size_t sz, int type, const char *name)
 void
 __mfu_register (void *ptr, size_t sz, int type, const char *name)
 {
-  TRACE ("register ptr=%p size=%lu type=%x name='%s'\n", 
+  TRACE ("register ptr=%p size=%lu type=%x name='%s'\n",
	 ptr, (unsigned long) sz, type, name ? name : "");
 
   if (__mf_opts.collect_stats)
     {
       __mf_count_register ++;
       __mf_total_register_size [(type < 0) ? 0 :
-				(type > __MF_TYPE_MAX) ? 0 : 
+				(type > __MF_TYPE_MAX) ? 0 :
				type] += sz;
     }
 
@@ -1063,7 +1213,7 @@ __mfu_register (void *ptr, size_t sz, int type, const char *name)
     {
     case mode_nop:
       break;
-      
+
     case mode_violate:
       __mf_violation (ptr, sz, (uintptr_t) __builtin_return_address (0), NULL,
		      __MF_VIOL_REGISTER);
@@ -1085,7 +1235,7 @@ __mfu_register (void *ptr, size_t sz, int type, const char *name)
	uintptr_t low = (uintptr_t) ptr;
	uintptr_t high = CLAMPSZ (ptr, sz);
	uintptr_t pc = (uintptr_t) __builtin_return_address (0);
-	
+
	/* Treat unknown size indication as 1.  */
	if (UNLIKELY (sz == 0)) sz = 1;
 
@@ -1098,7 +1248,7 @@ __mfu_register (void *ptr, size_t sz, int type, const char *name)
	if (UNLIKELY (num_overlapping_objs > 0))
	  {
	    __mf_object_t *ovr_obj = ovr_objs[0];
-	    
+
	    /* Accept certain specific duplication pairs. 
*/ if (((type == __MF_TYPE_STATIC) || (type == __MF_TYPE_GUESS)) && ovr_obj->low == low @@ -1107,8 +1257,8 @@ __mfu_register (void *ptr, size_t sz, int type, const char *name) { /* Duplicate registration for static objects may come from distinct compilation units. */ - VERBOSE_TRACE ("harmless duplicate reg %p-%p `%s'\n", - (void *) low, (void *) high, + VERBOSE_TRACE ("harmless duplicate reg %p-%p `%s'\n", + (void *) low, (void *) high, (ovr_obj->name ? ovr_obj->name : "")); break; } @@ -1124,7 +1274,7 @@ __mfu_register (void *ptr, size_t sz, int type, const char *name) } else /* No overlapping objects: AOK. */ __mf_insert_new_object (low, high, type, name, pc); - + /* We could conceivably call __mf_check() here to prime the cache, but then the read_count/write_count field is not reliable. */ break; @@ -1155,7 +1305,7 @@ __mfu_unregister (void *ptr, size_t sz, int type) TRACE ("unregister ptr=%p size=%lu type=%x\n", ptr, (unsigned long) sz, type); switch (__mf_opts.mudflap_mode) - { + { case mode_nop: break; @@ -1209,18 +1359,17 @@ __mfu_unregister (void *ptr, size_t sz, int type) /* Wipe buffer contents if desired. */ if ((__mf_opts.wipe_stack && old_obj->type == __MF_TYPE_STACK) - || (__mf_opts.wipe_heap && (old_obj->type == __MF_TYPE_HEAP + || (__mf_opts.wipe_heap && (old_obj->type == __MF_TYPE_HEAP || old_obj->type == __MF_TYPE_HEAP_I))) { memset ((void *) old_obj->low, 0, (size_t) (old_obj->high - old_obj->low + 1)); } - + /* Manage the object cemetary. */ - if (__mf_opts.persistent_count > 0 && - old_obj->type >= 0 && - old_obj->type <= __MF_TYPE_MAX_CEM) + if (__mf_opts.persistent_count > 0 + && (unsigned) old_obj->type <= __MF_TYPE_MAX_CEM) { old_obj->deallocated_p = 1; old_obj->dealloc_pc = (uintptr_t) __builtin_return_address (0); @@ -1233,7 +1382,7 @@ __mfu_unregister (void *ptr, size_t sz, int type) #endif if (__mf_opts.backtrace > 0 && old_obj->type == __MF_TYPE_HEAP) - old_obj->dealloc_backtrace_size = + old_obj->dealloc_backtrace_size = __mf_backtrace (& old_obj->dealloc_backtrace, NULL, 2); @@ -1245,7 +1394,7 @@ __mfu_unregister (void *ptr, size_t sz, int type) { unsigned row = old_obj->type; unsigned plot = __mf_object_dead_head [row]; - + del_obj = __mf_object_cemetary [row][plot]; __mf_object_cemetary [row][plot] = old_obj; plot ++; @@ -1255,20 +1404,25 @@ __mfu_unregister (void *ptr, size_t sz, int type) } else del_obj = old_obj; - + if (__mf_opts.print_leaks) { if ((old_obj->read_count + old_obj->write_count) == 0 && - (old_obj->type == __MF_TYPE_HEAP + (old_obj->type == __MF_TYPE_HEAP || old_obj->type == __MF_TYPE_HEAP_I)) { - fprintf (stderr, + /* The problem with a warning message here is that we may not + be privy to accesses to such objects that occur within + uninstrumented libraries. */ +#if 0 + fprintf (stderr, "*******\n" "mudflap warning: unaccessed registered object:\n"); __mf_describe_object (old_obj); +#endif } } - + if (del_obj != NULL) /* May or may not equal old_obj. */ { if (__mf_opts.backtrace > 0) @@ -1281,7 +1435,7 @@ __mfu_unregister (void *ptr, size_t sz, int type) } CALL_REAL(free, del_obj); } - + break; } } /* end switch (__mf_opts.mudflap_mode) */ @@ -1315,7 +1469,7 @@ __mf_adapt_cache_fn (mfsplay_tree_node n, void *param) struct tree_stats *s = (struct tree_stats *) param; assert (obj != NULL && s != NULL); - + /* Exclude never-accessed objects. 
*/ if (obj->read_count + obj->write_count) { @@ -1394,7 +1548,7 @@ __mf_adapt_cache () break; } if (smoothed_new_shift < 0) smoothed_new_shift = __mf_lc_shift; - /* Converge toward this slowly to reduce flapping. */ + /* Converge toward this slowly to reduce flapping. */ smoothed_new_shift = 0.9*smoothed_new_shift + 0.1*i; new_shift = (unsigned) (smoothed_new_shift + 0.5); assert (new_shift < sizeof (uintptr_t)*8); @@ -1406,7 +1560,7 @@ __mf_adapt_cache () cache_utilization += 1.0; cache_utilization /= (1 + __mf_lc_mask); - new_mask |= 0x3ff; /* XXX: force a large cache. */ + new_mask |= 0xffff; /* XXX: force a large cache. */ new_mask &= (LOOKUP_CACHE_SIZE_MAX - 1); VERBOSE_TRACE ("adapt cache obj=%u/%u sizes=%lu/%.0f/%.0f => " @@ -1435,8 +1589,8 @@ __mf_adapt_cache () max_objs of their pointers in objs[]. Return total count of overlaps (may exceed max_objs). */ -unsigned -__mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high, +unsigned +__mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high, __mf_object_t **objs, unsigned max_objs, int type) { unsigned count = 0; @@ -1462,14 +1616,14 @@ __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high, while (1) { __mf_object_t *obj; - + n = (direction == 0 ? mfsplay_tree_successor (t, k) : mfsplay_tree_predecessor (t, k)); if (n == NULL) break; obj = (__mf_object_t *) n->value; - + if (! (obj->low <= ptr_high && obj->high >= ptr_low)) /* No overlap? */ break; - + if (count < max_objs) objs[count] = (__mf_object_t *) n->value; count ++; @@ -1544,31 +1698,31 @@ __mf_find_dead_objects (uintptr_t low, uintptr_t high, unsigned count = 0; unsigned recollection = 0; unsigned row = 0; - + assert (low <= high); assert (max_objs == 0 || objs != NULL); - + /* Widen the search from the most recent plots in each row, looking backward in time. */ recollection = 0; while (recollection < __mf_opts.persistent_count) { count = 0; - + for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++) { unsigned plot; unsigned i; - + plot = __mf_object_dead_head [row]; for (i = 0; i <= recollection; i ++) { __mf_object_t *obj; - + /* Look backward through row: it's a circular buffer. */ if (plot > 0) plot --; else plot = __mf_opts.persistent_count - 1; - + obj = __mf_object_cemetary [row][plot]; if (obj && obj->low <= high && obj->high >= low) { @@ -1579,14 +1733,14 @@ __mf_find_dead_objects (uintptr_t low, uintptr_t high, } } } - + if (count) break; - + /* Look farther back in time. */ recollection = (recollection * 2) + 1; } - + return count; } else { return 0; @@ -1608,7 +1762,8 @@ __mf_describe_object (__mf_object_t *obj) if (__mf_opts.abbreviate && obj->description_epoch == epoch) { fprintf (stderr, - "mudflap object %p: name=`%s'\n", + "mudflap %sobject %p: name=`%s'\n", + (obj->deallocated_p ? "dead " : ""), (void *) obj, (obj->name ? obj->name : "")); return; } @@ -1616,14 +1771,15 @@ __mf_describe_object (__mf_object_t *obj) obj->description_epoch = epoch; fprintf (stderr, - "mudflap object %p: name=`%s'\n" + "mudflap %sobject %p: name=`%s'\n" "bounds=[%p,%p] size=%lu area=%s check=%ur/%uw liveness=%u%s\n" "alloc time=%lu.%06lu pc=%p" #ifdef LIBMUDFLAPTH " thread=%u" #endif "\n", - (void *) obj, (obj->name ? obj->name : ""), + (obj->deallocated_p ? "dead " : ""), + (void *) obj, (obj->name ? obj->name : ""), (void *) obj->low, (void *) obj->high, (unsigned long) (obj->high - obj->low + 1), (obj->type == __MF_TYPE_NOACCESS ? "no-access" : @@ -1633,9 +1789,9 @@ __mf_describe_object (__mf_object_t *obj) obj->type == __MF_TYPE_STATIC ? 
"static" : obj->type == __MF_TYPE_GUESS ? "guess" : "unknown"), - obj->read_count, obj->write_count, obj->liveness, + obj->read_count, obj->write_count, obj->liveness, obj->watching_p ? " watching" : "", - obj->alloc_time.tv_sec, obj->alloc_time.tv_usec, + obj->alloc_time.tv_sec, obj->alloc_time.tv_usec, (void *) obj->alloc_pc #ifdef LIBMUDFLAPTH , (unsigned) obj->alloc_thread @@ -1658,7 +1814,7 @@ __mf_describe_object (__mf_object_t *obj) " thread=%u" #endif "\n", - obj->dealloc_time.tv_sec, obj->dealloc_time.tv_usec, + obj->dealloc_time.tv_sec, obj->dealloc_time.tv_usec, (void *) obj->dealloc_pc #ifdef LIBMUDFLAPTH , (unsigned) obj->dealloc_thread @@ -1792,6 +1948,14 @@ __mfu_report () /* Free up any remaining alloca()'d blocks. */ __mf_wrap_alloca_indirect (0); +#ifdef HAVE___LIBC_FREERES + if (__mf_opts.call_libc_freeres) + { + extern void __libc_freeres (void); + __libc_freeres (); + } +#endif + __mf_describe_object (NULL); /* Reset description epoch. */ l = __mf_report_leaks (); fprintf (stderr, "number of leaked objects: %u\n", l); @@ -1847,7 +2011,7 @@ __mf_backtrace (char ***symbols, void *guess_pc, unsigned guess_omit_levels) ends up containing a non-NULL guess_pc, then trim everything before that. Otherwise, omit the first guess_omit_levels entries. */ - + if (guess_pc != NULL) for (i=0; i __mf_sigusr1_handled) { __mf_sigusr1_handled ++; - assert (__mf_state == reentrant); + assert (__mf_get_state () == reentrant); __mfu_report (); handler_installed = 0; /* We may need to re-enable signal; this might be a SysV library. */ } @@ -2178,7 +2342,7 @@ write_itoa (int fd, unsigned n) unsigned digit = n % 10; buf[bufsize-2-i] = digit + '0'; n /= 10; - if (n == 0) + if (n == 0) { char *m = & buf [bufsize-2-i]; buf[bufsize-1] = '\0'; @@ -2196,7 +2360,7 @@ __assert_fail (const char *msg, const char *file, unsigned line, const char *fun write2("mf"); #ifdef LIBMUDFLAPTH write2("("); - write_itoa (2, (unsigned) pthread_self ()); + write_itoa (2, (unsigned) pthread_self ()); write2(")"); #endif write2(": assertion failure: `"); @@ -2241,22 +2405,6 @@ static mfsplay_tree_node mfsplay_tree_splay_helper (mfsplay_tree, mfsplay_tree_node *, mfsplay_tree_node *, mfsplay_tree_node *); -static void *mfsplay_tree_xmalloc (size_t size); -static void mfsplay_tree_free (void *object); - - - -/* Inline comparison function specialized for libmudflap's key type. */ -static inline int -compare_uintptr_t (mfsplay_tree_key k1, mfsplay_tree_key k2) -{ - if ((uintptr_t) k1 < (uintptr_t) k2) - return -1; - else if ((uintptr_t) k1 > (uintptr_t) k2) - return 1; - else - return 0; -} /* Help splay SP around KEY. PARENT and GRANDPARENT are the parent @@ -2278,7 +2426,7 @@ mfsplay_tree_splay_helper (mfsplay_tree sp, if (!n) return *parent; - comparison = compare_uintptr_t (key, n->key); + comparison = ((key > n->key) ? 1 : ((key < n->key) ? -1 : 0)); if (comparison == 0) /* We've found the target. */ @@ -2448,7 +2596,7 @@ mfsplay_tree_splay (mfsplay_tree sp, mfsplay_tree_key key) /* If we just splayed the tree with the same key, do nothing. */ if (sp->last_splayed_key_p && - compare_uintptr_t (sp->last_splayed_key, key) == 0) + (sp->last_splayed_key == key)) return; /* Compute a maximum recursion depth for a splay tree with NUM nodes. @@ -2456,9 +2604,9 @@ mfsplay_tree_splay (mfsplay_tree sp, mfsplay_tree_key key) degenerate access patterns. Unfortunately such patterns can occur e.g. 
during static initialization, where many static objects might be registered in increasing address sequence, or during a case where - large tree-like heap data structures are allocated quickly. + large tree-like heap data structures are allocated quickly. - On x86, this corresponds to roughly 200K of stack usage. + On x86, this corresponds to roughly 200K of stack usage. XXX: For libmudflapth, this could be a function of __mf_opts.thread_stack. */ sp->max_depth = 2500; sp->rebalance_p = sp->depth = 0; @@ -2508,7 +2656,8 @@ mfsplay_tree_insert (mfsplay_tree sp, mfsplay_tree_key key, mfsplay_tree_value v mfsplay_tree_splay (sp, key); if (sp->root) - comparison = compare_uintptr_t (sp->root->key, key); + comparison = ((sp->root->key > key) ? 1 : + ((sp->root->key < key) ? -1 : 0)); if (sp->root && comparison == 0) { @@ -2554,7 +2703,7 @@ mfsplay_tree_remove (mfsplay_tree sp, mfsplay_tree_key key) { mfsplay_tree_splay (sp, key); sp->last_splayed_key_p = 0; - if (sp->root && compare_uintptr_t (sp->root->key, key) == 0) + if (sp->root && (sp->root->key == key)) { mfsplay_tree_node left, right; left = sp->root->left; @@ -2567,7 +2716,7 @@ mfsplay_tree_remove (mfsplay_tree sp, mfsplay_tree_key key) if (left) { sp->root = left; - /* If there was a right child as well, hang it off the + /* If there was a right child as well, hang it off the right-most leaf of the left child. */ if (right) { @@ -2581,14 +2730,14 @@ mfsplay_tree_remove (mfsplay_tree sp, mfsplay_tree_key key) } } -/* Lookup KEY in SP, returning VALUE if present, and NULL +/* Lookup KEY in SP, returning VALUE if present, and NULL otherwise. */ static mfsplay_tree_node mfsplay_tree_lookup (mfsplay_tree sp, mfsplay_tree_key key) { mfsplay_tree_splay (sp, key); - if (sp->root && compare_uintptr_t (sp->root->key, key) == 0) + if (sp->root && (sp->root->key == key)) return sp->root; else return 0; @@ -2609,7 +2758,9 @@ mfsplay_tree_predecessor (mfsplay_tree sp, mfsplay_tree_key key) /* Splay the tree around KEY. That will leave either the KEY itself, its predecessor, or its successor at the root. */ mfsplay_tree_splay (sp, key); - comparison = compare_uintptr_t (sp->root->key, key); + comparison = ((sp->root->key > key) ? 1 : + ((sp->root->key < key) ? -1 : 0)); + /* If the predecessor is at the root, just return it. */ if (comparison < 0) return sp->root; @@ -2635,7 +2786,8 @@ mfsplay_tree_successor (mfsplay_tree sp, mfsplay_tree_key key) /* Splay the tree around KEY. That will leave either the KEY itself, its predecessor, or its successor at the root. */ mfsplay_tree_splay (sp, key); - comparison = compare_uintptr_t (sp->root->key, key); + comparison = ((sp->root->key > key) ? 1 : + ((sp->root->key < key) ? -1 : 0)); /* If the successor is at the root, just return it. */ if (comparison > 0) return sp->root; @@ -2651,7 +2803,7 @@ mfsplay_tree_successor (mfsplay_tree sp, mfsplay_tree_key key) in-order traversal. If FN every returns a non-zero value, the iteration ceases immediately, and the value is returned. Otherwise, this function returns 0. - + This function simulates recursion using dynamically allocated arrays, since it may be called from mfsplay_tree_rebalance(), which in turn means that the tree is already uncomfortably deep for stack
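
The mechanism these hunks keep returning to is the lookup cache: a direct-mapped
array of [low, high] pairs addressed by (ptr >> __mf_lc_shift) & __mf_lc_mask,
which lets most checks skip the splay tree entirely, and which the patched
__mf_uncache_object must scrub when an object is unregistered.  The following
standalone C sketch restates that scheme outside the patch.  It is illustrative
only: the names mf_cache, cache_index, cache_hit, and uncache are not part of
libmudflap's API (the runtime uses __mf_cache, __MF_CACHE_INDEX, __mfu_check,
and __mf_uncache_object), and the single wrapping loop stands in for the
patch's two-pass low-to-end/zero-to-high traversal.

#include <stdint.h>

struct mf_cache { uintptr_t low; uintptr_t high; };

#define LC_MASK  1023u            /* LOOKUP_CACHE_MASK_DFL */
#define LC_SHIFT 2u               /* LOOKUP_CACHE_SHIFT_DFL */
#define MAXPTR   ((uintptr_t) -1)
#define MINPTR   ((uintptr_t) 0)

static struct mf_cache lookup_cache[LC_MASK + 1];  /* zero-init: no slot can hit */

/* Slot selection: shift off the low-order bits, then mask.  */
static unsigned
cache_index (uintptr_t p)
{
  return (unsigned) ((p >> LC_SHIFT) & LC_MASK);
}

/* Fast path of a check: a slot whose range brackets [ptr_low, ptr_high]
   means a recent full check already judged this range valid, so the
   splay tree is not consulted at all.  */
static int
cache_hit (uintptr_t ptr_low, uintptr_t ptr_high)
{
  struct mf_cache *e = &lookup_cache[cache_index (ptr_low)];
  return e->low <= ptr_low && ptr_high <= e->high;
}

/* Invalidation on unregister.  An object spanning at least
   (LC_MASK << LC_SHIFT) bytes can alias every slot, so it forces a full
   sweep; a smaller object only touches the slots between its two
   indices, wrapping past the end of the array if necessary.  */
static void
uncache (uintptr_t low, uintptr_t high)
{
  unsigned i = cache_index (low);
  unsigned stop = cache_index (high);

  if ((high - low) >= ((uintptr_t) LC_MASK << LC_SHIFT))
    {
      i = 0;                      /* full sweep, as in the patched code */
      stop = LC_MASK;
    }

  for (;; i = (i + 1) & LC_MASK)
    {
      struct mf_cache *e = &lookup_cache[i];
      /* The "||" mirrors the patch: also clear entries that a previous
         check widened across contiguous objects.  */
      if (e->low == low || e->high == high)
        {
          e->low = MAXPTR;        /* sentinel range that can never bracket */
          e->high = MINPTR;
        }
      if (i == stop)
        break;
    }
}

Invalidation writes the MAXPTR/MINPTR sentinels rather than zeroing the slot,
so a stale entry can never bracket a later access.  Since any two pointers that
collide on the same slot evict each other, cache effectiveness depends on the
mask and shift matching the program's object sizes, which is exactly what
__mf_adapt_cache retunes after the configured number of misses.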