 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company.  All rights reserved.
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
# include "private/gc_priv.h"

# if defined(LINUX) && !defined(POWERPC)
#   include <linux/version.h>
#   if (LINUX_VERSION_CODE <= 0x10400)
      /* Ugly hack to get struct sigcontext_struct definition.  Required */
      /* for some early 1.3.X releases.  Will hopefully go away soon.    */
      /* In some later Linux releases, asm/sigcontext.h may have to      */
      /* be included instead.                                            */
#     include <asm/signal.h>
#   else
      /* Kernels prior to 2.1.1 defined struct sigcontext_struct instead of  */
      /* struct sigcontext.  libc6 (glibc2) uses "struct sigcontext" in      */
      /* prototypes, so we have to include the top-level sigcontext.h to     */
      /* make sure the former gets defined to be the latter if appropriate.  */
#     include <features.h>
#     if 2 <= __GLIBC__
#       if 2 == __GLIBC__ && 0 == __GLIBC_MINOR__
          /* glibc 2.1 no longer has sigcontext.h.  But signal.h     */
          /* has the right declaration for glibc 2.1.                */
#         include <sigcontext.h>
#       endif /* 0 == __GLIBC_MINOR__ */
#     else /* not 2 <= __GLIBC__ */
        /* libc5 doesn't have <sigcontext.h>: go directly with the kernel    */
        /* one.  Check LINUX_VERSION_CODE to see which we should reference.  */
#       include <asm/sigcontext.h>
#     endif /* 2 <= __GLIBC__ */
#   endif
# endif
# if !defined(OS2) && !defined(PCR) && !defined(AMIGA) && !defined(MACOS) \
#   include <sys/types.h>
#   if !defined(MSWIN32) && !defined(SUNOS4)
#     define SIGSEGV 0 /* value is irrelevant */
#if defined(LINUX) || defined(LINUX_STACKBOTTOM)

/* Blatantly OS dependent routines, except for those that are related  */
/* to dynamic loading.                                                 */

# if defined(HEURISTIC2) || defined(SEARCH_FOR_DATA_START)
#   define NEED_FIND_LIMIT
# if !defined(STACKBOTTOM) && defined(HEURISTIC2)
#   define NEED_FIND_LIMIT
# if (defined(SUNOS4) && defined(DYNAMIC_LOADING)) && !defined(PCR)
#   define NEED_FIND_LIMIT
# if (defined(SVR4) || defined(AUX) || defined(DGUX) \
      || (defined(LINUX) && defined(SPARC))) && !defined(PCR)
#   define NEED_FIND_LIMIT

#if defined(FREEBSD) && (defined(I386) || defined(X86_64) || defined(powerpc) || defined(__powerpc__))
#  include <machine/trap.h>
#  define NEED_FIND_LIMIT
#if (defined(NETBSD) || defined(OPENBSD)) && defined(__ELF__) \
    && !defined(NEED_FIND_LIMIT)
  /* Used by GC_init_netbsd_elf() below.        */
# define NEED_FIND_LIMIT

#ifdef NEED_FIND_LIMIT
# define GC_AMIGA_DEF
# include "AmigaOS.c"
#if defined(MSWIN32) || defined(MSWINCE)
# define WIN32_LEAN_AND_MEAN
# include <windows.h>
# include <Processes.h>
# include <sys/uio.h>
# include <malloc.h>   /* for locking */

#if defined(USE_MMAP) || defined(USE_MUNMAP)
--> USE_MUNMAP requires USE_MMAP
# include <sys/types.h>
# include <sys/mman.h>
# include <sys/stat.h>
# if defined(SUNOS5SIGS) && !defined(FREEBSD)
#   include <sys/siginfo.h>
# endif
  /* Define SETJMP and friends to be the version that restores  */
  /* the signal mask.                                           */
# define SETJMP(env) sigsetjmp(env, 1)
# define LONGJMP(env, val) siglongjmp(env, val)
# define JMP_BUF sigjmp_buf
# else
# define SETJMP(env) setjmp(env)
# define LONGJMP(env, val) longjmp(env, val)
# define JMP_BUF jmp_buf
# endif
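/* Illustrative sketch (not part of the collector): the wrappers above   */
/* are used to probe an address and recover from the resulting fault.    */
/* The hypothetical helper below assumes a fault handler that does       */
/* LONGJMP(GC_jmp_buf, 1), as GC_fault_handler further below does.       */
#if 0
  static GC_bool GC_probe_is_addressable(ptr_t addr)
  {
      GC_setup_temporary_fault_handler();
      if (SETJMP(GC_jmp_buf) == 0) {
          GC_noop1((word)(*addr));      /* may fault and LONGJMP out     */
          GC_reset_fault_handler();
          return TRUE;
      }
      GC_reset_fault_handler();         /* reached via LONGJMP: faulted  */
      return FALSE;
  }
#endif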
  /* for get_etext and friends */
# include <mach-o/getsect.h>

  /* Apparently necessary for djgpp 2.01.  May cause problems with  */
  /* other versions.                                                */
  typedef long unsigned int caddr_t;

# include "il/PCR_IL.h"
# include "th/PCR_ThCtl.h"
# include "mm/PCR_MM.h"

#if !defined(NO_EXECUTE_PERMISSION)
# define OPT_PROT_EXEC PROT_EXEC
#else
# define OPT_PROT_EXEC 0
#endif
#if defined(LINUX) && \
    (defined(USE_PROC_FOR_LIBRARIES) || defined(IA64) || !defined(SMALL_CONFIG))

/* We need to parse /proc/self/maps, either to find dynamic libraries  */
/* or to find the register backing store base (IA64).  Do it once      */
/* here.                                                               */

/* Repeatedly perform a read call until the buffer is filled or  */
/* we encounter EOF.                                              */
ssize_t GC_repeat_read(int fd, char *buf, size_t count)
{
    ssize_t num_read = 0;
    ssize_t result;

    while (num_read < count) {
        result = READ(fd, buf + num_read, count - num_read);
        if (result < 0) return result;
        if (result == 0) break;
        num_read += result;
    }
    return num_read;
}
/*
 * Apply fn to a buffer containing the contents of /proc/self/maps.
 * Return the result of fn or, if we failed, 0.
 * We currently do nothing to /proc/self/maps other than simply read
 * it.  This code could be simplified if we could determine its size
 * ahead of time.
 */

word GC_apply_to_maps(word (*fn)(char *))
{
    int f;
    int result;
    size_t maps_size = 4000;  /* Initial guess.         */
    static char init_buf[1];
    static char *maps_buf = init_buf;
    static size_t maps_buf_sz = 1;

    /* Read /proc/self/maps, growing maps_buf as necessary.  */
    /* Note that we may not allocate conventionally, and     */
    /* thus can't use stdio.                                 */
    do {
        if (maps_size >= maps_buf_sz) {
          /* Grow only by powers of 2, since we leak "too small" buffers. */
          while (maps_size >= maps_buf_sz) maps_buf_sz *= 2;
          maps_buf = GC_scratch_alloc(maps_buf_sz);
          if (maps_buf == 0) return 0;
        }
        f = open("/proc/self/maps", O_RDONLY);
        if (-1 == f) return 0;
        maps_size = 0;
        do {
            result = GC_repeat_read(f, maps_buf, maps_buf_sz-1);
            if (result <= 0) return 0;
            maps_size += result;
        } while (result == maps_buf_sz-1);
        close(f);
    } while (maps_size >= maps_buf_sz);
    maps_buf[maps_size] = '\0';

    /* Apply fn to result. */
    return fn(maps_buf);
}

#endif /* Need GC_apply_to_maps */
#if defined(LINUX) && (defined(USE_PROC_FOR_LIBRARIES) || defined(IA64))

// GC_parse_map_entry parses an entry from /proc/self/maps so we can
// locate all writable data segments that belong to shared libraries.
// The format of one of these entries and the fields we care about
// is as follows:
// XXXXXXXX-XXXXXXXX r-xp 00000000 30:05 260537     name of mapping...\n
// ^^^^^^^^ ^^^^^^^^ ^^^^          ^^
// start    end      prot          maj_dev
//
// Note that since about August 2003, the columns no longer have fixed
// offsets on 64-bit kernels.  Hence we no longer rely on fixed offsets
// anywhere, which is safer anyway.
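// Illustrative sketch (not part of the collector): the same fields can
// be pulled from one such line with sscanf; "line" and example_parse are
// hypothetical.  GC_parse_map_entry below instead walks the fields by
// hand, avoiding stdio facilities that the collector may not be able to
// rely on here.
#if 0
  static void example_parse(char *line)
  {
      unsigned long start, end, offset;
      unsigned int maj_dev, min_dev;
      char prot[5];

      sscanf(line, "%lx-%lx %4s %lx %x:%x",
             &start, &end, prot, &offset, &maj_dev, &min_dev);
  }
#endif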
/*
 * Assign various fields of the first line in buf_ptr to *start, *end,
 * *prot_buf and *maj_dev.  Only *prot_buf may be set for unwritable maps.
 */
char *GC_parse_map_entry(char *buf_ptr, word *start, word *end,
                         char *prot_buf, unsigned int *maj_dev)
{
    char *start_start, *end_start, *prot_start, *maj_dev_start;
    char *p;
    char *endp;

    if (buf_ptr == NULL || *buf_ptr == '\0') {
        return NULL;
    }

    p = buf_ptr;
    while (isspace(*p)) ++p;
    start_start = p;
    GC_ASSERT(isxdigit(*start_start));
    *start = strtoul(start_start, &endp, 16); p = endp;
    GC_ASSERT(*p=='-');

    ++p;
    end_start = p;
    GC_ASSERT(isxdigit(*end_start));
    *end = strtoul(end_start, &endp, 16); p = endp;
    GC_ASSERT(isspace(*p));

    while (isspace(*p)) ++p;
    prot_start = p;
    GC_ASSERT(*prot_start == 'r' || *prot_start == '-');
    memcpy(prot_buf, prot_start, 4);
    prot_buf[4] = '\0';
    if (prot_buf[1] == 'w') {/* we can skip the rest if it's not writable. */
        /* Skip past protection field to offset field */
        while (!isspace(*p)) ++p; while (isspace(*p)) ++p;
        GC_ASSERT(isxdigit(*p));
        /* Skip past offset field, which we ignore */
        while (!isspace(*p)) ++p; while (isspace(*p)) ++p;
        maj_dev_start = p;
        GC_ASSERT(isxdigit(*maj_dev_start));
        *maj_dev = strtoul(maj_dev_start, NULL, 16);
    }

    while (*p && *p++ != '\n');

    return p;
}

#endif /* Need to parse /proc/self/maps. */
#if defined(SEARCH_FOR_DATA_START)
  /* The I386 case can be handled without a search.  The Alpha case     */
  /* used to be handled differently as well, but the rules changed      */
  /* for recent Linux versions.  This seems to be the easiest way to    */
  /* cover all versions.                                                */

  /* Some Linux distributions arrange to define __data_start.  Some     */
  /* define data_start as a weak symbol.  The latter is technically     */
  /* broken, since the user program may define data_start, in which     */
  /* case we lose.  Nonetheless, we try both, preferring __data_start.  */
  /* We assume gcc-compatible pragmas.                                  */
# pragma weak __data_start
  extern int __data_start[];
# pragma weak data_start
  extern int data_start[];
  void GC_init_linux_data_start()
  {
    extern ptr_t GC_find_limit();

    /* Try the easy approaches first: */
    if ((ptr_t)__data_start != 0) {
        GC_data_start = (ptr_t)(__data_start);
        return;
    }
    if ((ptr_t)data_start != 0) {
        GC_data_start = (ptr_t)(data_start);
        return;
    }
    GC_data_start = GC_find_limit((ptr_t)(_end), FALSE);
  }
#endif
#ifdef ECOS

# ifndef ECOS_GC_MEMORY_SIZE
#   define ECOS_GC_MEMORY_SIZE (448 * 1024)
# endif /* ECOS_GC_MEMORY_SIZE */

  // setjmp() function, as described in ANSI para 7.6.1.1
# define SETJMP( __env__ ) hal_setjmp( __env__ )

  // FIXME: This is a simple way of allocating memory which is
  // compatible with ECOS early releases.  Later releases use a more
  // sophisticated means of allocating memory than this simple static
  // allocator, but this method is at least bound to work.
  static char memory[ECOS_GC_MEMORY_SIZE];
  static char *brk = memory;

  static void *tiny_sbrk(ptrdiff_t increment)
  {
    void *p = brk;

    brk += increment;

    if (brk > memory + sizeof memory)
      {
        brk -= increment;
        return NULL;
      }

    return p;
  }
# define sbrk tiny_sbrk
#endif /* ECOS */
#if (defined(NETBSD) || defined(OPENBSD)) && defined(__ELF__)

  void GC_init_netbsd_elf()
  {
    extern ptr_t GC_find_limit();
    extern char **environ;
        /* This may need to be environ, without the underscore, for  */
        /* some versions.                                            */
    GC_data_start = GC_find_limit((ptr_t)&environ, FALSE);
  }
#endif
# ifdef OS2

# if !defined(__IBMC__) && !defined(__WATCOMC__) /* e.g. EMX */

  struct exe_hdr {
    unsigned short magic_number;
    unsigned short padding[29];
    long new_exe_offset;
  };

# define E_MAGIC(x)  (x).magic_number
# define EMAGIC      0x5A4D
# define E_LFANEW(x) (x).new_exe_offset

  struct e32_exe {
    unsigned char magic_number[2];
    unsigned char byte_order;
    unsigned char word_order;
    unsigned long exe_format_level;
    unsigned short cpu;
    unsigned long padding1[13];
    unsigned long object_table_offset;
    unsigned long object_count;
    unsigned long padding2[31];
  };

# define E32_MAGIC1(x) (x).magic_number[0]
# define E32MAGIC1     'L'
# define E32_MAGIC2(x) (x).magic_number[1]
# define E32MAGIC2     'X'
# define E32_BORDER(x) (x).byte_order
# define E32LEBO       0
# define E32_WORDER(x) (x).word_order
# define E32LEWO       0
# define E32_CPU(x)    (x).cpu
# define E32CPU286     1
# define E32_OBJTAB(x) (x).object_table_offset
# define E32_OBJCNT(x) (x).object_count

  struct o32_obj {
    unsigned long size;
    unsigned long base;
    unsigned long flags;
    unsigned long pagemap;
    unsigned long mapsize;
    unsigned long reserved;
  };

# define O32_FLAGS(x) (x).flags
# define OBJREAD      0x0001L
# define OBJWRITE     0x0002L
# define OBJINVALID   0x0080L
# define O32_SIZE(x)  (x).size
# define O32_BASE(x)  (x).base

# else  /* IBM's compiler */

  /* A kludge to get around what appears to be a header file bug */
# ifndef WORD
#   define WORD unsigned short
# endif
# ifndef DWORD
#   define DWORD unsigned long
# endif

# endif /* __IBMC__ */

# define INCL_DOSEXCEPTIONS
# define INCL_DOSPROCESS
# define INCL_DOSERRORS
# define INCL_DOSMODULEMGR
# define INCL_DOSMEMMGR
  /* Disable and enable signals during nontrivial allocations */

  void GC_disable_signals(void)
  {
    ULONG nest;

    DosEnterMustComplete(&nest);
    if (nest != 1) ABORT("nested GC_disable_signals");
  }

  void GC_enable_signals(void)
  {
    ULONG nest;

    DosExitMustComplete(&nest);
    if (nest != 0) ABORT("GC_enable_signals");
  }
# else

# if !defined(PCR) && !defined(AMIGA) && !defined(MSWIN32) \
     && !defined(MSWINCE) \
     && !defined(MACOS) && !defined(DJGPP) && !defined(DOS4GW) \
     && !defined(NOSYS) && !defined(ECOS)

# if defined(sigmask) && !defined(UTS4) && !defined(HURD)
    /* Use the traditional BSD interface */
#   define SIGSET_T int
#   define SIG_DEL(set, signal) (set) &= ~(sigmask(signal))
#   define SIG_FILL(set) (set) = 0x7fffffff
        /* Setting the leading bit appears to provoke a bug in some    */
        /* longjmp implementations.  Most systems appear not to have   */
        /* a signal 32.                                                 */
#   define SIGSETMASK(old, new) (old) = sigsetmask(new)
# else
    /* Use POSIX/SYSV interface */
#   define SIGSET_T sigset_t
#   define SIG_DEL(set, signal) sigdelset(&(set), (signal))
#   define SIG_FILL(set) sigfillset(&set)
#   define SIGSETMASK(old, new) sigprocmask(SIG_SETMASK, &(new), &(old))
# endif

static GC_bool mask_initialized = FALSE;

static SIGSET_T new_mask;

static SIGSET_T old_mask;

static SIGSET_T dummy;

#if defined(PRINTSTATS) && !defined(THREADS)
# define CHECK_SIGNALS
  int GC_sig_disabled = 0;
#endif
void GC_disable_signals()
{
    if (!mask_initialized) {
        SIG_FILL(new_mask);

        SIG_DEL(new_mask, SIGSEGV);
        SIG_DEL(new_mask, SIGILL);
        SIG_DEL(new_mask, SIGQUIT);
#       ifdef SIGBUS
            SIG_DEL(new_mask, SIGBUS);
#       endif
#       ifdef SIGIOT
            SIG_DEL(new_mask, SIGIOT);
#       endif
#       ifdef SIGEMT
            SIG_DEL(new_mask, SIGEMT);
#       endif
#       ifdef SIGTRAP
            SIG_DEL(new_mask, SIGTRAP);
#       endif
        mask_initialized = TRUE;
    }
#   ifdef CHECK_SIGNALS
        if (GC_sig_disabled != 0) ABORT("Nested disables");
        GC_sig_disabled++;
#   endif
    SIGSETMASK(old_mask,new_mask);
}

void GC_enable_signals()
{
#   ifdef CHECK_SIGNALS
        if (GC_sig_disabled != 1) ABORT("Unmatched enable");
        GC_sig_disabled--;
#   endif
    SIGSETMASK(dummy,old_mask);
}
# else
    /* Ivan Demakov: simplest way (to me) */
    void GC_disable_signals() { }
    void GC_enable_signals() { }
# endif
/* Find the page size */
word GC_page_size;

# if defined(MSWIN32) || defined(MSWINCE)
  void GC_setpagesize()
  {
    GetSystemInfo(&GC_sysinfo);
    GC_page_size = GC_sysinfo.dwPageSize;
  }

# else
#   if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP) \
       || defined(USE_MUNMAP)
      void GC_setpagesize()
      {
        GC_page_size = GETPAGESIZE();
      }
#   else
      /* It's acceptable to fake it. */
      void GC_setpagesize()
      {
        GC_page_size = HBLKSIZE;
      }
#   endif
# endif
/*
 * Find the base of the stack.
 * Used only in single-threaded environment.
 * With threads, GC_mark_roots needs to know how to do this.
 * Called with allocator lock held.
 */
# if defined(MSWIN32) || defined(MSWINCE)
# define is_writable(prot) ((prot) == PAGE_READWRITE \
                            || (prot) == PAGE_WRITECOPY \
                            || (prot) == PAGE_EXECUTE_READWRITE \
                            || (prot) == PAGE_EXECUTE_WRITECOPY)
/* Return the number of bytes that are writable starting at p.  */
/* The pointer p is assumed to be page aligned.                 */
/* If base is not 0, *base becomes the beginning of the         */
/* allocation region containing p.                              */
word GC_get_writable_length(ptr_t p, ptr_t *base)
{
    MEMORY_BASIC_INFORMATION buf;
    word result;
    word protect;

    result = VirtualQuery(p, &buf, sizeof(buf));
    if (result != sizeof(buf)) ABORT("Weird VirtualQuery result");
    if (base != 0) *base = (ptr_t)(buf.AllocationBase);
    protect = (buf.Protect & ~(PAGE_GUARD | PAGE_NOCACHE));
    if (!is_writable(protect)) {
        return(0);
    }
    if (buf.State != MEM_COMMIT) return(0);
    return(buf.RegionSize);
}

ptr_t GC_get_stack_base()
{
    int dummy;
    ptr_t sp = (ptr_t)(&dummy);
    ptr_t trunc_sp = (ptr_t)((word)sp & ~(GC_page_size - 1));
    word size = GC_get_writable_length(trunc_sp, 0);

    return(trunc_sp + size);
}

# endif /* MS Windows */
# ifdef BEOS
# include <kernel/OS.h>
ptr_t GC_get_stack_base(){
    thread_info th;
    get_thread_info(find_thread(NULL),&th);
    return th.stack_end;
}
# endif /* BEOS */

# ifdef OS2
ptr_t GC_get_stack_base()
{
    PTIB ptib;
    PPIB ppib;

    if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
        GC_err_printf0("DosGetInfoBlocks failed\n");
        ABORT("DosGetInfoBlocks failed\n");
    }
    return((ptr_t)(ptib -> tib_pstacklimit));
}
# endif /* OS2 */
# ifdef AMIGA
#   define GC_AMIGA_SB
#   include "AmigaOS.c"
#   undef GC_AMIGA_SB
# endif /* AMIGA */
# if defined(NEED_FIND_LIMIT) || defined(UNIX_LIKE)

#   ifdef __STDC__
      typedef void (*handler)(int);
#   else
      typedef void (*handler)();
#   endif

#   if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1) \
       || defined(HURD) || defined(NETBSD)
      static struct sigaction old_segv_act;
#     if defined(IRIX5) || defined(HPUX) \
         || defined(HURD) || defined(NETBSD)
        static struct sigaction old_bus_act;
#     endif
#   else
      static handler old_segv_handler, old_bus_handler;
#   endif
# ifdef __STDC__
    void GC_set_and_save_fault_handler(handler h)
# else
    void GC_set_and_save_fault_handler(h)
    handler h;
# endif
{
#   if defined(SUNOS5SIGS) || defined(IRIX5) \
       || defined(OSF1) || defined(HURD) || defined(NETBSD)
      struct sigaction act;

      act.sa_handler = h;
#     if 0 /* Was necessary for Solaris 2.3 and very temporary  */
           /* NetBSD bugs.                                      */
        act.sa_flags = SA_RESTART | SA_NODEFER;
#     else
        act.sa_flags = SA_RESTART;
#     endif

      (void) sigemptyset(&act.sa_mask);
#     ifdef GC_IRIX_THREADS
        /* Older versions have a bug related to retrieving and  */
        /* setting a handler at the same time.                  */
        (void) sigaction(SIGSEGV, 0, &old_segv_act);
        (void) sigaction(SIGSEGV, &act, 0);
        (void) sigaction(SIGBUS, 0, &old_bus_act);
        (void) sigaction(SIGBUS, &act, 0);
#     else
        (void) sigaction(SIGSEGV, &act, &old_segv_act);
#       if defined(IRIX5) \
           || defined(HPUX) || defined(HURD) || defined(NETBSD)
          /* Under Irix 5.x or HP/UX, we may get SIGBUS.      */
          /* Pthreads doesn't exist under Irix 5.x, so we     */
          /* don't have to worry in the threads case.         */
          (void) sigaction(SIGBUS, &act, &old_bus_act);
#       endif
#     endif /* GC_IRIX_THREADS */
#   else
      old_segv_handler = signal(SIGSEGV, h);
#     ifdef SIGBUS
        old_bus_handler = signal(SIGBUS, h);
#     endif
#   endif
}
# endif /* NEED_FIND_LIMIT || UNIX_LIKE */
# ifdef NEED_FIND_LIMIT
  /* Some tools to implement HEURISTIC2 */
# define MIN_PAGE_SIZE 256    /* Smallest conceivable page size, bytes */
  /* static */ JMP_BUF GC_jmp_buf;

  void GC_fault_handler(sig)
  int sig;
  {
     LONGJMP(GC_jmp_buf, 1);
  }

  void GC_setup_temporary_fault_handler()
  {
     GC_set_and_save_fault_handler(GC_fault_handler);
  }

  void GC_reset_fault_handler()
  {
#   if defined(SUNOS5SIGS) || defined(IRIX5) \
       || defined(OSF1) || defined(HURD) || defined(NETBSD)
      (void) sigaction(SIGSEGV, &old_segv_act, 0);
#     if defined(IRIX5) \
         || defined(HPUX) || defined(HURD) || defined(NETBSD)
        (void) sigaction(SIGBUS, &old_bus_act, 0);
#     endif
#   else
      (void) signal(SIGSEGV, old_segv_handler);
#     ifdef SIGBUS
        (void) signal(SIGBUS, old_bus_handler);
#     endif
#   endif
  }
  /* Return the first nonaddressable location > p (up) or      */
  /* the smallest location q s.t. [q,p) is addressable (!up).  */
  /* We assume that p (up) or p-1 (!up) is addressable.        */
  ptr_t GC_find_limit(p, up)
  ptr_t p;
  GC_bool up;
  {
    static VOLATILE ptr_t result;
                /* Needs to be static, since otherwise it may not be  */
                /* preserved across the longjmp.  Can safely be       */
                /* static since it's only called once, with the       */
                /* allocation lock held.                              */

    GC_setup_temporary_fault_handler();
    if (SETJMP(GC_jmp_buf) == 0) {
        result = (ptr_t)(((word)(p))
                          & ~(MIN_PAGE_SIZE-1));
        for (;;) {
            if (up) {
                result += MIN_PAGE_SIZE;
            } else {
                result -= MIN_PAGE_SIZE;
            }
            GC_noop1((word)(*result));
        }
    }
    GC_reset_fault_handler();
    if (!up) {
        result += MIN_PAGE_SIZE;
    }
    return(result);
  }
# endif /* NEED_FIND_LIMIT */
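/* Example of use (illustrative): GC_init_linux_data_start above calls  */
/* GC_find_limit((ptr_t)(_end), FALSE), probing downward from _end to   */
/* find the smallest q such that [q, _end) is addressable, i.e. an      */
/* approximation of the start of the data segment.                      */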
#if defined(ECOS) || defined(NOSYS)
  ptr_t GC_get_stack_base()
  {
    return STACKBOTTOM;
  }
#endif
#ifdef HPUX_STACKBOTTOM

#include <sys/param.h>
#include <sys/pstat.h>

  ptr_t GC_get_register_stack_base(void)
  {
    struct pst_vm_status vm_status;
    int i = 0;

    while (pstat_getprocvm(&vm_status, sizeof(vm_status), 0, i++) == 1) {
      if (vm_status.pst_type == PS_RSESTACK) {
        return (ptr_t) vm_status.pst_vaddr;
      }
    }

    /* old way to get the register stack bottom */
    return (ptr_t)(((word)GC_stackbottom - BACKING_STORE_DISPLACEMENT - 1)
                   & ~(BACKING_STORE_ALIGNMENT - 1));
  }

#endif /* HPUX_STACKBOTTOM */
#ifdef LINUX_STACKBOTTOM

#include <sys/types.h>
#include <sys/stat.h>

# define STAT_SKIP 27   /* Number of fields preceding startstack  */
                        /* field in /proc/self/stat               */

#ifdef USE_LIBC_PRIVATES
# pragma weak __libc_stack_end
  extern ptr_t __libc_stack_end;
#endif
# ifdef IA64
    /* Try to read the backing store base from /proc/self/maps.  */
    /* We look for the writable mapping with a 0 major device,   */
    /* which is as close to our frame as possible, but below it. */
    static word backing_store_base_from_maps(char *maps)
    {
      char prot_buf[5];
      char *buf_ptr = maps;
      word start, end;
      unsigned int maj_dev;
      word current_best = 0;
      word dummy;

      for (;;) {
        buf_ptr = GC_parse_map_entry(buf_ptr, &start, &end, prot_buf, &maj_dev);
        if (buf_ptr == NULL) return current_best;
        if (prot_buf[1] == 'w' && maj_dev == 0) {
            if (end < (word)(&dummy) && start > current_best) current_best = start;
        }
      }
    }
    static word backing_store_base_from_proc(void)
    {
        return GC_apply_to_maps(backing_store_base_from_maps);
    }

#   ifdef USE_LIBC_PRIVATES
#     pragma weak __libc_ia64_register_backing_store_base
      extern ptr_t __libc_ia64_register_backing_store_base;
#   endif
    ptr_t GC_get_register_stack_base(void)
    {
#     ifdef USE_LIBC_PRIVATES
        if (0 != &__libc_ia64_register_backing_store_base
            && 0 != __libc_ia64_register_backing_store_base) {
          /* Glibc 2.2.4 has a bug such that for dynamically linked  */
          /* executables __libc_ia64_register_backing_store_base is  */
          /* defined but uninitialized during constructor calls.     */
          /* Hence we check for both nonzero address and value.      */
          return __libc_ia64_register_backing_store_base;
        }
#     endif
      word result = backing_store_base_from_proc();
      if (0 == result) {
          /* Use dumb heuristics.  Works only for default configuration. */
          result = (word)GC_stackbottom - BACKING_STORE_DISPLACEMENT;
          result += BACKING_STORE_ALIGNMENT - 1;
          result &= ~(BACKING_STORE_ALIGNMENT - 1);
          /* Verify that it's at least readable.  If not, we goofed. */
          GC_noop1(*(word *)result);
      }
      return (ptr_t)result;
    }
# endif /* IA64 */
  ptr_t GC_linux_stack_base(void)
  {
    /* We read the stack base value from /proc/self/stat.  We do this  */
    /* using direct I/O system calls in order to avoid calling malloc  */
    /* in case REDIRECT_MALLOC is defined.                             */
#   define STAT_BUF_SIZE 4096
#   define STAT_READ read
          /* Should probably call the real read, if read is wrapped.  */
    char stat_buf[STAT_BUF_SIZE];
    int f;
    char c;
    word result = 0;
    size_t i, buf_offset = 0;

    /* First try the easy way.  This should work for glibc 2.2.  */
    /* This fails in a prelinked ("prelink" command) executable  */
    /* since the correct value of __libc_stack_end never         */
    /* becomes visible to us.  The second test works around      */
    /* this.                                                     */
#   ifdef USE_LIBC_PRIVATES
      if (0 != &__libc_stack_end && 0 != __libc_stack_end ) {
#       ifdef IA64
          /* Some versions of glibc set the address 16 bytes too  */
          /* low while the initialization code is running.        */
          if (((word)__libc_stack_end & 0xfff) + 0x10 < 0x1000) {
            return __libc_stack_end + 0x10;
          } /* Otherwise it's not safe to add 16 bytes and we fall  */
            /* back to using /proc.                                 */
#       else
#       ifdef SPARC
          /* Older versions of glibc for 64-bit Sparc do not set
           * this variable correctly; it gets set to either zero
           * or one.
           */
          if (__libc_stack_end != (ptr_t) (unsigned long)0x1)
            return __libc_stack_end;
#       else
          return __libc_stack_end;
#       endif
#       endif
      }
#   endif

    f = open("/proc/self/stat", O_RDONLY);
    if (f < 0 || STAT_READ(f, stat_buf, STAT_BUF_SIZE) < 2 * STAT_SKIP) {
        ABORT("Couldn't read /proc/self/stat");
    }
    c = stat_buf[buf_offset++];
    /* Skip the required number of fields.  This number is hopefully  */
    /* constant across all Linux implementations.                     */
    for (i = 0; i < STAT_SKIP; ++i) {
        while (isspace(c)) c = stat_buf[buf_offset++];
        while (!isspace(c)) c = stat_buf[buf_offset++];
    }
    while (isspace(c)) c = stat_buf[buf_offset++];
    while (isdigit(c)) {
        result *= 10;
        result += c - '0';
        c = stat_buf[buf_offset++];
    }
    close(f);
    if (result < 0x10000000) ABORT("Absurd stack bottom value");
    return (ptr_t)result;
  }
986 #ifdef FREEBSD_STACKBOTTOM
988 /* This uses an undocumented sysctl call, but at least one expert */
989 /* believes it will stay. */
992 #include <sys/types.h>
993 #include <sys/sysctl.h>
995 ptr_t GC_freebsd_stack_base(void)
997 int nm[2] = {CTL_KERN, KERN_USRSTACK};
999 size_t len = sizeof(ptr_t);
1000 int r = sysctl(nm, 2, &base, &len, NULL, 0);
1002 if (r) ABORT("Error getting stack base");
1007 #endif /* FREEBSD_STACKBOTTOM */
#if !defined(BEOS) && !defined(AMIGA) && !defined(MSWIN32) \
    && !defined(MSWINCE) && !defined(OS2) && !defined(NOSYS) && !defined(ECOS)

ptr_t GC_get_stack_base()
{
#   if defined(HEURISTIC1) || defined(HEURISTIC2) || \
       defined(LINUX_STACKBOTTOM) || defined(FREEBSD_STACKBOTTOM)
    word dummy;
    ptr_t result;
#   endif

#   define STACKBOTTOM_ALIGNMENT_M1 ((word)STACK_GRAN - 1)

#   ifdef STACKBOTTOM
      return(STACKBOTTOM);
#   else
#     ifdef HEURISTIC1
#       ifdef STACK_GROWS_DOWN
          result = (ptr_t)((((word)(&dummy))
                            + STACKBOTTOM_ALIGNMENT_M1)
                           & ~STACKBOTTOM_ALIGNMENT_M1);
#       else
          result = (ptr_t)(((word)(&dummy))
                           & ~STACKBOTTOM_ALIGNMENT_M1);
#       endif
#     endif /* HEURISTIC1 */
#     ifdef LINUX_STACKBOTTOM
        result = GC_linux_stack_base();
#     endif
#     ifdef FREEBSD_STACKBOTTOM
        result = GC_freebsd_stack_base();
#     endif
#     ifdef HEURISTIC2
#       ifdef STACK_GROWS_DOWN
          result = GC_find_limit((ptr_t)(&dummy), TRUE);
#         ifdef HEURISTIC2_LIMIT
            if (result > HEURISTIC2_LIMIT
                && (ptr_t)(&dummy) < HEURISTIC2_LIMIT) {
              result = HEURISTIC2_LIMIT;
            }
#         endif
#       else
          result = GC_find_limit((ptr_t)(&dummy), FALSE);
#         ifdef HEURISTIC2_LIMIT
            if (result < HEURISTIC2_LIMIT
                && (ptr_t)(&dummy) > HEURISTIC2_LIMIT) {
              result = HEURISTIC2_LIMIT;
            }
#         endif
#       endif
#     endif /* HEURISTIC2 */
#     ifdef STACK_GROWS_DOWN
        if (result == 0) result = (ptr_t)(signed_word)(-sizeof(ptr_t));
#     endif
      return(result);
#   endif /* STACKBOTTOM */
}

# endif /* ! AMIGA, !OS 2, ! MS Windows, !BEOS, !NOSYS, !ECOS */
/*
 * Register static data segment(s) as roots.
 * If more data segments are added later, they need to be registered
 * at that point (as we do with SunOS dynamic loading),
 * or GC_mark_roots needs to check for them (as we do with PCR).
 * Called with allocator lock held.
 */
# ifdef OS2

void GC_register_data_segments()
{
    PTIB ptib;
    PPIB ppib;
    HMODULE module_handle;
#   define PBUFSIZ 512
    UCHAR path[PBUFSIZ];
    FILE * myexefile;
    struct exe_hdr hdrdos;    /* MSDOS header.                   */
    struct e32_exe hdr386;    /* Real header for my executable   */
    struct o32_obj seg;       /* Current segment                 */
    int nsegs;

    if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
        GC_err_printf0("DosGetInfoBlocks failed\n");
        ABORT("DosGetInfoBlocks failed\n");
    }
    module_handle = ppib -> pib_hmte;
    if (DosQueryModuleName(module_handle, PBUFSIZ, path) != NO_ERROR) {
        GC_err_printf0("DosQueryModuleName failed\n");
        ABORT("DosQueryModuleName failed\n");
    }
    myexefile = fopen(path, "rb");
    if (myexefile == 0) {
        GC_err_puts("Couldn't open executable ");
        GC_err_puts(path); GC_err_puts("\n");
        ABORT("Failed to open executable\n");
    }
    if (fread((char *)(&hdrdos), 1, sizeof hdrdos, myexefile) < sizeof hdrdos) {
        GC_err_puts("Couldn't read MSDOS header from ");
        GC_err_puts(path); GC_err_puts("\n");
        ABORT("Couldn't read MSDOS header");
    }
    if (E_MAGIC(hdrdos) != EMAGIC) {
        GC_err_puts("Executable has wrong DOS magic number: ");
        GC_err_puts(path); GC_err_puts("\n");
        ABORT("Bad DOS magic number");
    }
    if (fseek(myexefile, E_LFANEW(hdrdos), SEEK_SET) != 0) {
        GC_err_puts("Seek to new header failed in ");
        GC_err_puts(path); GC_err_puts("\n");
        ABORT("Seek to new header failed");
    }
    if (fread((char *)(&hdr386), 1, sizeof hdr386, myexefile) < sizeof hdr386) {
        GC_err_puts("Couldn't read OS/2 header from ");
        GC_err_puts(path); GC_err_puts("\n");
        ABORT("Couldn't read OS/2 header");
    }
    if (E32_MAGIC1(hdr386) != E32MAGIC1 || E32_MAGIC2(hdr386) != E32MAGIC2) {
        GC_err_puts("Executable has wrong OS/2 magic number: ");
        GC_err_puts(path); GC_err_puts("\n");
        ABORT("Bad OS/2 magic number");
    }
    if ( E32_BORDER(hdr386) != E32LEBO || E32_WORDER(hdr386) != E32LEWO) {
        GC_err_puts("Executable has wrong byte order: ");
        GC_err_puts(path); GC_err_puts("\n");
        ABORT("Bad byte order");
    }
    if ( E32_CPU(hdr386) == E32CPU286) {
        GC_err_puts("GC can't handle 80286 executables: ");
        GC_err_puts(path); GC_err_puts("\n");
        EXIT();
    }
    if (fseek(myexefile, E_LFANEW(hdrdos) + E32_OBJTAB(hdr386),
              SEEK_SET) != 0) {
        GC_err_puts("Seek to object table failed: ");
        GC_err_puts(path); GC_err_puts("\n");
        ABORT("Seek to object table failed");
    }
    for (nsegs = E32_OBJCNT(hdr386); nsegs > 0; nsegs--) {
      int flags;
      if (fread((char *)(&seg), 1, sizeof seg, myexefile) < sizeof seg) {
        GC_err_puts("Couldn't read obj table entry from ");
        GC_err_puts(path); GC_err_puts("\n");
        ABORT("Couldn't read obj table entry");
      }
      flags = O32_FLAGS(seg);
      if (!(flags & OBJWRITE)) continue;
      if (!(flags & OBJREAD)) continue;
      if (flags & OBJINVALID) {
          GC_err_printf0("Object with invalid pages?\n");
          continue;
      }
      GC_add_roots_inner(O32_BASE(seg), O32_BASE(seg)+O32_SIZE(seg), FALSE);
    }
}

# else /* !OS2 */
# if defined(MSWIN32) || defined(MSWINCE)

  /* Unfortunately, we have to handle win32s very differently from NT,  */
  /* since VirtualQuery has very different semantics.  In particular,   */
  /* under win32s a VirtualQuery call on an unmapped page returns an    */
  /* invalid result.  Under NT, GC_register_data_segments is a no-op    */
  /* and all real work is done by GC_register_dynamic_libraries.  Under */
  /* win32s, we cannot find the data segments associated with dll's.    */
  /* We register the main data segment here.                            */
  GC_bool GC_no_win32_dlls = FALSE;
        /* This used to be set for gcc, to avoid dealing with           */
        /* the structured exception handling issues.  But we now have   */
        /* assembly code to do that right.                              */

  GC_bool GC_wnt = FALSE;
        /* This is a Windows NT derivative, i.e. NT, W2K, XP or later.  */

  void GC_init_win32()
  {
    /* if we're running under win32s, assume that no DLLs will be loaded */
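    /* (GetVersion reports the platform in its high bit: set for the    */
    /* Win32s/Windows 9x line, clear for the NT line.  The low byte is  */
    /* the major version, which is <= 3 only under Win32s on            */
    /* Windows 3.x.)                                                    */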
    DWORD v = GetVersion();

    GC_wnt = !(v & 0x80000000);
    GC_no_win32_dlls |= ((!GC_wnt) && (v & 0xff) <= 3);
  }
  /* Return the smallest address a such that VirtualQuery              */
  /* returns correct results for all addresses between a and start.    */
  /* Assumes VirtualQuery returns correct information for start.       */
  ptr_t GC_least_described_address(ptr_t start)
  {
    MEMORY_BASIC_INFORMATION buf;
    DWORD result;
    LPVOID limit;
    ptr_t p;
    LPVOID q;

    limit = GC_sysinfo.lpMinimumApplicationAddress;
    p = (ptr_t)((word)start & ~(GC_page_size - 1));
    for (;;) {
        q = (LPVOID)(p - GC_page_size);
        if ((ptr_t)q > (ptr_t)p /* underflow */ || q < limit) break;
        result = VirtualQuery(q, &buf, sizeof(buf));
        if (result != sizeof(buf) || buf.AllocationBase == 0) break;
        p = (ptr_t)(buf.AllocationBase);
    }
    return(p);
  }
# ifndef REDIRECT_MALLOC
  /* We maintain a linked list of AllocationBase values that we know    */
  /* correspond to malloc heap sections.  Currently this is only called */
  /* during a GC.  But there is some hope that for long running         */
  /* programs we will eventually see most heap sections.                */

  /* In the long run, it would be more reliable to occasionally walk    */
  /* the malloc heap with HeapWalk on the default heap.  But that       */
  /* apparently works only for NT-based Windows.                        */

  /* In the long run, a better data structure would also be nice ...    */
  struct GC_malloc_heap_list {
    void * allocation_base;
    struct GC_malloc_heap_list *next;
  } *GC_malloc_heap_l = 0;
  /* Is p the base of one of the malloc heap sections we already know  */
  /* about?                                                            */
  GC_bool GC_is_malloc_heap_base(ptr_t p)
  {
    struct GC_malloc_heap_list *q = GC_malloc_heap_l;

    while (0 != q) {
      if (q -> allocation_base == p) return TRUE;
      q = q -> next;
    }
    return FALSE;
  }
  void *GC_get_allocation_base(void *p)
  {
    MEMORY_BASIC_INFORMATION buf;
    DWORD result = VirtualQuery(p, &buf, sizeof(buf));
    if (result != sizeof(buf)) {
      ABORT("Weird VirtualQuery result");
    }
    return buf.AllocationBase;
  }
  size_t GC_max_root_size = 100000;    /* Approx. largest root size.  */

  void GC_add_current_malloc_heap()
  {
    struct GC_malloc_heap_list *new_l =
                 malloc(sizeof(struct GC_malloc_heap_list));
    void * candidate;

    if (new_l == 0) return;
    candidate = GC_get_allocation_base(new_l);
    if (GC_is_malloc_heap_base(candidate)) {
      /* Try a little harder to find malloc heap.                      */
        size_t req_size = 10000;
        do {
          void *p = malloc(req_size);
          if (0 == p) { free(new_l); return; }
          candidate = GC_get_allocation_base(p);
          free(p);
          req_size *= 2;
        } while (GC_is_malloc_heap_base(candidate)
                 && req_size < GC_max_root_size/10 && req_size < 500000);
        if (GC_is_malloc_heap_base(candidate)) {
          free(new_l); return;
        }
    }
#   ifdef CONDPRINT
      if (GC_print_stats)
        GC_printf1("Found new system malloc AllocationBase at 0x%lx\n",
                   candidate);
#   endif
    new_l -> allocation_base = candidate;
    new_l -> next = GC_malloc_heap_l;
    GC_malloc_heap_l = new_l;
  }
# endif /* REDIRECT_MALLOC */

  /* Is p the start of either the malloc heap, or of one of our  */
  /* heap sections?                                              */
  GC_bool GC_is_heap_base (ptr_t p)
  {
     unsigned i;

#    ifndef REDIRECT_MALLOC
       static word last_gc_no = -1;

       if (last_gc_no != GC_gc_no) {
         GC_add_current_malloc_heap();
         last_gc_no = GC_gc_no;
       }
       if (GC_root_size > GC_max_root_size) GC_max_root_size = GC_root_size;
       if (GC_is_malloc_heap_base(p)) return TRUE;
#    endif
     for (i = 0; i < GC_n_heap_bases; i++) {
       if (GC_heap_bases[i] == p) return TRUE;
     }
     return FALSE;
  }
  void GC_register_root_section(ptr_t static_root)
  {
      MEMORY_BASIC_INFORMATION buf;
      ptr_t p;
      LPVOID q;
      DWORD result;
      DWORD protect;
      char * base;
      char * limit, * new_limit;

      if (!GC_no_win32_dlls) return;
      p = base = limit = GC_least_described_address(static_root);
      while (p < GC_sysinfo.lpMaximumApplicationAddress) {
        result = VirtualQuery(p, &buf, sizeof(buf));
        if (result != sizeof(buf) || buf.AllocationBase == 0
            || GC_is_heap_base(buf.AllocationBase)) break;
        new_limit = (char *)p + buf.RegionSize;
        protect = buf.Protect;
        if (buf.State == MEM_COMMIT
            && is_writable(protect)) {
            if ((char *)p == limit) {
                limit = new_limit;
            } else {
                if (base != limit) GC_add_roots_inner(base, limit, FALSE);
                base = p;
                limit = new_limit;
            }
        }
        if (p > (LPVOID)new_limit /* overflow */) break;
        p = (LPVOID)new_limit;
      }
      if (base != limit) GC_add_roots_inner(base, limit, FALSE);
  }
  void GC_register_data_segments()
  {
      static char dummy;

      GC_register_root_section((ptr_t)(&dummy));
  }

# else /* !OS2 && !Windows */
# if (defined(SVR4) || defined(AUX) || defined(DGUX) \
      || (defined(LINUX) && defined(SPARC))) && !defined(PCR)
ptr_t GC_SysVGetDataStart(max_page_size, etext_addr)
int max_page_size;
int * etext_addr;
{
    word text_end = ((word)(etext_addr) + sizeof(word) - 1)
                    & ~(sizeof(word) - 1);
        /* etext rounded to word boundary       */
    word next_page = ((text_end + (word)max_page_size - 1)
                      & ~((word)max_page_size - 1));
    word page_offset = (text_end & ((word)max_page_size - 1));
    VOLATILE char * result = (char *)(next_page + page_offset);
    /* Note that this isn't equivalent to just adding            */
    /* max_page_size to &etext if &etext is at a page boundary   */

    GC_setup_temporary_fault_handler();
    if (SETJMP(GC_jmp_buf) == 0) {
        /* Try writing to the address.  */
        *result = *result;
        GC_reset_fault_handler();
    } else {
        GC_reset_fault_handler();
        /* We got here via a longjmp.  The address is not readable.      */
        /* This is known to happen under Solaris 2.4 + gcc, which place  */
        /* string constants in the text segment, but after etext.        */
        /* Use plan B.  Note that we now know there is a gap between     */
        /* text and data segments, so plan A bought us something.        */
        result = (char *)GC_find_limit((ptr_t)(DATAEND), FALSE);
    }
    return((ptr_t)result);
}
# endif
# if defined(FREEBSD) && (defined(I386) || defined(X86_64) || defined(powerpc) || defined(__powerpc__)) && !defined(PCR)
/* It's unclear whether this should be identical to the above, or        */
/* whether it should apply to non-X86 architectures.                     */
/* For now we don't assume that there is always an empty page after      */
/* etext.  But in some cases there actually seems to be slightly more.   */
/* This also deals with holes between read-only data and writable data.  */
ptr_t GC_FreeBSDGetDataStart(max_page_size, etext_addr)
int max_page_size;
int * etext_addr;
{
    word text_end = ((word)(etext_addr) + sizeof(word) - 1)
                    & ~(sizeof(word) - 1);
        /* etext rounded to word boundary       */
    VOLATILE word next_page = (text_end + (word)max_page_size - 1)
                              & ~((word)max_page_size - 1);
    VOLATILE ptr_t result = (ptr_t)text_end;
    GC_setup_temporary_fault_handler();
    if (SETJMP(GC_jmp_buf) == 0) {
        /* Try reading at the address.                          */
        /* This should happen before there is another thread.   */
        for (; next_page < (word)(DATAEND); next_page += (word)max_page_size)
            *(VOLATILE char *)next_page;
        GC_reset_fault_handler();
    } else {
        GC_reset_fault_handler();
        /* As above, we go to plan B    */
        result = GC_find_limit((ptr_t)(DATAEND), FALSE);
    }
    return(result);
}
# endif
# ifdef AMIGA
#   define GC_AMIGA_DS
#   include "AmigaOS.c"
#   undef GC_AMIGA_DS
# endif /* AMIGA */

#else /* !OS2 && !Windows && !AMIGA */
void GC_register_data_segments()
{
#   if !defined(PCR) && !defined(SRC_M3) && !defined(MACOS)
#     if defined(REDIRECT_MALLOC) && defined(GC_SOLARIS_THREADS)
        /* As of Solaris 2.3, the Solaris threads implementation       */
        /* allocates the data structure for the initial thread with    */
        /* sbrk at process startup.  It needs to be scanned, so that   */
        /* we don't lose some malloc allocated data structures         */
        /* hanging from it.  We're on thin ice here ...                */
        extern caddr_t sbrk();

        GC_add_roots_inner(DATASTART, (char *)sbrk(0), FALSE);
#     else
        GC_add_roots_inner(DATASTART, (char *)(DATAEND), FALSE);
#       if defined(DATASTART2)
          GC_add_roots_inner(DATASTART2, (char *)(DATAEND2), FALSE);
#       endif
#     endif
#   endif
#   if defined(MACOS)
    {
#     if defined(THINK_C)
        extern void* GC_MacGetDataStart(void);
        /* globals begin above stack and end at a5. */
        GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
                           (ptr_t)LMGetCurrentA5(), FALSE);
#     else
#       if defined(__MWERKS__)
#         if !__POWERPC__
            extern void* GC_MacGetDataStart(void);
            /* MATTHEW: Function to handle Far Globals (CW Pro 3) */
#           if __option(far_data)
              extern void* GC_MacGetDataEnd(void);
#           endif
            /* globals begin above stack and end at a5. */
            GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
                               (ptr_t)LMGetCurrentA5(), FALSE);
            /* MATTHEW: Handle Far Globals */
#           if __option(far_data)
              /* Far globals follow the QD globals: */
              GC_add_roots_inner((ptr_t)LMGetCurrentA5(),
                                 (ptr_t)GC_MacGetDataEnd(), FALSE);
#           endif
#         else
            extern char __data_start__[], __data_end__[];
            GC_add_roots_inner((ptr_t)&__data_start__,
                               (ptr_t)&__data_end__, FALSE);
#         endif /* __POWERPC__ */
#       endif /* __MWERKS__ */
#     endif /* !THINK_C */
    }
#   endif /* MACOS */

    /* Dynamic libraries are added at every collection, since they may  */
    /* change.                                                          */
}
# endif /* ! AMIGA */
# endif /* ! MSWIN32 && ! MSWINCE */
/*
 * Auxiliary routines for obtaining memory from OS.
 */

# if !defined(OS2) && !defined(PCR) && !defined(AMIGA) \
     && !defined(MSWIN32) && !defined(MSWINCE) \
     && !defined(MACOS) && !defined(DOS4GW)

# ifdef SUNOS4
    extern caddr_t sbrk();
# endif
# ifdef __STDC__
#   define SBRK_ARG_T ptrdiff_t
# else
#   define SBRK_ARG_T int
# endif
# if 0 && defined(RS6000)  /* We now use mmap */
/* The compiler seems to generate speculative reads one past the end of  */
/* an allocated object.  Hence we need to make sure that the page        */
/* following the last heap page is also mapped.                          */
ptr_t GC_unix_get_mem(bytes)
word bytes;
{
    caddr_t cur_brk = (caddr_t)sbrk(0);
    caddr_t result;
    SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);
    static caddr_t my_brk_val = 0;

    if ((SBRK_ARG_T)bytes < 0) return(0); /* too big */
    if (lsbs != 0) {
        if((caddr_t)(sbrk(GC_page_size - lsbs)) == (caddr_t)(-1)) return(0);
    }
    if (cur_brk == my_brk_val) {
        /* Use the extra block we allocated last time. */
        result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
        if (result == (caddr_t)(-1)) return(0);
        result -= GC_page_size;
    } else {
        result = (ptr_t)sbrk(GC_page_size + (SBRK_ARG_T)bytes);
        if (result == (caddr_t)(-1)) return(0);
    }
    my_brk_val = result + bytes + GC_page_size; /* Always page aligned */
    return((ptr_t)result);
}
#else /* Not RS6000 */

#if defined(USE_MMAP) || defined(USE_MUNMAP)

#ifdef USE_MMAP_FIXED
#   define GC_MMAP_FLAGS MAP_FIXED | MAP_PRIVATE
        /* Seems to yield better performance on Solaris 2, but can       */
        /* be unreliable if something is already mapped at the address.  */
#else
#   define GC_MMAP_FLAGS MAP_PRIVATE
#endif

#ifdef USE_MMAP_ANON
# define zero_fd -1
# if defined(MAP_ANONYMOUS)
#   define OPT_MAP_ANON MAP_ANONYMOUS
# else
#   define OPT_MAP_ANON MAP_ANON
# endif
#else
  static int zero_fd;
# define OPT_MAP_ANON 0
#endif

#endif /* defined(USE_MMAP) || defined(USE_MUNMAP) */
#if defined(USE_MMAP)
/* Tested only under Linux, IRIX5 and Solaris 2 */

#ifndef HEAP_START
#   define HEAP_START 0
#endif

ptr_t GC_unix_get_mem(bytes)
word bytes;
{
    void *result;
    static ptr_t last_addr = HEAP_START;

#   ifndef USE_MMAP_ANON
      static GC_bool initialized = FALSE;

      if (!initialized) {
          zero_fd = open("/dev/zero", O_RDONLY);
          fcntl(zero_fd, F_SETFD, FD_CLOEXEC);
          initialized = TRUE;
      }
#   endif

    if (bytes & (GC_page_size -1)) ABORT("Bad GET_MEM arg");
    result = mmap(last_addr, bytes, PROT_READ | PROT_WRITE | OPT_PROT_EXEC,
                  GC_MMAP_FLAGS | OPT_MAP_ANON, zero_fd, 0/* offset */);
    if (result == MAP_FAILED) return(0);
    last_addr = (ptr_t)result + bytes + GC_page_size - 1;
    last_addr = (ptr_t)((word)last_addr & ~(GC_page_size - 1));
#   if !defined(LINUX)
      if (last_addr == 0) {
        /* Oops.  We got the end of the address space.  This isn't   */
        /* usable by arbitrary C code, since one-past-end pointers   */
        /* don't work, so we discard it and try again.               */
        munmap(result, (size_t)(-GC_page_size) - (size_t)result);
                        /* Leave last page mapped, so we can't repeat. */
        return GC_unix_get_mem(bytes);
      }
#   else
      GC_ASSERT(last_addr != 0);
#   endif
    return((ptr_t)result);
}
#else /* Not RS6000, not USE_MMAP */
ptr_t GC_unix_get_mem(bytes)
word bytes;
{
  ptr_t result;
# ifdef IRIX5
    /* Bare sbrk isn't thread safe.  Play by malloc rules.       */
    /* The equivalent may be needed on other systems as well.    */
    __LOCK_MALLOC();
# endif
  {
    ptr_t cur_brk = (ptr_t)sbrk(0);
    SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);

    if ((SBRK_ARG_T)bytes < 0) return(0); /* too big */
    if (lsbs != 0) {
        if((ptr_t)sbrk(GC_page_size - lsbs) == (ptr_t)(-1)) return(0);
    }
    result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
    if (result == (ptr_t)(-1)) result = 0;
  }
# ifdef IRIX5
    __UNLOCK_MALLOC();
# endif
  return(result);
}

#endif /* Not USE_MMAP */
#endif /* Not RS6000 */
# ifdef OS2

void * os2_alloc(size_t bytes)
{
    void * result;

    if (DosAllocMem(&result, bytes, PAG_EXECUTE | PAG_READ |
                                    PAG_WRITE | PAG_COMMIT)
                    != NO_ERROR) {
        return(0);
    }
    if (result == 0) return(os2_alloc(bytes));
    return(result);
}

# endif /* OS2 */
# if defined(MSWIN32) || defined(MSWINCE)
SYSTEM_INFO GC_sysinfo;
# endif

# ifdef MSWIN32

# ifdef USE_GLOBAL_ALLOC
#   define GLOBAL_ALLOC_TEST 1
# else
#   define GLOBAL_ALLOC_TEST GC_no_win32_dlls
# endif

word GC_n_heap_bases = 0;

ptr_t GC_win32_get_mem(bytes)
word bytes;
{
    ptr_t result;

    if (GLOBAL_ALLOC_TEST) {
        /* VirtualAlloc doesn't like PAGE_EXECUTE_READWRITE.   */
        /* There are also unconfirmed rumors of other          */
        /* problems, so we dodge the issue.                    */
        result = (ptr_t) GlobalAlloc(0, bytes + HBLKSIZE);
        result = (ptr_t)(((word)result + HBLKSIZE) & ~(HBLKSIZE-1));
    } else {
        /* VirtualProtect only works on regions returned by a   */
        /* single VirtualAlloc call.  Thus we allocate one      */
        /* extra page, which will prevent merging of blocks     */
        /* in separate regions, and eliminate any temptation    */
        /* to call VirtualProtect on a range spanning regions.  */
        /* This wastes a small amount of memory, and risks      */
        /* increased fragmentation.  But better alternatives    */
        /* would require effort.                                */
        result = (ptr_t) VirtualAlloc(NULL, bytes + 1,
                                      MEM_COMMIT | MEM_RESERVE,
                                      PAGE_EXECUTE_READWRITE);
    }
    if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
        /* If I read the documentation correctly, this can     */
        /* only happen if HBLKSIZE > 64k or not a power of 2.  */
    if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
    GC_heap_bases[GC_n_heap_bases++] = result;
    return(result);
}

void GC_win32_free_heap ()
{
    if (GC_no_win32_dlls) {
        while (GC_n_heap_bases > 0) {
            GlobalFree (GC_heap_bases[--GC_n_heap_bases]);
            GC_heap_bases[GC_n_heap_bases] = 0;
        }
    }
}
# endif /* MSWIN32 */
# ifdef AMIGA
#   define GC_AMIGA_AM
#   include "AmigaOS.c"
#   undef GC_AMIGA_AM
# endif /* AMIGA */
# ifdef MSWINCE
word GC_n_heap_bases = 0;

ptr_t GC_wince_get_mem(bytes)
word bytes;
{
    ptr_t result;
    word i;

    /* Round up allocation size to multiple of page size */
    bytes = (bytes + GC_page_size-1) & ~(GC_page_size-1);

    /* Try to find reserved, uncommitted pages */
    for (i = 0; i < GC_n_heap_bases; i++) {
        if (((word)(-(signed_word)GC_heap_lengths[i])
             & (GC_sysinfo.dwAllocationGranularity-1))
            >= bytes) {
            result = GC_heap_bases[i] + GC_heap_lengths[i];
            break;
        }
    }

    if (i == GC_n_heap_bases) {
        /* Reserve more pages */
        word res_bytes = (bytes + GC_sysinfo.dwAllocationGranularity-1)
                         & ~(GC_sysinfo.dwAllocationGranularity-1);
        /* If we ever support MPROTECT_VDB here, we will probably need to    */
        /* ensure that res_bytes is strictly > bytes, so that VirtualProtect */
        /* never spans regions.  It seems to be OK for a VirtualFree         */
        /* argument to span regions, so we should be OK for now.             */
        result = (ptr_t) VirtualAlloc(NULL, res_bytes,
                                      MEM_RESERVE | MEM_TOP_DOWN,
                                      PAGE_EXECUTE_READWRITE);
        if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
            /* If I read the documentation correctly, this can     */
            /* only happen if HBLKSIZE > 64k or not a power of 2.  */
        if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
        GC_heap_bases[GC_n_heap_bases] = result;
        GC_heap_lengths[GC_n_heap_bases] = 0;
        GC_n_heap_bases++;
    }

    /* Commit pages */
    result = (ptr_t) VirtualAlloc(result, bytes,
                                  MEM_COMMIT,
                                  PAGE_EXECUTE_READWRITE);
    if (result != NULL) {
        if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
        GC_heap_lengths[i] += bytes;
    }

    return(result);
}
# endif
#ifdef USE_MUNMAP

/* For now, this only works on Win32/WinCE and some Unix-like   */
/* systems.  If you have something else, don't define           */
/* USE_MUNMAP.                                                  */
/* We assume ANSI C to support this feature.                    */

#if !defined(MSWIN32) && !defined(MSWINCE)

#include <unistd.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>

#endif
/* Compute a page aligned starting address for the unmap        */
/* operation on a block of size bytes starting at start.        */
/* Return 0 if the block is too small to make this feasible.    */
ptr_t GC_unmap_start(ptr_t start, word bytes)
{
    ptr_t result = start;
    /* Round start to next page boundary.       */
    result += GC_page_size - 1;
    result = (ptr_t)((word)result & ~(GC_page_size - 1));
    if (result + GC_page_size > start + bytes) return 0;
    return result;
}

/* Compute end address for an unmap operation on the indicated  */
/* block.                                                       */
ptr_t GC_unmap_end(ptr_t start, word bytes)
{
    ptr_t end_addr = start + bytes;
    end_addr = (ptr_t)((word)end_addr & ~(GC_page_size - 1));
    return end_addr;
}
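/* Worked example (illustrative): with GC_page_size == 0x1000, a block  */
/* at start == 0x10100 with bytes == 0x3000 spans 0x10100..0x13100;     */
/* GC_unmap_start returns 0x11000 and GC_unmap_end returns 0x13000, so  */
/* only whole pages strictly inside the block are ever unmapped.        */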
/* Under Win32/WinCE we commit (map) and decommit (unmap)       */
/* memory using VirtualAlloc and VirtualFree.  These functions  */
/* work on individual allocations of virtual memory, made       */
/* previously using VirtualAlloc with the MEM_RESERVE flag.     */
/* The ranges we need to (de)commit may span several of these   */
/* allocations; therefore we use VirtualQuery to check          */
/* allocation lengths, and split up the range as necessary.     */

/* We assume that GC_remap is called on exactly the same range  */
/* as a previous call to GC_unmap.  It is safe to consistently  */
/* round the endpoints in both places.                          */
void GC_unmap(ptr_t start, word bytes)
{
    ptr_t start_addr = GC_unmap_start(start, bytes);
    ptr_t end_addr = GC_unmap_end(start, bytes);
    word len = end_addr - start_addr;
    if (0 == start_addr) return;
#   if defined(MSWIN32) || defined(MSWINCE)
      while (len != 0) {
          MEMORY_BASIC_INFORMATION mem_info;
          word free_len;
          if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
              != sizeof(mem_info))
              ABORT("Weird VirtualQuery result");
          free_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
          if (!VirtualFree(start_addr, free_len, MEM_DECOMMIT))
              ABORT("VirtualFree failed");
          GC_unmapped_bytes += free_len;
          start_addr += free_len;
          len -= free_len;
      }
#   else
      /* We immediately remap it to prevent an intervening mmap from  */
      /* accidentally grabbing the same address space.                */
      {
        void * result;
        result = mmap(start_addr, len, PROT_NONE,
                      MAP_PRIVATE | MAP_FIXED | OPT_MAP_ANON,
                      zero_fd, 0/* offset */);
        if (result != (void *)start_addr) ABORT("mmap(...PROT_NONE...) failed");
      }
      GC_unmapped_bytes += len;
#   endif
}
void GC_remap(ptr_t start, word bytes)
{
    ptr_t start_addr = GC_unmap_start(start, bytes);
    ptr_t end_addr = GC_unmap_end(start, bytes);
    word len = end_addr - start_addr;

#   if defined(MSWIN32) || defined(MSWINCE)
      ptr_t result;

      if (0 == start_addr) return;
      while (len != 0) {
          MEMORY_BASIC_INFORMATION mem_info;
          word alloc_len;
          if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
              != sizeof(mem_info))
              ABORT("Weird VirtualQuery result");
          alloc_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
          result = VirtualAlloc(start_addr, alloc_len,
                                MEM_COMMIT,
                                PAGE_EXECUTE_READWRITE);
          if (result != start_addr) {
              ABORT("VirtualAlloc remapping failed");
          }
          GC_unmapped_bytes -= alloc_len;
          start_addr += alloc_len;
          len -= alloc_len;
      }
#   else
      /* It was already remapped with PROT_NONE. */
      int result;

      if (0 == start_addr) return;
      result = mprotect(start_addr, len,
                        PROT_READ | PROT_WRITE | OPT_PROT_EXEC);
      if (result != 0) {
          GC_err_printf3(
                "Mprotect failed at 0x%lx (length %ld) with errno %ld\n",
                start_addr, len, errno);
          ABORT("Mprotect remapping failed");
      }
      GC_unmapped_bytes -= len;
#   endif
}
/* Two adjacent blocks have already been unmapped and are about to      */
/* be merged.  Unmap the whole block.  This typically requires          */
/* that we unmap a small section in the middle that was not previously  */
/* unmapped due to alignment constraints.                               */
void GC_unmap_gap(ptr_t start1, word bytes1, ptr_t start2, word bytes2)
{
    ptr_t start1_addr = GC_unmap_start(start1, bytes1);
    ptr_t end1_addr = GC_unmap_end(start1, bytes1);
    ptr_t start2_addr = GC_unmap_start(start2, bytes2);
    ptr_t end2_addr = GC_unmap_end(start2, bytes2);
    ptr_t start_addr = end1_addr;
    ptr_t end_addr = start2_addr;
    word len;

    GC_ASSERT(start1 + bytes1 == start2);
    if (0 == start1_addr) start_addr = GC_unmap_start(start1, bytes1 + bytes2);
    if (0 == start2_addr) end_addr = GC_unmap_end(start1, bytes1 + bytes2);
    if (0 == start_addr) return;
    len = end_addr - start_addr;
#   if defined(MSWIN32) || defined(MSWINCE)
      while (len != 0) {
          MEMORY_BASIC_INFORMATION mem_info;
          word free_len;
          if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
              != sizeof(mem_info))
              ABORT("Weird VirtualQuery result");
          free_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
          if (!VirtualFree(start_addr, free_len, MEM_DECOMMIT))
              ABORT("VirtualFree failed");
          GC_unmapped_bytes += free_len;
          start_addr += free_len;
          len -= free_len;
      }
#   else
      if (len != 0 && munmap(start_addr, len) != 0) ABORT("munmap failed");
      GC_unmapped_bytes += len;
#   endif
}

#endif /* USE_MUNMAP */
/* Routine for pushing any additional roots.  In THREADS        */
/* environment, this is also responsible for marking from       */
/* thread stacks.                                               */
#ifndef THREADS
void (*GC_push_other_roots)() = 0;
#else /* THREADS */

# ifdef PCR
PCR_ERes GC_push_thread_stack(PCR_Th_T *t, PCR_Any dummy)
{
    struct PCR_ThCtl_TInfoRep info;
    PCR_ERes result;

    info.ti_stkLow = info.ti_stkHi = 0;
    result = PCR_ThCtl_GetInfo(t, &info);
    GC_push_all_stack((ptr_t)(info.ti_stkLow), (ptr_t)(info.ti_stkHi));
    return(result);
}

/* Push the contents of an old object.  We treat this as stack  */
/* data only because that makes it robust against mark stack    */
/* overflow.                                                    */
PCR_ERes GC_push_old_obj(void *p, size_t size, PCR_Any data)
{
    GC_push_all_stack((ptr_t)p, (ptr_t)p + size);
    return(PCR_ERes_okay);
}

void GC_default_push_other_roots GC_PROTO((void))
{
    /* Traverse data allocated by previous memory managers.  */
        {
          extern struct PCR_MM_ProcsRep * GC_old_allocator;

          if ((*(GC_old_allocator->mmp_enumerate))(PCR_Bool_false,
                                                   GC_push_old_obj, 0)
              != PCR_ERes_okay) {
              ABORT("Old object enumeration failed");
          }
        }
    /* Traverse all thread stacks. */
        if (PCR_ERes_IsErr(
                PCR_ThCtl_ApplyToAllOtherThreads(GC_push_thread_stack,0))
            || PCR_ERes_IsErr(GC_push_thread_stack(PCR_Th_CurrThread(), 0))) {
            ABORT("Thread stack marking failed\n");
        }
}

# endif /* PCR */
# ifdef SRC_M3

# ifdef ALL_INTERIOR_POINTERS
    --> misconfigured
# endif

void GC_push_thread_structures GC_PROTO((void))
{
    /* Not our responsibility. */
}

extern void ThreadF__ProcessStacks();

void GC_push_thread_stack(start, stop)
word start, stop;
{
   GC_push_all_stack((ptr_t)start, (ptr_t)stop + sizeof(word));
}

/* Push routine with M3 specific calling convention. */
GC_m3_push_root(dummy1, p, dummy2, dummy3)
word *p;
ptr_t dummy1, dummy2;
int dummy3;
{
    word q = *p;

    GC_PUSH_ONE_STACK(q, p);
}

/* M3 set equivalent to RTHeap.TracedRefTypes */
typedef struct { int elts[1]; }  RefTypeSet;
RefTypeSet GC_TracedRefTypes = {{0x1}};

void GC_default_push_other_roots GC_PROTO((void))
{
    /* Use the M3 provided routine for finding static roots.     */
    /* This is a bit dubious, since it presumes no C roots.      */
    /* We handle the collector roots explicitly in GC_push_roots */
        RTMain__GlobalMapProc(GC_m3_push_root, 0, GC_TracedRefTypes);
        if (GC_words_allocd > 0) {
            ThreadF__ProcessStacks(GC_push_thread_stack);
        }
        /* Otherwise this isn't absolutely necessary, and we have  */
        /* startup ordering problems.                              */
}

# endif /* SRC_M3 */
# if defined(GC_SOLARIS_THREADS) || defined(GC_PTHREADS) || \
     defined(GC_WIN32_THREADS)

extern void GC_push_all_stacks();

void GC_default_push_other_roots GC_PROTO((void))
{
    GC_push_all_stacks();
}

# endif /* GC_SOLARIS_THREADS || GC_PTHREADS || GC_WIN32_THREADS */

void (*GC_push_other_roots) GC_PROTO((void)) = GC_default_push_other_roots;

#endif /* THREADS */
/*
 * Routines for accessing dirty bits on virtual pages.
 * We plan to eventually implement four strategies for doing so:
 * DEFAULT_VDB: A simple dummy implementation that treats every page
 *              as possibly dirty.  This makes incremental collection
 *              useless, but the implementation is still correct.
 * PCR_VDB:     Use PPCR's virtual dirty bit facility.
 * PROC_VDB:    Use the /proc facility for reading dirty bits.  Only
 *              works under some SVR4 variants.  Even then, it may be
 *              too slow to be entirely satisfactory.  Requires reading
 *              dirty bits for entire address space.  Implementations tend
 *              to assume that the client is a (slow) debugger.
 * MPROTECT_VDB:Protect pages and then catch the faults to keep track of
 *              dirtied pages.  The implementation (and implementability)
 *              is highly system dependent.  This usually fails when system
 *              calls write to a protected page.  We prevent the read system
 *              call from doing so.  It is the client's responsibility to
 *              make sure that other system calls are similarly protected
 *              or write only to the stack.
 */
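/* Illustrative sketch (not part of the collector): the essence of an   */
/* MPROTECT_VDB-style write barrier.  Heap pages are write-protected;   */
/* the first write to a page faults, the handler records the page as    */
/* dirty and unprotects it, and the interrupted write then retries.     */
/* record_page_as_dirty is a hypothetical helper; the real handlers     */
/* appear further below.                                                */
#if 0
  static void dirty_bit_fault_handler(int sig, siginfo_t *si, void *context)
  {
      char *addr = (char *)si -> si_addr;   /* the faulting address     */

      record_page_as_dirty(addr);           /* hypothetical: set dirty bit */
      /* Unprotect the page so that the faulting write can be retried.  */
      mprotect((caddr_t)((word)addr & ~(GC_page_size - 1)), GC_page_size,
               PROT_READ | PROT_WRITE);
  }
#endif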
GC_bool GC_dirty_maintained = FALSE;

# ifdef DEFAULT_VDB

/* All of the following assume the allocation lock is held, and  */
/* signals are disabled.                                         */

/* The client asserts that unallocated pages in the heap are never  */
/* written.                                                         */

/* Initialize virtual dirty bit implementation.  */
void GC_dirty_init()
{
#   ifdef PRINTSTATS
      GC_printf0("Initializing DEFAULT_VDB...\n");
#   endif
    GC_dirty_maintained = TRUE;
}
/* Retrieve system dirty bits for heap to a local buffer.     */
/* Restore the system's notion of which pages are dirty.      */
void GC_read_dirty()
{}

/* Is the HBLKSIZE sized page at h marked dirty in the local buffer?   */
/* If the actual page size is different, this returns TRUE if any      */
/* of the pages overlapping h are dirty.  This routine may err on the  */
/* side of labelling pages as dirty (and this implementation does).    */
GC_bool GC_page_was_dirty(h)
struct hblk *h;
{
    return(TRUE);
}
/*
 * The following two routines are typically less crucial.  They matter
 * most with large dynamic libraries, or if we can't accurately identify
 * stacks, e.g. under Solaris 2.X.  Otherwise the following default
 * versions are adequate.
 */

/* Could any valid GC heap pointer ever have been written to this page? */
GC_bool GC_page_was_ever_dirty(h)
struct hblk *h;
{
    return(TRUE);
}

/* Reset the n pages starting at h to "was never dirty" status.  */
void GC_is_fresh(h, n)
struct hblk *h;
word n;
{
}

/* A call that:                                          */
/* I) hints that [h, h+nblocks) is about to be written.  */
/* II) guarantees that protection is removed.            */
/* (I) may speed up some dirty bit implementations.      */
/* (II) may be essential if we need to ensure that       */
/* pointer-free system call buffers in the heap are      */
/* not protected.                                        */
void GC_remove_protection(h, nblocks, is_ptrfree)
struct hblk *h;
word nblocks;
GC_bool is_ptrfree;
{
}

# endif /* DEFAULT_VDB */
2163 # ifdef MPROTECT_VDB
2166 * See DEFAULT_VDB for interface descriptions.
2170 * This implementation maintains dirty bits itself by catching write
2171 * faults and keeping track of them. We assume nobody else catches
2172 * SIGBUS or SIGSEGV. We assume no write faults occur in system calls.
2173 * This means that clients must ensure that system calls don't write
2174 * to the write-protected heap. Probably the best way to do this is to
2175 * ensure that system calls write at most to POINTERFREE objects in the
2176 * heap, and do even that only if we are on a platform on which those
2177 * are not protected. Another alternative is to wrap system calls
2178 * (see example for read below), but the current implementation holds
2179 * a lock across blocking calls, making it problematic for multithreaded applications.
2181 * We assume the page size is a multiple of HBLKSIZE.
2182 * We prefer them to be the same. We avoid protecting POINTERFREE
2183 * objects only if they are the same.
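/* A self-contained miniature of the technique (illustration only,   */
/* not collector code): protect a page read-only, catch the first    */
/* write fault, record the page as dirty, and unprotect so the       */
/* faulting write restarts.  Assumes POSIX sigaction and an          */
/* anonymous-mmap flag spelled MAP_ANON; on some systems the fault   */
/* arrives as SIGBUS rather than SIGSEGV.                            */
#if 0
#include <signal.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static char *demo_page;
static volatile int demo_page_dirty;

static void demo_fault(int sig, siginfo_t *si, void *ctx)
{
    demo_page_dirty = 1;                /* record the dirty bit       */
    mprotect(demo_page, getpagesize(),  /* reopen the page so the     */
             PROT_READ | PROT_WRITE);   /* interrupted write retries  */
}

int demo(void)
{
    struct sigaction act;

    memset(&act, 0, sizeof act);
    act.sa_flags = SA_SIGINFO | SA_RESTART;
    act.sa_sigaction = demo_fault;
    sigemptyset(&act.sa_mask);
    sigaction(SIGSEGV, &act, 0);
    demo_page = mmap(0, getpagesize(), PROT_READ,
                     MAP_PRIVATE | MAP_ANON, -1, 0);
    demo_page[0] = 1;                   /* faults once, then succeeds */
    return demo_page_dirty;             /* 1 */
}
#endif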
2186 # if !defined(MSWIN32) && !defined(MSWINCE) && !defined(DARWIN)
2188 # include <sys/mman.h>
2189 # include <signal.h>
2190 # include <sys/syscall.h>
2192 # define PROTECT(addr, len) \
2193 if (mprotect((caddr_t)(addr), (size_t)(len), \
2194 PROT_READ | OPT_PROT_EXEC) < 0) { \
2195 ABORT("mprotect failed"); \
2197 # define UNPROTECT(addr, len) \
2198 if (mprotect((caddr_t)(addr), (size_t)(len), \
2199 PROT_WRITE | PROT_READ | OPT_PROT_EXEC ) < 0) { \
2200 ABORT("un-mprotect failed"); \
2206 /* Using vm_protect (mach syscall) over mprotect (BSD syscall) seems to
2207 decrease the likelihood of some of the problems described below. */
2208 #include <mach/vm_map.h>
2209 static mach_port_t GC_task_self;
2210 #define PROTECT(addr,len) \
2211 if(vm_protect(GC_task_self,(vm_address_t)(addr),(vm_size_t)(len), \
2212 FALSE,VM_PROT_READ) != KERN_SUCCESS) { \
2213 ABORT("vm_portect failed"); \
2215 #define UNPROTECT(addr,len) \
2216 if(vm_protect(GC_task_self,(vm_address_t)(addr),(vm_size_t)(len), \
2217 FALSE,VM_PROT_READ|VM_PROT_WRITE) != KERN_SUCCESS) { \
2218 ABORT("vm_portect failed"); \
2223 # include <signal.h>
2226 static DWORD protect_junk;
2227 # define PROTECT(addr, len) \
2228 if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READ, \
2230 DWORD last_error = GetLastError(); \
2231 GC_printf1("Last error code: %lx\n", last_error); \
2232 ABORT("VirtualProtect failed"); \
2234 # define UNPROTECT(addr, len) \
2235 if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READWRITE, \
2237 ABORT("un-VirtualProtect failed"); \
2239 # endif /* !DARWIN */
2240 # endif /* MSWIN32 || MSWINCE || DARWIN */
2242 #if defined(SUNOS4) || (defined(FREEBSD) && !defined(SUNOS5SIGS))
2243 typedef void (* SIG_PF)();
2244 #endif /* SUNOS4 || (FREEBSD && !SUNOS5SIGS) */
2246 #if defined(SUNOS5SIGS) || defined(OSF1) || defined(LINUX) \
2249 typedef void (* SIG_PF)(int);
2251 typedef void (* SIG_PF)();
2253 #endif /* SUNOS5SIGS || OSF1 || LINUX || HURD */
2255 #if defined(MSWIN32)
2256 typedef LPTOP_LEVEL_EXCEPTION_FILTER SIG_PF;
2258 # define SIG_DFL (LPTOP_LEVEL_EXCEPTION_FILTER) (-1)
2260 #if defined(MSWINCE)
2261 typedef LONG (WINAPI *SIG_PF)(struct _EXCEPTION_POINTERS *);
2263 # define SIG_DFL (SIG_PF) (-1)
2266 #if defined(IRIX5) || defined(OSF1) || defined(HURD)
2267 typedef void (* REAL_SIG_PF)(int, int, struct sigcontext *);
2268 #endif /* IRIX5 || OSF1 || HURD */
2270 #if defined(SUNOS5SIGS)
2271 # if defined(HPUX) || defined(FREEBSD)
2272 # define SIGINFO_T siginfo_t
2274 # define SIGINFO_T struct siginfo
2277 typedef void (* REAL_SIG_PF)(int, SIGINFO_T *, void *);
2279 typedef void (* REAL_SIG_PF)();
2281 #endif /* SUNOS5SIGS */
2284 # if __GLIBC__ > 2 || __GLIBC__ == 2 && __GLIBC_MINOR__ >= 2
2285 typedef struct sigcontext s_c;
2286 # else /* glibc < 2.2 */
2287 # include <linux/version.h>
2288 # if (LINUX_VERSION_CODE >= 0x20100) && !defined(M68K) || defined(ALPHA) || defined(ARM32)
2289 typedef struct sigcontext s_c;
2291 typedef struct sigcontext_struct s_c;
2293 # endif /* glibc < 2.2 */
2294 # if defined(ALPHA) || defined(M68K)
2295 typedef void (* REAL_SIG_PF)(int, int, s_c *);
2297 # if defined(IA64) || defined(HP_PA) || defined(X86_64)
2298 typedef void (* REAL_SIG_PF)(int, siginfo_t *, s_c *);
2300 /* According to SUSV3, the last argument should have type */
2301 /* void * or ucontext_t * */
2303 typedef void (* REAL_SIG_PF)(int, s_c);
2307 /* Retrieve fault address from sigcontext structure by decoding the faulting instruction. */
2309 char * get_fault_addr(s_c *sc) {
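  /* Alpha memory-format instructions: bits 20..16 hold the base     */
  /* register number, bits 15..0 a signed 16-bit displacement.       */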
2313 instr = *((unsigned *)(sc->sc_pc));
2314 faultaddr = sc->sc_regs[(instr >> 16) & 0x1f];
2315 faultaddr += (word) (((int)instr << 16) >> 16);
2316 return (char *)faultaddr;
2318 # endif /* !ALPHA */
2322 SIG_PF GC_old_bus_handler;
2323 SIG_PF GC_old_segv_handler; /* Also old MSWIN32 ACCESS_VIOLATION filter */
2324 #endif /* !DARWIN */
2326 #if defined(THREADS)
2327 /* We need to lock around the bitmap update in the write fault handler */
2328 /* in order to avoid the risk of losing a bit. We do this with a */
2329 /* test-and-set spin lock if we know how to do that. Otherwise we */
2330 /* check whether we are already in the handler and use the dumb but */
2331 /* safe fallback algorithm of setting all bits in the word. */
2332 /* Contention should be very rare, so we do the minimum to handle it correctly. */
2334 #ifdef GC_TEST_AND_SET_DEFINED
2335 static VOLATILE unsigned int fault_handler_lock = 0;
2336 void async_set_pht_entry_from_index(VOLATILE page_hash_table db, int index) {
2337 while (GC_test_and_set(&fault_handler_lock)) {}
2338 /* Could also revert to set_pht_entry_from_index_safe if initial */
2339 /* GC_test_and_set fails. */
2340 set_pht_entry_from_index(db, index);
2341 GC_clear(&fault_handler_lock);
2343 #else /* !GC_TEST_AND_SET_DEFINED */
2344 /* THIS IS INCORRECT! The dirty bit vector may be temporarily wrong, */
2345 /* just before we notice the conflict and correct it. We may end up */
2346 /* looking at it while it's wrong. But this requires contention */
2347 /* exactly when a GC is triggered, which seems far less likely to */
2348 /* fail than the old code, which had no reported failures. Thus we */
2349 /* leave it this way while we think of something better, or support */
2350 /* GC_test_and_set on the remaining platforms. */
2351 static VOLATILE word currently_updating = 0;
2352 void async_set_pht_entry_from_index(VOLATILE page_hash_table db, int index) {
2353 unsigned int update_dummy;
2354 currently_updating = (word)(&update_dummy);
2355 set_pht_entry_from_index(db, index);
2356 /* If we get contention in the 10 or so instruction window here, */
2357 /* and we get stopped by a GC between the two updates, we lose! */
2358 if (currently_updating != (word)(&update_dummy)) {
2359 set_pht_entry_from_index_safe(db, index);
2360 /* We claim that if two threads concurrently try to update the */
2361 /* dirty bit vector, the first one to execute UPDATE_START */
2362 /* will see it changed when UPDATE_END is executed. (Note that */
2363 /* &update_dummy must differ in two distinct threads.) It */
2364 /* will then execute set_pht_entry_from_index_safe, thus */
2365 /* returning us to a safe state, though not soon enough. */
2368 #endif /* !GC_TEST_AND_SET_DEFINED */
2369 #else /* !THREADS */
2370 # define async_set_pht_entry_from_index(db, index) \
2371 set_pht_entry_from_index(db, index)
2372 #endif /* !THREADS */
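/* For reference: GC_test_and_set must atomically set the lock word  */
/* and return its previous value; GC_clear must release it.  The     */
/* real definitions are per-architecture assembly; a sketch in terms */
/* of GCC __sync builtins (an assumption, not what this file uses)   */
/* would be:                                                         */
#if 0
static int GC_test_and_set(volatile unsigned int *lock)
{
    /* Returns the old value, so 0 means we acquired the lock. */
    return (int)__sync_lock_test_and_set(lock, 1);
}
static void GC_clear(volatile unsigned int *lock)
{
    __sync_lock_release(lock);  /* store 0 with release semantics */
}
#endif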
2375 #if !defined(DARWIN)
2376 # if defined (SUNOS4) || (defined(FREEBSD) && !defined(SUNOS5SIGS))
2377 void GC_write_fault_handler(sig, code, scp, addr)
2379 struct sigcontext *scp;
2382 # define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
2383 # define CODE_OK (FC_CODE(code) == FC_PROT \
2384 || (FC_CODE(code) == FC_OBJERR \
2385 && FC_ERRNO(code) == FC_PROT))
2388 # define SIG_OK (sig == SIGBUS)
2389 # define CODE_OK TRUE
2391 # endif /* SUNOS4 || (FREEBSD && !SUNOS5SIGS) */
2393 # if defined(IRIX5) || defined(OSF1) || defined(HURD)
2395 void GC_write_fault_handler(int sig, int code, struct sigcontext *scp)
2397 # define SIG_OK (sig == SIGSEGV)
2398 # define CODE_OK (code == 2 /* experimentally determined */)
2401 # define SIG_OK (sig == SIGSEGV)
2402 # define CODE_OK (code == EACCES)
2405 # define SIG_OK (sig == SIGBUS || sig == SIGSEGV)
2406 # define CODE_OK TRUE
2408 # endif /* IRIX5 || OSF1 || HURD */
2411 # if defined(ALPHA) || defined(M68K)
2412 void GC_write_fault_handler(int sig, int code, s_c * sc)
2414 # if defined(IA64) || defined(HP_PA) || defined(X86_64)
2415 void GC_write_fault_handler(int sig, siginfo_t * si, s_c * scp)
2418 void GC_write_fault_handler(int sig, int a2, int a3, int a4, s_c sc)
2420 void GC_write_fault_handler(int sig, s_c sc)
2424 # define SIG_OK (sig == SIGSEGV)
2425 # define CODE_OK TRUE
2426 /* Empirically c.trapno == 14 on IA32, but is that useful? */
2427 /* Should probably consider alignment issues on other */
2428 /* architectures. */
2431 # if defined(SUNOS5SIGS)
2433 void GC_write_fault_handler(int sig, SIGINFO_T *scp, void * context)
2435 void GC_write_fault_handler(sig, scp, context)
2441 # define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
2442 # define CODE_OK (scp -> si_code == SEGV_ACCERR) \
2443 || (scp -> si_code == BUS_ADRERR) \
2444 || (scp -> si_code == BUS_UNKNOWN) \
2445 || (scp -> si_code == SEGV_UNKNOWN) \
2446 || (scp -> si_code == BUS_OBJERR)
2449 # define SIG_OK (sig == SIGBUS)
2450 # define CODE_OK (scp -> si_code == BUS_PAGE_FAULT)
2452 # define SIG_OK (sig == SIGSEGV)
2453 # define CODE_OK (scp -> si_code == SEGV_ACCERR)
2456 # endif /* SUNOS5SIGS */
2458 # if defined(MSWIN32) || defined(MSWINCE)
2459 LONG WINAPI GC_write_fault_handler(struct _EXCEPTION_POINTERS *exc_info)
2460 # define SIG_OK (exc_info -> ExceptionRecord -> ExceptionCode == \
2461 STATUS_ACCESS_VIOLATION)
2462 # define CODE_OK (exc_info -> ExceptionRecord -> ExceptionInformation[0] == 1)
2464 # endif /* MSWIN32 || MSWINCE */
2466 register unsigned i;
2468 char *addr = (char *) code;
2471 char * addr = (char *) (size_t) (scp -> sc_badvaddr);
2473 # if defined(OSF1) && defined(ALPHA)
2474 char * addr = (char *) (scp -> sc_traparg_a0);
2477 char * addr = (char *) (scp -> si_addr);
2481 char * addr = (char *) (sc.cr2);
2486 struct sigcontext *scp = (struct sigcontext *)(sc);
2488 int format = (scp->sc_formatvec >> 12) & 0xf;
2489 unsigned long *framedata = (unsigned long *)(scp + 1);
2492 if (format == 0xa || format == 0xb) {
2495 } else if (format == 7) {
2498 if (framedata[1] & 0x08000000) {
2499 /* correct addr on misaligned access */
2500 ea = (ea+4095)&(~4095);
2502 } else if (format == 4) {
2505 if (framedata[1] & 0x08000000) {
2506 /* correct addr on misaligned access */
2507 ea = (ea+4095)&(~4095);
2513 char * addr = get_fault_addr(sc);
2515 # if defined(IA64) || defined(HP_PA) || defined(X86_64)
2516 char * addr = si -> si_addr;
2517 /* I believe this is claimed to work on all platforms for */
2518 /* Linux 2.3.47 and later. Hopefully we don't have to */
2519 /* worry about earlier kernels on IA64. */
2521 # if defined(POWERPC)
2522 char * addr = (char *) (sc.regs->dar);
2525 char * addr = (char *)sc.fault_address;
2528 char * addr = (char *)sc.regs.csraddr;
2530 --> architecture not supported
2539 # if defined(MSWIN32) || defined(MSWINCE)
2540 char * addr = (char *) (exc_info -> ExceptionRecord
2541 -> ExceptionInformation[1]);
2542 # define sig SIGSEGV
2545 if (SIG_OK && CODE_OK) {
2546 register struct hblk * h =
2547 (struct hblk *)((word)addr & ~(GC_page_size-1));
2548 GC_bool in_allocd_block;
2551 /* Address is only within the correct physical page. */
2552 in_allocd_block = FALSE;
2553 for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
2554 if (HDR(h+i) != 0) {
2555 in_allocd_block = TRUE;
2559 in_allocd_block = (HDR(addr) != 0);
2561 if (!in_allocd_block) {
2562 /* FIXME - We should make sure that we invoke the */
2563 /* old handler with the appropriate calling */
2564 /* sequence, which often depends on SA_SIGINFO. */
2566 /* Heap blocks now begin and end on page boundaries */
2569 if (sig == SIGSEGV) {
2570 old_handler = GC_old_segv_handler;
2572 old_handler = GC_old_bus_handler;
2574 if (old_handler == SIG_DFL) {
2575 # if !defined(MSWIN32) && !defined(MSWINCE)
2576 GC_err_printf1("Segfault at 0x%lx\n", addr);
2577 ABORT("Unexpected bus error or segmentation fault");
2579 return(EXCEPTION_CONTINUE_SEARCH);
2582 # if defined (SUNOS4) \
2583 || (defined(FREEBSD) && !defined(SUNOS5SIGS))
2584 (*old_handler) (sig, code, scp, addr);
2587 # if defined (SUNOS5SIGS)
2589 * FIXME: For FreeBSD, this code should check if the
2590 * old signal handler used the traditional BSD style and
2591 * if so call it using that style.
2593 (*(REAL_SIG_PF)old_handler) (sig, scp, context);
2596 # if defined (LINUX)
2597 # if defined(ALPHA) || defined(M68K)
2598 (*(REAL_SIG_PF)old_handler) (sig, code, sc);
2600 # if defined(IA64) || defined(HP_PA) || defined(X86_64)
2601 (*(REAL_SIG_PF)old_handler) (sig, si, scp);
2603 (*(REAL_SIG_PF)old_handler) (sig, sc);
2608 # if defined (IRIX5) || defined(OSF1) || defined(HURD)
2609 (*(REAL_SIG_PF)old_handler) (sig, code, scp);
2613 return((*old_handler)(exc_info));
2617 UNPROTECT(h, GC_page_size);
2618 /* We need to make sure that no collection occurs between */
2619 /* the UNPROTECT and the setting of the dirty bit. Otherwise */
2620 /* a write by a third thread might go unnoticed. Reversing */
2621 /* the order is just as bad, since we would end up unprotecting */
2622 /* a page in a GC cycle during which it's not marked. */
2623 /* Currently we do this by disabling the thread stopping */
2624 /* signals while this handler is running. An alternative might */
2625 /* be to record the fact that we're about to unprotect, or */
2626 /* have just unprotected a page in the GC's thread structure, */
2627 /* and then to have the thread stopping code set the dirty */
2628 /* flag, if necessary. */
2629 for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
2630 register int index = PHT_HASH(h+i);
2632 async_set_pht_entry_from_index(GC_dirty_pages, index);
2635 /* These reset the signal handler each time by default. */
2636 signal(SIGSEGV, (SIG_PF) GC_write_fault_handler);
2638 /* The write may not take place before dirty bits are read. */
2639 /* But then we'll fault again ... */
2640 # if defined(MSWIN32) || defined(MSWINCE)
2641 return(EXCEPTION_CONTINUE_EXECUTION);
2646 #if defined(MSWIN32) || defined(MSWINCE)
2647 return EXCEPTION_CONTINUE_SEARCH;
2649 GC_err_printf1("Segfault at 0x%lx\n", addr);
2650 ABORT("Unexpected bus error or segmentation fault");
2653 #endif /* !DARWIN */
2656 * We hold the allocation lock. We expect block h to be written
2657 * shortly. Ensure that all pages containing any part of the n hblks
2658 * starting at h are no longer protected. If is_ptrfree is false,
2659 * also ensure that they will subsequently appear to be dirty.
2661 void GC_remove_protection(h, nblocks, is_ptrfree)
2666 struct hblk * h_trunc; /* Truncated to page boundary */
2667 struct hblk * h_end; /* Page boundary following block end */
2668 struct hblk * current;
2669 GC_bool found_clean;
2671 if (!GC_dirty_maintained) return;
2672 h_trunc = (struct hblk *)((word)h & ~(GC_page_size-1));
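    /* h_trunc rounds the start of the block range down to a page    */
    /* boundary; h_end below rounds the end up, so [h_trunc, h_end)  */
    /* covers every page the blocks touch.                           */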
2673 h_end = (struct hblk *)(((word)(h + nblocks) + GC_page_size-1)
2674 & ~(GC_page_size-1));
2675 found_clean = FALSE;
2676 for (current = h_trunc; current < h_end; ++current) {
2677 int index = PHT_HASH(current);
2679 if (!is_ptrfree || current < h || current >= h + nblocks) {
2680 async_set_pht_entry_from_index(GC_dirty_pages, index);
2683 UNPROTECT(h_trunc, (ptr_t)h_end - (ptr_t)h_trunc);
2686 #if !defined(DARWIN)
2687 void GC_dirty_init()
2689 # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(LINUX) || \
2690 defined(OSF1) || defined(HURD)
2691 struct sigaction act, oldact;
2692 /* We should probably specify SA_SIGINFO for Linux, and handle */
2693 /* the different architectures more uniformly. */
2694 # if defined(IRIX5) || defined(LINUX) && !defined(X86_64) \
2695 || defined(OSF1) || defined(HURD)
2696 act.sa_flags = SA_RESTART;
2697 act.sa_handler = (SIG_PF)GC_write_fault_handler;
2699 act.sa_flags = SA_RESTART | SA_SIGINFO;
2700 act.sa_sigaction = GC_write_fault_handler;
2702 (void)sigemptyset(&act.sa_mask);
2704 /* Arrange to postpone SIG_SUSPEND while we're in a write fault */
2705 /* handler. This effectively makes the handler atomic w.r.t. */
2706 /* stopping the world for GC. */
2707 (void)sigaddset(&act.sa_mask, SIG_SUSPEND);
2708 # endif /* SIG_SUSPEND */
2711 GC_printf0("Inititalizing mprotect virtual dirty bit implementation\n");
2713 GC_dirty_maintained = TRUE;
2714 if (GC_page_size % HBLKSIZE != 0) {
2715 GC_err_printf0("Page size not multiple of HBLKSIZE\n");
2716 ABORT("Page size not multiple of HBLKSIZE");
2718 # if defined(SUNOS4) || (defined(FREEBSD) && !defined(SUNOS5SIGS))
2719 GC_old_bus_handler = signal(SIGBUS, GC_write_fault_handler);
2720 if (GC_old_bus_handler == SIG_IGN) {
2721 GC_err_printf0("Previously ignored bus error!?");
2722 GC_old_bus_handler = SIG_DFL;
2724 if (GC_old_bus_handler != SIG_DFL) {
2726 GC_err_printf0("Replaced other SIGBUS handler\n");
2730 # if defined(SUNOS4)
2731 GC_old_segv_handler = signal(SIGSEGV, (SIG_PF)GC_write_fault_handler);
2732 if (GC_old_segv_handler == SIG_IGN) {
2733 GC_err_printf0("Previously ignored segmentation violation!?");
2734 GC_old_segv_handler = SIG_DFL;
2736 if (GC_old_segv_handler != SIG_DFL) {
2738 GC_err_printf0("Replaced other SIGSEGV handler\n");
2742 # if (defined(SUNOS5SIGS) && !defined(FREEBSD)) || defined(IRIX5) \
2743 || defined(LINUX) || defined(OSF1) || defined(HURD)
2744 /* SUNOS5SIGS includes HPUX */
2745 # if defined(GC_IRIX_THREADS)
2746 sigaction(SIGSEGV, 0, &oldact);
2747 sigaction(SIGSEGV, &act, 0);
2750 int res = sigaction(SIGSEGV, &act, &oldact);
2751 if (res != 0) ABORT("Sigaction failed");
2754 # if defined(_sigargs) || defined(HURD) || !defined(SA_SIGINFO)
2755 /* This is Irix 5.x, not 6.x. Irix 5.x does not have sa_sigaction. */
2757 GC_old_segv_handler = oldact.sa_handler;
2758 # else /* Irix 6.x or SUNOS5SIGS or LINUX */
2759 if (oldact.sa_flags & SA_SIGINFO) {
2760 GC_old_segv_handler = (SIG_PF)(oldact.sa_sigaction);
2762 GC_old_segv_handler = oldact.sa_handler;
2765 if (GC_old_segv_handler == SIG_IGN) {
2766 GC_err_printf0("Previously ignored segmentation violation!?");
2767 GC_old_segv_handler = SIG_DFL;
2769 if (GC_old_segv_handler != SIG_DFL) {
2771 GC_err_printf0("Replaced other SIGSEGV handler\n");
2774 # endif /* (SUNOS5SIGS && !FREEBSD) || IRIX5 || LINUX || OSF1 || HURD */
2775 # if defined(HPUX) || defined(LINUX) || defined(HURD) \
2776 || (defined(FREEBSD) && defined(SUNOS5SIGS))
2777 sigaction(SIGBUS, &act, &oldact);
2778 GC_old_bus_handler = oldact.sa_handler;
2779 if (GC_old_bus_handler == SIG_IGN) {
2780 GC_err_printf0("Previously ignored bus error!?");
2781 GC_old_bus_handler = SIG_DFL;
2783 if (GC_old_bus_handler != SIG_DFL) {
2785 GC_err_printf0("Replaced other SIGBUS handler\n");
2788 # endif /* HPUX || LINUX || HURD || (FREEBSD && SUNOS5SIGS) */
2789 # if defined(MSWIN32)
2790 GC_old_segv_handler = SetUnhandledExceptionFilter(GC_write_fault_handler);
2791 if (GC_old_segv_handler != NULL) {
2793 GC_err_printf0("Replaced other UnhandledExceptionFilter\n");
2796 GC_old_segv_handler = SIG_DFL;
2800 #endif /* !DARWIN */
2802 int GC_incremental_protection_needs()
2804 if (GC_page_size == HBLKSIZE) {
2805 return GC_PROTECTS_POINTER_HEAP;
2807 return GC_PROTECTS_POINTER_HEAP | GC_PROTECTS_PTRFREE_HEAP;
2811 #define HAVE_INCREMENTAL_PROTECTION_NEEDS
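/* Illustration only: a hypothetical client that lets a system call  */
/* write into a GC-allocated, pointer-free buffer can use the query  */
/* above to decide whether the buffer must be pre-touched (and thus  */
/* unprotected) first.  buf, fd, nbyte and result are hypothetical:  */
#if 0
if (GC_incremental_protection_needs() & GC_PROTECTS_PTRFREE_HEAP) {
    /* Even atomic objects may be protected: force any fault here in */
    /* user code, not inside the kernel.  A real client would touch  */
    /* every page of the buffer, not just the first byte.            */
    ((volatile char *)buf)[0] = ((volatile char *)buf)[0];
}
result = read(fd, buf, nbyte);
#endif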
2813 #define IS_PTRFREE(hhdr) ((hhdr)->hb_descr == 0)
2815 #define PAGE_ALIGNED(x) !((word)(x) & (GC_page_size - 1))
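/* E.g. with GC_page_size == 0x1000, PAGE_ALIGNED(0x20000) holds,    */
/* while PAGE_ALIGNED(0x20010) does not.                             */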
2816 void GC_protect_heap()
2820 struct hblk * current;
2821 struct hblk * current_start; /* Start of block to be protected. */
2822 struct hblk * limit;
2824 GC_bool protect_all =
2825 (0 != (GC_incremental_protection_needs() & GC_PROTECTS_PTRFREE_HEAP));
2826 for (i = 0; i < GC_n_heap_sects; i++) {
2827 start = GC_heap_sects[i].hs_start;
2828 len = GC_heap_sects[i].hs_bytes;
2830 PROTECT(start, len);
2832 GC_ASSERT(PAGE_ALIGNED(len))
2833 GC_ASSERT(PAGE_ALIGNED(start))
2834 current_start = current = (struct hblk *)start;
2835 limit = (struct hblk *)(start + len);
2836 while (current < limit) {
2841 GC_ASSERT(PAGE_ALIGNED(current));
2842 GET_HDR(current, hhdr);
2843 if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
2844 /* This can happen only if we're at the beginning of a */
2845 /* heap segment, and a block spans heap segments. */
2846 /* We will handle that block as part of the preceding segment. */
2848 GC_ASSERT(current_start == current);
2849 current_start = ++current;
2852 if (HBLK_IS_FREE(hhdr)) {
2853 GC_ASSERT(PAGE_ALIGNED(hhdr -> hb_sz));
2854 nhblks = divHBLKSZ(hhdr -> hb_sz);
2855 is_ptrfree = TRUE; /* dirty on alloc */
2857 nhblks = OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
2858 is_ptrfree = IS_PTRFREE(hhdr);
2861 if (current_start < current) {
2862 PROTECT(current_start, (ptr_t)current - (ptr_t)current_start);
2864 current_start = (current += nhblks);
2869 if (current_start < current) {
2870 PROTECT(current_start, (ptr_t)current - (ptr_t)current_start);
2876 /* We assume that either the world is stopped or it's OK to lose dirty */
2877 /* bits while this is happening (as in GC_enable_incremental). */
2878 void GC_read_dirty()
2880 BCOPY((word *)GC_dirty_pages, GC_grungy_pages,
2881 (sizeof GC_dirty_pages));
2882 BZERO((word *)GC_dirty_pages, (sizeof GC_dirty_pages));
2886 GC_bool GC_page_was_dirty(h)
2889 register word index = PHT_HASH(h);
2891 return(HDR(h) == 0 || get_pht_entry_from_index(GC_grungy_pages, index));
2895 * Acquiring the allocation lock here is dangerous, since this
2896 * can be called from within GC_call_with_alloc_lock, and the cord
2897 * package does so. On systems that allow nested lock acquisition, this happens to work.
2899 * On other systems, SET_LOCK_HOLDER and friends must be suitably defined.
2902 static GC_bool syscall_acquired_lock = FALSE; /* Protected by GC lock. */
2904 void GC_begin_syscall()
2906 if (!I_HOLD_LOCK()) {
2908 syscall_acquired_lock = TRUE;
2912 void GC_end_syscall()
2914 if (syscall_acquired_lock) {
2915 syscall_acquired_lock = FALSE;
2920 void GC_unprotect_range(addr, len)
2924 struct hblk * start_block;
2925 struct hblk * end_block;
2926 register struct hblk *h;
2929 if (!GC_dirty_maintained) return;
2930 obj_start = GC_base(addr);
2931 if (obj_start == 0) return;
2932 if (GC_base(addr + len - 1) != obj_start) {
2933 ABORT("GC_unprotect_range(range bigger than object)");
2935 start_block = (struct hblk *)((word)addr & ~(GC_page_size - 1));
2936 end_block = (struct hblk *)((word)(addr + len - 1) & ~(GC_page_size - 1));
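    /* end_block now points at the first block of the page containing */
    /* the last byte; the next line advances it to that page's final  */
    /* block, so the loop below marks every block on every spanned    */
    /* page.                                                          */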
2937 end_block += GC_page_size/HBLKSIZE - 1;
2938 for (h = start_block; h <= end_block; h++) {
2939 register word index = PHT_HASH(h);
2941 async_set_pht_entry_from_index(GC_dirty_pages, index);
2943 UNPROTECT(start_block,
2944 ((ptr_t)end_block - (ptr_t)start_block) + HBLKSIZE);
2949 /* We no longer wrap read by default, since that was causing too many */
2950 /* problems. It is preferred that the client instead avoid writing */
2951 /* to the write-protected heap with a system call. */
2952 /* This still serves as sample code if you do want to wrap system calls. */
2954 #if !defined(MSWIN32) && !defined(MSWINCE) && !defined(GC_USE_LD_WRAP)
2955 /* Replacement for UNIX system call. */
2956 /* Other calls that write to the heap should be handled similarly. */
2957 /* Note that this doesn't work well for blocking reads: It will hold */
2958 /* the allocation lock for the entire duration of the call. Multithreaded */
2959 /* clients should really ensure that it won't block, either by setting */
2960 /* the descriptor nonblocking, or by calling select or poll first, to */
2961 /* make sure that input is available. */
2962 /* Another, preferred alternative is to ensure that system calls never */
2963 /* write to the protected heap (see above). */
2964 # if defined(__STDC__) && !defined(SUNOS4)
2965 # include <unistd.h>
2966 # include <sys/uio.h>
2967 ssize_t read(int fd, void *buf, size_t nbyte)
2970 int read(fd, buf, nbyte)
2972 int GC_read(fd, buf, nbyte)
2982 GC_unprotect_range(buf, (word)nbyte);
2983 # if defined(IRIX5) || defined(GC_LINUX_THREADS)
2984 /* Indirect system call may not always be easily available. */
2985 /* We could call _read, but that would interfere with the */
2986 /* libpthread interception of read. */
2987 /* On Linux, we have to be careful with the linuxthreads */
2988 /* read interception. */
2993 iov.iov_len = nbyte;
2994 result = readv(fd, &iov, 1);
2998 result = __read(fd, buf, nbyte);
3000 /* The two zero args at the end of this list are because one
3001 IA-64 syscall() implementation actually requires six args
3002 to be passed, even though they aren't always used. */
3003 result = syscall(SYS_read, fd, buf, nbyte, 0, 0);
3009 #endif /* !MSWIN32 && !MSWINCE && !GC_USE_LD_WRAP */
3011 #if defined(GC_USE_LD_WRAP) && !defined(THREADS)
3012 /* We use the GNU ld call wrapping facility. */
3013 /* This requires that the linker be invoked with "--wrap read". */
3014 /* This can be done by passing -Wl,"--wrap read" to gcc. */
3015 /* I'm not sure that this actually wraps whatever version of read */
3016 /* is called by stdio. That code also mentions __read. */
3017 # include <unistd.h>
3018 ssize_t __wrap_read(int fd, void *buf, size_t nbyte)
3023 GC_unprotect_range(buf, (word)nbyte);
3024 result = __real_read(fd, buf, nbyte);
3029 /* We should probably also do this for __read, or whatever stdio */
3030 /* actually calls. */
3036 GC_bool GC_page_was_ever_dirty(h)
3042 /* Reset the n pages starting at h to "was never dirty" status. */
3044 void GC_is_fresh(h, n)
3050 # endif /* MPROTECT_VDB */
3055 * See DEFAULT_VDB for interface descriptions.
3059 * This implementation assumes a Solaris 2.X-like /proc pseudo-file-system
3060 * from which we can read page modified bits. This facility is far from
3061 * optimal (e.g. we would like to get the info for only some of the
3062 * address space), but it avoids intercepting system calls.
3066 #include <sys/types.h>
3067 #include <sys/signal.h>
3068 #include <sys/fault.h>
3069 #include <sys/syscall.h>
3070 #include <sys/procfs.h>
3071 #include <sys/stat.h>
3073 #define INITIAL_BUF_SZ 16384
3074 word GC_proc_buf_size = INITIAL_BUF_SZ;
3077 #ifdef GC_SOLARIS_THREADS
3078 /* We don't have exact sp values for threads. So we count on */
3079 /* occasionally declaring stack pages to be fresh. Thus we */
3080 /* need a real implementation of GC_is_fresh. We can't clear */
3081 /* entries in GC_written_pages, since that would declare all */
3082 /* pages with the given hash address to be fresh. */
3083 # define MAX_FRESH_PAGES 8*1024 /* Must be power of 2 */
3084 struct hblk ** GC_fresh_pages; /* A direct mapped cache. */
3085 /* Collisions are dropped. */
3087 # define FRESH_PAGE_SLOT(h) (divHBLKSZ((word)(h)) & (MAX_FRESH_PAGES-1))
3088 # define ADD_FRESH_PAGE(h) \
3089 GC_fresh_pages[FRESH_PAGE_SLOT(h)] = (h)
3090 # define PAGE_IS_FRESH(h) \
3091 (GC_fresh_pages[FRESH_PAGE_SLOT(h)] == (h) && (h) != 0)
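/* Example: two pages whose block indices agree modulo               */
/* MAX_FRESH_PAGES share a slot, so a later ADD_FRESH_PAGE simply    */
/* evicts the earlier page.  Dropping an entry is safe: the evicted  */
/* page is merely no longer known to be fresh.                       */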
3094 /* Add all pages in pht2 to pht1 */
3095 void GC_or_pages(pht1, pht2)
3096 page_hash_table pht1, pht2;
3100 for (i = 0; i < PHT_SIZE; i++) pht1[i] |= pht2[i];
3105 void GC_dirty_init()
3110 GC_dirty_maintained = TRUE;
3111 if (GC_words_allocd != 0 || GC_words_allocd_before_gc != 0) {
3114 for (i = 0; i < PHT_SIZE; i++) GC_written_pages[i] = (word)(-1);
3116 GC_printf1("Allocated words:%lu:all pages may have been written\n",
3118 (GC_words_allocd + GC_words_allocd_before_gc));
3121 sprintf(buf, "/proc/%d", getpid());
3122 fd = open(buf, O_RDONLY);
3124 ABORT("/proc open failed");
3126 GC_proc_fd = syscall(SYS_ioctl, fd, PIOCOPENPD, 0);
3128 syscall(SYS_fcntl, GC_proc_fd, F_SETFD, FD_CLOEXEC);
3129 if (GC_proc_fd < 0) {
3130 ABORT("/proc ioctl failed");
3132 GC_proc_buf = GC_scratch_alloc(GC_proc_buf_size);
3133 # ifdef GC_SOLARIS_THREADS
3134 GC_fresh_pages = (struct hblk **)
3135 GC_scratch_alloc(MAX_FRESH_PAGES * sizeof (struct hblk *));
3136 if (GC_fresh_pages == 0) {
3137 GC_err_printf0("No space for fresh pages\n");
3140 BZERO(GC_fresh_pages, MAX_FRESH_PAGES * sizeof (struct hblk *));
3144 /* Ignore write hints. They don't help us here. */
3146 void GC_remove_protection(h, nblocks, is_ptrfree)
3153 #ifdef GC_SOLARIS_THREADS
3154 # define READ(fd,buf,nbytes) syscall(SYS_read, fd, buf, nbytes)
3156 # define READ(fd,buf,nbytes) read(fd, buf, nbytes)
3159 void GC_read_dirty()
3161 unsigned long ps, np;
3164 struct prasmap * map;
3166 ptr_t current_addr, limit;
3170 BZERO(GC_grungy_pages, (sizeof GC_grungy_pages));
3173 if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
3175 GC_printf1("/proc read failed: GC_proc_buf_size = %lu\n",
3179 /* Retry with larger buffer. */
3180 word new_size = 2 * GC_proc_buf_size;
3181 char * new_buf = GC_scratch_alloc(new_size);
3184 GC_proc_buf = bufp = new_buf;
3185 GC_proc_buf_size = new_size;
3187 if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
3188 WARN("Insufficient space for /proc read\n", 0);
3190 memset(GC_grungy_pages, 0xff, sizeof (page_hash_table));
3191 memset(GC_written_pages, 0xff, sizeof(page_hash_table));
3192 # ifdef GC_SOLARIS_THREADS
3193 BZERO(GC_fresh_pages,
3194 MAX_FRESH_PAGES * sizeof (struct hblk *));
3200 /* Copy dirty bits into GC_grungy_pages */
3201 nmaps = ((struct prpageheader *)bufp) -> pr_nmap;
3202 /* printf( "nmaps = %d, PG_REFERENCED = %d, PG_MODIFIED = %d\n",
3203 nmaps, PG_REFERENCED, PG_MODIFIED); */
3204 bufp = bufp + sizeof(struct prpageheader);
3205 for (i = 0; i < nmaps; i++) {
3206 map = (struct prasmap *)bufp;
3207 vaddr = (ptr_t)(map -> pr_vaddr);
3208 ps = map -> pr_pagesize;
3209 np = map -> pr_npage;
3210 /* printf("vaddr = 0x%X, ps = 0x%X, np = 0x%X\n", vaddr, ps, np); */
3211 limit = vaddr + ps * np;
3212 bufp += sizeof (struct prasmap);
3213 for (current_addr = vaddr;
3214 current_addr < limit; current_addr += ps){
3215 if ((*bufp++) & PG_MODIFIED) {
3216 register struct hblk * h = (struct hblk *) current_addr;
3218 while ((ptr_t)h < current_addr + ps) {
3219 register word index = PHT_HASH(h);
3221 set_pht_entry_from_index(GC_grungy_pages, index);
3222 # ifdef GC_SOLARIS_THREADS
3224 register int slot = FRESH_PAGE_SLOT(h);
3226 if (GC_fresh_pages[slot] == h) {
3227 GC_fresh_pages[slot] = 0;
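        /* Advance bufp to the next long-word boundary: the          */
        /* per-mapping page-data arrays in the /proc buffer appear   */
        /* to be padded to long alignment.                           */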
3235 bufp += sizeof(long) - 1;
3236 bufp = (char *)((unsigned long)bufp & ~(sizeof(long)-1));
3238 /* Update GC_written_pages. */
3239 GC_or_pages(GC_written_pages, GC_grungy_pages);
3240 # ifdef GC_SOLARIS_THREADS
3241 /* Make sure that old stacks are considered completely clean */
3242 /* unless written again. */
3243 GC_old_stacks_are_fresh();
3249 GC_bool GC_page_was_dirty(h)
3252 register word index = PHT_HASH(h);
3253 register GC_bool result;
3255 result = get_pht_entry_from_index(GC_grungy_pages, index);
3256 # ifdef GC_SOLARIS_THREADS
3257 if (result && PAGE_IS_FRESH(h)) result = FALSE;
3258 /* This happens only if page was declared fresh since */
3259 /* the read_dirty call, e.g. because it's in an unused */
3260 /* thread stack. It's OK to treat it as clean, in */
3261 /* that case. And it's consistent with */
3262 /* GC_page_was_ever_dirty. */
3267 GC_bool GC_page_was_ever_dirty(h)
3270 register word index = PHT_HASH(h);
3271 register GC_bool result;
3273 result = get_pht_entry_from_index(GC_written_pages, index);
3274 # ifdef GC_SOLARIS_THREADS
3275 if (result && PAGE_IS_FRESH(h)) result = FALSE;
3280 /* Caller holds allocation lock. */
3281 void GC_is_fresh(h, n)
3286 register word index;
3288 # ifdef GC_SOLARIS_THREADS
3291 if (GC_fresh_pages != 0) {
3292 for (i = 0; i < n; i++) {
3293 ADD_FRESH_PAGE(h + i);
3299 # endif /* PROC_VDB */
3304 # include "vd/PCR_VD.h"
3306 # define NPAGES (32*1024) /* 128 MB, assuming 4 KB HBLKSIZE */
3308 PCR_VD_DB GC_grungy_bits[NPAGES];
3310 ptr_t GC_vd_base; /* Address corresponding to GC_grungy_bits[0] */
3311 /* HBLKSIZE aligned. */
3313 void GC_dirty_init()
3315 GC_dirty_maintained = TRUE;
3316 /* For the time being, we assume the heap generally grows up */
3317 GC_vd_base = GC_heap_sects[0].hs_start;
3318 if (GC_vd_base == 0) {
3319 ABORT("Bad initial heap segment");
3321 if (PCR_VD_Start(HBLKSIZE, GC_vd_base, NPAGES*HBLKSIZE)
3323 ABORT("dirty bit initialization failed");
3327 void GC_read_dirty()
3329 /* lazily enable dirty bits on newly added heap sects */
3331 static int onhs = 0;
3332 int nhs = GC_n_heap_sects;
3333 for( ; onhs < nhs; onhs++ ) {
3334 PCR_VD_WriteProtectEnable(
3335 GC_heap_sects[onhs].hs_start,
3336 GC_heap_sects[onhs].hs_bytes );
3341 if (PCR_VD_Clear(GC_vd_base, NPAGES*HBLKSIZE, GC_grungy_bits)
3343 ABORT("dirty bit read failed");
3347 GC_bool GC_page_was_dirty(h)
3350 if((ptr_t)h < GC_vd_base || (ptr_t)h >= GC_vd_base + NPAGES*HBLKSIZE) {
3353 return(GC_grungy_bits[h - (struct hblk *)GC_vd_base] & PCR_VD_DB_dirtyBit);
3357 void GC_remove_protection(h, nblocks, is_ptrfree)
3362 PCR_VD_WriteProtectDisable(h, nblocks*HBLKSIZE);
3363 PCR_VD_WriteProtectEnable(h, nblocks*HBLKSIZE);
3366 # endif /* PCR_VDB */
3368 #if defined(MPROTECT_VDB) && defined(DARWIN)
3369 /* The following sources were used as a *reference* for this exception handling code:
3371 1. Apple's mach/xnu documentation
3372 2. Timothy J. Wood's "Mach Exception Handlers 101" post to the
3373 omnigroup's macosx-dev list.
3374 www.omnigroup.com/mailman/archive/macosx-dev/2000-June/014178.html
3375 3. macosx-nat.c from Apple's GDB source code.
3378 /* The bug that caused all this trouble should now be fixed. This should
3379 eventually be removed if all goes well. */
3380 /* define BROKEN_EXCEPTION_HANDLING */
3382 #include <mach/mach.h>
3383 #include <mach/mach_error.h>
3384 #include <mach/thread_status.h>
3385 #include <mach/exception.h>
3386 #include <mach/task.h>
3387 #include <pthread.h>
3389 /* These are not defined in any header, although they are documented */
3390 extern boolean_t exc_server(mach_msg_header_t *,mach_msg_header_t *);
3391 extern kern_return_t exception_raise(
3392 mach_port_t,mach_port_t,mach_port_t,
3393 exception_type_t,exception_data_t,mach_msg_type_number_t);
3394 extern kern_return_t exception_raise_state(
3395 mach_port_t,mach_port_t,mach_port_t,
3396 exception_type_t,exception_data_t,mach_msg_type_number_t,
3397 thread_state_flavor_t*,thread_state_t,mach_msg_type_number_t,
3398 thread_state_t,mach_msg_type_number_t*);
3399 extern kern_return_t exception_raise_state_identity(
3400 mach_port_t,mach_port_t,mach_port_t,
3401 exception_type_t,exception_data_t,mach_msg_type_number_t,
3402 thread_state_flavor_t*,thread_state_t,mach_msg_type_number_t,
3403 thread_state_t,mach_msg_type_number_t*);
3406 #define MAX_EXCEPTION_PORTS 16
3409 mach_msg_type_number_t count;
3410 exception_mask_t masks[MAX_EXCEPTION_PORTS];
3411 exception_handler_t ports[MAX_EXCEPTION_PORTS];
3412 exception_behavior_t behaviors[MAX_EXCEPTION_PORTS];
3413 thread_state_flavor_t flavors[MAX_EXCEPTION_PORTS];
3417 mach_port_t exception;
3418 #if defined(THREADS)
3424 mach_msg_header_t head;
3428 GC_MP_NORMAL, GC_MP_DISCARDING, GC_MP_STOPPED
3429 } GC_mprotect_state_t;
3431 /* FIXME: 1 and 2 seem to be safe to use in the msgh_id field,
3432 but it isn't documented. Use the source and see if they should be ok. */
3437 /* These values are only used on the reply port */
3440 #if defined(THREADS)
3442 GC_mprotect_state_t GC_mprotect_state;
3444 /* The following should ONLY be called when the world is stopped */
3445 static void GC_mprotect_thread_notify(mach_msg_id_t id) {
3448 mach_msg_trailer_t trailer;
3450 mach_msg_return_t r;
3452 buf.msg.head.msgh_bits =
3453 MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND,0);
3454 buf.msg.head.msgh_size = sizeof(buf.msg);
3455 buf.msg.head.msgh_remote_port = GC_ports.exception;
3456 buf.msg.head.msgh_local_port = MACH_PORT_NULL;
3457 buf.msg.head.msgh_id = id;
3461 MACH_SEND_MSG|MACH_RCV_MSG|MACH_RCV_LARGE,
3465 MACH_MSG_TIMEOUT_NONE,
3467 if(r != MACH_MSG_SUCCESS)
3468 ABORT("mach_msg failed in GC_mprotect_thread_notify");
3469 if(buf.msg.head.msgh_id != ID_ACK)
3470 ABORT("invalid ack in GC_mprotect_thread_notify");
3473 /* Should only be called by the mprotect thread */
3474 static void GC_mprotect_thread_reply() {