author    tromey <tromey@138bc75d-0d04-0410-961f-82ee72b054a4>  1999-11-01 20:48:52 +0000
committer tromey <tromey@138bc75d-0d04-0410-961f-82ee72b054a4>  1999-11-01 20:48:52 +0000
commit    e235d2a28d6c05a9725c4384ca4f312c87c50a14 (patch)
tree      5663ccc579e052940a182e16c275105d94209f95 /boehm-gc
parent    cbee312b23af31fc38149dcd9be45c7046367347 (diff)
Imported Boehm GC 5.0alpha4
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/branches/boehm@30327 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'boehm-gc')
-rw-r--r--  boehm-gc/Makefile                    | 100
-rw-r--r--  boehm-gc/README.QUICK                |   4
-rw-r--r--  boehm-gc/allchblk.c                  | 664
-rw-r--r--  boehm-gc/alloc.c                     | 151
-rw-r--r--  boehm-gc/blacklst.c                  |   7
-rw-r--r--  boehm-gc/cord/cordxtra.c             |   2
-rw-r--r--  boehm-gc/dbg_mlc.c                   | 190
-rw-r--r--  boehm-gc/dyn_load.c                  |  15
-rw-r--r--  boehm-gc/finalize.c                  |  69
-rw-r--r--  boehm-gc/headers.c                   |  86
-rw-r--r--  boehm-gc/include/gc.h                |  43
-rw-r--r--  boehm-gc/include/gc_alloc.h          |   2
-rw-r--r--  boehm-gc/include/new_gc_alloc.h      |   6
-rw-r--r--  boehm-gc/include/private/gc_hdrs.h   |   6
-rw-r--r--  boehm-gc/include/private/gc_priv.h   | 185
-rw-r--r--  boehm-gc/include/private/gcconfig.h  |  62
-rw-r--r--  boehm-gc/linux_threads.c             |   6
-rw-r--r--  boehm-gc/mach_dep.c                  |  77
-rw-r--r--  boehm-gc/malloc.c                    |  12
-rw-r--r--  boehm-gc/mallocx.c                   |  12
-rw-r--r--  boehm-gc/mark.c                      |  28
-rw-r--r--  boehm-gc/mark_rts.c                  |  28
-rw-r--r--  boehm-gc/misc.c                      |  37
-rw-r--r--  boehm-gc/os_dep.c                    | 404
-rw-r--r--  boehm-gc/reclaim.c                   | 294
-rw-r--r--  boehm-gc/solaris_threads.c           |  27
-rw-r--r--  boehm-gc/sparc_sunos4_mach_dep.s     |   2
-rw-r--r--  boehm-gc/threadlibs.c                |   3
-rw-r--r--  boehm-gc/version.h                   |   6
29 files changed, 1960 insertions, 568 deletions
diff --git a/boehm-gc/Makefile b/boehm-gc/Makefile
index 063d394a9e9..d6eab33f088 100644
--- a/boehm-gc/Makefile
+++ b/boehm-gc/Makefile
@@ -7,9 +7,9 @@
# and runs some tests of collector and cords. Does not add cords or
# c++ interface to gc.a
# cord/de - builds dumb editor based on cords.
-ABI_FLAG=
+ABI_FLAG=
CC=cc $(ABI_FLAG)
-CXX=CC $(ABI_FLAG)
+CXX=g++ $(ABI_FLAG)
AS=as $(ABI_FLAG)
# The above doesn't work with gas, which doesn't run cpp.
# Define AS as `gcc -c -x assembler-with-cpp' instead.
@@ -24,10 +24,8 @@ CFLAGS= -O -DATOMIC_UNCOLLECTABLE -DNO_SIGNALS -DNO_EXECUTE_PERMISSION -DALL_INT
# Setjmp_test may yield overly optimistic results when compiled
# without optimization.
# -DSILENT disables statistics printing, and improves performance.
-# -DCHECKSUMS reports on erroneously clear dirty bits, and unexpectedly
-# altered stubborn objects, at substantial performance cost.
-# Use only for incremental collector debugging.
-# -DFIND_LEAK causes the collector to assume that all inaccessible
+# -DFIND_LEAK causes GC_find_leak to be initially set.
+# This causes the collector to assume that all inaccessible
# objects should have been explicitly deallocated, and reports exceptions.
# Finalization and the test program are not usable in this mode.
# -DSOLARIS_THREADS enables support for Solaris (thr_) threads.
@@ -37,6 +35,8 @@ CFLAGS= -O -DATOMIC_UNCOLLECTABLE -DNO_SIGNALS -DNO_EXECUTE_PERMISSION -DALL_INT
# -D_SOLARIS_PTHREADS enables support for Solaris pthreads.
# Define SOLARIS_THREADS as well.
# -DIRIX_THREADS enables support for Irix pthreads. See README.irix.
+# -DHPUX_THREADS enables support for HP/UX 11 pthreads.
+# Also requires -D_REENTRANT. See README.hp.
# -DLINUX_THREADS enables support for Xavier Leroy's Linux threads.
# see README.linux. -D_REENTRANT may also be required.
# -DALL_INTERIOR_POINTERS allows all pointers to the interior
@@ -85,9 +85,12 @@ CFLAGS= -O -DATOMIC_UNCOLLECTABLE -DNO_SIGNALS -DNO_EXECUTE_PERMISSION -DALL_INT
# finalize.c). Objects reachable from finalizable objects will be marked
# in a separate postpass, and hence their memory won't be reclaimed.
# Not recommended unless you are implementing a language that specifies
-# these semantics.
+# these semantics.  Since 5.0, this determines only the initial value
+# of the GC_java_finalization variable.
# -DFINALIZE_ON_DEMAND causes finalizers to be run only in response
# to explicit GC_invoke_finalizers() calls.
+# In 5.0 this became runtime adjustable, and this only determines the
+# initial value of GC_finalize_on_demand.
# -DATOMIC_UNCOLLECTABLE includes code for GC_malloc_atomic_uncollectable.
# This is useful if either the vendor malloc implementation is poor,
# or if REDIRECT_MALLOC is used.
@@ -98,6 +101,10 @@ CFLAGS= -O -DATOMIC_UNCOLLECTABLE -DNO_SIGNALS -DNO_EXECUTE_PERMISSION -DALL_INT
# fragmentation, but generally better performance for large heaps.
# -DUSE_MMAP use MMAP instead of sbrk to get new memory.
# Works for Solaris and Irix.
+# -DUSE_MUNMAP causes memory to be returned to the OS under the right
+# circumstances. This currently disables VM-based incremental collection.
+# This is currently experimental, and works only under some Unix and
+# Linux versions.
# -DMMAP_STACKS (for Solaris threads) Use mmap from /dev/zero rather than
# GC_scratch_alloc() to get stack memory.
# -DPRINT_BLACK_LIST Whenever a black list entry is added, i.e. whenever
@@ -109,12 +116,25 @@ CFLAGS= -O -DATOMIC_UNCOLLECTABLE -DNO_SIGNALS -DNO_EXECUTE_PERMISSION -DALL_INT
# allocation strategy. The new strategy tries harder to minimize
# fragmentation, sometimes at the expense of spending more time in the
# large block allocator and/or collecting more frequently.
-# If you expect the allocator to promtly use an explicitly expanded
+# If you expect the allocator to promptly use an explicitly expanded
# heap, this is highly recommended.
+# -DKEEP_BACK_PTRS Add code to save back pointers in debugging headers
+# for objects allocated with the debugging allocator. If all objects
+# are allocated through GC_MALLOC with GC_DEBUG defined, this allows the client
+# to determine how particular or randomly chosen objects are reachable
+# for debugging/profiling purposes. The backptr.h interface is
+# implemented only if this is defined.
+# -DGC_ASSERTIONS Enable some internal GC assertion checking. Currently
+# this facility is only used in a few places. It is intended primarily
+# for debugging of the garbage collector itself, but could also
+# occasionally be useful for debugging of client code. Slows down the
+# collector somewhat, but not drastically.
+# -DCHECKSUMS reports on erroneously clear dirty bits, and unexpectedly
+# altered stubborn objects, at substantial performance cost.
+# Use only for debugging of the incremental collector.
#
-
LIBGC_CFLAGS= -O -DNO_SIGNALS -DSILENT \
-DREDIRECT_MALLOC=GC_malloc_uncollectable \
-DDONT_ADD_BYTE_AT_END -DALL_INTERIOR_POINTERS
@@ -131,9 +151,9 @@ RANLIB= ranlib
srcdir = .
VPATH = $(srcdir)
-OBJS= alloc.o reclaim.o allchblk.o misc.o mach_dep.o os_dep.o mark_rts.o headers.o mark.o obj_map.o blacklst.o finalize.o new_hblk.o dbg_mlc.o malloc.o stubborn.o checksums.o solaris_threads.o irix_threads.o linux_threads.o typd_mlc.o ptr_chck.o mallocx.o solaris_pthreads.o
+OBJS= alloc.o reclaim.o allchblk.o misc.o mach_dep.o os_dep.o mark_rts.o headers.o mark.o obj_map.o blacklst.o finalize.o new_hblk.o dbg_mlc.o malloc.o stubborn.o checksums.o solaris_threads.o hpux_irix_threads.o linux_threads.o typd_mlc.o ptr_chck.o mallocx.o solaris_pthreads.o
-CSRCS= reclaim.c allchblk.c misc.c alloc.c mach_dep.c os_dep.c mark_rts.c headers.c mark.c obj_map.c pcr_interface.c blacklst.c finalize.c new_hblk.c real_malloc.c dyn_load.c dbg_mlc.c malloc.c stubborn.c checksums.c solaris_threads.c irix_threads.c linux_threads.c typd_mlc.c ptr_chck.c mallocx.c solaris_pthreads.c
+CSRCS= reclaim.c allchblk.c misc.c alloc.c mach_dep.c os_dep.c mark_rts.c headers.c mark.c obj_map.c pcr_interface.c blacklst.c finalize.c new_hblk.c real_malloc.c dyn_load.c dbg_mlc.c malloc.c stubborn.c checksums.c solaris_threads.c hpux_irix_threads.c linux_threads.c typd_mlc.c ptr_chck.c mallocx.c solaris_pthreads.c
CORD_SRCS= cord/cordbscs.c cord/cordxtra.c cord/cordprnt.c cord/de.c cord/cordtest.c cord/cord.h cord/ec.h cord/private/cord_pos.h cord/de_win.c cord/de_win.h cord/de_cmds.h cord/de_win.ICO cord/de_win.RC cord/SCOPTIONS.amiga cord/SMakefile.amiga
@@ -145,7 +165,7 @@ SRCS= $(CSRCS) mips_sgi_mach_dep.s rs6000_mach_dep.s alpha_mach_dep.s \
threadlibs.c if_mach.c if_not_there.c gc_cpp.cc gc_cpp.h weakpointer.h \
gcc_support.c mips_ultrix_mach_dep.s include/gc_alloc.h gc_alloc.h \
include/new_gc_alloc.h include/javaxfc.h sparc_sunos4_mach_dep.s \
- solaris_threads.h $(CORD_SRCS)
+ solaris_threads.h backptr.h hpux_test_and_clear.s $(CORD_SRCS)
OTHER_FILES= Makefile PCR-Makefile OS2_MAKEFILE NT_MAKEFILE BCC_MAKEFILE \
README test.c test_cpp.cc setjmp_t.c SMakefile.amiga \
@@ -153,7 +173,7 @@ OTHER_FILES= Makefile PCR-Makefile OS2_MAKEFILE NT_MAKEFILE BCC_MAKEFILE \
cord/gc.h include/gc.h include/gc_typed.h include/cord.h \
include/ec.h include/private/cord_pos.h include/private/gcconfig.h \
include/private/gc_hdrs.h include/private/gc_priv.h \
- include/gc_cpp.h README.rs6000 \
+ include/gc_cpp.h README.rs6000 include/backptr.h \
include/weakpointer.h README.QUICK callprocs pc_excludes \
barrett_diagram README.OS2 README.Mac MacProjects.sit.hqx \
MacOS.c EMX_MAKEFILE makefile.depend README.debugging \
@@ -162,7 +182,8 @@ OTHER_FILES= Makefile PCR-Makefile OS2_MAKEFILE NT_MAKEFILE BCC_MAKEFILE \
add_gc_prefix.c README.solaris2 README.sgi README.hp README.uts \
win32_threads.c NT_THREADS_MAKEFILE gc.mak README.dj Makefile.dj \
README.alpha README.linux version.h Makefile.DLLs \
- WCC_MAKEFILE
+ WCC_MAKEFILE nursery.c include/gc_nursery.h include/gc_copy_descr.h \
+ include/leak_detector.h
CORD_INCLUDE_FILES= $(srcdir)/gc.h $(srcdir)/cord/cord.h $(srcdir)/cord/ec.h \
$(srcdir)/cord/private/cord_pos.h
@@ -199,19 +220,23 @@ mark.o typd_mlc.o finalize.o: $(srcdir)/gc_mark.h
base_lib gc.a: $(OBJS) dyn_load.o $(UTILS)
echo > base_lib
- rm -f on_sparc_sunos5_1
- ./if_mach SPARC SUNOS5 touch on_sparc_sunos5_1
+ rm -f dont_ar_1
+ ./if_mach SPARC SUNOS5 touch dont_ar_1
./if_mach SPARC SUNOS5 $(AR) rus gc.a $(OBJS) dyn_load.o
- ./if_not_there on_sparc_sunos5_1 $(AR) ru gc.a $(OBJS) dyn_load.o
- ./if_not_there on_sparc_sunos5_1 $(RANLIB) gc.a || cat /dev/null
+ ./if_mach M68K AMIGA touch dont_ar_1
+ ./if_mach M68K AMIGA $(AR) -vrus gc.a $(OBJS) dyn_load.o
+ ./if_not_there dont_ar_1 $(AR) ru gc.a $(OBJS) dyn_load.o
+ ./if_not_there dont_ar_1 $(RANLIB) gc.a || cat /dev/null
# ignore ranlib failure; that usually means it doesn't exist, and isn't needed
cords: $(CORD_OBJS) cord/cordtest $(UTILS)
- rm -f on_sparc_sunos5_3
- ./if_mach SPARC SUNOS5 touch on_sparc_sunos5_3
+ rm -f dont_ar_3
+ ./if_mach SPARC SUNOS5 touch dont_ar_3
./if_mach SPARC SUNOS5 $(AR) rus gc.a $(CORD_OBJS)
- ./if_not_there on_sparc_sunos5_3 $(AR) ru gc.a $(CORD_OBJS)
- ./if_not_there on_sparc_sunos5_3 $(RANLIB) gc.a || cat /dev/null
+ ./if_mach M68K AMIGA touch dont_ar_3
+ ./if_mach M68K AMIGA $(AR) -vrus gc.a $(CORD_OBJS)
+ ./if_not_there dont_ar_3 $(AR) ru gc.a $(CORD_OBJS)
+ ./if_not_there dont_ar_3 $(RANLIB) gc.a || cat /dev/null
gc_cpp.o: $(srcdir)/gc_cpp.cc $(srcdir)/gc_cpp.h $(srcdir)/gc.h Makefile
$(CXX) -c $(CXXFLAGS) $(srcdir)/gc_cpp.cc
@@ -219,15 +244,17 @@ gc_cpp.o: $(srcdir)/gc_cpp.cc $(srcdir)/gc_cpp.h $(srcdir)/gc.h Makefile
test_cpp: $(srcdir)/test_cpp.cc $(srcdir)/gc_cpp.h gc_cpp.o $(srcdir)/gc.h \
base_lib $(UTILS)
rm -f test_cpp
- ./if_mach HP_PA "" $(CXX) $(CXXFLAGS) -o test_cpp $(srcdir)/test_cpp.cc gc_cpp.o gc.a -ldld
+ ./if_mach HP_PA HPUX $(CXX) $(CXXFLAGS) -o test_cpp $(srcdir)/test_cpp.cc gc_cpp.o gc.a -ldld `./threadlibs`
./if_not_there test_cpp $(CXX) $(CXXFLAGS) -o test_cpp $(srcdir)/test_cpp.cc gc_cpp.o gc.a `./threadlibs`
c++: gc_cpp.o $(srcdir)/gc_cpp.h test_cpp
- rm -f on_sparc_sunos5_4
- ./if_mach SPARC SUNOS5 touch on_sparc_sunos5_4
+ rm -f dont_ar_4
+ ./if_mach SPARC SUNOS5 touch dont_ar_4
./if_mach SPARC SUNOS5 $(AR) rus gc.a gc_cpp.o
- ./if_not_there on_sparc_sunos5_4 $(AR) ru gc.a gc_cpp.o
- ./if_not_there on_sparc_sunos5_4 $(RANLIB) gc.a || cat /dev/null
+ ./if_mach M68K AMIGA touch dont_ar_4
+ ./if_mach M68K AMIGA $(AR) -vrus gc.a gc_cpp.o
+ ./if_not_there dont_ar_4 $(AR) ru gc.a gc_cpp.o
+ ./if_not_there dont_ar_4 $(RANLIB) gc.a || cat /dev/null
./test_cpp 1
echo > c++
@@ -276,9 +303,13 @@ mach_dep.o: $(srcdir)/mach_dep.c $(srcdir)/mips_sgi_mach_dep.s $(srcdir)/mips_ul
./if_mach ALPHA "" $(AS) -o mach_dep.o $(srcdir)/alpha_mach_dep.s
./if_mach SPARC SUNOS5 $(AS) -o mach_dep.o $(srcdir)/sparc_mach_dep.s
./if_mach SPARC SUNOS4 $(AS) -o mach_dep.o $(srcdir)/sparc_sunos4_mach_dep.s
+ ./if_mach SPARC OPENBSD $(AS) -o mach_dep.o $(srcdir)/sparc_sunos4_mach_dep.s
+ ./if_mach HP_PA HPUX $(AS) -o hpux_test_and_clear.o $(srcdir)/hpux_test_and_clear.s
+ ./if_mach HP_PA HPUX $(CC) -c -o md_tmp.o $(SPECIALCFLAGS) $(srcdir)/mach_dep.c
+ ./if_mach HP_PA HPUX ld -r -o mach_dep.o md_tmp.o hpux_test_and_clear.o
./if_not_there mach_dep.o $(CC) -c $(SPECIALCFLAGS) $(srcdir)/mach_dep.c
-mark_rts.o: $(srcdir)/mark_rts.c if_mach if_not_there $(UTILS)
+mark_rts.o: $(srcdir)/mark_rts.c $(UTILS)
rm -f mark_rts.o
-./if_mach ALPHA OSF1 $(CC) -c $(CFLAGS) -Wo,-notail $(srcdir)/mark_rts.c
./if_not_there mark_rts.o $(CC) -c $(CFLAGS) $(srcdir)/mark_rts.c
@@ -303,16 +334,17 @@ cord/cordprnt.o: $(srcdir)/cord/cordprnt.c $(CORD_INCLUDE_FILES)
cord/cordtest: $(srcdir)/cord/cordtest.c $(CORD_OBJS) gc.a $(UTILS)
rm -f cord/cordtest
./if_mach SPARC DRSNX $(CC) $(CFLAGS) -o cord/cordtest $(srcdir)/cord/cordtest.c $(CORD_OBJS) gc.a -lucb
- ./if_mach HP_PA "" $(CC) $(CFLAGS) -o cord/cordtest $(srcdir)/cord/cordtest.c $(CORD_OBJS) gc.a -ldld
+ ./if_mach HP_PA HPUX $(CC) $(CFLAGS) -o cord/cordtest $(srcdir)/cord/cordtest.c $(CORD_OBJS) gc.a -ldld `./threadlibs`
./if_not_there cord/cordtest $(CC) $(CFLAGS) -o cord/cordtest $(srcdir)/cord/cordtest.c $(CORD_OBJS) gc.a `./threadlibs`
cord/de: $(srcdir)/cord/de.c cord/cordbscs.o cord/cordxtra.o gc.a $(UTILS)
rm -f cord/de
./if_mach SPARC DRSNX $(CC) $(CFLAGS) -o cord/de $(srcdir)/cord/de.c cord/cordbscs.o cord/cordxtra.o gc.a $(CURSES) -lucb `./threadlibs`
- ./if_mach HP_PA "" $(CC) $(CFLAGS) -o cord/de $(srcdir)/cord/de.c cord/cordbscs.o cord/cordxtra.o gc.a $(CURSES) -ldld
+ ./if_mach HP_PA HPUX $(CC) $(CFLAGS) -o cord/de $(srcdir)/cord/de.c cord/cordbscs.o cord/cordxtra.o gc.a $(CURSES) -ldld `./threadlibs`
./if_mach RS6000 "" $(CC) $(CFLAGS) -o cord/de $(srcdir)/cord/de.c cord/cordbscs.o cord/cordxtra.o gc.a -lcurses
./if_mach I386 LINUX $(CC) $(CFLAGS) -o cord/de $(srcdir)/cord/de.c cord/cordbscs.o cord/cordxtra.o gc.a -lcurses `./threadlibs`
./if_mach ALPHA LINUX $(CC) $(CFLAGS) -o cord/de $(srcdir)/cord/de.c cord/cordbscs.o cord/cordxtra.o gc.a -lcurses
+ ./if_mach M68K AMIGA $(CC) $(CFLAGS) -o cord/de $(srcdir)/cord/de.c cord/cordbscs.o cord/cordxtra.o gc.a -lcurses
./if_not_there cord/de $(CC) $(CFLAGS) -o cord/de $(srcdir)/cord/de.c cord/cordbscs.o cord/cordxtra.o gc.a $(CURSES) `./threadlibs`
if_mach: $(srcdir)/if_mach.c $(srcdir)/gcconfig.h
@@ -330,16 +362,16 @@ clean:
threadlibs $(CORD_OBJS) cord/cordtest cord/de
-rm -f *~
-gctest: test.o gc.a if_mach if_not_there
+gctest: test.o gc.a $(UTILS)
rm -f gctest
./if_mach SPARC DRSNX $(CC) $(CFLAGS) -o gctest test.o gc.a -lucb
- ./if_mach HP_PA "" $(CC) $(CFLAGS) -o gctest test.o gc.a -ldld
+ ./if_mach HP_PA HPUX $(CC) $(CFLAGS) -o gctest test.o gc.a -ldld `./threadlibs`
./if_not_there gctest $(CC) $(CFLAGS) -o gctest test.o gc.a `./threadlibs`
# If an optimized setjmp_test generates a segmentation fault,
# odds are your compiler is broken. Gctest may still work.
# Try compiling setjmp_t.c unoptimized.
-setjmp_test: $(srcdir)/setjmp_t.c $(srcdir)/gc.h if_mach if_not_there
+setjmp_test: $(srcdir)/setjmp_t.c $(srcdir)/gc.h $(UTILS)
$(CC) $(CFLAGS) -o setjmp_test $(srcdir)/setjmp_t.c
test: KandRtest cord/cordtest
@@ -355,7 +387,7 @@ add_gc_prefix: add_gc_prefix.c
gc.tar: $(SRCS) $(OTHER_FILES) add_gc_prefix
./add_gc_prefix $(SRCS) $(OTHER_FILES) > /tmp/gc.tar-files
- (cd $(srcdir)/.. ; tar cvfh - `cat /tmp/gc.tar-files`) > gc.tar
+ tar cvfh gc.tar `cat /tmp/gc.tar-files`
pc_gc.tar: $(SRCS) $(OTHER_FILES)
tar cvfX pc_gc.tar pc_excludes $(SRCS) $(OTHER_FILES)
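
The Makefile comments above note that several former compile-time switches
(FIND_LEAK, FINALIZE_ON_DEMAND, JAVA_FINALIZATION) now merely seed runtime
variables in 5.0. A minimal client sketch of the new runtime knobs, assuming
GC_find_leak and GC_finalize_on_demand are exported by gc.h in this release
as they are in later ones:

    #include "gc.h"

    /* Sketch only: these externs mirror the variables named in the
       Makefile comments above; their exact declarations in 5.0alpha4
       may differ. */
    extern int GC_find_leak;          /* replaces compile-time -DFIND_LEAK */
    extern int GC_finalize_on_demand; /* replaces -DFINALIZE_ON_DEMAND */

    int main(void)
    {
        GC_find_leak = 1;          /* report unreachable-but-unfreed objects */
        GC_finalize_on_demand = 1; /* queue finalizers instead of running them */
        GC_malloc(16);             /* deliberately dropped; reported at next GC */
        GC_gcollect();
        GC_invoke_finalizers();    /* explicit run, per FINALIZE_ON_DEMAND */
        return 0;
    }
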
diff --git a/boehm-gc/README.QUICK b/boehm-gc/README.QUICK
index 3273c8ba4eb..ddebf82ca50 100644
--- a/boehm-gc/README.QUICK
+++ b/boehm-gc/README.QUICK
@@ -1,5 +1,7 @@
Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
-Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
+Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+Copyright (c) 1999 by Hewlett-Packard. All rights reserved.
THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
diff --git a/boehm-gc/allchblk.c b/boehm-gc/allchblk.c
index ff94b4803ac..189b94214a7 100644
--- a/boehm-gc/allchblk.c
+++ b/boehm-gc/allchblk.c
@@ -1,7 +1,8 @@
/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
- * Copyright (c) 1998 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1998-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
@@ -12,7 +13,6 @@
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
-/* Boehm, August 9, 1995 5:08 pm PDT */
#define DEBUG
#undef DEBUG
@@ -21,39 +21,68 @@
/*
- * allocate/free routines for heap blocks
- * Note that everything called from outside the garbage collector
- * should be prepared to abort at any point as the result of a signal.
+ * Free heap blocks are kept on one of several free lists,
+ * depending on the size of the block. Each free list is doubly linked.
+ * Adjacent free blocks are coalesced.
*/
-/*
- * Free heap blocks are kept on a list sorted by address.
- * The hb_hdr.hbh_sz field of a free heap block contains the length
- * (in bytes) of the entire block.
- * Neighbors are coalesced.
- */
# define MAX_BLACK_LIST_ALLOC (2*HBLKSIZE)
/* largest block we will allocate starting on a black */
/* listed block. Must be >= HBLKSIZE. */
-struct hblk * GC_hblkfreelist = 0;
-struct hblk *GC_savhbp = (struct hblk *)0; /* heap block preceding next */
- /* block to be examined by */
- /* GC_allochblk. */
+# define UNIQUE_THRESHOLD 32
+ /* Sizes up to this many HBLKs each have their own free list */
+# define HUGE_THRESHOLD 256
+ /* Sizes of at least this many heap blocks are mapped to a */
+ /* single free list. */
+# define FL_COMPRESSION 8
+ /* In between sizes map this many distinct sizes to a single */
+ /* bin. */
+
+# define N_HBLK_FLS (HUGE_THRESHOLD - UNIQUE_THRESHOLD)/FL_COMPRESSION \
+ + UNIQUE_THRESHOLD
+
+struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };
+
+/* Map a number of blocks to the appropriate large block free list index. */
+int GC_hblk_fl_from_blocks(blocks_needed)
+word blocks_needed;
+{
+ if (blocks_needed <= UNIQUE_THRESHOLD) return blocks_needed;
+ if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
+ return (blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
+ + UNIQUE_THRESHOLD;
+
+}
+
+# define HBLK_IS_FREE(hdr) ((hdr) -> hb_map == GC_invalid_map)
+# define PHDR(hhdr) HDR(hhdr -> hb_prev)
+# define NHDR(hhdr) HDR(hhdr -> hb_next)
+
+# ifdef USE_MUNMAP
+# define IS_MAPPED(hhdr) (((hhdr) -> hb_flags & WAS_UNMAPPED) == 0)
+# else /* !USE_MUNMAP */
+# define IS_MAPPED(hhdr) 1
+# endif /* USE_MUNMAP */
# if !defined(NO_DEBUGGING)
void GC_print_hblkfreelist()
{
- struct hblk * h = GC_hblkfreelist;
+ struct hblk * h;
word total_free = 0;
- hdr * hhdr = HDR(h);
+ hdr * hhdr;
word sz;
+ int i;
- while (h != 0) {
+ for (i = 0; i <= N_HBLK_FLS; ++i) {
+ h = GC_hblkfreelist[i];
+ if (0 != h) GC_printf1("Free list %ld:\n", (unsigned long)i);
+ while (h != 0) {
+ hhdr = HDR(h);
sz = hhdr -> hb_sz;
- GC_printf2("0x%lx size %lu ", (unsigned long)h, (unsigned long)sz);
+ GC_printf2("\t0x%lx size %lu ", (unsigned long)h, (unsigned long)sz);
total_free += sz;
if (GC_is_black_listed(h, HBLKSIZE) != 0) {
GC_printf0("start black listed\n");
@@ -63,11 +92,90 @@ void GC_print_hblkfreelist()
GC_printf0("not black listed\n");
}
h = hhdr -> hb_next;
- hhdr = HDR(h);
+ }
+ }
+ if (total_free != GC_large_free_bytes) {
+ GC_printf1("GC_large_free_bytes = %lu (INCONSISTENT!!)\n",
+ (unsigned long) GC_large_free_bytes);
}
GC_printf1("Total of %lu bytes on free list\n", (unsigned long)total_free);
}
+/* Return the free list index on which the block described by the header */
+/* appears, or -1 if it appears nowhere. */
+int free_list_index_of(wanted)
+hdr * wanted;
+{
+ struct hblk * h;
+ hdr * hhdr;
+ int i;
+
+ for (i = 0; i <= N_HBLK_FLS; ++i) {
+ h = GC_hblkfreelist[i];
+ while (h != 0) {
+ hhdr = HDR(h);
+ if (hhdr == wanted) return i;
+ h = hhdr -> hb_next;
+ }
+ }
+ return -1;
+}
+
+void GC_dump_regions()
+{
+ unsigned i;
+ ptr_t start, end;
+ ptr_t p;
+ size_t bytes;
+ hdr *hhdr;
+ for (i = 0; i < GC_n_heap_sects; ++i) {
+ start = GC_heap_sects[i].hs_start;
+ bytes = GC_heap_sects[i].hs_bytes;
+ end = start + bytes;
+ /* Merge in contiguous sections. */
+ while (i+1 < GC_n_heap_sects && GC_heap_sects[i+1].hs_start == end) {
+ ++i;
+ end = GC_heap_sects[i].hs_start + GC_heap_sects[i].hs_bytes;
+ }
+ GC_printf2("***Section from 0x%lx to 0x%lx\n", start, end);
+ for (p = start; p < end;) {
+ hhdr = HDR(p);
+ GC_printf1("\t0x%lx ", (unsigned long)p);
+ if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
+ GC_printf1("Missing header!!\n", hhdr);
+ p += HBLKSIZE;
+ continue;
+ }
+ if (HBLK_IS_FREE(hhdr)) {
+ int correct_index = GC_hblk_fl_from_blocks(
+ divHBLKSZ(hhdr -> hb_sz));
+ int actual_index;
+
+ GC_printf1("\tfree block of size 0x%lx bytes",
+ (unsigned long)(hhdr -> hb_sz));
+ if (IS_MAPPED(hhdr)) {
+ GC_printf0("\n");
+ } else {
+ GC_printf0("(unmapped)\n");
+ }
+ actual_index = free_list_index_of(hhdr);
+ if (-1 == actual_index) {
+ GC_printf1("\t\tBlock not on free list %ld!!\n",
+ correct_index);
+ } else if (correct_index != actual_index) {
+ GC_printf2("\t\tBlock on list %ld, should be on %ld!!\n",
+ actual_index, correct_index);
+ }
+ p += hhdr -> hb_sz;
+ } else {
+ GC_printf1("\tused for blocks of size 0x%lx bytes\n",
+ (unsigned long)WORDS_TO_BYTES(hhdr -> hb_sz));
+ p += HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
+ }
+ }
+ }
+}
+
# endif /* NO_DEBUGGING */
/* Initialize hdr for a block containing the indicated size and */
@@ -100,20 +208,265 @@ unsigned char flags;
return(TRUE);
}
-#ifdef EXACT_FIRST
-# define LAST_TRIP 2
-#else
-# define LAST_TRIP 1
-#endif
+#define FL_UNKNOWN -1
+/*
+ * Remove hhdr from the appropriate free list.
+ * We assume it is on the nth free list, or on the size
+ * appropriate free list if n is FL_UNKNOWN.
+ */
+void GC_remove_from_fl(hhdr, n)
+hdr * hhdr;
+int n;
+{
+ GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
+ if (hhdr -> hb_prev == 0) {
+ int index;
+ if (FL_UNKNOWN == n) {
+ index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
+ } else {
+ index = n;
+ }
+ GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);
+ GC_hblkfreelist[index] = hhdr -> hb_next;
+ } else {
+ PHDR(hhdr) -> hb_next = hhdr -> hb_next;
+ }
+ if (0 != hhdr -> hb_next) {
+ GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
+ NHDR(hhdr) -> hb_prev = hhdr -> hb_prev;
+ }
+}
+
+/*
+ * Return a pointer to the free block ending just before h, if any.
+ */
+struct hblk * GC_free_block_ending_at(h)
+struct hblk *h;
+{
+ struct hblk * p = h - 1;
+ hdr * phdr = HDR(p);
+
+ while (0 != phdr && IS_FORWARDING_ADDR_OR_NIL(phdr)) {
+ p = FORWARDED_ADDR(p,phdr);
+ phdr = HDR(p);
+ }
+ if (0 != phdr && HBLK_IS_FREE(phdr)) return p;
+ p = GC_prev_block(h - 1);
+ if (0 != p) {
+ phdr = HDR(p);
+ if (HBLK_IS_FREE(phdr) && (ptr_t)p + phdr -> hb_sz == (ptr_t)h) {
+ return p;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Add hhdr to the appropriate free list.
+ * We maintain individual free lists sorted by address.
+ */
+void GC_add_to_fl(h, hhdr)
+struct hblk *h;
+hdr * hhdr;
+{
+ int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
+ struct hblk *second = GC_hblkfreelist[index];
+# ifdef GC_ASSERTIONS
+ struct hblk *next = (struct hblk *)((word)h + hhdr -> hb_sz);
+ hdr * nexthdr = HDR(next);
+ struct hblk *prev = GC_free_block_ending_at(h);
+ hdr * prevhdr = HDR(prev);
+ GC_ASSERT(nexthdr == 0 || !HBLK_IS_FREE(nexthdr) || !IS_MAPPED(nexthdr));
+ GC_ASSERT(prev == 0 || !HBLK_IS_FREE(prevhdr) || !IS_MAPPED(prevhdr));
+# endif
+ GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
+ GC_hblkfreelist[index] = h;
+ hhdr -> hb_next = second;
+ hhdr -> hb_prev = 0;
+ if (0 != second) HDR(second) -> hb_prev = h;
+ GC_invalidate_map(hhdr);
+}
+
+#ifdef USE_MUNMAP
+
+/* Unmap blocks that haven't been recently touched.  This is the only */
+/* way blocks are ever unmapped.                                      */
+void GC_unmap_old(void)
+{
+ struct hblk * h;
+ hdr * hhdr;
+ word sz;
+ unsigned short last_rec, threshold;
+ int i;
+# define UNMAP_THRESHOLD 6
+
+ for (i = 0; i <= N_HBLK_FLS; ++i) {
+ for (h = GC_hblkfreelist[i]; 0 != h; h = hhdr -> hb_next) {
+ hhdr = HDR(h);
+ if (!IS_MAPPED(hhdr)) continue;
+ threshold = (unsigned short)(GC_gc_no - UNMAP_THRESHOLD);
+ last_rec = hhdr -> hb_last_reclaimed;
+ if (last_rec > GC_gc_no
+ || last_rec < threshold && threshold < GC_gc_no
+ /* not recently wrapped */) {
+ sz = hhdr -> hb_sz;
+ GC_unmap((ptr_t)h, sz);
+ hhdr -> hb_flags |= WAS_UNMAPPED;
+ }
+ }
+ }
+}
+
+/* Merge all unmapped blocks that are adjacent to other free */
+/* blocks. This may involve remapping, since all blocks are either */
+/* fully mapped or fully unmapped. */
+void GC_merge_unmapped(void)
+{
+ struct hblk * h, *next;
+ hdr * hhdr, *nexthdr;
+ word size, nextsize;
+ int i;
+
+ for (i = 0; i <= N_HBLK_FLS; ++i) {
+ h = GC_hblkfreelist[i];
+ while (h != 0) {
+ hhdr = HDR(h);
+ size = hhdr->hb_sz;
+ next = (struct hblk *)((word)h + size);
+ nexthdr = HDR(next);
+ /* Coalesce with successor, if possible */
+ if (0 != nexthdr && HBLK_IS_FREE(nexthdr)) {
+ nextsize = nexthdr -> hb_sz;
+ if (IS_MAPPED(hhdr)) {
+ GC_ASSERT(!IS_MAPPED(nexthdr));
+ /* make both consistent, so that we can merge */
+ if (size > nextsize) {
+ GC_remap((ptr_t)next, nextsize);
+ } else {
+ GC_unmap((ptr_t)h, size);
+ hhdr -> hb_flags |= WAS_UNMAPPED;
+ }
+ } else if (IS_MAPPED(nexthdr)) {
+ GC_ASSERT(!IS_MAPPED(hhdr));
+ if (size > nextsize) {
+ GC_unmap((ptr_t)next, nextsize);
+ } else {
+ GC_remap((ptr_t)h, size);
+ hhdr -> hb_flags &= ~WAS_UNMAPPED;
+ }
+ } else {
+ /* Unmap any gap in the middle */
+ GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nexthdr -> hb_sz);
+ }
+ /* If they are both unmapped, we merge, but leave unmapped. */
+ GC_remove_from_fl(hhdr, i);
+ GC_remove_from_fl(nexthdr, FL_UNKNOWN);
+ hhdr -> hb_sz += nexthdr -> hb_sz;
+ GC_remove_header(next);
+ GC_add_to_fl(h, hhdr);
+ /* Start over at beginning of list */
+ h = GC_hblkfreelist[i];
+	    } else /* not mergeable with successor */ {
+ h = hhdr -> hb_next;
+ }
+ } /* while (h != 0) ... */
+ } /* for ... */
+}
+
+#endif /* USE_MUNMAP */
+
+/*
+ * Return a pointer to a block starting at h of length bytes.
+ * Memory for the block is mapped.
+ * Remove the block from its free list, and return the remainder (if any)
+ * to its appropriate free list.
+ * May fail by returning 0.
+ * The header for the returned block must be set up by the caller.
+ * If the return value is not 0, then hhdr is the header for it.
+ */
+struct hblk * GC_get_first_part(h, hhdr, bytes, index)
+struct hblk *h;
+hdr * hhdr;
+word bytes;
+int index;
+{
+ word total_size = hhdr -> hb_sz;
+ struct hblk * rest;
+ hdr * rest_hdr;
+
+ GC_ASSERT((total_size & (HBLKSIZE-1)) == 0);
+ GC_remove_from_fl(hhdr, index);
+ if (total_size == bytes) return h;
+ rest = (struct hblk *)((word)h + bytes);
+ if (!GC_install_header(rest)) return(0);
+ rest_hdr = HDR(rest);
+ rest_hdr -> hb_sz = total_size - bytes;
+ rest_hdr -> hb_flags = 0;
+# ifdef GC_ASSERTIONS
+ // Mark h not free, to avoid assertion about adjacent free blocks.
+ hhdr -> hb_map = 0;
+# endif
+ GC_add_to_fl(rest, rest_hdr);
+ return h;
+}
+
+/*
+ * H is a free block. N points at an address inside it.
+ * A new header for n has already been set up. Fix up h's header
+ * to reflect the fact that it is being split, move it to the
+ * appropriate free list.
+ * N replaces h in the original free list.
+ *
+ * Nhdr is not completely filled in, since it is about to be allocated.
+ * It may in fact end up on the wrong free list for its size.
+ * (Hence adding it to a free list is silly. But this path is hopefully
+ * rare enough that it doesn't matter. The code is cleaner this way.)
+ */
+void GC_split_block(h, hhdr, n, nhdr, index)
+struct hblk *h;
+hdr * hhdr;
+struct hblk *n;
+hdr * nhdr;
+int index; /* Index of free list */
+{
+ word total_size = hhdr -> hb_sz;
+ word h_size = (word)n - (word)h;
+ struct hblk *prev = hhdr -> hb_prev;
+ struct hblk *next = hhdr -> hb_next;
-word GC_max_hblk_size = HBLKSIZE;
+ /* Replace h with n on its freelist */
+ nhdr -> hb_prev = prev;
+ nhdr -> hb_next = next;
+ nhdr -> hb_sz = total_size - h_size;
+ nhdr -> hb_flags = 0;
+ if (0 != prev) {
+ HDR(prev) -> hb_next = n;
+ } else {
+ GC_hblkfreelist[index] = n;
+ }
+ if (0 != next) {
+ HDR(next) -> hb_prev = n;
+ }
+# ifdef GC_ASSERTIONS
+ nhdr -> hb_map = 0; /* Don't fail test for consecutive */
+ /* free blocks in GC_add_to_fl. */
+# endif
+# ifdef USE_MUNMAP
+ hhdr -> hb_last_reclaimed = GC_gc_no;
+# endif
+ hhdr -> hb_sz = h_size;
+ GC_add_to_fl(h, hhdr);
+ GC_invalidate_map(nhdr);
+}
+struct hblk * GC_allochblk_nth();
+
/*
* Allocate (and return pointer to) a heap block
- * for objects of size sz words.
+ * for objects of size sz words, searching the nth free list.
*
* NOTE: We set obj_map field in header correctly.
- * Caller is resposnsible for building an object freelist in block.
+ * Caller is responsible for building an object freelist in block.
*
* We clear the block if it is destined for large objects, and if
* kind requires that newly allocated objects be cleared.
@@ -124,48 +477,42 @@ word sz;
int kind;
unsigned char flags; /* IGNORE_OFF_PAGE or 0 */
{
- register struct hblk *thishbp;
- register hdr * thishdr; /* Header corr. to thishbp */
+ int start_list = GC_hblk_fl_from_blocks(OBJ_SZ_TO_BLOCKS(sz));
+ int i;
+ for (i = start_list; i <= N_HBLK_FLS; ++i) {
+ struct hblk * result = GC_allochblk_nth(sz, kind, flags, i);
+ if (0 != result) return result;
+ }
+ return 0;
+}
+/*
+ * The same, but with search restricted to nth free list.
+ */
+struct hblk *
+GC_allochblk_nth(sz, kind, flags, n)
+word sz;
+int kind;
+unsigned char flags; /* IGNORE_OFF_PAGE or 0 */
+int n;
+{
register struct hblk *hbp;
register hdr * hhdr; /* Header corr. to hbp */
- struct hblk *prevhbp;
- register hdr * phdr; /* Header corr. to prevhbp */
+ register struct hblk *thishbp;
+ register hdr * thishdr; /* Header corr. to hbp */
signed_word size_needed; /* number of bytes in requested objects */
signed_word size_avail; /* bytes available in this block */
- int trip_count = 0;
size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);
- if ((word)size_needed > GC_max_hblk_size)
- GC_max_hblk_size = size_needed;
/* search for a big enough block in free list */
- hbp = GC_savhbp;
+ hbp = GC_hblkfreelist[n];
hhdr = HDR(hbp);
- for(;;) {
-
- prevhbp = hbp;
- phdr = hhdr;
- hbp = (prevhbp == 0? GC_hblkfreelist : phdr->hb_next);
- hhdr = HDR(hbp);
-
- if( prevhbp == GC_savhbp) {
- if (trip_count == LAST_TRIP) return(0);
- ++trip_count;
- }
-
- if( hbp == 0 ) continue;
-
+ for(; 0 != hbp; hbp = hhdr -> hb_next, hhdr = HDR(hbp)) {
size_avail = hhdr->hb_sz;
-# ifdef EXACT_FIRST
- if (trip_count <= 1 && size_avail != size_needed) continue;
-# endif
if (size_avail < size_needed) continue;
# ifdef PRESERVE_LAST
if (size_avail != size_needed
- && !GC_incremental
- && (word)size_needed <= GC_max_hblk_size/2
- && GC_in_last_heap_sect((ptr_t)hbp)
- && GC_should_collect()) {
+ && !GC_incremental && GC_should_collect()) {
continue;
}
# endif
@@ -176,13 +523,14 @@ unsigned char flags; /* IGNORE_OFF_PAGE or 0 */
signed_word next_size;
thishbp = hhdr -> hb_next;
- if (thishbp == 0) thishbp = GC_hblkfreelist;
- thishdr = HDR(thishbp);
- next_size = (signed_word)(thishdr -> hb_sz);
- if (next_size < size_avail
+ if (thishbp != 0) {
+ thishdr = HDR(thishbp);
+ next_size = (signed_word)(thishdr -> hb_sz);
+ if (next_size < size_avail
&& next_size >= size_needed
&& !GC_is_black_listed(thishbp, (word)size_needed)) {
continue;
+ }
}
}
if ( !IS_UNCOLLECTABLE(kind) &&
@@ -204,19 +552,21 @@ unsigned char flags; /* IGNORE_OFF_PAGE or 0 */
thishbp = lasthbp;
if (size_avail >= size_needed) {
if (thishbp != hbp && GC_install_header(thishbp)) {
+ /* Make sure it's mapped before we mangle it. */
+# ifdef USE_MUNMAP
+ if (!IS_MAPPED(hhdr)) {
+ GC_remap((ptr_t)hbp, size_avail);
+ hhdr -> hb_flags &= ~WAS_UNMAPPED;
+ }
+# endif
/* Split the block at thishbp */
thishdr = HDR(thishbp);
- /* GC_invalidate_map not needed, since we will */
- /* allocate this block. */
- thishdr -> hb_next = hhdr -> hb_next;
- thishdr -> hb_sz = size_avail;
- hhdr -> hb_sz = (ptr_t)thishbp - (ptr_t)hbp;
- hhdr -> hb_next = thishbp;
+ GC_split_block(hbp, hhdr, thishbp, thishdr, n);
/* Advance to thishbp */
- prevhbp = hbp;
- phdr = hhdr;
hbp = thishbp;
hhdr = thishdr;
+ /* We must now allocate thishbp, since it may */
+ /* be on the wrong free list. */
}
} else if (size_needed > (signed_word)BL_LIMIT
&& orig_avail - size_needed
@@ -224,12 +574,10 @@ unsigned char flags; /* IGNORE_OFF_PAGE or 0 */
/* Punt, since anything else risks unreasonable heap growth. */
WARN("Needed to allocate blacklisted block at 0x%lx\n",
(word)hbp);
- thishbp = hbp;
size_avail = orig_avail;
- } else if (size_avail == 0
- && size_needed == HBLKSIZE
- && prevhbp != 0) {
-# ifndef FIND_LEAK
+ } else if (size_avail == 0 && size_needed == HBLKSIZE
+ && IS_MAPPED(hhdr)) {
+ if (!GC_find_leak) {
static unsigned count = 0;
/* The block is completely blacklisted. We need */
@@ -241,11 +589,14 @@ unsigned char flags; /* IGNORE_OFF_PAGE or 0 */
/* Allocate and drop the block in small chunks, to */
/* maximize the chance that we will recover some */
/* later. */
- struct hblk * limit = hbp + (hhdr->hb_sz/HBLKSIZE);
+ word total_size = hhdr -> hb_sz;
+ struct hblk * limit = hbp + divHBLKSZ(total_size);
struct hblk * h;
+ struct hblk * prev = hhdr -> hb_prev;
- GC_words_wasted += hhdr->hb_sz;
- phdr -> hb_next = hhdr -> hb_next;
+ GC_words_wasted += total_size;
+ GC_large_free_bytes -= total_size;
+ GC_remove_from_fl(hhdr, n);
for (h = hbp; h < limit; h++) {
if (h == hbp || GC_install_header(h)) {
hhdr = HDR(h);
@@ -254,70 +605,53 @@ unsigned char flags; /* IGNORE_OFF_PAGE or 0 */
BYTES_TO_WORDS(HBLKSIZE - HDR_BYTES),
PTRFREE, 0); /* Can't fail */
if (GC_debugging_started) {
- BZERO(hbp + HDR_BYTES, HBLKSIZE - HDR_BYTES);
+ BZERO(h + HDR_BYTES, HBLKSIZE - HDR_BYTES);
}
}
}
/* Restore hbp to point at free block */
- if (GC_savhbp == hbp) GC_savhbp = prevhbp;
- hbp = prevhbp;
- hhdr = phdr;
- if (hbp == GC_savhbp) --trip_count;
+ hbp = prev;
+ if (0 == hbp) {
+ return GC_allochblk_nth(sz, kind, flags, n);
+ }
+ hhdr = HDR(hbp);
}
-# endif
+ }
}
}
if( size_avail >= size_needed ) {
- /* found a big enough block */
- /* let thishbp --> the block */
- /* set prevhbp, hbp to bracket it */
- thishbp = hbp;
- thishdr = hhdr;
- if( size_avail == size_needed ) {
- hbp = hhdr->hb_next;
- hhdr = HDR(hbp);
- } else {
- hbp = (struct hblk *)
- (((word)thishbp) + size_needed);
- if (!GC_install_header(hbp)) {
- hbp = thishbp;
- continue;
- }
- hhdr = HDR(hbp);
- GC_invalidate_map(hhdr);
- hhdr->hb_next = thishdr->hb_next;
- hhdr->hb_sz = size_avail - size_needed;
- }
- /* remove *thishbp from hblk freelist */
- if( prevhbp == 0 ) {
- GC_hblkfreelist = hbp;
- } else {
- phdr->hb_next = hbp;
- }
- /* save current list search position */
- GC_savhbp = hbp;
+# ifdef USE_MUNMAP
+ if (!IS_MAPPED(hhdr)) {
+ GC_remap((ptr_t)hbp, size_avail);
+ hhdr -> hb_flags &= ~WAS_UNMAPPED;
+ }
+# endif
+ /* hbp may be on the wrong freelist; the parameter n */
+ /* is important. */
+ hbp = GC_get_first_part(hbp, hhdr, size_needed, n);
break;
}
}
+
+ if (0 == hbp) return 0;
/* Notify virtual dirty bit implementation that we are about to write. */
- GC_write_hint(thishbp);
- /* This should deal better with large blocks. */
+ GC_write_hint(hbp);
/* Add it to map of valid blocks */
- if (!GC_install_counts(thishbp, (word)size_needed)) return(0);
+ if (!GC_install_counts(hbp, (word)size_needed)) return(0);
/* This leaks memory under very rare conditions. */
/* Set up header */
- if (!setup_header(thishdr, sz, kind, flags)) {
- GC_remove_counts(thishbp, (word)size_needed);
+ if (!setup_header(hhdr, sz, kind, flags)) {
+ GC_remove_counts(hbp, (word)size_needed);
return(0); /* ditto */
}
/* Clear block if necessary */
if (GC_debugging_started
|| sz > MAXOBJSZ && GC_obj_kinds[kind].ok_init) {
- BZERO(thishbp + HDR_BYTES, size_needed - HDR_BYTES);
+ BZERO(hbp + HDR_BYTES, size_needed - HDR_BYTES);
}
/* We just successfully allocated a block. Restart count of */
@@ -327,8 +661,11 @@ unsigned char flags; /* IGNORE_OFF_PAGE or 0 */
GC_fail_count = 0;
}
+
+ GC_large_free_bytes -= size_needed;
- return( thishbp );
+ GC_ASSERT(IS_MAPPED(hhdr));
+ return( hbp );
}
struct hblk * GC_freehblk_ptr = 0; /* Search position hint for GC_freehblk */
@@ -341,75 +678,50 @@ struct hblk * GC_freehblk_ptr = 0; /* Search position hint for GC_freehblk */
* All mark words are assumed to be cleared.
*/
void
-GC_freehblk(p)
-register struct hblk *p;
+GC_freehblk(hbp)
+struct hblk *hbp;
{
-register hdr *phdr; /* Header corresponding to p */
-register struct hblk *hbp, *prevhbp;
-register hdr *hhdr, *prevhdr;
-register signed_word size;
+struct hblk *next, *prev;
+hdr *hhdr, *prevhdr, *nexthdr;
+signed_word size;
- /* GC_savhbp may become invalid due to coalescing. Clear it. */
- GC_savhbp = (struct hblk *)0;
- phdr = HDR(p);
- size = phdr->hb_sz;
- size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(size);
- GC_remove_counts(p, (word)size);
- phdr->hb_sz = size;
- GC_invalidate_map(phdr);
- prevhbp = 0;
-
- /* The following optimization was suggested by David Detlefs. */
- /* Note that the header cannot be NIL, since there cannot be an */
- /* intervening call to GC_freehblk without resetting */
- /* GC_freehblk_ptr. */
- if (GC_freehblk_ptr != 0 &&
- HDR(GC_freehblk_ptr)->hb_map == GC_invalid_map &&
- (ptr_t)GC_freehblk_ptr < (ptr_t)p) {
- hbp = GC_freehblk_ptr;
- } else {
- hbp = GC_hblkfreelist;
- };
hhdr = HDR(hbp);
-
- while( (hbp != 0) && (hbp < p) ) {
- prevhbp = hbp;
- prevhdr = hhdr;
- hbp = hhdr->hb_next;
- hhdr = HDR(hbp);
- }
- GC_freehblk_ptr = prevhbp;
+ size = hhdr->hb_sz;
+ size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(size);
+ GC_remove_counts(hbp, (word)size);
+ hhdr->hb_sz = size;
/* Check for duplicate deallocation in the easy case */
- if (hbp != 0 && (ptr_t)p + size > (ptr_t)hbp
- || prevhbp != 0 && (ptr_t)prevhbp + prevhdr->hb_sz > (ptr_t)p) {
+ if (HBLK_IS_FREE(hhdr)) {
GC_printf1("Duplicate large block deallocation of 0x%lx\n",
- (unsigned long) p);
- GC_printf2("Surrounding free blocks are 0x%lx and 0x%lx\n",
- (unsigned long) prevhbp, (unsigned long) hbp);
+ (unsigned long) hbp);
}
+ GC_ASSERT(IS_MAPPED(hhdr));
+ GC_invalidate_map(hhdr);
+ next = (struct hblk *)((word)hbp + size);
+ nexthdr = HDR(next);
+ prev = GC_free_block_ending_at(hbp);
/* Coalesce with successor, if possible */
- if( (((word)p)+size) == ((word)hbp) ) {
- phdr->hb_next = hhdr->hb_next;
- phdr->hb_sz += hhdr->hb_sz;
- GC_remove_header(hbp);
- } else {
- phdr->hb_next = hbp;
+ if(0 != nexthdr && HBLK_IS_FREE(nexthdr) && IS_MAPPED(nexthdr)) {
+ GC_remove_from_fl(nexthdr, FL_UNKNOWN);
+ hhdr -> hb_sz += nexthdr -> hb_sz;
+ GC_remove_header(next);
+ }
+ /* Coalesce with predecessor, if possible. */
+ if (0 != prev) {
+ prevhdr = HDR(prev);
+ if (IS_MAPPED(prevhdr)) {
+ GC_remove_from_fl(prevhdr, FL_UNKNOWN);
+ prevhdr -> hb_sz += hhdr -> hb_sz;
+ GC_remove_header(hbp);
+ hbp = prev;
+ hhdr = prevhdr;
+ }
}
-
- if( prevhbp == 0 ) {
- GC_hblkfreelist = p;
- } else if( (((word)prevhbp) + prevhdr->hb_sz)
- == ((word)p) ) {
- /* Coalesce with predecessor */
- prevhdr->hb_next = phdr->hb_next;
- prevhdr->hb_sz += phdr->hb_sz;
- GC_remove_header(p);
- } else {
- prevhdr->hb_next = p;
- }
+ GC_large_free_bytes += size;
+ GC_add_to_fl(hbp, hhdr);
}
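
The heart of the allchblk.c rewrite is the switch from one address-sorted
free list to N_HBLK_FLS+1 size-binned lists. A standalone sketch of the
binning arithmetic, with the three thresholds copied from the hunk above
(the driver and its sample block counts are illustrative only):

    #include <stdio.h>

    #define UNIQUE_THRESHOLD 32   /* sizes up to 32 blocks get their own list */
    #define HUGE_THRESHOLD  256   /* sizes of 256+ blocks share one list */
    #define FL_COMPRESSION    8   /* in between, 8 distinct sizes per bin */
    #define N_HBLK_FLS ((HUGE_THRESHOLD - UNIQUE_THRESHOLD)/FL_COMPRESSION \
                        + UNIQUE_THRESHOLD)

    /* Mirrors GC_hblk_fl_from_blocks from the patch. */
    static int fl_from_blocks(unsigned blocks_needed)
    {
        if (blocks_needed <= UNIQUE_THRESHOLD) return blocks_needed;
        if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
        return (blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
               + UNIQUE_THRESHOLD;
    }

    int main(void)
    {
        unsigned sizes[] = { 1, 32, 33, 40, 255, 256, 1024 };
        unsigned i;
        for (i = 0; i < sizeof sizes / sizeof sizes[0]; ++i)
            printf("%4u blocks -> free list %d of %d\n",
                   sizes[i], fl_from_blocks(sizes[i]), N_HBLK_FLS);
        return 0;
    }

GC_allochblk then starts at the bin for the requested size and walks upward
through GC_allochblk_nth, so a first-fit search never visits lists whose
blocks are too small.
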
diff --git a/boehm-gc/alloc.c b/boehm-gc/alloc.c
index 171dc780b86..65bb602b22e 100644
--- a/boehm-gc/alloc.c
+++ b/boehm-gc/alloc.c
@@ -1,7 +1,8 @@
/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
- * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1991-1996 by Xerox Corporation. All rights reserved.
* Copyright (c) 1998 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
@@ -62,8 +63,16 @@ word GC_gc_no = 0;
int GC_incremental = 0; /* By default, stop the world. */
#endif
-int GC_full_freq = 4; /* Every 5th collection is a full */
- /* collection. */
+int GC_full_freq = 19; /* Every 20th collection is a full */
+ /* collection, whether we need it */
+ /* or not. */
+
+GC_bool GC_need_full_gc = FALSE;
+	/* Need full GC due to heap growth.	*/
+
+#define USED_HEAP_SIZE (GC_heapsize - GC_large_free_bytes)
+
+word GC_used_heap_size_after_full = 0;
char * GC_copyright[] =
{"Copyright 1988,1989 Hans-J. Boehm and Alan J. Demers ",
@@ -82,7 +91,7 @@ extern signed_word GC_mem_found; /* Number of reclaimed longwords */
GC_bool GC_dont_expand = 0;
-word GC_free_space_divisor = 4;
+word GC_free_space_divisor = 3;
extern GC_bool GC_collection_in_progress();
/* Collection is in progress, or was abandoned. */
@@ -130,18 +139,22 @@ static word min_words_allocd()
int dummy;
register signed_word stack_size = (ptr_t)(&dummy) - GC_stackbottom;
# endif
- register word total_root_size; /* includes double stack size, */
+ word total_root_size; /* includes double stack size, */
/* since the stack is expensive */
/* to scan. */
+ word scan_size; /* Estimate of memory to be scanned */
+ /* during normal GC. */
if (stack_size < 0) stack_size = -stack_size;
total_root_size = 2 * stack_size + GC_root_size;
+ scan_size = BYTES_TO_WORDS(GC_heapsize - GC_large_free_bytes
+ + (GC_large_free_bytes >> 2)
+ /* use a bit more of large empty heap */
+ + total_root_size);
if (GC_incremental) {
- return(BYTES_TO_WORDS(GC_heapsize + total_root_size)
- / (2 * GC_free_space_divisor));
+ return scan_size / (2 * GC_free_space_divisor);
} else {
- return(BYTES_TO_WORDS(GC_heapsize + total_root_size)
- / GC_free_space_divisor);
+ return scan_size / GC_free_space_divisor;
}
}
@@ -206,6 +219,7 @@ GC_bool GC_should_collect()
return(GC_adj_words_allocd() >= min_words_allocd());
}
+
void GC_notify_full_gc()
{
if (GC_start_call_back != (void (*)())0) {
@@ -213,6 +227,8 @@ void GC_notify_full_gc()
}
}
+GC_bool GC_is_full_gc = FALSE;
+
/*
* Initiate a garbage collection if appropriate.
* Choose judiciously
@@ -222,7 +238,6 @@ void GC_notify_full_gc()
void GC_maybe_gc()
{
static int n_partial_gcs = 0;
- GC_bool is_full_gc = FALSE;
if (GC_should_collect()) {
if (!GC_incremental) {
@@ -230,7 +245,7 @@ void GC_maybe_gc()
GC_gcollect_inner();
n_partial_gcs = 0;
return;
- } else if (n_partial_gcs >= GC_full_freq) {
+ } else if (GC_need_full_gc || n_partial_gcs >= GC_full_freq) {
# ifdef PRINTSTATS
GC_printf2(
"***>Full mark for collection %lu after %ld allocd bytes\n",
@@ -242,7 +257,7 @@ void GC_maybe_gc()
GC_clear_marks();
n_partial_gcs = 0;
GC_notify_full_gc();
- is_full_gc = TRUE;
+ GC_is_full_gc = TRUE;
} else {
n_partial_gcs++;
}
@@ -256,7 +271,7 @@ void GC_maybe_gc()
# endif
GC_finish_collection();
} else {
- if (!is_full_gc) {
+ if (!GC_is_full_gc) {
/* Count this as the first attempt */
GC_n_attempts++;
}
@@ -303,6 +318,7 @@ GC_stop_func stop_func;
# ifdef SAVE_CALL_CHAIN
GC_save_callers(GC_last_stack);
# endif
+ GC_is_full_gc = TRUE;
if (!GC_stopped_mark(stop_func)) {
if (!GC_incremental) {
/* We're partially done and have no way to complete or use */
@@ -470,7 +486,7 @@ void GC_finish_collection()
# ifdef GATHERSTATS
GC_mem_found = 0;
# endif
-# ifdef FIND_LEAK
+ if (GC_find_leak) {
/* Mark all objects on the free list. All objects should be */
/* marked when we're done. */
{
@@ -493,25 +509,26 @@ void GC_finish_collection()
}
}
}
- /* Check that everything is marked */
GC_start_reclaim(TRUE);
-# else
+ /* The above just checks; it doesn't really reclaim anything. */
+ }
- GC_finalize();
-# ifdef STUBBORN_ALLOC
- GC_clean_changing_list();
-# endif
-
-# ifdef PRINTTIMES
- GET_TIME(finalize_time);
-# endif
-
- /* Clear free list mark bits, in case they got accidentally marked */
- /* Note: HBLKPTR(p) == pointer to head of block containing *p */
- /* Also subtract memory remaining from GC_mem_found count. */
- /* Note that composite objects on free list are cleared. */
- /* Thus accidentally marking a free list is not a problem; only */
- /* objects on the list itself will be marked, and that's fixed here. */
+ GC_finalize();
+# ifdef STUBBORN_ALLOC
+ GC_clean_changing_list();
+# endif
+
+# ifdef PRINTTIMES
+ GET_TIME(finalize_time);
+# endif
+
+ /* Clear free list mark bits, in case they got accidentally marked */
+ /* Note: HBLKPTR(p) == pointer to head of block containing *p */
+ /* (or GC_find_leak is set and they were intentionally marked.) */
+ /* Also subtract memory remaining from GC_mem_found count. */
+ /* Note that composite objects on free list are cleared. */
+ /* Thus accidentally marking a free list is not a problem; only */
+ /* objects on the list itself will be marked, and that's fixed here. */
{
register word size; /* current object size */
register ptr_t p; /* pointer to current object */
@@ -537,27 +554,37 @@ void GC_finish_collection()
}
-# ifdef PRINTSTATS
+# ifdef PRINTSTATS
GC_printf1("Bytes recovered before sweep - f.l. count = %ld\n",
(long)WORDS_TO_BYTES(GC_mem_found));
-# endif
-
+# endif
/* Reconstruct free lists to contain everything not marked */
- GC_start_reclaim(FALSE);
-
-# endif /* !FIND_LEAK */
+ GC_start_reclaim(FALSE);
+ if (GC_is_full_gc) {
+ GC_used_heap_size_after_full = USED_HEAP_SIZE;
+ GC_need_full_gc = FALSE;
+ } else {
+ GC_need_full_gc =
+ BYTES_TO_WORDS(USED_HEAP_SIZE - GC_used_heap_size_after_full)
+ > min_words_allocd();
+ }
# ifdef PRINTSTATS
GC_printf2(
- "Immediately reclaimed %ld bytes in heap of size %lu bytes\n",
+ "Immediately reclaimed %ld bytes in heap of size %lu bytes",
(long)WORDS_TO_BYTES(GC_mem_found),
(unsigned long)GC_heapsize);
- GC_printf2("%lu (atomic) + %lu (composite) collectable bytes in use\n",
- (unsigned long)WORDS_TO_BYTES(GC_atomic_in_use),
- (unsigned long)WORDS_TO_BYTES(GC_composite_in_use));
+# ifdef USE_MUNMAP
+ GC_printf1("(%lu unmapped)", GC_unmapped_bytes);
+# endif
+ GC_printf2(
+ "\n%lu (atomic) + %lu (composite) collectable bytes in use\n",
+ (unsigned long)WORDS_TO_BYTES(GC_atomic_in_use),
+ (unsigned long)WORDS_TO_BYTES(GC_composite_in_use));
# endif
GC_n_attempts = 0;
+ GC_is_full_gc = FALSE;
/* Reset or increment counters for next cycle */
GC_words_allocd_before_gc += GC_words_allocd;
GC_non_gc_bytes_at_gc = GC_non_gc_bytes;
@@ -565,6 +592,9 @@ void GC_finish_collection()
GC_words_wasted = 0;
GC_mem_freed = 0;
+# ifdef USE_MUNMAP
+ GC_unmap_old();
+# endif
# ifdef PRINTTIMES
GET_TIME(done_time);
GC_printf2("Finalize + initiate sweep took %lu + %lu msecs\n",
@@ -608,7 +638,7 @@ void GC_gcollect GC_PROTO(())
word GC_n_heap_sects = 0; /* Number of sections currently in heap. */
/*
- * Use the chunk of memory starting at p of syze bytes as part of the heap.
+ * Use the chunk of memory starting at p of size bytes as part of the heap.
* Assumes p is HBLKSIZE aligned, and bytes is a multiple of HBLKSIZE.
*/
void GC_add_to_heap(p, bytes)
@@ -616,6 +646,7 @@ struct hblk *p;
word bytes;
{
word words;
+ hdr * phdr;
if (GC_n_heap_sects >= MAX_HEAP_SECTS) {
ABORT("Too many heap sections: Increase MAXHINCR or MAX_HEAP_SECTS");
@@ -630,7 +661,10 @@ word bytes;
GC_heap_sects[GC_n_heap_sects].hs_bytes = bytes;
GC_n_heap_sects++;
words = BYTES_TO_WORDS(bytes - HDR_BYTES);
- HDR(p) -> hb_sz = words;
+ phdr = HDR(p);
+ phdr -> hb_sz = words;
+ phdr -> hb_map = (char *)1; /* A value != GC_invalid_map */
+ phdr -> hb_flags = 0;
GC_freehblk(p);
GC_heapsize += bytes;
if ((ptr_t)p <= GC_least_plausible_heap_addr
@@ -646,27 +680,6 @@ word bytes;
}
}
-#ifdef PRESERVE_LAST
-
-GC_bool GC_protect_last_block = FALSE;
-
-GC_bool GC_in_last_heap_sect(p)
-ptr_t p;
-{
- struct HeapSect * last_heap_sect;
- ptr_t start;
- ptr_t end;
-
- if (!GC_protect_last_block) return FALSE;
- last_heap_sect = &(GC_heap_sects[GC_n_heap_sects-1]);
- start = last_heap_sect -> hs_start;
- if (p < start) return FALSE;
- end = start + last_heap_sect -> hs_bytes;
- if (p >= end) return FALSE;
- return TRUE;
-}
-#endif
-
# if !defined(NO_DEBUGGING)
void GC_print_heap_sects()
{
@@ -797,9 +810,6 @@ word n;
LOCK();
if (!GC_is_initialized) GC_init_inner();
result = (int)GC_expand_hp_inner(divHBLKSZ((word)bytes));
-# ifdef PRESERVE_LAST
- if (result) GC_protect_last_block = FALSE;
-# endif
UNLOCK();
ENABLE_SIGNALS();
return(result);
@@ -813,7 +823,6 @@ GC_bool GC_collect_or_expand(needed_blocks, ignore_off_page)
word needed_blocks;
GC_bool ignore_off_page;
{
-
if (!GC_incremental && !GC_dont_gc && GC_should_collect()) {
GC_notify_full_gc();
GC_gcollect_inner();
@@ -852,12 +861,6 @@ GC_bool ignore_off_page;
GC_printf0("Memory available again ...\n");
}
# endif
-# ifdef PRESERVE_LAST
- if (needed_blocks > 1) GC_protect_last_block = TRUE;
- /* We were forced to expand the heap as the result */
- /* of a large block allocation. Avoid breaking up */
- /* new block into small pieces. */
-# endif
}
}
return(TRUE);
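
min_words_allocd above now drives the collection trigger from an estimated
scan size (live heap, plus a quarter of the large-block free space, plus
roots) rather than from the raw heap size. A worked example of that
arithmetic with made-up numbers, the new GC_free_space_divisor default of 3,
and a 4-byte word assumed for BYTES_TO_WORDS:

    #include <stdio.h>

    int main(void)
    {
        unsigned long heapsize        = 8ul << 20;  /* 8 MB heap (example) */
        unsigned long large_free      = 2ul << 20;  /* 2 MB on large-block lists */
        unsigned long total_root_size = 64ul << 10; /* roots + 2*stack (example) */
        unsigned long divisor         = 3;          /* new GC_free_space_divisor */

        /* scan_size from min_words_allocd: live heap, plus a bit of the
           large empty space, plus the roots, converted to words. */
        unsigned long scan_words = (heapsize - large_free
                                    + (large_free >> 2)
                                    + total_root_size) / 4;

        /* Non-incremental case: collect after this many words allocated. */
        printf("min_words_allocd ~ %lu words\n", scan_words / divisor);
        return 0;
    }

The companion GC_need_full_gc flag, also added above, forces the next
collection to be full once the live heap has grown by more than this
quantum since the last full collection.
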
diff --git a/boehm-gc/blacklst.c b/boehm-gc/blacklst.c
index 0d623c0fcf6..e5a3a26a8cf 100644
--- a/boehm-gc/blacklst.c
+++ b/boehm-gc/blacklst.c
@@ -145,6 +145,13 @@ void GC_promote_black_lists()
if (GC_black_list_spacing < 3 * HBLKSIZE) {
GC_black_list_spacing = 3 * HBLKSIZE;
}
+ if (GC_black_list_spacing > MAXHINCR * HBLKSIZE) {
+ GC_black_list_spacing = MAXHINCR * HBLKSIZE;
+ /* Makes it easier to allocate really huge blocks, which otherwise */
+ /* may have problems with nonuniform blacklist distributions. */
+ /* This way we should always succeed immediately after growing the */
+ /* heap. */
+ }
}
void GC_unpromote_black_lists()
diff --git a/boehm-gc/cord/cordxtra.c b/boehm-gc/cord/cordxtra.c
index b306fbaccea..a5be10de58a 100644
--- a/boehm-gc/cord/cordxtra.c
+++ b/boehm-gc/cord/cordxtra.c
@@ -582,7 +582,7 @@ CORD CORD_from_file_lazy_inner(FILE * f, size_t len)
state -> lf_cache[i] = 0;
}
state -> lf_current = 0;
- GC_register_finalizer(state, CORD_lf_close_proc, 0, 0, 0);
+ GC_REGISTER_FINALIZER(state, CORD_lf_close_proc, 0, 0, 0);
return(CORD_from_fn(CORD_lf_func, state, len));
}
diff --git a/boehm-gc/dbg_mlc.c b/boehm-gc/dbg_mlc.c
index 81516258bf6..41843be1fa0 100644
--- a/boehm-gc/dbg_mlc.c
+++ b/boehm-gc/dbg_mlc.c
@@ -12,8 +12,11 @@
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
-/* Boehm, October 9, 1995 1:16 pm PDT */
+# define I_HIDE_POINTERS
# include "gc_priv.h"
+# ifdef KEEP_BACK_PTRS
+# include "backptr.h"
+# endif
void GC_default_print_heap_obj_proc();
GC_API void GC_register_finalizer_no_order
@@ -31,6 +34,14 @@ GC_API void GC_register_finalizer_no_order
/* Object header */
typedef struct {
+# ifdef KEEP_BACK_PTRS
+ ptr_t oh_back_ptr;
+# define MARKED_FOR_FINALIZATION (ptr_t)(-1)
+ /* Object was marked because it is finalizable. */
+# ifdef ALIGN_DOUBLE
+ word oh_dummy;
+# endif
+# endif
char * oh_string; /* object descriptor string */
word oh_int; /* object descriptor integers */
# ifdef NEED_CALLINFO
@@ -85,6 +96,134 @@ ptr_t p;
return(FALSE);
}
+#ifdef KEEP_BACK_PTRS
+ /* Store back pointer to source in dest, if that appears to be possible. */
+ /* This is not completely safe, since we may mistakenly conclude that */
+ /* dest has a debugging wrapper. But the error probability is very */
+ /* small, and this shouldn't be used in production code. */
+ /* We assume that dest is the real base pointer. Source will usually */
+ /* be a pointer to the interior of an object. */
+ void GC_store_back_pointer(ptr_t source, ptr_t dest)
+ {
+ if (GC_has_debug_info(dest)) {
+ ((oh *)dest) -> oh_back_ptr = (ptr_t)HIDE_POINTER(source);
+ }
+ }
+
+ void GC_marked_for_finalization(ptr_t dest) {
+ GC_store_back_pointer(MARKED_FOR_FINALIZATION, dest);
+ }
+
+ /* Store information about the object referencing dest in *base_p */
+ /* and *offset_p. */
+ /* source is root ==> *base_p = 0, *offset_p = address */
+ /* source is heap object ==> *base_p != 0, *offset_p = offset */
+ /* Returns 1 on success, 0 if source couldn't be determined. */
+ /* Dest can be any address within a heap object. */
+ GC_ref_kind GC_get_back_ptr_info(void *dest, void **base_p, size_t *offset_p)
+ {
+ oh * hdr = (oh *)GC_base(dest);
+ ptr_t bp;
+ ptr_t bp_base;
+ if (!GC_has_debug_info((ptr_t) hdr)) return GC_NO_SPACE;
+ bp = hdr -> oh_back_ptr;
+ if (MARKED_FOR_FINALIZATION == bp) return GC_FINALIZER_REFD;
+ if (0 == bp) return GC_UNREFERENCED;
+ bp = REVEAL_POINTER(bp);
+ bp_base = GC_base(bp);
+ if (0 == bp_base) {
+ *base_p = bp;
+ *offset_p = 0;
+ return GC_REFD_FROM_ROOT;
+ } else {
+ if (GC_has_debug_info(bp_base)) bp_base += sizeof(oh);
+ *base_p = bp_base;
+ *offset_p = bp - bp_base;
+ return GC_REFD_FROM_HEAP;
+ }
+ }
+
+ /* Generate a random heap address. */
+ /* The resulting address is in the heap, but */
+ /* not necessarily inside a valid object. */
+ void *GC_generate_random_heap_address(void)
+ {
+ int i;
+ int heap_offset = random() % GC_heapsize;
+ for (i = 0; i < GC_n_heap_sects; ++ i) {
+ int size = GC_heap_sects[i].hs_bytes;
+ if (heap_offset < size) {
+ return GC_heap_sects[i].hs_start + heap_offset;
+ } else {
+ heap_offset -= size;
+ }
+ }
+ ABORT("GC_generate_random_heap_address: size inconsistency");
+ /*NOTREACHED*/
+ return 0;
+ }
+
+ /* Generate a random address inside a valid marked heap object. */
+ void *GC_generate_random_valid_address(void)
+ {
+ ptr_t result;
+ ptr_t base;
+ for (;;) {
+ result = GC_generate_random_heap_address();
+ base = GC_base(result);
+ if (0 == base) continue;
+ if (!GC_is_marked(base)) continue;
+ return result;
+ }
+ }
+
+ /* Force a garbage collection and generate a backtrace from a */
+ /* random heap address. */
+ void GC_generate_random_backtrace(void)
+ {
+ void * current;
+ int i;
+ void * base;
+ size_t offset;
+ GC_ref_kind source;
+ GC_gcollect();
+ current = GC_generate_random_valid_address();
+ GC_printf1("Chose address 0x%lx in object\n", (unsigned long)current);
+ GC_print_heap_obj(GC_base(current));
+ GC_err_printf0("\n");
+ for (i = 0; ; ++i) {
+ source = GC_get_back_ptr_info(current, &base, &offset);
+ if (GC_UNREFERENCED == source) {
+ GC_err_printf0("Reference could not be found\n");
+ goto out;
+ }
+ if (GC_NO_SPACE == source) {
+ GC_err_printf0("No debug info in object: Can't find reference\n");
+ goto out;
+ }
+ GC_err_printf1("Reachable via %d levels of pointers from ",
+ (unsigned long)i);
+ switch(source) {
+ case GC_REFD_FROM_ROOT:
+ GC_err_printf1("root at 0x%lx\n", (unsigned long)base);
+ goto out;
+ case GC_FINALIZER_REFD:
+ GC_err_printf0("list of finalizable objects\n");
+ goto out;
+ case GC_REFD_FROM_HEAP:
+ GC_err_printf1("offset %ld in object:\n", (unsigned long)offset);
+ /* Take GC_base(base) to get real base, i.e. header. */
+ GC_print_heap_obj(GC_base(base));
+ GC_err_printf0("\n");
+ break;
+ }
+ current = base;
+ }
+ out:;
+ }
+
+#endif /* KEEP_BACK_PTRS */
+
/* Store debugging info into p. Return displaced pointer. */
/* Assumes we don't hold allocation lock. */
ptr_t GC_store_debug_info(p, sz, string, integer)
@@ -100,6 +239,9 @@ word integer;
/* But that's expensive. And this way things should only appear */
/* inconsistent while we're in the handler. */
LOCK();
+# ifdef KEEP_BACK_PTRS
+ ((oh *)p) -> oh_back_ptr = 0;
+# endif
((oh *)p) -> oh_string = string;
((oh *)p) -> oh_int = integer;
((oh *)p) -> oh_sz = sz;
@@ -110,7 +252,7 @@ word integer;
return((ptr_t)result);
}
-/* Check the object with debugging info at p */
+/* Check the object with debugging info at ohdr */
/* return NIL if it's OK. Else return clobbered */
/* address. */
ptr_t GC_check_annotated_obj(ohdr)
@@ -201,10 +343,10 @@ void GC_start_debugging()
}
# ifdef GC_ADD_CALLER
-# define EXTRA_ARGS word ra, char * s, int i
+# define EXTRA_ARGS word ra, CONST char * s, int i
# define OPT_RA ra,
# else
-# define EXTRA_ARGS char * s, int i
+# define EXTRA_ARGS CONST char * s, int i
# define OPT_RA
# endif
@@ -389,13 +531,15 @@ GC_PTR p;
GC_PTR p;
# endif
{
- register GC_PTR base = GC_base(p);
+ register GC_PTR base;
register ptr_t clobbered;
+ if (0 == p) return;
+ base = GC_base(p);
if (base == 0) {
GC_err_printf1("Attempt to free invalid pointer %lx\n",
(unsigned long)p);
- if (p != 0) ABORT("free(invalid pointer)");
+ ABORT("free(invalid pointer)");
}
if ((ptr_t)p - (ptr_t)base != sizeof(oh)) {
GC_err_printf1(
@@ -408,31 +552,29 @@ GC_PTR p;
GC_err_printf0(
"GC_debug_free: found previously deallocated (?) object at ");
} else {
- GC_err_printf0("GC_debug_free: found smashed object at ");
+ GC_err_printf0("GC_debug_free: found smashed location at ");
}
GC_print_smashed_obj(p, clobbered);
}
/* Invalidate size */
((oh *)base) -> oh_sz = GC_size(base);
}
-# ifdef FIND_LEAK
+ if (GC_find_leak) {
GC_free(base);
-# else
- {
- register hdr * hhdr = HDR(p);
- GC_bool uncollectable = FALSE;
+ } else {
+ register hdr * hhdr = HDR(p);
+ GC_bool uncollectable = FALSE;
- if (hhdr -> hb_obj_kind == UNCOLLECTABLE) {
- uncollectable = TRUE;
- }
-# ifdef ATOMIC_UNCOLLECTABLE
- if (hhdr -> hb_obj_kind == AUNCOLLECTABLE) {
- uncollectable = TRUE;
- }
-# endif
- if (uncollectable) GC_free(base);
+ if (hhdr -> hb_obj_kind == UNCOLLECTABLE) {
+ uncollectable = TRUE;
}
-# endif
+# ifdef ATOMIC_UNCOLLECTABLE
+ if (hhdr -> hb_obj_kind == AUNCOLLECTABLE) {
+ uncollectable = TRUE;
+ }
+# endif
+ if (uncollectable) GC_free(base);
+ } /* !GC_find_leak */
}
# ifdef __STDC__
@@ -491,7 +633,7 @@ GC_PTR p;
}
clobbered = GC_check_annotated_obj((oh *)base);
if (clobbered != 0) {
- GC_err_printf0("GC_debug_realloc: found smashed object at ");
+ GC_err_printf0("GC_debug_realloc: found smashed location at ");
GC_print_smashed_obj(p, clobbered);
}
old_sz = ((oh *)base) -> oh_sz;
@@ -528,7 +670,7 @@ word dummy;
if (clobbered != 0) {
GC_err_printf0(
- "GC_check_heap_block: found smashed object at ");
+ "GC_check_heap_block: found smashed location at ");
GC_print_smashed_obj((ptr_t)p, clobbered);
}
}
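
A usage sketch for the backtrace facility added above: it is compiled in only when the collector is built with -DKEEP_BACK_PTRS, and this release defines GC_generate_random_backtrace in dbg_mlc.c without a public prototype, so the declaration below is an assumption supplied by the client.

    #define GC_DEBUG            /* so GC_MALLOC keeps debug headers, */
                                /* where the back pointers live      */
    #include "gc.h"

    void GC_generate_random_backtrace(void);   /* assumed prototype */

    int main(void)
    {
        int i;
        for (i = 0; i < 10000; ++i) {
            GC_MALLOC(24);              /* populate the heap */
        }
        /* Forces a collection, picks a random live object, and */
        /* prints the chain of references that keeps it alive.  */
        GC_generate_random_backtrace();
        return 0;
    }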
diff --git a/boehm-gc/dyn_load.c b/boehm-gc/dyn_load.c
index 56aeb3dd450..8c3ec4186c4 100644
--- a/boehm-gc/dyn_load.c
+++ b/boehm-gc/dyn_load.c
@@ -47,7 +47,7 @@
#if (defined(DYNAMIC_LOADING) || defined(MSWIN32)) && !defined(PCR)
#if !defined(SUNOS4) && !defined(SUNOS5DL) && !defined(IRIX5) && \
!defined(MSWIN32) && !(defined(ALPHA) && defined(OSF1)) && \
- !defined(HP_PA) && !(defined(LINUX) && defined(__ELF__)) && \
+ !defined(HPUX) && !(defined(LINUX) && defined(__ELF__)) && \
!defined(RS6000) && !defined(SCO_ELF)
--> We only know how to find data segments of dynamic libraries for the
--> above. Additional SVR4 variants might not be too
@@ -283,6 +283,9 @@ void GC_register_dynamic_libraries()
static struct link_map *
GC_FirstDLOpenedLinkMap()
{
+# ifdef __GNUC__
+# pragma weak _DYNAMIC
+# endif
extern ElfW(Dyn) _DYNAMIC[];
ElfW(Dyn) *dp;
struct r_debug *r;
@@ -655,7 +658,7 @@ void GC_register_dynamic_libraries()
}
#endif
-#if defined(HP_PA)
+#if defined(HPUX)
#include <errno.h>
#include <dl.h>
@@ -678,6 +681,11 @@ void GC_register_dynamic_libraries()
 /* Check if this is the end of the list or if some error occurred */
if (status != 0) {
+# ifdef HPUX_THREADS
+ /* I've seen errno values of 0. The man page is not clear */
+ /* as to whether errno should get set on a -1 return. */
+ break;
+# else
if (errno == EINVAL) {
break; /* Moved past end of shared library list --> finished */
} else {
@@ -688,6 +696,7 @@ void GC_register_dynamic_libraries()
}
ABORT("shl_get failed");
}
+# endif
}
# ifdef VERBOSE
@@ -710,7 +719,7 @@ void GC_register_dynamic_libraries()
index++;
}
}
-#endif /* HP_PA */
+#endif /* HPUX */
#ifdef RS6000
#pragma alloca
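
The `#pragma weak _DYNAMIC` added above makes the reference weak, so a statically linked executable, in which the linker supplies no _DYNAMIC, still links; the symbol then resolves to address 0 and the caller can test for that, as GC_FirstDLOpenedLinkMap does. A standalone sketch of the idiom (char[] stands in for ElfW(Dyn)[] to keep the sketch free of the ELF headers):

    #ifdef __GNUC__
    # pragma weak _DYNAMIC
    #endif
    extern char _DYNAMIC[];

    static int is_dynamically_linked(void)
    {
        /* A weak undefined symbol resolves to 0 when absent. */
        return _DYNAMIC != 0;
    }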
diff --git a/boehm-gc/finalize.c b/boehm-gc/finalize.c
index f33ae734c17..2ee927fe432 100644
--- a/boehm-gc/finalize.c
+++ b/boehm-gc/finalize.c
@@ -1,6 +1,7 @@
/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1996 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
@@ -16,6 +17,18 @@
# include "gc_priv.h"
# include "gc_mark.h"
+# ifdef FINALIZE_ON_DEMAND
+ int GC_finalize_on_demand = 1;
+# else
+ int GC_finalize_on_demand = 0;
+# endif
+
+# ifdef JAVA_FINALIZATION
+ int GC_java_finalization = 1;
+# else
+ int GC_java_finalization = 0;
+# endif
+
/* Type of mark procedure used for marking from finalizable object. */
/* This procedure normally does not mark the object, only its */
 /* descendants. */
@@ -249,7 +262,7 @@ out:
/* Possible finalization_marker procedures. Note that mark stack */
/* overflow is handled by the caller, and is not a disaster. */
-void GC_normal_finalize_mark_proc(p)
+GC_API void GC_normal_finalize_mark_proc(p)
ptr_t p;
{
hdr * hhdr = HDR(p);
@@ -261,7 +274,7 @@ ptr_t p;
/* This only pays very partial attention to the mark descriptor. */
/* It does the right thing for normal and atomic objects, and treats */
/* most others as normal. */
-void GC_ignore_self_finalize_mark_proc(p)
+GC_API void GC_ignore_self_finalize_mark_proc(p)
ptr_t p;
{
hdr * hhdr = HDR(p);
@@ -284,7 +297,7 @@ ptr_t p;
}
/*ARGSUSED*/
-void GC_null_finalize_mark_proc(p)
+GC_API void GC_null_finalize_mark_proc(p)
ptr_t p;
{
}
@@ -295,7 +308,11 @@ ptr_t p;
/* in the nonthreads case, we try to avoid disabling signals, */
/* since it can be expensive. Threads packages typically */
/* make it cheaper. */
-void GC_register_finalizer_inner(obj, fn, cd, ofn, ocd, mp)
+/* The last parameter is a procedure that determines */
+/* marking for finalization ordering. Any objects marked */
+/* by that procedure will be guaranteed to not have been */
+/* finalized when this finalizer is invoked. */
+GC_API void GC_register_finalizer_inner(obj, fn, cd, ofn, ocd, mp)
GC_PTR obj;
GC_finalization_proc fn;
GC_PTR cd;
@@ -505,6 +522,7 @@ void GC_finalize()
for (curr_fo = fo_head[i]; curr_fo != 0; curr_fo = fo_next(curr_fo)) {
real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
if (!GC_is_marked(real_ptr)) {
+ GC_MARKED_FOR_FINALIZATION(real_ptr);
GC_MARK_FO(real_ptr, curr_fo -> fo_mark_proc);
if (GC_is_marked(real_ptr)) {
WARN("Finalization cycle involving %lx\n", real_ptr);
@@ -521,9 +539,9 @@ void GC_finalize()
while (curr_fo != 0) {
real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
if (!GC_is_marked(real_ptr)) {
-# ifndef JAVA_FINALIZATION
- GC_set_mark_bit(real_ptr);
-# endif
+ if (!GC_java_finalization) {
+ GC_set_mark_bit(real_ptr);
+ }
/* Delete from hash table */
next_fo = fo_next(curr_fo);
if (prev_fo == 0) {
@@ -555,20 +573,20 @@ void GC_finalize()
}
}
-# ifdef JAVA_FINALIZATION
- /* make sure we mark everything reachable from objects finalized
- using the no_order mark_proc */
- for (curr_fo = GC_finalize_now;
- curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
- real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
- if (!GC_is_marked(real_ptr)) {
- if (curr_fo -> fo_mark_proc == GC_null_finalize_mark_proc) {
- GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
- }
- GC_set_mark_bit(real_ptr);
- }
- }
-# endif
+ if (GC_java_finalization) {
+ /* make sure we mark everything reachable from objects finalized
+ using the no_order mark_proc */
+ for (curr_fo = GC_finalize_now;
+ curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
+ real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
+ if (!GC_is_marked(real_ptr)) {
+ if (curr_fo -> fo_mark_proc == GC_null_finalize_mark_proc) {
+ GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
+ }
+ GC_set_mark_bit(real_ptr);
+ }
+ }
+ }
/* Remove dangling disappearing links. */
for (i = 0; i < dl_size; i++) {
@@ -594,7 +612,7 @@ void GC_finalize()
}
}
-#ifdef JAVA_FINALIZATION
+#ifndef JAVA_FINALIZATION_NOT_NEEDED
/* Enqueue all remaining finalizers to be run - Assumes lock is
* held, and signals are disabled */
@@ -649,10 +667,15 @@ void GC_enqueue_all_finalizers()
* Unfortunately, the Java standard implies we have to keep running
* finalizers until there are no more left, a potential infinite loop.
* YUCK.
+ * Note that this is even more dangerous than the usual Java
+ * finalizers, in that objects reachable from static variables
+ * may have been finalized when these finalizers are run.
+ * Finalizers run at this point must be prepared to deal with a
+ * mostly broken world.
* This routine is externally callable, so is called without
* the allocation lock.
*/
-void GC_finalize_all()
+GC_API void GC_finalize_all()
{
DCL_LOCK_STATE;
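
With the compile-time switches replaced by the run-time flags above, a client can now pick the finalization policy at startup. A minimal sketch (note that a dead object may still be kept alive for one cycle by stale stack or register contents):

    #include "gc.h"

    static void note_death(GC_PTR obj, GC_PTR client_data)
    {
        /* Runs once obj is found unreachable and finalizers execute. */
    }

    int main(void)
    {
        GC_PTR p;
        GC_finalize_on_demand = 1;  /* queue finalizers; don't run them */
                                    /* from allocation calls            */
        GC_java_finalization = 1;   /* mark through finalizable objects */
                                    /* in a separate postpass           */
        p = GC_malloc(64);
        GC_register_finalizer(p, note_death, 0, 0, 0);
        p = 0;
        GC_gcollect();              /* detect the unreachable object */
        GC_invoke_finalizers();     /* explicitly run note_death     */
        return 0;
    }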
diff --git a/boehm-gc/headers.c b/boehm-gc/headers.c
index fae683a6315..9564a6a5359 100644
--- a/boehm-gc/headers.c
+++ b/boehm-gc/headers.c
@@ -25,6 +25,12 @@
# include "gc_priv.h"
bottom_index * GC_all_bottom_indices = 0;
+ /* Pointer to first (lowest addr) */
+ /* bottom_index. */
+
+bottom_index * GC_all_bottom_indices_end = 0;
+ /* Pointer to last (highest addr) */
+ /* bottom_index. */
/* Non-macro version of header location routine */
hdr * GC_find_header(h)
@@ -137,16 +143,17 @@ void GC_init_headers()
/* Make sure that there is a bottom level index block for address addr */
/* Return FALSE on failure. */
static GC_bool get_index(addr)
-register word addr;
+word addr;
{
- register word hi =
- (word)(addr) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);
- register bottom_index * r;
- register bottom_index * p;
- register bottom_index ** prev;
+ word hi = (word)(addr) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);
+ bottom_index * r;
+ bottom_index * p;
+ bottom_index ** prev;
+ bottom_index *pi;
+
# ifdef HASH_TL
- register unsigned i = TL_HASH(hi);
- register bottom_index * old;
+ unsigned i = TL_HASH(hi);
+ bottom_index * old;
old = p = GC_top_index[i];
while(p != GC_all_nils) {
@@ -164,11 +171,21 @@ register word addr;
if (r == 0) return(FALSE);
GC_top_index[hi] = r;
BZERO(r, sizeof (bottom_index));
-# endif
+# endif
r -> key = hi;
/* Add it to the list of bottom indices */
- prev = &GC_all_bottom_indices;
- while ((p = *prev) != 0 && p -> key < hi) prev = &(p -> asc_link);
+ prev = &GC_all_bottom_indices; /* pointer to p */
+ pi = 0; /* bottom_index preceding p */
+ while ((p = *prev) != 0 && p -> key < hi) {
+ pi = p;
+ prev = &(p -> asc_link);
+ }
+ r -> desc_link = pi;
+ if (0 == p) {
+ GC_all_bottom_indices_end = r;
+ } else {
+ p -> desc_link = r;
+ }
r -> asc_link = p;
*prev = r;
return(TRUE);
@@ -185,6 +202,9 @@ register struct hblk * h;
if (!get_index((word) h)) return(FALSE);
result = alloc_hdr();
SET_HDR(h, result);
+# ifdef USE_MUNMAP
+ result -> hb_last_reclaimed = GC_gc_no;
+# endif
return(result != 0);
}
@@ -261,7 +281,7 @@ word client_data;
/* Get the next valid block whose address is at least h */
/* Return 0 if there is none. */
-struct hblk * GC_next_block(h)
+struct hblk * GC_next_used_block(h)
struct hblk * h;
{
register bottom_index * bi;
@@ -276,15 +296,16 @@ struct hblk * h;
}
while(bi != 0) {
while (j < BOTTOM_SZ) {
- if (IS_FORWARDING_ADDR_OR_NIL(bi -> index[j])) {
+ hdr * hhdr = bi -> index[j];
+ if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
j++;
} else {
- if (bi->index[j]->hb_map != GC_invalid_map) {
+ if (hhdr->hb_map != GC_invalid_map) {
return((struct hblk *)
(((bi -> key << LOG_BOTTOM_SZ) + j)
<< LOG_HBLKSIZE));
} else {
- j += divHBLKSZ(bi->index[j] -> hb_sz);
+ j += divHBLKSZ(hhdr -> hb_sz);
}
}
}
@@ -293,3 +314,38 @@ struct hblk * h;
}
return(0);
}
+
+/* Get the last (highest address) block whose address is */
+/* at most h. Return 0 if there is none. */
+/* Unlike the above, this may return a free block. */
+struct hblk * GC_prev_block(h)
+struct hblk * h;
+{
+ register bottom_index * bi;
+ register signed_word j = ((word)h >> LOG_HBLKSIZE) & (BOTTOM_SZ-1);
+
+ GET_BI(h, bi);
+ if (bi == GC_all_nils) {
+ register word hi = (word)h >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);
+ bi = GC_all_bottom_indices_end;
+ while (bi != 0 && bi -> key > hi) bi = bi -> desc_link;
+ j = BOTTOM_SZ - 1;
+ }
+ while(bi != 0) {
+ while (j >= 0) {
+ hdr * hhdr = bi -> index[j];
+ if (0 == hhdr) {
+ --j;
+ } else if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
+ j -= (signed_word)hhdr;
+ } else {
+ return((struct hblk *)
+ (((bi -> key << LOG_BOTTOM_SZ) + j)
+ << LOG_HBLKSIZE));
+ }
+ }
+ j = BOTTOM_SZ - 1;
+ bi = bi -> desc_link;
+ }
+ return(0);
+}
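
get_index and the new GC_prev_block rely on the same address decomposition: the high bits form the top-index key and the next LOG_BOTTOM_SZ bits select a slot within a bottom_index. A standalone sketch of the arithmetic, with illustrative values for the configuration constants (the real ones come from the private headers and vary by platform):

    #include <stdio.h>

    #define LOG_HBLKSIZE  12                 /* illustrative: 4K blocks  */
    #define LOG_BOTTOM_SZ 10                 /* illustrative: 1024 slots */
    #define BOTTOM_SZ     (1 << LOG_BOTTOM_SZ)

    int main(void)
    {
        unsigned long addr = 0x40123456UL;
        unsigned long hi = addr >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);
        unsigned long j  = (addr >> LOG_HBLKSIZE) & (BOTTOM_SZ - 1);
        /* Reassembling the block address inverts the decomposition, */
        /* exactly as in GC_next_used_block and GC_prev_block:       */
        unsigned long block = ((hi << LOG_BOTTOM_SZ) + j) << LOG_HBLKSIZE;
        printf("key=%#lx slot=%#lx block=%#lx\n", hi, j, block);
        return 0;   /* prints key=0x100 slot=0x123 block=0x40123000 */
    }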
diff --git a/boehm-gc/include/gc.h b/boehm-gc/include/gc.h
index ceabb02f6eb..cc74765d098 100644
--- a/boehm-gc/include/gc.h
+++ b/boehm-gc/include/gc.h
@@ -58,9 +58,11 @@
# if defined(__STDC__) || defined(__cplusplus)
# define GC_PROTO(args) args
typedef void * GC_PTR;
+# define GC_CONST const
# else
# define GC_PROTO(args) ()
typedef char * GC_PTR;
+# define GC_CONST
# endif
# ifdef __cplusplus
@@ -96,11 +98,31 @@ GC_API GC_PTR (*GC_oom_fn) GC_PROTO((size_t bytes_requested));
/* pointer to a previously allocated heap */
/* object. */
+GC_API int GC_find_leak;
+ /* Do not actually garbage collect, but simply */
+ /* report inaccessible memory that was not */
+ /* deallocated with GC_free. Initial value */
+ /* is determined by FIND_LEAK macro. */
+
GC_API int GC_quiet; /* Disable statistics output. Only matters if */
/* collector has been compiled with statistics */
/* enabled. This involves a performance cost, */
/* and is thus not the default. */
+GC_API int GC_finalize_on_demand;
+ /* If nonzero, finalizers will only be run in */
+ /* response to an explicit GC_invoke_finalizers */
+ /* call. The default is determined by whether */
+ /* the FINALIZE_ON_DEMAND macro is defined */
+ /* when the collector is built. */
+
+GC_API int GC_java_finalization;
+ /* Mark objects reachable from finalizable */
+ /* objects in a separate postpass. This makes */
+ /* it a bit safer to use non-topologically- */
+ /* ordered finalization. Default value is */
+ /* determined by JAVA_FINALIZATION macro. */
+
GC_API int GC_dont_gc; /* Don't collect unless explicitly requested, e.g. */
/* because it's not safe. */
@@ -111,6 +133,12 @@ GC_API int GC_dont_expand;
GC_API int GC_full_freq; /* Number of partial collections between */
/* full collections. Matters only if */
/* GC_incremental is set. */
+ /* Full collections are also triggered if */
+ /* the collector detects a substantial */
+ /* increase in the number of in-use heap */
+ /* blocks. Values in the tens are now */
+ /* perfectly reasonable, unlike for */
+ /* earlier GC versions. */
GC_API GC_word GC_non_gc_bytes;
/* Bytes not considered candidates for collection. */
@@ -277,6 +305,9 @@ GC_API int GC_try_to_collect GC_PROTO((GC_stop_func stop_func));
/* Includes some pages that were allocated but never written. */
GC_API size_t GC_get_heap_size GC_PROTO((void));
+/* Return a lower bound on the number of free bytes in the heap. */
+GC_API size_t GC_get_free_bytes GC_PROTO((void));
+
/* Return the number of bytes allocated since the last collection. */
GC_API size_t GC_get_bytes_since_gc GC_PROTO((void));
@@ -321,10 +352,11 @@ GC_API GC_PTR GC_malloc_atomic_ignore_off_page GC_PROTO((size_t lb));
#ifdef GC_ADD_CALLER
# define GC_EXTRAS GC_RETURN_ADDR, __FILE__, __LINE__
-# define GC_EXTRA_PARAMS GC_word ra, char * descr_string, int descr_int
+# define GC_EXTRA_PARAMS GC_word ra, GC_CONST char * descr_string, int descr_int
#else
# define GC_EXTRAS __FILE__, __LINE__
-# define GC_EXTRA_PARAMS char * descr_string, int descr_int
+# define GC_EXTRA_PARAMS GC_CONST char * descr_string, int descr_int
#endif
/* Debugging (annotated) allocation. GC_gcollect will check */
@@ -510,7 +542,7 @@ GC_API int GC_invoke_finalizers GC_PROTO((void));
/* be finalized. Return the number of finalizers */
/* that were run. Normally this is also called */
/* implicitly during some allocations. If */
- /* FINALIZE_ON_DEMAND is defined, it must be called */
- /* GC_finalize_on_demand is nonzero, it must be called */
/* explicitly. */
/* GC_set_warn_proc can be used to redirect or filter warning messages. */
@@ -668,7 +700,7 @@ GC_API void (*GC_is_visible_print_proc)
# endif /* SOLARIS_THREADS */
-#if defined(IRIX_THREADS) || defined(LINUX_THREADS)
+#if defined(IRIX_THREADS) || defined(LINUX_THREADS) || defined(HPUX_THREADS)
/* We treat these similarly. */
# include <pthread.h>
# include <signal.h>
@@ -687,11 +719,12 @@ GC_API void (*GC_is_visible_print_proc)
# if defined(PCR) || defined(SOLARIS_THREADS) || defined(WIN32_THREADS) || \
defined(IRIX_THREADS) || defined(LINUX_THREADS) || \
- defined(IRIX_JDK_THREADS)
+ defined(IRIX_JDK_THREADS) || defined(HPUX_THREADS)
/* Any flavor of threads except SRC_M3. */
/* This returns a list of objects, linked through their first */
/* word. Its use can greatly reduce lock contention problems, since */
/* the allocation lock can be acquired and released many fewer times. */
+/* lb must be large enough to hold the pointer field. */
GC_PTR GC_malloc_many(size_t lb);
#define GC_NEXT(p) (*(GC_PTR *)(p)) /* Retrieve the next element */
/* in returned list. */
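
A short usage sketch for GC_malloc_many in the threads builds that declare it: the returned objects are chained through their first word, so the caller peels them off with GC_NEXT and must not reuse the link word until then.

    #include "gc.h"

    #define OBJ_SZ 32   /* large enough to hold the link pointer */

    int main(void)
    {
        GC_PTR list = GC_malloc_many(OBJ_SZ);
        while (list != 0) {
            GC_PTR obj = list;
            list = GC_NEXT(list);   /* advance before reusing the word */
            GC_NEXT(obj) = 0;       /* object is now ours; clear link  */
            /* ... use obj as an OBJ_SZ-byte object ... */
        }
        return 0;
    }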
diff --git a/boehm-gc/include/gc_alloc.h b/boehm-gc/include/gc_alloc.h
index 1d912db2f0b..1f1d54aff95 100644
--- a/boehm-gc/include/gc_alloc.h
+++ b/boehm-gc/include/gc_alloc.h
@@ -13,7 +13,7 @@
//
// This is a C++ header file that is intended to replace the SGI STL
-// alloc.h.
+// alloc.h. This assumes SGI STL version < 3.0.
//
// This assumes the collector has been compiled with -DATOMIC_UNCOLLECTABLE
// and -DALL_INTERIOR_POINTERS. We also recommend
diff --git a/boehm-gc/include/new_gc_alloc.h b/boehm-gc/include/new_gc_alloc.h
index 577138830c5..54b7bd448d8 100644
--- a/boehm-gc/include/new_gc_alloc.h
+++ b/boehm-gc/include/new_gc_alloc.h
@@ -318,12 +318,10 @@ class traceable_alloc_template {
typedef traceable_alloc_template < 0 > traceable_alloc;
-#ifdef _SGI_SOURCE
-
// We want to specialize simple_alloc so that it does the right thing
// for all pointerfree types. At the moment there is no portable way to
// even approximate that. The following approximation should work for
-// SGI compilers, and perhaps some others.
+// SGI compilers, and recent versions of g++.
# define __GC_SPECIALIZE(T,alloc) \
class simple_alloc<T, alloc> { \
@@ -451,6 +449,4 @@ __STL_END_NAMESPACE
#endif /* __STL_USE_STD_ALLOCATORS */
-#endif /* _SGI_SOURCE */
-
#endif /* GC_ALLOC_H */
diff --git a/boehm-gc/include/private/gc_hdrs.h b/boehm-gc/include/private/gc_hdrs.h
index 2f2d1bf9b8a..60dc2ad37d6 100644
--- a/boehm-gc/include/private/gc_hdrs.h
+++ b/boehm-gc/include/private/gc_hdrs.h
@@ -49,14 +49,16 @@ typedef struct bi {
hdr * index[BOTTOM_SZ];
/*
* The bottom level index contains one of three kinds of values:
- * 0 means we're not responsible for this block.
+ * 0 means we're not responsible for this block,
+ * or this is a block other than the first one in a free block.
* 1 < (long)X <= MAX_JUMP means the block starts at least
* X * HBLKSIZE bytes before the current address.
* A valid pointer points to a hdr structure. (The above can't be
* valid pointers due to the GET_MEM return convention.)
*/
struct bi * asc_link; /* All indices are linked in */
- /* ascending order. */
+ /* ascending order... */
+ struct bi * desc_link; /* ... and in descending order. */
word key; /* high order address bits. */
# ifdef HASH_TL
struct bi * hash_link; /* Hash chain link. */
diff --git a/boehm-gc/include/private/gc_priv.h b/boehm-gc/include/private/gc_priv.h
index 934075fa358..ac4d63a0b26 100644
--- a/boehm-gc/include/private/gc_priv.h
+++ b/boehm-gc/include/private/gc_priv.h
@@ -1,6 +1,9 @@
/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
+ *
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
@@ -64,16 +67,16 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
# include <stddef.h>
# endif
# define VOLATILE volatile
-# define CONST const
#else
# ifdef MSWIN32
# include <stdlib.h>
# endif
# define VOLATILE
-# define CONST
#endif
-#ifdef AMIGA
+#define CONST GC_CONST
+
+#if 0 /* was once defined for AMIGA */
# define GC_FAR __far
#else
# define GC_FAR
@@ -350,7 +353,7 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
+ GC_page_size) \
+ GC_page_size-1)
# else
-# if defined(AMIGA) || defined(NEXT) || defined(DOS4GW)
+# if defined(AMIGA) || defined(NEXT) || defined(MACOSX) || defined(DOS4GW)
# define GET_MEM(bytes) HBLKPTR((size_t) \
calloc(1, (size_t)bytes + GC_page_size) \
+ GC_page_size-1)
@@ -436,7 +439,7 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
# endif
# ifdef LINUX_THREADS
# include <pthread.h>
-# ifdef __i386__
+# if defined(I386)
inline static int GC_test_and_set(volatile unsigned int *addr) {
int oldval;
/* Note: the "xchg" instruction does not need a "lock" prefix */
@@ -446,9 +449,57 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
return oldval;
}
# else
- -- > Need implementation of GC_test_and_set()
+# if defined(POWERPC)
+ inline static int GC_test_and_set(volatile unsigned int *addr) {
+ int oldval;
+ int temp = 1; // locked value
+
+ __asm__ __volatile__(
+ "1:\tlwarx %0,0,%3\n" // load and reserve
+ "\tcmpwi %0, 0\n" // if load is
+ "\tbne 2f\n" // non-zero, return already set
+ "\tstwcx. %2,0,%1\n" // else store conditional
+ "\tbne- 1b\n" // retry if lost reservation
+ "2:\t\n" // oldval is zero if we set
+ : "=&r"(oldval), "=p"(addr)
+ : "r"(temp), "1"(addr)
+ : "memory");
+ return (int)oldval;
+ }
+# else
+# ifdef ALPHA
+ inline static int GC_test_and_set(volatile unsigned int *addr)
+ {
+ unsigned long oldvalue;
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ "1: ldl_l %0,%1\n"
+ " and %0,%3,%2\n"
+ " bne %2,2f\n"
+ " xor %0,%3,%0\n"
+ " stl_c %0,%1\n"
+ " beq %0,3f\n"
+ " mb\n"
+ "2:\n"
+ ".section .text2,\"ax\"\n"
+ "3: br 1b\n"
+ ".previous"
+ :"=&r" (temp), "=m" (*addr), "=&r"
+(oldvalue)
+ :"Ir" (1), "m" (*addr));
+
+ return oldvalue;
+ }
+# else
+ -- > Need implementation of GC_test_and_set()
+# endif
+# endif
# endif
-# define GC_clear(addr) (*(addr) = 0)
+ inline static void GC_clear(volatile unsigned int *addr) {
+ *(addr) = 0;
+ }
extern volatile unsigned int GC_allocate_lock;
/* This is not a mutex because mutexes that obey the (optional) */
@@ -462,15 +513,10 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
# define NO_THREAD (pthread_t)(-1)
# define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
# define I_HOLD_LOCK() (pthread_equal(GC_lock_holder, pthread_self()))
-# ifdef UNDEFINED
-# define LOCK() pthread_mutex_lock(&GC_allocate_ml)
-# define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
-# else
-# define LOCK() \
+# define LOCK() \
{ if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); }
-# define UNLOCK() \
+# define UNLOCK() \
GC_clear(&GC_allocate_lock)
-# endif
extern GC_bool GC_collecting;
# define ENTER_GC() \
{ \
@@ -478,15 +524,30 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
}
# define EXIT_GC() GC_collecting = 0;
# endif /* LINUX_THREADS */
-# if defined(IRIX_THREADS) || defined(IRIX_JDK_THREADS)
+# if defined(HPUX_THREADS)
+# include <pthread.h>
+ extern pthread_mutex_t GC_allocate_ml;
+# define LOCK() pthread_mutex_lock(&GC_allocate_ml)
+# define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
+# endif
+# if defined(IRIX_THREADS) || defined(IRIX_JDK_THREADS)
+ /* This may also eventually be appropriate for HPUX_THREADS */
# include <pthread.h>
-# include <mutex.h>
+# ifndef HPUX_THREADS
+ /* This probably should never be included, but I can't test */
+ /* on Irix anymore. */
+# include <mutex.h>
+# endif
-# if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64)) \
+# ifndef HPUX_THREADS
+# if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64)) \
|| !defined(_COMPILER_VERSION) || _COMPILER_VERSION < 700
# define GC_test_and_set(addr, v) test_and_set(addr,v)
-# else
+# else
# define GC_test_and_set(addr, v) __test_and_set(addr,v)
+# endif
+# else
+ /* I couldn't find a way to do this inline on HP/UX */
# endif
extern unsigned long GC_allocate_lock;
/* This is not a mutex because mutexes that obey the (optional) */
@@ -500,15 +561,17 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
# define NO_THREAD (pthread_t)(-1)
# define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
# define I_HOLD_LOCK() (pthread_equal(GC_lock_holder, pthread_self()))
-# ifdef UNDEFINED
-# define LOCK() pthread_mutex_lock(&GC_allocate_ml)
-# define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
+# ifdef HPUX_THREADS
+# define LOCK() { if (!GC_test_and_clear(&GC_allocate_lock)) GC_lock(); }
+ /* The following is INCORRECT, since the memory model is too weak. */
+# define UNLOCK() { GC_noop1(&GC_allocate_lock); \
+ *(volatile unsigned long *)(&GC_allocate_lock) = 1; }
# else
-# define LOCK() { if (GC_test_and_set(&GC_allocate_lock, 1)) GC_lock(); }
-# if __mips >= 3 && (defined (_ABIN32) || defined(_ABI64)) \
+# define LOCK() { if (GC_test_and_set(&GC_allocate_lock, 1)) GC_lock(); }
+# if __mips >= 3 && (defined (_ABIN32) || defined(_ABI64)) \
&& defined(_COMPILER_VERSION) && _COMPILER_VERSION >= 700
# define UNLOCK() __lock_release(&GC_allocate_lock)
-# else
+# else
/* The function call in the following should prevent the */
/* compiler from moving assignments to below the UNLOCK. */
/* This is probably not necessary for ucode or gcc 2.8. */
@@ -516,7 +579,7 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
/* versions. */
# define UNLOCK() { GC_noop1(&GC_allocate_lock); \
*(volatile unsigned long *)(&GC_allocate_lock) = 0; }
-# endif
+# endif
# endif
extern GC_bool GC_collecting;
# define ENTER_GC() \
@@ -607,7 +670,7 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
# else
# if defined(SOLARIS_THREADS) || defined(WIN32_THREADS) \
|| defined(IRIX_THREADS) || defined(LINUX_THREADS) \
- || defined(IRIX_JDK_THREADS)
+ || defined(IRIX_JDK_THREADS) || defined(HPUX_THREADS)
void GC_stop_world();
void GC_start_world();
# define STOP_WORLD() GC_stop_world()
@@ -823,6 +886,7 @@ struct hblkhdr {
struct hblk * hb_next; /* Link field for hblk free list */
/* and for lists of chunks waiting to be */
/* reclaimed. */
+ struct hblk * hb_prev; /* Backwards link for free list. */
word hb_descr; /* object descriptor for marking. See */
/* mark.h. */
char* hb_map; /* A pointer to a pointer validity map of the block. */
@@ -837,14 +901,28 @@ struct hblkhdr {
# define IGNORE_OFF_PAGE 1 /* Ignore pointers that do not */
/* point to the first page of */
/* this object. */
+# define WAS_UNMAPPED 2 /* This is a free block, which has */
+ /* been unmapped from the address */
+ /* space. */
+ /* GC_remap must be invoked on it */
+ /* before it can be reallocated. */
+ /* Only set with USE_MUNMAP. */
unsigned short hb_last_reclaimed;
/* Value of GC_gc_no when block was */
/* last allocated or swept. May wrap. */
+ /* For a free block, this is maintained */
+ /* only for USE_MUNMAP, and indicates */
+ /* when the header was allocated, or */
+ /* when the size of the block last */
+ /* changed. */
word hb_marks[MARK_BITS_SZ];
/* Bit i in the array refers to the */
/* object starting at the ith word (header */
/* INCLUDED) in the heap block. */
/* The lsb of word 0 is numbered 0. */
+ /* Unused bits are invalid, and are */
+ /* occasionally set, e.g. for uncollectable */
+ /* objects. */
};
/* heap block body */
@@ -959,6 +1037,9 @@ struct _GC_arrays {
word _max_heapsize;
ptr_t _last_heap_addr;
ptr_t _prev_heap_addr;
+ word _large_free_bytes;
+ /* Total bytes contained in blocks on large object free */
+ /* list. */
word _words_allocd_before_gc;
/* Number of words allocated before this */
/* collection cycle. */
@@ -1005,6 +1086,9 @@ struct _GC_arrays {
/* Number of words in accessible atomic */
/* objects. */
# endif
+# ifdef USE_MUNMAP
+ word _unmapped_bytes;
+# endif
# ifdef MERGE_SIZES
unsigned _size_map[WORDS_TO_BYTES(MAXOBJSZ+1)];
/* Number of words to allocate for a given allocation request in */
@@ -1022,7 +1106,7 @@ struct _GC_arrays {
/* to an object at */
/* block_start+i&~3 - WORDS_TO_BYTES(j). */
/* (If ALL_INTERIOR_POINTERS is defined, then */
- /* instead ((short *)(hbh_map[sz])[i] is j if */
+ /* instead ((short *)(hb_map[sz])[i] is j if */
/* block_start+WORDS_TO_BYTES(i) is in the */
/* interior of an object starting at */
/* block_start+WORDS_TO_BYTES(i-j)). */
@@ -1135,6 +1219,7 @@ GC_API GC_FAR struct _GC_arrays GC_arrays;
# define GC_prev_heap_addr GC_arrays._prev_heap_addr
# define GC_words_allocd GC_arrays._words_allocd
# define GC_words_wasted GC_arrays._words_wasted
+# define GC_large_free_bytes GC_arrays._large_free_bytes
# define GC_words_finalized GC_arrays._words_finalized
# define GC_non_gc_bytes_at_gc GC_arrays._non_gc_bytes_at_gc
# define GC_mem_freed GC_arrays._mem_freed
@@ -1144,6 +1229,9 @@ GC_API GC_FAR struct _GC_arrays GC_arrays;
# define GC_words_allocd_before_gc GC_arrays._words_allocd_before_gc
# define GC_heap_sects GC_arrays._heap_sects
# define GC_last_stack GC_arrays._last_stack
+# ifdef USE_MUNMAP
+# define GC_unmapped_bytes GC_arrays._unmapped_bytes
+# endif
# ifdef MSWIN32
# define GC_heap_bases GC_arrays._heap_bases
# endif
@@ -1236,7 +1324,7 @@ extern char * GC_invalid_map;
/* Pointer to the nowhere valid hblk map */
/* Blocks pointing to this map are free. */
-extern struct hblk * GC_hblkfreelist;
+extern struct hblk * GC_hblkfreelist[];
/* List of completely empty heap blocks */
/* Linked through hb_next field of */
/* header structure associated with */
@@ -1311,7 +1399,12 @@ GC_bool GC_should_collect();
void GC_apply_to_all_blocks(/*fn, client_data*/);
/* Invoke fn(hbp, client_data) for each */
/* allocated heap block. */
-struct hblk * GC_next_block(/* struct hblk * h */);
+struct hblk * GC_next_used_block(/* struct hblk * h */);
+ /* Return first in-use block >= h */
+struct hblk * GC_prev_block(/* struct hblk * h */);
+ /* Return last block <= h. Returned block */
+ /* is managed by GC, but may or may not be in */
+ /* use. */
void GC_mark_init();
void GC_clear_marks(); /* Clear mark bits for all heap objects. */
void GC_invalidate_mark_state(); /* Tell the marker that marked */
@@ -1384,8 +1477,14 @@ extern void (*GC_start_call_back)(/* void */);
/* lock held. */
/* 0 by default. */
void GC_push_regs(); /* Push register contents onto mark stack. */
+ /* If NURSERY is defined, the default push */
+ /* action can be overridden with GC_push_proc */
void GC_remark(); /* Mark from all marked objects. Used */
/* only if we had to drop something. */
+
+# ifdef NURSERY
+ extern void (*GC_push_proc)(ptr_t);
+# endif
# if defined(MSWIN32)
void __cdecl GC_push_one();
# else
@@ -1608,6 +1707,15 @@ extern void (*GC_print_heap_obj)(/* ptr_t p */);
/* detailed description of the object */
/* referred to by p. */
+/* Memory unmapping: */
+#ifdef USE_MUNMAP
+ void GC_unmap_old(void);
+ void GC_merge_unmapped(void);
+ void GC_unmap(ptr_t start, word bytes);
+ void GC_remap(ptr_t start, word bytes);
+ void GC_unmap_gap(ptr_t start1, word bytes1, ptr_t start2, word bytes2);
+#endif
+
/* Virtual dirty bit implementation: */
/* Each implementation exports the following: */
void GC_read_dirty(); /* Retrieve dirty bits. */
@@ -1640,6 +1748,16 @@ void GC_print_heap_sects();
void GC_print_static_roots();
void GC_dump();
+#ifdef KEEP_BACK_PTRS
+ void GC_store_back_pointer(ptr_t source, ptr_t dest);
+ void GC_marked_for_finalization(ptr_t dest);
+# define GC_STORE_BACK_PTR(source, dest) GC_store_back_pointer(source, dest)
+# define GC_MARKED_FOR_FINALIZATION(dest) GC_marked_for_finalization(dest)
+#else
+# define GC_STORE_BACK_PTR(source, dest)
+# define GC_MARKED_FOR_FINALIZATION(dest)
+#endif
+
/* Make arguments appear live to compiler */
# ifdef __WATCOMC__
void GC_noop(void*, ...);
@@ -1690,4 +1808,13 @@ void GC_err_puts(/* char *s */);
/* newlines, don't ... */
+# ifdef GC_ASSERTIONS
+# define GC_ASSERT(expr) if(!(expr)) {\
+ GC_err_printf2("Assertion failure: %s:%ld\n", \
+ __FILE__, (unsigned long)__LINE__); \
+ ABORT("assertion failure"); }
+# else
+# define GC_ASSERT(expr)
+# endif
+
# endif /* GC_PRIVATE_H */
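
The LOCK/UNLOCK macros above reduce to a one-word test-and-set spinlock: the uncontended path is a single atomic exchange, and GC_lock() is entered only on contention. A self-contained sketch of the same protocol, using the x86 xchg primitive from the LINUX_THREADS fragment and a plain busy wait where the collector would back off in GC_lock():

    static volatile unsigned int allocate_lock = 0;

    static int test_and_set(volatile unsigned int *addr)
    {
        int oldval;
        /* "xchg" is implicitly locked on x86; no "lock" prefix needed. */
        __asm__ __volatile__("xchgl %0, %1"
                             : "=r" (oldval), "=m" (*addr)
                             : "0" (1), "m" (*addr));
        return oldval;
    }

    static void lock(void)
    {
        while (test_and_set(&allocate_lock)) {
            /* spin */
        }
    }

    static void unlock(void)
    {
        allocate_lock = 0;          /* corresponds to GC_clear() */
    }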
diff --git a/boehm-gc/include/private/gcconfig.h b/boehm-gc/include/private/gcconfig.h
index b1a9dc3613a..c9017d371a8 100644
--- a/boehm-gc/include/private/gcconfig.h
+++ b/boehm-gc/include/private/gcconfig.h
@@ -43,6 +43,11 @@
# define OPENBSD
# define mach_type_known
# endif
+# if defined(__OpenBSD__) && defined(__sparc__)
+# define SPARC
+# define OPENBSD
+# define mach_type_known
+# endif
# if defined(__NetBSD__) && defined(m68k)
# define M68K
# define NETBSD
@@ -100,7 +105,8 @@
# endif
# define mach_type_known
# endif
-# if defined(sparc) && defined(unix) && !defined(sun) && !defined(linux)
+# if defined(sparc) && defined(unix) && !defined(sun) && !defined(linux) \
+ && !defined(__OpenBSD__)
# define SPARC
# define DRSNX
# define mach_type_known
@@ -129,7 +135,7 @@
# define HP_PA
# define mach_type_known
# endif
-# if defined(LINUX) && defined(i386)
+# if defined(LINUX) && (defined(i386) || defined(__i386__))
# define I386
# define mach_type_known
# endif
@@ -141,9 +147,8 @@
# define M68K
# define mach_type_known
# endif
-# if defined(linux) && defined(sparc)
+# if defined(LINUX) && defined(sparc)
# define SPARC
-# define LINUX
# define mach_type_known
# endif
# if defined(__alpha) || defined(__alpha__)
@@ -153,9 +158,11 @@
# endif
# define mach_type_known
# endif
-# if defined(_AMIGA)
-# define M68K
+# if defined(_AMIGA) && !defined(AMIGA)
# define AMIGA
+# endif
+# ifdef AMIGA
+# define M68K
# define mach_type_known
# endif
# if defined(THINK_C) || defined(__MWERKS__) && !defined(__powerc)
@@ -168,6 +175,11 @@
# define MACOS
# define mach_type_known
# endif
+# if defined(macosx)
+# define MACOSX
+# define POWERPC
+# define mach_type_known
+# endif
# if defined(NeXT) && defined(mc68000)
# define M68K
# define NEXT
@@ -486,8 +498,8 @@
# ifdef POWERPC
# define MACH_TYPE "POWERPC"
-# define ALIGNMENT 2
# ifdef MACOS
+# define ALIGNMENT 2 /* Still necessary? Could it be 4? */
# ifndef __LOWMEM__
# include <LowMem.h>
# endif
@@ -497,14 +509,24 @@
# define DATAEND /* not needed */
# endif
# ifdef LINUX
+# define ALIGNMENT 4 /* Guess. Can someone verify? */
+ /* This was 2, but that didn't sound right. */
# define OS_TYPE "LINUX"
# define HEURISTIC1
# undef STACK_GRAN
# define STACK_GRAN 0x10000000
+ /* Stack usually starts at 0x80000000 */
# define DATASTART GC_data_start
extern int _end;
# define DATAEND (&_end)
# endif
+# ifdef MACOSX
+# define ALIGNMENT 4
+# define OS_TYPE "MACOSX"
+# define DATASTART ((ptr_t) get_etext())
+# define STACKBOTTOM ((ptr_t) 0xc0000000)
+# define DATAEND /* not needed */
+# endif
# endif
# ifdef VAX
@@ -603,6 +625,11 @@
# define SVR4
# define STACKBOTTOM ((ptr_t) 0xf0000000)
# endif
+# ifdef OPENBSD
+# define OS_TYPE "OPENBSD"
+# define STACKBOTTOM ((ptr_t) 0xf8000000)
+# define DATASTART ((ptr_t)(&etext))
+# endif
# endif
# ifdef I386
@@ -657,10 +684,13 @@
# endif
# ifdef LINUX
# define OS_TYPE "LINUX"
-# define STACKBOTTOM ((ptr_t)0xc0000000)
- /* Appears to be 0xe0000000 for at least one 2.1.91 kernel. */
- /* Probably needs to be more flexible, but I don't yet */
- /* fully understand how flexible. */
+# define HEURISTIC1
+# undef STACK_GRAN
+# define STACK_GRAN 0x10000000
+ /* STACKBOTTOM is usually 0xc0000000, but this changes with */
+ /* different kernel configurations. In particular, systems */
+ /* with 2GB physical memory will usually move the user */
+ /* address space limit, and hence initial SP to 0x80000000. */
# if !defined(LINUX_THREADS) || !defined(REDIRECT_MALLOC)
# define MPROTECT_VDB
# else
@@ -909,9 +939,13 @@
# define CPP_WORDSZ 64
# define STACKBOTTOM ((ptr_t) 0x120000000)
# ifdef __ELF__
+# if 0
+ /* __data_start apparently disappeared in some recent releases. */
extern int __data_start;
# define DATASTART &__data_start
-# define DYNAMIC_LOADING
+# endif
+# define DATASTART GC_data_start
+# define DYNAMIC_LOADING
# else
# define DATASTART ((ptr_t) 0x140000000)
# endif
@@ -1021,6 +1055,10 @@
# undef MPROTECT_VDB
# endif
+# ifdef USE_MUNMAP
+# undef MPROTECT_VDB /* Can't deal with address space holes. */
+# endif
+
# if !defined(PCR_VDB) && !defined(PROC_VDB) && !defined(MPROTECT_VDB)
# define DEFAULT_VDB
# endif
diff --git a/boehm-gc/linux_threads.c b/boehm-gc/linux_threads.c
index 4bcdd3a196a..8287dce647b 100644
--- a/boehm-gc/linux_threads.c
+++ b/boehm-gc/linux_threads.c
@@ -118,12 +118,12 @@ GC_linux_thread_top_of_stack() relies on implementation details of
LinuxThreads, namely that thread stacks are allocated on 2M boundaries
and grow to no more than 2M.
To make sure that we're using LinuxThreads and not some other thread
-package, we generate a dummy reference to `__pthread_initial_thread_bos',
+package, we generate a dummy reference to `pthread_kill_other_threads_np'
+(was `__pthread_initial_thread_bos' but that disappeared),
which is a symbol defined in LinuxThreads, but (hopefully) not in other
thread packages.
*/
-extern char * __pthread_initial_thread_bos;
-char **dummy_var_to_force_linux_threads = &__pthread_initial_thread_bos;
+void (*dummy_var_to_force_linux_threads)() = pthread_kill_other_threads_np;
#define LINUX_THREADS_STACK_SIZE (2 * 1024 * 1024)
diff --git a/boehm-gc/mach_dep.c b/boehm-gc/mach_dep.c
index 23e270e3de2..52f86346761 100644
--- a/boehm-gc/mach_dep.c
+++ b/boehm-gc/mach_dep.c
@@ -20,7 +20,11 @@
# define _longjmp(b,v) longjmp(b,v)
# endif
# ifdef AMIGA
-# include <dos.h>
+# ifndef __GNUC__
+# include <dos/dos.h>
+# else
+# include <machine/reg.h>
+# endif
# endif
#if defined(__MWERKS__) && !defined(POWERPC)
@@ -58,6 +62,12 @@ asm static void PushMacRegisters()
#endif /* __MWERKS__ */
+# if defined(SPARC) || defined(IA64)
+ /* Value returned from register flushing routine; either sp (SPARC) */
+ /* or ar.bsp (IA64) */
+ word GC_save_regs_ret_val;
+# endif
+
/* Routine to mark from registers that are preserved by the C compiler. */
/* This must be ported to every new architecture. There is a generic */
/* version at the end, that is likely, but not guaranteed to work */
@@ -126,9 +136,28 @@ void GC_push_regs()
asm("addq.w &0x4,%sp"); /* put stack back where it was */
# endif /* M68K HP */
-# ifdef AMIGA
- /* AMIGA - could be replaced by generic code */
- /* a0, a1, d0 and d1 are caller save */
+# if defined(M68K) && defined(AMIGA)
+ /* AMIGA - could be replaced by generic code */
+ /* a0, a1, d0 and d1 are caller save */
+
+# ifdef __GNUC__
+ asm("subq.w &0x4,%sp"); /* allocate word on top of stack */
+
+ asm("mov.l %a2,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %a3,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %a4,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %a5,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %a6,(%sp)"); asm("jsr _GC_push_one");
+ /* Skip frame pointer and stack pointer */
+ asm("mov.l %d2,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %d3,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %d4,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %d5,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %d6,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %d7,(%sp)"); asm("jsr _GC_push_one");
+
+ asm("addq.w &0x4,%sp"); /* put stack back where it was */
+# else /* !__GNUC__ */
GC_push_one(getreg(REG_A2));
GC_push_one(getreg(REG_A3));
GC_push_one(getreg(REG_A4));
@@ -141,7 +170,8 @@ void GC_push_regs()
GC_push_one(getreg(REG_D5));
GC_push_one(getreg(REG_D6));
GC_push_one(getreg(REG_D7));
-# endif
+# endif /* !__GNUC__ */
+# endif /* AMIGA */
# if defined(M68K) && defined(MACOS)
# if defined(THINK_C)
@@ -244,12 +274,12 @@ void GC_push_regs()
asm ("movd r7, tos"); asm ("bsr ?_GC_push_one"); asm ("adjspb $-4");
# endif
-# ifdef SPARC
+# if defined(SPARC) || defined(IA64)
{
word GC_save_regs_in_stack();
/* generic code will not work */
- (void)GC_save_regs_in_stack();
+ GC_save_regs_ret_val = GC_save_regs_in_stack();
}
# endif
@@ -309,12 +339,22 @@ void GC_push_regs()
# endif /* !__GNUC__ */
# endif /* M68K/SYSV */
+# if defined(PJ)
+ {
+ register int * sp asm ("optop");
+ extern int *__libc_stack_end;
+
+ GC_push_all_stack (sp, __libc_stack_end);
+ }
+# endif
/* other machines... */
# if !(defined M68K) && !(defined VAX) && !(defined RT)
# if !(defined SPARC) && !(defined I386) && !(defined NS32K)
-# if !defined(POWERPC) && !defined(UTS4)
+# if !defined(POWERPC) && !defined(UTS4) && !defined(IA64)
+# if !defined(PJ)
--> bad news <--
+# endif
# endif
# endif
# endif
@@ -374,6 +414,27 @@ ptr_t cold_gc_frame;
# endif
# endif
+/* On IA64, we also need to flush register windows. But they end */
+/* up on the other side of the stack segment. */
+/* Returns the backing store pointer for the register stack. */
+# ifdef IA64
+ asm(" .text");
+ asm(" .psr abi64");
+ asm(" .psr lsb");
+ asm(" .lsb");
+ asm("");
+ asm(" .text");
+ asm(" .align 16");
+ asm(" .global GC_save_regs_in_stack");
+ asm(" .proc GC_save_regs_in_stack");
+ asm("GC_save_regs_in_stack:");
+ asm(" .body");
+ asm(" flushrs");
+ asm(" ;;");
+ asm(" mov r8=ar.bsp");
+ asm(" br.ret.sptk.few rp");
+ asm(" .endp GC_save_regs_in_stack");
+# endif
/* GC_clear_stack_inner(arg, limit) clears stack area up to limit and */
/* returns arg. Stack clearing is crucial on SPARC, so we supply */
diff --git a/boehm-gc/malloc.c b/boehm-gc/malloc.c
index 37da584c27d..66e62d29694 100644
--- a/boehm-gc/malloc.c
+++ b/boehm-gc/malloc.c
@@ -93,8 +93,16 @@ register ptr_t *opp;
if(GC_incremental && !GC_dont_gc)
GC_collect_a_little_inner((int)n_blocks);
lw = ROUNDED_UP_WORDS(lb);
- while ((h = GC_allochblk(lw, k, 0)) == 0
- && GC_collect_or_expand(n_blocks, FALSE));
+ h = GC_allochblk(lw, k, 0);
+# ifdef USE_MUNMAP
+ if (0 == h) {
+ GC_merge_unmapped();
+ h = GC_allochblk(lw, k, 0);
+ }
+# endif
+ while (0 == h && GC_collect_or_expand(n_blocks, FALSE)) {
+ h = GC_allochblk(lw, k, 0);
+ }
if (h == 0) {
op = 0;
} else {
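
The rewritten path above is a three-stage policy: try the free lists, then (under USE_MUNMAP) coalesce unmapped free blocks and retry, and only then collect or expand the heap. mallocx.c below repeats it with IGNORE_OFF_PAGE. A hedged sketch of just the control flow, borrowing the internal names from gc_priv.h; this is not the actual allocation routine:

    struct hblk * allocate_large(word lw, word n_blocks, int k, unsigned flags)
    {
        struct hblk * h = GC_allochblk(lw, k, flags);
    # ifdef USE_MUNMAP
        if (0 == h) {
            GC_merge_unmapped();            /* coalesce unmapped blocks */
            h = GC_allochblk(lw, k, flags);
        }
    # endif
        while (0 == h && GC_collect_or_expand(n_blocks, flags != 0)) {
            h = GC_allochblk(lw, k, flags); /* retry after GC or growth */
        }
        return h;                           /* 0 on hard failure */
    }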
diff --git a/boehm-gc/mallocx.c b/boehm-gc/mallocx.c
index b1450215671..8c07fa98846 100644
--- a/boehm-gc/mallocx.c
+++ b/boehm-gc/mallocx.c
@@ -57,8 +57,16 @@ register int k;
if(GC_incremental && !GC_dont_gc)
GC_collect_a_little_inner((int)n_blocks);
lw = ROUNDED_UP_WORDS(lb);
- while ((h = GC_allochblk(lw, k, IGNORE_OFF_PAGE)) == 0
- && GC_collect_or_expand(n_blocks, TRUE));
+ h = GC_allochblk(lw, k, IGNORE_OFF_PAGE);
+# ifdef USE_MUNMAP
+ if (0 == h) {
+ GC_merge_unmapped();
+ h = GC_allochblk(lw, k, IGNORE_OFF_PAGE);
+ }
+# endif
+ while (0 == h && GC_collect_or_expand(n_blocks, TRUE)) {
+ h = GC_allochblk(lw, k, IGNORE_OFF_PAGE);
+ }
if (h == 0) {
op = 0;
} else {
diff --git a/boehm-gc/mark.c b/boehm-gc/mark.c
index c827af5cd12..67085fbcc29 100644
--- a/boehm-gc/mark.c
+++ b/boehm-gc/mark.c
@@ -87,6 +87,10 @@ struct obj_kind GC_obj_kinds[MAXOBJKINDS] = {
# define INITIAL_MARK_STACK_SIZE (1*HBLKSIZE)
/* INITIAL_MARK_STACK_SIZE * sizeof(mse) should be a */
/* multiple of HBLKSIZE. */
+ /* The incremental collector actually likes a larger */
+ /* size, since it want to push all marked dirty objs */
+ /* before marking anything new. Currently we let it */
+ /* grow dynamically. */
# endif
/*
@@ -254,7 +258,12 @@ ptr_t cold_gc_frame;
case MS_PUSH_RESCUERS:
if (GC_mark_stack_top
- >= GC_mark_stack + INITIAL_MARK_STACK_SIZE/4) {
+ >= GC_mark_stack + GC_mark_stack_size
+ - INITIAL_MARK_STACK_SIZE/2) {
+ /* Go ahead and mark, even though that might cause us to */
+ /* see more marked dirty objects later on. Avoid this */
+ /* in the future. */
+ GC_mark_stack_too_small = TRUE;
GC_mark_from_mark_stack();
return(FALSE);
} else {
@@ -671,6 +680,12 @@ int all;
# endif
word p;
{
+# ifdef NURSERY
+ if (0 != GC_push_proc) {
+ GC_push_proc(p);
+ return;
+ }
+# endif
GC_PUSH_ONE_STACK(p, 0);
}
@@ -681,7 +696,7 @@ word p;
# endif
/* As above, but argument passed preliminary test. */
-# ifdef PRINT_BLACK_LIST
+# if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
void GC_push_one_checked(p, interior_ptrs, source)
ptr_t source;
# else
@@ -744,6 +759,7 @@ register GC_bool interior_ptrs;
} else {
if (!mark_bit_from_hdr(hhdr, displ)) {
set_mark_bit_from_hdr(hhdr, displ);
+ GC_STORE_BACK_PTR(source, (ptr_t)r);
PUSH_OBJ((word *)r, hhdr, GC_mark_stack_top,
&(GC_mark_stack[GC_mark_stack_size]));
}
@@ -1102,7 +1118,7 @@ struct hblk *h;
{
register hdr * hhdr;
- h = GC_next_block(h);
+ h = GC_next_used_block(h);
if (h == 0) return(0);
hhdr = HDR(h);
GC_push_marked(h, hhdr);
@@ -1114,11 +1130,11 @@ struct hblk *h;
struct hblk * GC_push_next_marked_dirty(h)
struct hblk *h;
{
- register hdr * hhdr = HDR(h);
+ register hdr * hhdr;
if (!GC_dirty_maintained) { ABORT("dirty bits not set up"); }
for (;;) {
- h = GC_next_block(h);
+ h = GC_next_used_block(h);
if (h == 0) return(0);
hhdr = HDR(h);
# ifdef STUBBORN_ALLOC
@@ -1147,7 +1163,7 @@ struct hblk *h;
register hdr * hhdr = HDR(h);
for (;;) {
- h = GC_next_block(h);
+ h = GC_next_used_block(h);
if (h == 0) return(0);
hhdr = HDR(h);
if (hhdr -> hb_obj_kind == UNCOLLECTABLE) break;
diff --git a/boehm-gc/mark_rts.c b/boehm-gc/mark_rts.c
index 2f21ed324dd..0e84f2732fc 100644
--- a/boehm-gc/mark_rts.c
+++ b/boehm-gc/mark_rts.c
@@ -412,6 +412,9 @@ ptr_t cold_gc_frame;
if (0 == cold_gc_frame) return;
# ifdef STACK_GROWS_DOWN
GC_push_all_eager(GC_approx_sp(), cold_gc_frame);
+# ifdef IA64
+ --> fix this
+# endif
# else
GC_push_all_eager( cold_gc_frame, GC_approx_sp() );
# endif
@@ -419,6 +422,31 @@ ptr_t cold_gc_frame;
# ifdef STACK_GROWS_DOWN
GC_push_all_stack_partially_eager( GC_approx_sp(), GC_stackbottom,
cold_gc_frame );
+# ifdef IA64
+ /* We also need to push the register stack backing store. */
+ /* This should really be done in the same way as the */
+ /* regular stack. For now we fudge it a bit. */
+ /* Note that the backing store grows up, so we can't use */
+ /* GC_push_all_stack_partially_eager. */
+ {
+ extern word GC_save_regs_ret_val;
+ /* Previously set to backing store pointer. */
+ ptr_t bsp = (ptr_t) GC_save_regs_ret_val;
+ ptr_t cold_gc_bs_pointer;
+# ifdef ALL_INTERIOR_POINTERS
+ cold_gc_bs_pointer = bsp - 2048;
+ if (cold_gc_bs_pointer < BACKING_STORE_BASE) {
+ cold_gc_bs_pointer = BACKING_STORE_BASE;
+ }
+ GC_push_all(BACKING_STORE_BASE, cold_gc_bs_pointer);
+# else
+ cold_gc_bs_pointer = BACKING_STORE_BASE;
+# endif
+ GC_push_all_eager(cold_gc_bs_pointer, bsp);
+ /* All values should be sufficiently aligned that we */
+ /* don't have to worry about the boundary. */
+ }
+# endif
# else
GC_push_all_stack_partially_eager( GC_stackbottom, GC_approx_sp(),
cold_gc_frame );
diff --git a/boehm-gc/misc.c b/boehm-gc/misc.c
index 7779c43c112..40cbe97de9f 100644
--- a/boehm-gc/misc.c
+++ b/boehm-gc/misc.c
@@ -44,12 +44,13 @@
# else
# if defined(IRIX_THREADS) || defined(LINUX_THREADS) \
|| defined(IRIX_JDK_THREADS)
-# ifdef UNDEFINED
- pthread_mutex_t GC_allocate_ml = PTHREAD_MUTEX_INITIALIZER;
-# endif
pthread_t GC_lock_holder = NO_THREAD;
# else
- --> declare allocator lock here
+# if defined(HPUX_THREADS)
+ pthread_mutex_t GC_allocate_ml = PTHREAD_MUTEX_INITIALIZER;
+# else
+ --> declare allocator lock here
+# endif
# endif
# endif
# endif
@@ -73,6 +74,12 @@ GC_bool GC_dont_gc = 0;
GC_bool GC_quiet = 0;
+#ifdef FIND_LEAK
+ int GC_find_leak = 1;
+#else
+ int GC_find_leak = 0;
+#endif
+
/*ARGSUSED*/
GC_PTR GC_default_oom_fn GC_PROTO((size_t bytes_requested))
{
@@ -385,6 +392,11 @@ size_t GC_get_heap_size GC_PROTO(())
return ((size_t) GC_heapsize);
}
+size_t GC_get_free_bytes GC_PROTO(())
+{
+ return ((size_t) GC_large_free_bytes);
+}
+
size_t GC_get_bytes_since_gc GC_PROTO(())
{
return ((size_t) WORDS_TO_BYTES(GC_words_allocd));
@@ -427,11 +439,9 @@ void GC_init_inner()
# ifdef MSWIN32
GC_init_win32();
# endif
-# if defined(LINUX) && defined(POWERPC)
- GC_init_linuxppc();
-# endif
-# if defined(LINUX) && defined(SPARC)
- GC_init_linuxsparc();
+# if defined(LINUX) && \
+ (defined(POWERPC) || defined(ALPHA) || defined(SPARC) || defined(IA64))
+ GC_init_linux_data_start();
# endif
# ifdef SOLARIS_THREADS
GC_thr_init();
@@ -439,11 +449,12 @@ void GC_init_inner()
GC_dirty_init();
# endif
# if defined(IRIX_THREADS) || defined(LINUX_THREADS) \
- || defined(IRIX_JDK_THREADS)
+ || defined(IRIX_JDK_THREADS) || defined(HPUX_THREADS)
GC_thr_init();
# endif
# if !defined(THREADS) || defined(SOLARIS_THREADS) || defined(WIN32_THREADS) \
- || defined(IRIX_THREADS) || defined(LINUX_THREADS)
+ || defined(IRIX_THREADS) || defined(LINUX_THREADS) \
+ || defined(HPUX_THREADS)
if (GC_stackbottom == 0) {
GC_stackbottom = GC_get_stack_base();
}
@@ -558,7 +569,8 @@ void GC_init_inner()
void GC_enable_incremental GC_PROTO(())
{
-# if !defined(FIND_LEAK) && !defined(SMALL_CONFIG)
+# if !defined(SMALL_CONFIG)
+ if (!GC_find_leak) {
DCL_LOCK_STATE;
DISABLE_SIGNALS();
@@ -596,6 +608,7 @@ void GC_enable_incremental GC_PROTO(())
out:
UNLOCK();
ENABLE_SIGNALS();
+ }
# endif
}
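
GC_get_free_bytes, added above and declared in gc.h, reports GC_large_free_bytes as a lower bound; together with GC_get_heap_size it lets a client watch heap occupancy. A minimal sketch:

    #include <stdio.h>
    #include "gc.h"

    int main(void)
    {
        GC_malloc(100000);
        GC_gcollect();
        printf("heap %lu bytes, free (lower bound) %lu bytes\n",
               (unsigned long)GC_get_heap_size(),
               (unsigned long)GC_get_free_bytes());
        return 0;
    }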
diff --git a/boehm-gc/os_dep.c b/boehm-gc/os_dep.c
index 7b3ba5459d4..e83b5cacd44 100644
--- a/boehm-gc/os_dep.c
+++ b/boehm-gc/os_dep.c
@@ -1,6 +1,8 @@
/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
- * Copyright (c) 1996-1997 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
@@ -31,7 +33,7 @@
/* make sure the former gets defined to be the latter if appropriate. */
# include <features.h>
# if 2 <= __GLIBC__
-# if 0 == __GLIBC_MINOR__
+# if 2 == __GLIBC__ && 0 == __GLIBC_MINOR__
/* glibc 2.1 no longer has sigcontext.h. But signal.h */
/* has the right declaration for glibc 2.1. */
# include <sigcontext.h>
@@ -54,13 +56,13 @@
# include <signal.h>
/* Blatantly OS dependent routines, except for those that are related */
-/* dynamic loading. */
+/* to dynamic loading. */
# if !defined(THREADS) && !defined(STACKBOTTOM) && defined(HEURISTIC2)
# define NEED_FIND_LIMIT
# endif
-# if defined(IRIX_THREADS)
+# if defined(IRIX_THREADS) || defined(HPUX_THREADS)
# define NEED_FIND_LIMIT
# endif
@@ -72,7 +74,8 @@
# define NEED_FIND_LIMIT
# endif
-# if defined(LINUX) && (defined(POWERPC) || defined(SPARC))
+# if defined(LINUX) && \
+ (defined(POWERPC) || defined(SPARC) || defined(ALPHA) || defined(IA64))
# define NEED_FIND_LIMIT
# endif
@@ -139,29 +142,21 @@
# define OPT_PROT_EXEC 0
#endif
-#if defined(LINUX) && defined(POWERPC)
+#if defined(LINUX) && (defined(POWERPC) || defined(SPARC) || defined(ALPHA) \
+ || defined(IA64))
+ /* The I386 case can be handled without a search. The Alpha case */
+ /* used to be handled differently as well, but the rules changed */
+ /* for recent Linux versions. This seems to be the easiest way to */
+ /* cover all versions. */
ptr_t GC_data_start;
- void GC_init_linuxppc()
- {
- extern ptr_t GC_find_limit();
- extern char **_environ;
- /* This may need to be environ, without the underscore, for */
- /* some versions. */
- GC_data_start = GC_find_limit((ptr_t)&_environ, FALSE);
- }
-#endif
-
-#if defined(LINUX) && defined(SPARC)
- ptr_t GC_data_start;
+ extern char * GC_copyright[]; /* Any data symbol would do. */
- void GC_init_linuxsparc()
+ void GC_init_linux_data_start()
{
extern ptr_t GC_find_limit();
- extern char **_environ;
- /* This may need to be environ, without the underscore, for */
- /* some versions. */
- GC_data_start = GC_find_limit((ptr_t)&_environ, FALSE);
+
+ GC_data_start = GC_find_limit((ptr_t)GC_copyright, FALSE);
}
#endif
@@ -362,7 +357,8 @@ word GC_page_size;
}
# else
-# if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP)
+# if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP) \
+ || defined(USE_MUNMAP)
void GC_setpagesize()
{
GC_page_size = GETPAGESIZE();
@@ -441,6 +437,24 @@ ptr_t GC_get_stack_base()
ptr_t GC_get_stack_base()
{
+ struct Process *proc = (struct Process*)SysBase->ThisTask;
+
+ /* Reference: Amiga Guru Book Pages: 42,567,574 */
+ if (proc->pr_Task.tc_Node.ln_Type==NT_PROCESS
+ && proc->pr_CLI != NULL) {
+ /* first ULONG is StackSize */
+ /*longPtr = proc->pr_ReturnAddr;
+ size = longPtr[0];*/
+
+ return (char *)proc->pr_ReturnAddr + sizeof(ULONG);
+ } else {
+ return (char *)proc->pr_Task.tc_SPUpper;
+ }
+}
+
+#if 0 /* old version */
+ptr_t GC_get_stack_base()
+{
extern struct WBStartup *_WBenchMsg;
extern long __base;
extern long __stack;
@@ -463,10 +477,9 @@ ptr_t GC_get_stack_base()
}
return (ptr_t)(__base + GC_max(size, __stack));
}
+#endif /* 0 */
-# else
-
-
+# else /* !AMIGA, !OS2, ... */
# ifdef NEED_FIND_LIMIT
/* Some tools to implement HEURISTIC2 */
@@ -486,9 +499,9 @@ ptr_t GC_get_stack_base()
typedef void (*handler)();
# endif
-# if defined(SUNOS5SIGS) || defined(IRIX5)
+# if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1)
static struct sigaction old_segv_act;
-# if defined(_sigargs) /* !Irix6.x */
+# if defined(_sigargs) || defined(HPUX) /* !Irix6.x */
static struct sigaction old_bus_act;
# endif
# else
@@ -497,7 +510,7 @@ ptr_t GC_get_stack_base()
void GC_setup_temporary_fault_handler()
{
-# if defined(SUNOS5SIGS) || defined(IRIX5)
+# if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1)
struct sigaction act;
act.sa_handler = GC_fault_handler;
@@ -516,10 +529,11 @@ ptr_t GC_get_stack_base()
(void) sigaction(SIGSEGV, &act, 0);
# else
(void) sigaction(SIGSEGV, &act, &old_segv_act);
-# ifdef _sigargs /* Irix 5.x, not 6.x */
- /* Under 5.x, we may get SIGBUS. */
- /* Pthreads doesn't exist under 5.x, so we don't */
- /* have to worry in the threads case. */
+# if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
+ || defined(HPUX)
+ /* Under Irix 5.x or HP/UX, we may get SIGBUS. */
+ /* Pthreads doesn't exist under Irix 5.x, so we */
+ /* don't have to worry in the threads case. */
(void) sigaction(SIGBUS, &act, &old_bus_act);
# endif
# endif /* IRIX_THREADS */
@@ -533,9 +547,10 @@ ptr_t GC_get_stack_base()
void GC_reset_fault_handler()
{
-# if defined(SUNOS5SIGS) || defined(IRIX5)
+# if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1)
(void) sigaction(SIGSEGV, &old_segv_act, 0);
-# ifdef _sigargs /* Irix 5.x, not 6.x */
+# if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
+ || defined(HPUX)
(void) sigaction(SIGBUS, &old_bus_act, 0);
# endif
# else
@@ -580,6 +595,40 @@ ptr_t GC_get_stack_base()
}
# endif
+#ifdef LINUX_STACKBOTTOM
+
+# define STAT_SKIP 27 /* Number of fields preceding startstack */
+ /* field in /proc/<pid>/stat */
+
+ ptr_t GC_linux_stack_base(void)
+ {
+ char buf[50];
+ FILE *f;
+ char c;
+ word result = 0;
+ int i;
+
+ sprintf(buf, "/proc/%d/stat", getpid());
+ f = fopen(buf, "r");
+ if (NULL == f) ABORT("Couldn't open /proc/<pid>/stat");
+ c = getc(f);
+ /* Skip the required number of fields. This number is hopefully */
+ /* constant across all Linux implementations. */
+ for (i = 0; i < STAT_SKIP; ++i) {
+ while (isspace(c)) c = getc(f);
+ while (!isspace(c)) c = getc(f);
+ }
+ while (isspace(c)) c = getc(f);
+ while (isdigit(c)) {
+ result *= 10;
+ result += c - '0';
+ c = getc(f);
+ }
+ if (result < 0x10000000) ABORT("Absurd stack bottom value");
+ return (ptr_t)result;
+ }
+
+#endif /* LINUX_STACKBOTTOM */
ptr_t GC_get_stack_base()
{
@@ -601,6 +650,9 @@ ptr_t GC_get_stack_base()
& ~STACKBOTTOM_ALIGNMENT_M1);
# endif
# endif /* HEURISTIC1 */
+# ifdef LINUX_STACKBOTTOM
+ result = GC_linux_stack_base();
+# endif
# ifdef HEURISTIC2
# ifdef STACK_GROWS_DOWN
result = GC_find_limit((ptr_t)(&dummy), TRUE);
@@ -851,6 +903,72 @@ void GC_register_data_segments()
# else
# ifdef AMIGA
+ void GC_register_data_segments()
+ {
+ struct Process *proc;
+ struct CommandLineInterface *cli;
+ BPTR myseglist;
+ ULONG *data;
+
+ int num;
+
+
+# ifdef __GNUC__
+ ULONG dataSegSize;
+ GC_bool found_segment = FALSE;
+ extern char __data_size[];
+
+ dataSegSize=__data_size+8;
+ /* Can't find the location of __data_size, because
+ it's possible that it is inside the segment. */
+
+# endif
+
+ proc= (struct Process*)SysBase->ThisTask;
+
+ /* Reference: Amiga Guru Book Pages: 538ff,565,573
+ and XOper.asm */
+ if (proc->pr_Task.tc_Node.ln_Type==NT_PROCESS) {
+ if (proc->pr_CLI == NULL) {
+ myseglist = proc->pr_SegList;
+ } else {
+ /* ProcLoaded 'Loaded as a command: '*/
+ cli = BADDR(proc->pr_CLI);
+ myseglist = cli->cli_Module;
+ }
+ } else {
+ ABORT("Not a Process.");
+ }
+
+ if (myseglist == NULL) {
+ ABORT("Arrrgh.. can't find segments, aborting");
+ }
+
+ /* xoper hunks Shell Process */
+
+ num=0;
+ for (data = (ULONG *)BADDR(myseglist); data != NULL;
+ data = (ULONG *)BADDR(data[0])) {
+ if (((ULONG) GC_register_data_segments < (ULONG) &data[1]) ||
+ ((ULONG) GC_register_data_segments > (ULONG) &data[1] + data[-1])) {
+# ifdef __GNUC__
+ if (dataSegSize == data[-1]) {
+ found_segment = TRUE;
+ }
+# endif
+ GC_add_roots_inner((char *)&data[1],
+ ((char *)&data[1]) + data[-1], FALSE);
+ }
+ ++num;
+ } /* for */
+# ifdef __GNUC__
+ if (!found_segment) {
+ ABORT("Can`t find correct Segments.\nSolution: Use an newer version of ixemul.library");
+ }
+# endif
+ }
+
+#if 0 /* old version */
void GC_register_data_segments()
{
extern struct WBStartup *_WBenchMsg;
@@ -892,6 +1010,7 @@ void GC_register_data_segments()
}
}
}
+#endif /* old version */
# else
@@ -932,7 +1051,8 @@ int * etext_addr;
void GC_register_data_segments()
{
-# if !defined(PCR) && !defined(SRC_M3) && !defined(NEXT) && !defined(MACOS)
+# if !defined(PCR) && !defined(SRC_M3) && !defined(NEXT) && !defined(MACOS) \
+ && !defined(MACOSX)
# if defined(REDIRECT_MALLOC) && defined(SOLARIS_THREADS)
/* As of Solaris 2.3, the Solaris threads implementation */
/* allocates the data structure for the initial thread with */
@@ -946,7 +1066,7 @@ void GC_register_data_segments()
GC_add_roots_inner(DATASTART, (char *)(DATAEND), FALSE);
# endif
# endif
-# if !defined(PCR) && defined(NEXT)
+# if !defined(PCR) && (defined(NEXT) || defined(MACOSX))
GC_add_roots_inner(DATASTART, (char *) get_end(), FALSE);
# endif
# if defined(MACOS)
@@ -1160,6 +1280,95 @@ void GC_win32_free_heap ()
# endif
+#ifdef USE_MUNMAP
+
+/* For now, this only works on some Unix-like systems. If you */
+/* have something else, don't define USE_MUNMAP. */
+/* We assume ANSI C to support this feature. */
+#include <unistd.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <fcntl.h>
+
+/* Compute a page aligned starting address for the unmap */
+/* operation on a block of size bytes starting at start. */
+/* Return 0 if the block is too small to make this feasible. */
+ptr_t GC_unmap_start(ptr_t start, word bytes)
+{
+ ptr_t result = start;
+ /* Round start to next page boundary. */
+ result += GC_page_size - 1;
+ result = (ptr_t)((word)result & ~(GC_page_size - 1));
+ if (result + GC_page_size > start + bytes) return 0;
+ return result;
+}
+
+/* Compute end address for an unmap operation on the indicated */
+/* block. */
+ptr_t GC_unmap_end(ptr_t start, word bytes)
+{
+ ptr_t end_addr = start + bytes;
+ end_addr = (ptr_t)((word)end_addr & ~(GC_page_size - 1));
+ return end_addr;
+}
+
+/* We assume that GC_remap is called on exactly the same range */
+/* as a previous call to GC_unmap. It is safe to consistently */
+/* round the endpoints in both places. */
+void GC_unmap(ptr_t start, word bytes)
+{
+ ptr_t start_addr = GC_unmap_start(start, bytes);
+ ptr_t end_addr = GC_unmap_end(start, bytes);
+ word len = end_addr - start_addr;
+ if (0 == start_addr) return;
+ if (munmap(start_addr, len) != 0) ABORT("munmap failed");
+ GC_unmapped_bytes += len;
+}
+
+
+void GC_remap(ptr_t start, word bytes)
+{
+ static int zero_descr = -1;
+ ptr_t start_addr = GC_unmap_start(start, bytes);
+ ptr_t end_addr = GC_unmap_end(start, bytes);
+ word len = end_addr - start_addr;
+ ptr_t result;
+
+ if (-1 == zero_descr) zero_descr = open("/dev/zero", O_RDWR);
+ if (0 == start_addr) return;
+ result = mmap(start_addr, len, PROT_READ | PROT_WRITE | OPT_PROT_EXEC,
+ MAP_FIXED | MAP_PRIVATE, zero_descr, 0);
+ if (result != start_addr) {
+ ABORT("mmap remapping failed");
+ }
+ GC_unmapped_bytes -= len;
+}
+
+/* Two adjacent blocks have already been unmapped and are about to */
+/* be merged. Unmap the whole block. This typically requires */
+/* that we unmap a small section in the middle that was not previously */
+/* unmapped due to alignment constraints. */
+void GC_unmap_gap(ptr_t start1, word bytes1, ptr_t start2, word bytes2)
+{
+ ptr_t start1_addr = GC_unmap_start(start1, bytes1);
+ ptr_t end1_addr = GC_unmap_end(start1, bytes1);
+ ptr_t start2_addr = GC_unmap_start(start2, bytes2);
+ ptr_t end2_addr = GC_unmap_end(start2, bytes2);
+ ptr_t start_addr = end1_addr;
+ ptr_t end_addr = start2_addr;
+ word len;
+ GC_ASSERT(start1 + bytes1 == start2);
+ if (0 == start1_addr) start_addr = GC_unmap_start(start1, bytes1 + bytes2);
+ if (0 == start2_addr) end_addr = GC_unmap_end(start1, bytes1 + bytes2);
+ if (0 == start_addr) return;
+ len = end_addr - start_addr;
+ if (len != 0 && munmap(start_addr, len) != 0) ABORT("munmap failed");
+ GC_unmapped_bytes += len;
+}
+
+#endif /* USE_MUNMAP */
+
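(The rounding helpers can be checked in isolation. A minimal sketch, in which PAGE_SIZE, ptr_t, and word are stand-ins for the collector's GC_page_size and its own types, and the literal addresses are purely illustrative:)

    #include <assert.h>

    #define PAGE_SIZE 4096      /* stand-in for GC_page_size */
    typedef char *ptr_t;
    typedef unsigned long word;

    /* Same rounding as GC_unmap_start: round up to a page        */
    /* boundary, and return 0 if no whole page fits in the block. */
    static ptr_t unmap_start(ptr_t start, word bytes)
    {
        ptr_t result = start + PAGE_SIZE - 1;
        result = (ptr_t)((word)result & ~(word)(PAGE_SIZE - 1));
        if (result + PAGE_SIZE > start + bytes) return 0;
        return result;
    }

    int main(void)
    {
        /* A block smaller than a page can never be unmapped.      */
        assert(unmap_start((ptr_t)0x10010, PAGE_SIZE - 1) == 0);
        /* A 2-page block starting mid-page yields the one aligned */
        /* page it fully contains.                                 */
        assert(unmap_start((ptr_t)0x10010, 2 * PAGE_SIZE)
               == (ptr_t)0x11000);
        return 0;
    }

This is why GC_unmap_gap exists: two abutting blocks may each keep an unaligned fringe mapped, and merging them exposes a small fully contained span in the middle that can now be unmapped.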
/* Routine for pushing any additional roots. In THREADS */
/* environment, this is also responsible for marking from */
/* thread stacks. In the SRC_M3 case, it also handles */
@@ -1277,7 +1486,7 @@ void GC_default_push_other_roots()
# if defined(SOLARIS_THREADS) || defined(WIN32_THREADS) \
|| defined(IRIX_THREADS) || defined(LINUX_THREADS) \
- || defined(IRIX_PCR_THREADS)
+ || defined(IRIX_JDK_THREADS) || defined(HPUX_THREADS)
extern void GC_push_all_stacks();
@@ -1404,12 +1613,12 @@ struct hblk *h;
# include <sys/syscall.h>
# define PROTECT(addr, len) \
- if (mprotect((caddr_t)(addr), (int)(len), \
+ if (mprotect((caddr_t)(addr), (size_t)(len), \
PROT_READ | OPT_PROT_EXEC) < 0) { \
ABORT("mprotect failed"); \
}
# define UNPROTECT(addr, len) \
- if (mprotect((caddr_t)(addr), (int)(len), \
+ if (mprotect((caddr_t)(addr), (size_t)(len), \
PROT_WRITE | PROT_READ | OPT_PROT_EXEC ) < 0) { \
ABORT("un-mprotect failed"); \
}
@@ -1438,7 +1647,11 @@ struct hblk *h;
typedef void (* SIG_PF)();
#endif
#if defined(SUNOS5SIGS) || defined(OSF1) || defined(LINUX)
+# ifdef __STDC__
typedef void (* SIG_PF)(int);
+# else
+ typedef void (* SIG_PF)();
+# endif
#endif
#if defined(MSWIN32)
typedef LPTOP_LEVEL_EXCEPTION_FILTER SIG_PF;
@@ -1450,17 +1663,34 @@ struct hblk *h;
typedef void (* REAL_SIG_PF)(int, int, struct sigcontext *);
#endif
#if defined(SUNOS5SIGS)
- typedef void (* REAL_SIG_PF)(int, struct siginfo *, void *);
+# ifdef HPUX
+# define SIGINFO __siginfo
+# else
+# define SIGINFO siginfo
+# endif
+# ifdef __STDC__
+ typedef void (* REAL_SIG_PF)(int, struct SIGINFO *, void *);
+# else
+ typedef void (* REAL_SIG_PF)();
+# endif
#endif
#if defined(LINUX)
# include <linux/version.h>
-# if (LINUX_VERSION_CODE >= 0x20100) && !defined(M68K) || defined(ALPHA)
+# if (LINUX_VERSION_CODE >= 0x20100) && !defined(M68K) || defined(ALPHA) || defined(IA64)
typedef struct sigcontext s_c;
# else
typedef struct sigcontext_struct s_c;
# endif
+# if defined(ALPHA) || defined(M68K)
+ typedef void (* REAL_SIG_PF)(int, int, s_c *);
+# else
+# if defined(IA64)
+ typedef void (* REAL_SIG_PF)(int, siginfo_t *, s_c *);
+# else
+ typedef void (* REAL_SIG_PF)(int, s_c);
+# endif
+# endif
# ifdef ALPHA
- typedef void (* REAL_SIG_PF)(int, int, s_c *);
/* Retrieve fault address from sigcontext structure by decoding */
/* instruction. */
char * get_fault_addr(s_c *sc) {
@@ -1472,8 +1702,6 @@ struct hblk *h;
faultaddr += (word) (((int)instr << 16) >> 16);
return (char *)faultaddr;
}
-# else /* !ALPHA */
- typedef void (* REAL_SIG_PF)(int, s_c);
# endif /* !ALPHA */
# endif
@@ -1509,21 +1737,41 @@ SIG_PF GC_old_segv_handler; /* Also old MSWIN32 ACCESS_VIOLATION filter */
# endif
# endif
# if defined(LINUX)
-# ifdef ALPHA
+# if defined(ALPHA) || defined(M68K)
void GC_write_fault_handler(int sig, int code, s_c * sc)
# else
- void GC_write_fault_handler(int sig, s_c sc)
+# if defined(IA64)
+ void GC_write_fault_handler(int sig, siginfo_t * si, s_c * scp)
+# else
+ void GC_write_fault_handler(int sig, s_c sc)
+# endif
# endif
# define SIG_OK (sig == SIGSEGV)
# define CODE_OK TRUE
- /* Empirically c.trapno == 14, but is that useful? */
- /* We assume Intel architecture, so alignment */
- /* faults are not possible. */
+ /* Empirically c.trapno == 14, on IA32, but is that useful? */
+ /* Should probably consider alignment issues on other */
+ /* architectures. */
# endif
# if defined(SUNOS5SIGS)
- void GC_write_fault_handler(int sig, struct siginfo *scp, void * context)
-# define SIG_OK (sig == SIGSEGV)
-# define CODE_OK (scp -> si_code == SEGV_ACCERR)
+# ifdef __STDC__
+ void GC_write_fault_handler(int sig, struct SIGINFO *scp, void * context)
+# else
+ void GC_write_fault_handler(sig, scp, context)
+ int sig;
+ struct SIGINFO *scp;
+ void * context;
+# endif
+# ifdef HPUX
+# define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
+# define CODE_OK (scp -> si_code == SEGV_ACCERR) \
+ || (scp -> si_code == BUS_ADRERR) \
+ || (scp -> si_code == BUS_UNKNOWN) \
+ || (scp -> si_code == SEGV_UNKNOWN) \
+ || (scp -> si_code == BUS_OBJERR)
+# else
+# define SIG_OK (sig == SIGSEGV)
+# define CODE_OK (scp -> si_code == SEGV_ACCERR)
+# endif
# endif
# if defined(MSWIN32)
LONG WINAPI GC_write_fault_handler(struct _EXCEPTION_POINTERS *exc_info)
@@ -1575,7 +1823,15 @@ SIG_PF GC_old_segv_handler; /* Also old MSWIN32 ACCESS_VIOLATION filter */
# ifdef ALPHA
char * addr = get_fault_addr(sc);
# else
+# ifdef IA64
+ char * addr = si -> si_addr;
+# else
+# if defined(POWERPC)
+ char * addr = (char *) (sc.regs->dar);
+# else
--> architecture not supported
+# endif
+# endif
# endif
# endif
# endif
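(All of these handler variants serve one mechanism: heap pages are write-protected between collections, the first store to a page faults, the handler records the page as dirty and unprotects it, and the faulting store restarts. On Irix 5 and HP/UX the fault arrives as SIGBUS, hence the extra hooks. A minimal sketch of the mechanism, assuming a modern POSIX system with SA_SIGINFO and MAP_ANONYMOUS, conveniences the per-platform code above cannot assume:)

    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    static char *page;                    /* one "heap" page */
    static volatile sig_atomic_t dirty;   /* its dirty bit   */

    static void on_fault(int sig, siginfo_t *si, void *ctx)
    {
        /* A real handler checks si->si_addr against the heap and  */
        /* chains to the old handler otherwise, as above.  Here we */
        /* just record the write and unprotect, so the faulting    */
        /* store restarts and succeeds.                            */
        (void)sig; (void)si; (void)ctx;
        dirty = 1;
        mprotect(page, 4096, PROT_READ | PROT_WRITE);
    }

    int main(void)
    {
        struct sigaction act;

        memset(&act, 0, sizeof act);
        act.sa_sigaction = on_fault;
        act.sa_flags = SA_SIGINFO;
        sigaction(SIGSEGV, &act, 0);

        page = mmap(0, 4096, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        mprotect(page, 4096, PROT_READ);  /* "clean" the page */
        page[0] = 1;                      /* faults exactly once */
        printf("dirty = %d\n", (int)dirty);
        return 0;
    }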
@@ -1628,10 +1884,14 @@ SIG_PF GC_old_segv_handler; /* Also old MSWIN32 ACCESS_VIOLATION filter */
return;
# endif
# if defined (LINUX)
-# ifdef ALPHA
+# if defined(ALPHA) || defined(M68K)
(*(REAL_SIG_PF)old_handler) (sig, code, sc);
# else
+# if defined(IA64)
+ (*(REAL_SIG_PF)old_handler) (sig, si, scp);
+# else
(*(REAL_SIG_PF)old_handler) (sig, sc);
+# endif
# endif
return;
# endif
@@ -1699,7 +1959,7 @@ struct hblk *h;
void GC_dirty_init()
{
-#if defined(SUNOS5SIGS) || defined(IRIX5)
+#if defined(SUNOS5SIGS) || defined(IRIX5) /* || defined(OSF1) */
struct sigaction act, oldact;
# ifdef IRIX5
act.sa_flags = SA_RESTART;
@@ -1743,7 +2003,7 @@ void GC_dirty_init()
}
# endif
# if defined(SUNOS5SIGS) || defined(IRIX5)
-# if defined(IRIX_THREADS) || defined(IRIX_PCR_THREADS)
+# if defined(IRIX_THREADS) || defined(IRIX_JDK_THREADS)
sigaction(SIGSEGV, 0, &oldact);
sigaction(SIGSEGV, &act, 0);
# else
@@ -1769,6 +2029,15 @@ void GC_dirty_init()
GC_err_printf0("Replaced other SIGSEGV handler\n");
# endif
}
+# ifdef HPUX
+ sigaction(SIGBUS, &act, &oldact);
+ GC_old_bus_handler = oldact.sa_handler;
+      if (GC_old_bus_handler != SIG_DFL) {
+# ifdef PRINTSTATS
+ GC_err_printf0("Replaced other SIGBUS handler\n");
+# endif
+ }
+# endif
# endif
# if defined(MSWIN32)
GC_old_segv_handler = SetUnhandledExceptionFilter(GC_write_fault_handler);
@@ -2241,7 +2510,11 @@ struct hblk *h;
# if defined (DRSNX)
# include <sys/sparc/frame.h>
# else
-# include <sys/frame.h>
+# if defined(OPENBSD)
+# include <frame.h>
+# else
+# include <sys/frame.h>
+# endif
# endif
# endif
# if NARGS > 6
@@ -2251,6 +2524,15 @@ struct hblk *h;
#ifdef SAVE_CALL_CHAIN
/* Fill in the pc and argument information for up to NFRAMES of my */
/* callers. Ignore my frame and my callers frame. */
+
+#ifdef OPENBSD
+# define FR_SAVFP fr_fp
+# define FR_SAVPC fr_pc
+#else
+# define FR_SAVFP fr_savfp
+# define FR_SAVPC fr_savpc
+#endif
+
void GC_save_callers (info)
struct callinfo info[NFRAMES];
{
@@ -2261,11 +2543,11 @@ struct callinfo info[NFRAMES];
frame = (struct frame *) GC_save_regs_in_stack ();
- for (fp = frame -> fr_savfp; fp != 0 && nframes < NFRAMES;
- fp = fp -> fr_savfp, nframes++) {
+ for (fp = frame -> FR_SAVFP; fp != 0 && nframes < NFRAMES;
+ fp = fp -> FR_SAVFP, nframes++) {
register int i;
- info[nframes].ci_pc = fp->fr_savpc;
+ info[nframes].ci_pc = fp->FR_SAVPC;
for (i = 0; i < NARGS; i++) {
info[nframes].ci_arg[i] = ~(fp->fr_arg[i]);
}
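(The FR_SAVFP/FR_SAVPC indirection exists because <frame.h> names the saved-frame-pointer and saved-pc fields differently across systems. A sketch of the same walk in generic terms; the two-field struct frame layout is an assumption that holds on classic x86 with frame pointers enabled, not everywhere, and __builtin_frame_address is GCC-specific. It breaks under -fomit-frame-pointer:)

    #include <stdio.h>

    /* Assumed layout: a frame pointer addresses the pair          */
    /* { caller's fp, return pc }.                                 */
    struct frame {
        struct frame *fp;
        void         *pc;
    };

    #define NFRAMES 10

    void save_callers_sketch(void)
    {
        struct frame *fp = (struct frame *)__builtin_frame_address(0);
        int nframes;

        /* Same loop shape as GC_save_callers: follow saved frame  */
        /* pointers until null or the frame budget is exhausted.   */
        for (nframes = 0; fp != 0 && nframes < NFRAMES;
             fp = fp->fp, nframes++) {
            printf("frame %d: pc = %p\n", nframes, fp->pc);
        }
    }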
diff --git a/boehm-gc/reclaim.c b/boehm-gc/reclaim.c
index 407b4c68194..6e0f53bb058 100644
--- a/boehm-gc/reclaim.c
+++ b/boehm-gc/reclaim.c
@@ -1,6 +1,8 @@
/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
- * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1991-1996 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
@@ -11,7 +13,6 @@
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
-/* Boehm, February 15, 1996 2:41 pm PST */
#include <stdio.h>
#include "gc_priv.h"
@@ -19,7 +20,6 @@
signed_word GC_mem_found = 0;
/* Number of words of memory reclaimed */
-# ifdef FIND_LEAK
static void report_leak(p, sz)
ptr_t p;
word sz;
@@ -39,13 +39,10 @@ word sz;
}
# define FOUND_FREE(hblk, word_no) \
- if (abort_if_found) { \
+ { \
report_leak((ptr_t)hblk + WORDS_TO_BYTES(word_no), \
HDR(hblk) -> hb_sz); \
}
-# else
-# define FOUND_FREE(hblk, word_no)
-# endif
/*
* reclaim phase
@@ -71,6 +68,139 @@ register hdr * hhdr;
return(TRUE);
}
+/* The following functions sometimes return a DONT_KNOW value. */
+#define DONT_KNOW 2
+
+#ifdef SMALL_CONFIG
+# define GC_block_nearly_full1(hhdr, pat1) DONT_KNOW
+# define GC_block_nearly_full3(hhdr, pat1, pat2) DONT_KNOW
+# define GC_block_nearly_full(hhdr) DONT_KNOW
+#else
+
+/*
+ * Test whether nearly all of the mark words consist of the same
+ * repeating pattern.
+ */
+#define FULL_THRESHOLD (MARK_BITS_SZ/16)
+
+GC_bool GC_block_nearly_full1(hhdr, pat1)
+hdr *hhdr;
+word pat1;
+{
+ unsigned i;
+ unsigned misses = 0;
+ GC_ASSERT((MARK_BITS_SZ & 1) == 0);
+ for (i = 0; i < MARK_BITS_SZ; ++i) {
+ if ((hhdr -> hb_marks[i] | ~pat1) != ONES) {
+ if (++misses > FULL_THRESHOLD) return FALSE;
+ }
+ }
+ return TRUE;
+}
+
+/*
+ * Test whether the same repeating 3 word pattern occurs in nearly
+ * all the mark bit slots.
+ * This is used as a heuristic, so we're a bit sloppy and ignore
+ * the last one or two words.
+ */
+GC_bool GC_block_nearly_full3(hhdr, pat1, pat2, pat3)
+hdr *hhdr;
+word pat1, pat2, pat3;
+{
+ unsigned i;
+ unsigned misses = 0;
+
+ if (MARK_BITS_SZ < 4) {
+ return DONT_KNOW;
+ }
+ for (i = 0; i < MARK_BITS_SZ - 2; i += 3) {
+ if ((hhdr -> hb_marks[i] | ~pat1) != ONES) {
+ if (++misses > FULL_THRESHOLD) return FALSE;
+ }
+ if ((hhdr -> hb_marks[i+1] | ~pat2) != ONES) {
+ if (++misses > FULL_THRESHOLD) return FALSE;
+ }
+ if ((hhdr -> hb_marks[i+2] | ~pat3) != ONES) {
+ if (++misses > FULL_THRESHOLD) return FALSE;
+ }
+ }
+ return TRUE;
+}
+
+/* Check whether a small object block is nearly full by looking at only */
+/* the mark bits. */
+/* We manually precomputed the mark bit patterns that need to be */
+/* checked for, and we give up on the ones that are unlikely to occur, */
+/* or have period > 3. */
+/* This would be a lot easier with a mark bit per object instead of per */
+/* word, but that would require computing object numbers in the mark  */
+/* loop, which would require different data structures ... */
+GC_bool GC_block_nearly_full(hhdr)
+hdr *hhdr;
+{
+ int sz = hhdr -> hb_sz;
+
+# if CPP_WORDSZ != 32 && CPP_WORDSZ != 64
+ return DONT_KNOW; /* Shouldn't be used in any standard config. */
+# endif
+ if (0 != HDR_WORDS) return DONT_KNOW;
+ /* Also shouldn't happen */
+# if CPP_WORDSZ == 32
+ switch(sz) {
+ case 1:
+ return GC_block_nearly_full1(hhdr, 0xffffffffl);
+ case 2:
+ return GC_block_nearly_full1(hhdr, 0x55555555l);
+ case 4:
+ return GC_block_nearly_full1(hhdr, 0x11111111l);
+ case 6:
+ return GC_block_nearly_full3(hhdr, 0x41041041l,
+ 0x10410410l,
+ 0x04104104l);
+ case 8:
+ return GC_block_nearly_full1(hhdr, 0x01010101l);
+ case 12:
+ return GC_block_nearly_full3(hhdr, 0x01001001l,
+ 0x10010010l,
+ 0x00100100l);
+ case 16:
+ return GC_block_nearly_full1(hhdr, 0x00010001l);
+ case 32:
+ return GC_block_nearly_full1(hhdr, 0x00000001l);
+ default:
+ return DONT_KNOW;
+ }
+# endif
+# if CPP_WORDSZ == 64
+ switch(sz) {
+ case 1:
+ return GC_block_nearly_full1(hhdr, 0xffffffffffffffffl);
+ case 2:
+ return GC_block_nearly_full1(hhdr, 0x5555555555555555l);
+ case 4:
+ return GC_block_nearly_full1(hhdr, 0x1111111111111111l);
+ case 6:
+ return GC_block_nearly_full3(hhdr, 0x1041041041041041l,
+ 0x4104104104104104l,
+ 0x0410410410410410l);
+ case 8:
+ return GC_block_nearly_full1(hhdr, 0x0101010101010101l);
+ case 12:
+ return GC_block_nearly_full3(hhdr, 0x1001001001001001l,
+ 0x0100100100100100l,
+ 0x0010010010010010l);
+ case 16:
+ return GC_block_nearly_full1(hhdr, 0x0001000100010001l);
+ case 32:
+ return GC_block_nearly_full1(hhdr, 0x0000000100000001l);
+ default:
+ return DONT_KNOW;
+ }
+# endif
+}
+#endif /* !SMALL_CONFIG */
+
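(The precomputed constants follow directly from the object size: with one mark bit per word, a fully marked block of sz-word objects sets every sz-th bit, so sizes dividing the word size yield a single repeating mark word, while sizes 6 and 12 need the rotated 3-word variants. A sketch that regenerates the single-word patterns under the CPP_WORDSZ == 32 case above:)

    #include <stdio.h>

    /* Regenerate the 32-bit mark patterns used in               */
    /* GC_block_nearly_full: one bit set per sz-word object.     */
    int main(void)
    {
        int sizes[] = { 1, 2, 4, 8, 16, 32 };
        int i, sz, bit;

        for (i = 0; i < 6; ++i) {
            unsigned long pat = 0;
            sz = sizes[i];
            for (bit = 0; bit < 32; bit += sz)
                pat |= 1ul << bit;
            printf("sz = %2d: 0x%08lx\n", sz, pat);
        }
        return 0;   /* prints 0xffffffff, 0x55555555, 0x11111111, ... */
    }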
# ifdef GATHERSTATS
# define INCR_WORDS(sz) n_words_found += (sz)
# else
@@ -82,10 +212,9 @@ register hdr * hhdr;
* Clears unmarked objects.
*/
/*ARGSUSED*/
-ptr_t GC_reclaim_clear(hbp, hhdr, sz, list, abort_if_found)
+ptr_t GC_reclaim_clear(hbp, hhdr, sz, list)
register struct hblk *hbp; /* ptr to current heap block */
register hdr * hhdr;
-GC_bool abort_if_found; /* Abort if a reclaimable object is found */
register ptr_t list;
register word sz;
{
@@ -105,7 +234,6 @@ register word sz;
if( mark_bit_from_hdr(hhdr, word_no) ) {
p += sz;
} else {
- FOUND_FREE(hbp, word_no);
INCR_WORDS(sz);
/* object is available - put on list */
obj_link(p) = list;
@@ -131,10 +259,9 @@ register word sz;
* A special case for 2 word composite objects (e.g. cons cells):
*/
/*ARGSUSED*/
-ptr_t GC_reclaim_clear2(hbp, hhdr, list, abort_if_found)
+ptr_t GC_reclaim_clear2(hbp, hhdr, list)
register struct hblk *hbp; /* ptr to current heap block */
hdr * hhdr;
-GC_bool abort_if_found; /* Abort if a reclaimable object is found */
register ptr_t list;
{
register word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
@@ -146,7 +273,6 @@ register ptr_t list;
register int i;
# define DO_OBJ(start_displ) \
if (!(mark_word & ((word)1 << start_displ))) { \
- FOUND_FREE(hbp, p - (word *)hbp + start_displ); \
p[start_displ] = (word)list; \
list = (ptr_t)(p+start_displ); \
p[start_displ+1] = 0; \
@@ -179,10 +305,9 @@ register ptr_t list;
* Another special case for 4 word composite objects:
*/
/*ARGSUSED*/
-ptr_t GC_reclaim_clear4(hbp, hhdr, list, abort_if_found)
+ptr_t GC_reclaim_clear4(hbp, hhdr, list)
register struct hblk *hbp; /* ptr to current heap block */
hdr * hhdr;
-GC_bool abort_if_found; /* Abort if a reclaimable object is found */
register ptr_t list;
{
register word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
@@ -193,7 +318,6 @@ register ptr_t list;
register word mark_word;
# define DO_OBJ(start_displ) \
if (!(mark_word & ((word)1 << start_displ))) { \
- FOUND_FREE(hbp, p - (word *)hbp + start_displ); \
p[start_displ] = (word)list; \
list = (ptr_t)(p+start_displ); \
p[start_displ+1] = 0; \
@@ -239,10 +363,9 @@ register ptr_t list;
/* The same thing, but don't clear objects: */
/*ARGSUSED*/
-ptr_t GC_reclaim_uninit(hbp, hhdr, sz, list, abort_if_found)
+ptr_t GC_reclaim_uninit(hbp, hhdr, sz, list)
register struct hblk *hbp; /* ptr to current heap block */
register hdr * hhdr;
-GC_bool abort_if_found; /* Abort if a reclaimable object is found */
register ptr_t list;
register word sz;
{
@@ -260,7 +383,6 @@ register word sz;
/* go through all words in block */
while( p <= plim ) {
if( !mark_bit_from_hdr(hhdr, word_no) ) {
- FOUND_FREE(hbp, word_no);
INCR_WORDS(sz);
/* object is available - put on list */
obj_link(p) = list;
@@ -275,15 +397,42 @@ register word sz;
return(list);
}
+/* Don't really reclaim objects, just check for unmarked ones: */
+/*ARGSUSED*/
+void GC_reclaim_check(hbp, hhdr, sz)
+register struct hblk *hbp; /* ptr to current heap block */
+register hdr * hhdr;
+register word sz;
+{
+ register int word_no;
+ register word *p, *plim;
+# ifdef GATHERSTATS
+ register int n_words_found = 0;
+# endif
+
+ p = (word *)(hbp->hb_body);
+ word_no = HDR_WORDS;
+ plim = (word *)((((word)hbp) + HBLKSIZE)
+ - WORDS_TO_BYTES(sz));
+
+ /* go through all words in block */
+ while( p <= plim ) {
+ if( !mark_bit_from_hdr(hhdr, word_no) ) {
+ FOUND_FREE(hbp, word_no);
+ }
+ p += sz;
+ word_no += sz;
+ }
+}
+
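(Splitting the check out into GC_reclaim_check makes leak detection a pure reporting pass driven by the runtime GC_find_leak flag, rather than a compile-time abort. A usage sketch; GC_find_leak and GC_gcollect are assumed to be exposed by gc.h in this release, as the Makefile's FIND_LEAK note suggests, and the report text itself comes from report_leak above:)

    #include "gc.h"

    int main(void)
    {
        void *p;

        GC_find_leak = 1;   /* report unreachable objects          */
        p = GC_malloc(100);
        p = 0;              /* drop the only reference ...         */
        (void)p;
        GC_gcollect();      /* ... so this collection should       */
                            /* report the 100-byte object leaked   */
        return 0;
    }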
#ifndef SMALL_CONFIG
/*
* Another special case for 2 word atomic objects:
*/
/*ARGSUSED*/
-ptr_t GC_reclaim_uninit2(hbp, hhdr, list, abort_if_found)
+ptr_t GC_reclaim_uninit2(hbp, hhdr, list)
register struct hblk *hbp; /* ptr to current heap block */
hdr * hhdr;
-GC_bool abort_if_found; /* Abort if a reclaimable object is found */
register ptr_t list;
{
register word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
@@ -295,7 +444,6 @@ register ptr_t list;
register int i;
# define DO_OBJ(start_displ) \
if (!(mark_word & ((word)1 << start_displ))) { \
- FOUND_FREE(hbp, p - (word *)hbp + start_displ); \
p[start_displ] = (word)list; \
list = (ptr_t)(p+start_displ); \
INCR_WORDS(2); \
@@ -327,10 +475,9 @@ register ptr_t list;
* Another special case for 4 word atomic objects:
*/
/*ARGSUSED*/
-ptr_t GC_reclaim_uninit4(hbp, hhdr, list, abort_if_found)
+ptr_t GC_reclaim_uninit4(hbp, hhdr, list)
register struct hblk *hbp; /* ptr to current heap block */
hdr * hhdr;
-GC_bool abort_if_found; /* Abort if a reclaimable object is found */
register ptr_t list;
{
register word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
@@ -341,7 +488,6 @@ register ptr_t list;
register word mark_word;
# define DO_OBJ(start_displ) \
if (!(mark_word & ((word)1 << start_displ))) { \
- FOUND_FREE(hbp, p - (word *)hbp + start_displ); \
p[start_displ] = (word)list; \
list = (ptr_t)(p+start_displ); \
INCR_WORDS(4); \
@@ -382,10 +528,9 @@ register ptr_t list;
/* Finally the one word case, which never requires any clearing: */
/*ARGSUSED*/
-ptr_t GC_reclaim1(hbp, hhdr, list, abort_if_found)
+ptr_t GC_reclaim1(hbp, hhdr, list)
register struct hblk *hbp; /* ptr to current heap block */
hdr * hhdr;
-GC_bool abort_if_found; /* Abort if a reclaimable object is found */
register ptr_t list;
{
register word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
@@ -397,7 +542,6 @@ register ptr_t list;
register int i;
# define DO_OBJ(start_displ) \
if (!(mark_word & ((word)1 << start_displ))) { \
- FOUND_FREE(hbp, p - (word *)hbp + start_displ); \
p[start_displ] = (word)list; \
list = (ptr_t)(p+start_displ); \
INCR_WORDS(1); \
@@ -433,15 +577,16 @@ register ptr_t list;
* If entirely empty blocks are to be completely deallocated, then
* caller should perform that check.
*/
-void GC_reclaim_small_nonempty_block(hbp, abort_if_found)
+void GC_reclaim_small_nonempty_block(hbp, report_if_found)
register struct hblk *hbp; /* ptr to current heap block */
-int abort_if_found; /* Abort if a reclaimable object is found */
+int report_if_found; /* Report, but don't reclaim, unmarked objects */
{
hdr * hhdr;
- register word sz; /* size of objects in current block */
- register struct obj_kind * ok;
- register ptr_t * flh;
- register int kind;
+ word sz; /* size of objects in current block */
+ struct obj_kind * ok;
+ ptr_t * flh;
+ int kind;
+ GC_bool full;
hhdr = HDR(hbp);
sz = hhdr -> hb_sz;
@@ -449,43 +594,70 @@ int abort_if_found; /* Abort if a reclaimable object is found */
kind = hhdr -> hb_obj_kind;
ok = &GC_obj_kinds[kind];
flh = &(ok -> ok_freelist[sz]);
- GC_write_hint(hbp);
- if (ok -> ok_init) {
+ if (report_if_found) {
+ GC_reclaim_check(hbp, hhdr, sz);
+ } else if (ok -> ok_init) {
switch(sz) {
# ifndef SMALL_CONFIG
case 1:
- *flh = GC_reclaim1(hbp, hhdr, *flh, abort_if_found);
+ full = GC_block_nearly_full1(hhdr, 0xffffffffl);
+ if (TRUE == full) goto out;
+ if (FALSE == full) GC_write_hint(hbp);
+ /* In the DONT_KNOW case, we let reclaim fault. */
+ *flh = GC_reclaim1(hbp, hhdr, *flh);
break;
case 2:
- *flh = GC_reclaim_clear2(hbp, hhdr, *flh, abort_if_found);
+ full = GC_block_nearly_full1(hhdr, 0x55555555l);
+ if (TRUE == full) goto out;
+ if (FALSE == full) GC_write_hint(hbp);
+ *flh = GC_reclaim_clear2(hbp, hhdr, *flh);
break;
case 4:
- *flh = GC_reclaim_clear4(hbp, hhdr, *flh, abort_if_found);
+ full = GC_block_nearly_full1(hhdr, 0x11111111l);
+ if (TRUE == full) goto out;
+ if (FALSE == full) GC_write_hint(hbp);
+ *flh = GC_reclaim_clear4(hbp, hhdr, *flh);
break;
# endif
default:
- *flh = GC_reclaim_clear(hbp, hhdr, sz, *flh, abort_if_found);
+ full = GC_block_nearly_full(hhdr);
+ if (TRUE == full) goto out;
+ if (FALSE == full) GC_write_hint(hbp);
+ *flh = GC_reclaim_clear(hbp, hhdr, sz, *flh);
break;
}
} else {
switch(sz) {
# ifndef SMALL_CONFIG
case 1:
- *flh = GC_reclaim1(hbp, hhdr, *flh, abort_if_found);
+ full = GC_block_nearly_full1(hhdr, 0xffffffffl);
+ if (TRUE == full) goto out;
+ if (FALSE == full) GC_write_hint(hbp);
+ *flh = GC_reclaim1(hbp, hhdr, *flh);
break;
case 2:
- *flh = GC_reclaim_uninit2(hbp, hhdr, *flh, abort_if_found);
+ full = GC_block_nearly_full1(hhdr, 0x55555555l);
+ if (TRUE == full) goto out;
+ if (FALSE == full) GC_write_hint(hbp);
+ *flh = GC_reclaim_uninit2(hbp, hhdr, *flh);
break;
case 4:
- *flh = GC_reclaim_uninit4(hbp, hhdr, *flh, abort_if_found);
+ full = GC_block_nearly_full1(hhdr, 0x11111111l);
+ if (TRUE == full) goto out;
+ if (FALSE == full) GC_write_hint(hbp);
+ *flh = GC_reclaim_uninit4(hbp, hhdr, *flh);
break;
# endif
default:
- *flh = GC_reclaim_uninit(hbp, hhdr, sz, *flh, abort_if_found);
+ full = GC_block_nearly_full(hhdr);
+ if (TRUE == full) goto out;
+ if (FALSE == full) GC_write_hint(hbp);
+ *flh = GC_reclaim_uninit(hbp, hhdr, sz, *flh);
break;
}
}
+out:
if (IS_UNCOLLECTABLE(kind)) GC_set_hdr_marks(hhdr);
}
@@ -494,11 +666,12 @@ int abort_if_found; /* Abort if a reclaimable object is found */
* to the heap block free list.
* Otherwise enqueue the block for later processing
* by GC_reclaim_small_nonempty_block.
- * If abort_if_found is TRUE, then process any block immediately.
+ * If report_if_found is TRUE, then process any block immediately, and
+ * simply report free objects; do not actually reclaim them.
*/
-void GC_reclaim_block(hbp, abort_if_found)
+void GC_reclaim_block(hbp, report_if_found)
register struct hblk *hbp; /* ptr to current heap block */
-word abort_if_found; /* Abort if a reclaimable object is found */
+word report_if_found; /* Report, but don't reclaim, unmarked objects */
{
register hdr * hhdr;
register word sz; /* size of objects in current block */
@@ -511,16 +684,19 @@ word abort_if_found; /* Abort if a reclaimable object is found */
if( sz > MAXOBJSZ ) { /* 1 big object */
if( !mark_bit_from_hdr(hhdr, HDR_WORDS) ) {
- FOUND_FREE(hbp, HDR_WORDS);
-# ifdef GATHERSTATS
+ if (report_if_found) {
+ FOUND_FREE(hbp, HDR_WORDS);
+ } else {
+# ifdef GATHERSTATS
GC_mem_found += sz;
-# endif
- GC_freehblk(hbp);
+# endif
+ GC_freehblk(hbp);
+ }
}
} else {
GC_bool empty = GC_block_empty(hhdr);
- if (abort_if_found) {
- GC_reclaim_small_nonempty_block(hbp, (int)abort_if_found);
+ if (report_if_found) {
+ GC_reclaim_small_nonempty_block(hbp, (int)report_if_found);
} else if (empty) {
# ifdef GATHERSTATS
GC_mem_found += BYTES_TO_WORDS(HBLKSIZE);
@@ -600,11 +776,11 @@ void GC_print_block_list()
#endif /* NO_DEBUGGING */
/*
- * Do the same thing on the entire heap, after first clearing small object
- * free lists (if we are not just looking for leaks).
+ * Perform GC_reclaim_block on the entire heap, after first clearing
+ * small object free lists (if we are not just looking for leaks).
*/
-void GC_start_reclaim(abort_if_found)
-int abort_if_found; /* Abort if a GC_reclaimable object is found */
+void GC_start_reclaim(report_if_found)
+int report_if_found; /* Report, but don't reclaim, GC_reclaimable objects */
{
int kind;
@@ -617,7 +793,7 @@ int abort_if_found; /* Abort if a GC_reclaimable object is found */
register struct hblk ** rlist = GC_obj_kinds[kind].ok_reclaim_list;
if (rlist == 0) continue; /* This kind not used. */
- if (!abort_if_found) {
+ if (!report_if_found) {
lim = &(GC_obj_kinds[kind].ok_freelist[MAXOBJSZ+1]);
for( fop = GC_obj_kinds[kind].ok_freelist; fop < lim; fop++ ) {
*fop = 0;
@@ -637,7 +813,7 @@ int abort_if_found; /* Abort if a GC_reclaimable object is found */
/* Go through all heap blocks (in hblklist) and reclaim unmarked objects */
/* or enqueue the block for later processing. */
- GC_apply_to_all_blocks(GC_reclaim_block, (word)abort_if_found);
+ GC_apply_to_all_blocks(GC_reclaim_block, (word)report_if_found);
}
diff --git a/boehm-gc/solaris_threads.c b/boehm-gc/solaris_threads.c
index 1f5ebcdc390..65b2c6517b1 100644
--- a/boehm-gc/solaris_threads.c
+++ b/boehm-gc/solaris_threads.c
@@ -616,6 +616,25 @@ GC_thread GC_lookup_thread(thread_t id)
return(p);
}
+# define MAX_ORIG_STACK_SIZE (8 * 1024 * 1024)
+
+word GC_get_orig_stack_size() {
+ struct rlimit rl;
+ static int warned = 0;
+ int result;
+
+ if (getrlimit(RLIMIT_STACK, &rl) != 0) ABORT("getrlimit failed");
+ result = (word)rl.rlim_cur & ~(HBLKSIZE-1);
+ if (result > MAX_ORIG_STACK_SIZE) {
+ if (!warned) {
+	    WARN("Large stack limit (%ld): only scanning 8 MB", result);
+ warned = 1;
+ }
+ result = MAX_ORIG_STACK_SIZE;
+ }
+ return result;
+}
+
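(The clamp exists because the primordial thread's stack size is not tracked by the collector, so it falls back on RLIMIT_STACK, which may be effectively unlimited. A sketch of the same clamp, assuming POSIX getrlimit; HBLKSIZE here is a stand-in for the collector's block size:)

    #include <stdio.h>
    #include <sys/resource.h>

    #define MAX_ORIG_STACK_SIZE (8UL * 1024 * 1024)
    #define HBLKSIZE 4096UL

    int main(void)
    {
        struct rlimit rl;
        unsigned long result;

        if (getrlimit(RLIMIT_STACK, &rl) != 0) return 1;
        /* Round down to a block boundary, then cap at 8 MB. */
        result = (unsigned long)rl.rlim_cur & ~(HBLKSIZE - 1);
        if (result > MAX_ORIG_STACK_SIZE) result = MAX_ORIG_STACK_SIZE;
        printf("scanning %lu bytes of primordial stack\n", result);
        return 0;
    }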
/* Notify dirty bit implementation of unused parts of my stack. */
/* Caller holds allocation lock. */
void GC_my_stack_limits()
@@ -628,12 +647,9 @@ void GC_my_stack_limits()
if (stack_size == 0) {
/* original thread */
- struct rlimit rl;
-
- if (getrlimit(RLIMIT_STACK, &rl) != 0) ABORT("getrlimit failed");
/* Empirically, what should be the stack page with lowest */
/* address is actually inaccessible. */
- stack_size = ((word)rl.rlim_cur & ~(HBLKSIZE-1)) - GC_page_sz;
+ stack_size = GC_get_orig_stack_size() - GC_page_sz;
stack = GC_stackbottom - stack_size + GC_page_sz;
} else {
stack = me -> stack;
@@ -671,8 +687,7 @@ void GC_push_all_stacks()
top = p -> stack + p -> stack_size;
} else {
/* The original stack. */
- if (getrlimit(RLIMIT_STACK, &rl) != 0) ABORT("getrlimit failed");
- bottom = GC_stackbottom - rl.rlim_cur + GC_page_sz;
+ bottom = GC_stackbottom - GC_get_orig_stack_size() + GC_page_sz;
top = GC_stackbottom;
}
if ((word)sp > (word)bottom && (word)sp < (word)top) bottom = sp;
diff --git a/boehm-gc/sparc_sunos4_mach_dep.s b/boehm-gc/sparc_sunos4_mach_dep.s
index 7accadd3dfb..41858073ef9 100644
--- a/boehm-gc/sparc_sunos4_mach_dep.s
+++ b/boehm-gc/sparc_sunos4_mach_dep.s
@@ -1,4 +1,4 @@
-! SPARCompiler 3.0 and later apparently no loner handles
+! SPARCompiler 3.0 and later apparently no longer handles
! asm outside functions. So we need a separate .s file
! This is only set up for SunOS 4.
! Assumes this is called before the stack contents are
diff --git a/boehm-gc/threadlibs.c b/boehm-gc/threadlibs.c
index 4a0a6cfcd42..df4eb77bb1d 100644
--- a/boehm-gc/threadlibs.c
+++ b/boehm-gc/threadlibs.c
@@ -6,6 +6,9 @@ int main()
# if defined(IRIX_THREADS) || defined(LINUX_THREADS)
printf("-lpthread\n");
# endif
+# if defined(HPUX_THREADS)
+ printf("-lpthread -lrt\n");
+# endif
# ifdef SOLARIS_THREADS
printf("-lthread -ldl\n");
# endif
diff --git a/boehm-gc/version.h b/boehm-gc/version.h
index 88858fa4965..df0770c9b04 100644
--- a/boehm-gc/version.h
+++ b/boehm-gc/version.h
@@ -1,6 +1,6 @@
-#define GC_VERSION_MAJOR 4
-#define GC_VERSION_MINOR 14
-#define GC_ALPHA_VERSION GC_NOT_ALPHA
+#define GC_VERSION_MAJOR 5
+#define GC_VERSION_MINOR 0
+#define GC_ALPHA_VERSION 4
# define GC_NOT_ALPHA 0xff