Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

libgc: update libgc to commit 6d372272 #21822

Merged
merged 6 commits into from
Aug 4, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9,035 changes: 4,616 additions & 4,419 deletions thirdparty/libgc/gc.c

Large diffs are not rendered by default.

4 changes: 4 additions & 0 deletions thirdparty/libgc/include/gc.h
Original file line number Diff line number Diff line change
@@ -1,2 +1,6 @@
/* This file is installed for backward compatibility. */
#include "gc/gc.h"

/* Compatibility shim: newer libgc exposes GC_noop1_ptr() for hiding a  */
/* pointer from compiler optimizations; provide a fallback here for     */
/* builds linking an older library.  Declared weak so that a strong     */
/* definition exported by the collector itself wins at link time.       */
__attribute__ ((weak)) GC_API void GC_CALL GC_noop1_ptr(volatile void *p) {
  /* Cast through GC_word (the collector's pointer-sized integer type,  */
  /* defined by the gc/gc.h included above) rather than a non-standard  */
  /* u64: this is portable to 32-bit targets and does not depend on a   */
  /* typedef that no standard or libgc header provides.                 */
  GC_noop1((GC_word)p);
}
174 changes: 119 additions & 55 deletions thirdparty/libgc/include/gc/gc.h

Large diffs are not rendered by default.

28 changes: 19 additions & 9 deletions thirdparty/libgc/include/gc/gc_config_macros.h
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,18 @@
#endif
#define GC_UNSIGNEDWORD unsigned GC_SIGNEDWORD

/* Size of a pointer in bytes. */
#if defined(__SIZEOF_POINTER__)
  /* GCC/Clang predefine __SIZEOF_POINTER__; use it when available.    */
# define GC_SIZEOF_PTR __SIZEOF_POINTER__
#elif defined(__LP64__) || defined (_LP64) || defined(_WIN64) \
|| defined(__alpha__) || defined(__arch64__) \
|| defined(__powerpc64__) || defined(__s390x__) \
|| (defined(__x86_64__) && !defined(__ILP32__))
  /* Fallback heuristic: known 64-bit data models and architectures.   */
  /* Note x32 (__x86_64__ with __ILP32__) is deliberately excluded.    */
# define GC_SIZEOF_PTR 8
#else
  /* Assume a 32-bit pointer on all other targets.                     */
# define GC_SIZEOF_PTR 4
#endif

/* The return type of GC_get_version(). A 32-bit unsigned integer */
/* or longer. */
# define GC_VERSION_VAL_T unsigned
Expand Down Expand Up @@ -330,7 +342,7 @@

#if defined(__sgi) && !defined(__GNUC__) && _COMPILER_VERSION >= 720
# define GC_ADD_CALLER
# define GC_RETURN_ADDR (GC_word)__return_address
# define GC_RETURN_ADDR (GC_return_addr_t)__return_address
#endif

#if defined(__linux__) || defined(__GLIBC__)
Expand All @@ -356,11 +368,8 @@
# define GC_HAVE_BUILTIN_BACKTRACE
#endif

#if defined(GC_HAVE_BUILTIN_BACKTRACE) && !defined(GC_CAN_SAVE_CALL_STACKS)
# define GC_CAN_SAVE_CALL_STACKS
#endif

#if defined(__sparc__)
#if defined(GC_HAVE_BUILTIN_BACKTRACE) && !defined(GC_CAN_SAVE_CALL_STACKS) \
|| defined(__sparc__)
# define GC_CAN_SAVE_CALL_STACKS
#endif

Expand All @@ -380,19 +389,20 @@
# if GC_GNUC_PREREQ(2, 95)
/* gcc knows how to retrieve return address, but we don't know */
/* how to generate call stacks. */
# define GC_RETURN_ADDR (GC_word)__builtin_return_address(0)
# define GC_RETURN_ADDR (GC_return_addr_t)__builtin_return_address(0)
# if GC_GNUC_PREREQ(4, 0) && (defined(__i386__) || defined(__amd64__) \
|| defined(__x86_64__) /* and probably others... */) \
&& !defined(GC_NO_RETURN_ADDR_PARENT)
# define GC_HAVE_RETURN_ADDR_PARENT
# define GC_RETURN_ADDR_PARENT \
(GC_word)__builtin_extract_return_addr(__builtin_return_address(1))
(GC_return_addr_t)__builtin_extract_return_addr( \
__builtin_return_address(1))
/* Note: a compiler might complain that calling */
/* __builtin_return_address with a nonzero argument is unsafe. */
# endif
# else
/* Just pass 0 for gcc compatibility. */
# define GC_RETURN_ADDR 0
# define GC_RETURN_ADDR ((GC_return_addr_t)0)
# endif
#endif /* !GC_CAN_SAVE_CALL_STACKS */

Expand Down
14 changes: 6 additions & 8 deletions thirdparty/libgc/include/gc/gc_gcj.h
Original file line number Diff line number Diff line change
Expand Up @@ -72,22 +72,20 @@ GC_API GC_ATTR_DEPRECATED void GC_CALL GC_init_gcj_malloc(int /* mp_index */,
/* object if GC_malloc() would. In case of out of memory, GC_oom_fn() */
/* is called and its result is returned. */
GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
GC_gcj_malloc(size_t /* lb */,
void * /* ptr_to_struct_containing_descr */);
GC_gcj_malloc(size_t /* lb */, const void * /* vtable_ptr */);

/* The debug versions allocate such that the specified mark proc */
/* is always invoked. */
/* Similar to GC_gcj_malloc, but add the debug info. This is allocated */
/* with GC_gcj_debug_kind. */
GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
GC_debug_gcj_malloc(size_t /* lb */,
void * /* ptr_to_struct_containing_descr */,
GC_debug_gcj_malloc(size_t /* lb */, const void * /* vtable_ptr */,
GC_EXTRA_PARAMS);

/* Similar to GC_gcj_malloc, but assumes that a pointer to near the */
/* beginning (i.e. within the first heap block) of the allocated object */
/* is always maintained. */
GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
GC_gcj_malloc_ignore_off_page(size_t /* lb */,
void * /* ptr_to_struct_containing_descr */);
const void * /* vtable_ptr */);

/* The kind numbers of normal and debug gcj objects. */
/* Useful only for debug support, we hope. */
Expand All @@ -97,7 +95,7 @@ GC_API int GC_gcj_debug_kind;

#ifdef GC_DEBUG
# define GC_GCJ_MALLOC(s,d) GC_debug_gcj_malloc(s,d,GC_EXTRAS)
# define GC_GCJ_MALLOC_IGNORE_OFF_PAGE(s,d) GC_debug_gcj_malloc(s,d,GC_EXTRAS)
# define GC_GCJ_MALLOC_IGNORE_OFF_PAGE(s,d) GC_GCJ_MALLOC(s,d)
#else
# define GC_GCJ_MALLOC(s,d) GC_gcj_malloc(s,d)
# define GC_GCJ_MALLOC_IGNORE_OFF_PAGE(s,d) GC_gcj_malloc_ignore_off_page(s,d)
Expand Down
158 changes: 90 additions & 68 deletions thirdparty/libgc/include/gc/gc_inline.h
Original file line number Diff line number Diff line change
Expand Up @@ -19,17 +19,17 @@

/* WARNING: */
/* Note that for these routines, it is the clients responsibility to */
/* add the extra byte at the end to deal with one-past-the-end pointers.*/
/* In the standard collector configuration, the collector assumes that */
/* such a byte has been added, and hence does not trace the last word */
/* in the resulting object. */
/* This is not an issue if the collector is compiled with */
/* DONT_ADD_BYTE_AT_END, or if GC_all_interior_pointers is not set. */
/* This interface is most useful for compilers that generate C. */
/* It is also used internally for thread-local allocation. */
/* Manual use is hereby discouraged. */
/* Clients should include atomic_ops.h (or similar) before this header. */
/* There is no debugging version of this allocation API. */
/* add the extra byte at the end to deal with one-past-the-end */
/* pointers. In the standard collector configuration, the collector */
/* assumes that such a byte has been added, and hence does not trace */
/* the last "pointer-sized" word in the resulting object. This is not */
/* an issue if GC_get_all_interior_pointers() returns 0 or */
/* if GC_get_dont_add_byte_at_end() returns 1. */
/* This interface is most useful for compilers that generate C. It is */
/* also used internally for thread-local allocation. A manual use is */
/* hereby discouraged. Clients should include atomic_ops.h (or */
/* similar) before this header. There is no debugging version of this */
/* allocation API. */

#include "gc.h"
#include "gc_tiny_fl.h"
Expand Down Expand Up @@ -73,11 +73,31 @@
#define GC_I_PTRFREE 0
#define GC_I_NORMAL 1

/* Store a pointer to a list of newly allocated objects of kind k and */
/* size lb in *result. The caller must make sure that *result is */
/* traced even if objects are ptrfree. */
GC_API void GC_CALL GC_generic_malloc_many(size_t /* lb */, int /* k */,
void ** /* result */);
/* Determine if the collector has been configured not to pad the */
/* allocated objects even in the all-interior-pointers mode. */
/* Meaningful only if GC_get_all_interior_pointers() returns 1. */
GC_API int GC_CALL GC_get_dont_add_byte_at_end(void);

/* Return a list of one or more objects of the indicated size, linked */
/* through the first pointer in each object. This has the advantage */
/* that it acquires the allocator lock only once, and may greatly */
/* reduce time wasted contending for the allocator lock. Typical usage */
/* would be in a thread that requires many items of the same size. */
/* It would keep its own free list in a thread-local storage, and call */
/* GC_malloc_many or friends to replenish it. (We do not round up */
/* object sizes, since a call indicates the intention to consume many */
/* objects of exactly this size.) We assume that the size is non-zero */
/* and a multiple of GC_GRANULE_BYTES, and that the size already */
/* includes the value returned by GC_get_all_interior_pointers() */
/* (unless GC_get_dont_add_byte_at_end() returns a non-zero value). */
/* We return the free-list by assigning it to (*result), since it is */
/* not safe to return, e.g. a linked list of pointer-free objects, */
/* since the collector would not retain the entire list if it were */
/* invoked just as we were returning; the client must make sure that */
/* (*result) is traced even if objects are pointer-free. Note also */
/* that the client should usually clear the link field. */
GC_API void GC_CALL GC_generic_malloc_many(size_t /* lb_adjusted */,
int /* k */, void ** /* result */);

/* Generalized version of GC_malloc and GC_malloc_atomic. */
/* Uses appropriately the thread-local (if available) or the global */
Expand All @@ -93,42 +113,47 @@ GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
# define GC_malloc_kind_global GC_malloc_kind
#endif

/* An internal macro to update the free list pointer atomically (if */
/* An internal macro to update the free-list pointer atomically (if */
/* the AO primitives are available) to avoid race with the marker. */
#if defined(GC_THREADS) && defined(AO_HAVE_store)
#if !defined(GC_THREADS) || !defined(AO_HAVE_store)
# define GC_FAST_M_AO_STORE(my_fl, next) (void)(*(my_fl) = (next))
#elif defined(__SIZEOF_POINTER__) && (__SIZEOF_POINTER__ > __SIZEOF_SIZE_T__)
/* Directly use the GCC atomic intrinsic as the size of a pointer is */
/* bigger than that of AO_t. */
# define GC_FAST_M_AO_STORE(my_fl, next) \
AO_store((volatile AO_t *)(my_fl), (AO_t)(next))
__atomic_store_n(my_fl, next, __ATOMIC_RELAXED)
#else
# define GC_FAST_M_AO_STORE(my_fl, next) (void)(*(my_fl) = (next))
# define GC_FAST_M_AO_STORE(my_fl, next) \
AO_store((volatile AO_t *)(my_fl), (size_t)(next))
#endif

/* The ultimately general inline allocation macro. Allocate an object */
/* of size granules, putting the resulting pointer in result. Tiny_fl */
/* is a "tiny" free list array, which will be used first, if the size */
/* is appropriate. If granules argument is too large, we allocate with */
/* default_expr instead. If we need to refill the free list, we use */
/* GC_generic_malloc_many with the indicated kind. */
/* of size lg (in granules), putting the resulting pointer in result. */
/* Tiny_fl is a "tiny" free-list array, which will be used first, if */
/* the size is appropriate. If lg argument is too large, we allocate */
/* with default_expr instead. If we need to refill the free list, we */
/* use GC_generic_malloc_many with the indicated kind. */
/* Tiny_fl should be an array of GC_TINY_FREELISTS void * pointers. */
/* If num_direct is nonzero, and the individual free list pointers */
/* If num_direct is nonzero, and the individual free-list pointers */
/* are initialized to (void *)1, then we allocate num_direct granules */
/* directly using generic_malloc before putting multiple objects into */
/* the tiny_fl entry. If num_direct is zero, then the free lists may */
/* also be initialized to (void *)0. */
/* also be initialized to NULL. */
/* Note that we use the zeroth free list to hold objects 1 granule in */
/* size that are used to satisfy size 0 allocation requests. */
/* We rely on much of this hopefully getting optimized away in the */
/* case of num_direct is 0. Particularly, if granules argument is */
/* constant, this should generate a small amount of code. */
# define GC_FAST_MALLOC_GRANS(result, granules, tiny_fl, num_direct, \
kind, default_expr, init) \
/* case of num_direct is 0. Particularly, if lg argument is constant, */
/* this should generate a small amount of code. */
#define GC_FAST_MALLOC_GRANS(result, lg, tiny_fl, num_direct, k, \
default_expr, init) \
do { \
if (GC_EXPECT((granules) >= GC_TINY_FREELISTS, 0)) { \
if (GC_EXPECT((lg) >= GC_TINY_FREELISTS, 0)) { \
result = (default_expr); \
} else { \
void **my_fl = (tiny_fl) + (granules); \
void **my_fl = (tiny_fl) + (lg); \
void *my_entry = *my_fl; \
void *next; \
\
\
for (;;) { \
if (GC_EXPECT((GC_word)my_entry \
> (num_direct) + GC_TINY_FREELISTS + 1, 1)) { \
Expand All @@ -137,65 +162,62 @@ GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
GC_FAST_M_AO_STORE(my_fl, next); \
init; \
GC_PREFETCH_FOR_WRITE(next); \
if ((kind) != GC_I_PTRFREE) { \
if ((k) != GC_I_PTRFREE) { \
GC_end_stubborn_change(my_fl); \
GC_reachable_here(next); \
} \
GC_ASSERT(GC_size(result) >= (granules)*GC_GRANULE_BYTES); \
GC_ASSERT((kind) == GC_I_PTRFREE \
|| ((GC_word *)result)[1] == 0); \
GC_ASSERT(GC_size(result) >= (lg) * GC_GRANULE_BYTES); \
GC_ASSERT((k) == GC_I_PTRFREE \
|| 0 /* NULL */ == ((void **)result)[1]); \
break; \
} \
/* Entry contains counter or NULL */ \
if ((GC_signed_word)my_entry - (GC_signed_word)(num_direct) <= 0 \
/* (GC_word)my_entry <= (num_direct) */ \
&& my_entry != 0 /* NULL */) { \
/* Small counter value, not NULL */ \
GC_FAST_M_AO_STORE(my_fl, (char *)my_entry \
+ (granules) + 1); \
GC_FAST_M_AO_STORE(my_fl, (char *)my_entry + (lg) + 1); \
result = (default_expr); \
break; \
} else { \
/* Large counter or NULL */ \
GC_generic_malloc_many((granules) == 0 ? GC_GRANULE_BYTES : \
GC_RAW_BYTES_FROM_INDEX(granules), \
kind, my_fl); \
GC_generic_malloc_many(0 == (lg) ? GC_GRANULE_BYTES \
: GC_RAW_BYTES_FROM_INDEX(lg), \
k, my_fl); \
my_entry = *my_fl; \
if (my_entry == 0) { \
result = (*GC_get_oom_fn())((granules)*GC_GRANULE_BYTES); \
result = (*GC_get_oom_fn())((lg) * GC_GRANULE_BYTES); \
break; \
} \
} \
} \
} \
} while (0)

# define GC_WORDS_TO_WHOLE_GRANULES(n) \
GC_WORDS_TO_GRANULES((n) + GC_GRANULE_WORDS - 1)

/* Allocate n words (not bytes). The pointer is stored to result. */
/* Note: this should really only be used if GC_all_interior_pointers is */
/* not set, or DONT_ADD_BYTE_AT_END is set; see above. */
/* Does not acquire the allocator lock. The caller is responsible for */
/* supplying a cleared tiny_fl free list array. For single-threaded */
/* applications, this may be a global array. */
# define GC_MALLOC_WORDS_KIND(result, n, tiny_fl, kind, init) \
/* Allocate n "pointer-sized" words. The allocation size is */
/* rounded up to a granule size. The pointer is stored to result. */
/* Should not be used unless GC_get_all_interior_pointers() returns 0 */
/* or if GC_get_dont_add_byte_at_end() returns 1. Does not acquire the */
/* allocator lock. The caller is responsible for supplying a cleared */
/* tiny_fl free-list array. For single-threaded applications, this may */
/* be a global array. */
#define GC_MALLOC_WORDS_KIND(result, n, tiny_fl, k, init) \
do { \
size_t granules = GC_WORDS_TO_WHOLE_GRANULES(n); \
GC_FAST_MALLOC_GRANS(result, granules, tiny_fl, 0, kind, \
GC_malloc_kind(granules*GC_GRANULE_BYTES, kind), \
init); \
size_t lg = GC_PTRS_TO_WHOLE_GRANULES(n); \
\
GC_FAST_MALLOC_GRANS(result, lg, tiny_fl, 0 /* num_direct */, k, \
GC_malloc_kind(lg * GC_GRANULE_BYTES, k), init); \
} while (0)

# define GC_MALLOC_WORDS(result, n, tiny_fl) \
#define GC_MALLOC_WORDS(result, n, tiny_fl) \
GC_MALLOC_WORDS_KIND(result, n, tiny_fl, GC_I_NORMAL, \
*(void **)(result) = 0)
(void)(*(void **)(result) = 0 /* NULL */))

# define GC_MALLOC_ATOMIC_WORDS(result, n, tiny_fl) \
#define GC_MALLOC_ATOMIC_WORDS(result, n, tiny_fl) \
GC_MALLOC_WORDS_KIND(result, n, tiny_fl, GC_I_PTRFREE, (void)0)

/* And once more for two word initialized objects: */
# define GC_CONS(result, first, second, tiny_fl) \
/* And one more for two-pointer initialized objects: */
#define GC_CONS(result, first, second, tiny_fl) \
do { \
void *l = (void *)(first); \
void *r = (void *)(second); \
Expand All @@ -207,11 +229,11 @@ GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
} \
} while (0)

/* Print address of each object in the free list. The caller should */
/* hold the allocator lock at least in the reader mode. Defined only */
/* if the library has been compiled without NO_DEBUGGING. */
GC_API void GC_CALL GC_print_free_list(int /* kind */,
size_t /* sz_in_granules */);
/* Print address of each object in the free list for the given kind and */
/* size (in granules). The caller should hold the allocator lock at */
/* least in the reader mode. Defined only if the library has been */
/* compiled without NO_DEBUGGING. */
GC_API void GC_CALL GC_print_free_list(int /* k */, size_t /* lg */);

#ifdef __cplusplus
} /* extern "C" */
Expand Down
Loading
Loading