2013-09-12  Sebastian Huber <sebastian.huber@embedded-brains.de>

	* libc/include/sys/cdefs.h: Synchronize with latest FreeBSD
	version.
	* libc/include/stdatomic.h: Likewise.
commit 44d6d53977
parent 45070312d4
Committed by Jeff Johnston, 2013-09-12 22:05:43 +00:00
3 changed files with 205 additions and 80 deletions

--- ChangeLog
+++ ChangeLog

@@ -1,3 +1,9 @@
+2013-09-12  Sebastian Huber <sebastian.huber@embedded-brains.de>
+
+	* libc/include/sys/cdefs.h: Synchronize with latest FreeBSD
+	version.
+	* libc/include/stdatomic.h: Likewise.
+
 2013-09-08  Yaakov Selkowitz <yselkowitz@users.sourceforge.net>
 
 	* libc/include/search.h (__compar_fn_t): Add typedef.

--- libc/include/stdatomic.h
+++ libc/include/stdatomic.h

@@ -33,16 +33,49 @@
 #include <sys/cdefs.h>
 #include <sys/_types.h>
 
-#if __has_feature(cxx_atomic)
+#if __has_extension(c_atomic) || __has_extension(cxx_atomic)
 #define __CLANG_ATOMICS
 #elif __GNUC_PREREQ__(4, 7)
 #define __GNUC_ATOMICS
-#elif !defined(__GNUC__)
+#elif defined(__GNUC__)
+#define __SYNC_ATOMICS
+#else
 #error "stdatomic.h does not support your compiler"
 #endif
 
-#if !defined(__CLANG_ATOMICS)
-#define _Atomic(T) struct { volatile T __val; }
-#endif
+/*
+ * 7.17.1 Atomic lock-free macros.
+ */
+
+#ifdef __GCC_ATOMIC_BOOL_LOCK_FREE
+#define ATOMIC_BOOL_LOCK_FREE __GCC_ATOMIC_BOOL_LOCK_FREE
+#endif
+#ifdef __GCC_ATOMIC_CHAR_LOCK_FREE
+#define ATOMIC_CHAR_LOCK_FREE __GCC_ATOMIC_CHAR_LOCK_FREE
+#endif
+#ifdef __GCC_ATOMIC_CHAR16_T_LOCK_FREE
+#define ATOMIC_CHAR16_T_LOCK_FREE __GCC_ATOMIC_CHAR16_T_LOCK_FREE
+#endif
+#ifdef __GCC_ATOMIC_CHAR32_T_LOCK_FREE
+#define ATOMIC_CHAR32_T_LOCK_FREE __GCC_ATOMIC_CHAR32_T_LOCK_FREE
+#endif
+#ifdef __GCC_ATOMIC_WCHAR_T_LOCK_FREE
+#define ATOMIC_WCHAR_T_LOCK_FREE __GCC_ATOMIC_WCHAR_T_LOCK_FREE
+#endif
+#ifdef __GCC_ATOMIC_SHORT_LOCK_FREE
+#define ATOMIC_SHORT_LOCK_FREE __GCC_ATOMIC_SHORT_LOCK_FREE
+#endif
+#ifdef __GCC_ATOMIC_INT_LOCK_FREE
+#define ATOMIC_INT_LOCK_FREE __GCC_ATOMIC_INT_LOCK_FREE
+#endif
+#ifdef __GCC_ATOMIC_LONG_LOCK_FREE
+#define ATOMIC_LONG_LOCK_FREE __GCC_ATOMIC_LONG_LOCK_FREE
+#endif
+#ifdef __GCC_ATOMIC_LLONG_LOCK_FREE
+#define ATOMIC_LLONG_LOCK_FREE __GCC_ATOMIC_LLONG_LOCK_FREE
+#endif
+#ifdef __GCC_ATOMIC_POINTER_LOCK_FREE
+#define ATOMIC_POINTER_LOCK_FREE __GCC_ATOMIC_POINTER_LOCK_FREE
+#endif
 
 /*
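Note: the new ATOMIC_*_LOCK_FREE macros simply mirror the compiler's __GCC_ATOMIC_*_LOCK_FREE values, so code can branch on them at preprocessing time. A tiny illustrative sketch, not taken from this commit:

    #include <stdatomic.h>

    #if defined(ATOMIC_INT_LOCK_FREE) && ATOMIC_INT_LOCK_FREE == 2
    /* atomic_int is always lock-free here; a lock-free fast path is safe. */
    #endif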
@@ -54,9 +87,7 @@
 #define atomic_init(obj, value) __c11_atomic_init(obj, value)
 #else
 #define ATOMIC_VAR_INIT(value) { .__val = (value) }
-#define atomic_init(obj, value) do { \
-	(obj)->__val = (value); \
-} while (0)
+#define atomic_init(obj, value) ((void)((obj)->__val = (value)))
 #endif
 
 /*
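Note: on compilers without native C11 atomics the object is a one-member struct, so both initialization forms reduce to a plain store to __val. A minimal usage sketch; counter and setup_limit are illustrative names, not part of the header:

    #include <stdatomic.h>

    static atomic_int counter = ATOMIC_VAR_INIT(0);     /* static initialization */

    static void
    setup_limit(atomic_uint *limit)
    {
            /* Dynamic initialization; only valid before the object is shared. */
            atomic_init(limit, 64);
    }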
@@ -91,43 +122,62 @@
  * atomic operations.
  */
-enum memory_order {
+typedef enum {
 	memory_order_relaxed = __ATOMIC_RELAXED,
 	memory_order_consume = __ATOMIC_CONSUME,
 	memory_order_acquire = __ATOMIC_ACQUIRE,
 	memory_order_release = __ATOMIC_RELEASE,
 	memory_order_acq_rel = __ATOMIC_ACQ_REL,
 	memory_order_seq_cst = __ATOMIC_SEQ_CST
-};
+} memory_order;
 
 /*
  * 7.17.4 Fences.
  */
 
+static __inline void
+atomic_thread_fence(memory_order __order __unused)
+{
+
 #ifdef __CLANG_ATOMICS
-#define atomic_thread_fence(order) __c11_atomic_thread_fence(order)
-#define atomic_signal_fence(order) __c11_atomic_signal_fence(order)
+	__c11_atomic_thread_fence(__order);
 #elif defined(__GNUC_ATOMICS)
-#define atomic_thread_fence(order) __atomic_thread_fence(order)
-#define atomic_signal_fence(order) __atomic_signal_fence(order)
+	__atomic_thread_fence(__order);
 #else
-#define atomic_thread_fence(order) __sync_synchronize()
-#define atomic_signal_fence(order) __asm volatile ("" : : : "memory")
+	__sync_synchronize();
 #endif
+}
+
+static __inline void
+atomic_signal_fence(memory_order __order __unused)
+{
+
+#ifdef __CLANG_ATOMICS
+	__c11_atomic_signal_fence(__order);
+#elif defined(__GNUC_ATOMICS)
+	__atomic_signal_fence(__order);
+#else
+	__asm volatile ("" ::: "memory");
+#endif
+}
 
 /*
  * 7.17.5 Lock-free property.
  */
-#if defined(__CLANG_ATOMICS)
+#if defined(_KERNEL)
+/* Atomics in kernelspace are always lock-free. */
 #define atomic_is_lock_free(obj) \
-	__c11_atomic_is_lock_free(sizeof(obj))
+	((void)(obj), (_Bool)1)
+#elif defined(__CLANG_ATOMICS)
+#define atomic_is_lock_free(obj) \
+	__atomic_is_lock_free(sizeof(*(obj)), obj)
 #elif defined(__GNUC_ATOMICS)
 #define atomic_is_lock_free(obj) \
-	__atomic_is_lock_free(sizeof((obj)->__val))
+	__atomic_is_lock_free(sizeof((obj)->__val), &(obj)->__val)
 #else
 #define atomic_is_lock_free(obj) \
-	(sizeof((obj)->__val) <= sizeof(void *))
+	((void)(obj), sizeof((obj)->__val) <= sizeof(void *))
 #endif
 
 /*
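Note: the fences become real inline functions here, so the memory_order argument is a typed parameter rather than an unchecked macro argument. An illustrative sketch of the resulting interfaces; probe_atomics and ready are hypothetical names:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint ready = ATOMIC_VAR_INIT(0);

    static void
    probe_atomics(void)
    {
            /* True when the implementation needs no hidden lock for 'ready'. */
            printf("lock-free: %d\n", (int)atomic_is_lock_free(&ready));

            atomic_signal_fence(memory_order_seq_cst);  /* compiler-only barrier */
            atomic_thread_fence(memory_order_release);  /* cross-thread barrier */
    }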
@@ -233,61 +283,65 @@ typedef _Atomic(uintmax_t) atomic_uintmax_t;
 #define atomic_store_explicit(object, desired, order) \
 	__atomic_store_n(&(object)->__val, desired, order)
 #else
+#define __atomic_apply_stride(object, operand) \
+	(((__typeof__((object)->__val))0) + (operand))
+
 #define atomic_compare_exchange_strong_explicit(object, expected, \
-    desired, success, failure) ({ \
-	__typeof__((object)->__val) __v; \
-	_Bool __r; \
-	__v = __sync_val_compare_and_swap(&(object)->__val, \
-	    *(expected), desired); \
-	__r = *(expected) == __v; \
-	*(expected) = __v; \
-	__r; \
+    desired, success, failure) __extension__ ({ \
+	__typeof__(expected) __ep = (expected); \
+	__typeof__(*__ep) __e = *__ep; \
+	(void)(success); (void)(failure); \
+	(_Bool)((*__ep = __sync_val_compare_and_swap(&(object)->__val, \
+	    __e, desired)) == __e); \
 })
 
 #define atomic_compare_exchange_weak_explicit(object, expected, \
     desired, success, failure) \
 	atomic_compare_exchange_strong_explicit(object, expected, \
 	    desired, success, failure)
 
 #if __has_builtin(__sync_swap)
 /* Clang provides a full-barrier atomic exchange - use it if available. */
 #define atomic_exchange_explicit(object, desired, order) \
-	__sync_swap(&(object)->__val, desired)
+	((void)(order), __sync_swap(&(object)->__val, desired))
 #else
 /*
  * __sync_lock_test_and_set() is only an acquire barrier in theory (although in
- * practice it is usually a full barrier) so we need an explicit barrier after
+ * practice it is usually a full barrier) so we need an explicit barrier before
  * it.
  */
-#define atomic_exchange_explicit(object, desired, order) ({ \
-	__typeof__((object)->__val) __v; \
-	__v = __sync_lock_test_and_set(&(object)->__val, desired); \
+#define atomic_exchange_explicit(object, desired, order) \
+__extension__ ({ \
+	__typeof__(object) __o = (object); \
+	__typeof__(desired) __d = (desired); \
+	(void)(order); \
 	__sync_synchronize(); \
-	__v; \
+	__sync_lock_test_and_set(&(__o)->__val, __d); \
 })
 #endif
 #define atomic_fetch_add_explicit(object, operand, order) \
-	__sync_fetch_and_add(&(object)->__val, operand)
+	((void)(order), __sync_fetch_and_add(&(object)->__val, \
+	    __atomic_apply_stride(object, operand)))
 #define atomic_fetch_and_explicit(object, operand, order) \
-	__sync_fetch_and_and(&(object)->__val, operand)
+	((void)(order), __sync_fetch_and_and(&(object)->__val, operand))
 #define atomic_fetch_or_explicit(object, operand, order) \
-	__sync_fetch_and_or(&(object)->__val, operand)
+	((void)(order), __sync_fetch_and_or(&(object)->__val, operand))
 #define atomic_fetch_sub_explicit(object, operand, order) \
-	__sync_fetch_and_sub(&(object)->__val, operand)
+	((void)(order), __sync_fetch_and_sub(&(object)->__val, \
+	    __atomic_apply_stride(object, operand)))
 #define atomic_fetch_xor_explicit(object, operand, order) \
-	__sync_fetch_and_xor(&(object)->__val, operand)
+	((void)(order), __sync_fetch_and_xor(&(object)->__val, operand))
 #define atomic_load_explicit(object, order) \
-	__sync_fetch_and_add(&(object)->__val, 0)
-#define atomic_store_explicit(object, desired, order) do { \
-	__sync_synchronize(); \
-	(object)->__val = (desired); \
-	__sync_synchronize(); \
-} while (0)
+	((void)(order), __sync_fetch_and_add(&(object)->__val, 0))
+#define atomic_store_explicit(object, desired, order) \
+	((void)atomic_exchange_explicit(object, desired, order))
 #endif
 
 /*
  * Convenience functions.
+ *
+ * Don't provide these in kernel space. In kernel space, we should be
+ * disciplined enough to always provide explicit barriers.
  */
 
+#ifndef _KERNEL
 #define atomic_compare_exchange_strong(object, expected, desired) \
 	atomic_compare_exchange_strong_explicit(object, expected, \
 	    desired, memory_order_seq_cst, memory_order_seq_cst)
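Note: in the __SYNC_ATOMICS fallback the order arguments are evaluated only for their side effects, and __atomic_apply_stride() scales the add/sub operand by the pointee size when the atomic object holds a pointer, matching the C11 builtins. A compare-and-swap retry loop built on these interfaces might look like this; count_hit and hits are illustrative names only:

    #include <stdatomic.h>

    static atomic_uint hits = ATOMIC_VAR_INIT(0);

    /* Increment 'hits', but never past 'limit'. */
    static void
    count_hit(unsigned limit)
    {
            unsigned cur = atomic_load_explicit(&hits, memory_order_relaxed);

            while (cur < limit &&
                !atomic_compare_exchange_weak_explicit(&hits, &cur, cur + 1,
                    memory_order_relaxed, memory_order_relaxed))
                    ;       /* on failure 'cur' holds the fresh value; retry */
    }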
@@ -310,23 +364,50 @@ typedef _Atomic(uintmax_t) atomic_uintmax_t;
 	atomic_load_explicit(object, memory_order_seq_cst)
 #define atomic_store(object, desired) \
 	atomic_store_explicit(object, desired, memory_order_seq_cst)
+#endif /* !_KERNEL */
 
 /*
  * 7.17.8 Atomic flag type and operations.
- *
- * XXX: Assume atomic_bool can be used as an atomic_flag. Is there some
- * kind of compiler built-in type we could use?
  */
 
-typedef atomic_bool atomic_flag;
+typedef struct {
+	atomic_bool __flag;
+} atomic_flag;
 
-#define ATOMIC_FLAG_INIT ATOMIC_VAR_INIT(0)
+#define ATOMIC_FLAG_INIT { ATOMIC_VAR_INIT(0) }
 
-#define atomic_flag_clear_explicit(object, order) \
-	atomic_store_explicit(object, 0, order)
-#define atomic_flag_test_and_set_explicit(object, order) \
-	atomic_compare_exchange_strong_explicit(object, 0, 1, order, order)
+static __inline _Bool
+atomic_flag_test_and_set_explicit(volatile atomic_flag *__object,
+    memory_order __order)
+{
+	return (atomic_exchange_explicit(&__object->__flag, 1, __order));
+}
 
-#define atomic_flag_clear(object) \
-	atomic_flag_clear_explicit(object, memory_order_seq_cst)
-#define atomic_flag_test_and_set(object) \
-	atomic_flag_test_and_set_explicit(object, memory_order_seq_cst)
+static __inline void
+atomic_flag_clear_explicit(volatile atomic_flag *__object, memory_order __order)
+{
+	atomic_store_explicit(&__object->__flag, 0, __order);
+}
+
+#ifndef _KERNEL
+static __inline _Bool
+atomic_flag_test_and_set(volatile atomic_flag *__object)
+{
+	return (atomic_flag_test_and_set_explicit(__object,
+	    memory_order_seq_cst));
+}
+
+static __inline void
+atomic_flag_clear(volatile atomic_flag *__object)
+{
+	atomic_flag_clear_explicit(__object, memory_order_seq_cst);
+}
+#endif /* !_KERNEL */
 
 #endif /* !_STDATOMIC_H_ */
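Note: atomic_flag becomes a distinct struct implemented on atomic_exchange rather than an alias for atomic_bool, which is enough to build a simple test-and-set lock. An illustrative sketch; spin_lock and spin_unlock are hypothetical helpers, not part of the header:

    #include <stdatomic.h>

    static atomic_flag lock = ATOMIC_FLAG_INIT;

    static void
    spin_lock(void)
    {
            while (atomic_flag_test_and_set_explicit(&lock, memory_order_acquire))
                    ;       /* busy-wait until the holder clears the flag */
    }

    static void
    spin_unlock(void)
    {
            atomic_flag_clear_explicit(&lock, memory_order_release);
    }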

--- libc/include/sys/cdefs.h
+++ libc/include/sys/cdefs.h

@@ -66,6 +66,23 @@
 # define __ptrvalue /* nothing */
 #endif
 
+/*
+ * Testing against Clang-specific extensions.
+ */
+
+#ifndef __has_extension
+#define __has_extension __has_feature
+#endif
+#ifndef __has_feature
+#define __has_feature(x) 0
+#endif
+#ifndef __has_include
+#define __has_include(x) 0
+#endif
+#ifndef __has_builtin
+#define __has_builtin(x) 0
+#endif
+
 #if defined(__cplusplus)
 #define __BEGIN_DECLS extern "C" {
 #define __END_DECLS }
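Note: moving these fallbacks to the top of the header lets later code use Clang's feature-test macros unconditionally; on compilers that do not define them (GCC of this era), the tests simply expand to 0 instead of breaking preprocessing. A small illustrative sketch; MY_UNREACHABLE is a hypothetical macro:

    #include <sys/cdefs.h>

    #if __has_builtin(__builtin_unreachable)
    #define MY_UNREACHABLE() __builtin_unreachable()
    #else
    #define MY_UNREACHABLE() do { } while (0)
    #endif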
@@ -252,23 +269,45 @@
 /*
  * Keywords added in C11.
  */
-#if defined(__cplusplus) && __cplusplus >= 201103L
-#define _Alignas(e) alignas(e)
-#define _Alignof(e) alignof(e)
-#define _Noreturn [[noreturn]]
-#define _Static_assert(e, s) static_assert(e, s)
-/* FIXME: change this to thread_local when clang in base supports it */
-#define _Thread_local __thread
-#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L
-/* Do nothing. They are language keywords. */
+
+#if !defined(__STDC_VERSION__) || __STDC_VERSION__ < 201112L
+
+#if !__has_extension(c_alignas)
+#if (defined(__cplusplus) && __cplusplus >= 201103L) || \
+    __has_extension(cxx_alignas)
+#define _Alignas(x) alignas(x)
 #else
-/* Not supported. Implement them using our versions. */
+/* XXX: Only emulates _Alignas(constant-expression); not _Alignas(type-name). */
 #define _Alignas(x) __aligned(x)
+#endif
+#endif
+
+#if defined(__cplusplus) && __cplusplus >= 201103L
+#define _Alignof(x) alignof(x)
+#else
 #define _Alignof(x) __alignof(x)
+#endif
+
+#if !__has_extension(c_atomic) && !__has_extension(cxx_atomic)
+/*
+ * No native support for _Atomic(). Place object in structure to prevent
+ * most forms of direct non-atomic access.
+ */
+#define _Atomic(T) struct { T volatile __val; }
+#endif
+
+#if defined(__cplusplus) && __cplusplus >= 201103L
+#define _Noreturn [[noreturn]]
+#else
 #define _Noreturn __dead2
-#define _Thread_local __thread
+#endif
+
 #if __GNUC_PREREQ__(4, 6) && !defined(__cplusplus)
 /* Do nothing: _Static_assert() works as per C11 */
+#elif !__has_extension(c_static_assert)
+#if (defined(__cplusplus) && __cplusplus >= 201103L) || \
+    __has_extension(cxx_static_assert)
+#define _Static_assert(x, y) static_assert(x, y)
 #elif defined(__COUNTER__)
 #define _Static_assert(x, y) __Static_assert(x, __COUNTER__)
 #define __Static_assert(x, y) ___Static_assert(x, y)
@@ -278,6 +317,18 @@
 #endif
 #endif
 
+#if !__has_extension(c_thread_local)
+/* XXX: Change this to test against C++11 when clang in base supports it. */
+#if /* (defined(__cplusplus) && __cplusplus >= 201103L) || */ \
+    __has_extension(cxx_thread_local)
+#define _Thread_local thread_local
+#else
+#define _Thread_local __thread
+#endif
+#endif
+
+#endif /* __STDC_VERSION__ || __STDC_VERSION__ < 201112L */
+
 /*
  * Emulation of C11 _Generic(). Unlike the previously defined C11
  * keywords, it is not possible to implement this using exactly the same
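Note: with this block, the C11 keywords can be used in headers even when the compiler predates C11; each one expands to the native keyword, a Clang extension, or a GNU-style emulation. A small illustrative sketch; struct message and last_error are hypothetical names:

    #include <sys/cdefs.h>

    _Static_assert(sizeof(int) >= 2, "int is too narrow");

    struct message {
            _Alignas(16) char payload[64];          /* 16-byte-aligned buffer */
    };

    static _Thread_local int last_error;            /* per-thread error slot */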
@@ -650,17 +701,4 @@
 #endif
 #endif
 
-#ifndef __has_extension
-#define __has_extension __has_feature
-#endif
-#ifndef __has_feature
-#define __has_feature(x) 0
-#endif
-#ifndef __has_include
-#define __has_include(x) 0
-#endif
-#ifndef __has_builtin
-#define __has_builtin(x) 0
-#endif
-
 #endif /* !_SYS_CDEFS_H_ */