Commit c1559603 authored by Ingo Molnar

Merge branch 'locking/atomics'

parents 1c067caa a6f1de04
......@@ -6,7 +6,8 @@
# 2) Generate timeconst.h
# 3) Generate asm-offsets.h (may need bounds.h and timeconst.h)
# 4) Check for missing system calls
# 5) Generate constants.py (may need bounds.h)
# 5) Check atomic headers are up-to-date
# 6) Generate constants.py (may need bounds.h)
#####
# 1) Generate bounds.h
......@@ -59,7 +60,20 @@ missing-syscalls: scripts/checksyscalls.sh $(offsets-file) FORCE
$(call cmd,syscalls)
#####
# 5) Generate constants for Python GDB integration
# 5) Check atomic headers are up-to-date
#
always += old-atomics
targets += old-atomics
quiet_cmd_atomics = CALL $<
cmd_atomics = $(CONFIG_SHELL) $<
old-atomics: scripts/atomic/check-atomics.sh FORCE
$(call cmd,atomics)
#####
# 6) Generate constants for Python GDB integration
#
extra-$(CONFIG_GDB_SCRIPTS) += build_constants_py
......
......@@ -2609,6 +2609,7 @@ L: linux-kernel@vger.kernel.org
S: Maintained
F: arch/*/include/asm/atomic*.h
F: include/*/atomic*.h
F: scripts/atomic/
ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER
M: Bradley Grove <linuxdrivers@attotech.com>
......
......@@ -39,7 +39,7 @@
#define ATOMIC_OP(op, asm_op) \
__LL_SC_INLINE void \
__LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \
__LL_SC_PREFIX(arch_atomic_##op(int i, atomic_t *v)) \
{ \
unsigned long tmp; \
int result; \
......@@ -53,11 +53,11 @@ __LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
: "Ir" (i)); \
} \
__LL_SC_EXPORT(atomic_##op);
__LL_SC_EXPORT(arch_atomic_##op);
#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \
__LL_SC_INLINE int \
__LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v)) \
__LL_SC_PREFIX(arch_atomic_##op##_return##name(int i, atomic_t *v)) \
{ \
unsigned long tmp; \
int result; \
......@@ -75,11 +75,11 @@ __LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v)) \
\
return result; \
} \
__LL_SC_EXPORT(atomic_##op##_return##name);
__LL_SC_EXPORT(arch_atomic_##op##_return##name);
#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \
__LL_SC_INLINE int \
__LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v)) \
__LL_SC_PREFIX(arch_atomic_fetch_##op##name(int i, atomic_t *v)) \
{ \
unsigned long tmp; \
int val, result; \
......@@ -97,7 +97,7 @@ __LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v)) \
\
return result; \
} \
__LL_SC_EXPORT(atomic_fetch_##op##name);
__LL_SC_EXPORT(arch_atomic_fetch_##op##name);
#define ATOMIC_OPS(...) \
ATOMIC_OP(__VA_ARGS__) \
......@@ -133,7 +133,7 @@ ATOMIC_OPS(xor, eor)
#define ATOMIC64_OP(op, asm_op) \
__LL_SC_INLINE void \
__LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \
__LL_SC_PREFIX(arch_atomic64_##op(long i, atomic64_t *v)) \
{ \
long result; \
unsigned long tmp; \
......@@ -147,11 +147,11 @@ __LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
: "Ir" (i)); \
} \
__LL_SC_EXPORT(atomic64_##op);
__LL_SC_EXPORT(arch_atomic64_##op);
#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \
__LL_SC_INLINE long \
__LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v)) \
__LL_SC_PREFIX(arch_atomic64_##op##_return##name(long i, atomic64_t *v))\
{ \
long result; \
unsigned long tmp; \
......@@ -169,11 +169,11 @@ __LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v)) \
\
return result; \
} \
__LL_SC_EXPORT(atomic64_##op##_return##name);
__LL_SC_EXPORT(arch_atomic64_##op##_return##name);
#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \
__LL_SC_INLINE long \
__LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v)) \
__LL_SC_PREFIX(arch_atomic64_fetch_##op##name(long i, atomic64_t *v)) \
{ \
long result, val; \
unsigned long tmp; \
......@@ -191,7 +191,7 @@ __LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v)) \
\
return result; \
} \
__LL_SC_EXPORT(atomic64_fetch_##op##name);
__LL_SC_EXPORT(arch_atomic64_fetch_##op##name);
#define ATOMIC64_OPS(...) \
ATOMIC64_OP(__VA_ARGS__) \
......@@ -226,7 +226,7 @@ ATOMIC64_OPS(xor, eor)
#undef ATOMIC64_OP
__LL_SC_INLINE long
__LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
__LL_SC_PREFIX(arch_atomic64_dec_if_positive(atomic64_t *v))
{
long result;
unsigned long tmp;
......@@ -246,7 +246,7 @@ __LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
return result;
}
__LL_SC_EXPORT(atomic64_dec_if_positive);
__LL_SC_EXPORT(arch_atomic64_dec_if_positive);
#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl) \
__LL_SC_INLINE u##sz \
......
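For orientation (not part of the diff): after this rename, instantiating ATOMIC_OP(add, add) yields roughly the following shape, with the LL/SC asm loop unchanged from the context lines above:

__LL_SC_INLINE void
__LL_SC_PREFIX(arch_atomic_add(int i, atomic_t *v))
{
	unsigned long tmp;
	int result;
	/* LL/SC retry loop: ldxr/add/stxr on v->counter, as in the hunk above */
}
__LL_SC_EXPORT(arch_atomic_add);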
......@@ -25,9 +25,9 @@
#error "please don't include this file directly"
#endif
#define __LL_SC_ATOMIC(op) __LL_SC_CALL(atomic_##op)
#define __LL_SC_ATOMIC(op) __LL_SC_CALL(arch_atomic_##op)
#define ATOMIC_OP(op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
......@@ -47,7 +47,7 @@ ATOMIC_OP(add, stadd)
#undef ATOMIC_OP
#define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...) \
static inline int atomic_fetch_##op##name(int i, atomic_t *v) \
static inline int arch_atomic_fetch_##op##name(int i, atomic_t *v) \
{ \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
......@@ -79,7 +79,7 @@ ATOMIC_FETCH_OPS(add, ldadd)
#undef ATOMIC_FETCH_OPS
#define ATOMIC_OP_ADD_RETURN(name, mb, cl...) \
static inline int atomic_add_return##name(int i, atomic_t *v) \
static inline int arch_atomic_add_return##name(int i, atomic_t *v) \
{ \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
......@@ -105,7 +105,7 @@ ATOMIC_OP_ADD_RETURN( , al, "memory")
#undef ATOMIC_OP_ADD_RETURN
static inline void atomic_and(int i, atomic_t *v)
static inline void arch_atomic_and(int i, atomic_t *v)
{
register int w0 asm ("w0") = i;
register atomic_t *x1 asm ("x1") = v;
......@@ -123,7 +123,7 @@ static inline void atomic_and(int i, atomic_t *v)
}
#define ATOMIC_FETCH_OP_AND(name, mb, cl...) \
static inline int atomic_fetch_and##name(int i, atomic_t *v) \
static inline int arch_atomic_fetch_and##name(int i, atomic_t *v) \
{ \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
......@@ -149,7 +149,7 @@ ATOMIC_FETCH_OP_AND( , al, "memory")
#undef ATOMIC_FETCH_OP_AND
static inline void atomic_sub(int i, atomic_t *v)
static inline void arch_atomic_sub(int i, atomic_t *v)
{
register int w0 asm ("w0") = i;
register atomic_t *x1 asm ("x1") = v;
......@@ -167,7 +167,7 @@ static inline void atomic_sub(int i, atomic_t *v)
}
#define ATOMIC_OP_SUB_RETURN(name, mb, cl...) \
static inline int atomic_sub_return##name(int i, atomic_t *v) \
static inline int arch_atomic_sub_return##name(int i, atomic_t *v) \
{ \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
......@@ -195,7 +195,7 @@ ATOMIC_OP_SUB_RETURN( , al, "memory")
#undef ATOMIC_OP_SUB_RETURN
#define ATOMIC_FETCH_OP_SUB(name, mb, cl...) \
static inline int atomic_fetch_sub##name(int i, atomic_t *v) \
static inline int arch_atomic_fetch_sub##name(int i, atomic_t *v) \
{ \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
......@@ -222,9 +222,9 @@ ATOMIC_FETCH_OP_SUB( , al, "memory")
#undef ATOMIC_FETCH_OP_SUB
#undef __LL_SC_ATOMIC
#define __LL_SC_ATOMIC64(op) __LL_SC_CALL(atomic64_##op)
#define __LL_SC_ATOMIC64(op) __LL_SC_CALL(arch_atomic64_##op)
#define ATOMIC64_OP(op, asm_op) \
static inline void atomic64_##op(long i, atomic64_t *v) \
static inline void arch_atomic64_##op(long i, atomic64_t *v) \
{ \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
......@@ -244,7 +244,7 @@ ATOMIC64_OP(add, stadd)
#undef ATOMIC64_OP
#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...) \
static inline long atomic64_fetch_##op##name(long i, atomic64_t *v) \
static inline long arch_atomic64_fetch_##op##name(long i, atomic64_t *v)\
{ \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
......@@ -276,7 +276,7 @@ ATOMIC64_FETCH_OPS(add, ldadd)
#undef ATOMIC64_FETCH_OPS
#define ATOMIC64_OP_ADD_RETURN(name, mb, cl...) \
static inline long atomic64_add_return##name(long i, atomic64_t *v) \
static inline long arch_atomic64_add_return##name(long i, atomic64_t *v)\
{ \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
......@@ -302,7 +302,7 @@ ATOMIC64_OP_ADD_RETURN( , al, "memory")
#undef ATOMIC64_OP_ADD_RETURN
static inline void atomic64_and(long i, atomic64_t *v)
static inline void arch_atomic64_and(long i, atomic64_t *v)
{
register long x0 asm ("x0") = i;
register atomic64_t *x1 asm ("x1") = v;
......@@ -320,7 +320,7 @@ static inline void atomic64_and(long i, atomic64_t *v)
}
#define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \
static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \
static inline long arch_atomic64_fetch_and##name(long i, atomic64_t *v) \
{ \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
......@@ -346,7 +346,7 @@ ATOMIC64_FETCH_OP_AND( , al, "memory")
#undef ATOMIC64_FETCH_OP_AND
static inline void atomic64_sub(long i, atomic64_t *v)
static inline void arch_atomic64_sub(long i, atomic64_t *v)
{
register long x0 asm ("x0") = i;
register atomic64_t *x1 asm ("x1") = v;
......@@ -364,7 +364,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
}
#define ATOMIC64_OP_SUB_RETURN(name, mb, cl...) \
static inline long atomic64_sub_return##name(long i, atomic64_t *v) \
static inline long arch_atomic64_sub_return##name(long i, atomic64_t *v)\
{ \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
......@@ -392,7 +392,7 @@ ATOMIC64_OP_SUB_RETURN( , al, "memory")
#undef ATOMIC64_OP_SUB_RETURN
#define ATOMIC64_FETCH_OP_SUB(name, mb, cl...) \
static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \
static inline long arch_atomic64_fetch_sub##name(long i, atomic64_t *v) \
{ \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
......@@ -418,7 +418,7 @@ ATOMIC64_FETCH_OP_SUB( , al, "memory")
#undef ATOMIC64_FETCH_OP_SUB
static inline long atomic64_dec_if_positive(atomic64_t *v)
static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
{
register long x0 asm ("x0") = (long)v;
......
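The point of pushing the arm64 ops into the arch_ namespace is that a generic instrumented header (updated elsewhere in this commit) can then supply the unprefixed entry points, adding a KASAN check before deferring to the architecture. A minimal sketch of that wrapper shape, assuming the asm-generic instrumentation layer:

/* sketch only; the real wrapper is generated by scripts/atomic/ */
static __always_inline void
atomic_add(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic_add(i, v);
}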
......@@ -110,10 +110,10 @@ __XCHG_GEN(_mb)
})
/* xchg */
#define xchg_relaxed(...) __xchg_wrapper( , __VA_ARGS__)
#define xchg_acquire(...) __xchg_wrapper(_acq, __VA_ARGS__)
#define xchg_release(...) __xchg_wrapper(_rel, __VA_ARGS__)
#define xchg(...) __xchg_wrapper( _mb, __VA_ARGS__)
#define arch_xchg_relaxed(...) __xchg_wrapper( , __VA_ARGS__)
#define arch_xchg_acquire(...) __xchg_wrapper(_acq, __VA_ARGS__)
#define arch_xchg_release(...) __xchg_wrapper(_rel, __VA_ARGS__)
#define arch_xchg(...) __xchg_wrapper( _mb, __VA_ARGS__)
#define __CMPXCHG_GEN(sfx) \
static inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
......@@ -154,18 +154,18 @@ __CMPXCHG_GEN(_mb)
})
/* cmpxchg */
#define cmpxchg_relaxed(...) __cmpxchg_wrapper( , __VA_ARGS__)
#define cmpxchg_acquire(...) __cmpxchg_wrapper(_acq, __VA_ARGS__)
#define cmpxchg_release(...) __cmpxchg_wrapper(_rel, __VA_ARGS__)
#define cmpxchg(...) __cmpxchg_wrapper( _mb, __VA_ARGS__)
#define cmpxchg_local cmpxchg_relaxed
#define arch_cmpxchg_relaxed(...) __cmpxchg_wrapper( , __VA_ARGS__)
#define arch_cmpxchg_acquire(...) __cmpxchg_wrapper(_acq, __VA_ARGS__)
#define arch_cmpxchg_release(...) __cmpxchg_wrapper(_rel, __VA_ARGS__)
#define arch_cmpxchg(...) __cmpxchg_wrapper( _mb, __VA_ARGS__)
#define arch_cmpxchg_local arch_cmpxchg_relaxed
/* cmpxchg64 */
#define cmpxchg64_relaxed cmpxchg_relaxed
#define cmpxchg64_acquire cmpxchg_acquire
#define cmpxchg64_release cmpxchg_release
#define cmpxchg64 cmpxchg
#define cmpxchg64_local cmpxchg_local
#define arch_cmpxchg64_relaxed arch_cmpxchg_relaxed
#define arch_cmpxchg64_acquire arch_cmpxchg_acquire
#define arch_cmpxchg64_release arch_cmpxchg_release
#define arch_cmpxchg64 arch_cmpxchg
#define arch_cmpxchg64_local arch_cmpxchg_local
/* cmpxchg_double */
#define system_has_cmpxchg_double() 1
......@@ -177,24 +177,24 @@ __CMPXCHG_GEN(_mb)
VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1); \
})
#define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
({\
int __ret;\
__cmpxchg_double_check(ptr1, ptr2); \
__ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \
(unsigned long)(n1), (unsigned long)(n2), \
ptr1); \
__ret; \
#define arch_cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
({ \
int __ret; \
__cmpxchg_double_check(ptr1, ptr2); \
__ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \
(unsigned long)(n1), (unsigned long)(n2), \
ptr1); \
__ret; \
})
#define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \
({\
int __ret;\
__cmpxchg_double_check(ptr1, ptr2); \
__ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \
(unsigned long)(n1), (unsigned long)(n2), \
ptr1); \
__ret; \
#define arch_cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \
({ \
int __ret; \
__cmpxchg_double_check(ptr1, ptr2); \
__ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \
(unsigned long)(n1), (unsigned long)(n2), \
ptr1); \
__ret; \
})
#define __CMPWAIT_CASE(w, sfx, sz) \
......
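The xchg/cmpxchg families get the same treatment: the unprefixed macros move to the generic instrumented layer, which checks the pointer and then calls the arch_ form. A hedged sketch (names such as __ai_ptr are illustrative):

/* sketch only; generated alongside the atomic wrappers */
#define cmpxchg(ptr, ...)						\
({									\
	typeof(ptr) __ai_ptr = (ptr);					\
	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));			\
	arch_cmpxchg(__ai_ptr, __VA_ARGS__);				\
})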
......@@ -15,13 +15,13 @@
* ops which are SMP safe even on a UP kernel.
*/
#define sync_set_bit(nr, p) set_bit(nr, p)
#define sync_clear_bit(nr, p) clear_bit(nr, p)
#define sync_change_bit(nr, p) change_bit(nr, p)
#define sync_test_and_set_bit(nr, p) test_and_set_bit(nr, p)
#define sync_test_and_clear_bit(nr, p) test_and_clear_bit(nr, p)
#define sync_test_and_change_bit(nr, p) test_and_change_bit(nr, p)
#define sync_test_bit(nr, addr) test_bit(nr, addr)
#define sync_cmpxchg cmpxchg
#define sync_set_bit(nr, p) set_bit(nr, p)
#define sync_clear_bit(nr, p) clear_bit(nr, p)
#define sync_change_bit(nr, p) change_bit(nr, p)
#define sync_test_and_set_bit(nr, p) test_and_set_bit(nr, p)
#define sync_test_and_clear_bit(nr, p) test_and_clear_bit(nr, p)
#define sync_test_and_change_bit(nr, p) test_and_change_bit(nr, p)
#define sync_test_bit(nr, addr) test_bit(nr, addr)
#define arch_sync_cmpxchg arch_cmpxchg
#endif
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# helpers for dealing with atomics.tbl
#meta_in(meta, match)
meta_in()
{
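# bracket-expression glob: matches iff the single character $1 is one of the characters in $2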
case "$1" in
[$2]) return 0;;
esac
return 1
}
#meta_has_ret(meta)
meta_has_ret()
{
meta_in "$1" "bBiIfFlR"
}
#meta_has_acquire(meta)
meta_has_acquire()
{
meta_in "$1" "BFIlR"
}
#meta_has_release(meta)
meta_has_release()
{
meta_in "$1" "BFIRs"
}
#meta_has_relaxed(meta)
meta_has_relaxed()
{
meta_in "$1" "BFIR"
}
#find_fallback_template(pfx, name, sfx, order)
find_fallback_template()
{
local pfx="$1"; shift
local name="$1"; shift
local sfx="$1"; shift
local order="$1"; shift
local base=""
local file=""
# We may have fallbacks for a specific case (e.g. read_acquire()), or
# an entire class, e.g. *inc*().
#
# Start at the most specific, and fall back to the most general. Once
# we find a specific fallback, don't bother looking for more.
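# e.g. for fetch_add_acquire we try "fallbacks/fetch_add_acquire" first, then "fallbacks/add".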
for base in "${pfx}${name}${sfx}${order}" "${name}"; do
file="${ATOMICDIR}/fallbacks/${base}"
if [ -f "${file}" ]; then
printf "${file}"
break
fi
done
}
#gen_ret_type(meta, int)
gen_ret_type() {
local meta="$1"; shift
local int="$1"; shift
case "${meta}" in
[sv]) printf "void";;
[bB]) printf "bool";;
[aiIfFlR]) printf "${int}";;
esac
}
#gen_ret_stmt(meta)
gen_ret_stmt()
{
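# note: reads ${meta} from the caller's scope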
if meta_has_ret "${meta}"; then
printf "return ";
fi
}
# gen_param_name(arg)
gen_param_name()
{
# strip off the leading 'c' for 'cv'
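# e.g. "i:old" -> "old", "v" -> "v", "cv" -> "v"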
local name="${1#c}"
printf "${name#*:}"
}
# gen_param_type(arg, int, atomic)
gen_param_type()
{
local type="${1%%:*}"; shift
local int="$1"; shift
local atomic="$1"; shift
case "${type}" in
i) type="${int} ";;
p) type="${int} *";;
v) type="${atomic}_t *";;
cv) type="const ${atomic}_t *";;
esac
printf "${type}"
}
#gen_param(arg, int, atomic)
gen_param()
{
local arg="$1"; shift
local int="$1"; shift
local atomic="$1"; shift
local name="$(gen_param_name "${arg}")"
local type="$(gen_param_type "${arg}" "${int}" "${atomic}")"
printf "${type}${name}"
}
#gen_params(int, atomic, arg...)
gen_params()
{
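# e.g. gen_params "int" "atomic" "i" "v" emits "int i, atomic_t *v"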
local int="$1"; shift
local atomic="$1"; shift
while [ "$#" -gt 0 ]; do
gen_param "$1" "${int}" "${atomic}"
[ "$#" -gt 1 ] && printf ", "
shift;
done
}
#gen_args(arg...)
gen_args()
{
while [ "$#" -gt 0 ]; do
printf "$(gen_param_name "$1")"
[ "$#" -gt 1 ] && printf ", "
shift;
done
}
#gen_proto_order_variants(meta, pfx, name, sfx, ...)
gen_proto_order_variants()
{
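# emit the fully-ordered variant first, then whichever of _acquire/_release/_relaxed the meta allows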
local meta="$1"; shift
local pfx="$1"; shift
local name="$1"; shift
local sfx="$1"; shift
gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"
if meta_has_acquire "${meta}"; then
gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
fi
if meta_has_release "${meta}"; then
gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
fi
if meta_has_relaxed "${meta}"; then
gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_relaxed" "$@"
fi
}
#gen_proto_variants(meta, name, ...)
gen_proto_variants()
{
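# decorate the name (fetch_ prefix, _return suffix) according to meta, then emit all ordering variants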
local meta="$1"; shift
local name="$1"; shift
local pfx=""
local sfx=""
meta_in "${meta}" "fF" && pfx="fetch_"
meta_in "${meta}" "R" && sfx="_return"
gen_proto_order_variants "${meta}" "${pfx}" "${name}" "${sfx}" "$@"
}
#gen_proto(meta, ...)
gen_proto() {
local meta="$1"; shift
for m in $(echo "${meta}" | fold -w1); do
gen_proto_variants "${m}" "$@"
done
}
# name meta args...
#
# Where meta contains a string of variants to generate.
# Upper-case implies _{acquire,release,relaxed} variants.
# Valid meta values are:
# * B/b - bool: returns bool
# * v - void: returns void
# * I/i - int: returns base type
# * R - return: returns base type (has _return variants)
# * F/f - fetch: returns base type (has fetch_ variants)
# * l - load: returns base type (has _acquire order variant)
# * s - store: returns void (has _release order variant)
#
# Where args contains a list of type[:name], where type is:
# * cv - const pointer to atomic base type (atomic_t/atomic64_t/atomic_long_t)
# * v - pointer to atomic base type (atomic_t/atomic64_t/atomic_long_t)
# * i - base type (int/s64/long)
# * p - pointer to base type (int/s64/long)
#
read			l	cv
set			s	v	i
add			vRF	i	v
sub			vRF	i	v
inc			vRF	v
dec			vRF	v
and			vF	i	v
andnot			vF	i	v
or			vF	i	v
xor			vF	i	v
xchg			I	v	i
cmpxchg			I	v	i:old	i:new
try_cmpxchg		B	v	p:old	i:new
sub_and_test		b	i	v
dec_and_test		b	v
inc_and_test		b	v
add_negative		b	i	v
add_unless		fb	v	i:a	i:u
inc_not_zero		b	v
inc_unless_negative	b	v
dec_unless_positive	b	v
dec_if_positive		i	v
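As a concrete reading of the format: "add vRF i v" describes a void op plus _return and fetch_ families, the latter two in all four orderings. Sketched as prototypes for the int-sized atomics (unprefixed names for brevity; the generators emit the real definitions):

void atomic_add(int i, atomic_t *v);		/* v: void, fully ordered only */
int atomic_add_return(int i, atomic_t *v);	/* R: plus _acquire/_release/_relaxed */
int atomic_add_return_relaxed(int i, atomic_t *v);
int atomic_fetch_add(int i, atomic_t *v);	/* F: plus _acquire/_release/_relaxed */
int atomic_fetch_add_relaxed(int i, atomic_t *v);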
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
#
# Check if atomic headers are up-to-date
ATOMICDIR=$(dirname $0)
ATOMICTBL=${ATOMICDIR}/atomics.tbl
LINUXDIR=${ATOMICDIR}/../..
cat <<EOF |
gen-atomic-instrumented.sh asm-generic/atomic-instrumented.h
gen-atomic-long.sh asm-generic/atomic-long.h
gen-atomic-fallback.sh linux/atomic-fallback.h
EOF
while read script header; do
if ! (${ATOMICDIR}/${script} ${ATOMICTBL} | diff - ${LINUXDIR}/include/${header} > /dev/null); then
printf "warning: include/${header} is out-of-date.\n"
fi
done
cat <<EOF
static inline ${ret}
${atomic}_${pfx}${name}${sfx}_acquire(${params})
{
${ret} ret = ${atomic}_${pfx}${name}${sfx}_relaxed(${args});
__atomic_acquire_fence();
return ret;
}
EOF
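Substituting, say, atomic=atomic, pfx=fetch_, name=add, and an empty sfx into this template gives:

static inline int
atomic_fetch_add_acquire(int i, atomic_t *v)
{
	int ret = atomic_fetch_add_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
}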
cat <<EOF
/**
* ${atomic}_add_negative - add and test if negative
* @i: integer value to add
* @v: pointer of type ${atomic}_t
*
* Atomically adds @i to @v and returns true
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
static inline bool
${atomic}_add_negative(${int} i, ${atomic}_t *v)
{
return ${atomic}_add_return(i, v) < 0;
}
EOF
cat << EOF
/**
* ${atomic}_add_unless - add unless the number is already a given value
* @v: pointer of type ${atomic}_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, if @v was not already @u.
* Returns true if the addition was done.
*/
static inline bool
${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
{
return ${atomic}_fetch_add_unless(v, a, u) != u;
}
EOF
cat <<EOF
static inline ${ret}
${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v)
{
${retstmt}${atomic}_${pfx}and${sfx}${order}(~i, v);
}
EOF
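For a void op (meta v: empty pfx/sfx/order, and no return statement from ${retstmt}), this template instantiates to, e.g.:

static inline void
atomic_andnot(int i, atomic_t *v)
{
	atomic_and(~i, v);
}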
cat <<EOF
static inline ${ret}
${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v)
{
${retstmt}${atomic}_${pfx}sub${sfx}${order}(1, v);
}
EOF
cat <<EOF
/**
* ${atomic}_dec_and_test - decrement and test
* @v: pointer of type ${atomic}_t
*
* Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
* cases.
*/
static inline bool
${atomic}_dec_and_test(${atomic}_t *v)
{
return ${atomic}_dec_return(v) == 0;
}
EOF