Commit 117780ee authored by H. Peter Anvin, committed by H. Peter Anvin

x86, asm: use bool for bitops and other assembly outputs

The gcc people have confirmed that using "bool" when combined with
inline assembly is always treated as a byte-sized operand that can be
assumed to be 0 or 1, which is exactly what the SET instruction
emits.  Change the output types and intermediate variables of as many
operations as practical to "bool".
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Link: http://lkml.kernel.org/r/1465414726-197858-3-git-send-email-hpa@linux.intel.com
Reviewed-by: Andy Lutomirski <luto@kernel.org>
Reviewed-by: Borislav Petkov <bp@suse.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
parent 2823d4da
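
For illustration, the pattern the patch standardizes on looks like the minimal sketch below (not part of the diff; the function name is hypothetical). Because the output is declared bool, gcc knows the byte written by SETcc is already 0 or 1, so callers need no movzbl widening or "!= 0" normalization:

```c
#include <stdbool.h>

/* Hypothetical sketch of the bool-output idiom: test bit 'nr' of
 * '*addr' with BT, and return CF via SETC into a bool output. */
static inline bool test_bit_sketch(int nr, const unsigned int *addr)
{
        bool v;         /* byte-sized; gcc assumes it holds 0 or 1 */

        asm("btl %2,%1; setc %0"
            : "=qm" (v)
            : "m" (*addr), "Ir" (nr));
        return v;       /* no zero-extension needed */
}
```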
@@ -16,14 +16,16 @@
 #define BOOT_BITOPS_H
 #define _LINUX_BITOPS_H        /* Inhibit inclusion of <linux/bitops.h> */

-static inline int constant_test_bit(int nr, const void *addr)
+#include <linux/types.h>
+
+static inline bool constant_test_bit(int nr, const void *addr)
 {
        const u32 *p = (const u32 *)addr;
        return ((1UL << (nr & 31)) & (p[nr >> 5])) != 0;
 }

-static inline int variable_test_bit(int nr, const void *addr)
+static inline bool variable_test_bit(int nr, const void *addr)
 {
-       u8 v;
+       bool v;
        const u32 *p = (const u32 *)addr;

        asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
@@ -176,16 +176,16 @@ static inline void wrgs32(u32 v, addr_t addr)
 }

 /* Note: these only return true/false, not a signed return value! */
-static inline int memcmp_fs(const void *s1, addr_t s2, size_t len)
+static inline bool memcmp_fs(const void *s1, addr_t s2, size_t len)
 {
-       u8 diff;
+       bool diff;
        asm volatile("fs; repe; cmpsb; setnz %0"
                     : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
        return diff;
 }

-static inline int memcmp_gs(const void *s1, addr_t s2, size_t len)
+static inline bool memcmp_gs(const void *s1, addr_t s2, size_t len)
 {
-       u8 diff;
+       bool diff;
        asm volatile("gs; repe; cmpsb; setnz %0"
                     : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
        return diff;
@@ -17,7 +17,7 @@
 int memcmp(const void *s1, const void *s2, size_t len)
 {
-       u8 diff;
+       bool diff;
        asm("repe; cmpsb; setnz %0"
            : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
        return diff;
@@ -45,11 +45,11 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
        : "memory", "cc");
 }

-static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
-                                         u32 ecx_in, u32 *eax)
+static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
+                                           u32 ecx_in, u32 *eax)
 {
        int     cx, dx, si;
-       u8      error;
+       bool    error;

        /*
         * N.B. We do NOT need a cld after the BIOS call
@@ -43,7 +43,7 @@
 #ifdef CONFIG_ARCH_RANDOM

 /* Instead of arch_get_random_long() when alternatives haven't run. */
-static inline int rdrand_long(unsigned long *v)
+static inline bool rdrand_long(unsigned long *v)
 {
        int ok;
        asm volatile("1: " RDRAND_LONG "\n\t"
@@ -53,13 +53,13 @@ static inline int rdrand_long(unsigned long *v)
                     "2:"
                     : "=r" (ok), "=a" (*v)
                     : "0" (RDRAND_RETRY_LOOPS));
-       return ok;
+       return !!ok;
 }

 /* A single attempt at RDSEED */
 static inline bool rdseed_long(unsigned long *v)
 {
-       unsigned char ok;
+       bool ok;
        asm volatile(RDSEED_LONG "\n\t"
                     "setc %0"
                     : "=qm" (ok), "=a" (*v));
@@ -67,7 +67,7 @@ static inline bool rdseed_long(unsigned long *v)
 }

 #define GET_RANDOM(name, type, rdrand, nop)                    \
-static inline int name(type *v)                                \
+static inline bool name(type *v)                               \
 {                                                              \
        int ok;                                                 \
        alternative_io("movl $0, %0\n\t"                        \
@@ -80,13 +80,13 @@ static inline int name(type *v)                        \
                       X86_FEATURE_RDRAND,                      \
                       ASM_OUTPUT2("=r" (ok), "=a" (*v)),       \
                       "0" (RDRAND_RETRY_LOOPS));               \
-       return ok;                                              \
+       return !!ok;                                            \
 }

 #define GET_SEED(name, type, rdseed, nop)                      \
-static inline int name(type *v)                                \
+static inline bool name(type *v)                               \
 {                                                              \
-       unsigned char ok;                                       \
+       bool ok;                                                \
        alternative_io("movb $0, %0\n\t"                        \
                       nop,                                     \
                       rdseed "\n\t"                            \
@@ -119,7 +119,7 @@ GET_SEED(arch_get_random_seed_int, unsigned int, RDSEED_INT, ASM_NOP4);
 #else

-static inline int rdrand_long(unsigned long *v)
+static inline bool rdrand_long(unsigned long *v)
 {
        return 0;
 }
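
A note on the two `return !!ok` lines above: in the RDRAND retry loops, `ok` doubles as the remaining-retries counter, so on success it may hold any nonzero value, not just 1; the `!!` normalizes it before conversion to bool. Roughly, as a sketch (assuming RDRAND_RETRY_LOOPS is 10 and spelling out RDRAND_LONG as the rdrand mnemonic):

```c
#include <stdbool.h>

/* Sketch of the retry loop: 'ok' starts at the retry budget and is
 * decremented on each carry-clear (failed) attempt, so it ends up
 * zero only if every attempt failed -- hence "!!ok", not "ok". */
static inline bool rdrand_long_sketch(unsigned long *v)
{
        int ok;

        asm volatile("1: rdrand %1\n\t"
                     "jc 2f\n\t"        /* CF=1: hardware delivered a value */
                     "decl %0\n\t"      /* failure: spend one retry */
                     "jnz 1b\n\t"
                     "2:"
                     : "=r" (ok), "=a" (*v)
                     : "0" (10));       /* assumed RDRAND_RETRY_LOOPS */
        return !!ok;
}
```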
@@ -75,7 +75,7 @@ static __always_inline void atomic_sub(int i, atomic_t *v)
  * true if the result is zero, or false for all
  * other cases.
  */
-static __always_inline int atomic_sub_and_test(int i, atomic_t *v)
+static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
 {
        GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
 }
@@ -112,7 +112,7 @@ static __always_inline void atomic_dec(atomic_t *v)
  * returns true if the result is 0, or false for all other
  * cases.
  */
-static __always_inline int atomic_dec_and_test(atomic_t *v)
+static __always_inline bool atomic_dec_and_test(atomic_t *v)
 {
        GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
 }
@@ -125,7 +125,7 @@ static __always_inline int atomic_dec_and_test(atomic_t *v)
  * and returns true if the result is zero, or false for all
  * other cases.
  */
-static __always_inline int atomic_inc_and_test(atomic_t *v)
+static __always_inline bool atomic_inc_and_test(atomic_t *v)
 {
        GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
 }
@@ -139,7 +139,7 @@ static __always_inline int atomic_inc_and_test(atomic_t *v)
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-static __always_inline int atomic_add_negative(int i, atomic_t *v)
+static __always_inline bool atomic_add_negative(int i, atomic_t *v)
 {
        GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
 }
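
These *_and_test helpers are consumed as truth values, which is what makes bool the honest return type; a typical caller looks like the hypothetical sketch below (struct object and free_object() are assumptions, not from the patch):

```c
/* Hypothetical refcount pattern: the bool result feeds straight
 * into a branch, with no widened-integer round trip. */
static void put_object(struct object *obj)
{
        if (atomic_dec_and_test(&obj->refcount))
                free_object(obj);       /* dropped the last reference */
}
```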
@@ -70,7 +70,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
  * true if the result is zero, or false for all
  * other cases.
  */
-static inline int atomic64_sub_and_test(long i, atomic64_t *v)
+static inline bool atomic64_sub_and_test(long i, atomic64_t *v)
 {
        GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
 }
@@ -109,7 +109,7 @@ static __always_inline void atomic64_dec(atomic64_t *v)
  * returns true if the result is 0, or false for all other
  * cases.
  */
-static inline int atomic64_dec_and_test(atomic64_t *v)
+static inline bool atomic64_dec_and_test(atomic64_t *v)
 {
        GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
 }
@@ -122,7 +122,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
  * and returns true if the result is zero, or false for all
  * other cases.
  */
-static inline int atomic64_inc_and_test(atomic64_t *v)
+static inline bool atomic64_inc_and_test(atomic64_t *v)
 {
        GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
 }
@@ -136,7 +136,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-static inline int atomic64_add_negative(long i, atomic64_t *v)
+static inline bool atomic64_add_negative(long i, atomic64_t *v)
 {
        GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
 }
@@ -180,7 +180,7 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v.
  */
-static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
+static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
 {
        long c, old;
        c = atomic64_read(v);
@@ -201,7 +201,7 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static __always_inline int test_and_set_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
 {
        GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
 }
@@ -213,7 +213,7 @@ static __always_inline int test_and_set_bit(long nr, volatile unsigned long *addr)
  *
  * This is the same as test_and_set_bit on x86.
  */
-static __always_inline int
+static __always_inline bool
 test_and_set_bit_lock(long nr, volatile unsigned long *addr)
 {
        return test_and_set_bit(nr, addr);
@@ -228,9 +228,9 @@ test_and_set_bit_lock(long nr, volatile unsigned long *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
  */
-static __always_inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
 {
-       unsigned char oldbit;
+       bool oldbit;

        asm("bts %2,%1\n\t"
            "setc %0"
@@ -247,7 +247,7 @@ static __always_inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static __always_inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
        GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
 }
@@ -268,9 +268,9 @@ static __always_inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
  * accessed from a hypervisor on the same CPU if running in a VM: don't change
  * this without also updating arch/x86/kernel/kvm.c
  */
-static __always_inline int __test_and_clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
-       unsigned char oldbit;
+       bool oldbit;

        asm volatile("btr %2,%1\n\t"
                     "setc %0"
@@ -280,9 +280,9 @@ static __always_inline int __test_and_clear_bit(long nr, volatile unsigned long *addr)
 }

 /* WARNING: non atomic and it can be reordered! */
-static __always_inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
 {
-       unsigned char oldbit;
+       bool oldbit;

        asm volatile("btc %2,%1\n\t"
                     "setc %0"
@@ -300,20 +300,20 @@ static __always_inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static __always_inline int test_and_change_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
 {
        GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
 }

-static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
+static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
 {
        return ((1UL << (nr & (BITS_PER_LONG-1))) &
                (addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
 }

-static __always_inline int variable_test_bit(long nr, volatile const unsigned long *addr)
+static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
 {
-       unsigned char oldbit;
+       bool oldbit;

        asm volatile("bt %2,%1\n\t"
                     "setc %0"
@@ -329,7 +329,7 @@ static __always_inline int variable_test_bit(long nr, volatile const unsigned long *addr)
  * @nr: bit number to test
  * @addr: Address to start counting from
  */
-static int test_bit(int nr, const volatile unsigned long *addr);
+static bool test_bit(int nr, const volatile unsigned long *addr);
 #endif

 #define test_bit(nr, addr)                     \
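
The test_bit() definition is cut off above; for reference, the dispatch it performs (a reconstruction from the kernel of this era, not part of the diff) picks the constant or variable helper with __builtin_constant_p:

```c
#define test_bit(nr, addr)                      \
        (__builtin_constant_p((nr))             \
         ? constant_test_bit((nr), (addr))      \
         : variable_test_bit((nr), (addr)))
```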
@@ -50,7 +50,7 @@ static inline void local_sub(long i, local_t *l)
  * true if the result is zero, or false for all
  * other cases.
  */
-static inline int local_sub_and_test(long i, local_t *l)
+static inline bool local_sub_and_test(long i, local_t *l)
 {
        GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
 }
@@ -63,7 +63,7 @@ static inline int local_sub_and_test(long i, local_t *l)
  * returns true if the result is 0, or false for all other
  * cases.
  */
-static inline int local_dec_and_test(local_t *l)
+static inline bool local_dec_and_test(local_t *l)
 {
        GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
 }
@@ -76,7 +76,7 @@ static inline int local_dec_and_test(local_t *l)
  * and returns true if the result is zero, or false for all
  * other cases.
  */
-static inline int local_inc_and_test(local_t *l)
+static inline bool local_inc_and_test(local_t *l)
 {
        GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
 }
@@ -90,7 +90,7 @@ static inline int local_inc_and_test(local_t *l)
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-static inline int local_add_negative(long i, local_t *l)
+static inline bool local_add_negative(long i, local_t *l)
 {
        GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
 }
@@ -510,14 +510,14 @@ do {                                                                   \
 /* This is not atomic against other CPUs -- CPU preemption needs to be off */
 #define x86_test_and_clear_bit_percpu(bit, var)                \
 ({                                                             \
-       unsigned char old__;                                    \
+       bool old__;                                             \
        asm volatile("btr %2,"__percpu_arg(1)"\n\tsetc %0"      \
                     : "=qm" (old__), "+m" (var)                \
                     : "dIr" (bit));                            \
        old__;                                                  \
 })

-static __always_inline int x86_this_cpu_constant_test_bit(unsigned int nr,
+static __always_inline bool x86_this_cpu_constant_test_bit(unsigned int nr,
                         const unsigned long __percpu *addr)
 {
        unsigned long __percpu *a = (unsigned long *)addr + nr / BITS_PER_LONG;
@@ -529,10 +529,10 @@ static __always_inline int x86_this_cpu_constant_test_bit(unsigned int nr,
 #endif
 }

-static inline int x86_this_cpu_variable_test_bit(int nr,
+static inline bool x86_this_cpu_variable_test_bit(int nr,
                         const unsigned long __percpu *addr)
 {
-       unsigned char oldbit;
+       bool oldbit;

        asm volatile("bt "__percpu_arg(2)",%1\n\t"
                     "setc %0"
@@ -23,11 +23,11 @@ cc_label:                                                      \

 #define __GEN_RMWcc(fullop, var, cc, ...)                      \
 do {                                                           \
-       char c;                                                 \
+       bool c;                                                 \
        asm volatile (fullop "; set" cc " %1"                   \
                        : "+m" (var), "=qm" (c)                 \
                        : __VA_ARGS__ : "memory");              \
-       return c != 0;                                          \
+       return c;                                               \
 } while (0)

 #define GEN_UNARY_RMWcc(op, var, arg0, cc)                     \
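
For context, on a compiler without asm goto, a helper such as atomic_dec_and_test() above is just this macro expanded. A rough sketch of the expansion (LOCK_PREFIX spelled out; not literal compiler output):

```c
/* Approximate expansion of
 *   GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
 * inside atomic_dec_and_test(): with "bool c", the byte written by
 * sete is returned as-is instead of via the old "c != 0". */
do {
        bool c;
        asm volatile ("lock; decl %0; sete %1"
                      : "+m" (v->counter), "=qm" (c)
                      : : "memory");
        return c;
} while (0)
```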
@@ -77,7 +77,7 @@ static inline void __down_read(struct rw_semaphore *sem)
 /*
  * trylock for reading -- returns 1 if successful, 0 if contention
  */
-static inline int __down_read_trylock(struct rw_semaphore *sem)
+static inline bool __down_read_trylock(struct rw_semaphore *sem)
 {
        long result, tmp;
        asm volatile("# beginning __down_read_trylock\n\t"
@@ -93,7 +93,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
                     : "+m" (sem->count), "=&a" (result), "=&r" (tmp)
                     : "i" (RWSEM_ACTIVE_READ_BIAS)
                     : "memory", "cc");
-       return result >= 0 ? 1 : 0;
+       return result >= 0;
 }

 /*
@@ -134,9 +134,10 @@ static inline int __down_write_killable(struct rw_semaphore *sem)
 /*
  * trylock for writing -- returns 1 if successful, 0 if contention
  */
-static inline int __down_write_trylock(struct rw_semaphore *sem)
+static inline bool __down_write_trylock(struct rw_semaphore *sem)
 {
-       long result, tmp;
+       bool result;
+       long tmp0, tmp1;
        asm volatile("# beginning __down_write_trylock\n\t"
                     "  mov          %0,%1\n\t"
                     "1:\n\t"
@@ -144,14 +145,14 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
                     /* was the active mask 0 before? */
                     "  jnz          2f\n\t"
                     "  mov          %1,%2\n\t"
-                    "  add          %3,%2\n\t"
+                    "  add          %4,%2\n\t"
                     LOCK_PREFIX "  cmpxchg  %2,%0\n\t"
                     "  jnz          1b\n\t"
                     "2:\n\t"
-                    "  sete         %b1\n\t"
-                    "  movzbl       %b1, %k1\n\t"
+                    "  sete         %3\n\t"
                     "# ending __down_write_trylock\n\t"
-                    : "+m" (sem->count), "=&a" (result), "=&r" (tmp)
+                    : "+m" (sem->count), "=&a" (tmp0), "=&r" (tmp1),
+                      "=qm" (result)
                     : "er" (RWSEM_ACTIVE_WRITE_BIAS)
                     : "memory", "cc");
        return result;
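
Two details in the hunk above are worth spelling out: adding "=qm" (result) as a third output pushes the RWSEM_ACTIVE_WRITE_BIAS input from %3 to %4, since gcc numbers outputs before inputs, and sete now writes the flag straight into the byte-sized bool instead of the old sete-plus-movzbl widening into a long register. A minimal hypothetical sketch of the same sete-into-bool pattern (the helper is invented for illustration, not from the patch):

```c
#include <stdbool.h>

/* Hypothetical helper: add 'delta' to '*v' and report whether the
 * result hit zero. 'zf' is output %1, so inputs start at %2 --
 * inserting an output shifts input numbering, exactly as in the
 * __down_write_trylock change above. */
static inline bool add_hits_zero(long *v, long delta)
{
        bool zf;

        asm volatile("add %2,%0\n\t"
                     "sete %1"
                     : "+m" (*v), "=qm" (zf)
                     : "er" (delta)
                     : "memory", "cc");
        return zf;
}
```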
@@ -95,27 +95,27 @@ static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
 #ifdef CONFIG_ARCH_RANDOM
 # include <asm/archrandom.h>
 #else
-static inline int arch_get_random_long(unsigned long *v)
+static inline bool arch_get_random_long(unsigned long *v)
 {
        return 0;
 }
-static inline int arch_get_random_int(unsigned int *v)
+static inline bool arch_get_random_int(unsigned int *v)
 {
        return 0;
 }
-static inline int arch_has_random(void)
+static inline bool arch_has_random(void)
 {
        return 0;
 }
-static inline int arch_get_random_seed_long(unsigned long *v)
+static inline bool arch_get_random_seed_long(unsigned long *v)
 {
        return 0;
 }
-static inline int arch_get_random_seed_int(unsigned int *v)
+static inline bool arch_get_random_seed_int(unsigned int *v)
 {
        return 0;
 }
-static inline int arch_has_random_seed(void)
+static inline bool arch_has_random_seed(void)
 {
        return 0;
 }