Commit 397c0ead authored by Stephen Rothwell

Merge remote-tracking branch 'tip/auto-latest'

parents 9deda982 28948fd5
@@ -5047,6 +5047,14 @@
or other driver-specific files in the
Documentation/watchdog/ directory.
watchdog_thresh=
[KNL]
Set the hard lockup detector stall duration
threshold in seconds. The soft lockup detector
threshold is set to twice the value. A value of 0
disables both lockup detectors. Default is 10
seconds.
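For context, the "twice the value" relationship above is how the kernel
derives the soft lockup threshold from this parameter; a minimal sketch,
modeled on mainline kernel/watchdog.c rather than on anything in this commit:

int __read_mostly watchdog_thresh = 10;	/* seconds; 0 disables both detectors */

static int get_softlockup_thresh(void)
{
	/* the soft lockup threshold is always twice the hard one, e.g. 10 s -> 20 s */
	return watchdog_thresh * 2;
}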
workqueue.watchdog_thresh=
If CONFIG_WQ_WATCHDOG is configured, workqueue can
warn stall conditions and dump internal state to
@@ -6,7 +6,8 @@
# 2) Generate timeconst.h
# 3) Generate asm-offsets.h (may need bounds.h and timeconst.h)
# 4) Check for missing system calls
# 5) Generate constants.py (may need bounds.h)
# 5) check atomics headers are up-to-date
# 6) Generate constants.py (may need bounds.h)
#####
# 1) Generate bounds.h
@@ -59,7 +60,20 @@ missing-syscalls: scripts/checksyscalls.sh $(offsets-file) FORCE
$(call cmd,syscalls)
#####
# 5) Generate constants for Python GDB integration
# 5) Check atomic headers are up-to-date
#
always += old-atomics
targets += old-atomics
quiet_cmd_atomics = CALL $<
cmd_atomics = $(CONFIG_SHELL) $<
old-atomics: scripts/atomic/check-atomics.sh FORCE
$(call cmd,atomics)
#####
# 6) Generate constants for Python GDB integration
#
extra-$(CONFIG_GDB_SCRIPTS) += build_constants_py
@@ -2616,6 +2616,7 @@ L: linux-kernel@vger.kernel.org
S: Maintained
F: arch/*/include/asm/atomic*.h
F: include/*/atomic*.h
F: scripts/atomic/
ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER
M: Bradley Grove <linuxdrivers@attotech.com>
@@ -39,7 +39,7 @@
#define ATOMIC_OP(op, asm_op) \
__LL_SC_INLINE void \
__LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \
__LL_SC_PREFIX(arch_atomic_##op(int i, atomic_t *v)) \
{ \
unsigned long tmp; \
int result; \
@@ -53,11 +53,11 @@ __LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
: "Ir" (i)); \
} \
__LL_SC_EXPORT(atomic_##op);
__LL_SC_EXPORT(arch_atomic_##op);
#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \
__LL_SC_INLINE int \
__LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v)) \
__LL_SC_PREFIX(arch_atomic_##op##_return##name(int i, atomic_t *v)) \
{ \
unsigned long tmp; \
int result; \
@@ -75,11 +75,11 @@ __LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v)) \
\
return result; \
} \
__LL_SC_EXPORT(atomic_##op##_return##name);
__LL_SC_EXPORT(arch_atomic_##op##_return##name);
#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \
__LL_SC_INLINE int \
__LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v)) \
__LL_SC_PREFIX(arch_atomic_fetch_##op##name(int i, atomic_t *v)) \
{ \
unsigned long tmp; \
int val, result; \
@@ -97,7 +97,7 @@ __LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v)) \
\
return result; \
} \
__LL_SC_EXPORT(atomic_fetch_##op##name);
__LL_SC_EXPORT(arch_atomic_fetch_##op##name);
#define ATOMIC_OPS(...) \
ATOMIC_OP(__VA_ARGS__) \
@@ -133,7 +133,7 @@ ATOMIC_OPS(xor, eor)
#define ATOMIC64_OP(op, asm_op) \
__LL_SC_INLINE void \
__LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \
__LL_SC_PREFIX(arch_atomic64_##op(long i, atomic64_t *v)) \
{ \
long result; \
unsigned long tmp; \
@@ -147,11 +147,11 @@ __LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
: "Ir" (i)); \
} \
__LL_SC_EXPORT(atomic64_##op);
__LL_SC_EXPORT(arch_atomic64_##op);
#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \
__LL_SC_INLINE long \
__LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v)) \
__LL_SC_PREFIX(arch_atomic64_##op##_return##name(long i, atomic64_t *v))\
{ \
long result; \
unsigned long tmp; \
@@ -169,11 +169,11 @@ __LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v)) \
\
return result; \
} \
__LL_SC_EXPORT(atomic64_##op##_return##name);
__LL_SC_EXPORT(arch_atomic64_##op##_return##name);
#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \
__LL_SC_INLINE long \
__LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v)) \
__LL_SC_PREFIX(arch_atomic64_fetch_##op##name(long i, atomic64_t *v)) \
{ \
long result, val; \
unsigned long tmp; \
@@ -191,7 +191,7 @@ __LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v)) \
\
return result; \
} \
__LL_SC_EXPORT(atomic64_fetch_##op##name);
__LL_SC_EXPORT(arch_atomic64_fetch_##op##name);
#define ATOMIC64_OPS(...) \
ATOMIC64_OP(__VA_ARGS__) \
@@ -226,7 +226,7 @@ ATOMIC64_OPS(xor, eor)
#undef ATOMIC64_OP
__LL_SC_INLINE long
__LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
__LL_SC_PREFIX(arch_atomic64_dec_if_positive(atomic64_t *v))
{
long result;
unsigned long tmp;
@@ -246,7 +246,7 @@ __LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
return result;
}
__LL_SC_EXPORT(atomic64_dec_if_positive);
__LL_SC_EXPORT(arch_atomic64_dec_if_positive);
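The arch_ prefix added throughout this file is what lets a generic wrapper
layer sit on top of the raw arch ops; a minimal sketch of that layering,
assuming the shape of include/asm-generic/atomic-instrumented.h (the wrapper
side is not part of this hunk):

static __always_inline void atomic_add(int i, atomic_t *v)
{
	/* instrument the access, then call the arch implementation renamed above */
	kasan_check_write(v, sizeof(*v));
	arch_atomic_add(i, v);
}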
#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl) \
__LL_SC_INLINE u##sz \
@@ -25,9 +25,9 @@
#error "please don't include this file directly"
#endif
#define __LL_SC_ATOMIC(op) __LL_SC_CALL(atomic_##op)
#define __LL_SC_ATOMIC(op) __LL_SC_CALL(arch_atomic_##op)
#define ATOMIC_OP(op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
@@ -47,7 +47,7 @@ ATOMIC_OP(add, stadd)
#undef ATOMIC_OP
#define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...) \
static inline int atomic_fetch_##op##name(int i, atomic_t *v) \
static inline int arch_atomic_fetch_##op##name(int i, atomic_t *v) \
{ \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
@@ -79,7 +79,7 @@ ATOMIC_FETCH_OPS(add, ldadd)
#undef ATOMIC_FETCH_OPS
#define ATOMIC_OP_ADD_RETURN(name, mb, cl...) \
static inline int atomic_add_return##name(int i, atomic_t *v) \
static inline int arch_atomic_add_return##name(int i, atomic_t *v) \
{ \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
@@ -105,7 +105,7 @@ ATOMIC_OP_ADD_RETURN( , al, "memory")
#undef ATOMIC_OP_ADD_RETURN
static inline void atomic_and(int i, atomic_t *v)
static inline void arch_atomic_and(int i, atomic_t *v)
{
register int w0 asm ("w0") = i;
register atomic_t *x1 asm ("x1") = v;
@@ -123,7 +123,7 @@ static inline void atomic_and(int i, atomic_t *v)
}
#define ATOMIC_FETCH_OP_AND(name, mb, cl...) \
static inline int atomic_fetch_and##name(int i, atomic_t *v) \
static inline int arch_atomic_fetch_and##name(int i, atomic_t *v) \
{ \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
@@ -149,7 +149,7 @@ ATOMIC_FETCH_OP_AND( , al, "memory")
#undef ATOMIC_FETCH_OP_AND
static inline void atomic_sub(int i, atomic_t *v)
static inline void arch_atomic_sub(int i, atomic_t *v)
{
register int w0 asm ("w0") = i;
register atomic_t *x1 asm ("x1") = v;
@@ -167,7 +167,7 @@ static inline void atomic_sub(int i, atomic_t *v)
}
#define ATOMIC_OP_SUB_RETURN(name, mb, cl...) \
static inline int atomic_sub_return##name(int i, atomic_t *v) \
static inline int arch_atomic_sub_return##name(int i, atomic_t *v) \
{ \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
@@ -195,7 +195,7 @@ ATOMIC_OP_SUB_RETURN( , al, "memory")
#undef ATOMIC_OP_SUB_RETURN
#define ATOMIC_FETCH_OP_SUB(name, mb, cl...) \
static inline int atomic_fetch_sub##name(int i, atomic_t *v) \
static inline int arch_atomic_fetch_sub##name(int i, atomic_t *v) \
{ \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
@@ -222,9 +222,9 @@ ATOMIC_FETCH_OP_SUB( , al, "memory")
#undef ATOMIC_FETCH_OP_SUB
#undef __LL_SC_ATOMIC
#define __LL_SC_ATOMIC64(op) __LL_SC_CALL(atomic64_##op)
#define __LL_SC_ATOMIC64(op) __LL_SC_CALL(arch_atomic64_##op)
#define ATOMIC64_OP(op, asm_op) \
static inline void atomic64_##op(long i, atomic64_t *v) \
static inline void arch_atomic64_##op(long i, atomic64_t *v) \
{ \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
@@ -244,7 +244,7 @@ ATOMIC64_OP(add, stadd)
#undef ATOMIC64_OP
#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...) \
static inline long atomic64_fetch_##op##name(long i, atomic64_t *v) \
static inline long arch_atomic64_fetch_##op##name(long i, atomic64_t *v)\
{ \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
@@ -276,7 +276,7 @@ ATOMIC64_FETCH_OPS(add, ldadd)
#undef ATOMIC64_FETCH_OPS
#define ATOMIC64_OP_ADD_RETURN(name, mb, cl...) \
static inline long atomic64_add_return##name(long i, atomic64_t *v) \
static inline long arch_atomic64_add_return##name(long i, atomic64_t *v)\
{ \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
@@ -302,7 +302,7 @@ ATOMIC64_OP_ADD_RETURN( , al, "memory")
#undef ATOMIC64_OP_ADD_RETURN
static inline void atomic64_and(long i, atomic64_t *v)
static inline void arch_atomic64_and(long i, atomic64_t *v)
{
register long x0 asm ("x0") = i;
register atomic64_t *x1 asm ("x1") = v;
@@ -320,7 +320,7 @@ static inline void atomic64_and(long i, atomic64_t *v)
}
#define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \
static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \
static inline long arch_atomic64_fetch_and##name(long i, atomic64_t *v) \
{ \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
@@ -346,7 +346,7 @@ ATOMIC64_FETCH_OP_AND( , al, "memory")
#undef ATOMIC64_FETCH_OP_AND
static inline void atomic64_sub(long i, atomic64_t *v)
static inline void arch_atomic64_sub(long i, atomic64_t *v)
{
register long x0 asm ("x0") = i;
register atomic64_t *x1 asm ("x1") = v;
@@ -364,7 +364,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
}
#define ATOMIC64_OP_SUB_RETURN(name, mb, cl...) \
static inline long atomic64_sub_return##name(long i, atomic64_t *v) \
static inline long arch_atomic64_sub_return##name(long i, atomic64_t *v)\
{ \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
@@ -392,7 +392,7 @@ ATOMIC64_OP_SUB_RETURN( , al, "memory")
#undef ATOMIC64_OP_SUB_RETURN
#define ATOMIC64_FETCH_OP_SUB(name, mb, cl...) \
static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \
static inline long arch_atomic64_fetch_sub##name(long i, atomic64_t *v) \
{ \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
@@ -418,7 +418,7 @@ ATOMIC64_FETCH_OP_SUB( , al, "memory")
#undef ATOMIC64_FETCH_OP_SUB
static inline long atomic64_dec_if_positive(atomic64_t *v)
static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
{
register long x0 asm ("x0") = (long)v;
@@ -110,10 +110,10 @@ __XCHG_GEN(_mb)
})
/* xchg */
#define xchg_relaxed(...) __xchg_wrapper( , __VA_ARGS__)
#define xchg_acquire(...) __xchg_wrapper(_acq, __VA_ARGS__)
#define xchg_release(...) __xchg_wrapper(_rel, __VA_ARGS__)
#define xchg(...) __xchg_wrapper( _mb, __VA_ARGS__)
#define arch_xchg_relaxed(...) __xchg_wrapper( , __VA_ARGS__)
#define arch_xchg_acquire(...) __xchg_wrapper(_acq, __VA_ARGS__)
#define arch_xchg_release(...) __xchg_wrapper(_rel, __VA_ARGS__)
#define arch_xchg(...) __xchg_wrapper( _mb, __VA_ARGS__)
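As a usage note, the _relaxed/_acquire/_release suffixes carry the usual
memory-ordering meanings; a hedged sketch of how a caller typically chooses
between them (my_lock/my_unlock are made-up names, written against the
generic atomic API rather than these arch_ macros directly):

static inline void my_lock(atomic_t *l)
{
	/* acquire: accesses in the critical section cannot move before the xchg */
	while (atomic_xchg_acquire(l, 1))
		cpu_relax();
}

static inline void my_unlock(atomic_t *l)
{
	/* release: accesses in the critical section cannot move after the store */
	atomic_set_release(l, 0);
}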
#define __CMPXCHG_GEN(sfx) \
static inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
@@ -154,18 +154,18 @@ __CMPXCHG_GEN(_mb)
})
/* cmpxchg */
#define cmpxchg_relaxed(...) __cmpxchg_wrapper( , __VA_ARGS__)
#define cmpxchg_acquire(...) __cmpxchg_wrapper(_acq, __VA_ARGS__)
#define cmpxchg_release(...) __cmpxchg_wrapper(_rel, __VA_ARGS__)
#define cmpxchg(...) __cmpxchg_wrapper( _mb, __VA_ARGS__)
#define cmpxchg_local cmpxchg_relaxed
#define arch_cmpxchg_relaxed(...) __cmpxchg_wrapper( , __VA_ARGS__)
#define arch_cmpxchg_acquire(...) __cmpxchg_wrapper(_acq, __VA_ARGS__)
#define arch_cmpxchg_release(...) __cmpxchg_wrapper(_rel, __VA_ARGS__)
#define arch_cmpxchg(...) __cmpxchg_wrapper( _mb, __VA_ARGS__)
#define arch_cmpxchg_local arch_cmpxchg_relaxed
/* cmpxchg64 */
#define cmpxchg64_relaxed cmpxchg_relaxed
#define cmpxchg64_acquire cmpxchg_acquire
#define cmpxchg64_release cmpxchg_release
#define cmpxchg64 cmpxchg
#define cmpxchg64_local cmpxchg_local
#define arch_cmpxchg64_relaxed arch_cmpxchg_relaxed
#define arch_cmpxchg64_acquire arch_cmpxchg_acquire
#define arch_cmpxchg64_release arch_cmpxchg_release
#define arch_cmpxchg64 arch_cmpxchg
#define arch_cmpxchg64_local arch_cmpxchg_local
/* cmpxchg_double */
#define system_has_cmpxchg_double() 1
@@ -177,24 +177,24 @@ __CMPXCHG_GEN(_mb)
VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1); \
})
#define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
({\
int __ret;\
__cmpxchg_double_check(ptr1, ptr2); \
__ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \
(unsigned long)(n1), (unsigned long)(n2), \
ptr1); \
__ret; \
#define arch_cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
({ \
int __ret; \
__cmpxchg_double_check(ptr1, ptr2); \
__ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \
(unsigned long)(n1), (unsigned long)(n2), \
ptr1); \
__ret; \
})
#define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \
({\
int __ret;\
__cmpxchg_double_check(ptr1, ptr2); \
__ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \
(unsigned long)(n1), (unsigned long)(n2), \
ptr1); \
__ret; \
#define arch_cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \
({ \
int __ret; \
__cmpxchg_double_check(ptr1, ptr2); \
__ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \
(unsigned long)(n1), (unsigned long)(n2), \
ptr1); \
__ret; \
})
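For reference, cmpxchg_double() atomically updates two adjacent, suitably
aligned machine words; the adjacency is what __cmpxchg_double_check() above
asserts. A sketch of typical use, with made-up structure and field names:

struct pair {
	unsigned long a;
	unsigned long b;
} __aligned(2 * sizeof(unsigned long));

static bool update_pair(struct pair *p, unsigned long old_a, unsigned long old_b,
			unsigned long new_a, unsigned long new_b)
{
	/* succeeds (returns nonzero) only if both words still hold old_a/old_b */
	return cmpxchg_double(&p->a, &p->b, old_a, old_b, new_a, new_b);
}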
#define __CMPWAIT_CASE(w, sfx, sz) \
@@ -15,13 +15,13 @@
* ops which are SMP safe even on a UP kernel.
*/
#define sync_set_bit(nr, p) set_bit(nr, p)
#define sync_clear_bit(nr, p) clear_bit(nr, p)
#define sync_change_bit(nr, p) change_bit(nr, p)
#define sync_test_and_set_bit(nr, p) test_and_set_bit(nr, p)
#define sync_test_and_clear_bit(nr, p) test_and_clear_bit(nr, p)
#define sync_test_and_change_bit(nr, p) test_and_change_bit(nr, p)
#define sync_test_bit(nr, addr) test_bit(nr, addr)
#define sync_cmpxchg cmpxchg
#define sync_set_bit(nr, p) set_bit(nr, p)
#define sync_clear_bit(nr, p) clear_bit(nr, p)
#define sync_change_bit(nr, p) change_bit(nr, p)
#define sync_test_and_set_bit(nr, p) test_and_set_bit(nr, p)
#define sync_test_and_clear_bit(nr, p) test_and_clear_bit(nr, p)
#define sync_test_and_change_bit(nr, p) test_and_change_bit(nr, p)
#define sync_test_bit(nr, addr) test_bit(nr, addr)
#define arch_sync_cmpxchg arch_cmpxchg
#endif
@@ -100,7 +100,7 @@ $(obj)/zoffset.h: $(obj)/compressed/vmlinux FORCE
AFLAGS_header.o += -I$(objtree)/$(obj)
$(obj)/header.o: $(obj)/zoffset.h
LDFLAGS_setup.elf := -T
LDFLAGS_setup.elf := -m elf_i386 -T
$(obj)/setup.elf: $(src)/setup.ld $(SETUP_OBJS) FORCE
$(call if_changed,ld)
/* SPDX-License-Identifier: GPL-2.0 */
#include <asm-generic/vmlinux.lds.h>
OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)
OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT)
#undef i386
@@ -3,7 +3,7 @@
*
* Linker script for the i386 setup code
*/
OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
OUTPUT_FORMAT("elf32-i386")
OUTPUT_ARCH(i386)
ENTRY(_start)
@@ -94,13 +94,12 @@ static inline int alternatives_text_reserved(void *start, void *end)
#define alt_total_slen alt_end_marker"b-661b"
#define alt_rlen(num) e_replacement(num)"f-"b_replacement(num)"f"
#define __OLDINSTR(oldinstr, num) \
#define OLDINSTR(oldinstr, num) \
"# ALT: oldnstr\n" \
"661:\n\t" oldinstr "\n662:\n" \
"# ALT: padding\n" \
".skip -(((" alt_rlen(num) ")-(" alt_slen ")) > 0) * " \
"((" alt_rlen(num) ")-(" alt_slen ")),0x90\n"
#define OLDINSTR(oldinstr, num) \
__OLDINSTR(oldinstr, num) \
"((" alt_rlen(num) ")-(" alt_slen ")),0x90\n" \
alt_end_marker ":\n"
/*
@@ -116,11 +115,23 @@ static inline int alternatives_text_reserved(void *start, void *end)
* additionally longer than the first replacement alternative.
*/
#define OLDINSTR_2(oldinstr, num1, num2) \
"# ALT: oldinstr2\n" \
"661:\n\t" oldinstr "\n662:\n" \
"# ALT: padding2\n" \
".skip -((" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")) > 0) * " \
"(" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")), 0x90\n" \
alt_end_marker ":\n"
#define OLDINSTR_3(oldinsn, n1, n2, n3) \
"# ALT: oldinstr3\n" \
"661:\n\t" oldinsn "\n662:\n" \
"# ALT: padding3\n" \
".skip -((" alt_max_short(alt_max_short(alt_rlen(n1), alt_rlen(n2)), alt_rlen(n3)) \
" - (" alt_slen ")) > 0) * " \
"(" alt_max_short(alt_max_short(alt_rlen(n1), alt_rlen(n2)), alt_rlen(n3)) \
" - (" alt_slen ")), 0x90\n" \
alt_end_marker ":\n"
#define ALTINSTR_ENTRY(feature, num) \
" .long 661b - .\n" /* label */ \
" .long " b_replacement(num)"f - .\n" /* new instruction */ \
@@ -129,8 +140,9 @@ static inline int alternatives_text_reserved(void *start, void *end)
" .byte " alt_rlen(num) "\n" /* replacement len */ \
" .byte " alt_pad_len "\n" /* pad len */
#define ALTINSTR_REPLACEMENT(newinstr, feature, num) /* replacement */ \
b_replacement(num)":\n\t" newinstr "\n" e_replacement(num) ":\n\t"
#define ALTINSTR_REPLACEMENT(newinstr, feature, num) /* replacement */ \
"# ALT: replacement " #num "\n" \
b_replacement(num)":\n\t" newinstr "\n" e_replacement(num) ":\n"
/* alternative assembly primitive: */
#define ALTERNATIVE(oldinstr, newinstr, feature) \
@@ -153,6 +165,19 @@ static inline int alternatives_text_reserved(void *start, void *end)
ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
".popsection\n"
#define ALTERNATIVE_3(oldinsn, newinsn1, feat1, newinsn2, feat2, newinsn3, feat3) \
OLDINSTR_3(oldinsn, 1, 2, 3) \
".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY(feat1, 1) \
ALTINSTR_ENTRY(feat2, 2) \
ALTINSTR_ENTRY(feat3, 3) \
".popsection\n" \
".pushsection .altinstr_replacement, \"ax\"\n" \
ALTINSTR_REPLACEMENT(newinsn1, feat1, 1) \
ALTINSTR_REPLACEMENT(newinsn2, feat2, 2) \
ALTINSTR_REPLACEMENT(newinsn3, feat3, 3) \
".popsection\n"
/*
* Alternative instructions for different CPU types or capabilities.
*
@@ -36,13 +36,7 @@
* bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
*/
#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
versions. */
#define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
#else
#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
#endif
#define ADDR BITOP_ADDR(addr)
@@ -178,6 +178,10 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
/*
* Init a new mm. Used on mm copies, like at fork()
* and on mm's that are brand-new, like at execve().
*/
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
@@ -228,8 +232,22 @@ do { \
} while (0)
#endif
static inline void arch_dup_pkeys(struct mm_struct *oldmm,
struct mm_struct *mm)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
return;
/* Duplicate the oldmm pkey state in mm: */
mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
mm->context.execute_only_pkey = oldmm->context.execute_only_pkey;
#endif
}
static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
arch_dup_pkeys(oldmm, mm);
paravirt_arch_dup_mmap(oldmm, mm);
return ldt_dup_context(oldmm, mm);
}
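The new arch_dup_pkeys() exists because protection-key state is inherited
across fork(); a user-space illustration of that inheritance (not kernel
code, and it assumes a CPU and kernel with protection keys, so pkey_alloc()
may simply fail elsewhere):

#define _GNU_SOURCE
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* allocate a pkey and tag a page with it in the parent */
	int pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);
	void *p;

	if (pkey < 0)
		return 1;
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	pkey_mprotect(p, 4096, PROT_READ | PROT_WRITE, pkey);

	if (fork() == 0)
		_exit(0);	/* child: the page is still tagged with 'pkey' */
	return 0;
}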
@@ -217,6 +217,8 @@ static __always_inline unsigned long long rdtsc(void)
*/
static __always_inline unsigned long long rdtsc_ordered(void)
{
DECLARE_ARGS(val, low, high);
/*
* The RDTSC instruction is not ordered relative to memory
* access. The Intel SDM and the AMD APM are both vague on this
@@ -227,9 +229,19 @@ static __always_inline unsigned long long rdtsc_ordered(void)
* ordering guarantees as reading from a global memory location
* that some other imaginary CPU is updating continuously with a
* time stamp.
*
* Thus, use the preferred barrier on the respective CPU, aiming for
* RDTSCP as the default.
*/
barrier_nospec();
return rdtsc();
asm volatile(ALTERNATIVE_3("rdtsc",
"mfence; rdtsc", X86_FEATURE_MFENCE_RDTSC,
"lfence; rdtsc", X86_FEATURE_LFENCE_RDTSC,
"rdtscp", X86_FEATURE_RDTSCP)
: EAX_EDX_RET(val, low, high)
/* RDTSCP clobbers ECX with MSR_TSC_AUX. */
:: "ecx");
return EAX_EDX_VAL(val, low, high);
}
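Two notes on the rdtsc_ordered() change above. First, ALTERNATIVE_3()
entries are applied in order at patch time, so the last listed variant whose
feature bit is set (RDTSCP when available) is what ends up in the instruction
stream. Second, a small usage sketch, with do_something() as a placeholder
and the cycles-to-time conversion omitted:

static void time_something(void)
{
	u64 t0, t1;

	t0 = rdtsc_ordered();
	do_something();		/* placeholder for the work being timed */
	t1 = rdtsc_ordered();
	pr_info("took %llu TSC cycles\n", t1 - t0);
}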
static inline unsigned long long native_read_pmc(int counter)
@@ -179,14 +179,7 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len)
* No 3D Now!
*/
#if (__GNUC__ >= 4)
#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
#else
#define memcpy(t, f, n) \
(__builtin_constant_p((n)) \
? __constant_memcpy((t), (f), (n)) \
: __memcpy((t), (f), (n)))
#endif
#endif
#endif /* !CONFIG_FORTIFY_SOURCE */
@@ -216,29 +209,6 @@ static inline void *__memset_generic(void *s, char c, size_t count)
/* we might want to write optimized versions of these later */
#define __constant_count_memset(s, c, count) __memset_generic((s), (c), (count))
/*
* memset(x, 0, y) is a reasonably common thing to do, so we want to fill
* things 32 bits at a time even when we don't know the size of the
* area at compile-time..
*/
static __always_inline
void *__constant_c_memset(void *s, unsigned long c, size_t count)
{
int d0, d1;
asm volatile("rep ; stosl\n\t"
"testb $2,%b3\n\t"
"je 1f\n\t"
"stosw\n"
"1:\ttestb $1,%b3\n\t"
"je 2f\n\t"
"stosb\n"
"2:"
: "=&c" (d0), "=&D" (d1)
: "a" (c), "q" (count), "0" (count/4), "1" ((long)s)
: "memory");
return s;
}
/* Added by Gertjan van Wingerde to make minix and sysv module work */
#define __HAVE_ARCH_STRNLEN
extern size_t strnlen(const char *s, size_t count);
@@ -247,72 +217,6 @@ extern size_t strnlen(const char *s, size_t count);
#define __HAVE_ARCH_STRSTR
extern char *strstr(const char *cs, const char *ct);
/*
* This looks horribly ugly, but the compiler can optimize it totally,
* as we by now know that both pattern and count is constant..
*/
static __always_inline
void *__constant_c_and_count_memset(void *s, unsigned long pattern,
size_t count)
{
switch (count) {
case 0:
return s;
case 1:
*(unsigned char *)s = pattern & 0xff;
return s;
case 2:
*(unsigned short *)s = pattern & 0xffff;
return s;
case 3:
*(unsigned short *)s = pattern & 0xffff;
*((unsigned char *)s + 2) = pattern & 0xff;
return s;
case 4:
*(unsigned long *)s = pattern;
return s;
}
#define COMMON(x) \
asm volatile("rep ; stosl" \
x \
: "=&c" (d0), "=&D" (d1) \
: "a" (eax), "0" (count/4), "1" ((long)s) \
: "memory")
{
int d0, d1;
#if __GNUC__ == 4 && __GNUC_MINOR__ == 0
/* Workaround for broken gcc 4.0 */
register unsigned long eax asm("%eax") = pattern;
#else
unsigned long eax = pattern;
#endif
switch (count % 4) {
case 0:
COMMON("");
return s;
case 1:
COMMON("\n\tstosb");
return s;
case 2:
COMMON("\n\tstosw");
return s;
default:
COMMON("\n\tstosw\n\tstosb");
return s;
}
}
#undef COMMON
}
#define __constant_c_x_memset(s, c, count) \
(__builtin_constant_p(count) \
? __constant_c_and_count_memset((s), (c), (count)) \
: __constant_c_memset((s), (c), (count)))
#define __memset(s, c, count) \
(__builtin_constant_p(count) \
? __constant_count_memset((s), (c), (count)) \
@@ -321,15 +225,7 @@ void *__constant_c_and_count_memset(void *s, unsigned long pattern,
#define __HAVE_ARCH_MEMSET
extern void *memset(void *, int, size_t);