Commit a0abcf2e authored by Linus Torvalds

Merge branch 'x86/vdso' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into next

Pull x86 vdso updates from Peter Anvin:
 "Vdso cleanups and improvements largely from Andy Lutomirski.  This
  makes the vdso a lot less 'special'"

* 'x86/vdso' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/vdso, build: Make LE access macros clearer, host-safe
  x86/vdso, build: Fix cross-compilation from big-endian architectures
  x86/vdso, build: When vdso2c fails, unlink the output
  x86, vdso: Fix an OOPS accessing the HPET mapping w/o an HPET
  x86, mm: Replace arch_vma_name with vm_ops->name for vsyscalls
  x86, mm: Improve _install_special_mapping and fix x86 vdso naming
  mm, fs: Add vm_ops->name as an alternative to arch_vma_name
  x86, vdso: Fix an OOPS accessing the HPET mapping w/o an HPET
  x86, vdso: Remove vestiges of VDSO_PRELINK and some outdated comments
  x86, vdso: Move the vvar and hpet mappings next to the 64-bit vDSO
  x86, vdso: Move the 32-bit vdso special pages after the text
  x86, vdso: Reimplement vdso.so preparation in build-time C
  x86, vdso: Move syscall and sysenter setup into kernel/cpu/common.c
  x86, vdso: Clean up 32-bit vs 64-bit vdso params
  x86, mm: Ensure correct alignment of the fixmap
parents 2071b3e3 c191920f
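
The recurring change across the hunks below is the retirement of the old
link-time VDSO32_SYMBOL() lookup in favour of per-image symbol offsets held in
a struct vdso_image. A minimal sketch of that pattern (illustrative only; the
vdso32_sym() helper name is an assumption, the tree open-codes the addition):

/*
 * Sketch, not code from this commit: a vDSO symbol's user address is the
 * per-process vdso base plus a byte offset recorded at build time by vdso2c.
 */
static void __user *vdso32_sym(unsigned long sym_offset)
{
	return (void __user *)current->mm->context.vdso + sym_offset;
}

/* e.g. the 32-bit sigreturn trampoline used as a signal restorer:
 *	restorer = vdso32_sym(selected_vdso32->sym___kernel_sigreturn);
 */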
@@ -383,8 +383,8 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
 	} else {
 		/* Return stub is in 32bit vsyscall page */
 		if (current->mm->context.vdso)
-			restorer = VDSO32_SYMBOL(current->mm->context.vdso,
-						 sigreturn);
+			restorer = current->mm->context.vdso +
+				selected_vdso32->sym___kernel_sigreturn;
 		else
 			restorer = &frame->retcode;
 	}
@@ -462,8 +462,8 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
 		if (ksig->ka.sa.sa_flags & SA_RESTORER)
 			restorer = ksig->ka.sa.sa_restorer;
 		else
-			restorer = VDSO32_SYMBOL(current->mm->context.vdso,
-						 rt_sigreturn);
+			restorer = current->mm->context.vdso +
+				selected_vdso32->sym___kernel_rt_sigreturn;
 		put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
 		/*
...
@@ -75,7 +75,12 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
 #include <asm/vdso.h>
-extern unsigned int vdso_enabled;
+#ifdef CONFIG_X86_64
+extern unsigned int vdso64_enabled;
+#endif
+#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
+extern unsigned int vdso32_enabled;
+#endif
 /*
  * This is used to ensure we don't load something for the wrong architecture.
@@ -269,9 +274,9 @@ extern int force_personality32;
 struct task_struct;
-#define ARCH_DLINFO_IA32(vdso_enabled)					\
+#define ARCH_DLINFO_IA32						\
 do {									\
-	if (vdso_enabled) {						\
+	if (vdso32_enabled) {						\
 		NEW_AUX_ENT(AT_SYSINFO,	VDSO_ENTRY);			\
 		NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE);	\
 	}								\
@@ -281,7 +286,7 @@ do {									\
 #define STACK_RND_MASK (0x7ff)
-#define ARCH_DLINFO		ARCH_DLINFO_IA32(vdso_enabled)
+#define ARCH_DLINFO		ARCH_DLINFO_IA32
 /* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
@@ -292,16 +297,17 @@ do {									\
 #define ARCH_DLINFO							\
 do {									\
-	if (vdso_enabled)						\
+	if (vdso64_enabled)						\
 		NEW_AUX_ENT(AT_SYSINFO_EHDR,				\
-			    (unsigned long)current->mm->context.vdso);	\
+			    (unsigned long __force)current->mm->context.vdso); \
 } while (0)
+/* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
 #define ARCH_DLINFO_X32							\
 do {									\
-	if (vdso_enabled)						\
+	if (vdso64_enabled)						\
 		NEW_AUX_ENT(AT_SYSINFO_EHDR,				\
-			    (unsigned long)current->mm->context.vdso);	\
+			    (unsigned long __force)current->mm->context.vdso); \
 } while (0)
 #define AT_SYSINFO		32
@@ -310,7 +316,7 @@ do {									\
 if (test_thread_flag(TIF_X32))						\
 	ARCH_DLINFO_X32;						\
 else									\
-	ARCH_DLINFO_IA32(sysctl_vsyscall32)
+	ARCH_DLINFO_IA32
 #define COMPAT_ELF_ET_DYN_BASE	(TASK_UNMAPPED_BASE + 0x1000000)
@@ -319,18 +325,17 @@ else									\
 #define VDSO_CURRENT_BASE	((unsigned long)current->mm->context.vdso)
 #define VDSO_ENTRY							\
-	((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
+	((unsigned long)current->mm->context.vdso +			\
+	 selected_vdso32->sym___kernel_vsyscall)
 struct linux_binprm;
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
 				       int uses_interp);
-extern int x32_setup_additional_pages(struct linux_binprm *bprm,
-				      int uses_interp);
-extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
-#define compat_arch_setup_additional_pages	syscall32_setup_pages
+extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
+					      int uses_interp);
+#define compat_arch_setup_additional_pages compat_arch_setup_additional_pages
 extern unsigned long arch_randomize_brk(struct mm_struct *mm);
 #define arch_randomize_brk arch_randomize_brk
...
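
ARCH_DLINFO and ARCH_DLINFO_IA32 populate the ELF auxiliary vector handed to
every new program; the renamed vdso32_enabled/vdso64_enabled knobs only decide
whether the entries are emitted. From userspace the result is visible through
getauxval(). A small illustrative userspace sketch (not part of this commit):

#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	/* Base address of the vDSO ELF image, from AT_SYSINFO_EHDR */
	unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

	printf("vDSO mapped at %#lx\n", vdso);
	return 0;
}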
@@ -24,7 +24,7 @@
 #include <linux/threads.h>
 #include <asm/kmap_types.h>
 #else
-#include <asm/vsyscall.h>
+#include <uapi/asm/vsyscall.h>
 #endif
 /*
@@ -41,7 +41,8 @@
 extern unsigned long __FIXADDR_TOP;
 #define FIXADDR_TOP	((unsigned long)__FIXADDR_TOP)
 #else
-#define FIXADDR_TOP	(VSYSCALL_END-PAGE_SIZE)
+#define FIXADDR_TOP	(round_up(VSYSCALL_ADDR + PAGE_SIZE, 1<<PMD_SHIFT) - \
+			 PAGE_SIZE)
 #endif
@@ -68,11 +69,7 @@ enum fixed_addresses {
 #ifdef CONFIG_X86_32
 	FIX_HOLE,
 #else
-	VSYSCALL_LAST_PAGE,
-	VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE
-			    + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
-	VVAR_PAGE,
-	VSYSCALL_HPET,
+	VSYSCALL_PAGE = (FIXADDR_TOP - VSYSCALL_ADDR) >> PAGE_SHIFT,
 #ifdef CONFIG_PARAVIRT_CLOCK
 	PVCLOCK_FIXMAP_BEGIN,
 	PVCLOCK_FIXMAP_END = PVCLOCK_FIXMAP_BEGIN+PVCLOCK_VSYSCALL_NR_PAGES-1,
...
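
For reference, the single remaining vsyscall fixmap slot works out as follows
(a worked example assuming 4 KiB pages and 2 MiB PMDs, i.e. PAGE_SHIFT == 12
and PMD_SHIFT == 21; the numbers are derived, not text from the commit):

/*
 *   VSYSCALL_ADDR = -10UL << 20				= 0xffffffffff600000
 *   FIXADDR_TOP   = round_up(VSYSCALL_ADDR + PAGE_SIZE, 1 << PMD_SHIFT)
 *		     - PAGE_SIZE				= 0xffffffffff7ff000
 *   VSYSCALL_PAGE = (FIXADDR_TOP - VSYSCALL_ADDR) >> PAGE_SHIFT = 511
 *
 * so __fix_to_virt(VSYSCALL_PAGE) = FIXADDR_TOP - 511 * PAGE_SIZE lands back
 * on VSYSCALL_ADDR, which is exactly what the BUILD_BUG_ON in map_vsyscall()
 * below checks.
 */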
@@ -18,7 +18,7 @@ typedef struct {
 #endif
 	struct mutex lock;
-	void *vdso;
+	void __user *vdso;
 } mm_context_t;
 #ifdef CONFIG_SMP
...
@@ -12,8 +12,6 @@ void ia32_syscall(void);
 void ia32_cstar_target(void);
 void ia32_sysenter_target(void);
-void syscall32_cpu_init(void);
 void x86_configure_nx(void);
 void x86_report_nx(void);
...
@@ -3,63 +3,51 @@
 #include <asm/page_types.h>
 #include <linux/linkage.h>
-#include <linux/init.h>
-#ifdef __ASSEMBLER__
-#define DEFINE_VDSO_IMAGE(symname, filename)				\
-__PAGE_ALIGNED_DATA ;							\
-	.globl symname##_start, symname##_end ;				\
-	.align PAGE_SIZE ;						\
-	symname##_start: ;						\
-	.incbin filename ;						\
-	symname##_end: ;						\
-	.align PAGE_SIZE /* extra data here leaks to userspace. */ ;	\
-									\
-.previous ;								\
-									\
-	.globl symname##_pages ;					\
-	.bss ;								\
-	.align 8 ;							\
-	.type symname##_pages, @object ;				\
-	symname##_pages: ;						\
-	.zero (symname##_end - symname##_start + PAGE_SIZE - 1) / PAGE_SIZE * (BITS_PER_LONG / 8) ; \
-	.size symname##_pages, .-symname##_pages
-#else
-#define DECLARE_VDSO_IMAGE(symname)					\
-	extern char symname##_start[], symname##_end[];			\
-	extern struct page *symname##_pages[]
-#if defined CONFIG_X86_32 || defined CONFIG_COMPAT
-#include <asm/vdso32.h>
-DECLARE_VDSO_IMAGE(vdso32_int80);
-#ifdef CONFIG_COMPAT
-DECLARE_VDSO_IMAGE(vdso32_syscall);
-#endif
-DECLARE_VDSO_IMAGE(vdso32_sysenter);
-/*
- * Given a pointer to the vDSO image, find the pointer to VDSO32_name
- * as that symbol is defined in the vDSO sources or linker script.
- */
-#define VDSO32_SYMBOL(base, name)					\
-({									\
-	extern const char VDSO32_##name[];				\
-	(void __user *)(VDSO32_##name + (unsigned long)(base));	\
-})
-#endif
-/*
- * These symbols are defined with the addresses in the vsyscall page.
- * See vsyscall-sigreturn.S.
- */
-extern void __user __kernel_sigreturn;
-extern void __user __kernel_rt_sigreturn;
-void __init patch_vdso32(void *vdso, size_t len);
+#ifndef __ASSEMBLER__
+#include <linux/mm_types.h>
+struct vdso_image {
+	void *data;
+	unsigned long size;   /* Always a multiple of PAGE_SIZE */
+
+	/* text_mapping.pages is big enough for data/size page pointers */
+	struct vm_special_mapping text_mapping;
+
+	unsigned long alt, alt_len;
+
+	unsigned long sym_end_mapping;  /* Total size of the mapping */
+
+	unsigned long sym_vvar_page;
+	unsigned long sym_hpet_page;
+	unsigned long sym_VDSO32_NOTE_MASK;
+	unsigned long sym___kernel_sigreturn;
+	unsigned long sym___kernel_rt_sigreturn;
+	unsigned long sym___kernel_vsyscall;
+	unsigned long sym_VDSO32_SYSENTER_RETURN;
+};
+
+#ifdef CONFIG_X86_64
+extern const struct vdso_image vdso_image_64;
+#endif
+
+#ifdef CONFIG_X86_X32
+extern const struct vdso_image vdso_image_x32;
+#endif
+
+#if defined CONFIG_X86_32 || defined CONFIG_COMPAT
+extern const struct vdso_image vdso_image_32_int80;
+#ifdef CONFIG_COMPAT
+extern const struct vdso_image vdso_image_32_syscall;
+#endif
+extern const struct vdso_image vdso_image_32_sysenter;
+
+extern const struct vdso_image *selected_vdso32;
+#endif
+
+extern void __init init_vdso_image(const struct vdso_image *image);
 #endif /* __ASSEMBLER__ */
...
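
For orientation, a hypothetical sketch of how such an image is consumed when
the vDSO is mapped into a process, loosely following the
_install_special_mapping() interface reworked in this series. The function
name map_vdso_sketch(), and the omitted locking and error unwinding, are
illustrative assumptions, not the tree's actual map_vdso():

/* Kernel-context sketch; mmap_sem handling and cleanup paths omitted. */
static int map_vdso_sketch(const struct vdso_image *image)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr;

	/* sym_end_mapping covers the text plus the trailing vvar/hpet pages */
	addr = get_unmapped_area(NULL, 0, image->sym_end_mapping, 0, 0);
	if (IS_ERR_VALUE(addr))
		return (int)addr;

	current->mm->context.vdso = (void __user *)addr;

	/* Install the vDSO text as a named special mapping ("[vdso]") */
	vma = _install_special_mapping(mm, addr, image->size,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
				       &image->text_mapping);

	return IS_ERR(vma) ? PTR_ERR(vma) : 0;
}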
-#ifndef _ASM_X86_VDSO32_H
-#define _ASM_X86_VDSO32_H
-#define VDSO_BASE_PAGE	0
-#define VDSO_VVAR_PAGE	1
-#define VDSO_HPET_PAGE	2
-#define VDSO_PAGES	3
-#define VDSO_PREV_PAGES	2
-#define VDSO_OFFSET(x)	((x) * PAGE_SIZE)
-#endif
@@ -29,31 +29,13 @@
 #else
-#ifdef BUILD_VDSO32
+extern char __vvar_page;
 #define DECLARE_VVAR(offset, type, name)				\
 	extern type vvar_ ## name __attribute__((visibility("hidden")));
 #define VVAR(name) (vvar_ ## name)
-#else
-extern char __vvar_page;
-/* Base address of vvars.  This is not ABI. */
-#ifdef CONFIG_X86_64
-#define VVAR_ADDRESS (-10*1024*1024 - 4096)
-#else
-#define VVAR_ADDRESS (&__vvar_page)
-#endif
-#define DECLARE_VVAR(offset, type, name)				\
-	static type const * const vvaraddr_ ## name =			\
-		(void *)(VVAR_ADDRESS + (offset));
-#define VVAR(name) (*vvaraddr_ ## name)
-#endif
 #define DEFINE_VVAR(type, name)						\
 	type name							\
 	__attribute__((section(".vvar_" #name), aligned(16))) __visible
...
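
The surviving DECLARE_VVAR/VVAR pair is used along these lines in vDSO code
(the vvar name and the 128-byte offset merely illustrate the pattern in
vvar.h; they are not additions made by this commit):

/* In vvar.h, each vvar gets a fixed offset and a hidden alias: */
DECLARE_VVAR(128, struct vsyscall_gtod_data, vsyscall_gtod_data)

/* vDSO code (e.g. vclock_gettime.c) then reads it through VVAR(): */
static inline const struct vsyscall_gtod_data *gtod_ref(void)
{
	return &VVAR(vsyscall_gtod_data);	/* struct from <asm/vgtod.h> */
}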
@@ -7,11 +7,6 @@ enum vsyscall_num {
 	__NR_vgetcpu,
 };
-#define VSYSCALL_START (-10UL << 20)
-#define VSYSCALL_SIZE 1024
-#define VSYSCALL_END (-2UL << 20)
-#define VSYSCALL_MAPPED_PAGES 1
-#define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))
+#define VSYSCALL_ADDR (-10UL << 20)
 #endif /* _UAPI_ASM_X86_VSYSCALL_H */
@@ -20,6 +20,7 @@
 #include <asm/processor.h>
 #include <asm/debugreg.h>
 #include <asm/sections.h>
+#include <asm/vsyscall.h>
 #include <linux/topology.h>
 #include <linux/cpumask.h>
 #include <asm/pgtable.h>
@@ -953,6 +954,38 @@ static void vgetcpu_set_mode(void)
 	else
 		vgetcpu_mode = VGETCPU_LSL;
 }
+
+/* May not be __init: called during resume */
+static void syscall32_cpu_init(void)
+{
+	/* Load these always in case some future AMD CPU supports
+	   SYSENTER from compat mode too. */
+	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
+	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
+	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
+
+	wrmsrl(MSR_CSTAR, ia32_cstar_target);
+}
+#endif
+
+#ifdef CONFIG_X86_32
+void enable_sep_cpu(void)
+{
+	int cpu = get_cpu();
+	struct tss_struct *tss = &per_cpu(init_tss, cpu);
+
+	if (!boot_cpu_has(X86_FEATURE_SEP)) {
+		put_cpu();
+		return;
+	}
+
+	tss->x86_tss.ss1 = __KERNEL_CS;
+	tss->x86_tss.sp1 = sizeof(struct tss_struct) + (unsigned long) tss;
+	wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
+	wrmsr(MSR_IA32_SYSENTER_ESP, tss->x86_tss.sp1, 0);
+	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) ia32_sysenter_target, 0);
+	put_cpu();
+}
+#endif
 #endif
 void __init identify_boot_cpu(void)
...
@@ -74,9 +74,6 @@ static inline void hpet_writel(unsigned int d, unsigned int a)
 static inline void hpet_set_mapping(void)
 {
 	hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
-#ifdef CONFIG_X86_64
-	__set_fixmap(VSYSCALL_HPET, hpet_address, PAGE_KERNEL_VVAR_NOCACHE);
-#endif
 }
 static inline void hpet_clear_mapping(void)
...
@@ -298,7 +298,8 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
 	}
 	if (current->mm->context.vdso)
-		restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
+		restorer = current->mm->context.vdso +
+			selected_vdso32->sym___kernel_sigreturn;
 	else
 		restorer = &frame->retcode;
 	if (ksig->ka.sa.sa_flags & SA_RESTORER)
@@ -361,7 +362,8 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
 		save_altstack_ex(&frame->uc.uc_stack, regs->sp);
 		/* Set up to return from userspace.  */
-		restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
+		restorer = current->mm->context.vdso +
+			selected_vdso32->sym___kernel_sigreturn;
 		if (ksig->ka.sa.sa_flags & SA_RESTORER)
 			restorer = ksig->ka.sa.sa_restorer;
 		put_user_ex(restorer, &frame->pretcode);
...
@@ -91,7 +91,7 @@ static int addr_to_vsyscall_nr(unsigned long addr)
 {
 	int nr;
-	if ((addr & ~0xC00UL) != VSYSCALL_START)
+	if ((addr & ~0xC00UL) != VSYSCALL_ADDR)
 		return -EINVAL;
 	nr = (addr & 0xC00UL) >> 10;
@@ -330,24 +330,17 @@ void __init map_vsyscall(void)
 {
 	extern char __vsyscall_page;
 	unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
-	unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
-	__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
+	__set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
 		     vsyscall_mode == NATIVE
 		     ? PAGE_KERNEL_VSYSCALL
 		     : PAGE_KERNEL_VVAR);
-	BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
-		     (unsigned long)VSYSCALL_START);
-	__set_fixmap(VVAR_PAGE, physaddr_vvar_page, PAGE_KERNEL_VVAR);
-	BUILD_BUG_ON((unsigned long)__fix_to_virt(VVAR_PAGE) !=
-		     (unsigned long)VVAR_ADDRESS);
+	BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
+		     (unsigned long)VSYSCALL_ADDR);
 }
 static int __init vsyscall_init(void)
 {
-	BUG_ON(VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE));
 	cpu_notifier_register_begin();
 	on_each_cpu(cpu_vsyscall_init, NULL, 1);
...
@@ -18,7 +18,8 @@
 #include <asm/traps.h>			/* dotraplinkage, ...		*/
 #include <asm/pgalloc.h>		/* pgd_*(), ...			*/
 #include <asm/kmemcheck.h>		/* kmemcheck_*(), ...		*/
-#include <asm/fixmap.h>			/* VSYSCALL_START		*/
+#include <asm/fixmap.h>			/* VSYSCALL_ADDR		*/
+#include <asm/vsyscall.h>		/* emulate_vsyscall		*/
 #define CREATE_TRACE_POINTS
 #include <asm/trace/exceptions.h>
@@ -771,7 +772,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 	 * emulation.
 	 */
 	if (unlikely((error_code & PF_INSTR) &&
-		     ((address & ~0xfff) == VSYSCALL_START))) {
+		     ((address & ~0xfff) == VSYSCALL_ADDR))) {
 		if (emulate_vsyscall(regs, address))
 			return;
 	}
...
@@ -1055,8 +1055,8 @@ void __init mem_init(void)
 	after_bootmem = 1;
 	/* Register memory areas for /proc/kcore */
-	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
-		   VSYSCALL_END - VSYSCALL_START, KCORE_OTHER);
+	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR,
+		   PAGE_SIZE, KCORE_OTHER);
 	mem_init_print_info(NULL);
 }
@@ -1185,11 +1185,19 @@ int kern_addr_valid(unsigned long addr)
  * covers the 64bit vsyscall page now. 32bit has a real VMA now and does
  * not need special handling anymore: