-rw-r--r--   arch/powerpc/kernel/signal_32.c  |  8
-rw-r--r--   arch/powerpc/kernel/signal_64.c  |  4
-rw-r--r--   arch/powerpc/kernel/vdso.c       | 57
-rw-r--r--   include/asm-powerpc/elf.h        |  2
-rw-r--r--   include/asm-powerpc/mmu.h        |  1
-rw-r--r--   include/asm-powerpc/page.h       |  3
-rw-r--r--   include/asm-powerpc/processor.h  |  1
7 files changed, 47 insertions(+), 29 deletions(-)
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 01e3c08cb55..22f07898484 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -757,10 +757,10 @@ static int handle_rt_signal(unsigned long sig, struct k_sigaction *ka,
/* Save user registers on the stack */
frame = &rt_sf->uc.uc_mcontext;
- if (vdso32_rt_sigtramp && current->thread.vdso_base) {
+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
if (save_user_regs(regs, frame, 0))
goto badframe;
- regs->link = current->thread.vdso_base + vdso32_rt_sigtramp;
+ regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
} else {
if (save_user_regs(regs, frame, __NR_rt_sigreturn))
goto badframe;
@@ -1029,10 +1029,10 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka,
|| __put_user(sig, &sc->signal))
goto badframe;
- if (vdso32_sigtramp && current->thread.vdso_base) {
+ if (vdso32_sigtramp && current->mm->context.vdso_base) {
if (save_user_regs(regs, &frame->mctx, 0))
goto badframe;
- regs->link = current->thread.vdso_base + vdso32_sigtramp;
+ regs->link = current->mm->context.vdso_base + vdso32_sigtramp;
} else {
if (save_user_regs(regs, &frame->mctx, __NR_sigreturn))
goto badframe;
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 27f65b95184..23ba69c2691 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -394,8 +394,8 @@ static int setup_rt_frame(int signr, struct k_sigaction *ka, siginfo_t *info,
current->thread.fpscr.val = 0;
/* Set up to return from userspace. */
- if (vdso64_rt_sigtramp && current->thread.vdso_base) {
- regs->link = current->thread.vdso_base + vdso64_rt_sigtramp;
+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
+ regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
} else {
err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
if (err)
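
Both the 32-bit and 64-bit signal paths above make the same choice: if the process has a vDSO mapped (now tracked per-mm in mm->context.vdso_base) and the trampoline symbol was resolved, the handler returns through the vDSO; otherwise a trampoline is written on the user stack. The following sketch condenses that selection so it compiles on its own; the *_sketch structs and setup_stack_trampoline() are stand-ins for the kernel's pt_regs, mm_context_t and setup_trampoline(), not the real definitions.

#include <stdio.h>

struct pt_regs_sketch { unsigned long link; };
struct mm_ctx_sketch  { unsigned long vdso_base; };

static int setup_stack_trampoline(struct pt_regs_sketch *regs)
{
        /* Stand-in for writing an sc/rt_sigreturn trampoline on the user stack. */
        (void)regs;
        return 0;
}

static int set_signal_return(struct pt_regs_sketch *regs,
                             struct mm_ctx_sketch *ctx,
                             unsigned long sigtramp_offset)
{
        if (sigtramp_offset && ctx->vdso_base) {
                /* Return through the trampoline inside the mapped vDSO. */
                regs->link = ctx->vdso_base + sigtramp_offset;
                return 0;
        }
        /* No vDSO for this process: fall back to the on-stack trampoline. */
        return setup_stack_trampoline(regs);
}

int main(void)
{
        struct pt_regs_sketch regs = { 0 };
        struct mm_ctx_sketch ctx = { 0x100000 };

        set_signal_return(&regs, &ctx, 0x400);
        printf("return address: %#lx\n", regs.link);
        return 0;
}
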
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 573afb68d69..bc3e15be308 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -223,6 +223,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
struct vm_area_struct *vma;
unsigned long vdso_pages;
unsigned long vdso_base;
+ int rc;
#ifdef CONFIG_PPC64
if (test_thread_flag(TIF_32BIT)) {
@@ -237,20 +238,13 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
vdso_base = VDSO32_MBASE;
#endif
- current->thread.vdso_base = 0;
+ current->mm->context.vdso_base = 0;
/* vDSO has a problem and was disabled, just don't "enable" it for the
* process
*/
if (vdso_pages == 0)
return 0;
-
- vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
- if (vma == NULL)
- return -ENOMEM;
-
- memset(vma, 0, sizeof(*vma));
-
/* Add a page to the vdso size for the data page */
vdso_pages ++;
@@ -259,17 +253,23 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
* at vdso_base which is the "natural" base for it, but we might fail
* and end up putting it elsewhere.
*/
+ down_write(&mm->mmap_sem);
vdso_base = get_unmapped_area(NULL, vdso_base,
vdso_pages << PAGE_SHIFT, 0, 0);
- if (vdso_base & ~PAGE_MASK) {
- kmem_cache_free(vm_area_cachep, vma);
- return (int)vdso_base;
+ if (IS_ERR_VALUE(vdso_base)) {
+ rc = vdso_base;
+ goto fail_mmapsem;
}
- current->thread.vdso_base = vdso_base;
+ /* Allocate a VMA structure and fill it up */
+ vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
+ if (vma == NULL) {
+ rc = -ENOMEM;
+ goto fail_mmapsem;
+ }
vma->vm_mm = mm;
- vma->vm_start = current->thread.vdso_base;
+ vma->vm_start = vdso_base;
vma->vm_end = vma->vm_start + (vdso_pages << PAGE_SHIFT);
/*
@@ -282,23 +282,38 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
* It's fine to use that for setting breakpoints in the vDSO code
* pages though
*/
- vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
+ vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC;
vma->vm_flags |= mm->def_flags;
vma->vm_page_prot = protection_map[vma->vm_flags & 0x7];
vma->vm_ops = &vdso_vmops;
- down_write(&mm->mmap_sem);
- if (insert_vm_struct(mm, vma)) {
- up_write(&mm->mmap_sem);
- kmem_cache_free(vm_area_cachep, vma);
- return -ENOMEM;
- }
+ /* Insert new VMA */
+ rc = insert_vm_struct(mm, vma);
+ if (rc)
+ goto fail_vma;
+
+ /* Put vDSO base into mm struct and account for memory usage */
+ current->mm->context.vdso_base = vdso_base;
mm->total_vm += (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
up_write(&mm->mmap_sem);
-
return 0;
+
+ fail_vma:
+ kmem_cache_free(vm_area_cachep, vma);
+ fail_mmapsem:
+ up_write(&mm->mmap_sem);
+ return rc;
+}
+
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
+ return "[vdso]";
+ return NULL;
}
+
+
static void * __init find_section32(Elf32_Ehdr *ehdr, const char *secname,
unsigned long *size)
{
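
The arch_setup_additional_pages() rework above also restructures the error handling: mmap_sem is taken before get_unmapped_area(), the VMA is allocated only once a slot has been found, and failures unwind through labels in reverse order of acquisition. The standalone sketch below shows only that goto-unwind idiom; the lock/alloc/insert helpers are stand-ins, not kernel APIs.

#include <stdio.h>
#include <stdlib.h>

static int take_lock(void)  { return 0; }
static void drop_lock(void) { }

static int insert_object(void *obj)
{
        (void)obj;
        return 0;               /* pretend this step can fail */
}

static int setup(void)
{
        void *obj;
        int rc;

        if (take_lock())
                return -1;

        obj = malloc(64);       /* allocate only after the lock is held */
        if (obj == NULL) {
                rc = -1;
                goto fail_lock;
        }

        rc = insert_object(obj);
        if (rc)
                goto fail_obj;  /* undo in reverse order of acquisition */

        drop_lock();
        return 0;

fail_obj:
        free(obj);
fail_lock:
        drop_lock();
        return rc;
}

int main(void)
{
        printf("setup() -> %d\n", setup());
        return 0;
}
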
diff --git a/include/asm-powerpc/elf.h b/include/asm-powerpc/elf.h
index 94d228f9c6a..319655ce66c 100644
--- a/include/asm-powerpc/elf.h
+++ b/include/asm-powerpc/elf.h
@@ -294,7 +294,7 @@ do { \
NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize); \
NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize); \
NEW_AUX_ENT(AT_UCACHEBSIZE, ucache_bsize); \
- VDSO_AUX_ENT(AT_SYSINFO_EHDR, current->thread.vdso_base) \
+ VDSO_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso_base) \
} while (0)
/* PowerPC64 relocations defined by the ABIs */
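
The AT_SYSINFO_EHDR aux-vector entry above is how userspace learns where the vDSO was mapped for the current process. As an illustration only: the sketch below reads it with getauxval(), a glibc 2.16+ convenience that postdates this patch, and assumes a 64-bit process for the Elf64_Ehdr cast; older code would walk the auxv array that follows envp[] instead.

#include <stdio.h>
#include <elf.h>
#include <sys/auxv.h>

int main(void)
{
        unsigned long base = getauxval(AT_SYSINFO_EHDR);

        if (base == 0) {
                puts("no vDSO mapped for this process");
                return 1;
        }

        /* The aux entry carries the address of the vDSO's ELF header. */
        Elf64_Ehdr *ehdr = (Elf64_Ehdr *)base;
        printf("vDSO at %#lx, %u program headers\n", base, (unsigned)ehdr->e_phnum);
        return 0;
}
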
diff --git a/include/asm-powerpc/mmu.h b/include/asm-powerpc/mmu.h
index 31f721994bd..96e47d1ce97 100644
--- a/include/asm-powerpc/mmu.h
+++ b/include/asm-powerpc/mmu.h
@@ -360,6 +360,7 @@ typedef struct {
#ifdef CONFIG_HUGETLB_PAGE
u16 low_htlb_areas, high_htlb_areas;
#endif
+ unsigned long vdso_base;
} mm_context_t;
diff --git a/include/asm-powerpc/page.h b/include/asm-powerpc/page.h
index ae610b62048..a315d0c0d96 100644
--- a/include/asm-powerpc/page.h
+++ b/include/asm-powerpc/page.h
@@ -192,6 +192,9 @@ extern void copy_user_page(void *to, void *from, unsigned long vaddr,
struct page *p);
extern int page_is_ram(unsigned long pfn);
+struct vm_area_struct;
+extern const char *arch_vma_name(struct vm_area_struct *vma);
+
#include <asm-generic/memory_model.h>
#endif /* __ASSEMBLY__ */
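
With arch_vma_name() returning "[vdso]" for the VMA whose start matches mm->context.vdso_base, the mapping becomes identifiable in /proc/<pid>/maps. A small userspace check (nothing here is kernel API) can confirm the label:

#include <stdio.h>
#include <string.h>

int main(void)
{
        FILE *maps = fopen("/proc/self/maps", "r");
        char line[512];

        if (maps == NULL) {
                perror("fopen");
                return 1;
        }
        while (fgets(line, sizeof(line), maps) != NULL) {
                if (strstr(line, "[vdso]") != NULL)
                        fputs(line, stdout);    /* address range, perms, "[vdso]" */
        }
        fclose(maps);
        return 0;
}
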
diff --git a/include/asm-powerpc/processor.h b/include/asm-powerpc/processor.h
index 93f83efeb31..d5c7ef1cca2 100644
--- a/include/asm-powerpc/processor.h
+++ b/include/asm-powerpc/processor.h
@@ -153,7 +153,6 @@ struct thread_struct {
unsigned long start_tb; /* Start purr when proc switched in */
unsigned long accum_tb; /* Total accumilated purr for process */
#endif
- unsigned long vdso_base; /* base of the vDSO library */
unsigned long dabr; /* Data address breakpoint register */
#ifdef CONFIG_ALTIVEC
/* Complete AltiVec register set */