author     Will Deacon <will.deacon@arm.com>          2014-07-09 19:22:13 +0100
committer  Mark Brown <broonie@kernel.org>            2015-01-21 22:55:42 +0000
commit     82a95d0521cb6258559d18dca736da8272ba05a7 (patch)
tree       5a17f67696e00400909e35505387a7b9031445cd /arch/arm64
parent     95c91bdd8eafdc74337049c45d74c903b7dac49c (diff)
arm64: vdso: move data page before code pages
Andy pointed out that binutils generates additional sections in the vdso
image (e.g. section string table) which, if our .text section gets big
enough, could cross a page boundary and end up screwing up the location
where the kernel expects to put the data page.

This patch solves the issue in the same manner as x86_32, by moving the
data page before the code pages.

Cc: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
(cherry picked from commit 601255ae3c98fdeeee3a8bb4696425e4f868b4f1)
Signed-off-by: Mark Brown <broonie@kernel.org>
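With the data page mapped immediately below the code pages, its location is
fixed relative to the vDSO text, so the linker script can export it as
"_vdso_data = . - PAGE_SIZE" (see the vdso.lds.S hunk below) and vDSO
functions can reach it through an ordinary symbol reference, wherever the
object ends up being loaded. The following is only a minimal C sketch of
that idea; the forward declaration and the get_vdso_data() helper are
illustrative, not the in-tree accessors.

/*
 * Minimal sketch (illustrative, not the in-tree code): vdso.lds.S provides
 * the data page address one page below the start of the vDSO text, so a
 * plain symbol reference resolves to it regardless of the load address.
 */
struct vdso_data;                       /* layout defined by the kernel */
extern struct vdso_data _vdso_data;     /* provided by vdso.lds.S */

static inline const struct vdso_data *get_vdso_data(void)
{
        return &_vdso_data;
}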
Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/kernel/vdso.c            | 34
-rw-r--r--  arch/arm64/kernel/vdso/vdso.lds.S   |  4
2 files changed, 18 insertions(+), 20 deletions(-)
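The diff below also swaps the names of the two special mappings, so the data
page shows up as a one-page [vvar] mapping immediately below the [vdso] code
mapping in a process's address space. As a hypothetical way to observe that
layout from userspace (not part of this patch), a process can filter its own
/proc/self/maps:

#include <stdio.h>
#include <string.h>

/* Print the [vvar] and [vdso] lines from this process's memory map. */
int main(void)
{
        char line[256];
        FILE *maps = fopen("/proc/self/maps", "r");

        if (!maps)
                return 1;

        while (fgets(line, sizeof(line), maps))
                if (strstr(line, "[vvar]") || strstr(line, "[vdso]"))
                        fputs(line, stdout);

        fclose(maps);
        return 0;
}

With this patch applied, the [vvar] entry should span exactly one page and
end where the [vdso] entry begins.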
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index ad8dddbffef2..b8289f08795f 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -118,8 +118,8 @@ static int __init vdso_init(void)
        }
        vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
-       pr_info("vdso: %ld pages (%ld code, %ld data) at base %p\n",
-               vdso_pages + 1, vdso_pages, 1L, &vdso_start);
+       pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n",
+               vdso_pages + 1, vdso_pages, &vdso_start, 1L, vdso_data);
        /* Allocate the vDSO pagelist, plus a page for the data. */
        vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *),
@@ -127,22 +127,22 @@ static int __init vdso_init(void)
        if (vdso_pagelist == NULL)
                return -ENOMEM;
+       /* Grab the vDSO data page. */
+       vdso_pagelist[0] = virt_to_page(vdso_data);
+
        /* Grab the vDSO code pages. */
        for (i = 0; i < vdso_pages; i++)
-               vdso_pagelist[i] = virt_to_page(&vdso_start + i * PAGE_SIZE);
-
-       /* Grab the vDSO data page. */
-       vdso_pagelist[i] = virt_to_page(vdso_data);
+               vdso_pagelist[i + 1] = virt_to_page(&vdso_start + i * PAGE_SIZE);
        /* Populate the special mapping structures */
        vdso_spec[0] = (struct vm_special_mapping) {
-               .name = "[vdso]",
+               .name = "[vvar]",
                .pages = vdso_pagelist,
        };
        vdso_spec[1] = (struct vm_special_mapping) {
-               .name = "[vvar]",
-               .pages = vdso_pagelist + vdso_pages,
+               .name = "[vdso]",
+               .pages = &vdso_pagelist[1],
        };
        return 0;
@@ -166,22 +166,22 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
                ret = ERR_PTR(vdso_base);
                goto up_fail;
        }
-       mm->context.vdso = (void *)vdso_base;
-
-       ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
-                                      VM_READ|VM_EXEC|
-                                      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+       ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
+                                      VM_READ|VM_MAYREAD,
                                       &vdso_spec[0]);
        if (IS_ERR(ret))
                goto up_fail;
-       vdso_base += vdso_text_len;
-       ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
-                                      VM_READ|VM_MAYREAD,
+       vdso_base += PAGE_SIZE;
+       mm->context.vdso = (void *)vdso_base;
+       ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
+                                      VM_READ|VM_EXEC|
+                                      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
                                       &vdso_spec[1]);
        if (IS_ERR(ret))
                goto up_fail;
+
        up_write(&mm->mmap_sem);
        return 0;
diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S
index 8154b8d1c826..beca249bc2f3 100644
--- a/arch/arm64/kernel/vdso/vdso.lds.S
+++ b/arch/arm64/kernel/vdso/vdso.lds.S
@@ -28,6 +28,7 @@ OUTPUT_ARCH(aarch64)
SECTIONS
{
+       PROVIDE(_vdso_data = . - PAGE_SIZE);
        . = VDSO_LBASE + SIZEOF_HEADERS;
        .hash : { *(.hash) } :text
@@ -57,9 +58,6 @@ SECTIONS
        _end = .;
        PROVIDE(end = .);
-       . = ALIGN(PAGE_SIZE);
-       PROVIDE(_vdso_data = .);
-
        /DISCARD/ : {
                *(.note.GNU-stack)
                *(.data .data.* .gnu.linkonce.d.* .sdata*)