Diffstat (limited to 'arch/arm64/kernel/vdso.c')
-rw-r--r--  arch/arm64/kernel/vdso.c  197
1 file changed, 151 insertions, 46 deletions
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 4e016574bd91..d4202a32abc9 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -18,6 +18,7 @@
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
+#include <linux/time_namespace.h>
#include <linux/timekeeper_internal.h>
#include <linux/vmalloc.h>
#include <vdso/datapage.h>
@@ -40,6 +41,12 @@ enum vdso_abi {
#endif /* CONFIG_COMPAT_VDSO */
};
+enum vvar_pages {
+ VVAR_DATA_PAGE_OFFSET,
+ VVAR_TIMENS_PAGE_OFFSET,
+ VVAR_NR_PAGES,
+};
+
struct vdso_abi_info {
const char *name;
const char *vdso_code_start;
@@ -107,25 +114,122 @@ static int __vdso_init(enum vdso_abi abi)
vdso_info[abi].vdso_code_start) >>
PAGE_SHIFT;
- /* Allocate the vDSO pagelist, plus a page for the data. */
- vdso_pagelist = kcalloc(vdso_info[abi].vdso_pages + 1,
+ vdso_pagelist = kcalloc(vdso_info[abi].vdso_pages,
sizeof(struct page *),
GFP_KERNEL);
if (vdso_pagelist == NULL)
return -ENOMEM;
- /* Grab the vDSO data page. */
- vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data));
-
-
/* Grab the vDSO code pages. */
pfn = sym_to_pfn(vdso_info[abi].vdso_code_start);
for (i = 0; i < vdso_info[abi].vdso_pages; i++)
- vdso_pagelist[i + 1] = pfn_to_page(pfn + i);
+ vdso_pagelist[i] = pfn_to_page(pfn + i);
+
+ vdso_info[abi].cm->pages = vdso_pagelist;
+
+ return 0;
+}
+
+#ifdef CONFIG_TIME_NS
+struct vdso_data *arch_get_vdso_data(void *vvar_page)
+{
+ return (struct vdso_data *)(vvar_page);
+}
+
+/*
+ * The vvar mapping contains data for a specific time namespace, so when a task
+ * changes namespace we must unmap its vvar data for the old namespace.
+ * Subsequent faults will map in data for the new namespace.
+ *
+ * For more details see timens_setup_vdso_data().
+ */
+int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
+{
+ struct mm_struct *mm = task->mm;
+ struct vm_area_struct *vma;
+
+ mmap_read_lock(mm);
+
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ unsigned long size = vma->vm_end - vma->vm_start;
+
+ if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA64].dm))
+ zap_page_range(vma, vma->vm_start, size);
+#ifdef CONFIG_COMPAT_VDSO
+ if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA32].dm))
+ zap_page_range(vma, vma->vm_start, size);
+#endif
+ }
+
+ mmap_read_unlock(mm);
+ return 0;
+}
+
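
Note: the zap-and-refault path above is reachable from userspace, since both fork() and setns() into a time namespace go through timens_commit(), which calls vdso_join_timens(). What follows is a minimal sketch, not part of this patch; it assumes CONFIG_TIME_NS=y and needs CAP_SYS_ADMIN and CAP_SYS_TIME (run as root):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>
    #include <sys/wait.h>
    #include <time.h>
    #include <unistd.h>

    #ifndef CLONE_NEWTIME
    #define CLONE_NEWTIME 0x00000080    /* not in older libc headers */
    #endif

    int main(void)
    {
        struct timespec ts;
        FILE *f;

        /* New time namespace; it applies to children, not to us. */
        if (unshare(CLONE_NEWTIME)) {
            perror("unshare(CLONE_NEWTIME)");
            return 1;
        }

        /* Shift CLOCK_MONOTONIC by ten days for namespace members. */
        f = fopen("/proc/self/timens_offsets", "w");
        if (!f || fprintf(f, "monotonic 864000 0\n") < 0 || fclose(f)) {
            perror("timens_offsets");
            return 1;
        }

        if (fork() == 0) {
            /* The child joins the namespace on fork(); its first
             * vDSO clock read faults in the namespace vvar page. */
            clock_gettime(CLOCK_MONOTONIC, &ts);
            printf("child:  %ld\n", (long)ts.tv_sec);
            _exit(0);
        }
        wait(NULL);
        clock_gettime(CLOCK_MONOTONIC, &ts);
        printf("parent: %ld\n", (long)ts.tv_sec);
        return 0;
    }

The child should report a CLOCK_MONOTONIC value roughly 864000 seconds ahead of the parent's; that first read in the child is served by a fresh fault on the namespace vvar page.
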
+static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
+{
+ if (likely(vma->vm_mm == current->mm))
+ return current->nsproxy->time_ns->vvar_page;
+
+ /*
+	 * VM_PFNMAP | VM_IO protect the .fault() handler from being called
+	 * through interfaces like /proc/$pid/mem or
+	 * process_vm_{readv,writev}() as long as there is no .access()
+	 * in special_mapping_vmops.
+	 * For more details see check_vma_flags() and __access_remote_vm().
+ */
+ WARN(1, "vvar_page accessed remotely");
+
+ return NULL;
+}
+#else
+static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
+{
+ return NULL;
+}
+#endif
- vdso_info[abi].dm->pages = &vdso_pagelist[0];
- vdso_info[abi].cm->pages = &vdso_pagelist[1];
+static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
+ struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct page *timens_page = find_timens_vvar_page(vma);
+ unsigned long pfn;
+
+ switch (vmf->pgoff) {
+ case VVAR_DATA_PAGE_OFFSET:
+ if (timens_page)
+ pfn = page_to_pfn(timens_page);
+ else
+ pfn = sym_to_pfn(vdso_data);
+ break;
+#ifdef CONFIG_TIME_NS
+ case VVAR_TIMENS_PAGE_OFFSET:
+ /*
+	 * If a task belongs to a time namespace then the namespace-specific
+	 * VVAR page is mapped at VVAR_DATA_PAGE_OFFSET and the real VVAR
+	 * page is mapped at VVAR_TIMENS_PAGE_OFFSET.
+ * See also the comment near timens_setup_vdso_data().
+ */
+ if (!timens_page)
+ return VM_FAULT_SIGBUS;
+ pfn = sym_to_pfn(vdso_data);
+ break;
+#endif /* CONFIG_TIME_NS */
+ default:
+ return VM_FAULT_SIGBUS;
+ }
+
+ return vmf_insert_pfn(vma, vmf->address, pfn);
+}
+
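
Note: one observable consequence of the switch above: for a task outside any time namespace, the VVAR_TIMENS_PAGE_OFFSET page has no backing, so touching it raises SIGBUS. A minimal sketch, not part of this patch:

    #define _GNU_SOURCE
    #include <setjmp.h>
    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static sigjmp_buf env;

    static void on_sigbus(int sig)
    {
        (void)sig;
        siglongjmp(env, 1);
    }

    int main(void)
    {
        unsigned long start = 0, end = 0, page = sysconf(_SC_PAGESIZE);
        char line[256];
        FILE *f = fopen("/proc/self/maps", "r");

        while (f && fgets(line, sizeof(line), f)) {
            if (strstr(line, "[vvar]")) {
                sscanf(line, "%lx-%lx", &start, &end);
                break;
            }
        }
        if (f)
            fclose(f);
        if (!start)
            return 1;

        printf("[vvar]: %lu pages\n", (end - start) / page);

        signal(SIGBUS, on_sigbus);
        if (sigsetjmp(env, 1) == 0) {
            /* No timens: expect SIGBUS on the second page. */
            (void)*(volatile char *)(start + page);
            printf("second page readable (task is in a timens)\n");
        } else {
            printf("SIGBUS on second [vvar] page\n");
        }
        return 0;
    }
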
+static int vvar_mremap(const struct vm_special_mapping *sm,
+ struct vm_area_struct *new_vma)
+{
+ unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
+
+ if (new_size != VVAR_NR_PAGES * PAGE_SIZE)
+ return -EINVAL;
return 0;
}
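
Note: the size check above is what a checkpoint/restore tool hits if it relocates the vvar area with a different length; a move must preserve all VVAR_NR_PAGES pages. A minimal sketch, not part of this patch, of a move-with-shrink that the hook is expected to reject with EINVAL:

    #define _GNU_SOURCE
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        unsigned long start = 0, end = 0, page = sysconf(_SC_PAGESIZE);
        char line[256];
        FILE *f = fopen("/proc/self/maps", "r");
        void *dst, *p;

        while (f && fgets(line, sizeof(line), f)) {
            if (strstr(line, "[vvar]")) {
                sscanf(line, "%lx-%lx", &start, &end);
                break;
            }
        }
        if (f)
            fclose(f);
        if (!start)
            return 1;

        /* Reserve a safe destination so MREMAP_FIXED cannot clobber
         * an unrelated mapping. */
        dst = mmap(NULL, page, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (dst == MAP_FAILED)
            return 1;

        /* Move [vvar] while shrinking it to a single page. */
        p = mremap((void *)start, end - start, page,
                   MREMAP_MAYMOVE | MREMAP_FIXED, dst);
        if (p == MAP_FAILED)
            printf("mremap: %s\n", strerror(errno));    /* EINVAL */
        return 0;
    }
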
@@ -139,9 +243,11 @@ static int __setup_additional_pages(enum vdso_abi abi,
unsigned long gp_flags = 0;
void *ret;
+ BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);
+
vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT;
/* Be sure to map the data page */
- vdso_mapping_len = vdso_text_len + PAGE_SIZE;
+ vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE;
vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
if (IS_ERR_VALUE(vdso_base)) {
@@ -149,8 +255,8 @@ static int __setup_additional_pages(enum vdso_abi abi,
goto up_fail;
}
- ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
- VM_READ|VM_MAYREAD,
+ ret = _install_special_mapping(mm, vdso_base, VVAR_NR_PAGES * PAGE_SIZE,
+ VM_READ|VM_MAYREAD|VM_PFNMAP,
vdso_info[abi].dm);
if (IS_ERR(ret))
goto up_fail;
@@ -158,7 +264,7 @@ static int __setup_additional_pages(enum vdso_abi abi,
if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && system_supports_bti())
gp_flags = VM_ARM64_BTI;
- vdso_base += PAGE_SIZE;
+ vdso_base += VVAR_NR_PAGES * PAGE_SIZE;
mm->context.vdso = (void *)vdso_base;
ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
VM_READ|VM_EXEC|gp_flags|
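
Note: with this change a 64-bit task maps VVAR_NR_PAGES (two) vvar pages immediately below the vDSO text, so /proc/self/maps ends up looking roughly like this (addresses and vDSO text size illustrative):

    ffffb8f79000-ffffb8f7b000 r--p 00000000 00:00 0    [vvar]
    ffffb8f7b000-ffffb8f7c000 r-xp 00000000 00:00 0    [vdso]
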
@@ -191,15 +297,12 @@ enum aarch32_map {
#ifdef CONFIG_COMPAT_VDSO
AA32_MAP_VVAR,
AA32_MAP_VDSO,
-#else
- AA32_MAP_SIGPAGE
#endif
+ AA32_MAP_SIGPAGE
};
static struct page *aarch32_vectors_page __ro_after_init;
-#ifndef CONFIG_COMPAT_VDSO
static struct page *aarch32_sig_page __ro_after_init;
-#endif
static struct vm_special_mapping aarch32_vdso_maps[] = {
[AA32_MAP_VECTORS] = {
@@ -209,17 +312,18 @@ static struct vm_special_mapping aarch32_vdso_maps[] = {
#ifdef CONFIG_COMPAT_VDSO
[AA32_MAP_VVAR] = {
.name = "[vvar]",
+ .fault = vvar_fault,
+ .mremap = vvar_mremap,
},
[AA32_MAP_VDSO] = {
.name = "[vdso]",
.mremap = aarch32_vdso_mremap,
},
-#else
+#endif /* CONFIG_COMPAT_VDSO */
[AA32_MAP_SIGPAGE] = {
.name = "[sigpage]", /* ABI */
.pages = &aarch32_sig_page,
},
-#endif /* CONFIG_COMPAT_VDSO */
};
static int aarch32_alloc_kuser_vdso_page(void)
@@ -242,27 +346,11 @@ static int aarch32_alloc_kuser_vdso_page(void)
return 0;
}
-#ifdef CONFIG_COMPAT_VDSO
-static int __aarch32_alloc_vdso_pages(void)
-{
- int ret;
-
- vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
- vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];
-
- ret = __vdso_init(VDSO_ABI_AA32);
- if (ret)
- return ret;
-
- return aarch32_alloc_kuser_vdso_page();
-}
-#else
-static int __aarch32_alloc_vdso_pages(void)
+static int aarch32_alloc_sigpage(void)
{
extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
unsigned long sigpage;
- int ret;
sigpage = get_zeroed_page(GFP_ATOMIC);
if (!sigpage)
@@ -271,18 +359,34 @@ static int __aarch32_alloc_vdso_pages(void)
memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz);
aarch32_sig_page = virt_to_page(sigpage);
flush_dcache_page(aarch32_sig_page);
+ return 0;
+}
- ret = aarch32_alloc_kuser_vdso_page();
- if (ret)
- free_page(sigpage);
+#ifdef CONFIG_COMPAT_VDSO
+static int __aarch32_alloc_vdso_pages(void)
+{
+ vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
+ vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];
- return ret;
+ return __vdso_init(VDSO_ABI_AA32);
}
#endif /* CONFIG_COMPAT_VDSO */
static int __init aarch32_alloc_vdso_pages(void)
{
- return __aarch32_alloc_vdso_pages();
+ int ret;
+
+#ifdef CONFIG_COMPAT_VDSO
+ ret = __aarch32_alloc_vdso_pages();
+ if (ret)
+ return ret;
+#endif
+
+ ret = aarch32_alloc_sigpage();
+ if (ret)
+ return ret;
+
+ return aarch32_alloc_kuser_vdso_page();
}
arch_initcall(aarch32_alloc_vdso_pages);
@@ -305,7 +409,6 @@ static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
return PTR_ERR_OR_ZERO(ret);
}
-#ifndef CONFIG_COMPAT_VDSO
static int aarch32_sigreturn_setup(struct mm_struct *mm)
{
unsigned long addr;
@@ -328,12 +431,11 @@ static int aarch32_sigreturn_setup(struct mm_struct *mm)
if (IS_ERR(ret))
goto out;
- mm->context.vdso = (void *)addr;
+ mm->context.sigpage = (void *)addr;
out:
return PTR_ERR_OR_ZERO(ret);
}
-#endif /* !CONFIG_COMPAT_VDSO */
int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
@@ -352,10 +454,11 @@ int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
mm,
bprm,
uses_interp);
-#else
- ret = aarch32_sigreturn_setup(mm);
+ if (ret)
+ goto out;
#endif /* CONFIG_COMPAT_VDSO */
+ ret = aarch32_sigreturn_setup(mm);
out:
mmap_write_unlock(mm);
return ret;
@@ -376,6 +479,8 @@ enum aarch64_map {
static struct vm_special_mapping aarch64_vdso_maps[] __ro_after_init = {
[AA64_MAP_VVAR] = {
.name = "[vvar]",
+ .fault = vvar_fault,
+ .mremap = vvar_mremap,
},
[AA64_MAP_VDSO] = {
.name = "[vdso]",