Diffstat (limited to 'arch/um/kernel/skas/mmu.c')
-rw-r--r-- | arch/um/kernel/skas/mmu.c | 54 | ++++++++++++++++++++++++++++++++++--------------------
1 file changed, 34 insertions(+), 20 deletions(-)
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index aeed1c2aaf3c..47f98d87ea3c 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -14,11 +14,14 @@
 #include <as-layout.h>
 #include <os.h>
 #include <skas.h>
+#include <stub-data.h>
+
+/* Ensure the stub_data struct covers the allocated area */
+static_assert(sizeof(struct stub_data) == STUB_DATA_PAGES * UM_KERN_PAGE_SIZE);
 
 int init_new_context(struct task_struct *task, struct mm_struct *mm)
 {
-	struct mm_context *from_mm = NULL;
-	struct mm_context *to_mm = &mm->context;
+	struct mm_id *new_id = &mm->context.id;
 	unsigned long stack = 0;
 	int ret = -ENOMEM;
 
@@ -26,34 +29,46 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
 	if (stack == 0)
 		goto out;
 
-	to_mm->id.stack = stack;
-	if (current->mm != NULL && current->mm != &init_mm)
-		from_mm = &current->mm->context;
+	new_id->stack = stack;
 
 	block_signals_trace();
-	if (from_mm)
-		to_mm->id.u.pid = copy_context_skas0(stack,
-						     from_mm->id.u.pid);
-	else to_mm->id.u.pid = start_userspace(stack);
+	new_id->u.pid = start_userspace(stack);
 	unblock_signals_trace();
 
-	if (to_mm->id.u.pid < 0) {
-		ret = to_mm->id.u.pid;
+	if (new_id->u.pid < 0) {
+		ret = new_id->u.pid;
 		goto out_free;
 	}
 
-	ret = init_new_ldt(to_mm, from_mm);
-	if (ret < 0) {
-		printk(KERN_ERR "init_new_context_skas - init_ldt"
-		       " failed, errno = %d\n", ret);
-		goto out_free;
-	}
+	/*
+	 * Ensure the new MM is clean and nothing unwanted is mapped.
+	 *
+	 * TODO: We should clear the memory up to STUB_START to ensure there is
+	 * nothing mapped there, i.e. we (currently) have:
+	 *
+	 * |- user memory -|- unused -|- stub -|- unused -|
+	 *                 ^ TASK_SIZE        ^ STUB_START
+	 *
+	 * Meaning we have two unused areas where we may still have valid
+	 * mappings from our internal clone(). That isn't really a problem as
+	 * userspace is not going to access them, but it is definitely not
+	 * correct.
+	 *
+	 * However, we are "lucky" and if rseq is configured, then on 32 bit
+	 * it will fall into the first empty range while on 64 bit it is going
+	 * to use an anonymous mapping in the second range. As such, things
+	 * continue to work for now as long as we don't start unmapping these
+	 * areas.
+	 *
+	 * Change this to STUB_START once we have a clean userspace.
+	 */
+	unmap(new_id, 0, TASK_SIZE);
 
 	return 0;
 
  out_free:
-	if (to_mm->id.stack != 0)
-		free_pages(to_mm->id.stack, ilog2(STUB_DATA_PAGES));
+	if (new_id->stack != 0)
+		free_pages(new_id->stack, ilog2(STUB_DATA_PAGES));
  out:
 	return ret;
 }
@@ -76,5 +91,4 @@ void destroy_context(struct mm_struct *mm)
 
 	os_kill_ptraced_process(mmu->id.u.pid, 1);
 	free_pages(mmu->id.stack, ilog2(STUB_DATA_PAGES));
-	free_ldt(mmu);
 }
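
A note on the static_assert added at the top of the diff: it ties the size of struct stub_data to the pages reserved for it, so a struct change that outgrows the allocation fails the build rather than corrupting adjacent memory at runtime. Below is a minimal, standalone C11 sketch of that pattern; the names demo_stub_data, DEMO_PAGE_SIZE and DEMO_STUB_PAGES are illustrative stand-ins, not the kernel's stub_data, UM_KERN_PAGE_SIZE or STUB_DATA_PAGES.

    /* Sketch of a compile-time size check (hypothetical names, C11). */
    #include <assert.h> /* provides static_assert in C11/C17 */
    #include <stdio.h>

    #define DEMO_PAGE_SIZE  4096UL /* stand-in for UM_KERN_PAGE_SIZE */
    #define DEMO_STUB_PAGES 1UL    /* stand-in for STUB_DATA_PAGES */

    struct demo_stub_data {
            unsigned long offset;
            /* Explicit padding so the struct covers the reserved pages exactly. */
            unsigned char reserved[DEMO_PAGE_SIZE - sizeof(unsigned long)];
    };

    /* The build fails here if the struct no longer matches the allocation. */
    static_assert(sizeof(struct demo_stub_data) == DEMO_STUB_PAGES * DEMO_PAGE_SIZE,
                  "demo_stub_data must cover the allocated pages exactly");

    int main(void)
    {
            printf("demo_stub_data: %zu bytes\n", sizeof(struct demo_stub_data));
            return 0;
    }

The exact-size guarantee matters here because the stub_data pages are shared with the userspace stub mapped at a fixed address, so the layout cannot be allowed to drift silently.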