Diffstat (limited to 'arch/i386/kernel/process.c')
-rw-r--r--  arch/i386/kernel/process.c | 65
1 file changed, 39 insertions, 26 deletions
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index ba243a4cc119..b45cbf93d439 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -164,7 +164,7 @@ static inline void play_dead(void)
*/
local_irq_disable();
while (1)
- __asm__ __volatile__("hlt":::"memory");
+ halt();
}
#else
static inline void play_dead(void)
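The open-coded HLT loop above is replaced by a halt() helper so the raw
instruction is defined in one place. A minimal sketch of such a wrapper,
modelled on the i386 headers of this era (the exact definition may differ):

/* Keep the "memory" clobber from the removed asm so the compiler does
 * not cache memory values across the halt. */
#define halt() __asm__ __volatile__("hlt" : : : "memory")
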
@@ -313,16 +313,12 @@ void show_regs(struct pt_regs * regs)
printk(" DS: %04x ES: %04x\n",
0xffff & regs->xds,0xffff & regs->xes);
- __asm__("movl %%cr0, %0": "=r" (cr0));
- __asm__("movl %%cr2, %0": "=r" (cr2));
- __asm__("movl %%cr3, %0": "=r" (cr3));
- /* This could fault if %cr4 does not exist */
- __asm__("1: movl %%cr4, %0 \n"
- "2: \n"
- ".section __ex_table,\"a\" \n"
- ".long 1b,2b \n"
- ".previous \n"
- : "=r" (cr4): "0" (0));
+ cr0 = read_cr0();
+ cr2 = read_cr2();
+ cr3 = read_cr3();
+ if (current_cpu_data.x86 > 4) {
+ cr4 = read_cr4();
+ }
printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
show_trace(NULL, &regs->esp);
}
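
The removed inline asm, including the exception-table trick for CPUs without
%cr4, becomes a set of read_crN() accessors plus an explicit family check:
current_cpu_data.x86 > 4 skips the CR4 read on 386/486-class parts, which is
exactly where the old code could fault. A hedged sketch of what such
accessors look like on i386 (assumed definitions, in the style of
<asm/processor.h>; the real macros may differ in detail):

#define read_cr0() ({					\
	unsigned long __cr0;				\
	__asm__("movl %%cr0,%0" : "=r" (__cr0));	\
	__cr0;						\
})

#define read_cr4() ({					\
	unsigned long __cr4;				\
	__asm__("movl %%cr4,%0" : "=r" (__cr4));	\
	__cr4;						\
})
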
@@ -682,41 +678,56 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
__unlazy_fpu(prev_p);
/*
- * Reload esp0, LDT and the page table pointer:
+ * Reload esp0.
*/
load_esp0(tss, next);
/*
- * Load the per-thread Thread-Local Storage descriptor.
+ * Save away %fs and %gs. No need to save %es and %ds, as
+ * those are always kernel segments while inside the kernel.
+ * Doing this before setting the new TLS descriptors avoids
+ * the situation where we temporarily have non-reloadable
+ * segments in %fs and %gs. This could be an issue if the
+ * NMI handler ever used %fs or %gs (it does not today), or
+ * if the kernel is running inside of a hypervisor layer.
*/
- load_TLS(next, cpu);
+ savesegment(fs, prev->fs);
+ savesegment(gs, prev->gs);
/*
- * Save away %fs and %gs. No need to save %es and %ds, as
- * those are always kernel segments while inside the kernel.
+ * Load the per-thread Thread-Local Storage descriptor.
*/
- asm volatile("mov %%fs,%0":"=m" (prev->fs));
- asm volatile("mov %%gs,%0":"=m" (prev->gs));
+ load_TLS(next, cpu);
/*
* Restore %fs and %gs if needed.
+ *
+ * Glibc normally makes %fs be zero, and %gs is one of
+ * the TLS segments.
*/
- if (unlikely(prev->fs | prev->gs | next->fs | next->gs)) {
+ if (unlikely(prev->fs | next->fs))
loadsegment(fs, next->fs);
+
+ if (prev->gs | next->gs)
loadsegment(gs, next->gs);
- }
+
+ /*
+ * Restore IOPL if needed.
+ */
+ if (unlikely(prev->iopl != next->iopl))
+ set_iopl_mask(next->iopl);
/*
* Now maybe reload the debug registers
*/
if (unlikely(next->debugreg[7])) {
- set_debugreg(current->thread.debugreg[0], 0);
- set_debugreg(current->thread.debugreg[1], 1);
- set_debugreg(current->thread.debugreg[2], 2);
- set_debugreg(current->thread.debugreg[3], 3);
+ set_debugreg(next->debugreg[0], 0);
+ set_debugreg(next->debugreg[1], 1);
+ set_debugreg(next->debugreg[2], 2);
+ set_debugreg(next->debugreg[3], 3);
/* no 4 and 5 */
- set_debugreg(current->thread.debugreg[6], 6);
- set_debugreg(current->thread.debugreg[7], 7);
+ set_debugreg(next->debugreg[6], 6);
+ set_debugreg(next->debugreg[7], 7);
}
if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr))
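
The segment save/restore above now goes through savesegment() and
loadsegment() instead of open-coded asm. A rough sketch of both helpers,
assuming definitions in the style of <asm/system.h> for this era:
savesegment() is a plain store of the selector, while loadsegment() carries
an exception-table fixup because reloading a selector whose descriptor has
gone away would otherwise fault.

#define savesegment(seg, value) \
	asm volatile("mov %%" #seg ",%0" : "=m" (value))

#define loadsegment(seg, value)				\
	asm volatile("\n"				\
		"1:\tmov %0,%%" #seg "\n"		\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:\tpushl $0\n\t"			\
		"popl %%" #seg "\n\t"			\
		"jmp 2b\n"				\
		".previous\n"				\
		".section __ex_table,\"a\"\n\t"		\
		".align 4\n\t"				\
		".long 1b,3b\n"				\
		".previous"				\
		: : "m" (value))
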
@@ -913,6 +924,8 @@ asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
return -EINVAL;
+ memset(&info, 0, sizeof(info));
+
desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
info.entry_number = idx;
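
The added memset() matters because sys_get_thread_area() fills an on-stack
struct user_desc and then copies the whole structure back to user space;
without zeroing it first, any field or padding the code does not explicitly
set would leak kernel stack contents. A condensed, illustrative sketch of
the pattern (field names from struct user_desc; the surrounding code here is
assumed, not quoted from the file):

	struct user_desc info;

	memset(&info, 0, sizeof(info));	/* no uninitialized bytes escape */
	info.entry_number = idx;
	/* ... remaining fields filled in from the TLS descriptor ... */

	if (copy_to_user(u_info, &info, sizeof(info)))
		return -EFAULT;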