author     Linus Torvalds <torvalds@linux-foundation.org>   2012-05-25 15:59:38 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-05-25 15:59:38 -0700
commit     fa2af6e4fe0c4d2f8875d42625b25675e8584010
tree       ef9a92949858ab763aa1bfda7cb11a5f7b84d123   /arch/tile/mm/init.c
parent     109b9b0408e5f1dd327a44f446841a9fbe0bcd83
parent     1fcb78e9da714d96f65edd37b29dae3b1f7df508
Merge git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile
Pull tile updates from Chris Metcalf:
"These changes cover a range of new arch/tile features and
optimizations. They've been through LKML review and on linux-next for
a month or so. There's also one bug-fix that just missed 3.4, which
I've marked for stable."
Fixed up trivial conflict in arch/tile/Kconfig (newly added tile Kconfig
entries clashing with the generic timer/clockevents changes).
* git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile:
tile: default to tilegx_defconfig for ARCH=tile
tile: fix bug where fls(0) was not returning 0
arch/tile: mark TILEGX as not EXPERIMENTAL
tile/mm/fault.c: Port OOM changes to handle_page_fault
arch/tile: add descriptive text if the kernel reports a bad trap
arch/tile: allow querying cpu module information from the hypervisor
arch/tile: fix hardwall for tilegx and generalize for idn and ipi
arch/tile: support multiple huge page sizes dynamically
mm: add new arch_make_huge_pte() method for tile support
arch/tile: support kexec() for tilegx
arch/tile: support <asm/cachectl.h> header for cacheflush() syscall
arch/tile: Allow tilegx to build with either 16K or 64K page size
arch/tile: optimize get_user/put_user and friends
arch/tile: support building big-endian kernel
arch/tile: allow building Linux with transparent huge pages enabled
arch/tile: use interrupt critical sections less
Diffstat (limited to 'arch/tile/mm/init.c')
-rw-r--r--   arch/tile/mm/init.c   19
1 file changed, 11 insertions(+), 8 deletions(-)
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 6a9d20ddc34f..630dd2ce2afe 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -82,7 +82,7 @@ static int num_l2_ptes[MAX_NUMNODES];
 
 static void init_prealloc_ptes(int node, int pages)
 {
-	BUG_ON(pages & (HV_L2_ENTRIES-1));
+	BUG_ON(pages & (PTRS_PER_PTE - 1));
 	if (pages) {
 		num_l2_ptes[node] = pages;
 		l2_ptes[node] = __alloc_bootmem(pages * sizeof(pte_t),
@@ -131,14 +131,9 @@ static void __init assign_pte(pmd_t *pmd, pte_t *page_table)
 
 #ifdef __tilegx__
 
-#if HV_L1_SIZE != HV_L2_SIZE
-# error Rework assumption that L1 and L2 page tables are same size.
-#endif
-
-/* Since pmd_t arrays and pte_t arrays are the same size, just use casts. */
 static inline pmd_t *alloc_pmd(void)
 {
-	return (pmd_t *)alloc_pte();
+	return __alloc_bootmem(L1_KERNEL_PGTABLE_SIZE, HV_PAGE_TABLE_ALIGN, 0);
 }
 
 static inline void assign_pmd(pud_t *pud, pmd_t *pmd)
@@ -444,6 +439,7 @@ static pgd_t pgtables[PTRS_PER_PGD]
  */
 static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 {
+	unsigned long long irqmask;
 	unsigned long address, pfn;
 	pmd_t *pmd;
 	pte_t *pte;
@@ -633,10 +629,13 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 	 *  - install pgtables[] as the real page table
 	 *  - flush the TLB so the new page table takes effect
 	 */
+	irqmask = interrupt_mask_save_mask();
+	interrupt_mask_set_mask(-1ULL);
 	rc = flush_and_install_context(__pa(pgtables),
 				       init_pgprot((unsigned long)pgtables),
 				       __get_cpu_var(current_asid),
 				       cpumask_bits(my_cpu_mask));
+	interrupt_mask_restore_mask(irqmask);
 	BUG_ON(rc != 0);
 
 	/* Copy the page table back to the normal swapper_pg_dir. */
@@ -699,6 +698,7 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base)
 }
 #endif /* CONFIG_HIGHMEM */
 
+#ifndef CONFIG_64BIT
 static void __init init_free_pfn_range(unsigned long start, unsigned long end)
 {
 	unsigned long pfn;
@@ -771,6 +771,7 @@ static void __init set_non_bootmem_pages_init(void)
 		init_free_pfn_range(start, end);
 	}
 }
+#endif
 
 /*
  * paging_init() sets up the page tables - note that all of lowmem is
@@ -807,7 +808,7 @@ void __init paging_init(void)
 	 * changing init_mm once we get up and running, and there's no
 	 * need for e.g. vmalloc_sync_all().
 	 */
-	BUILD_BUG_ON(pgd_index(VMALLOC_START) != pgd_index(VMALLOC_END));
+	BUILD_BUG_ON(pgd_index(VMALLOC_START) != pgd_index(VMALLOC_END - 1));
 	pud = pud_offset(pgd_base + pgd_index(VMALLOC_START), VMALLOC_START);
 	assign_pmd(pud, alloc_pmd());
 #endif
@@ -859,8 +860,10 @@ void __init mem_init(void)
 	/* this will put all bootmem onto the freelists */
 	totalram_pages += free_all_bootmem();
 
+#ifndef CONFIG_64BIT
 	/* count all remaining LOWMEM and give all HIGHMEM to page allocator */
 	set_non_bootmem_pages_init();
+#endif
 
 	codesize = (unsigned long)&_etext - (unsigned long)&_text;
 	datasize = (unsigned long)&_end - (unsigned long)&_sdata;
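One small change above is easy to miss: the BUILD_BUG_ON in paging_init() now compares
pgd_index(VMALLOC_END - 1) instead of pgd_index(VMALLOC_END), presumably because
VMALLOC_END is treated as an exclusive bound. If the vmalloc region happens to end
exactly on a pgd boundary, the exclusive end already belongs to the next top-level
entry, so the old check could fire even though the region really does fit in a single
pgd entry. The standalone sketch below illustrates that off-by-one; it uses made-up
PGDIR_SHIFT and address values rather than the real tile layout, and a runtime assert
in place of the kernel's compile-time BUILD_BUG_ON.

#include <assert.h>
#include <stdio.h>

/* Hypothetical layout for illustration only, not the real tile values:
 * each top-level (pgd) entry covers 1 GB, and the vmalloc region is the
 * last 1 GB below the 4 GB mark, so its exclusive end is pgd-aligned. */
#define PGDIR_SHIFT   30
#define VMALLOC_START 0x00000000c0000000ULL
#define VMALLOC_END   0x0000000100000000ULL   /* exclusive end of the region */

/* Same shape as the kernel's pgd_index(): which top-level entry maps addr. */
static unsigned long long pgd_index(unsigned long long addr)
{
	return addr >> PGDIR_SHIFT;
}

int main(void)
{
	printf("pgd_index(VMALLOC_START)   = %llu\n", pgd_index(VMALLOC_START));
	printf("pgd_index(VMALLOC_END)     = %llu\n", pgd_index(VMALLOC_END));
	printf("pgd_index(VMALLOC_END - 1) = %llu\n", pgd_index(VMALLOC_END - 1));

	/* The old form compared the exclusive end, which in this layout already
	 * belongs to the next pgd entry, so the check would fire spuriously. */
	assert(pgd_index(VMALLOC_START) != pgd_index(VMALLOC_END));

	/* The fixed form compares the last byte actually inside the region,
	 * which shares the same pgd entry as the first byte. */
	assert(pgd_index(VMALLOC_START) == pgd_index(VMALLOC_END - 1));
	return 0;
}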