Diffstat (limited to 'mm')
 -rw-r--r--  mm/fadvise.c  | 20 --------------------
 -rw-r--r--  mm/hugetlb.c  |  6 ++++--
 -rw-r--r--  mm/memory.c   |  2 +-
 -rw-r--r--  mm/swapfile.c | 14 ++++++++++----
 4 files changed, 15 insertions(+), 27 deletions(-)
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 907c39257ca0..0a03357a1f8e 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -35,17 +35,6 @@
*
* LINUX_FADV_ASYNC_WRITE: push some or all of the dirty pages at the disk.
*
- * LINUX_FADV_WRITE_WAIT, LINUX_FADV_ASYNC_WRITE: push all of the currently
- * dirty pages at the disk.
- *
- * LINUX_FADV_WRITE_WAIT, LINUX_FADV_ASYNC_WRITE, LINUX_FADV_WRITE_WAIT: push
- * all of the currently dirty pages at the disk, wait until they have been
- * written.
- *
- * It should be noted that none of these operations write out the file's
- * metadata. So unless the application is strictly performing overwrites of
- * already-instantiated disk blocks, there are no guarantees here that the data
- * will be available after a crash.
*/
asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
{
@@ -129,15 +118,6 @@ asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
invalidate_mapping_pages(mapping, start_index,
end_index);
break;
- case LINUX_FADV_ASYNC_WRITE:
- ret = __filemap_fdatawrite_range(mapping, offset, endbyte,
- WB_SYNC_NONE);
- break;
- case LINUX_FADV_WRITE_WAIT:
- ret = wait_on_page_writeback_range(mapping,
- offset >> PAGE_CACHE_SHIFT,
- endbyte >> PAGE_CACHE_SHIFT);
- break;
default:
ret = -EINVAL;
}
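
Note: the advice values that remain after this hunk are the ones reachable through the ordinary posix_fadvise() entry point. As a rough user-space sketch (the file path, offset and length are placeholders, not anything from this patch), a POSIX_FADV_DONTNEED call is what ends up in the invalidate_mapping_pages() branch shown above:

#define _XOPEN_SOURCE 600
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	int fd = open("/tmp/example.dat", O_RDONLY);	/* placeholder path */
	if (fd < 0)
		return 1;

	/* Ask the kernel to drop cached pages for the first 4 KiB of the
	 * file; this advice reaches the invalidate_mapping_pages() case. */
	int err = posix_fadvise(fd, 0, 4096, POSIX_FADV_DONTNEED);
	if (err)
		fprintf(stderr, "posix_fadvise: error %d\n", err);
	return err ? 1 : 0;
}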
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ebad6bbb3501..832f676ca038 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -334,6 +334,7 @@ static unsigned long set_max_huge_pages(unsigned long count)
return nr_huge_pages;
spin_lock(&hugetlb_lock);
+ count = max(count, reserved_huge_pages);
try_to_free_low(count);
while (count < nr_huge_pages) {
struct page *page = dequeue_huge_page(NULL, 0);
@@ -697,9 +698,10 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
page = pte_page(*pte);
same_page:
- get_page(page);
- if (pages)
+ if (pages) {
+ get_page(page);
pages[i] = page + pfn_offset;
+ }
if (vmas)
vmas[i] = vma;
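
Note: the follow_hugetlb_page() change above narrows the reference-counting rule: a reference is taken only when the page pointer is actually handed back through pages[], because that is the reference the caller is later expected to drop; with pages == NULL the old code leaked a count. A minimal sketch of the same rule, assuming the kernel's get_page() as the reference-taking helper and with the rest of the names made up for illustration:

struct page;
void get_page(struct page *page);	/* assumed: takes one reference */

/* Only pin the page when the caller will actually see (and later
 * release) it; otherwise no reference is taken at all. */
static void hand_out_page(struct page *page, struct page **pages, int i)
{
	if (pages) {
		get_page(page);
		pages[i] = page;
	}
}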
diff --git a/mm/memory.c b/mm/memory.c
index 8d8f52569f32..0ec7bc644271 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -87,7 +87,7 @@ int randomize_va_space __read_mostly = 1;
static int __init disable_randmaps(char *s)
{
randomize_va_space = 0;
- return 0;
+ return 1;
}
__setup("norandmaps", disable_randmaps);
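
Note: the one-line memory.c change follows the __setup() convention: a handler returns 1 when it has consumed the boot parameter, while returning 0 leaves the option to be treated as unrecognized and passed along to init. A hedged sketch of that convention, using a hypothetical parameter name that is not part of this patch:

#include <linux/init.h>

static int example_flag __initdata;

/* Hypothetical boot parameter "exampleflag": return 1 so the kernel
 * records the option as handled instead of handing it on to init. */
static int __init parse_exampleflag(char *s)
{
	example_flag = 1;
	return 1;
}
__setup("exampleflag", parse_exampleflag);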
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 39aa9d129612..e5fd5385f0cc 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -397,18 +397,24 @@ void free_swap_and_cache(swp_entry_t entry)
p = swap_info_get(entry);
if (p) {
- if (swap_entry_free(p, swp_offset(entry)) == 1)
- page = find_trylock_page(&swapper_space, entry.val);
+ if (swap_entry_free(p, swp_offset(entry)) == 1) {
+ page = find_get_page(&swapper_space, entry.val);
+ if (page && unlikely(TestSetPageLocked(page))) {
+ page_cache_release(page);
+ page = NULL;
+ }
+ }
spin_unlock(&swap_lock);
}
if (page) {
int one_user;
BUG_ON(PagePrivate(page));
- page_cache_get(page);
one_user = (page_count(page) == 2);
/* Only cache user (+us), or swap space full? Free it! */
- if (!PageWriteback(page) && (one_user || vm_swap_full())) {
+ /* Also recheck PageSwapCache after page is locked (above) */
+ if (PageSwapCache(page) && !PageWriteback(page) &&
+ (one_user || vm_swap_full())) {
delete_from_swap_cache(page);
SetPageDirty(page);
}
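
Note: the swapfile.c hunk replaces find_trylock_page() with a take-a-reference-then-trylock pattern: look the page up with find_get_page(), attempt the lock, and back off (dropping the reference) if someone else already holds it, which is also why PageSwapCache() is rechecked once the lock is held. A rough user-space analogue of that pattern, with every name invented for illustration:

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct cache_entry {
	atomic_int refcount;
	pthread_mutex_t lock;
};

/* Take a reference first, then try to lock; if the lock is already
 * held, drop the reference and report the lookup as failed. */
static struct cache_entry *lookup_and_trylock(struct cache_entry *e)
{
	if (!e)
		return NULL;
	atomic_fetch_add(&e->refcount, 1);		/* like find_get_page()        */
	if (pthread_mutex_trylock(&e->lock) != 0) {	/* like TestSetPageLocked()    */
		atomic_fetch_sub(&e->refcount, 1);	/* like page_cache_release()   */
		return NULL;
	}
	return e;	/* caller now holds both a reference and the lock */
}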