| author | Mark Brown <[email protected]> | 2015-10-12 18:09:27 +0100 |
|---|---|---|
| committer | Mark Brown <[email protected]> | 2015-10-12 18:09:27 +0100 |
| commit | 79828b4fa835f73cdaf4bffa48696abdcbea9d02 | |
| tree | 5e0fa7156acb75ba603022bc807df8f2fedb97a8 /fs/f2fs/segment.c | |
| parent | 721b51fcf91898299d96f4b72cb9434cda29dce6 | |
| parent | 8c1a9d6323abf0fb1e5dad96cf3f1c783505ea5a | |
Merge remote-tracking branch 'asoc/fix/rt5645' into asoc-fix-rt5645
Diffstat (limited to 'fs/f2fs/segment.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | fs/f2fs/segment.c | 79 |

1 file changed, 50 insertions(+), 29 deletions(-)
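Two of the hunks below change function contracts: commit_inmem_pages() goes from void to int, and discard_next_dnode() from void to bool. As a hedged sketch of what the new commit_inmem_pages() contract means for a caller — the wrapper below is hypothetical; the real call sites are in fs/f2fs/file.c and are not part of this diff:

```c
/*
 * Hypothetical caller sketch. With this change, commit_inmem_pages()
 * stops at the first page whose do_write_data_page() fails and
 * returns that error, so callers must propagate it rather than assume
 * every registered atomic page was written back.
 */
static int f2fs_commit_atomic_pages(struct inode *inode)
{
	int err;

	err = commit_inmem_pages(inode, false);	/* false = commit, not abort */
	if (err)
		return err;	/* surface the partial-commit failure */
	return 0;
}
```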
```diff
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 1eb343768781..78e6d0696847 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -197,28 +197,20 @@ void register_inmem_page(struct inode *inode, struct page *page)
 {
 	struct f2fs_inode_info *fi = F2FS_I(inode);
 	struct inmem_pages *new;
-	int err;
 
-	SetPagePrivate(page);
 	f2fs_trace_pid(page);
 
+	set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
+	SetPagePrivate(page);
+
 	new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);
 
 	/* add atomic page indices to the list */
 	new->page = page;
 	INIT_LIST_HEAD(&new->list);
-retry:
+
 	/* increase reference count with clean state */
 	mutex_lock(&fi->inmem_lock);
-	err = radix_tree_insert(&fi->inmem_root, page->index, new);
-	if (err == -EEXIST) {
-		mutex_unlock(&fi->inmem_lock);
-		kmem_cache_free(inmem_entry_slab, new);
-		return;
-	} else if (err) {
-		mutex_unlock(&fi->inmem_lock);
-		goto retry;
-	}
 	get_page(page);
 	list_add_tail(&new->list, &fi->inmem_pages);
 	inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
@@ -227,7 +219,7 @@ retry:
 	trace_f2fs_register_inmem_page(page, INMEM);
 }
 
-void commit_inmem_pages(struct inode *inode, bool abort)
+int commit_inmem_pages(struct inode *inode, bool abort)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct f2fs_inode_info *fi = F2FS_I(inode);
@@ -239,6 +231,7 @@ void commit_inmem_pages(struct inode *inode, bool abort)
 		.rw = WRITE_SYNC | REQ_PRIO,
 		.encrypted_page = NULL,
 	};
+	int err = 0;
 
 	/*
 	 * The abort is true only when f2fs_evict_inode is called.
@@ -254,23 +247,29 @@ void commit_inmem_pages(struct inode *inode, bool abort)
 
 	mutex_lock(&fi->inmem_lock);
 	list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
+		lock_page(cur->page);
 		if (!abort) {
-			lock_page(cur->page);
 			if (cur->page->mapping == inode->i_mapping) {
+				set_page_dirty(cur->page);
 				f2fs_wait_on_page_writeback(cur->page, DATA);
 				if (clear_page_dirty_for_io(cur->page))
 					inode_dec_dirty_pages(inode);
 				trace_f2fs_commit_inmem_page(cur->page, INMEM);
 				fio.page = cur->page;
-				do_write_data_page(&fio);
+				err = do_write_data_page(&fio);
 				submit_bio = true;
+				if (err) {
+					unlock_page(cur->page);
+					break;
+				}
 			}
-			f2fs_put_page(cur->page, 1);
 		} else {
 			trace_f2fs_commit_inmem_page(cur->page, INMEM_DROP);
-			put_page(cur->page);
 		}
-		radix_tree_delete(&fi->inmem_root, cur->page->index);
+		set_page_private(cur->page, 0);
+		ClearPagePrivate(cur->page);
+		f2fs_put_page(cur->page, 1);
+
 		list_del(&cur->list);
 		kmem_cache_free(inmem_entry_slab, cur);
 		dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
@@ -282,6 +281,7 @@ void commit_inmem_pages(struct inode *inode, bool abort)
 		if (submit_bio)
 			f2fs_submit_merged_bio(sbi, DATA, WRITE);
 	}
+	return err;
 }
 
 /*
@@ -303,10 +303,18 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi)
 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
 {
 	/* try to shrink extent cache when there is no enough memory */
-	f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);
+	if (!available_free_memory(sbi, EXTENT_CACHE))
+		f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);
+
+	/* check the # of cached NAT entries */
+	if (!available_free_memory(sbi, NAT_ENTRIES))
+		try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);
+
+	if (!available_free_memory(sbi, FREE_NIDS))
+		try_to_free_nids(sbi, NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES);
 
-	/* check the # of cached NAT entries and prefree segments */
-	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK) ||
+	/* checkpoint is the only way to shrink partial cached entries */
+	if (!available_free_memory(sbi, NAT_ENTRIES) ||
 			excess_prefree_segs(sbi) ||
 			!available_free_memory(sbi, INO_ENTRIES))
 		f2fs_sync_fs(sbi->sb, true);
@@ -322,10 +330,12 @@ repeat:
 		return 0;
 
 	if (!llist_empty(&fcc->issue_list)) {
-		struct bio *bio = bio_alloc(GFP_NOIO, 0);
+		struct bio *bio;
 		struct flush_cmd *cmd, *next;
 		int ret;
 
+		bio = f2fs_bio_alloc(0);
+
 		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
 		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);
 
@@ -357,8 +367,15 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi)
 	if (test_opt(sbi, NOBARRIER))
 		return 0;
 
-	if (!test_opt(sbi, FLUSH_MERGE))
-		return blkdev_issue_flush(sbi->sb->s_bdev, GFP_KERNEL, NULL);
+	if (!test_opt(sbi, FLUSH_MERGE)) {
+		struct bio *bio = f2fs_bio_alloc(0);
+		int ret;
+
+		bio->bi_bdev = sbi->sb->s_bdev;
+		ret = submit_bio_wait(WRITE_FLUSH, bio);
+		bio_put(bio);
+		return ret;
+	}
 
 	init_completion(&cmd.wait);
 
@@ -502,7 +519,7 @@ static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
 	return blkdev_issue_discard(sbi->sb->s_bdev, start, len, GFP_NOFS, 0);
 }
 
-void discard_next_dnode(struct f2fs_sb_info *sbi, block_t blkaddr)
+bool discard_next_dnode(struct f2fs_sb_info *sbi, block_t blkaddr)
 {
 	int err = -ENOTSUPP;
 
@@ -512,13 +529,16 @@ void discard_next_dnode(struct f2fs_sb_info *sbi, block_t blkaddr)
 		unsigned int offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
 
 		if (f2fs_test_bit(offset, se->discard_map))
-			return;
+			return false;
 
 		err = f2fs_issue_discard(sbi, blkaddr, 1);
 	}
 
-	if (err)
+	if (err) {
 		update_meta_page(sbi, NULL, blkaddr);
+		return true;
+	}
+	return false;
 }
 
 static void __add_discard_entry(struct f2fs_sb_info *sbi,
@@ -1217,7 +1237,8 @@ void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
 	mutex_lock(&sit_i->sentry_lock);
 
 	/* direct_io'ed data is aligned to the segment for better performance */
-	if (direct_io && curseg->next_blkoff)
+	if (direct_io && curseg->next_blkoff &&
+				!has_not_enough_free_secs(sbi, 0))
 		__allocate_new_segments(sbi, type);
 
 	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
@@ -1732,7 +1753,7 @@ static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
 static struct sit_entry_set *grab_sit_entry_set(void)
 {
 	struct sit_entry_set *ses =
-			f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_ATOMIC);
+			f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_NOFS);
 
 	ses->entry_cnt = 0;
 	INIT_LIST_HEAD(&ses->set_list);
```
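The first two hunks drop the per-inode radix tree that used to index in-memory atomic pages; a page is now tagged as atomic directly through its page-private field. As a minimal sketch of how such a tag works — the `ATOMIC_WRITTEN_PAGE` sentinel is defined in fs/f2fs/f2fs.h and is not shown in this diff, so the exact definition here is an assumption:

```c
/*
 * Sketch only: the real definitions live in fs/f2fs/f2fs.h, outside
 * this diff. A page registered by register_inmem_page() carries the
 * sentinel in its private field, so writeback paths can recognize an
 * in-flight atomic-write page without any radix-tree lookup.
 */
#define ATOMIC_WRITTEN_PAGE		((unsigned long)-1)

#define IS_ATOMIC_WRITTEN_PAGE(page)	\
		(page_private(page) == (unsigned long)ATOMIC_WRITTEN_PAGE)
```

This tagging is also why commit_inmem_pages() now clears the private field and calls ClearPagePrivate() before putting each page: once a page leaves the in-memory list, the sentinel must not linger.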