| author | Mark Brown <[email protected]> | 2015-10-12 18:09:27 +0100 | 
|---|---|---|
| committer | Mark Brown <[email protected]> | 2015-10-12 18:09:27 +0100 | 
| commit | 79828b4fa835f73cdaf4bffa48696abdcbea9d02 (patch) | |
| tree | 5e0fa7156acb75ba603022bc807df8f2fedb97a8 /fs/inode.c | |
| parent | 721b51fcf91898299d96f4b72cb9434cda29dce6 (diff) | |
| parent | 8c1a9d6323abf0fb1e5dad96cf3f1c783505ea5a (diff) | |
Merge remote-tracking branch 'asoc/fix/rt5645' into asoc-fix-rt5645
Diffstat (limited to 'fs/inode.c')
| -rw-r--r-- | fs/inode.c | 50 |
|---|---|---|

1 file changed, 31 insertions, 19 deletions
```diff
diff --git a/fs/inode.c b/fs/inode.c
index d30640f7a193..78a17b8859e1 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -28,16 +28,16 @@
  *   inode->i_state, inode->i_hash, __iget()
  * Inode LRU list locks protect:
  *   inode->i_sb->s_inode_lru, inode->i_lru
- * inode_sb_list_lock protects:
- *   sb->s_inodes, inode->i_sb_list
+ * inode->i_sb->s_inode_list_lock protects:
+ *   inode->i_sb->s_inodes, inode->i_sb_list
  * bdi->wb.list_lock protects:
- *   bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_wb_list
+ *   bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_io_list
  * inode_hash_lock protects:
  *   inode_hashtable, inode->i_hash
  *
  * Lock ordering:
  *
- * inode_sb_list_lock
+ * inode->i_sb->s_inode_list_lock
  *   inode->i_lock
  *     Inode LRU list locks
  *
@@ -45,7 +45,7 @@
  *   inode->i_lock
  *
  * inode_hash_lock
- *   inode_sb_list_lock
+ *   inode->i_sb->s_inode_list_lock
  *   inode->i_lock
  *
  * iunique_lock
@@ -57,8 +57,6 @@ static unsigned int i_hash_shift __read_mostly;
 static struct hlist_head *inode_hashtable __read_mostly;
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);
 
-__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);
-
 /*
  * Empty aops. Can be used for the cases where the user does not
  * define any of the address_space operations.
@@ -359,7 +357,7 @@ void inode_init_once(struct inode *inode)
 	memset(inode, 0, sizeof(*inode));
 	INIT_HLIST_NODE(&inode->i_hash);
 	INIT_LIST_HEAD(&inode->i_devices);
-	INIT_LIST_HEAD(&inode->i_wb_list);
+	INIT_LIST_HEAD(&inode->i_io_list);
 	INIT_LIST_HEAD(&inode->i_lru);
 	address_space_init_once(&inode->i_data);
 	i_size_ordered_init(inode);
@@ -426,18 +424,18 @@ static void inode_lru_list_del(struct inode *inode)
  */
 void inode_sb_list_add(struct inode *inode)
 {
-	spin_lock(&inode_sb_list_lock);
+	spin_lock(&inode->i_sb->s_inode_list_lock);
 	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
-	spin_unlock(&inode_sb_list_lock);
+	spin_unlock(&inode->i_sb->s_inode_list_lock);
 }
 EXPORT_SYMBOL_GPL(inode_sb_list_add);
 
 static inline void inode_sb_list_del(struct inode *inode)
 {
 	if (!list_empty(&inode->i_sb_list)) {
-		spin_lock(&inode_sb_list_lock);
+		spin_lock(&inode->i_sb->s_inode_list_lock);
 		list_del_init(&inode->i_sb_list);
-		spin_unlock(&inode_sb_list_lock);
+		spin_unlock(&inode->i_sb->s_inode_list_lock);
 	}
 }
 
@@ -527,8 +525,8 @@ static void evict(struct inode *inode)
 	BUG_ON(!(inode->i_state & I_FREEING));
 	BUG_ON(!list_empty(&inode->i_lru));
 
-	if (!list_empty(&inode->i_wb_list))
-		inode_wb_list_del(inode);
+	if (!list_empty(&inode->i_io_list))
+		inode_io_list_del(inode);
 
 	inode_sb_list_del(inode);
 
@@ -577,6 +575,7 @@ static void dispose_list(struct list_head *head)
 		list_del_init(&inode->i_lru);
 
 		evict(inode);
+		cond_resched();
 	}
 }
 
@@ -594,7 +593,8 @@ void evict_inodes(struct super_block *sb)
 	struct inode *inode, *next;
 	LIST_HEAD(dispose);
 
-	spin_lock(&inode_sb_list_lock);
+again:
+	spin_lock(&sb->s_inode_list_lock);
 	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
 		if (atomic_read(&inode->i_count))
 			continue;
@@ -609,8 +609,20 @@ void evict_inodes(struct super_block *sb)
 		inode_lru_list_del(inode);
 		spin_unlock(&inode->i_lock);
 		list_add(&inode->i_lru, &dispose);
+
+		/*
+		 * We can have a ton of inodes to evict at unmount time given
+		 * enough memory, check to see if we need to go to sleep for a
+		 * bit so we don't livelock.
+		 */
+		if (need_resched()) {
+			spin_unlock(&sb->s_inode_list_lock);
+			cond_resched();
+			dispose_list(&dispose);
+			goto again;
+		}
 	}
-	spin_unlock(&inode_sb_list_lock);
+	spin_unlock(&sb->s_inode_list_lock);
 
 	dispose_list(&dispose);
 }
@@ -631,7 +643,7 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty)
 	struct inode *inode, *next;
 	LIST_HEAD(dispose);
 
-	spin_lock(&inode_sb_list_lock);
+	spin_lock(&sb->s_inode_list_lock);
 	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
 		spin_lock(&inode->i_lock);
 		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
@@ -654,7 +666,7 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty)
 		spin_unlock(&inode->i_lock);
 		list_add(&inode->i_lru, &dispose);
 	}
-	spin_unlock(&inode_sb_list_lock);
+	spin_unlock(&sb->s_inode_list_lock);
 
 	dispose_list(&dispose);
 
@@ -890,7 +902,7 @@ struct inode *new_inode(struct super_block *sb)
 {
 	struct inode *inode;
 
-	spin_lock_prefetch(&inode_sb_list_lock);
+	spin_lock_prefetch(&sb->s_inode_list_lock);
 
 	inode = new_inode_pseudo(sb);
 	if (inode)
```
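For illustration only, here is a minimal user-space sketch of the lock-drop-and-restart pattern that the `evict_inodes()` hunk above introduces: entries are unlinked from a shared list onto a private dispose list, and the lock is periodically released so a long walk cannot monopolise the CPU. The kernel's `need_resched()`/`cond_resched()` pair has no direct user-space equivalent, so a simple batch counter stands in for it; `struct node`, `BATCH`, `evict_all()`, and `dispose_list()` here are made-up names for the example, not kernel APIs.

```c
/* Sketch of the drop-lock-and-restart pattern from evict_inodes() above. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	struct node *next;
};

static struct node *shared_list;        /* stands in for sb->s_inodes */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

#define BATCH 64                        /* stand-in for need_resched() */

static void dispose_list(struct node *head)
{
	while (head) {
		struct node *next = head->next;
		/* evict(inode) would go here */
		free(head);
		head = next;
	}
}

static void evict_all(void)
{
	struct node *dispose = NULL;
	int batched = 0;

again:
	pthread_mutex_lock(&list_lock);
	while (shared_list) {
		/* unlink from the shared list, park on the private one */
		struct node *n = shared_list;
		shared_list = n->next;
		n->next = dispose;
		dispose = n;

		/*
		 * Periodically drop the lock and process the batch so other
		 * users of the list can make progress, then restart the walk.
		 */
		if (++batched >= BATCH) {
			pthread_mutex_unlock(&list_lock);
			dispose_list(dispose);
			dispose = NULL;
			batched = 0;
			goto again;
		}
	}
	pthread_mutex_unlock(&list_lock);

	dispose_list(dispose);
}

int main(void)
{
	for (int i = 0; i < 1000; i++) {
		struct node *n = malloc(sizeof(*n));
		n->id = i;
		n->next = shared_list;
		shared_list = n;
	}
	evict_all();
	printf("all nodes disposed\n");
	return 0;
}
```

The design point mirrors the commit: eviction work is batched outside the lock, and the walk restarts from the list head after each drop, which is safe because every pass only ever removes entries.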