-rw-r--r--   fs/btrfs/disk-io.c   12
-rw-r--r--   fs/btrfs/locking.c   25
-rw-r--r--   fs/btrfs/locking.h    5
3 files changed, 9 insertions, 33 deletions
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 991ff26fb8c6..1b1b9e8197d6 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1341,17 +1341,8 @@ struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev)
{
int ret;
- unsigned int nofs_flag;
- /*
- * We might be called under a transaction (e.g. indirect backref
- * resolution) which could deadlock if it triggers memory reclaim
- */
- nofs_flag = memalloc_nofs_save();
- ret = btrfs_drew_lock_init(&root->snapshot_lock);
- memalloc_nofs_restore(nofs_flag);
- if (ret)
- goto fail;
+ btrfs_drew_lock_init(&root->snapshot_lock);
if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID &&
!btrfs_is_data_reloc_root(root)) {
@@ -2065,7 +2056,6 @@ void btrfs_put_root(struct btrfs_root *root)
WARN_ON(test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state));
if (root->anon_dev)
free_anon_bdev(root->anon_dev);
- btrfs_drew_lock_destroy(&root->snapshot_lock);
free_root_extent_buffers(root);
#ifdef CONFIG_BTRFS_DEBUG
spin_lock(&root->fs_info->fs_roots_radix_lock);
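The error handling removed above is possible because the lock's writer count is no longer backed by a percpu_counter: percpu_counter_init() has to allocate per-CPU storage and can fail (e.g. with -ENOMEM), while an atomic_t is plain embedded storage whose initialization is a single store. A contrasting sketch, simplified from the kernel APIs involved (not the actual btrfs code):

	#include <linux/atomic.h>
	#include <linux/percpu_counter.h>

	/* Old scheme: init allocates per-CPU memory and may fail. */
	static int old_writers_init(struct percpu_counter *writers)
	{
		return percpu_counter_init(writers, 0, GFP_KERNEL);
	}

	/* New scheme: a plain store, no allocation, cannot fail. */
	static void new_writers_init(atomic_t *writers)
	{
		atomic_set(writers, 0);
	}

With no failure path, btrfs_init_fs_root() can also drop the memalloc_nofs_save()/restore() pair that only existed to make that allocation safe when called under a transaction, and btrfs_put_root() no longer needs a matching destroy call.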
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 870528d87526..3a496b0d3d2b 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -325,24 +325,12 @@ struct extent_buffer *btrfs_try_read_lock_root_node(struct btrfs_root *root)
* acquire the lock.
*/
-int btrfs_drew_lock_init(struct btrfs_drew_lock *lock)
+void btrfs_drew_lock_init(struct btrfs_drew_lock *lock)
{
- int ret;
-
- ret = percpu_counter_init(&lock->writers, 0, GFP_KERNEL);
- if (ret)
- return ret;
-
atomic_set(&lock->readers, 0);
+ atomic_set(&lock->writers, 0);
init_waitqueue_head(&lock->pending_readers);
init_waitqueue_head(&lock->pending_writers);
-
- return 0;
-}
-
-void btrfs_drew_lock_destroy(struct btrfs_drew_lock *lock)
-{
- percpu_counter_destroy(&lock->writers);
}
/* Return true if acquisition is successful, false otherwise */
@@ -351,10 +339,10 @@ bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock)
if (atomic_read(&lock->readers))
return false;
- percpu_counter_inc(&lock->writers);
+ atomic_inc(&lock->writers);
/* Ensure writers count is updated before we check for pending readers */
- smp_mb();
+ smp_mb__after_atomic();
if (atomic_read(&lock->readers)) {
btrfs_drew_write_unlock(lock);
return false;
@@ -374,7 +362,7 @@ void btrfs_drew_write_lock(struct btrfs_drew_lock *lock)
void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock)
{
- percpu_counter_dec(&lock->writers);
+ atomic_dec(&lock->writers);
cond_wake_up(&lock->pending_readers);
}
@@ -390,8 +378,7 @@ void btrfs_drew_read_lock(struct btrfs_drew_lock *lock)
*/
smp_mb__after_atomic();
- wait_event(lock->pending_readers,
- percpu_counter_sum(&lock->writers) == 0);
+ wait_event(lock->pending_readers, atomic_read(&lock->writers) == 0);
}
void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock)
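The ordering requirement the patch preserves is that a writer publishes its increment of writers before re-checking readers, and a reader publishes its increment of readers before checking writers; with both sides doing increment, full barrier, then check, at least one side is guaranteed to observe the other. A minimal userspace sketch of that handshake, with C11 atomics and busy-waiting standing in for the kernel's atomic_t, smp_mb__after_atomic() and wait queues (names are illustrative only, not the kernel API):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct drew_lock {
		atomic_int readers;
		atomic_int writers;
	};

	static void drew_init(struct drew_lock *l)
	{
		atomic_init(&l->readers, 0);
		atomic_init(&l->writers, 0);
	}

	static bool drew_try_write_lock(struct drew_lock *l)
	{
		if (atomic_load(&l->readers))
			return false;
		/*
		 * Publish the writer before re-checking for readers; the
		 * seq_cst fetch_add plays the role of the kernel's
		 * atomic_inc() + smp_mb__after_atomic().
		 */
		atomic_fetch_add(&l->writers, 1);
		if (atomic_load(&l->readers)) {
			atomic_fetch_sub(&l->writers, 1);
			return false;
		}
		return true;
	}

	static void drew_write_unlock(struct drew_lock *l)
	{
		atomic_fetch_sub(&l->writers, 1);
	}

	static void drew_read_lock(struct drew_lock *l)
	{
		/*
		 * Publish the reader, then wait for in-flight writers to
		 * drain (kernel: atomic_inc() + smp_mb__after_atomic() +
		 * wait_event()).
		 */
		atomic_fetch_add(&l->readers, 1);
		while (atomic_load(&l->writers))
			;
	}

	static void drew_read_unlock(struct drew_lock *l)
	{
		atomic_fetch_sub(&l->readers, 1);
	}

	int main(void)
	{
		struct drew_lock lock;

		drew_init(&lock);
		printf("try_write: %d\n", drew_try_write_lock(&lock)); /* 1 */
		drew_write_unlock(&lock);
		drew_read_lock(&lock);
		printf("try_write: %d\n", drew_try_write_lock(&lock)); /* 0, reader held */
		drew_read_unlock(&lock);
		return 0;
	}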
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index 11c2269b4b6f..edb9b4a0dba1 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -195,13 +195,12 @@ static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
struct btrfs_drew_lock {
atomic_t readers;
- struct percpu_counter writers;
+ atomic_t writers;
wait_queue_head_t pending_writers;
wait_queue_head_t pending_readers;
};
-int btrfs_drew_lock_init(struct btrfs_drew_lock *lock);
-void btrfs_drew_lock_destroy(struct btrfs_drew_lock *lock);
+void btrfs_drew_lock_init(struct btrfs_drew_lock *lock);
void btrfs_drew_write_lock(struct btrfs_drew_lock *lock);
bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock);
void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock);
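For context, a hypothetical caller-side sketch of the API declared above (not taken from the patch): the write side excludes all readers but not other writers, and the read side excludes all writers but not other readers, which is why a plain counter is sufficient for the writer side.

	static void writer_side(struct btrfs_drew_lock *lock)
	{
		/* Non-blocking attempt; fails if any reader holds the lock. */
		if (!btrfs_drew_try_write_lock(lock))
			return;
		/* Readers are excluded here; other writers may run concurrently. */
		btrfs_drew_write_unlock(lock);
	}

	static void reader_side(struct btrfs_drew_lock *lock)
	{
		btrfs_drew_read_lock(lock);
		/* Writers are excluded here; other readers may run concurrently. */
		btrfs_drew_read_unlock(lock);
	}

A blocking variant, btrfs_drew_write_lock(), is also declared above for writers that need to wait for active readers to drain.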