Diffstat (limited to 'fs/btrfs/locking.c')
-rw-r--r--	fs/btrfs/locking.c	135
1 file changed, 135 insertions, 0 deletions
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 571c4826c428..fb647d8cf527 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -523,3 +523,138 @@ void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
 		path->locks[i] = 0;
 	}
 }
+
+/*
+ * Loop around taking references on and locking the root node of the tree until
+ * we end up with a lock on the root node.
+ *
+ * Return: root extent buffer with write lock held
+ */
+struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
+{
+	struct extent_buffer *eb;
+
+	while (1) {
+		eb = btrfs_root_node(root);
+		btrfs_tree_lock(eb);
+		if (eb == root->node)
+			break;
+		btrfs_tree_unlock(eb);
+		free_extent_buffer(eb);
+	}
+	return eb;
+}
+
+/*
+ * Loop around taking references on and locking the root node of the tree until
+ * we end up with a lock on the root node.
+ *
+ * Return: root extent buffer with read lock held
+ */
+struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
+{
+	struct extent_buffer *eb;
+
+	while (1) {
+		eb = btrfs_root_node(root);
+		btrfs_tree_read_lock(eb);
+		if (eb == root->node)
+			break;
+		btrfs_tree_read_unlock(eb);
+		free_extent_buffer(eb);
+	}
+	return eb;
+}
+
+/*
+ * DREW locks
+ * ==========
+ *
+ * DREW stands for double-reader-writer-exclusion lock. It's used in situations
+ * where you want to provide A-B exclusion but not AA or BB.
+ *
+ * The current implementation gives more priority to readers. If a reader and a
+ * writer both race to acquire their respective sides of the lock, the writer
+ * yields its lock as soon as it detects a concurrent reader. Additionally, if
+ * there are pending readers, no new writers are allowed to come in and acquire
+ * the lock.
+ */
+
+int btrfs_drew_lock_init(struct btrfs_drew_lock *lock)
+{
+	int ret;
+
+	ret = percpu_counter_init(&lock->writers, 0, GFP_KERNEL);
+	if (ret)
+		return ret;
+
+	atomic_set(&lock->readers, 0);
+	init_waitqueue_head(&lock->pending_readers);
+	init_waitqueue_head(&lock->pending_writers);
+
+	return 0;
+}
+
+void btrfs_drew_lock_destroy(struct btrfs_drew_lock *lock)
+{
+	percpu_counter_destroy(&lock->writers);
+}
+
+/* Return true if acquisition is successful, false otherwise */
+bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock)
+{
+	if (atomic_read(&lock->readers))
+		return false;
+
+	percpu_counter_inc(&lock->writers);
+
+	/* Ensure writers count is updated before we check for pending readers */
+	smp_mb();
+	if (atomic_read(&lock->readers)) {
+		btrfs_drew_write_unlock(lock);
+		return false;
+	}
+
+	return true;
+}
+
+void btrfs_drew_write_lock(struct btrfs_drew_lock *lock)
+{
+	while (true) {
+		if (btrfs_drew_try_write_lock(lock))
+			return;
+		wait_event(lock->pending_writers, !atomic_read(&lock->readers));
+	}
+}
+
+void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock)
+{
+	percpu_counter_dec(&lock->writers);
+	cond_wake_up(&lock->pending_readers);
+}
+
+void btrfs_drew_read_lock(struct btrfs_drew_lock *lock)
+{
+	atomic_inc(&lock->readers);
+
+	/*
+	 * Ensure the pending reader count is perceived BEFORE this reader
+	 * goes to sleep in case of active writers. This guarantees new writers
+	 * won't be allowed and that the current reader will be woken up when
+	 * the last active writer finishes its job.
+	 */
+	smp_mb__after_atomic();
+
+	wait_event(lock->pending_readers,
+		   percpu_counter_sum(&lock->writers) == 0);
+}
+
+void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock)
+{
+	/*
+	 * atomic_dec_and_test() implies a full barrier, so woken up writers
+	 * are guaranteed to see the decrement.
+	 */
+	if (atomic_dec_and_test(&lock->readers))
+		wake_up(&lock->pending_writers);
+}
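For context, below is a minimal usage sketch of the DREW lock API introduced by this patch. It is not part of the patch: the exclusion_ctx wrapper and the do_operation_a()/do_operation_b() callers are hypothetical names chosen for illustration; only the btrfs_drew_* calls come from the code above. The point it shows is that any number of A-side (write) holders may coexist, any number of B-side (read) holders may coexist, but the two sides exclude each other.

	/* Hypothetical caller, for illustration only. */
	struct exclusion_ctx {
		struct btrfs_drew_lock lock;
	};

	static int exclusion_ctx_init(struct exclusion_ctx *ctx)
	{
		/* Can fail: the writers side is a percpu counter that must be allocated. */
		return btrfs_drew_lock_init(&ctx->lock);
	}

	/* "A" side: may run concurrently with other A-side holders, never with B. */
	static void run_operation_a(struct exclusion_ctx *ctx)
	{
		btrfs_drew_write_lock(&ctx->lock);	/* waits until no B-side holders remain */
		do_operation_a();			/* hypothetical work */
		btrfs_drew_write_unlock(&ctx->lock);
	}

	/* "B" side: may run concurrently with other B-side holders, never with A. */
	static void run_operation_b(struct exclusion_ctx *ctx)
	{
		btrfs_drew_read_lock(&ctx->lock);	/* waits until no A-side holders remain */
		do_operation_b();			/* hypothetical work */
		btrfs_drew_read_unlock(&ctx->lock);
	}

An A-side caller that cannot block could instead call btrfs_drew_try_write_lock() and fall back to another path when it returns false, which is consistent with the reader-priority policy described in the comment above.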