Commit 06297d8c authored by David Sterba

btrfs: switch extent_buffer blocking_writers from atomic to int



The blocking_writers is either 0 or 1 and always updated under the lock,
so we don't need the atomic_t semantics.
Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 38e9372e
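
Below is a minimal userspace sketch, not part of the commit, illustrating the pattern the change relies on: a flag that is only ever modified while the lock is held does not need atomic_t semantics. The pthread-based analogue and all names in it (eb_like, set_blocking_write) are hypothetical stand-ins for the kernel structures.

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-in for the extent_buffer locking fields. */
struct eb_like {
	pthread_rwlock_t lock;
	int blocking_writers;	/* 0 or 1, written only under the write lock */
};

/*
 * Rough analogue of btrfs_set_lock_blocking_write(): the caller holds the
 * write lock, so a plain read-modify-write of the flag is safe and the
 * spinning lock can then be dropped while the flag marks the blocking state.
 */
static void set_blocking_write(struct eb_like *eb)
{
	if (eb->blocking_writers == 0) {
		eb->blocking_writers++;
		pthread_rwlock_unlock(&eb->lock);
	}
}

int main(void)
{
	struct eb_like eb = { .lock = PTHREAD_RWLOCK_INITIALIZER };

	pthread_rwlock_wrlock(&eb.lock);
	set_blocking_write(&eb);	/* drops the lock, leaves the flag set */
	printf("blocking_writers = %d\n", eb.blocking_writers);
	return 0;
}
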
@@ -4857,7 +4857,7 @@ __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
 	eb->bflags = 0;
 	rwlock_init(&eb->lock);
 	atomic_set(&eb->blocking_readers, 0);
-	atomic_set(&eb->blocking_writers, 0);
+	eb->blocking_writers = 0;
 	eb->lock_nested = false;
 	init_waitqueue_head(&eb->write_lock_wq);
 	init_waitqueue_head(&eb->read_lock_wq);
@@ -167,7 +167,7 @@ struct extent_buffer {
 	struct rcu_head rcu_head;
 	pid_t lock_owner;
 
-	atomic_t blocking_writers;
+	int blocking_writers;
 	atomic_t blocking_readers;
 	bool lock_nested;
 	/* >= 0 if eb belongs to a log tree, -1 otherwise */
@@ -111,10 +111,10 @@ void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
 	 */
 	if (eb->lock_nested && current->pid == eb->lock_owner)
 		return;
-	if (atomic_read(&eb->blocking_writers) == 0) {
+	if (eb->blocking_writers == 0) {
 		btrfs_assert_spinning_writers_put(eb);
 		btrfs_assert_tree_locked(eb);
-		atomic_inc(&eb->blocking_writers);
+		eb->blocking_writers++;
 		write_unlock(&eb->lock);
 	}
 }
@@ -148,12 +148,11 @@ void btrfs_clear_lock_blocking_write(struct extent_buffer *eb)
 	 */
 	if (eb->lock_nested && current->pid == eb->lock_owner)
 		return;
-	BUG_ON(atomic_read(&eb->blocking_writers) != 1);
 	write_lock(&eb->lock);
+	BUG_ON(eb->blocking_writers != 1);
 	btrfs_assert_spinning_writers_get(eb);
-	/* atomic_dec_and_test implies a barrier */
-	if (atomic_dec_and_test(&eb->blocking_writers))
-		cond_wake_up_nomb(&eb->write_lock_wq);
+	if (--eb->blocking_writers == 0)
+		cond_wake_up(&eb->write_lock_wq);
 }
 
 /*
@@ -167,12 +166,10 @@ void btrfs_tree_read_lock(struct extent_buffer *eb)
 	if (trace_btrfs_tree_read_lock_enabled())
 		start_ns = ktime_get_ns();
 again:
-	BUG_ON(!atomic_read(&eb->blocking_writers) &&
-	       current->pid == eb->lock_owner);
-
 	read_lock(&eb->lock);
-	if (atomic_read(&eb->blocking_writers) &&
-	    current->pid == eb->lock_owner) {
+	BUG_ON(eb->blocking_writers == 0 &&
+	       current->pid == eb->lock_owner);
+	if (eb->blocking_writers && current->pid == eb->lock_owner) {
 		/*
 		 * This extent is already write-locked by our thread. We allow
 		 * an additional read lock to be added because it's for the same
@@ -185,10 +182,10 @@ again:
 		trace_btrfs_tree_read_lock(eb, start_ns);
 		return;
 	}
-	if (atomic_read(&eb->blocking_writers)) {
+	if (eb->blocking_writers) {
 		read_unlock(&eb->lock);
 		wait_event(eb->write_lock_wq,
-			   atomic_read(&eb->blocking_writers) == 0);
+			   eb->blocking_writers == 0);
 		goto again;
 	}
 	btrfs_assert_tree_read_locks_get(eb);
@@ -203,11 +200,11 @@ again:
  */
 int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
 {
-	if (atomic_read(&eb->blocking_writers))
+	if (eb->blocking_writers)
 		return 0;
 
 	read_lock(&eb->lock);
-	if (atomic_read(&eb->blocking_writers)) {
+	if (eb->blocking_writers) {
 		read_unlock(&eb->lock);
 		return 0;
 	}
@@ -223,13 +220,13 @@ int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
  */
 int btrfs_try_tree_read_lock(struct extent_buffer *eb)
 {
-	if (atomic_read(&eb->blocking_writers))
+	if (eb->blocking_writers)
 		return 0;
 
 	if (!read_trylock(&eb->lock))
 		return 0;
 
-	if (atomic_read(&eb->blocking_writers)) {
+	if (eb->blocking_writers) {
 		read_unlock(&eb->lock);
 		return 0;
 	}
@@ -245,13 +242,11 @@ int btrfs_try_tree_read_lock(struct extent_buffer *eb)
  */
 int btrfs_try_tree_write_lock(struct extent_buffer *eb)
 {
-	if (atomic_read(&eb->blocking_writers) ||
-	    atomic_read(&eb->blocking_readers))
+	if (eb->blocking_writers || atomic_read(&eb->blocking_readers))
 		return 0;
 
 	write_lock(&eb->lock);
-	if (atomic_read(&eb->blocking_writers) ||
-	    atomic_read(&eb->blocking_readers)) {
+	if (eb->blocking_writers || atomic_read(&eb->blocking_readers)) {
 		write_unlock(&eb->lock);
 		return 0;
 	}
@@ -322,10 +317,9 @@ void btrfs_tree_lock(struct extent_buffer *eb)
 	WARN_ON(eb->lock_owner == current->pid);
again:
 	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
-	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
+	wait_event(eb->write_lock_wq, eb->blocking_writers == 0);
 	write_lock(&eb->lock);
-	if (atomic_read(&eb->blocking_readers) ||
-	    atomic_read(&eb->blocking_writers)) {
+	if (atomic_read(&eb->blocking_readers) || eb->blocking_writers) {
 		write_unlock(&eb->lock);
 		goto again;
 	}
@@ -340,7 +334,7 @@ again:
  */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
-	int blockers = atomic_read(&eb->blocking_writers);
+	int blockers = eb->blocking_writers;
 
 	BUG_ON(blockers > 1);
@@ -351,7 +345,7 @@ void btrfs_tree_unlock(struct extent_buffer *eb)
 
 	if (blockers) {
 		btrfs_assert_no_spinning_writers(eb);
-		atomic_dec(&eb->blocking_writers);
+		eb->blocking_writers--;
 		/* Use the lighter barrier after atomic */
 		smp_mb__after_atomic();
 		cond_wake_up_nomb(&eb->write_lock_wq);
@@ -155,7 +155,7 @@ static void print_eb_refs_lock(struct extent_buffer *eb)
 "refs %u lock (w:%d r:%d bw:%d br:%d sw:%d sr:%d) lock_owner %u current %u",
 		   atomic_read(&eb->refs), atomic_read(&eb->write_locks),
 		   atomic_read(&eb->read_locks),
-		   atomic_read(&eb->blocking_writers),
+		   eb->blocking_writers,
 		   atomic_read(&eb->blocking_readers),
 		   atomic_read(&eb->spinning_writers),
 		   atomic_read(&eb->spinning_readers),