Diffstat (limited to 'fs/bcachefs/io_write.c')
-rw-r--r--	fs/bcachefs/io_write.c	55
1 file changed, 34 insertions, 21 deletions
diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c
index 9401d13e31bb..d31c8d006d97 100644
--- a/fs/bcachefs/io_write.c
+++ b/fs/bcachefs/io_write.c
@@ -69,11 +69,10 @@ void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw)
 	u64 io_latency = time_after64(now, submit_time)
 		? now - submit_time
 		: 0;
-	u64 old, new, v = atomic64_read(latency);
+	u64 old, new;
 
+	old = atomic64_read(latency);
 	do {
-		old = v;
-
 		/*
 		 * If the io latency was reasonably close to the current
 		 * latency, skip doing the update and atomic operation - most of
@@ -84,7 +83,7 @@ void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw)
 			break;
 
 		new = ewma_add(old, io_latency, 5);
-	} while ((v = atomic64_cmpxchg(latency, old, new)) != old);
+	} while (!atomic64_try_cmpxchg(latency, &old, new));
 
 	bch2_congested_acct(ca, io_latency, now, rw);
 
@@ -555,7 +554,7 @@ out:
 err:
 	keys->top = keys->keys;
 	op->error = ret;
-	op->flags |= BCH_WRITE_DONE;
+	op->flags |= BCH_WRITE_SUBMITTED;
 	goto out;
 }
 
@@ -590,7 +589,7 @@ static CLOSURE_CALLBACK(bch2_write_index)
 	struct workqueue_struct *wq = index_update_wq(op);
 	unsigned long flags;
 
-	if ((op->flags & BCH_WRITE_DONE) &&
+	if ((op->flags & BCH_WRITE_SUBMITTED) &&
 	    (op->flags & BCH_WRITE_MOVE))
 		bch2_bio_free_pages_pool(op->c, &op->wbio.bio);
 
@@ -635,7 +634,7 @@ void bch2_write_point_do_index_updates(struct work_struct *work)
 
 		__bch2_write_index(op);
 
-		if (!(op->flags & BCH_WRITE_DONE))
+		if (!(op->flags & BCH_WRITE_SUBMITTED))
 			__bch2_write(op);
 		else
 			bch2_write_done(&op->cl);
@@ -1081,7 +1080,10 @@ do_write:
 	*_dst = dst;
 	return more;
 csum_err:
-	bch_err(c, "%s writ error: error verifying existing checksum while rewriting existing data (memory corruption?)",
+	bch_err_inum_offset_ratelimited(c,
+		op->pos.inode,
+		op->pos.offset << 9,
+		"%s write error: error verifying existing checksum while rewriting existing data (memory corruption?)",
 		op->flags & BCH_WRITE_MOVE ? "move" : "user");
 	ret = -EIO;
 err:
@@ -1220,7 +1222,7 @@ static void bch2_nocow_write(struct bch_write_op *op)
 	DARRAY_PREALLOCATED(struct bucket_to_lock, 3) buckets;
 	u32 snapshot;
 	struct bucket_to_lock *stale_at;
-	int ret;
+	int stale, ret;
 
 	if (op->flags & BCH_WRITE_MOVE)
 		return;
@@ -1299,7 +1301,8 @@ retry:
 					       BUCKET_NOCOW_LOCK_UPDATE);
 
 			rcu_read_lock();
-			bool stale = gen_after(*bucket_gen(ca, i->b.offset), i->gen);
+			u8 *gen = bucket_gen(ca, i->b.offset);
+			stale = !gen ? -1 : gen_after(*gen, i->gen);
 			rcu_read_unlock();
 
 			if (unlikely(stale)) {
@@ -1315,7 +1318,7 @@ retry:
 			wbio_init(bio)->put_bio = true;
 			bio->bi_opf = op->wbio.bio.bi_opf;
 		} else {
-			op->flags |= BCH_WRITE_DONE;
+			op->flags |= BCH_WRITE_SUBMITTED;
 		}
 
 		op->pos.offset += bio_sectors(bio);
@@ -1329,7 +1332,7 @@ retry:
 					  op->insert_keys.top, true);
 
 		bch2_keylist_push(&op->insert_keys);
-		if (op->flags & BCH_WRITE_DONE)
+		if (op->flags & BCH_WRITE_SUBMITTED)
 			break;
 		bch2_btree_iter_advance(&iter);
 	}
@@ -1344,14 +1347,14 @@ err:
 					       op->pos.inode, op->pos.offset << 9,
 					       "%s: btree lookup error %s", __func__, bch2_err_str(ret));
 		op->error = ret;
-		op->flags |= BCH_WRITE_DONE;
+		op->flags |= BCH_WRITE_SUBMITTED;
 	}
 
 	bch2_trans_put(trans);
 	darray_exit(&buckets);
 
 	/* fallback to cow write path? */
-	if (!(op->flags & BCH_WRITE_DONE)) {
+	if (!(op->flags & BCH_WRITE_SUBMITTED)) {
 		closure_sync(&op->cl);
 		__bch2_nocow_write_done(op);
 		op->insert_keys.top = op->insert_keys.keys;
@@ -1380,8 +1383,18 @@ err_bucket_stale:
 			break;
 	}
 
-	/* We can retry this: */
-	ret = -BCH_ERR_transaction_restart;
+	struct printbuf buf = PRINTBUF;
+	if (bch2_fs_inconsistent_on(stale < 0, c,
+				    "pointer to invalid bucket in nocow path on device %llu\n %s",
+				    stale_at->b.inode,
+				    (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
+		ret = -EIO;
+	} else {
+		/* We can retry this: */
+		ret = -BCH_ERR_transaction_restart;
+	}
+	printbuf_exit(&buf);
+
 	goto err_get_ioref;
 }
 
@@ -1397,7 +1410,7 @@ static void __bch2_write(struct bch_write_op *op)
 
 	if (unlikely(op->opts.nocow && c->opts.nocow_enabled)) {
 		bch2_nocow_write(op);
-		if (op->flags & BCH_WRITE_DONE)
+		if (op->flags & BCH_WRITE_SUBMITTED)
 			goto out_nofs_restore;
 	}
 again:
@@ -1452,7 +1465,7 @@ again:
 		bch2_alloc_sectors_done_inlined(c, wp);
 err:
 	if (ret <= 0) {
-		op->flags |= BCH_WRITE_DONE;
+		op->flags |= BCH_WRITE_SUBMITTED;
 
 		if (ret < 0) {
 			if (!(op->flags & BCH_WRITE_ALLOC_NOWAIT))
@@ -1488,7 +1501,7 @@ err:
 	 * once, as that signals backpressure to the caller.
 	 */
 	if ((op->flags & BCH_WRITE_SYNC) ||
-	    (!(op->flags & BCH_WRITE_DONE) &&
+	    (!(op->flags & BCH_WRITE_SUBMITTED) &&
 	     !(op->flags & BCH_WRITE_IN_WORKER))) {
 		if (closure_sync_timeout(&op->cl, HZ * 10)) {
 			bch2_print_allocator_stuck(c);
@@ -1497,7 +1510,7 @@ err:
 
 		__bch2_write_index(op);
 
-		if (!(op->flags & BCH_WRITE_DONE))
+		if (!(op->flags & BCH_WRITE_SUBMITTED))
 			goto again;
 		bch2_write_done(&op->cl);
 	} else {
@@ -1519,7 +1532,7 @@ static void bch2_write_data_inline(struct bch_write_op *op, unsigned data_len)
 	memset(&op->failed, 0, sizeof(op->failed));
 
 	op->flags |= BCH_WRITE_WROTE_DATA_INLINE;
-	op->flags |= BCH_WRITE_DONE;
+	op->flags |= BCH_WRITE_SUBMITTED;
 
 	bch2_check_set_feature(op->c, BCH_FEATURE_inline_data);
 
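
Aside from the BCH_WRITE_DONE -> BCH_WRITE_SUBMITTED rename, the behavioural changes above are the switch to atomic64_try_cmpxchg() in bch2_latency_acct(), the NULL/stale bucket_gen() handling in the nocow path, and the ratelimited checksum error message. As a rough illustration of the first change only, here is a minimal userspace sketch of the same retry-loop shape using C11 atomics; latency_acct(), ewma_update() and the skip check below are simplified stand-ins of my own, not the kernel's helpers.

/*
 * Sketch of the loop shape used in bch2_latency_acct() after the change.
 * atomic_compare_exchange_weak() plays the role of the kernel's
 * atomic64_try_cmpxchg(): on failure it writes the current value back
 * into "old", so the explicit "old = v;" reload of the removed code is
 * no longer needed.
 */
#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified EWMA with weight 1/2^shift (stand-in for the kernel's ewma_add()). */
static uint64_t ewma_update(uint64_t avg, uint64_t sample, unsigned shift)
{
	return avg - (avg >> shift) + (sample >> shift);
}

static void latency_acct(_Atomic uint64_t *latency, uint64_t io_latency)
{
	uint64_t old = atomic_load(latency);
	uint64_t new;

	do {
		/* Stand-in for the kernel's "close enough, skip the update" check. */
		if (old && io_latency >= old / 2 && io_latency <= old * 2)
			break;

		new = ewma_update(old, io_latency, 5);
		/* On failure, atomic_compare_exchange_weak() refreshes "old". */
	} while (!atomic_compare_exchange_weak(latency, &old, new));
}

int main(void)
{
	_Atomic uint64_t latency = 0;

	latency_acct(&latency, 1000);
	latency_acct(&latency, 5000);
	printf("ewma latency: %" PRIu64 "\n", atomic_load(&latency));
	return 0;
}

The point of the conversion is purely the cmpxchg semantics: try_cmpxchg-style primitives return a boolean and update the expected value on failure, which drops one reload per iteration and lets the compiler emit the tighter compare-and-branch sequence.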