Diffstat (limited to 'fs/btrfs/compression.c')
-rw-r--r--	fs/btrfs/compression.c	54
1 files changed, 52 insertions, 2 deletions
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index d2ef9ac2a630..280384bf34f1 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -107,7 +107,8 @@ static void end_compressed_bio_read(struct bio *bio)
 	struct inode *inode;
 	struct page *page;
 	unsigned long index;
-	int ret;
+	unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
+	int ret = 0;
 
 	if (bio->bi_status)
 		cb->errors = 1;
@@ -118,6 +119,21 @@ static void end_compressed_bio_read(struct bio *bio)
 	if (!refcount_dec_and_test(&cb->pending_bios))
 		goto out;
 
+	/*
+	 * Record the correct mirror_num in cb->orig_bio so that
+	 * read-repair can work properly.
+	 */
+	ASSERT(btrfs_io_bio(cb->orig_bio));
+	btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
+	cb->mirror_num = mirror;
+
+	/*
+	 * Some IO in this cb have failed, just skip checksum as there
+	 * is no way it could be correct.
+	 */
+	if (cb->errors == 1)
+		goto csum_failed;
+
 	inode = cb->inode;
 	ret = check_compressed_csum(BTRFS_I(inode), cb,
 				    (u64)bio->bi_iter.bi_sector << 9);
@@ -704,6 +720,7 @@ static struct {
 static const struct btrfs_compress_op * const btrfs_compress_op[] = {
 	&btrfs_zlib_compress,
 	&btrfs_lzo_compress,
+	&btrfs_zstd_compress,
 };
 
 void __init btrfs_init_compress(void)
@@ -825,7 +842,7 @@ static void free_workspace(int type, struct list_head *workspace)
 	int *free_ws			= &btrfs_comp_ws[idx].free_ws;
 
 	spin_lock(ws_lock);
-	if (*free_ws < num_online_cpus()) {
+	if (*free_ws <= num_online_cpus()) {
 		list_add(workspace, idle_ws);
 		(*free_ws)++;
 		spin_unlock(ws_lock);
@@ -1047,3 +1064,36 @@ int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
 
 	return 1;
 }
+
+/*
+ * Compression heuristic.
+ *
+ * For now is's a naive and optimistic 'return true', we'll extend the logic to
+ * quickly (compared to direct compression) detect data characteristics
+ * (compressible/uncompressible) to avoid wasting CPU time on uncompressible
+ * data.
+ *
+ * The following types of analysis can be performed:
+ * - detect mostly zero data
+ * - detect data with low "byte set" size (text, etc)
+ * - detect data with low/high "core byte" set
+ *
+ * Return non-zero if the compression should be done, 0 otherwise.
+ */
+int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
+{
+	u64 index = start >> PAGE_SHIFT;
+	u64 end_index = end >> PAGE_SHIFT;
+	struct page *page;
+	int ret = 1;
+
+	while (index <= end_index) {
+		page = find_get_page(inode->i_mapping, index);
+		kmap(page);
+		kunmap(page);
+		put_page(page);
+		index++;
+	}
+
+	return ret;
+}
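
The free_workspace() hunk above loosens the cache bound from '<' to '<=', so the per-algorithm idle list may now hold one workspace more than num_online_cpus() before surplus workspaces are torn down. The following user-space sketch shows the same bounded free-list pattern in isolation; the names (struct ws, ws_put, ws_get, ws_cap) are invented for the illustration and are not btrfs APIs.

/*
 * Illustrative sketch (not btrfs code): a bounded cache of reusable
 * "workspaces".  Releasing a workspace puts it back on an idle list
 * only while the cached count is at or below the cap; otherwise it is
 * destroyed, mirroring the "<=" check in free_workspace() above.
 */
#include <stdlib.h>

struct ws {
	struct ws *next;
	void *buf;		/* scratch memory a compressor would use */
};

static struct ws *idle_ws;	/* head of the idle list */
static int free_ws;		/* how many workspaces are cached */
static int ws_cap = 4;		/* stand-in for num_online_cpus() */

static void ws_destroy(struct ws *w)
{
	free(w->buf);
	free(w);
}

/* Return a workspace to the cache, or destroy it if the cache is full. */
static void ws_put(struct ws *w)
{
	if (free_ws <= ws_cap) {	/* "<=" keeps one extra entry around */
		w->next = idle_ws;
		idle_ws = w;
		free_ws++;
		return;
	}
	ws_destroy(w);
}

/* Take a cached workspace if available; the caller allocates on NULL. */
static struct ws *ws_get(void)
{
	struct ws *w = idle_ws;

	if (w) {
		idle_ws = w->next;
		free_ws--;
	}
	return w;
}

Tying the cap to the CPU count bounds memory use while still letting a burst of concurrent compressions reuse workspaces instead of reallocating them each time.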
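
The btrfs_compress_heuristic() added at the end of the diff is deliberately a stub that walks the range's pages and returns 1; its comment only lists the analyses planned later (mostly zero data, small "byte set", core byte set). As a rough idea of what such checks could look like, here is a user-space sketch that scores a sampled buffer; the function name, thresholds, and structure are made up for illustration and do not reflect the eventual kernel implementation.

/*
 * Illustrative sketch (not the kernel implementation): score a sampled
 * buffer with the kinds of checks the heuristic comment above mentions.
 * Returns 1 if compression looks worthwhile, 0 otherwise; the thresholds
 * are invented for the example.
 */
#include <stddef.h>
#include <stdint.h>

static int sample_looks_compressible(const uint8_t *buf, size_t len)
{
	size_t counts[256] = { 0 };
	size_t zeros = 0, byte_set = 0;
	size_t i;

	for (i = 0; i < len; i++) {
		if (!buf[i])
			zeros++;
		counts[buf[i]]++;
	}

	for (i = 0; i < 256; i++)
		if (counts[i])
			byte_set++;	/* distinct byte values seen */

	/* Mostly-zero data compresses extremely well. */
	if (zeros > len / 2)
		return 1;

	/* A small "byte set" (e.g. plain text) is a good sign as well. */
	if (byte_set < 64)
		return 1;

	/* Otherwise assume it is not worth the CPU time. */
	return 0;
}

A real in-kernel version would sample only parts of the range and work on kmap()'ed pages rather than a flat buffer; the sketch ignores both concerns for brevity.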