On 2020/11/4 3:46 AM, David Sterba wrote:
> On Wed, Oct 28, 2020 at 03:24:32PM +0800, Qu Wenruo wrote:
>>
>> +/*
>> + * Helper to find csums for logical bytenr range
>
>    * Find checksums for logical bytenr range
>
> 'Helper' or 'This function' are redundant
>
>> + * [disk_bytenr, disk_bytenr + len) and restore the result to @dst.
>                                            ^^^^^^^
>                                            store
>
> So this replaces the individual parameter description and is better
> readable, ok.
>
>> + *
>> + * Return >0 for the number of sectors we found.
>> + * Return 0 for the range [disk_bytenr, disk_bytenr + sectorsize) has no csum
>> + * for it. Caller may want to try next sector until one range is hit.
>> + * Return <0 for fatal error.
>> + */
>> +static int find_csum_tree_sums(struct btrfs_fs_info *fs_info,
>> +                               struct btrfs_path *path, u64 disk_bytenr,
>> +                               u64 len, u8 *dst)
>> +{
>> +        struct btrfs_csum_item *item = NULL;
>> +        struct btrfs_key key;
>> +        u32 csum_size = btrfs_super_csum_size(fs_info->super_copy);
>
> This could use the fs_info->csum_size now that we have it
>
>> +        u32 sectorsize = fs_info->sectorsize;
>> +        int ret;
>> +        u64 csum_start;
>> +        u64 csum_len;
>> +
>> +        ASSERT(IS_ALIGNED(disk_bytenr, sectorsize) &&
>> +               IS_ALIGNED(len, sectorsize));
>> +
>> +        /* Check if the current csum item covers disk_bytenr */
>> +        if (path->nodes[0]) {
>> +                item = btrfs_item_ptr(path->nodes[0], path->slots[0],
>> +                                      struct btrfs_csum_item);
>> +                btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
>> +                csum_start = key.offset;
>> +                csum_len = btrfs_item_size_nr(path->nodes[0], path->slots[0]) /
>> +                           csum_size * sectorsize;
>
>    path->slots[0]) / csum_size * sectorsize
>
> This expression would be better on one line

But it's already over 80 characters. Or maybe I could use a small helper
to do the csum_len calculation, like calc_csum_length(path)?
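Something like the below, just a rough sketch of the idea (the name and
the explicit fs_info parameter are made up here, and it still uses
btrfs_super_csum_size() as in the current version):

/*
 * Hypothetical helper: return how many bytes of data the csum item at
 * path->slots[0] covers.  Each csum_size checksum in the item covers
 * one sectorsize block.
 */
static u64 calc_csum_length(struct btrfs_fs_info *fs_info,
                            struct btrfs_path *path)
{
        u32 csum_size = btrfs_super_csum_size(fs_info->super_copy);
        u32 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);

        return (u64)(item_size / csum_size) * fs_info->sectorsize;
}

Then both places computing csum_len would collapse into one short line,
csum_len = calc_csum_length(fs_info, path);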
>
>> +                if (csum_start <= disk_bytenr &&
>> +                    csum_start + csum_len > disk_bytenr)
>> +                        goto found;
>> +        }
>> +
>> +        /* Current item doesn't contain the desired range, re-search */
>                                                                ^^^^^^^^^
>                                                                search again
>
>> +        btrfs_release_path(path);
>> +        item = btrfs_lookup_csum(NULL, fs_info->csum_root, path,
>> +                                 disk_bytenr, 0);
>> +        if (IS_ERR(item)) {
>> +                ret = PTR_ERR(item);
>> +                goto out;
>> +        }
>> +found:
>> +        btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
>> +        csum_start = key.offset;
>> +        csum_len = btrfs_item_size_nr(path->nodes[0], path->slots[0]) /
>> +                   csum_size * sectorsize;
>
> same, on one line
>
>> +        ASSERT(csum_start <= disk_bytenr &&
>> +               csum_start + csum_len > disk_bytenr);
>> +
>> +        ret = div_u64(min(csum_start + csum_len, disk_bytenr + len) -
>> +                      disk_bytenr, sectorsize);
>
> This can use the sectorsize_bits

Already done in the latest version, see "[PATCH 24/32] btrfs: file-item:
refactor btrfs_lookup_bio_sums() to handle out-of-order bvecs".

...

>> +        /*
>> +         * In fact, for csum lookup we don't really need bio at all.
>> +         *
>> +         * We know the on-disk bytenr, the file_offset, and length.
>> +         * That's enough to search csum. The bio is in fact just a distraction
>> +         * and following bio bvec would make thing much hard to go.
>> +         * As we could have subpage bvec (with different bv_len) and non-linear
>> +         * bvec.
>> +         *
>> +         * So here we don't bother bio at all, just use @cur_offset to do the
>> +         * iteration.
>
> This comment style is maybe more suitable for changelog but in the code
> it comes in the context of the code so I'd expect something like:
>
>        We don't need to use bio because we already know the on-disk
>        bytenr, file_offset and length. For subpage bvec we can even
>        have different bv_len than PAGE_SIZE or non-linear bvec.

Indeed looks better.

>
> Though, if there's redundant information in the bio it can be
> cross-checked by asserts.

The assert() can be done, but since we no longer do bvec iteration at
all, I doubt it's really needed.
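If it were added, it would probably amount to something like this (just
a sketch; and if orig_len and orig_disk_bytenr are themselves derived
from the bio, the checks become tautological anyway):

        /*
         * Hypothetical cross-check: the bio iterator should describe
         * the same on-disk range we look up csums for.
         */
        ASSERT(bio->bi_iter.bi_size == orig_len);
        ASSERT((bio->bi_iter.bi_sector << SECTOR_SHIFT) == orig_disk_bytenr);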
Thanks,
Qu

>
>> +         */
>> +        for (cur_offset = orig_file_offset; cur_offset < orig_file_offset + orig_len;
>> +             cur_offset += count * sectorsize) {
>> +                u64 cur_disk_bytenr;
>> +                int search_len = orig_file_offset + orig_len - cur_offset;
>> +                int diff_sectors;
>
> Int types mixed with u64
>
>> +                u8 *csum_dst;
>> +
>> +                diff_sectors = div_u64(cur_offset - orig_file_offset,
>> +                                       sectorsize);
>> +                cur_disk_bytenr = orig_disk_bytenr +
>> +                                  diff_sectors * sectorsize;
>> +                csum_dst = csum + diff_sectors * csum_size;
>> +
>> +                count = btrfs_find_ordered_sum(inode, cur_offset,
>> +                                               cur_disk_bytenr, csum_dst,
>> +                                               search_len / sectorsize);
>>                  if (count)
>> -                        goto found;
>> -
>> -                if (!item || disk_bytenr < item_start_offset ||
>> -                    disk_bytenr >= item_last_offset) {
>> -                        struct btrfs_key found_key;
>> -                        u32 item_size;
>> -
>> -                        if (item)
>> -                                btrfs_release_path(path);
>> -                        item = btrfs_lookup_csum(NULL, fs_info->csum_root,
>> -                                                 path, disk_bytenr, 0);
>> -                        if (IS_ERR(item)) {
>> -                                count = 1;
>> -                                memset(csum, 0, csum_size);
>> -                                if (BTRFS_I(inode)->root->root_key.objectid ==
>> -                                    BTRFS_DATA_RELOC_TREE_OBJECTID) {
>> -                                        set_extent_bits(io_tree, offset,
>> -                                                offset + fs_info->sectorsize - 1,
>> -                                                EXTENT_NODATASUM);
>> -                                } else {
>> -                                        btrfs_info_rl(fs_info,
>> -                                        "no csum found for inode %llu start %llu",
>> -                                                btrfs_ino(BTRFS_I(inode)), offset);
>> -                                }
>> -                                item = NULL;
>> -                                btrfs_release_path(path);
>> -                                goto found;
>> +                        continue;
>> +                count = find_csum_tree_sums(fs_info, path, cur_disk_bytenr,
>> +                                            search_len, csum_dst);
>> +                if (!count) {
>> +                        /*
>> +                         * For not found case, the csum has been zeroed
>> +                         * in find_csum_tree_sums() already, just skip
>> +                         * to next sector.
>> +                         */
>> +                        count = 1;
>> +                        if (BTRFS_I(inode)->root->root_key.objectid ==
>> +                            BTRFS_DATA_RELOC_TREE_OBJECTID) {
>> +                                set_extent_bits(io_tree, cur_offset,
>> +                                        cur_offset + sectorsize - 1,
>> +                                        EXTENT_NODATASUM);
>> +                        } else {
>> +                                btrfs_warn_rl(fs_info,
>> +        "csum hole found for root %lld inode %llu range [%llu, %llu)",
>> +                                        BTRFS_I(inode)->root->root_key.objectid,
>> +                                        btrfs_ino(BTRFS_I(inode)),
>> +                                        cur_offset, cur_offset + sectorsize);
>>                          }
>> -                        btrfs_item_key_to_cpu(path->nodes[0], &found_key,
>> -                                              path->slots[0]);
>> -
>> -                        item_start_offset = found_key.offset;
>> -                        item_size = btrfs_item_size_nr(path->nodes[0],
>> -                                                       path->slots[0]);
>> -                        item_last_offset = item_start_offset +
>> -                                (item_size / csum_size) *
>> -                                fs_info->sectorsize;
>> -                        item = btrfs_item_ptr(path->nodes[0], path->slots[0],
>> -                                              struct btrfs_csum_item);
>> -                }
>> -                /*
>> -                 * this byte range must be able to fit inside
>> -                 * a single leaf so it will also fit inside a u32
>> -                 */
>> -                diff = disk_bytenr - item_start_offset;
>> -                diff = diff / fs_info->sectorsize;
>> -                diff = diff * csum_size;
>> -                count = min_t(int, nblocks, (item_last_offset - disk_bytenr) >>
>> -                                inode->i_sb->s_blocksize_bits);
>> -                read_extent_buffer(path->nodes[0], csum,
>> -                                   ((unsigned long)item) + diff,
>> -                                   csum_size * count);
>> -found:
>> -                csum += count * csum_size;
>> -                nblocks -= count;
>> -next:
>> -                while (count > 0) {
>> -                        count--;
>> -                        disk_bytenr += fs_info->sectorsize;
>> -                        offset += fs_info->sectorsize;
>> -                        page_bytes_left -= fs_info->sectorsize;
>> -                        if (!page_bytes_left)
>> -                                break; /* move to next bio */
>>                  }
>>          }
>>
>> -        WARN_ON_ONCE(count);
>>          btrfs_free_path(path);
>>          return BLK_STS_OK;
>> }
>> --
>> 2.29.1
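(As an aside on the "Int types mixed with u64" remark above: one way to
keep the widths consistent, purely a sketch and not from any posted
revision, would be to size the loop-local byte and sector counts as u32,
since both are bounded by the bio size:

                u64 cur_disk_bytenr;
                /* Bounded by the bio size, so 32 bits are enough */
                u32 search_len = orig_file_offset + orig_len - cur_offset;
                u32 diff_sectors;

This also keeps search_len / sectorsize a plain 32-bit division, with
the rest of the loop body unchanged.)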