Merge remote-tracking branch 'aosp/upstream-f2fs-stable-linux-5.4.y' into android-5.4
Merged in v5.5-rc1.

* aosp/upstream-f2fs-stable-linux-5.4.y:
  docs: fs-verity: mention statx() support
  f2fs: support STATX_ATTR_VERITY
  ext4: support STATX_ATTR_VERITY
  statx: define STATX_ATTR_VERITY
  docs: fs-verity: document first supported kernel version
  f2fs: add support for IV_INO_LBLK_64 encryption policies
  ext4: add support for IV_INO_LBLK_64 encryption policies
  fscrypt: add support for IV_INO_LBLK_64 policies
  fscrypt: avoid data race on fscrypt_mode::logged_impl_name
  fscrypt: zeroize fscrypt_info before freeing
  fscrypt: remove struct fscrypt_ctx
  fscrypt: invoke crypto API for ESSIV handling
  null_blk: remove unused variable warning on !CONFIG_BLK_DEV_ZONED
  block: set the zone size in blk_revalidate_disk_zones atomically
  block: don't handle bio based drivers in blk_revalidate_disk_zones
  null_blk: cleanup null_gendisk_register
  null_blk: fix zone size paramter check
  block: allocate the zone bitmaps lazily
  block: replace seq_zones_bitmap with conv_zones_bitmap
  block: simplify blkdev_nr_zones
  block: remove the empty line at the end of blk-zoned.c
  scsi: sd_zbc: Improve report zones error printout
  scsi: sd_zbc: Remove set but not used variable 'buflen'
  block: rework zone reporting
  scsi: sd_zbc: Cleanup sd_zbc_alloc_report_buffer()
  null_blk: clean up report zones
  null_blk: clean up the block device operations
  null_blk: return fixed zoned reads > write pointer
  scsi: sd_zbc: add zone open, close, and finish support
  block: Remove partition support for zoned block devices
  block: Simplify report zones execution
  block: cleanup the !zoned case in blk_revalidate_disk_zones
  block: Enhance blk_revalidate_disk_zones()
  block: add zone open, close and finish ioctl support
  block: add zone open, close and finish operations
  block: Simplify REQ_OP_ZONE_RESET_ALL handling
  block: Remove REQ_OP_ZONE_RESET plugging
  f2fs: stop GC when the victim becomes fully valid
  f2fs: expose main_blkaddr in sysfs
  f2fs: choose hardlimit when softlimit is larger than hardlimit in f2fs_statfs_project()
  f2fs: Fix deadlock in f2fs_gc() context during atomic files handling
  f2fs: show f2fs instance in printk_ratelimited
  f2fs: fix potential overflow
  f2fs: fix to update dir's i_pino during cross_rename
  f2fs: support aligned pinned file
  f2fs: avoid kernel panic on corruption test
  f2fs: fix wrong description in document
  f2fs: cache global IPU bio
  f2fs: fix to avoid memory leakage in f2fs_listxattr
  f2fs: check total_segments from devices in raw_super
  f2fs: update multi-dev metadata in resize_fs
  f2fs: mark recovery flag correctly in read_raw_super_block()
  f2fs: fix to update time in lazytime mode

Change-Id: I9325127228fb82b67f064ce8b3bc8d40ac76e65b
Signed-off-by: Jaegeuk Kim <jaegeuk@google.com>
commit 57df3030e1
@@ -31,6 +31,12 @@ Contact:	"Jaegeuk Kim" <jaegeuk.kim@samsung.com>
 Description:
 		Controls the issue rate of segment discard commands.
 
+What:		/sys/fs/f2fs/<disk>/main_blkaddr
+Date:		November 2019
+Contact:	"Ramon Pantin" <pantin@google.com>
+Description:
+		Shows first block address of MAIN area.
+
 What:		/sys/fs/f2fs/<disk>/ipu_policy
 Date:		November 2013
 Contact:	"Jaegeuk Kim" <jaegeuk.kim@samsung.com>
@@ -297,6 +297,9 @@ Files in /sys/fs/f2fs/<devname>
                               reclaim the prefree segments to free segments.
                               By default, 5% over total # of segments.
 
+main_blkaddr                  This value gives the first block address of
+                              MAIN area in the partition.
+
 max_small_discards            This parameter controls the number of discard
                               commands that consist small blocks less than 2MB.
                               The candidates to be discarded are cached until
@@ -346,7 +349,7 @@ Files in /sys/fs/f2fs/<devname>
 
 ram_thresh                    This parameter controls the memory footprint used
                               by free nids and cached nat entries. By default,
-                              10 is set, which indicates 10 MB / 1 GB RAM.
+                              1 is set, which indicates 10 MB / 1 GB RAM.
 
 ra_nid_pages                  When building free nids, F2FS reads NAT blocks
                               ahead for speed up. Default is 0.
@@ -226,6 +226,14 @@ To do so, check for FS_VERITY_FL (0x00100000) in the returned flags.
 The verity flag is not settable via FS_IOC_SETFLAGS.  You must use
 FS_IOC_ENABLE_VERITY instead, since parameters must be provided.
 
+statx
+-----
+
+Since Linux v5.5, the statx() system call sets STATX_ATTR_VERITY if
+the file has fs-verity enabled.  This can perform better than
+FS_IOC_GETFLAGS and FS_IOC_MEASURE_VERITY because it doesn't require
+opening the file, and opening verity files can be expensive.
+
 Accessing verity files
 ======================
 
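[Illustration, not part of the patch: a minimal userspace sketch of the statx() check documented above. It assumes Linux v5.5+ UAPI headers and glibc 2.28+ for the statx() wrapper; the fallback #define is the UAPI constant value.]

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/stat.h>

    #ifndef STATX_ATTR_VERITY
    #define STATX_ATTR_VERITY 0x00100000 /* from <linux/stat.h>, v5.5+ */
    #endif

    int main(int argc, char **argv)
    {
        struct statx stx;

        if (argc != 2) {
            fprintf(stderr, "usage: %s <file>\n", argv[0]);
            return 1;
        }
        /* stx_attributes is filled regardless of the request mask */
        if (statx(AT_FDCWD, argv[1], 0, STATX_BASIC_STATS, &stx) != 0) {
            perror("statx");
            return 1;
        }
        if (stx.stx_attributes_mask & STATX_ATTR_VERITY)
            printf("%s: fs-verity %s\n", argv[1],
                   (stx.stx_attributes & STATX_ATTR_VERITY) ?
                   "enabled" : "disabled");
        else
            printf("%s: kernel does not report STATX_ATTR_VERITY\n", argv[1]);
        return 0;
    }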
@@ -398,7 +406,7 @@ pages have been read into the pagecache. (See `Verifying data`_.)
 ext4
 ----
 
-ext4 supports fs-verity since Linux TODO and e2fsprogs v1.45.2.
+ext4 supports fs-verity since Linux v5.4 and e2fsprogs v1.45.2.
 
 To create verity files on an ext4 filesystem, the filesystem must have
 been formatted with ``-O verity`` or had ``tune2fs -O verity`` run on
@@ -434,7 +442,7 @@ also only supports extent-based files.
 f2fs
 ----
 
-f2fs supports fs-verity since Linux TODO and f2fs-tools v1.11.0.
+f2fs supports fs-verity since Linux v5.4 and f2fs-tools v1.11.0.
 
 To create verity files on an f2fs filesystem, the filesystem must have
 been formatted with ``-O verity``.
@@ -133,6 +133,9 @@ static const char *const blk_op_name[] = {
     REQ_OP_NAME(SECURE_ERASE),
     REQ_OP_NAME(ZONE_RESET),
     REQ_OP_NAME(ZONE_RESET_ALL),
+    REQ_OP_NAME(ZONE_OPEN),
+    REQ_OP_NAME(ZONE_CLOSE),
+    REQ_OP_NAME(ZONE_FINISH),
     REQ_OP_NAME(WRITE_SAME),
     REQ_OP_NAME(WRITE_ZEROES),
     REQ_OP_NAME(SCSI_IN),
@@ -849,11 +852,7 @@ static inline int blk_partition_remap(struct bio *bio)
     if (unlikely(bio_check_ro(bio, p)))
         goto out;
 
-    /*
-     * Zone reset does not include bi_size so bio_sectors() is always 0.
-     * Include a test for the reset op code and perform the remap if needed.
-     */
-    if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET) {
+    if (bio_sectors(bio)) {
         if (bio_check_eod(bio, part_nr_sects_read(p)))
             goto out;
         bio->bi_iter.bi_sector += p->start_sect;
@@ -940,6 +939,9 @@ generic_make_request_checks(struct bio *bio)
             goto not_supported;
         break;
     case REQ_OP_ZONE_RESET:
+    case REQ_OP_ZONE_OPEN:
+    case REQ_OP_ZONE_CLOSE:
+    case REQ_OP_ZONE_FINISH:
         if (!blk_queue_is_zoned(q))
             goto not_supported;
         break;
@@ -70,195 +70,98 @@ void __blk_req_zone_write_unlock(struct request *rq)
 }
 EXPORT_SYMBOL_GPL(__blk_req_zone_write_unlock);
 
-static inline unsigned int __blkdev_nr_zones(struct request_queue *q,
-                                             sector_t nr_sectors)
-{
-    sector_t zone_sectors = blk_queue_zone_sectors(q);
-
-    return (nr_sectors + zone_sectors - 1) >> ilog2(zone_sectors);
-}
-
 /**
  * blkdev_nr_zones - Get number of zones
- * @bdev: Target block device
+ * @disk: Target gendisk
  *
  * Description:
- *    Return the total number of zones of a zoned block device.
- *    For a regular block device, the number of zones is always 0.
+ *    Return the total number of zones of a zoned block device. For a block
+ *    device without zone capabilities, the number of zones is always 0.
  */
-unsigned int blkdev_nr_zones(struct block_device *bdev)
+unsigned int blkdev_nr_zones(struct gendisk *disk)
 {
-    struct request_queue *q = bdev_get_queue(bdev);
+    sector_t zone_sectors = blk_queue_zone_sectors(disk->queue);
 
-    if (!blk_queue_is_zoned(q))
+    if (!blk_queue_is_zoned(disk->queue))
         return 0;
-
-    return __blkdev_nr_zones(q, bdev->bd_part->nr_sects);
+    return (get_capacity(disk) + zone_sectors - 1) >> ilog2(zone_sectors);
 }
 EXPORT_SYMBOL_GPL(blkdev_nr_zones);
 
-/*
- * Check that a zone report belongs to this partition, and if yes, fix its start
- * sector and write pointer and return true. Return false otherwise.
- */
-static bool blkdev_report_zone(struct block_device *bdev, struct blk_zone *rep)
-{
-    sector_t offset = get_start_sect(bdev);
-
-    if (rep->start < offset)
-        return false;
-
-    rep->start -= offset;
-    if (rep->start + rep->len > bdev->bd_part->nr_sects)
-        return false;
-
-    if (rep->type == BLK_ZONE_TYPE_CONVENTIONAL)
-        rep->wp = rep->start + rep->len;
-    else
-        rep->wp -= offset;
-    return true;
-}
-
-static int blk_report_zones(struct gendisk *disk, sector_t sector,
-                            struct blk_zone *zones, unsigned int *nr_zones)
-{
-    struct request_queue *q = disk->queue;
-    unsigned int z = 0, n, nrz = *nr_zones;
-    sector_t capacity = get_capacity(disk);
-    int ret;
-
-    while (z < nrz && sector < capacity) {
-        n = nrz - z;
-        ret = disk->fops->report_zones(disk, sector, &zones[z], &n);
-        if (ret)
-            return ret;
-        if (!n)
-            break;
-        sector += blk_queue_zone_sectors(q) * n;
-        z += n;
-    }
-
-    WARN_ON(z > *nr_zones);
-    *nr_zones = z;
-
-    return 0;
-}
-
 /**
  * blkdev_report_zones - Get zones information
  * @bdev: Target block device
  * @sector: Sector from which to report zones
- * @zones: Array of zone structures where to return the zones information
- * @nr_zones: Number of zone structures in the zone array
+ * @nr_zones: Maximum number of zones to report
+ * @cb: Callback function called for each reported zone
+ * @data: Private data for the callback
  *
  * Description:
- *    Get zone information starting from the zone containing @sector.
- *    The number of zone information reported may be less than the number
- *    requested by @nr_zones. The number of zones actually reported is
- *    returned in @nr_zones.
- *    The caller must use memalloc_noXX_save/restore() calls to control
- *    memory allocations done within this function (zone array and command
- *    buffer allocation by the device driver).
+ *    Get zone information starting from the zone containing @sector for at most
+ *    @nr_zones, and call @cb for each zone reported by the device.
+ *    To report all zones in a device starting from @sector, the BLK_ALL_ZONES
+ *    constant can be passed to @nr_zones.
+ *    Returns the number of zones reported by the device, or a negative errno
+ *    value in case of failure.
+ *
+ *    Note: The caller must use memalloc_noXX_save/restore() calls to control
+ *    memory allocations done within this function.
  */
 int blkdev_report_zones(struct block_device *bdev, sector_t sector,
-                        struct blk_zone *zones, unsigned int *nr_zones)
+                        unsigned int nr_zones, report_zones_cb cb, void *data)
 {
-    struct request_queue *q = bdev_get_queue(bdev);
-    unsigned int i, nrz;
-    int ret;
+    struct gendisk *disk = bdev->bd_disk;
+    sector_t capacity = get_capacity(disk);
 
-    if (!blk_queue_is_zoned(q))
+    if (!blk_queue_is_zoned(bdev_get_queue(bdev)) ||
+        WARN_ON_ONCE(!disk->fops->report_zones))
         return -EOPNOTSUPP;
 
-    /*
-     * A block device that advertized itself as zoned must have a
-     * report_zones method. If it does not have one defined, the device
-     * driver has a bug. So warn about that.
-     */
-    if (WARN_ON_ONCE(!bdev->bd_disk->fops->report_zones))
-        return -EOPNOTSUPP;
-
-    if (!*nr_zones || sector >= bdev->bd_part->nr_sects) {
-        *nr_zones = 0;
+    if (!nr_zones || sector >= capacity)
         return 0;
-    }
 
-    nrz = min(*nr_zones,
-              __blkdev_nr_zones(q, bdev->bd_part->nr_sects - sector));
-    ret = blk_report_zones(bdev->bd_disk, get_start_sect(bdev) + sector,
-                           zones, &nrz);
-    if (ret)
-        return ret;
-
-    for (i = 0; i < nrz; i++) {
-        if (!blkdev_report_zone(bdev, zones))
-            break;
-        zones++;
-    }
-
-    *nr_zones = i;
-
-    return 0;
+    return disk->fops->report_zones(disk, sector, nr_zones, cb, data);
 }
 EXPORT_SYMBOL_GPL(blkdev_report_zones);
 
-/*
- * Special case of zone reset operation to reset all zones in one command,
- * useful for applications like mkfs.
- */
-static int __blkdev_reset_all_zones(struct block_device *bdev, gfp_t gfp_mask)
-{
-    struct bio *bio = bio_alloc(gfp_mask, 0);
-    int ret;
-
-    /* across the zones operations, don't need any sectors */
-    bio_set_dev(bio, bdev);
-    bio_set_op_attrs(bio, REQ_OP_ZONE_RESET_ALL, 0);
-
-    ret = submit_bio_wait(bio);
-    bio_put(bio);
-
-    return ret;
-}
-
 static inline bool blkdev_allow_reset_all_zones(struct block_device *bdev,
+                                                sector_t sector,
                                                 sector_t nr_sectors)
 {
     if (!blk_queue_zone_resetall(bdev_get_queue(bdev)))
         return false;
 
-    if (nr_sectors != part_nr_sects_read(bdev->bd_part))
-        return false;
     /*
-     * REQ_OP_ZONE_RESET_ALL can be executed only if the block device is
-     * the entire disk, that is, if the blocks device start offset is 0 and
-     * its capacity is the same as the entire disk.
+     * REQ_OP_ZONE_RESET_ALL can be executed only if the number of sectors
+     * of the applicable zone range is the entire disk.
      */
-    return get_start_sect(bdev) == 0 &&
-           part_nr_sects_read(bdev->bd_part) == get_capacity(bdev->bd_disk);
+    return !sector && nr_sectors == get_capacity(bdev->bd_disk);
 }
 
 /**
- * blkdev_reset_zones - Reset zones write pointer
+ * blkdev_zone_mgmt - Execute a zone management operation on a range of zones
  * @bdev: Target block device
- * @sector: Start sector of the first zone to reset
- * @nr_sectors: Number of sectors, at least the length of one zone
+ * @op: Operation to be performed on the zones
+ * @sector: Start sector of the first zone to operate on
+ * @nr_sectors: Number of sectors, should be at least the length of one zone and
+ *              must be zone size aligned.
  * @gfp_mask: Memory allocation flags (for bio_alloc)
 *
 * Description:
- *    Reset the write pointer of the zones contained in the range
+ *    Perform the specified operation on the range of zones specified by
 *    @sector..@sector+@nr_sectors. Specifying the entire disk sector range
 *    is valid, but the specified range should not contain conventional zones.
+ *    The operation to execute on each zone can be a zone reset, open, close
+ *    or finish request.
 */
-int blkdev_reset_zones(struct block_device *bdev,
-                       sector_t sector, sector_t nr_sectors,
-                       gfp_t gfp_mask)
+int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
+                     sector_t sector, sector_t nr_sectors,
+                     gfp_t gfp_mask)
 {
     struct request_queue *q = bdev_get_queue(bdev);
-    sector_t zone_sectors;
+    sector_t zone_sectors = blk_queue_zone_sectors(q);
+    sector_t capacity = get_capacity(bdev->bd_disk);
     sector_t end_sector = sector + nr_sectors;
     struct bio *bio = NULL;
-    struct blk_plug plug;
     int ret;
 
     if (!blk_queue_is_zoned(q))
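[Illustration, not part of the patch: with the reworked reporting API above, an in-kernel caller walks zones through a per-zone callback instead of allocating a struct blk_zone array up front. The helper names here are hypothetical.]

    #include <linux/blkdev.h>

    /* Callback invoked once per reported zone; non-zero stops the loop. */
    static int count_seq_zone_cb(struct blk_zone *zone, unsigned int idx,
                                 void *data)
    {
        unsigned int *nr_seq = data;

        if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL)
            (*nr_seq)++;
        return 0;
    }

    static int count_seq_zones(struct block_device *bdev, unsigned int *nr_seq)
    {
        int ret;

        *nr_seq = 0;
        /* BLK_ALL_ZONES reports every zone from sector 0 onward */
        ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
                                  count_seq_zone_cb, nr_seq);
        return ret < 0 ? ret : 0;
    }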
@@ -267,45 +170,62 @@ int blkdev_reset_zones(struct block_device *bdev,
     if (bdev_read_only(bdev))
         return -EPERM;
 
-    if (!nr_sectors || end_sector > bdev->bd_part->nr_sects)
+    if (!op_is_zone_mgmt(op))
+        return -EOPNOTSUPP;
+
+    if (!nr_sectors || end_sector > capacity)
         /* Out of range */
         return -EINVAL;
 
-    if (blkdev_allow_reset_all_zones(bdev, nr_sectors))
-        return __blkdev_reset_all_zones(bdev, gfp_mask);
-
     /* Check alignment (handle eventual smaller last zone) */
-    zone_sectors = blk_queue_zone_sectors(q);
     if (sector & (zone_sectors - 1))
         return -EINVAL;
 
-    if ((nr_sectors & (zone_sectors - 1)) &&
-        end_sector != bdev->bd_part->nr_sects)
+    if ((nr_sectors & (zone_sectors - 1)) && end_sector != capacity)
         return -EINVAL;
 
-    blk_start_plug(&plug);
     while (sector < end_sector) {
-
         bio = blk_next_bio(bio, 0, gfp_mask);
-        bio->bi_iter.bi_sector = sector;
         bio_set_dev(bio, bdev);
-        bio_set_op_attrs(bio, REQ_OP_ZONE_RESET, 0);
 
+        /*
+         * Special case for the zone reset operation that reset all
+         * zones, this is useful for applications like mkfs.
+         */
+        if (op == REQ_OP_ZONE_RESET &&
+            blkdev_allow_reset_all_zones(bdev, sector, nr_sectors)) {
+            bio->bi_opf = REQ_OP_ZONE_RESET_ALL;
+            break;
+        }
+
+        bio->bi_opf = op;
+        bio->bi_iter.bi_sector = sector;
         sector += zone_sectors;
 
         /* This may take a while, so be nice to others */
         cond_resched();
-
     }
 
     ret = submit_bio_wait(bio);
     bio_put(bio);
 
-    blk_finish_plug(&plug);
-
     return ret;
 }
-EXPORT_SYMBOL_GPL(blkdev_reset_zones);
+EXPORT_SYMBOL_GPL(blkdev_zone_mgmt);
+
+struct zone_report_args {
+    struct blk_zone __user *zones;
+};
+
+static int blkdev_copy_zone_to_user(struct blk_zone *zone, unsigned int idx,
+                                    void *data)
+{
+    struct zone_report_args *args = data;
+
+    if (copy_to_user(&args->zones[idx], zone, sizeof(struct blk_zone)))
+        return -EFAULT;
+    return 0;
+}
 
 /*
  * BLKREPORTZONE ioctl processing.
@@ -315,9 +235,9 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
                               unsigned int cmd, unsigned long arg)
 {
     void __user *argp = (void __user *)arg;
+    struct zone_report_args args;
     struct request_queue *q;
     struct blk_zone_report rep;
-    struct blk_zone *zones;
     int ret;
 
     if (!argp)
@@ -339,44 +259,29 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
     if (!rep.nr_zones)
         return -EINVAL;
 
-    rep.nr_zones = min(blkdev_nr_zones(bdev), rep.nr_zones);
+    args.zones = argp + sizeof(struct blk_zone_report);
+    ret = blkdev_report_zones(bdev, rep.sector, rep.nr_zones,
+                              blkdev_copy_zone_to_user, &args);
+    if (ret < 0)
+        return ret;
 
-    zones = kvmalloc_array(rep.nr_zones, sizeof(struct blk_zone),
-                           GFP_KERNEL | __GFP_ZERO);
-    if (!zones)
-        return -ENOMEM;
-
-    ret = blkdev_report_zones(bdev, rep.sector, zones, &rep.nr_zones);
-    if (ret)
-        goto out;
-
-    if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report))) {
-        ret = -EFAULT;
-        goto out;
-    }
-
-    if (rep.nr_zones) {
-        if (copy_to_user(argp + sizeof(struct blk_zone_report), zones,
-                         sizeof(struct blk_zone) * rep.nr_zones))
-            ret = -EFAULT;
-    }
-
-out:
-    kvfree(zones);
-
-    return ret;
+    rep.nr_zones = ret;
+    if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report)))
+        return -EFAULT;
+    return 0;
 }
 
 /*
- * BLKRESETZONE ioctl processing.
+ * BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE ioctl processing.
  * Called from blkdev_ioctl.
  */
-int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
-                             unsigned int cmd, unsigned long arg)
+int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
+                           unsigned int cmd, unsigned long arg)
 {
     void __user *argp = (void __user *)arg;
     struct request_queue *q;
     struct blk_zone_range zrange;
+    enum req_opf op;
 
     if (!argp)
         return -EINVAL;
@@ -397,8 +302,25 @@ int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
     if (copy_from_user(&zrange, argp, sizeof(struct blk_zone_range)))
         return -EFAULT;
 
-    return blkdev_reset_zones(bdev, zrange.sector, zrange.nr_sectors,
-                              GFP_KERNEL);
+    switch (cmd) {
+    case BLKRESETZONE:
+        op = REQ_OP_ZONE_RESET;
+        break;
+    case BLKOPENZONE:
+        op = REQ_OP_ZONE_OPEN;
+        break;
+    case BLKCLOSEZONE:
+        op = REQ_OP_ZONE_CLOSE;
+        break;
+    case BLKFINISHZONE:
+        op = REQ_OP_ZONE_FINISH;
+        break;
+    default:
+        return -ENOTTY;
+    }
+
+    return blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
+                            GFP_KERNEL);
 }
 
 static inline unsigned long *blk_alloc_zone_bitmap(int node,
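[Illustration, not part of the patch: the new open/close/finish ioctls dispatched above take the same struct blk_zone_range payload as BLKRESETZONE. A hypothetical userspace sketch, assuming <linux/blkzoned.h> from v5.5+ headers:]

    #include <fcntl.h>
    #include <linux/blkzoned.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    static int finish_zones(const char *dev, __u64 sector, __u64 nr_sectors)
    {
        struct blk_zone_range range = {
            .sector = sector,         /* zone-size aligned */
            .nr_sectors = nr_sectors, /* multiple of the zone size */
        };
        int fd = open(dev, O_RDWR);
        int ret;

        if (fd < 0) {
            perror("open");
            return -1;
        }
        /* BLKOPENZONE and BLKCLOSEZONE take the same argument */
        ret = ioctl(fd, BLKFINISHZONE, &range);
        if (ret)
            perror("ioctl(BLKFINISHZONE)");
        close(fd);
        return ret;
    }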
@@ -408,141 +330,151 @@ static inline unsigned long *blk_alloc_zone_bitmap(int node,
                             GFP_NOIO, node);
 }
 
-/*
- * Allocate an array of struct blk_zone to get nr_zones zone information.
- * The allocated array may be smaller than nr_zones.
- */
-static struct blk_zone *blk_alloc_zones(unsigned int *nr_zones)
-{
-    struct blk_zone *zones;
-    size_t nrz = min(*nr_zones, BLK_ZONED_REPORT_MAX_ZONES);
-
-    /*
-     * GFP_KERNEL here is meaningless as the caller task context has
-     * the PF_MEMALLOC_NOIO flag set in blk_revalidate_disk_zones()
-     * with memalloc_noio_save().
-     */
-    zones = kvcalloc(nrz, sizeof(struct blk_zone), GFP_KERNEL);
-    if (!zones) {
-        *nr_zones = 0;
-        return NULL;
-    }
-
-    *nr_zones = nrz;
-
-    return zones;
-}
-
 void blk_queue_free_zone_bitmaps(struct request_queue *q)
 {
-    kfree(q->seq_zones_bitmap);
-    q->seq_zones_bitmap = NULL;
+    kfree(q->conv_zones_bitmap);
+    q->conv_zones_bitmap = NULL;
    kfree(q->seq_zones_wlock);
    q->seq_zones_wlock = NULL;
 }
 
+struct blk_revalidate_zone_args {
+    struct gendisk *disk;
+    unsigned long *conv_zones_bitmap;
+    unsigned long *seq_zones_wlock;
+    unsigned int nr_zones;
+    sector_t zone_sectors;
+    sector_t sector;
+};
+
+/*
+ * Helper function to check the validity of zones of a zoned block device.
+ */
+static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
+                                  void *data)
+{
+    struct blk_revalidate_zone_args *args = data;
+    struct gendisk *disk = args->disk;
+    struct request_queue *q = disk->queue;
+    sector_t capacity = get_capacity(disk);
+
+    /*
+     * All zones must have the same size, with the exception on an eventual
+     * smaller last zone.
+     */
+    if (zone->start == 0) {
+        if (zone->len == 0 || !is_power_of_2(zone->len)) {
+            pr_warn("%s: Invalid zoned device with non power of two zone size (%llu)\n",
+                    disk->disk_name, zone->len);
+            return -ENODEV;
+        }
+
+        args->zone_sectors = zone->len;
+        args->nr_zones = (capacity + zone->len - 1) >> ilog2(zone->len);
+    } else if (zone->start + args->zone_sectors < capacity) {
+        if (zone->len != args->zone_sectors) {
+            pr_warn("%s: Invalid zoned device with non constant zone size\n",
+                    disk->disk_name);
+            return -ENODEV;
+        }
+    } else {
+        if (zone->len > args->zone_sectors) {
+            pr_warn("%s: Invalid zoned device with larger last zone size\n",
+                    disk->disk_name);
+            return -ENODEV;
+        }
+    }
+
+    /* Check for holes in the zone report */
+    if (zone->start != args->sector) {
+        pr_warn("%s: Zone gap at sectors %llu..%llu\n",
+                disk->disk_name, args->sector, zone->start);
+        return -ENODEV;
+    }
+
+    /* Check zone type */
+    switch (zone->type) {
+    case BLK_ZONE_TYPE_CONVENTIONAL:
+        if (!args->conv_zones_bitmap) {
+            args->conv_zones_bitmap =
+                blk_alloc_zone_bitmap(q->node, args->nr_zones);
+            if (!args->conv_zones_bitmap)
+                return -ENOMEM;
+        }
+        set_bit(idx, args->conv_zones_bitmap);
+        break;
+    case BLK_ZONE_TYPE_SEQWRITE_REQ:
+    case BLK_ZONE_TYPE_SEQWRITE_PREF:
+        if (!args->seq_zones_wlock) {
+            args->seq_zones_wlock =
+                blk_alloc_zone_bitmap(q->node, args->nr_zones);
+            if (!args->seq_zones_wlock)
+                return -ENOMEM;
+        }
+        break;
+    default:
+        pr_warn("%s: Invalid zone type 0x%x at sectors %llu\n",
+                disk->disk_name, (int)zone->type, zone->start);
+        return -ENODEV;
+    }
+
+    args->sector += zone->len;
+    return 0;
+}
+
 /**
  * blk_revalidate_disk_zones - (re)allocate and initialize zone bitmaps
  * @disk: Target disk
 *
 * Helper function for low-level device drivers to (re) allocate and initialize
 * a disk request queue zone bitmaps. This functions should normally be called
- * within the disk ->revalidate method. For BIO based queues, no zone bitmap
- * is allocated.
+ * within the disk ->revalidate method for blk-mq based drivers. For BIO based
+ * drivers only q->nr_zones needs to be updated so that the sysfs exposed value
+ * is correct.
 */
 int blk_revalidate_disk_zones(struct gendisk *disk)
 {
     struct request_queue *q = disk->queue;
-    unsigned int nr_zones = __blkdev_nr_zones(q, get_capacity(disk));
-    unsigned long *seq_zones_wlock = NULL, *seq_zones_bitmap = NULL;
-    unsigned int i, rep_nr_zones = 0, z = 0, nrz;
-    struct blk_zone *zones = NULL;
+    struct blk_revalidate_zone_args args = {
+        .disk = disk,
+    };
     unsigned int noio_flag;
-    sector_t sector = 0;
-    int ret = 0;
+    int ret;
 
+    if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
+        return -EIO;
+    if (WARN_ON_ONCE(!queue_is_mq(q)))
+        return -EIO;
+
     /*
-     * BIO based queues do not use a scheduler so only q->nr_zones
-     * needs to be updated so that the sysfs exposed value is correct.
-     */
-    if (!queue_is_mq(q)) {
-        q->nr_zones = nr_zones;
-        return 0;
-    }
-
-    /*
-     * Ensure that all memory allocations in this context are done as
-     * if GFP_NOIO was specified.
+     * Ensure that all memory allocations in this context are done as if
+     * GFP_NOIO was specified.
     */
     noio_flag = memalloc_noio_save();
-
-    if (!blk_queue_is_zoned(q) || !nr_zones) {
-        nr_zones = 0;
-        goto update;
-    }
-
-    /* Allocate bitmaps */
-    ret = -ENOMEM;
-    seq_zones_wlock = blk_alloc_zone_bitmap(q->node, nr_zones);
-    if (!seq_zones_wlock)
-        goto out;
-    seq_zones_bitmap = blk_alloc_zone_bitmap(q->node, nr_zones);
-    if (!seq_zones_bitmap)
-        goto out;
-
-    /* Get zone information and initialize seq_zones_bitmap */
-    rep_nr_zones = nr_zones;
-    zones = blk_alloc_zones(&rep_nr_zones);
-    if (!zones)
-        goto out;
-
-    while (z < nr_zones) {
-        nrz = min(nr_zones - z, rep_nr_zones);
-        ret = blk_report_zones(disk, sector, zones, &nrz);
-        if (ret)
-            goto out;
-        if (!nrz)
-            break;
-        for (i = 0; i < nrz; i++) {
-            if (zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL)
-                set_bit(z, seq_zones_bitmap);
-            z++;
-        }
-        sector += nrz * blk_queue_zone_sectors(q);
-    }
-
-    if (WARN_ON(z != nr_zones)) {
-        ret = -EIO;
-        goto out;
-    }
-
-update:
-    /*
-     * Install the new bitmaps, making sure the queue is stopped and
-     * all I/Os are completed (i.e. a scheduler is not referencing the
-     * bitmaps).
-     */
-    blk_mq_freeze_queue(q);
-    q->nr_zones = nr_zones;
-    swap(q->seq_zones_wlock, seq_zones_wlock);
-    swap(q->seq_zones_bitmap, seq_zones_bitmap);
-    blk_mq_unfreeze_queue(q);
-
-out:
+    ret = disk->fops->report_zones(disk, 0, UINT_MAX,
+                                   blk_revalidate_zone_cb, &args);
     memalloc_noio_restore(noio_flag);
 
-    kvfree(zones);
-    kfree(seq_zones_wlock);
-    kfree(seq_zones_bitmap);
-
-    if (ret) {
+    /*
+     * Install the new bitmaps and update nr_zones only once the queue is
+     * stopped and all I/Os are completed (i.e. a scheduler is not
+     * referencing the bitmaps).
+     */
+    blk_mq_freeze_queue(q);
+    if (ret >= 0) {
+        blk_queue_chunk_sectors(q, args.zone_sectors);
+        q->nr_zones = args.nr_zones;
+        swap(q->seq_zones_wlock, args.seq_zones_wlock);
+        swap(q->conv_zones_bitmap, args.conv_zones_bitmap);
+        ret = 0;
+    } else {
        pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
-        blk_mq_freeze_queue(q);
        blk_queue_free_zone_bitmaps(q);
-        blk_mq_unfreeze_queue(q);
    }
+    blk_mq_unfreeze_queue(q);
 
+    kfree(args.seq_zones_wlock);
+    kfree(args.conv_zones_bitmap);
    return ret;
 }
 EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
@@ -532,11 +532,14 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
     case BLKREPORTZONE:
         return blkdev_report_zones_ioctl(bdev, mode, cmd, arg);
     case BLKRESETZONE:
-        return blkdev_reset_zones_ioctl(bdev, mode, cmd, arg);
+    case BLKOPENZONE:
+    case BLKCLOSEZONE:
+    case BLKFINISHZONE:
+        return blkdev_zone_mgmt_ioctl(bdev, mode, cmd, arg);
     case BLKGETZONESZ:
         return put_uint(arg, bdev_zone_sectors(bdev));
     case BLKGETNRZONES:
-        return put_uint(arg, blkdev_nr_zones(bdev));
+        return put_uint(arg, blkdev_nr_zones(bdev->bd_disk));
     case HDIO_GETGEO:
         return blkdev_getgeo(bdev, argp);
     case BLKRAGET:
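[Illustration, not part of the patch: querying the zone-geometry ioctls dispatched above from userspace; both take a pointer to an unsigned 32-bit value.]

    #include <fcntl.h>
    #include <linux/blkzoned.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
        unsigned int nr_zones = 0, zone_sectors = 0;
        int fd;

        if (argc != 2)
            return 1;
        fd = open(argv[1], O_RDONLY);
        if (fd < 0) {
            perror("open");
            return 1;
        }
        if (ioctl(fd, BLKGETNRZONES, &nr_zones) ||
            ioctl(fd, BLKGETZONESZ, &zone_sectors)) {
            perror("ioctl");
            close(fd);
            return 1;
        }
        printf("%s: %u zones of %u sectors\n", argv[1], nr_zones, zone_sectors);
        close(fd);
        return 0;
    }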
@@ -459,56 +459,6 @@ static int drop_partitions(struct gendisk *disk, struct block_device *bdev)
     return 0;
 }
 
-static bool part_zone_aligned(struct gendisk *disk,
-                              struct block_device *bdev,
-                              sector_t from, sector_t size)
-{
-    unsigned int zone_sectors = bdev_zone_sectors(bdev);
-
-    /*
-     * If this function is called, then the disk is a zoned block device
-     * (host-aware or host-managed). This can be detected even if the
-     * zoned block device support is disabled (CONFIG_BLK_DEV_ZONED not
-     * set). In this case, however, only host-aware devices will be seen
-     * as a block device is not created for host-managed devices. Without
-     * zoned block device support, host-aware drives can still be used as
-     * regular block devices (no zone operation) and their zone size will
-     * be reported as 0. Allow this case.
-     */
-    if (!zone_sectors)
-        return true;
-
-    /*
-     * Check partition start and size alignement. If the drive has a
-     * smaller last runt zone, ignore it and allow the partition to
-     * use it. Check the zone size too: it should be a power of 2 number
-     * of sectors.
-     */
-    if (WARN_ON_ONCE(!is_power_of_2(zone_sectors))) {
-        u32 rem;
-
-        div_u64_rem(from, zone_sectors, &rem);
-        if (rem)
-            return false;
-        if ((from + size) < get_capacity(disk)) {
-            div_u64_rem(size, zone_sectors, &rem);
-            if (rem)
-                return false;
-        }
-
-    } else {
-
-        if (from & (zone_sectors - 1))
-            return false;
-        if ((from + size) < get_capacity(disk) &&
-            (size & (zone_sectors - 1)))
-            return false;
-
-    }
-
-    return true;
-}
-
 int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
 {
     struct parsed_partitions *state = NULL;
@@ -544,6 +494,14 @@ int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
         }
         return -EIO;
     }
+
+    /* Partitions are not supported on zoned block devices */
+    if (bdev_is_zoned(bdev)) {
+        pr_warn("%s: ignoring partition table on zoned block device\n",
+                disk->disk_name);
+        goto out;
+    }
+
     /*
     * If any partition code tried to read beyond EOD, try
     * unlocking native capacity even if partition table is
@@ -607,21 +565,6 @@ int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
         }
     }
 
-        /*
-         * On a zoned block device, partitions should be aligned on the
-         * device zone size (i.e. zone boundary crossing not allowed).
-         * Otherwise, resetting the write pointer of the last zone of
-         * one partition may impact the following partition.
-         */
-        if (bdev_is_zoned(bdev) &&
-            !part_zone_aligned(disk, bdev, from, size)) {
-            printk(KERN_WARNING
-                   "%s: p%d start %llu+%llu is not zone aligned\n",
-                   disk->disk_name, p, (unsigned long long) from,
-                   (unsigned long long) size);
-            continue;
-        }
-
         part = add_partition(disk, p, from, size,
                              state->parts[p].flags,
                              &state->parts[p].info);
@@ -635,6 +578,7 @@ int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
             md_autodetect_dev(part_to_dev(part)->devt);
 #endif
     }
+out:
     free_partitions(state);
     return 0;
 }
@@ -91,11 +91,13 @@ struct nullb {
 #ifdef CONFIG_BLK_DEV_ZONED
 int null_zone_init(struct nullb_device *dev);
 void null_zone_exit(struct nullb_device *dev);
-int null_zone_report(struct gendisk *disk, sector_t sector,
-                     struct blk_zone *zones, unsigned int *nr_zones);
+int null_report_zones(struct gendisk *disk, sector_t sector,
+                      unsigned int nr_zones, report_zones_cb cb, void *data);
 blk_status_t null_handle_zoned(struct nullb_cmd *cmd,
                                enum req_opf op, sector_t sector,
                                sector_t nr_sectors);
+size_t null_zone_valid_read_len(struct nullb *nullb,
+                                sector_t sector, unsigned int len);
 #else
 static inline int null_zone_init(struct nullb_device *dev)
 {
@@ -103,17 +105,18 @@ static inline int null_zone_init(struct nullb_device *dev)
     return -EINVAL;
 }
 static inline void null_zone_exit(struct nullb_device *dev) {}
-static inline int null_zone_report(struct gendisk *disk, sector_t sector,
-                                   struct blk_zone *zones,
-                                   unsigned int *nr_zones)
-{
-    return -EOPNOTSUPP;
-}
 static inline blk_status_t null_handle_zoned(struct nullb_cmd *cmd,
                                              enum req_opf op, sector_t sector,
                                              sector_t nr_sectors)
 {
     return BLK_STS_NOTSUPP;
 }
+static inline size_t null_zone_valid_read_len(struct nullb *nullb,
+                                              sector_t sector,
+                                              unsigned int len)
+{
+    return len;
+}
+#define null_report_zones	NULL
 #endif /* CONFIG_BLK_DEV_ZONED */
 #endif /* __NULL_BLK_H */
@@ -996,6 +996,16 @@ static int copy_from_nullb(struct nullb *nullb, struct page *dest,
     return 0;
 }
 
+static void nullb_fill_pattern(struct nullb *nullb, struct page *page,
+                               unsigned int len, unsigned int off)
+{
+    void *dst;
+
+    dst = kmap_atomic(page);
+    memset(dst + off, 0xFF, len);
+    kunmap_atomic(dst);
+}
+
 static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n)
 {
     size_t temp;
@@ -1036,10 +1046,24 @@ static int null_transfer(struct nullb *nullb, struct page *page,
     unsigned int len, unsigned int off, bool is_write, sector_t sector,
     bool is_fua)
 {
+    struct nullb_device *dev = nullb->dev;
+    unsigned int valid_len = len;
     int err = 0;
 
     if (!is_write) {
-        err = copy_from_nullb(nullb, page, off, sector, len);
+        if (dev->zoned)
+            valid_len = null_zone_valid_read_len(nullb,
+                sector, len);
+
+        if (valid_len) {
+            err = copy_from_nullb(nullb, page, off,
+                sector, valid_len);
+            off += valid_len;
+            len -= valid_len;
+        }
+
+        if (len)
+            nullb_fill_pattern(nullb, page, len, off);
         flush_dcache_page(page);
     } else {
         flush_dcache_page(page);
@@ -1418,20 +1442,9 @@ static void null_config_discard(struct nullb *nullb)
     blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q);
 }
 
-static int null_open(struct block_device *bdev, fmode_t mode)
-{
-    return 0;
-}
-
-static void null_release(struct gendisk *disk, fmode_t mode)
-{
-}
-
-static const struct block_device_operations null_fops = {
-    .owner = THIS_MODULE,
-    .open = null_open,
-    .release = null_release,
-    .report_zones = null_zone_report,
+static const struct block_device_operations null_ops = {
+    .owner = THIS_MODULE,
+    .report_zones = null_report_zones,
 };
 
 static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
@@ -1520,29 +1533,35 @@ static int init_driver_queues(struct nullb *nullb)
 
 static int null_gendisk_register(struct nullb *nullb)
 {
+    sector_t size = ((sector_t)nullb->dev->size * SZ_1M) >> SECTOR_SHIFT;
     struct gendisk *disk;
-    sector_t size;
 
     disk = nullb->disk = alloc_disk_node(1, nullb->dev->home_node);
     if (!disk)
         return -ENOMEM;
-    size = (sector_t)nullb->dev->size * 1024 * 1024ULL;
-    set_capacity(disk, size >> 9);
+    set_capacity(disk, size);
 
     disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
     disk->major = null_major;
     disk->first_minor = nullb->index;
-    disk->fops = &null_fops;
+    disk->fops = &null_ops;
     disk->private_data = nullb;
     disk->queue = nullb->q;
     strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
 
 #ifdef CONFIG_BLK_DEV_ZONED
     if (nullb->dev->zoned) {
-        int ret = blk_revalidate_disk_zones(disk);
-
-        if (ret != 0)
-            return ret;
+        if (queue_is_mq(nullb->q)) {
+            int ret = blk_revalidate_disk_zones(disk);
+            if (ret)
+                return ret;
+        } else {
+            blk_queue_chunk_sectors(nullb->q,
+                    nullb->dev->zone_size_sects);
+            nullb->q->nr_zones = blkdev_nr_zones(disk);
+        }
     }
 #endif
 
     add_disk(disk);
     return 0;
@@ -1568,7 +1587,7 @@ static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
     return blk_mq_alloc_tag_set(set);
 }
 
-static void null_validate_conf(struct nullb_device *dev)
+static int null_validate_conf(struct nullb_device *dev)
 {
     dev->blocksize = round_down(dev->blocksize, 512);
     dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);
@@ -1595,6 +1614,14 @@ static void null_validate_conf(struct nullb_device *dev)
     /* can not stop a queue */
     if (dev->queue_mode == NULL_Q_BIO)
         dev->mbps = 0;
+
+    if (dev->zoned &&
+        (!dev->zone_size || !is_power_of_2(dev->zone_size))) {
+        pr_err("zone_size must be power-of-two\n");
+        return -EINVAL;
+    }
+
+    return 0;
 }
 
 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
@@ -1627,7 +1654,9 @@ static int null_add_dev(struct nullb_device *dev)
     struct nullb *nullb;
     int rv;
 
-    null_validate_conf(dev);
+    rv = null_validate_conf(dev);
+    if (rv)
+        return rv;
 
     nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node);
     if (!nullb) {
@@ -1692,7 +1721,6 @@ static int null_add_dev(struct nullb_device *dev)
         if (rv)
             goto out_cleanup_blk_queue;
 
-        blk_queue_chunk_sectors(nullb->q, dev->zone_size_sects);
         nullb->q->limits.zoned = BLK_ZONED_HM;
         blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, nullb->q);
         blk_queue_required_elevator_features(nullb->q,
@@ -1753,11 +1781,6 @@ static int __init null_init(void)
         g_bs = PAGE_SIZE;
     }
 
-    if (!is_power_of_2(g_zone_size)) {
-        pr_err("zone_size must be power-of-two\n");
-        return -EINVAL;
-    }
-
     if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) {
         pr_err("invalid home_node value\n");
         g_home_node = NUMA_NO_NODE;
|
@ -66,22 +66,53 @@ void null_zone_exit(struct nullb_device *dev)
|
||||
kvfree(dev->zones);
|
||||
}
|
||||
|
||||
int null_zone_report(struct gendisk *disk, sector_t sector,
|
||||
struct blk_zone *zones, unsigned int *nr_zones)
|
||||
int null_report_zones(struct gendisk *disk, sector_t sector,
|
||||
unsigned int nr_zones, report_zones_cb cb, void *data)
|
||||
{
|
||||
struct nullb *nullb = disk->private_data;
|
||||
struct nullb_device *dev = nullb->dev;
|
||||
unsigned int zno, nrz = 0;
|
||||
unsigned int first_zone, i;
|
||||
struct blk_zone zone;
|
||||
int error;
|
||||
|
||||
zno = null_zone_no(dev, sector);
|
||||
if (zno < dev->nr_zones) {
|
||||
nrz = min_t(unsigned int, *nr_zones, dev->nr_zones - zno);
|
||||
memcpy(zones, &dev->zones[zno], nrz * sizeof(struct blk_zone));
|
||||
first_zone = null_zone_no(dev, sector);
|
||||
if (first_zone >= dev->nr_zones)
|
||||
return 0;
|
||||
|
||||
nr_zones = min(nr_zones, dev->nr_zones - first_zone);
|
||||
for (i = 0; i < nr_zones; i++) {
|
||||
/*
|
||||
* Stacked DM target drivers will remap the zone information by
|
||||
* modifying the zone information passed to the report callback.
|
||||
* So use a local copy to avoid corruption of the device zone
|
||||
* array.
|
||||
*/
|
||||
memcpy(&zone, &dev->zones[first_zone + i],
|
||||
sizeof(struct blk_zone));
|
||||
error = cb(&zone, i, data);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
|
||||
*nr_zones = nrz;
|
||||
return nr_zones;
|
||||
}
|
||||
|
||||
return 0;
|
||||
size_t null_zone_valid_read_len(struct nullb *nullb,
|
||||
sector_t sector, unsigned int len)
|
||||
{
|
||||
struct nullb_device *dev = nullb->dev;
|
||||
struct blk_zone *zone = &dev->zones[null_zone_no(dev, sector)];
|
||||
unsigned int nr_sectors = len >> SECTOR_SHIFT;
|
||||
|
||||
/* Read must be below the write pointer position */
|
||||
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
|
||||
sector + nr_sectors <= zone->wp)
|
||||
return len;
|
||||
|
||||
if (sector > zone->wp)
|
||||
return 0;
|
||||
|
||||
return (zone->wp - sector) << SECTOR_SHIFT;
|
||||
}
|
||||
|
||||
static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
|
||||
@ -121,8 +152,7 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
|
||||
static blk_status_t null_zone_reset(struct nullb_cmd *cmd, sector_t sector)
|
||||
{
|
||||
struct nullb_device *dev = cmd->nq->dev;
|
||||
unsigned int zno = null_zone_no(dev, sector);
|
||||
struct blk_zone *zone = &dev->zones[zno];
|
||||
struct blk_zone *zone = &dev->zones[null_zone_no(dev, sector)];
|
||||
size_t i;
|
||||
|
||||
switch (req_op(cmd->rq)) {
|
||||
|
@ -460,21 +460,15 @@ static int flakey_prepare_ioctl(struct dm_target *ti, struct block_device **bdev
|
||||
}
|
||||
|
||||
#ifdef CONFIG_BLK_DEV_ZONED
|
||||
static int flakey_report_zones(struct dm_target *ti, sector_t sector,
|
||||
struct blk_zone *zones, unsigned int *nr_zones)
|
||||
static int flakey_report_zones(struct dm_target *ti,
|
||||
struct dm_report_zones_args *args, unsigned int nr_zones)
|
||||
{
|
||||
struct flakey_c *fc = ti->private;
|
||||
int ret;
|
||||
sector_t sector = flakey_map_sector(ti, args->next_sector);
|
||||
|
||||
/* Do report and remap it */
|
||||
ret = blkdev_report_zones(fc->dev->bdev, flakey_map_sector(ti, sector),
|
||||
zones, nr_zones);
|
||||
if (ret != 0)
|
||||
return ret;
|
||||
|
||||
if (*nr_zones)
|
||||
dm_remap_zone_report(ti, fc->start, zones, nr_zones);
|
||||
return 0;
|
||||
args->start = fc->start;
|
||||
return blkdev_report_zones(fc->dev->bdev, sector, nr_zones,
|
||||
dm_report_zones_cb, args);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -136,21 +136,15 @@ static int linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev
|
||||
}
|
||||
|
||||
#ifdef CONFIG_BLK_DEV_ZONED
|
||||
static int linear_report_zones(struct dm_target *ti, sector_t sector,
|
||||
struct blk_zone *zones, unsigned int *nr_zones)
|
||||
static int linear_report_zones(struct dm_target *ti,
|
||||
struct dm_report_zones_args *args, unsigned int nr_zones)
|
||||
{
|
||||
struct linear_c *lc = (struct linear_c *) ti->private;
|
||||
int ret;
|
||||
struct linear_c *lc = ti->private;
|
||||
sector_t sector = linear_map_sector(ti, args->next_sector);
|
||||
|
||||
/* Do report and remap it */
|
||||
ret = blkdev_report_zones(lc->dev->bdev, linear_map_sector(ti, sector),
|
||||
zones, nr_zones);
|
||||
if (ret != 0)
|
||||
return ret;
|
||||
|
||||
if (*nr_zones)
|
||||
dm_remap_zone_report(ti, lc->start, zones, nr_zones);
|
||||
return 0;
|
||||
args->start = lc->start;
|
||||
return blkdev_report_zones(lc->dev->bdev, sector, nr_zones,
|
||||
dm_report_zones_cb, args);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -1965,12 +1965,14 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
|
||||
/*
|
||||
* For a zoned target, the number of zones should be updated for the
|
||||
* correct value to be exposed in sysfs queue/nr_zones. For a BIO based
|
||||
* target, this is all that is needed. For a request based target, the
|
||||
* queue zone bitmaps must also be updated.
|
||||
* Use blk_revalidate_disk_zones() to handle this.
|
||||
* target, this is all that is needed.
|
||||
*/
|
||||
if (blk_queue_is_zoned(q))
|
||||
blk_revalidate_disk_zones(t->md->disk);
|
||||
#ifdef CONFIG_BLK_DEV_ZONED
|
||||
if (blk_queue_is_zoned(q)) {
|
||||
WARN_ON_ONCE(queue_is_mq(q));
|
||||
q->nr_zones = blkdev_nr_zones(t->md->disk);
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Allow reads to exceed readahead limits */
|
||||
q->backing_dev_info->io_pages = limits->max_sectors >> (PAGE_SHIFT - 9);
|
||||
|
@ -1088,9 +1088,10 @@ static int dmz_load_sb(struct dmz_metadata *zmd)
|
||||
/*
|
||||
* Initialize a zone descriptor.
|
||||
*/
|
||||
static int dmz_init_zone(struct dmz_metadata *zmd, struct dm_zone *zone,
|
||||
struct blk_zone *blkz)
|
||||
static int dmz_init_zone(struct blk_zone *blkz, unsigned int idx, void *data)
|
||||
{
|
||||
struct dmz_metadata *zmd = data;
|
||||
struct dm_zone *zone = &zmd->zones[idx];
|
||||
struct dmz_dev *dev = zmd->dev;
|
||||
|
||||
/* Ignore the eventual last runt (smaller) zone */
|
||||
@ -1104,26 +1105,29 @@ static int dmz_init_zone(struct dmz_metadata *zmd, struct dm_zone *zone,
|
||||
atomic_set(&zone->refcount, 0);
|
||||
zone->chunk = DMZ_MAP_UNMAPPED;
|
||||
|
||||
if (blkz->type == BLK_ZONE_TYPE_CONVENTIONAL) {
|
||||
switch (blkz->type) {
|
||||
case BLK_ZONE_TYPE_CONVENTIONAL:
|
||||
set_bit(DMZ_RND, &zone->flags);
|
||||
zmd->nr_rnd_zones++;
|
||||
} else if (blkz->type == BLK_ZONE_TYPE_SEQWRITE_REQ ||
|
||||
blkz->type == BLK_ZONE_TYPE_SEQWRITE_PREF) {
|
||||
break;
|
||||
case BLK_ZONE_TYPE_SEQWRITE_REQ:
|
||||
case BLK_ZONE_TYPE_SEQWRITE_PREF:
|
||||
set_bit(DMZ_SEQ, &zone->flags);
|
||||
} else
|
||||
break;
|
||||
default:
|
||||
return -ENXIO;
|
||||
|
||||
if (blkz->cond == BLK_ZONE_COND_OFFLINE)
|
||||
set_bit(DMZ_OFFLINE, &zone->flags);
|
||||
else if (blkz->cond == BLK_ZONE_COND_READONLY)
|
||||
set_bit(DMZ_READ_ONLY, &zone->flags);
|
||||
}
|
||||
|
||||
if (dmz_is_rnd(zone))
|
||||
zone->wp_block = 0;
|
||||
else
|
||||
zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);
|
||||
|
||||
if (!dmz_is_offline(zone) && !dmz_is_readonly(zone)) {
|
||||
if (blkz->cond == BLK_ZONE_COND_OFFLINE)
|
||||
set_bit(DMZ_OFFLINE, &zone->flags);
|
||||
else if (blkz->cond == BLK_ZONE_COND_READONLY)
|
||||
set_bit(DMZ_READ_ONLY, &zone->flags);
|
||||
else {
|
||||
zmd->nr_useable_zones++;
|
||||
if (dmz_is_rnd(zone)) {
|
||||
zmd->nr_rnd_zones++;
|
||||
@ -1146,12 +1150,6 @@ static void dmz_drop_zones(struct dmz_metadata *zmd)
|
||||
zmd->zones = NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* The size of a zone report in number of zones.
|
||||
* This results in 4096*64B=256KB report zones commands.
|
||||
*/
|
||||
#define DMZ_REPORT_NR_ZONES 4096
|
||||
|
||||
/*
|
||||
* Allocate and initialize zone descriptors using the zone
|
||||
* information from disk.
|
||||
@ -1159,11 +1157,7 @@ static void dmz_drop_zones(struct dmz_metadata *zmd)
|
||||
static int dmz_init_zones(struct dmz_metadata *zmd)
|
||||
{
|
||||
struct dmz_dev *dev = zmd->dev;
|
||||
struct dm_zone *zone;
|
||||
struct blk_zone *blkz;
|
||||
unsigned int nr_blkz;
|
||||
sector_t sector = 0;
|
||||
int i, ret = 0;
|
||||
int ret;
|
||||
|
||||
/* Init */
|
||||
zmd->zone_bitmap_size = dev->zone_nr_blocks >> 3;
|
||||
@ -1177,54 +1171,38 @@ static int dmz_init_zones(struct dmz_metadata *zmd)
|
||||
dmz_dev_info(dev, "Using %zu B for zone information",
|
||||
sizeof(struct dm_zone) * dev->nr_zones);
|
||||
|
||||
/* Get zone information */
|
||||
nr_blkz = DMZ_REPORT_NR_ZONES;
|
||||
blkz = kcalloc(nr_blkz, sizeof(struct blk_zone), GFP_KERNEL);
|
||||
if (!blkz) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* Get zone information and initialize zone descriptors.
|
||||
* At the same time, determine where the super block
|
||||
* should be: first block of the first randomly writable
|
||||
* zone.
|
||||
* Get zone information and initialize zone descriptors. At the same
|
||||
* time, determine where the super block should be: first block of the
|
||||
* first randomly writable zone.
|
||||
*/
|
||||
zone = zmd->zones;
|
||||
while (sector < dev->capacity) {
|
||||
/* Get zone information */
|
||||
nr_blkz = DMZ_REPORT_NR_ZONES;
|
||||
ret = blkdev_report_zones(dev->bdev, sector, blkz, &nr_blkz);
|
||||
if (ret) {
|
||||
dmz_dev_err(dev, "Report zones failed %d", ret);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!nr_blkz)
|
||||
break;
|
||||
|
||||
/* Process report */
|
||||
for (i = 0; i < nr_blkz; i++) {
|
||||
ret = dmz_init_zone(zmd, zone, &blkz[i]);
|
||||
if (ret)
|
||||
goto out;
|
||||
sector += dev->zone_nr_sectors;
|
||||
zone++;
|
||||
}
|
||||
}
|
||||
|
||||
/* The entire zone configuration of the disk should now be known */
|
||||
if (sector < dev->capacity) {
|
||||
dmz_dev_err(dev, "Failed to get correct zone information");
|
||||
ret = -ENXIO;
|
||||
}
|
||||
out:
|
||||
kfree(blkz);
|
||||
if (ret)
|
||||
ret = blkdev_report_zones(dev->bdev, 0, BLK_ALL_ZONES, dmz_init_zone,
|
||||
zmd);
|
||||
if (ret < 0) {
|
||||
dmz_drop_zones(zmd);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dmz_update_zone_cb(struct blk_zone *blkz, unsigned int idx,
|
||||
void *data)
|
||||
{
|
||||
struct dm_zone *zone = data;
|
||||
|
||||
clear_bit(DMZ_OFFLINE, &zone->flags);
|
||||
clear_bit(DMZ_READ_ONLY, &zone->flags);
|
||||
if (blkz->cond == BLK_ZONE_COND_OFFLINE)
|
||||
set_bit(DMZ_OFFLINE, &zone->flags);
|
||||
else if (blkz->cond == BLK_ZONE_COND_READONLY)
|
||||
set_bit(DMZ_READ_ONLY, &zone->flags);
|
||||
|
||||
if (dmz_is_seq(zone))
|
||||
zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);
|
||||
else
|
||||
zone->wp_block = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1232,9 +1210,7 @@ static int dmz_init_zones(struct dmz_metadata *zmd)
|
||||
*/
|
||||
static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
|
||||
{
|
||||
unsigned int nr_blkz = 1;
|
||||
unsigned int noio_flag;
|
||||
struct blk_zone blkz;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
@ -1244,30 +1220,19 @@ static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
|
||||
* GFP_NOIO was specified.
|
||||
*/
|
||||
noio_flag = memalloc_noio_save();
|
||||
ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone),
|
||||
&blkz, &nr_blkz);
|
||||
ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone), 1,
|
||||
dmz_update_zone_cb, zone);
|
||||
memalloc_noio_restore(noio_flag);
|
||||
if (!nr_blkz)
|
||||
|
||||
if (ret == 0)
|
||||
ret = -EIO;
|
||||
if (ret) {
|
||||
if (ret < 0) {
|
||||
dmz_dev_err(zmd->dev, "Get zone %u report failed",
|
||||
dmz_id(zmd, zone));
|
||||
dmz_check_bdev(zmd->dev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
clear_bit(DMZ_OFFLINE, &zone->flags);
|
||||
clear_bit(DMZ_READ_ONLY, &zone->flags);
|
||||
if (blkz.cond == BLK_ZONE_COND_OFFLINE)
|
||||
set_bit(DMZ_OFFLINE, &zone->flags);
|
||||
else if (blkz.cond == BLK_ZONE_COND_READONLY)
|
||||
set_bit(DMZ_READ_ONLY, &zone->flags);
|
||||
|
||||
if (dmz_is_seq(zone))
|
||||
zone->wp_block = dmz_sect2blk(blkz.wp - blkz.start);
|
||||
else
|
||||
zone->wp_block = 0;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1321,9 +1286,9 @@ static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
|
||||
if (!dmz_is_empty(zone) || dmz_seq_write_err(zone)) {
|
||||
struct dmz_dev *dev = zmd->dev;
|
||||
|
||||
ret = blkdev_reset_zones(dev->bdev,
|
||||
dmz_start_sect(zmd, zone),
|
||||
dev->zone_nr_sectors, GFP_NOIO);
|
||||
ret = blkdev_zone_mgmt(dev->bdev, REQ_OP_ZONE_RESET,
|
||||
dmz_start_sect(zmd, zone),
|
||||
dev->zone_nr_sectors, GFP_NOIO);
|
||||
if (ret) {
|
||||
dmz_dev_err(dev, "Reset zone %u failed %d",
|
||||
dmz_id(zmd, zone), ret);
|
||||
|
@@ -727,7 +727,7 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path)
     dev->zone_nr_blocks = dmz_sect2blk(dev->zone_nr_sectors);
     dev->zone_nr_blocks_shift = ilog2(dev->zone_nr_blocks);
 
-    dev->nr_zones = blkdev_nr_zones(dev->bdev);
+    dev->nr_zones = blkdev_nr_zones(dev->bdev->bd_disk);
 
     dmz->dev = dev;
 
drivers/md/dm.c
@ -440,14 +440,48 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
|
||||
return dm_get_geometry(md, geo);
|
||||
}
|
||||
|
||||
static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
|
||||
struct blk_zone *zones, unsigned int *nr_zones)
|
||||
{
|
||||
#ifdef CONFIG_BLK_DEV_ZONED
|
||||
int dm_report_zones_cb(struct blk_zone *zone, unsigned int idx, void *data)
|
||||
{
|
||||
struct dm_report_zones_args *args = data;
|
||||
sector_t sector_diff = args->tgt->begin - args->start;
|
||||
|
||||
/*
|
||||
* Ignore zones beyond the target range.
|
||||
*/
|
||||
if (zone->start >= args->start + args->tgt->len)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Remap the start sector and write pointer position of the zone
|
||||
* to match its position in the target range.
|
||||
*/
|
||||
zone->start += sector_diff;
|
||||
if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) {
|
||||
if (zone->cond == BLK_ZONE_COND_FULL)
|
||||
zone->wp = zone->start + zone->len;
|
||||
else if (zone->cond == BLK_ZONE_COND_EMPTY)
|
||||
zone->wp = zone->start;
|
||||
else
|
||||
zone->wp += sector_diff;
|
||||
}
|
||||
|
||||
args->next_sector = zone->start + zone->len;
|
||||
return args->orig_cb(zone, args->zone_idx++, args->orig_data);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dm_report_zones_cb);
|
||||
|
||||
static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
|
||||
unsigned int nr_zones, report_zones_cb cb, void *data)
|
||||
{
|
||||
struct mapped_device *md = disk->private_data;
|
||||
struct dm_target *tgt;
|
||||
struct dm_table *map;
|
||||
int srcu_idx, ret;
|
||||
struct dm_report_zones_args args = {
|
||||
.next_sector = sector,
|
||||
.orig_data = data,
|
||||
.orig_cb = cb,
|
||||
};
|
||||
|
||||
if (dm_suspended_md(md))
|
||||
return -EAGAIN;
|
||||
@ -456,38 +490,30 @@ static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
|
||||
if (!map)
|
||||
return -EIO;
|
||||
|
||||
tgt = dm_table_find_target(map, sector);
|
||||
if (!tgt) {
|
||||
ret = -EIO;
|
||||
goto out;
|
||||
}
|
||||
do {
|
||||
struct dm_target *tgt;
|
||||
|
||||
/*
|
||||
* If we are executing this, we already know that the block device
|
||||
* is a zoned device and so each target should have support for that
|
||||
* type of drive. A missing report_zones method means that the target
|
||||
* driver has a problem.
|
||||
*/
|
||||
if (WARN_ON(!tgt->type->report_zones)) {
|
||||
ret = -EIO;
|
||||
goto out;
|
||||
}
|
||||
tgt = dm_table_find_target(map, args.next_sector);
|
||||
if (WARN_ON_ONCE(!tgt->type->report_zones)) {
|
||||
ret = -EIO;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* blkdev_report_zones() will loop and call this again to cover all the
|
||||
* zones of the target, eventually moving on to the next target.
|
||||
* So there is no need to loop here trying to fill the entire array
|
||||
* of zones.
|
||||
*/
|
||||
ret = tgt->type->report_zones(tgt, sector, zones, nr_zones);
|
||||
args.tgt = tgt;
|
||||
ret = tgt->type->report_zones(tgt, &args, nr_zones);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
} while (args.zone_idx < nr_zones &&
|
||||
args.next_sector < get_capacity(disk));
|
||||
|
||||
ret = args.zone_idx;
|
||||
out:
|
||||
dm_put_live_table(md, srcu_idx);
|
||||
return ret;
|
||||
#else
|
||||
return -ENOTSUPP;
|
||||
#endif
|
||||
}
|
||||
#else
|
||||
#define dm_blk_report_zones NULL
|
||||
#endif /* CONFIG_BLK_DEV_ZONED */
|
||||
|
||||
static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
|
||||
struct block_device **bdev)
|
||||
@ -1212,54 +1238,6 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
|
||||
|
||||
/*
|
||||
* The zone descriptors obtained with a zone report indicate
|
||||
* zone positions within the underlying device of the target. The zone
|
||||
* descriptors must be remapped to match their position within the dm device.
|
||||
* The caller target should obtain the zones information using
|
||||
* blkdev_report_zones() to ensure that remapping for partition offset is
|
||||
* already handled.
|
||||
*/
|
||||
void dm_remap_zone_report(struct dm_target *ti, sector_t start,
|
||||
struct blk_zone *zones, unsigned int *nr_zones)
|
||||
{
|
||||
#ifdef CONFIG_BLK_DEV_ZONED
|
||||
struct blk_zone *zone;
|
||||
unsigned int nrz = *nr_zones;
|
||||
int i;
|
||||
|
||||
/*
|
||||
* Remap the start sector and write pointer position of the zones in
|
||||
* the array. Since we may have obtained from the target underlying
|
||||
* device more zones that the target size, also adjust the number
|
||||
* of zones.
|
||||
*/
|
||||
for (i = 0; i < nrz; i++) {
|
||||
zone = zones + i;
|
||||
if (zone->start >= start + ti->len) {
|
||||
memset(zone, 0, sizeof(struct blk_zone) * (nrz - i));
|
||||
break;
|
||||
}
|
||||
|
||||
zone->start = zone->start + ti->begin - start;
|
||||
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
|
||||
continue;
|
||||
|
||||
if (zone->cond == BLK_ZONE_COND_FULL)
|
||||
zone->wp = zone->start + zone->len;
|
||||
else if (zone->cond == BLK_ZONE_COND_EMPTY)
|
||||
zone->wp = zone->start;
|
||||
else
|
||||
zone->wp = zone->wp + ti->begin - start;
|
||||
}
|
||||
|
||||
*nr_zones = i;
|
||||
#else /* !CONFIG_BLK_DEV_ZONED */
|
||||
*nr_zones = 0;
|
||||
#endif
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dm_remap_zone_report);
|
||||
|
||||
static blk_qc_t __map_bio(struct dm_target_io *tio)
|
||||
{
|
||||
int r;
|
||||
|
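Under the reworked interface, a dm target's report_zones method no longer fills an array; it forwards to blkdev_report_zones() with dm_report_zones_cb, which remaps each reported zone into the dm device's sector range before chaining to the original callback. A sketch of how a simple remapping target would wire this up (names are assumptions, loosely modeled on dm-linear):

/* "struct example_target" and example_report_zones() are hypothetical;
 * dm_report_zones_cb and struct dm_report_zones_args come from the
 * hunk above. */
static int example_report_zones(struct dm_target *ti,
		struct dm_report_zones_args *args, unsigned int nr_zones)
{
	struct example_target *et = ti->private;
	sector_t sector = et->start + dm_target_offset(ti, args->next_sector);

	/* Tell the callback where this target's mapping starts so it can
	 * shift each zone back into the dm device's sector range. */
	args->start = et->start;
	return blkdev_report_zones(et->dev->bdev, sector, nr_zones,
				   dm_report_zones_cb, args);
}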
@ -122,8 +122,6 @@ static void sd_eh_reset(struct scsi_cmnd *);
 static int sd_eh_action(struct scsi_cmnd *, int);
 static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
 static void scsi_disk_release(struct device *cdev);
-static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *);
-static void sd_print_result(const struct scsi_disk *, const char *, int);
 
 static DEFINE_IDA(sd_index_ida);
 
@ -1291,9 +1289,17 @@ static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
 	case REQ_OP_WRITE:
 		return sd_setup_read_write_cmnd(cmd);
 	case REQ_OP_ZONE_RESET:
-		return sd_zbc_setup_reset_cmnd(cmd, false);
+		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
+						   false);
 	case REQ_OP_ZONE_RESET_ALL:
-		return sd_zbc_setup_reset_cmnd(cmd, true);
+		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
+						   true);
+	case REQ_OP_ZONE_OPEN:
+		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_OPEN_ZONE, false);
+	case REQ_OP_ZONE_CLOSE:
+		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_CLOSE_ZONE, false);
+	case REQ_OP_ZONE_FINISH:
+		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_FINISH_ZONE, false);
 	default:
 		WARN_ON_ONCE(1);
 		return BLK_STS_NOTSUPP;
@ -1961,6 +1967,9 @@ static int sd_done(struct scsi_cmnd *SCpnt)
 	case REQ_OP_WRITE_SAME:
 	case REQ_OP_ZONE_RESET:
 	case REQ_OP_ZONE_RESET_ALL:
+	case REQ_OP_ZONE_OPEN:
+	case REQ_OP_ZONE_CLOSE:
+	case REQ_OP_ZONE_FINISH:
 		if (!result) {
 			good_bytes = blk_rq_bytes(req);
 			scsi_set_resid(SCpnt, 0);
@ -3705,15 +3714,13 @@ static void __exit exit_sd(void)
 module_init(init_sd);
 module_exit(exit_sd);
 
-static void sd_print_sense_hdr(struct scsi_disk *sdkp,
-			       struct scsi_sense_hdr *sshdr)
+void sd_print_sense_hdr(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
 {
 	scsi_print_sense_hdr(sdkp->device,
 			     sdkp->disk ? sdkp->disk->disk_name : NULL, sshdr);
 }
 
-static void sd_print_result(const struct scsi_disk *sdkp, const char *msg,
-			    int result)
+void sd_print_result(const struct scsi_disk *sdkp, const char *msg, int result)
 {
 	const char *hb_string = scsi_hostbyte_string(result);
 	const char *db_string = scsi_driverbyte_string(result);
@ -3728,4 +3735,3 @@ static void sd_print_result(const struct scsi_disk *sdkp, const char *msg,
 			  "%s: Result: hostbyte=0x%02x driverbyte=0x%02x\n",
 			  msg, host_byte(result), driver_byte(result));
 }
-
@ -209,11 +209,12 @@ static inline int sd_is_zoned(struct scsi_disk *sdkp)
 
 extern int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buffer);
 extern void sd_zbc_print_zones(struct scsi_disk *sdkp);
-extern blk_status_t sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd, bool all);
+blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
+					 unsigned char op, bool all);
 extern void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
 			    struct scsi_sense_hdr *sshdr);
-extern int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
-			       struct blk_zone *zones, unsigned int *nr_zones);
+int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
+			unsigned int nr_zones, report_zones_cb cb, void *data);
 
 #else /* CONFIG_BLK_DEV_ZONED */
 
@ -225,8 +226,9 @@ static inline int sd_zbc_read_zones(struct scsi_disk *sdkp,
 
 static inline void sd_zbc_print_zones(struct scsi_disk *sdkp) {}
 
-static inline blk_status_t sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd,
-						   bool all)
+static inline blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
+						       unsigned char op,
+						       bool all)
 {
 	return BLK_STS_TARGET;
 }
@ -239,4 +241,7 @@ static inline void sd_zbc_complete(struct scsi_cmnd *cmd,
 
 #endif /* CONFIG_BLK_DEV_ZONED */
 
+void sd_print_sense_hdr(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr);
+void sd_print_result(const struct scsi_disk *sdkp, const char *msg, int result);
+
 #endif /* _SCSI_DISK_H */
@ -19,34 +19,27 @@
 
 #include "sd.h"
 
 /**
  * sd_zbc_parse_report - Convert a zone descriptor to a struct blk_zone,
  * @sdkp: The disk the report originated from
  * @buf: Address of the report zone descriptor
  * @zone: the destination zone structure
  *
  * All LBA sized values are converted to 512B sectors unit.
  */
-static void sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
-				struct blk_zone *zone)
+static int sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
+			       unsigned int idx, report_zones_cb cb, void *data)
 {
 	struct scsi_device *sdp = sdkp->device;
+	struct blk_zone zone = { 0 };
 
-	memset(zone, 0, sizeof(struct blk_zone));
-
-	zone->type = buf[0] & 0x0f;
-	zone->cond = (buf[1] >> 4) & 0xf;
+	zone.type = buf[0] & 0x0f;
+	zone.cond = (buf[1] >> 4) & 0xf;
 	if (buf[1] & 0x01)
-		zone->reset = 1;
+		zone.reset = 1;
 	if (buf[1] & 0x02)
-		zone->non_seq = 1;
-
-	zone->len = logical_to_sectors(sdp, get_unaligned_be64(&buf[8]));
-	zone->start = logical_to_sectors(sdp, get_unaligned_be64(&buf[16]));
-	zone->wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24]));
-	if (zone->type != ZBC_ZONE_TYPE_CONV &&
-	    zone->cond == ZBC_ZONE_COND_FULL)
-		zone->wp = zone->start + zone->len;
+		zone.non_seq = 1;
+
+	zone.len = logical_to_sectors(sdp, get_unaligned_be64(&buf[8]));
+	zone.start = logical_to_sectors(sdp, get_unaligned_be64(&buf[16]));
+	zone.wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24]));
+	if (zone.type != ZBC_ZONE_TYPE_CONV &&
+	    zone.cond == ZBC_ZONE_COND_FULL)
+		zone.wp = zone.start + zone.len;
+
+	return cb(&zone, idx, data);
 }
 
 /**
@ -87,9 +80,11 @@ static int sd_zbc_do_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
 			      timeout, SD_MAX_RETRIES, NULL);
 	if (result) {
 		sd_printk(KERN_ERR, sdkp,
-			  "REPORT ZONES lba %llu failed with %d/%d\n",
-			  (unsigned long long)lba,
-			  host_byte(result), driver_byte(result));
+			  "REPORT ZONES start lba %llu failed\n", lba);
+		sd_print_result(sdkp, "REPORT ZONES", result);
+		if (driver_byte(result) == DRIVER_SENSE &&
+		    scsi_sense_valid(&sshdr))
+			sd_print_sense_hdr(sdkp, &sshdr);
 		return -EIO;
 	}
 
@ -104,11 +99,6 @@ static int sd_zbc_do_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
 	return 0;
 }
 
-/*
- * Maximum number of zones to get with one report zones command.
- */
-#define SD_ZBC_REPORT_MAX_ZONES		8192U
-
 /**
  * Allocate a buffer for report zones reply.
  * @sdkp: The target disk
@ -138,63 +128,24 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
 	 * sure that the allocated buffer can always be mapped by limiting the
 	 * number of pages allocated to the HBA max segments limit.
 	 */
-	nr_zones = min(nr_zones, SD_ZBC_REPORT_MAX_ZONES);
-	bufsize = roundup((nr_zones + 1) * 64, 512);
+	nr_zones = min(nr_zones, sdkp->nr_zones);
+	bufsize = roundup((nr_zones + 1) * 64, SECTOR_SIZE);
 	bufsize = min_t(size_t, bufsize,
 			queue_max_hw_sectors(q) << SECTOR_SHIFT);
 	bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT);
 
-	buf = vzalloc(bufsize);
-	if (buf)
-		*buflen = bufsize;
-
-	return buf;
-}
-
-/**
- * sd_zbc_report_zones - Disk report zones operation.
- * @disk: The target disk
- * @sector: Start 512B sector of the report
- * @zones: Array of zone descriptors
- * @nr_zones: Number of descriptors in the array
- *
- * Execute a report zones command on the target disk.
- */
-int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
-			struct blk_zone *zones, unsigned int *nr_zones)
-{
-	struct scsi_disk *sdkp = scsi_disk(disk);
-	unsigned int i, nrz = *nr_zones;
-	unsigned char *buf;
-	size_t buflen = 0, offset = 0;
-	int ret = 0;
-
-	if (!sd_is_zoned(sdkp))
-		/* Not a zoned device */
-		return -EOPNOTSUPP;
-
-	buf = sd_zbc_alloc_report_buffer(sdkp, nrz, &buflen);
-	if (!buf)
-		return -ENOMEM;
-
-	ret = sd_zbc_do_report_zones(sdkp, buf, buflen,
-			sectors_to_logical(sdkp->device, sector), true);
-	if (ret)
-		goto out;
-
-	nrz = min(nrz, get_unaligned_be32(&buf[0]) / 64);
-	for (i = 0; i < nrz; i++) {
-		offset += 64;
-		sd_zbc_parse_report(sdkp, buf + offset, zones);
-		zones++;
+	while (bufsize >= SECTOR_SIZE) {
+		buf = __vmalloc(bufsize,
+				GFP_KERNEL | __GFP_ZERO | __GFP_NORETRY,
+				PAGE_KERNEL);
+		if (buf) {
+			*buflen = bufsize;
+			return buf;
+		}
+		bufsize >>= 1;
 	}
 
-	*nr_zones = nrz;
-
-out:
-	kvfree(buf);
-
-	return ret;
+	return NULL;
 }
 
 /**
@ -206,14 +157,65 @@ static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp)
 	return logical_to_sectors(sdkp->device, sdkp->zone_blocks);
 }
 
+int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
+			unsigned int nr_zones, report_zones_cb cb, void *data)
+{
+	struct scsi_disk *sdkp = scsi_disk(disk);
+	unsigned int nr, i;
+	unsigned char *buf;
+	size_t offset, buflen = 0;
+	int zone_idx = 0;
+	int ret;
+
+	if (!sd_is_zoned(sdkp))
+		/* Not a zoned device */
+		return -EOPNOTSUPP;
+
+	buf = sd_zbc_alloc_report_buffer(sdkp, nr_zones, &buflen);
+	if (!buf)
+		return -ENOMEM;
+
+	while (zone_idx < nr_zones && sector < get_capacity(disk)) {
+		ret = sd_zbc_do_report_zones(sdkp, buf, buflen,
+				sectors_to_logical(sdkp->device, sector), true);
+		if (ret)
+			goto out;
+
+		offset = 0;
+		nr = min(nr_zones, get_unaligned_be32(&buf[0]) / 64);
+		if (!nr)
+			break;
+
+		for (i = 0; i < nr && zone_idx < nr_zones; i++) {
+			offset += 64;
+			ret = sd_zbc_parse_report(sdkp, buf + offset, zone_idx,
+						  cb, data);
+			if (ret)
+				goto out;
+			zone_idx++;
+		}
+
+		sector += sd_zbc_zone_sectors(sdkp) * i;
+	}
+
+	ret = zone_idx;
+out:
+	kvfree(buf);
+	return ret;
+}
+
 /**
- * sd_zbc_setup_reset_cmnd - Prepare a RESET WRITE POINTER scsi command.
+ * sd_zbc_setup_zone_mgmt_cmnd - Prepare a zone ZBC_OUT command. The operations
+ *			can be RESET WRITE POINTER, OPEN, CLOSE or FINISH.
  * @cmd: the command to setup
- * @all: Reset all zones control.
+ * @op: Operation to be performed
+ * @all: All zones control
  *
- * Called from sd_init_command() for a REQ_OP_ZONE_RESET request.
+ * Called from sd_init_command() for REQ_OP_ZONE_RESET, REQ_OP_ZONE_RESET_ALL,
+ * REQ_OP_ZONE_OPEN, REQ_OP_ZONE_CLOSE or REQ_OP_ZONE_FINISH requests.
  */
-blk_status_t sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd, bool all)
+blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
+					 unsigned char op, bool all)
 {
 	struct request *rq = cmd->request;
 	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
@ -234,7 +236,7 @@ blk_status_t sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd, bool all)
 	cmd->cmd_len = 16;
 	memset(cmd->cmnd, 0, cmd->cmd_len);
 	cmd->cmnd[0] = ZBC_OUT;
-	cmd->cmnd[1] = ZO_RESET_WRITE_POINTER;
+	cmd->cmnd[1] = op;
 	if (all)
 		cmd->cmnd[14] = 0x1;
 	else
@ -263,14 +265,14 @@ void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
 	int result = cmd->result;
 	struct request *rq = cmd->request;
 
-	if (req_op(rq) == REQ_OP_ZONE_RESET &&
+	if (op_is_zone_mgmt(req_op(rq)) &&
 	    result &&
 	    sshdr->sense_key == ILLEGAL_REQUEST &&
 	    sshdr->asc == 0x24) {
 		/*
-		 * INVALID FIELD IN CDB error: reset of a conventional
-		 * zone was attempted. Nothing to worry about, so be
-		 * quiet about the error.
+		 * INVALID FIELD IN CDB error: a zone management command was
+		 * attempted on a conventional zone. Nothing to worry about,
+		 * so be quiet about the error.
 		 */
 		rq->rq_flags |= RQF_QUIET;
 	}
@ -335,32 +337,18 @@ static int sd_zbc_check_zoned_characteristics(struct scsi_disk *sdkp,
  * Returns the zone size in number of blocks upon success or an error code
  * upon failure.
  */
-static int sd_zbc_check_zones(struct scsi_disk *sdkp, u32 *zblocks)
+static int sd_zbc_check_zones(struct scsi_disk *sdkp, unsigned char *buf,
+			      u32 *zblocks)
 {
-	size_t bufsize, buflen;
-	unsigned int noio_flag;
 	u64 zone_blocks = 0;
-	sector_t max_lba, block = 0;
-	unsigned char *buf;
+	sector_t max_lba;
 	unsigned char *rec;
 	int ret;
-	u8 same;
-
-	/* Do all memory allocations as if GFP_NOIO was specified */
-	noio_flag = memalloc_noio_save();
-
-	/* Get a buffer */
-	buf = sd_zbc_alloc_report_buffer(sdkp, SD_ZBC_REPORT_MAX_ZONES,
-					 &bufsize);
-	if (!buf) {
-		ret = -ENOMEM;
-		goto out;
-	}
 
-	/* Do a report zone to get max_lba and the same field */
-	ret = sd_zbc_do_report_zones(sdkp, buf, bufsize, 0, false);
+	/* Do a report zone to get max_lba and the size of the first zone */
+	ret = sd_zbc_do_report_zones(sdkp, buf, SD_BUF_SIZE, 0, false);
 	if (ret)
-		goto out_free;
+		return ret;
 
 	if (sdkp->rc_basis == 0) {
 		/* The max_lba field is the capacity of this device */
@ -375,82 +363,27 @@ static int sd_zbc_check_zones(struct scsi_disk *sdkp, u32 *zblocks)
 		}
 	}
 
-	/*
-	 * Check same field: for any value other than 0, we know that all zones
-	 * have the same size.
-	 */
-	same = buf[4] & 0x0f;
-	if (same > 0) {
-		rec = &buf[64];
-		zone_blocks = get_unaligned_be64(&rec[8]);
-		goto out;
-	}
-
-	/*
-	 * Check the size of all zones: all zones must be of
-	 * equal size, except the last zone which can be smaller
-	 * than other zones.
-	 */
-	do {
-
-		/* Parse REPORT ZONES header */
-		buflen = min_t(size_t, get_unaligned_be32(&buf[0]) + 64,
-			       bufsize);
-		rec = buf + 64;
-
-		/* Parse zone descriptors */
-		while (rec < buf + buflen) {
-			u64 this_zone_blocks = get_unaligned_be64(&rec[8]);
-
-			if (zone_blocks == 0) {
-				zone_blocks = this_zone_blocks;
-			} else if (this_zone_blocks != zone_blocks &&
-				   (block + this_zone_blocks < sdkp->capacity
-				    || this_zone_blocks > zone_blocks)) {
-				zone_blocks = 0;
-				goto out;
-			}
-			block += this_zone_blocks;
-			rec += 64;
-		}
-
-		if (block < sdkp->capacity) {
-			ret = sd_zbc_do_report_zones(sdkp, buf, bufsize, block,
-						     true);
-			if (ret)
-				goto out_free;
-		}
-
-	} while (block < sdkp->capacity);
-
-out:
-	if (!zone_blocks) {
-		if (sdkp->first_scan)
-			sd_printk(KERN_NOTICE, sdkp,
-				  "Devices with non constant zone "
-				  "size are not supported\n");
-		ret = -ENODEV;
-	} else if (!is_power_of_2(zone_blocks)) {
+	/* Parse REPORT ZONES header */
+	rec = buf + 64;
+	zone_blocks = get_unaligned_be64(&rec[8]);
+	if (!zone_blocks || !is_power_of_2(zone_blocks)) {
 		if (sdkp->first_scan)
 			sd_printk(KERN_NOTICE, sdkp,
 				  "Devices with non power of 2 zone "
 				  "size are not supported\n");
-		ret = -ENODEV;
-	} else if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
+		return -ENODEV;
+	}
+
+	if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
 		if (sdkp->first_scan)
 			sd_printk(KERN_NOTICE, sdkp,
 				  "Zone size too large\n");
-		ret = -EFBIG;
-	} else {
-		*zblocks = zone_blocks;
-		ret = 0;
+		return -EFBIG;
 	}
 
-out_free:
-	memalloc_noio_restore(noio_flag);
-	kvfree(buf);
+	*zblocks = zone_blocks;
 
-	return ret;
+	return 0;
 }
 
@ -476,13 +409,11 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
 	 * Check zone size: only devices with a constant zone size (except
 	 * an eventual last runt zone) that is a power of 2 are supported.
 	 */
-	ret = sd_zbc_check_zones(sdkp, &zone_blocks);
+	ret = sd_zbc_check_zones(sdkp, buf, &zone_blocks);
 	if (ret != 0)
 		goto err;
 
 	/* The drive satisfies the kernel restrictions: set it up */
 	blk_queue_chunk_sectors(sdkp->disk->queue,
 			logical_to_sectors(sdkp->device, zone_blocks));
-	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, sdkp->disk->queue);
 	blk_queue_required_elevator_features(sdkp->disk->queue,
 					     ELEVATOR_F_ZBD_SEQ_WRITE);
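sd_zbc_setup_zone_mgmt_cmnd() now parameterizes one 16-byte ZBC OUT CDB over the four service actions: byte 1 carries the operation and bit 0 of byte 14 the "all zones" flag. An illustrative, self-contained sketch of that layout (the helper name and userspace style are assumptions; the zone-ID field position follows the ZBC CDB format):

#include <string.h>

static void example_fill_zbc_out_cdb(unsigned char cdb[16], unsigned char op,
				     unsigned long long zone_lba, int all)
{
	int i;

	memset(cdb, 0, 16);
	cdb[0] = 0x94;			/* ZBC OUT opcode */
	cdb[1] = op;			/* e.g. ZO_RESET_WRITE_POINTER */
	if (all)
		cdb[14] = 0x01;		/* apply to all zones, LBA ignored */
	else
		for (i = 0; i < 8; i++)	/* zone start LBA, big endian */
			cdb[2 + i] = zone_lba >> (56 - 8 * i);
}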
@ -537,7 +537,7 @@ struct fscrypt_mode {
 	int keysize;
 	int ivsize;
 	enum blk_crypto_mode_num blk_crypto_mode;
-	bool logged_impl_name;
+	int logged_impl_name;
 };
 
 extern struct fscrypt_mode fscrypt_modes[];
@ -84,15 +84,13 @@ fscrypt_allocate_skcipher(struct fscrypt_mode *mode, const u8 *raw_key,
 			    mode->cipher_str, PTR_ERR(tfm));
 		return tfm;
 	}
-	if (unlikely(!mode->logged_impl_name)) {
+	if (!xchg(&mode->logged_impl_name, 1)) {
 		/*
 		 * fscrypt performance can vary greatly depending on which
 		 * crypto algorithm implementation is used.  Help people debug
 		 * performance problems by logging the ->cra_driver_name the
-		 * first time a mode is used.  Note that multiple threads can
-		 * race here, but it doesn't really matter.
+		 * first time a mode is used.
 		 */
-		mode->logged_impl_name = true;
 		pr_info("fscrypt: %s using implementation \"%s\"\n",
 			mode->friendly_name,
 			crypto_skcipher_alg(tfm)->base.cra_driver_name);
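The xchg() above turns the benign read-then-write race on logged_impl_name into an atomic test-and-set, which is also why the field changes from bool to int in fscrypt_private.h. The idiom in isolation (names are illustrative):

#include <linux/printk.h>

static int example_logged;	/* int, not bool, so xchg() can be used */

static void example_log_once(const char *msg)
{
	/* xchg() returns the old value: only the first caller sees 0 */
	if (!xchg(&example_logged, 1))
		pr_info("%s\n", msg);
}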
@ -5797,12 +5797,15 @@ int ext4_getattr(const struct path *path, struct kstat *stat,
 		stat->attributes |= STATX_ATTR_IMMUTABLE;
 	if (flags & EXT4_NODUMP_FL)
 		stat->attributes |= STATX_ATTR_NODUMP;
+	if (flags & EXT4_VERITY_FL)
+		stat->attributes |= STATX_ATTR_VERITY;
 
 	stat->attributes_mask |= (STATX_ATTR_APPEND |
 				  STATX_ATTR_COMPRESSED |
 				  STATX_ATTR_ENCRYPTED |
 				  STATX_ATTR_IMMUTABLE |
-				  STATX_ATTR_NODUMP);
+				  STATX_ATTR_NODUMP |
+				  STATX_ATTR_VERITY);
 
 	generic_fillattr(inode, stat);
 	return 0;
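Both this ext4 hunk and the f2fs getattr hunk later in the commit surface fs-verity state through statx(2). A userspace sketch of querying the new bit (assumes a libc with the statx() wrapper and UAPI headers defining STATX_ATTR_VERITY):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct statx stx;

	if (argc < 2 || statx(AT_FDCWD, argv[1], 0, 0, &stx) != 0) {
		perror("statx");
		return 1;
	}
	/* attributes_mask says whether the filesystem reports the bit */
	if (stx.stx_attributes_mask & STATX_ATTR_VERITY)
		printf("verity: %s\n",
		       (stx.stx_attributes & STATX_ATTR_VERITY) ? "on" : "off");
	else
		printf("STATX_ATTR_VERITY not supported here\n");
	return 0;
}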
@ -581,7 +581,7 @@ int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi)
 
 	if (time_to_inject(sbi, FAULT_ORPHAN)) {
 		spin_unlock(&im->ino_lock);
-		f2fs_show_injection_info(FAULT_ORPHAN);
+		f2fs_show_injection_info(sbi, FAULT_ORPHAN);
 		return -ENOSPC;
 	}
 
fs/f2fs/data.c | 190
@ -30,6 +30,7 @@
 #define NUM_PREALLOC_POST_READ_CTXS	128
 
 static struct kmem_cache *bio_post_read_ctx_cache;
+static struct kmem_cache *bio_entry_slab;
 static mempool_t *bio_post_read_ctx_pool;
 
 static bool __is_cp_guaranteed(struct page *page)
@ -168,9 +169,10 @@ static bool f2fs_bio_post_read_required(struct bio *bio)
 
 static void f2fs_read_end_io(struct bio *bio)
 {
-	if (time_to_inject(F2FS_P_SB(bio_first_page_all(bio)),
-						FAULT_READ_IO)) {
-		f2fs_show_injection_info(FAULT_READ_IO);
+	struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
+
+	if (time_to_inject(sbi, FAULT_READ_IO)) {
+		f2fs_show_injection_info(sbi, FAULT_READ_IO);
 		bio->bi_status = BLK_STS_IOERR;
 	}
 
@ -192,7 +194,7 @@ static void f2fs_write_end_io(struct bio *bio)
 	struct bvec_iter_all iter_all;
 
 	if (time_to_inject(sbi, FAULT_WRITE_IO)) {
-		f2fs_show_injection_info(FAULT_WRITE_IO);
+		f2fs_show_injection_info(sbi, FAULT_WRITE_IO);
 		bio->bi_status = BLK_STS_IOERR;
 	}
 
@ -574,6 +576,126 @@ static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
 	return io_type_is_mergeable(io, fio);
 }
 
+static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio,
+				struct page *page, enum temp_type temp)
+{
+	struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
+	struct bio_entry *be;
+
+	be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS);
+	be->bio = bio;
+	bio_get(bio);
+
+	if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
+		f2fs_bug_on(sbi, 1);
+
+	down_write(&io->bio_list_lock);
+	list_add_tail(&be->list, &io->bio_list);
+	up_write(&io->bio_list_lock);
+}
+
+static void del_bio_entry(struct bio_entry *be)
+{
+	list_del(&be->list);
+	kmem_cache_free(bio_entry_slab, be);
+}
+
+static int add_ipu_page(struct f2fs_sb_info *sbi, struct bio **bio,
+							struct page *page)
+{
+	enum temp_type temp;
+	bool found = false;
+	int ret = -EAGAIN;
+
+	for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
+		struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
+		struct list_head *head = &io->bio_list;
+		struct bio_entry *be;
+
+		down_write(&io->bio_list_lock);
+		list_for_each_entry(be, head, list) {
+			if (be->bio != *bio)
+				continue;
+
+			found = true;
+
+			if (bio_add_page(*bio, page, PAGE_SIZE, 0) == PAGE_SIZE) {
+				ret = 0;
+				break;
+			}
+
+			/* bio is full */
+			del_bio_entry(be);
+			__submit_bio(sbi, *bio, DATA);
+			break;
+		}
+		up_write(&io->bio_list_lock);
+	}
+
+	if (ret) {
+		bio_put(*bio);
+		*bio = NULL;
+	}
+
+	return ret;
+}
+
+void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
+					struct bio **bio, struct page *page)
+{
+	enum temp_type temp;
+	bool found = false;
+	struct bio *target = bio ? *bio : NULL;
+
+	for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
+		struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
+		struct list_head *head = &io->bio_list;
+		struct bio_entry *be;
+
+		if (list_empty(head))
+			continue;
+
+		down_read(&io->bio_list_lock);
+		list_for_each_entry(be, head, list) {
+			if (target)
+				found = (target == be->bio);
+			else
+				found = __has_merged_page(be->bio, NULL,
								page, 0);
+			if (found)
+				break;
+		}
+		up_read(&io->bio_list_lock);
+
+		if (!found)
+			continue;
+
+		found = false;
+
+		down_write(&io->bio_list_lock);
+		list_for_each_entry(be, head, list) {
+			if (target)
+				found = (target == be->bio);
+			else
+				found = __has_merged_page(be->bio, NULL,
								page, 0);
+			if (found) {
+				target = be->bio;
+				del_bio_entry(be);
+				break;
+			}
+		}
+		up_write(&io->bio_list_lock);
+	}
+
+	if (found)
+		__submit_bio(sbi, target, DATA);
+	if (bio && *bio) {
+		bio_put(*bio);
+		*bio = NULL;
+	}
+}
+
 int f2fs_merge_page_bio(struct f2fs_io_info *fio)
 {
 	struct bio *bio = *fio->bio;
@ -590,10 +712,8 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio)
 	if (bio && (!page_is_mergeable(fio->sbi, bio, *fio->last_block,
 				       fio->new_blkaddr) ||
 		    !f2fs_crypt_mergeable_bio(bio, fio->page->mapping->host,
-				fio->page->index, fio))) {
-		__submit_bio(fio->sbi, bio, fio->type);
-		bio = NULL;
-	}
+				fio->page->index, fio)))
+		f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL);
 alloc_new:
 	if (!bio) {
 		bio = __bio_alloc(fio, BIO_MAX_PAGES);
@ -601,12 +721,11 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio)
 				       fio->page->index, fio,
 				       GFP_NOIO);
 		bio_set_op_attrs(bio, fio->op, fio->op_flags);
-	}
 
-	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
-		__submit_bio(fio->sbi, bio, fio->type);
-		bio = NULL;
-		goto alloc_new;
+		add_bio_entry(fio->sbi, bio, page, fio->temp);
+	} else {
+		if (add_ipu_page(fio->sbi, &bio, page))
+			goto alloc_new;
 	}
 
 	if (fio->io_wbc)
@ -620,19 +739,6 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio)
 	return 0;
 }
 
-static void f2fs_submit_ipu_bio(struct f2fs_sb_info *sbi, struct bio **bio,
-							struct page *page)
-{
-	if (!bio)
-		return;
-
-	if (!__has_merged_page(*bio, NULL, page, 0))
-		return;
-
-	__submit_bio(sbi, *bio, DATA);
-	*bio = NULL;
-}
-
 void f2fs_submit_page_write(struct f2fs_io_info *fio)
 {
 	struct f2fs_sb_info *sbi = fio->sbi;
@ -2147,7 +2253,7 @@ static int __write_data_page(struct page *page, bool *submitted,
 	loff_t i_size = i_size_read(inode);
 	const pgoff_t end_index = ((unsigned long long) i_size)
 							>> PAGE_SHIFT;
-	loff_t psize = (page->index + 1) << PAGE_SHIFT;
+	loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
 	unsigned offset = 0;
 	bool need_balance_fs = false;
 	int err = 0;
@ -2264,14 +2370,12 @@ static int __write_data_page(struct page *page, bool *submitted,
 
 	unlock_page(page);
 	if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
-					!F2FS_I(inode)->cp_task) {
-		f2fs_submit_ipu_bio(sbi, bio, page);
+					!F2FS_I(inode)->cp_task)
 		f2fs_balance_fs(sbi, need_balance_fs);
-	}
 
 	if (unlikely(f2fs_cp_error(sbi))) {
-		f2fs_submit_ipu_bio(sbi, bio, page);
 		f2fs_submit_merged_write(sbi, DATA);
+		f2fs_submit_merged_ipu_write(sbi, bio, NULL);
 		submitted = NULL;
 	}
 
@ -2391,13 +2495,11 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
 			}
 
 			if (PageWriteback(page)) {
-				if (wbc->sync_mode != WB_SYNC_NONE) {
+				if (wbc->sync_mode != WB_SYNC_NONE)
 					f2fs_wait_on_page_writeback(page,
 							DATA, true, true);
-					f2fs_submit_ipu_bio(sbi, &bio, page);
-				} else {
+				else
 					goto continue_unlock;
-				}
 			}
 
 			if (!clear_page_dirty_for_io(page))
@ -2455,7 +2557,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
 						NULL, 0, DATA);
 	/* submit cached bio of IPU write */
 	if (bio)
-		__submit_bio(sbi, bio, DATA);
+		f2fs_submit_merged_ipu_write(sbi, &bio, NULL);
 
 	return ret;
 }
@ -3301,8 +3403,22 @@ int __init f2fs_init_post_read_processing(void)
 	return -ENOMEM;
 }
 
-void __exit f2fs_destroy_post_read_processing(void)
+void f2fs_destroy_post_read_processing(void)
 {
 	mempool_destroy(bio_post_read_ctx_pool);
 	kmem_cache_destroy(bio_post_read_ctx_cache);
 }
+
+int __init f2fs_init_bio_entry_cache(void)
+{
+	bio_entry_slab = f2fs_kmem_cache_create("bio_entry_slab",
+			sizeof(struct bio_entry));
+	if (!bio_entry_slab)
+		return -ENOMEM;
+	return 0;
+}
+
+void __exit f2fs_destroy_bio_entry_cache(void)
+{
+	kmem_cache_destroy(bio_entry_slab);
+}
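The additions above implement a small cache of open in-place-update (IPU) bios, one list per temperature class, so consecutive IPU pages can merge into an already-open bio instead of forcing one submit per page. A condensed sketch of the merge-or-flush decision (stand-in types and helpers, not the actual f2fs code):

struct ipu_cache {
	struct bio *bio;	/* open bio, NULL when nothing is cached */
};

static int ipu_add_or_flush(struct ipu_cache *c, struct page *page,
			    void (*submit)(struct bio *))
{
	if (c->bio && bio_add_page(c->bio, page, PAGE_SIZE, 0) == PAGE_SIZE)
		return 0;		/* merged into the cached bio */

	if (c->bio) {
		submit(c->bio);		/* full: flush the cached bio */
		c->bio = NULL;
	}
	return -EAGAIN;			/* caller allocates a new bio */
}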
@ -628,7 +628,7 @@ int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
 
 start:
 	if (time_to_inject(F2FS_I_SB(dir), FAULT_DIR_DEPTH)) {
-		f2fs_show_injection_info(FAULT_DIR_DEPTH);
+		f2fs_show_injection_info(F2FS_I_SB(dir), FAULT_DIR_DEPTH);
 		return -ENOSPC;
 	}
 
@ -919,8 +919,9 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
 			bit_pos++;
 			ctx->pos = start_pos + bit_pos;
 			printk_ratelimited(
-				"%s, invalid namelen(0), ino:%u, run fsck to fix.",
-				KERN_WARNING, le32_to_cpu(de->ino));
+				"%sF2FS-fs (%s): invalid namelen(0), ino:%u, run fsck to fix.",
+				KERN_WARNING, sbi->sb->s_id,
+				le32_to_cpu(de->ino));
 			set_sbi_flag(sbi, SBI_NEED_FSCK);
 			continue;
 		}
@ -893,6 +893,7 @@ enum {
 	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
 	CURSEG_COLD_NODE,	/* indirect node blocks */
 	NO_CHECK_TYPE,
+	CURSEG_COLD_DATA_PINNED,/* cold data for pinned file */
 };
 
 struct flush_cmd {
@ -1071,6 +1072,11 @@ struct f2fs_io_info {
 	unsigned char version;		/* version of the node */
 };
 
+struct bio_entry {
+	struct bio *bio;
+	struct list_head list;
+};
+
 #define is_read_io(rw) ((rw) == READ)
 struct f2fs_bio_info {
 	struct f2fs_sb_info *sbi;	/* f2fs superblock */
@ -1080,6 +1086,8 @@ struct f2fs_bio_info {
 	struct rw_semaphore io_rwsem;	/* blocking op for bio */
 	spinlock_t io_lock;		/* serialize DATA/NODE IOs */
 	struct list_head io_list;	/* track fios */
+	struct list_head bio_list;	/* bio entry list head */
+	struct rw_semaphore bio_list_lock;	/* lock to protect bio entry list */
 };
 
 #define FDEV(i)				(sbi->devs[i])
@ -1298,6 +1306,7 @@ struct f2fs_sb_info {
 
 	/* threshold for gc trials on pinned files */
 	u64 gc_pin_file_threshold;
+	struct rw_semaphore pin_sem;
 
 	/* maximum # of trials to find a victim segment for SSR and GC */
 	unsigned int max_victim_search;
@ -1369,9 +1378,10 @@ struct f2fs_private_dio {
 };
 
 #ifdef CONFIG_F2FS_FAULT_INJECTION
-#define f2fs_show_injection_info(type)					\
-	printk_ratelimited("%sF2FS-fs : inject %s in %s of %pS\n",	\
-		KERN_INFO, f2fs_fault_name[type],			\
+#define f2fs_show_injection_info(sbi, type)				\
+	printk_ratelimited("%sF2FS-fs (%s) : inject %s in %s of %pS\n",	\
+		KERN_INFO, sbi->sb->s_id,				\
+		f2fs_fault_name[type],					\
 		__func__, __builtin_return_address(0))
 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
 {
@ -1391,7 +1401,7 @@ static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
 	return false;
 }
 #else
-#define f2fs_show_injection_info(type) do { } while (0)
+#define f2fs_show_injection_info(sbi, type) do { } while (0)
 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
 {
 	return false;
@ -1776,7 +1786,7 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
 		return ret;
 
 	if (time_to_inject(sbi, FAULT_BLOCK)) {
-		f2fs_show_injection_info(FAULT_BLOCK);
+		f2fs_show_injection_info(sbi, FAULT_BLOCK);
 		release = *count;
 		goto release_quota;
 	}
@ -2028,7 +2038,7 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
 	}
 
 	if (time_to_inject(sbi, FAULT_BLOCK)) {
-		f2fs_show_injection_info(FAULT_BLOCK);
+		f2fs_show_injection_info(sbi, FAULT_BLOCK);
 		goto enospc;
 	}
 
@ -2143,7 +2153,8 @@ static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
 		return page;
 
 	if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) {
-		f2fs_show_injection_info(FAULT_PAGE_ALLOC);
+		f2fs_show_injection_info(F2FS_M_SB(mapping),
+						FAULT_PAGE_ALLOC);
 		return NULL;
 	}
 }
@ -2158,7 +2169,7 @@ static inline struct page *f2fs_pagecache_get_page(
 				int fgp_flags, gfp_t gfp_mask)
 {
 	if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) {
-		f2fs_show_injection_info(FAULT_PAGE_GET);
+		f2fs_show_injection_info(F2FS_M_SB(mapping), FAULT_PAGE_GET);
 		return NULL;
 	}
 
@ -2227,7 +2238,7 @@ static inline struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi,
 		return bio;
 	}
 	if (time_to_inject(sbi, FAULT_ALLOC_BIO)) {
-		f2fs_show_injection_info(FAULT_ALLOC_BIO);
+		f2fs_show_injection_info(sbi, FAULT_ALLOC_BIO);
 		return NULL;
 	}
 
@ -2794,7 +2805,7 @@ static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
 	void *ret;
 
 	if (time_to_inject(sbi, FAULT_KMALLOC)) {
-		f2fs_show_injection_info(FAULT_KMALLOC);
+		f2fs_show_injection_info(sbi, FAULT_KMALLOC);
 		return NULL;
 	}
 
@ -2815,7 +2826,7 @@ static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi,
 				size_t size, gfp_t flags)
 {
 	if (time_to_inject(sbi, FAULT_KVMALLOC)) {
-		f2fs_show_injection_info(FAULT_KVMALLOC);
+		f2fs_show_injection_info(sbi, FAULT_KVMALLOC);
 		return NULL;
 	}
 
@ -3113,7 +3124,7 @@ void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
 void allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
 					unsigned int start, unsigned int end);
-void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
+void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi, int type);
 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
 					struct cp_control *cpc);
@ -3199,10 +3210,14 @@ void f2fs_destroy_checkpoint_caches(void);
 */
 int f2fs_init_post_read_processing(void);
 void f2fs_destroy_post_read_processing(void);
+int f2fs_init_bio_entry_cache(void);
+void f2fs_destroy_bio_entry_cache(void);
 void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
 void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
 				struct inode *inode, struct page *page,
 				nid_t ino, enum page_type type);
+void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
+				struct bio **bio, struct page *page);
 void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi);
 int f2fs_submit_page_bio(struct f2fs_io_info *fio);
 int f2fs_merge_page_bio(struct f2fs_io_info *fio);
@ -681,7 +681,7 @@ int f2fs_truncate(struct inode *inode)
 	trace_f2fs_truncate(inode);
 
 	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
-		f2fs_show_injection_info(FAULT_TRUNCATE);
+		f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
 		return -EIO;
 	}
 
@ -726,11 +726,14 @@ int f2fs_getattr(const struct path *path, struct kstat *stat,
 		stat->attributes |= STATX_ATTR_IMMUTABLE;
 	if (flags & F2FS_NODUMP_FL)
 		stat->attributes |= STATX_ATTR_NODUMP;
+	if (IS_VERITY(inode))
+		stat->attributes |= STATX_ATTR_VERITY;
 
 	stat->attributes_mask |= (STATX_ATTR_APPEND |
 				  STATX_ATTR_ENCRYPTED |
 				  STATX_ATTR_IMMUTABLE |
-				  STATX_ATTR_NODUMP);
+				  STATX_ATTR_NODUMP |
+				  STATX_ATTR_VERITY);
 
 	generic_fillattr(inode, stat);
 
@ -1139,7 +1142,7 @@ static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
 			}
 			dn.ofs_in_node++;
 			i++;
-			new_size = (dst + i) << PAGE_SHIFT;
+			new_size = (loff_t)(dst + i) << PAGE_SHIFT;
 			if (dst_inode->i_size < new_size)
 				f2fs_i_size_write(dst_inode, new_size);
 		} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));
@ -1545,12 +1548,44 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
 	if (off_end)
 		map.m_len++;
 
-	if (f2fs_is_pinned_file(inode))
-		map.m_seg_type = CURSEG_COLD_DATA;
+	if (!map.m_len)
+		return 0;
 
-	err = f2fs_map_blocks(inode, &map, 1, (f2fs_is_pinned_file(inode) ?
-						F2FS_GET_BLOCK_PRE_DIO :
-						F2FS_GET_BLOCK_PRE_AIO));
+	if (f2fs_is_pinned_file(inode)) {
+		block_t len = (map.m_len >> sbi->log_blocks_per_seg) <<
+					sbi->log_blocks_per_seg;
+		block_t done = 0;
+
+		if (map.m_len % sbi->blocks_per_seg)
+			len += sbi->blocks_per_seg;
+
+		map.m_len = sbi->blocks_per_seg;
+next_alloc:
+		if (has_not_enough_free_secs(sbi, 0,
+			GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
+			mutex_lock(&sbi->gc_mutex);
+			err = f2fs_gc(sbi, true, false, NULL_SEGNO);
+			if (err && err != -ENODATA && err != -EAGAIN)
+				goto out_err;
+		}
+
+		down_write(&sbi->pin_sem);
+		map.m_seg_type = CURSEG_COLD_DATA_PINNED;
+		f2fs_allocate_new_segments(sbi, CURSEG_COLD_DATA);
+		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
+		up_write(&sbi->pin_sem);
+
+		done += map.m_len;
+		len -= map.m_len;
+		map.m_lblk += map.m_len;
+		if (!err && len)
+			goto next_alloc;
+
+		map.m_len = done;
+	} else {
+		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
+	}
+out_err:
 	if (err) {
 		pgoff_t last_off;
 
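The pinned-file branch above rounds the requested length up to whole segments and then allocates one segment per next_alloc pass, keeping the file segment-aligned. A small standalone illustration of the rounding arithmetic (the numbers are made up):

#include <stdio.h>

int main(void)
{
	unsigned int log_blocks_per_seg = 9;	/* 512 blocks per segment */
	unsigned int blocks_per_seg = 1u << log_blocks_per_seg;
	unsigned int m_len = 1300;		/* requested blocks */

	/* round down to a segment multiple ... */
	unsigned int len = (m_len >> log_blocks_per_seg) << log_blocks_per_seg;
	/* ... then add one segment if there was a remainder */
	if (m_len % blocks_per_seg)
		len += blocks_per_seg;

	printf("%u blocks -> %u blocks (%u segments)\n",
	       m_len, len, len / blocks_per_seg);	/* 1300 -> 1536 (3) */
	return 0;
}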
fs/f2fs/gc.c | 46
@ -54,7 +54,7 @@ static int gc_thread_func(void *data)
 		}
 
 		if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
-			f2fs_show_injection_info(FAULT_CHECKPOINT);
+			f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
 			f2fs_stop_checkpoint(sbi, false);
 		}
 
@ -1012,8 +1012,14 @@ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 		block_t start_bidx;
 		nid_t nid = le32_to_cpu(entry->nid);
 
-		/* stop BG_GC if there is not enough free sections. */
-		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
+		/*
+		 * stop BG_GC if there is not enough free sections.
+		 * Or, stop GC if the segment becomes fully valid caused by
+		 * race condition along with SSR block allocation.
+		 */
+		if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
+				get_valid_blocks(sbi, segno, false) ==
+							sbi->blocks_per_seg)
 			return submitted;
 
 		if (check_valid_map(sbi, segno, off) == 0)
@ -1437,11 +1443,20 @@ static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
 	raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
 	raw_sb->block_count = cpu_to_le64(block_count +
 					(long long)segs * sbi->blocks_per_seg);
+	if (f2fs_is_multi_device(sbi)) {
+		int last_dev = sbi->s_ndevs - 1;
+		int dev_segs =
+			le32_to_cpu(raw_sb->devs[last_dev].total_segments);
+
+		raw_sb->devs[last_dev].total_segments =
+						cpu_to_le32(dev_segs + segs);
+	}
 }
 
 static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
 {
 	int segs = secs * sbi->segs_per_sec;
+	long long blks = (long long)segs * sbi->blocks_per_seg;
 	long long user_block_count =
 			le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);
 
@ -1449,8 +1464,20 @@ static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
 	MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
 	FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
 	FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
-	F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count +
-					(long long)segs * sbi->blocks_per_seg);
+	F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);
+
+	if (f2fs_is_multi_device(sbi)) {
+		int last_dev = sbi->s_ndevs - 1;
+
+		FDEV(last_dev).total_segments =
+				(int)FDEV(last_dev).total_segments + segs;
+		FDEV(last_dev).end_blk =
+				(long long)FDEV(last_dev).end_blk + blks;
+#ifdef CONFIG_BLK_DEV_ZONED
+		FDEV(last_dev).nr_blkz = (int)FDEV(last_dev).nr_blkz +
+					(int)(blks >> sbi->log_blocks_per_blkz);
+#endif
+	}
 }
 
 int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
@ -1465,6 +1492,15 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
 	if (block_count > old_block_count)
 		return -EINVAL;
 
+	if (f2fs_is_multi_device(sbi)) {
+		int last_dev = sbi->s_ndevs - 1;
+		__u64 last_segs = FDEV(last_dev).total_segments;
+
+		if (block_count + last_segs * sbi->blocks_per_seg <=
+								old_block_count)
+			return -EINVAL;
+	}
+
 	/* new fs size should align to section size */
 	div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
 	if (rem)
@ -681,7 +681,7 @@ void f2fs_evict_inode(struct inode *inode)
 	err = f2fs_truncate(inode);
 
 	if (time_to_inject(sbi, FAULT_EVICT_INODE)) {
-		f2fs_show_injection_info(FAULT_EVICT_INODE);
+		f2fs_show_injection_info(sbi, FAULT_EVICT_INODE);
 		err = -EIO;
 	}
 
@ -2349,7 +2349,6 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
 
 	if (ret) {
 		up_read(&nm_i->nat_tree_lock);
-		f2fs_bug_on(sbi, !mount);
 		f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
 		return ret;
 	}
@ -2399,7 +2398,7 @@ bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
 	struct free_nid *i = NULL;
retry:
 	if (time_to_inject(sbi, FAULT_ALLOC_NID)) {
-		f2fs_show_injection_info(FAULT_ALLOC_NID);
+		f2fs_show_injection_info(sbi, FAULT_ALLOC_NID);
 		return false;
 	}
 
@ -711,7 +711,7 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
 		f2fs_put_page(page, 1);
 	}
 	if (!err)
-		f2fs_allocate_new_segments(sbi);
+		f2fs_allocate_new_segments(sbi, NO_CHECK_TYPE);
 	return err;
 }
 
@ -489,7 +489,7 @@ int f2fs_commit_inmem_pages(struct inode *inode)
 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
 {
 	if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
-		f2fs_show_injection_info(FAULT_CHECKPOINT);
+		f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
 		f2fs_stop_checkpoint(sbi, false);
 	}
 
@ -1017,8 +1017,9 @@ static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
 
 	if (dc->error)
 		printk_ratelimited(
-			"%sF2FS-fs: Issue discard(%u, %u, %u) failed, ret: %d",
-			KERN_INFO, dc->lstart, dc->start, dc->len, dc->error);
+			"%sF2FS-fs (%s): Issue discard(%u, %u, %u) failed, ret: %d",
+			KERN_INFO, sbi->sb->s_id,
+			dc->lstart, dc->start, dc->len, dc->error);
 	__detach_discard_cmd(dcc, dc);
 }
 
@ -1158,7 +1159,7 @@ static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
 	dc->len += len;
 
 	if (time_to_inject(sbi, FAULT_DISCARD)) {
-		f2fs_show_injection_info(FAULT_DISCARD);
+		f2fs_show_injection_info(sbi, FAULT_DISCARD);
 		err = -EIO;
 		goto submit;
 	}
@ -1780,7 +1781,8 @@ static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
 			return -EIO;
 		}
 		trace_f2fs_issue_reset_zone(bdev, blkstart);
-		return blkdev_reset_zones(bdev, sector, nr_sects, GFP_NOFS);
+		return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
+					sector, nr_sects, GFP_NOFS);
 	}
 
 	/* For conventional zones, use regular discard if supported */
@ -2699,7 +2701,7 @@ void allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
 	up_read(&SM_I(sbi)->curseg_lock);
 }
 
-void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
+void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi, int type)
 {
 	struct curseg_info *curseg;
 	unsigned int old_segno;
@ -2708,10 +2710,17 @@ void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
 	down_write(&SIT_I(sbi)->sentry_lock);
 
 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
+		if (type != NO_CHECK_TYPE && i != type)
+			continue;
+
 		curseg = CURSEG_I(sbi, i);
-		old_segno = curseg->segno;
-		SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
-		locate_dirty_segment(sbi, old_segno);
+		if (type == NO_CHECK_TYPE || curseg->next_blkoff ||
+				get_valid_blocks(sbi, curseg->segno, false) ||
+				get_ckpt_valid_blocks(sbi, curseg->segno)) {
+			old_segno = curseg->segno;
+			SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
+			locate_dirty_segment(sbi, old_segno);
+		}
 	}
 
 	up_write(&SIT_I(sbi)->sentry_lock);
@ -3077,6 +3086,19 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
 {
 	struct sit_info *sit_i = SIT_I(sbi);
 	struct curseg_info *curseg = CURSEG_I(sbi, type);
+	bool put_pin_sem = false;
+
+	if (type == CURSEG_COLD_DATA) {
+		/* GC during CURSEG_COLD_DATA_PINNED allocation */
+		if (down_read_trylock(&sbi->pin_sem)) {
+			put_pin_sem = true;
+		} else {
+			type = CURSEG_WARM_DATA;
+			curseg = CURSEG_I(sbi, type);
+		}
+	} else if (type == CURSEG_COLD_DATA_PINNED) {
+		type = CURSEG_COLD_DATA;
+	}
 
 	down_read(&SM_I(sbi)->curseg_lock);
 
@ -3142,6 +3164,9 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
 	mutex_unlock(&curseg->curseg_mutex);
 
 	up_read(&SM_I(sbi)->curseg_lock);
+
+	if (put_pin_sem)
+		up_read(&sbi->pin_sem);
 }
 
 static void update_device_state(struct f2fs_io_info *fio)
@ -3388,7 +3413,10 @@ void f2fs_wait_on_page_writeback(struct page *page,
 	if (PageWriteback(page)) {
 		struct f2fs_sb_info *sbi = F2FS_P_SB(page);
 
+		/* submit cached LFS IO */
 		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, type);
+		/* submit cached IPU IO */
+		f2fs_submit_merged_ipu_write(sbi, NULL, page);
 		if (ordered) {
 			wait_on_page_writeback(page);
 			f2fs_bug_on(sbi, locked && PageWriteback(page));
@ -313,6 +313,8 @@ struct sit_entry_set {
 */
 static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type)
 {
+	if (type == CURSEG_COLD_DATA_PINNED)
+		type = CURSEG_COLD_DATA;
 	return (struct curseg_info *)(SM_I(sbi)->curseg_array + type);
 }
 
fs/f2fs/super.c | 101
@ -1222,9 +1222,13 @@ static int f2fs_statfs_project(struct super_block *sb,
 		return PTR_ERR(dquot);
 	spin_lock(&dquot->dq_dqb_lock);
 
-	limit = (dquot->dq_dqb.dqb_bsoftlimit ?
-		 dquot->dq_dqb.dqb_bsoftlimit :
-		 dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits;
+	limit = 0;
+	if (dquot->dq_dqb.dqb_bsoftlimit)
+		limit = dquot->dq_dqb.dqb_bsoftlimit;
+	if (dquot->dq_dqb.dqb_bhardlimit &&
+			(!limit || dquot->dq_dqb.dqb_bhardlimit < limit))
+		limit = dquot->dq_dqb.dqb_bhardlimit;
+
 	if (limit && buf->f_blocks > limit) {
 		curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits;
 		buf->f_blocks = limit;
@ -1233,9 +1237,13 @@ static int f2fs_statfs_project(struct super_block *sb,
 			 (buf->f_blocks - curblock) : 0;
 	}
 
-	limit = dquot->dq_dqb.dqb_isoftlimit ?
-		dquot->dq_dqb.dqb_isoftlimit :
-		dquot->dq_dqb.dqb_ihardlimit;
+	limit = 0;
+	if (dquot->dq_dqb.dqb_isoftlimit)
+		limit = dquot->dq_dqb.dqb_isoftlimit;
+	if (dquot->dq_dqb.dqb_ihardlimit &&
+			(!limit || dquot->dq_dqb.dqb_ihardlimit < limit))
+		limit = dquot->dq_dqb.dqb_ihardlimit;
+
 	if (limit && buf->f_files > limit) {
 		buf->f_files = limit;
 		buf->f_ffree =
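The statfs fix above replaces "soft limit if set, else hard limit" with "smaller nonzero of the two", so a soft limit larger than the hard limit can no longer be reported. The selection rule, factored out as a hypothetical helper (pick_limit is not part of the patch):

static inline unsigned long long pick_limit(unsigned long long soft,
					    unsigned long long hard)
{
	unsigned long long limit = 0;

	if (soft)
		limit = soft;
	if (hard && (!limit || hard < limit))
		limit = hard;
	return limit;		/* pick_limit(100, 50) == 50; 0 means "no limit" */
}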
@ -2659,6 +2667,21 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
 		return -EFSCORRUPTED;
 	}
 
+	if (RDEV(0).path[0]) {
+		block_t dev_seg_count = le32_to_cpu(RDEV(0).total_segments);
+		int i = 1;
+
+		while (i < MAX_DEVICES && RDEV(i).path[0]) {
+			dev_seg_count += le32_to_cpu(RDEV(i).total_segments);
+			i++;
+		}
+		if (segment_count != dev_seg_count) {
+			f2fs_info(sbi, "Segment count (%u) mismatch with total segments from devices (%u)",
+					segment_count, dev_seg_count);
+			return -EFSCORRUPTED;
+		}
+	}
+
 	if (secs_per_zone > total_sections || !secs_per_zone) {
 		f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)",
 			  secs_per_zone, total_sections);
@ -2893,6 +2916,7 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
 	spin_lock_init(&sbi->dev_lock);
 
 	init_rwsem(&sbi->sb_lock);
+	init_rwsem(&sbi->pin_sem);
 }
 
 static int init_percpu_info(struct f2fs_sb_info *sbi)
@ -2912,15 +2936,21 @@ static int init_percpu_info(struct f2fs_sb_info *sbi)
 }
 
 #ifdef CONFIG_BLK_DEV_ZONED
+static int f2fs_report_zone_cb(struct blk_zone *zone, unsigned int idx,
+			       void *data)
+{
+	struct f2fs_dev_info *dev = data;
+
+	if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL)
+		set_bit(idx, dev->blkz_seq);
+	return 0;
+}
+
 static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
 {
 	struct block_device *bdev = FDEV(devi).bdev;
-	sector_t nr_sectors = bdev->bd_part->nr_sects;
-	sector_t sector = 0;
-	struct blk_zone *zones;
-	unsigned int i, nr_zones;
-	unsigned int n = 0;
-	int err = -EIO;
+	int ret;
 
 	if (!f2fs_sb_has_blkzoned(sbi))
 		return 0;
@ -2945,38 +2975,13 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
 	if (!FDEV(devi).blkz_seq)
 		return -ENOMEM;
 
-#define F2FS_REPORT_NR_ZONES   4096
-
-	zones = f2fs_kzalloc(sbi,
-			     array_size(F2FS_REPORT_NR_ZONES,
-					sizeof(struct blk_zone)),
-			     GFP_KERNEL);
-	if (!zones)
-		return -ENOMEM;
-
 	/* Get block zones type */
-	while (zones && sector < nr_sectors) {
-
-		nr_zones = F2FS_REPORT_NR_ZONES;
-		err = blkdev_report_zones(bdev, sector, zones, &nr_zones);
-		if (err)
-			break;
-		if (!nr_zones) {
-			err = -EIO;
-			break;
-		}
-
-		for (i = 0; i < nr_zones; i++) {
-			if (zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL)
-				set_bit(n, FDEV(devi).blkz_seq);
-			sector += zones[i].len;
-			n++;
-		}
-	}
-
-	kvfree(zones);
+	ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES, f2fs_report_zone_cb,
+				  &FDEV(devi));
+	if (ret < 0)
+		return ret;
 
-	return err;
+	return 0;
 }
 #endif
 
@ -3006,6 +3011,7 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
 			f2fs_err(sbi, "Unable to read %dth superblock",
 				 block + 1);
 			err = -EIO;
+			*recovery = 1;
 			continue;
 		}
 
@ -3015,6 +3021,7 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
 			f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
 				 block + 1);
 			brelse(bh);
+			*recovery = 1;
 			continue;
 		}
 
@ -3027,10 +3034,6 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
 		brelse(bh);
 	}
 
-	/* Fail to read any one of the superblocks*/
-	if (err < 0)
-		*recovery = 1;
-
 	/* No valid superblock */
 	if (!*raw_super)
 		kvfree(super);
@ -3384,6 +3387,8 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
 			sbi->write_io[i][j].bio = NULL;
 			spin_lock_init(&sbi->write_io[i][j].io_lock);
 			INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
+			INIT_LIST_HEAD(&sbi->write_io[i][j].bio_list);
+			init_rwsem(&sbi->write_io[i][j].bio_list_lock);
 		}
 	}
 
@ -3795,8 +3800,13 @@ static int __init init_f2fs_fs(void)
 	err = f2fs_init_post_read_processing();
 	if (err)
 		goto free_root_stats;
+	err = f2fs_init_bio_entry_cache();
+	if (err)
+		goto free_post_read;
 	return 0;
 
+free_post_read:
+	f2fs_destroy_post_read_processing();
 free_root_stats:
 	f2fs_destroy_root_stats();
 	unregister_filesystem(&f2fs_fs_type);
@ -3820,6 +3830,7 @@ static int __init init_f2fs_fs(void)
 
 static void __exit exit_f2fs_fs(void)
 {
+	f2fs_destroy_bio_entry_cache();
 	f2fs_destroy_post_read_processing();
 	f2fs_destroy_root_stats();
 	unregister_filesystem(&f2fs_fs_type);
@ -154,6 +154,8 @@ static ssize_t features_show(struct f2fs_attr *a,
|
||||
if (f2fs_sb_has_casefold(sbi))
|
||||
len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
|
||||
len ? ", " : "", "casefold");
|
||||
len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
|
||||
len ? ", " : "", "pin_file");
|
||||
len += snprintf(buf + len, PAGE_SIZE - len, "\n");
|
||||
return len;
|
||||
}
|
||||
@@ -443,6 +445,7 @@ F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_idle, gc_mode);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_urgent, gc_mode);
 F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, reclaim_segments, rec_prefree_segments);
+F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, main_blkaddr, main_blkaddr);
 F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, max_small_discards, max_discards);
 F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, discard_granularity, discard_granularity);
 F2FS_RW_ATTR(RESERVED_BLOCKS, f2fs_sb_info, reserved_blocks, reserved_blocks);
@@ -510,6 +513,7 @@ static struct attribute *f2fs_attrs[] = {
 	ATTR_LIST(gc_idle),
 	ATTR_LIST(gc_urgent),
 	ATTR_LIST(reclaim_segments),
+	ATTR_LIST(main_blkaddr),
 	ATTR_LIST(max_small_discards),
 	ATTR_LIST(discard_granularity),
 	ATTR_LIST(batched_trim_sections),
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -539,8 +539,9 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
 ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
 {
 	struct inode *inode = d_inode(dentry);
+	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
 	struct f2fs_xattr_entry *entry;
-	void *base_addr;
+	void *base_addr, *last_base_addr;
 	int error = 0;
 	size_t rest = buffer_size;
@@ -550,6 +551,8 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
 	if (error)
 		return error;
 
+	last_base_addr = (void *)base_addr + XATTR_SIZE(xnid, inode);
+
 	list_for_each_xattr(entry, base_addr) {
 		const struct xattr_handler *handler =
 			f2fs_xattr_handler(entry->e_name_index);
@@ -557,6 +560,15 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
 		size_t prefix_len;
 		size_t size;
 
+		if ((void *)(entry) + sizeof(__u32) > last_base_addr ||
+		    (void *)XATTR_NEXT_ENTRY(entry) > last_base_addr) {
+			f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
+				 inode->i_ino);
+			set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
+			error = -EFSCORRUPTED;
+			goto cleanup;
+		}
+
 		if (!handler || (handler->list && !handler->list(dentry)))
 			continue;
 
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -296,6 +296,12 @@ enum req_opf {
 	REQ_OP_ZONE_RESET_ALL	= 8,
 	/* write the zero filled sector many times */
 	REQ_OP_WRITE_ZEROES	= 9,
+	/* Open a zone */
+	REQ_OP_ZONE_OPEN	= 10,
+	/* Close a zone */
+	REQ_OP_ZONE_CLOSE	= 11,
+	/* Transition a zone to full */
+	REQ_OP_ZONE_FINISH	= 12,
 
 	/* SCSI passthrough using struct scsi_request */
 	REQ_OP_SCSI_IN		= 32,
@@ -423,6 +429,25 @@ static inline bool op_is_discard(unsigned int op)
 	return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
 }
 
+/*
+ * Check if a bio or request operation is a zone management operation, with
+ * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
+ * due to its different handling in the block layer and device response in
+ * case of command failure.
+ */
+static inline bool op_is_zone_mgmt(enum req_opf op)
+{
+	switch (op & REQ_OP_MASK) {
+	case REQ_OP_ZONE_RESET:
+	case REQ_OP_ZONE_OPEN:
+	case REQ_OP_ZONE_CLOSE:
+	case REQ_OP_ZONE_FINISH:
+		return true;
+	default:
+		return false;
+	}
+}
+
 static inline int op_stat_group(unsigned int op)
 {
 	if (op_is_discard(op))
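Note: a hedged usage sketch for the new helper. A driver can route all four per-zone management commands through one path; my_handle_zone_mgmt() and my_handle_rw() are hypothetical helpers, not part of this patch:

static blk_status_t my_queue_rq(struct request *rq)
{
	/* REQ_OP_ZONE_RESET_ALL is deliberately not matched by the helper. */
	if (op_is_zone_mgmt(req_op(rq)))
		return my_handle_zone_mgmt(rq);	/* hypothetical */

	return my_handle_rw(rq);		/* hypothetical */
}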
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -350,34 +350,28 @@ struct queue_limits {
 	enum blk_zoned_model	zoned;
 };
 
+typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
+			       void *data);
+
 #ifdef CONFIG_BLK_DEV_ZONED
 
-/*
- * Maximum number of zones to report with a single report zones command.
- */
-#define BLK_ZONED_REPORT_MAX_ZONES	8192U
-
-extern unsigned int blkdev_nr_zones(struct block_device *bdev);
-extern int blkdev_report_zones(struct block_device *bdev,
-			       sector_t sector, struct blk_zone *zones,
-			       unsigned int *nr_zones);
-extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors,
-			      sector_t nr_sectors, gfp_t gfp_mask);
+#define BLK_ALL_ZONES  ((unsigned int)-1)
+int blkdev_report_zones(struct block_device *bdev, sector_t sector,
+			unsigned int nr_zones, report_zones_cb cb, void *data);
+unsigned int blkdev_nr_zones(struct gendisk *disk);
+extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
+			    sector_t sectors, sector_t nr_sectors,
+			    gfp_t gfp_mask);
 extern int blk_revalidate_disk_zones(struct gendisk *disk);
 
 extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
 				     unsigned int cmd, unsigned long arg);
-extern int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
-				    unsigned int cmd, unsigned long arg);
+extern int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
+				  unsigned int cmd, unsigned long arg);
 
 #else /* CONFIG_BLK_DEV_ZONED */
 
-static inline unsigned int blkdev_nr_zones(struct block_device *bdev)
-{
-	return 0;
-}
-
-static inline int blk_revalidate_disk_zones(struct gendisk *disk)
+static inline unsigned int blkdev_nr_zones(struct gendisk *disk)
 {
 	return 0;
 }
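Note: under the reworked API the caller no longer allocates a struct blk_zone array; it passes a callback that is invoked once per zone, and BLK_ALL_ZONES reports the whole device. A minimal sketch (not from this patch) that counts sequential zones:

static int count_seq_zone_cb(struct blk_zone *zone, unsigned int idx,
			     void *data)
{
	unsigned int *nr_seq = data;

	if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL)
		(*nr_seq)++;
	return 0;	/* a negative return aborts the report */
}

static int count_seq_zones(struct block_device *bdev, unsigned int *nr_seq)
{
	*nr_seq = 0;
	/* Returns the number of zones reported or a negative errno. */
	return blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
				   count_seq_zone_cb, nr_seq);
}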
@@ -389,9 +383,9 @@ static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
 	return -ENOTTY;
 }
 
-static inline int blkdev_reset_zones_ioctl(struct block_device *bdev,
-					   fmode_t mode, unsigned int cmd,
-					   unsigned long arg)
+static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
+					 fmode_t mode, unsigned int cmd,
+					 unsigned long arg)
 {
 	return -ENOTTY;
 }
@@ -511,9 +505,9 @@ struct request_queue {
 	/*
 	 * Zoned block device information for request dispatch control.
 	 * nr_zones is the total number of zones of the device. This is always
-	 * 0 for regular block devices. seq_zones_bitmap is a bitmap of nr_zones
-	 * bits which indicates if a zone is conventional (bit clear) or
-	 * sequential (bit set). seq_zones_wlock is a bitmap of nr_zones
+	 * 0 for regular block devices. conv_zones_bitmap is a bitmap of nr_zones
+	 * bits which indicates if a zone is conventional (bit set) or
+	 * sequential (bit clear). seq_zones_wlock is a bitmap of nr_zones
 	 * bits which indicates if a zone is write locked, that is, if a write
 	 * request targeting the zone was dispatched. All three fields are
 	 * initialized by the low level device driver (e.g. scsi/sd.c).
@@ -526,7 +520,7 @@ struct request_queue {
 	 * blk_mq_unfreeze_queue().
 	 */
 	unsigned int		nr_zones;
-	unsigned long		*seq_zones_bitmap;
+	unsigned long		*conv_zones_bitmap;
 	unsigned long		*seq_zones_wlock;
 #endif /* CONFIG_BLK_DEV_ZONED */
@@ -731,9 +725,11 @@ static inline unsigned int blk_queue_zone_no(struct request_queue *q,
 static inline bool blk_queue_zone_is_seq(struct request_queue *q,
 					 sector_t sector)
 {
-	if (!blk_queue_is_zoned(q) || !q->seq_zones_bitmap)
+	if (!blk_queue_is_zoned(q))
 		return false;
-	return test_bit(blk_queue_zone_no(q, sector), q->seq_zones_bitmap);
+	if (!q->conv_zones_bitmap)
+		return true;
+	return !test_bit(blk_queue_zone_no(q, sector), q->conv_zones_bitmap);
 }
 #else /* CONFIG_BLK_DEV_ZONED */
 static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
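Note: the bitmap polarity flip pairs with lazy allocation: a NULL conv_zones_bitmap now means the device has no conventional zones, so the common all-sequential case needs no bitmap at all. A sketch of how a revalidation pass might populate the bitmap under this convention (mark_conv_zone() is hypothetical; the real work happens in blk_revalidate_disk_zones()):

static void mark_conv_zone(struct request_queue *q, struct blk_zone *zone,
			   unsigned int idx)
{
	/* Bit set = conventional; absent bitmap = all sequential. */
	if (q->conv_zones_bitmap &&
	    zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		set_bit(idx, q->conv_zones_bitmap);
}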
@@ -1715,7 +1711,7 @@ struct block_device_operations {
 	/* this callback is with swap_lock and sometimes page table lock held */
 	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
 	int (*report_zones)(struct gendisk *, sector_t sector,
-			struct blk_zone *zones, unsigned int *nr_zones);
+			unsigned int nr_zones, report_zones_cb cb, void *data);
 	struct module *owner;
 	const struct pr_ops *pr_ops;
 };
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -17,6 +17,7 @@
 struct dm_dev;
 struct dm_target;
 struct dm_table;
+struct dm_report_zones_args;
 struct mapped_device;
 struct bio_vec;
@@ -93,9 +94,9 @@ typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv,
 
 typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);
 
-typedef int (*dm_report_zones_fn) (struct dm_target *ti, sector_t sector,
-				   struct blk_zone *zones,
-				   unsigned int *nr_zones);
+typedef int (*dm_report_zones_fn) (struct dm_target *ti,
+				   struct dm_report_zones_args *args,
+				   unsigned int nr_zones);
 
 /*
  * These iteration functions are typically used to check (and combine)
@@ -422,10 +423,23 @@ struct gendisk *dm_disk(struct mapped_device *md);
 int dm_suspended(struct dm_target *ti);
 int dm_noflush_suspending(struct dm_target *ti);
 void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
-void dm_remap_zone_report(struct dm_target *ti, sector_t start,
-			  struct blk_zone *zones, unsigned int *nr_zones);
 union map_info *dm_get_rq_mapinfo(struct request *rq);
 
+#ifdef CONFIG_BLK_DEV_ZONED
+struct dm_report_zones_args {
+	struct dm_target *tgt;
+	sector_t next_sector;
+
+	void *orig_data;
+	report_zones_cb orig_cb;
+	unsigned int zone_idx;
+
+	/* must be filled by ->report_zones before calling dm_report_zones_cb */
+	sector_t start;
+};
+int dm_report_zones_cb(struct blk_zone *zone, unsigned int idx, void *data);
+#endif /* CONFIG_BLK_DEV_ZONED */
+
 /*
  * Device mapper functions to parse and create devices specified by the
  * parameter "dm-mod.create="
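Note: under the new scheme a target's ->report_zones records its mapped start sector in args->start and forwards each zone through dm_report_zones_cb(), which remaps the zone and invokes the original callback. A sketch modeled on dm-linear's implementation (my_target and my_map_sector() are hypothetical stand-ins for the target's private data and mapping):

static int my_report_zones(struct dm_target *ti,
			   struct dm_report_zones_args *args,
			   unsigned int nr_zones)
{
	struct my_target *mt = ti->private;			/* hypothetical */
	sector_t sector = my_map_sector(mt, args->next_sector);	/* hypothetical */

	args->start = mt->start;
	return blkdev_report_zones(mt->dev->bdev, sector, nr_zones,
				   dm_report_zones_cb, args);
}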
--- a/include/linux/stat.h
+++ b/include/linux/stat.h
@@ -33,7 +33,8 @@ struct kstat {
 	 STATX_ATTR_IMMUTABLE |				\
 	 STATX_ATTR_APPEND |				\
 	 STATX_ATTR_NODUMP |				\
-	 STATX_ATTR_ENCRYPTED				\
+	 STATX_ATTR_ENCRYPTED |				\
+	 STATX_ATTR_VERITY				\
 	 )/* Attrs corresponding to FS_*_FL flags */
 	u64		ino;
 	dev_t		dev;
--- a/include/uapi/linux/blkzoned.h
+++ b/include/uapi/linux/blkzoned.h
@@ -120,9 +120,11 @@ struct blk_zone_report {
 };
 
 /**
- * struct blk_zone_range - BLKRESETZONE ioctl request
- * @sector: starting sector of the first zone to issue reset write pointer
- * @nr_sectors: Total number of sectors of 1 or more zones to reset
+ * struct blk_zone_range - BLKRESETZONE/BLKOPENZONE/
+ *                         BLKCLOSEZONE/BLKFINISHZONE ioctl
+ *                         requests
+ * @sector: Starting sector of the first zone to operate on.
+ * @nr_sectors: Total number of sectors of all zones to operate on.
  */
 struct blk_zone_range {
 	__u64		sector;
@@ -139,10 +141,19 @@ struct blk_zone_range {
  *                sector range. The sector range must be zone aligned.
  * @BLKGETZONESZ: Get the device zone size in number of 512 B sectors.
  * @BLKGETNRZONES: Get the total number of zones of the device.
+ * @BLKOPENZONE: Open the zones in the specified sector range.
+ *               The 512 B sector range must be zone aligned.
+ * @BLKCLOSEZONE: Close the zones in the specified sector range.
+ *                The 512 B sector range must be zone aligned.
+ * @BLKFINISHZONE: Mark the zones as full in the specified sector range.
+ *                 The 512 B sector range must be zone aligned.
 */
 #define BLKREPORTZONE	_IOWR(0x12, 130, struct blk_zone_report)
 #define BLKRESETZONE	_IOW(0x12, 131, struct blk_zone_range)
 #define BLKGETZONESZ	_IOR(0x12, 132, __u32)
 #define BLKGETNRZONES	_IOR(0x12, 133, __u32)
+#define BLKOPENZONE	_IOW(0x12, 134, struct blk_zone_range)
+#define BLKCLOSEZONE	_IOW(0x12, 135, struct blk_zone_range)
+#define BLKFINISHZONE	_IOW(0x12, 136, struct blk_zone_range)
 
 #endif /* _UAPI_BLKZONED_H */
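Note: the three new ioctls take the same struct blk_zone_range as BLKRESETZONE. A minimal userspace sketch (assumes the device is zoned and zone_start/zone_len describe one zone in 512 B sectors):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/blkzoned.h>

static int open_one_zone(const char *dev, __u64 zone_start, __u64 zone_len)
{
	struct blk_zone_range range = {
		.sector = zone_start,		/* must be zone aligned */
		.nr_sectors = zone_len,
	};
	int ret, fd = open(dev, O_RDWR);

	if (fd < 0)
		return -1;
	/* BLKCLOSEZONE and BLKFINISHZONE work the same way. */
	ret = ioctl(fd, BLKOPENZONE, &range);
	if (ret < 0)
		perror("BLKOPENZONE");
	close(fd);
	return ret;
}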
--- a/include/uapi/linux/stat.h
+++ b/include/uapi/linux/stat.h
@@ -167,8 +167,8 @@ struct statx {
 #define STATX_ATTR_APPEND		0x00000020 /* [I] File is append-only */
 #define STATX_ATTR_NODUMP		0x00000040 /* [I] File is not to be dumped */
 #define STATX_ATTR_ENCRYPTED		0x00000800 /* [I] File requires key to decrypt in fs */
-
 #define STATX_ATTR_AUTOMOUNT		0x00001000 /* Dir: Automount trigger */
+#define STATX_ATTR_VERITY		0x00100000 /* [I] Verity protected file */
 
 #endif /* _UAPI_LINUX_STAT_H */
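Note: with this bit defined (and f2fs/ext4 wired up earlier in this merge), userspace can detect verity files through statx(2). A sketch using the glibc 2.28+ wrapper, assuming the installed headers are new enough to define STATX_ATTR_VERITY:

#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/stat.h>

/* Returns 1 if the file is verity-protected, 0 if not (or if the
 * filesystem does not report the attribute), -1 on error. */
static int is_verity(const char *path)
{
	struct statx stx;

	if (statx(AT_FDCWD, path, 0, STATX_BASIC_STATS, &stx) != 0)
		return -1;
	if (!(stx.stx_attributes_mask & STATX_ATTR_VERITY))
		return 0;
	return !!(stx.stx_attributes & STATX_ATTR_VERITY);
}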