Revert "block: introduce zone_write_granularity limit"
This reverts commit 6b4bb49e34, which is
commit a805a4fa4fa376bbc145762bb8b09caa2fa8af48 upstream.

It breaks the Android kernel ABI and can be brought back in the future
in an ABI-safe way if it is really needed.
Bug: 161946584
Change-Id: Ib32d4f828dc1ebceb03c73906e7867eaffbdee2f
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Parent: 767bb1b3ae
Commit: be9f128eaf
--- a/Documentation/block/queue-sysfs.rst
+++ b/Documentation/block/queue-sysfs.rst
@@ -273,11 +273,4 @@ devices are described in the ZBC (Zoned Block Commands) and ZAC
 do not support zone commands, they will be treated as regular block devices
 and zoned will report "none".
 
-zone_write_granularity (RO)
----------------------------
-This indicates the alignment constraint, in bytes, for write operations in
-sequential zones of zoned block devices (devices with a zoned attribute
-that reports "host-managed" or "host-aware"). This value is always 0 for
-regular block devices.
-
 Jens Axboe <jens.axboe@oracle.com>, February 2009
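While the limit existed, the attribute above was a plain read-only sysfs file. A minimal userspace sketch for querying it; the device name "sdb" is a placeholder, and the file is absent on kernels carrying this revert:

#include <stdio.h>

int main(void)
{
        unsigned long zwg;
        /* Placeholder device name; file is absent after this revert. */
        FILE *f = fopen("/sys/block/sdb/queue/zone_write_granularity", "r");

        if (!f)
                return 1;
        if (fscanf(f, "%lu", &zwg) == 1)
                printf("zone write granularity: %lu bytes\n", zwg);
        fclose(f);
        return 0;
}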
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -60,7 +60,6 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->io_opt = 0;
 	lim->misaligned = 0;
 	lim->zoned = BLK_ZONED_NONE;
-	lim->zone_write_granularity = 0;
 }
 EXPORT_SYMBOL(blk_set_default_limits);
 
@@ -354,28 +353,6 @@ void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
 }
 EXPORT_SYMBOL(blk_queue_physical_block_size);
 
-/**
- * blk_queue_zone_write_granularity - set zone write granularity for the queue
- * @q: the request queue for the zoned device
- * @size: the zone write granularity size, in bytes
- *
- * Description:
- *   This should be set to the lowest possible size allowing to write in
- *   sequential zones of a zoned block device.
- */
-void blk_queue_zone_write_granularity(struct request_queue *q,
-				      unsigned int size)
-{
-	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
-		return;
-
-	q->limits.zone_write_granularity = size;
-
-	if (q->limits.zone_write_granularity < q->limits.logical_block_size)
-		q->limits.zone_write_granularity = q->limits.logical_block_size;
-}
-EXPORT_SYMBOL_GPL(blk_queue_zone_write_granularity);
-
 /**
  * blk_queue_alignment_offset - set physical block alignment offset
  * @q: the request queue for the device
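Note that the removed setter clamped the value up to the logical block size, so the advertised granularity could never drop below the device's smallest addressable unit. A sketch of the driver-side call it supported, with a hypothetical 4 KiB hardware constraint (not a caller that exists in this patch):

/* Hypothetical driver setup: a zoned device whose hardware requires
 * 4 KiB-aligned sequential writes. If 4096 were smaller than the
 * queue's logical block size, the setter raised it to that size.
 */
static void example_setup_zoned_limits(struct request_queue *q)
{
        blk_queue_zone_write_granularity(q, 4096);
}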
@@ -653,8 +630,6 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 			t->discard_granularity;
 	}
 
-	t->zone_write_granularity = max(t->zone_write_granularity,
-					b->zone_write_granularity);
 	t->zoned = max(t->zoned, b->zoned);
 	return ret;
 }
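For stacked devices (device-mapper, md) the removed rule combined limits by taking the maximum, so the top-level device advertised the strictest alignment of any device beneath it. Illustrative values, not taken from the patch:

/* Illustration only: a dm target (t) stacked over an SMR disk (b). */
struct queue_limits t = { .zone_write_granularity = 0 };    /* dm default  */
struct queue_limits b = { .zone_write_granularity = 4096 }; /* bottom disk */
/* The removed line yielded max(0, 4096) == 4096 for the dm device. */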
@@ -871,8 +846,6 @@ EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);
  */
 void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
 {
-	struct request_queue *q = disk->queue;
-
 	switch (model) {
 	case BLK_ZONED_HM:
 		/*
@@ -901,15 +874,7 @@ void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
 		break;
 	}
 
-	q->limits.zoned = model;
-	if (model != BLK_ZONED_NONE) {
-		/*
-		 * Set the zone write granularity to the device logical block
-		 * size by default. The driver can change this value if needed.
-		 */
-		blk_queue_zone_write_granularity(q,
-						 queue_logical_block_size(q));
-	}
+	disk->queue->limits.zoned = model;
 }
 EXPORT_SYMBOL_GPL(blk_queue_set_zoned);
 
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -219,12 +219,6 @@ static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
 		(unsigned long long)q->limits.max_write_zeroes_sectors << 9);
 }
 
-static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
-						 char *page)
-{
-	return queue_var_show(queue_zone_write_granularity(q), page);
-}
-
 static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
 {
 	unsigned long long max_sectors = q->limits.max_zone_append_sectors;
@@ -591,7 +585,6 @@ QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");
 QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
 QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
 QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
-QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");
 
 QUEUE_RO_ENTRY(queue_zoned, "zoned");
 QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
@@ -646,7 +639,6 @@ static struct attribute *queue_attrs[] = {
 	&queue_write_same_max_entry.attr,
 	&queue_write_zeroes_max_entry.attr,
 	&queue_zone_append_max_entry.attr,
-	&queue_zone_write_granularity_entry.attr,
 	&queue_nonrot_entry.attr,
 	&queue_zoned_entry.attr,
 	&queue_nr_zones_entry.attr,
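The three blk-sysfs.c hunks are coupled by the QUEUE_RO_ENTRY() pattern: the macro wires a read-only attribute to a matching _show() helper, which is why the show function, the entry definition, and the queue_attrs[] slot all have to be reverted together. A condensed paraphrase of the macro, reconstructed from memory rather than from this patch:

/* Condensed paraphrase of QUEUE_RO_ENTRY() in block/blk-sysfs.c:
 * defines a 0444 attribute whose show callback is <prefix>_show().
 */
#define QUEUE_RO_ENTRY(_prefix, _name)                          \
static struct queue_sysfs_entry _prefix##_entry = {             \
        .attr   = { .name = _name, .mode = 0444 },              \
        .show   = _prefix##_show,                               \
};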
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -793,14 +793,6 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
 	blk_queue_max_active_zones(q, 0);
 	nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks);
 
-	/*
-	 * Per ZBC and ZAC specifications, writes in sequential write required
-	 * zones of host-managed devices must be aligned to the device physical
-	 * block size.
-	 */
-	if (blk_queue_zoned_model(q) == BLK_ZONED_HM)
-		blk_queue_zone_write_granularity(q, sdkp->physical_block_size);
-
 	/* READ16/WRITE16 is mandatory for ZBC disks */
 	sdkp->device->use_16_for_rw = 1;
 	sdkp->device->use_10_for_rw = 0;
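Concretely, with illustrative numbers not taken from the patch: on a hypothetical 512e host-managed disk the logical block size is 512 B but the physical block size is 4096 B, so the removed hook raised the granularity from the 512 B default (set in blk_queue_set_zoned()) to the 4096 B that ZBC/ZAC mandate for sequential write required zones:

/* Hypothetical 512e host-managed disk, showing the removed hook's effect */
unsigned int lbs = 512;                 /* logical block size: the default  */
unsigned int pbs = 4096;                /* physical block size              */
unsigned int zwg = pbs > lbs ? pbs : lbs;   /* 4096, per ZBC/ZAC            */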
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -349,7 +349,6 @@ struct queue_limits {
 	unsigned int		max_zone_append_sectors;
 	unsigned int		discard_granularity;
 	unsigned int		discard_alignment;
-	unsigned int		zone_write_granularity;
 
 	unsigned short		max_segments;
 	unsigned short		max_integrity_segments;
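This struct queue_limits field is the actual KMI breaker motivating the revert: growing the struct shifts the layout of every structure embedding it. The "ABI-safe way" mentioned in the commit message would normally mean burning a KABI padding slot instead. A hedged sketch, assuming the Android tree reserves padding in this struct with ANDROID_KABI_RESERVE(), which this patch does not show:

#include <linux/android_kabi.h>

struct queue_limits {
        /* ... existing fields unchanged ... */

        /* Assumed reserved slot, replaced in place without resizing the
         * struct, so the KMI stays stable:
         */
        ANDROID_KABI_USE(1, unsigned int zone_write_granularity);
};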
@@ -1170,8 +1169,6 @@ extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
 extern void blk_queue_max_zone_append_sectors(struct request_queue *q,
 		unsigned int max_zone_append_sectors);
 extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
-void blk_queue_zone_write_granularity(struct request_queue *q,
-				      unsigned int size);
 extern void blk_queue_alignment_offset(struct request_queue *q,
 				       unsigned int alignment);
 void blk_queue_update_readahead(struct request_queue *q);
@@ -1483,18 +1480,6 @@ static inline int bdev_io_opt(struct block_device *bdev)
 	return queue_io_opt(bdev_get_queue(bdev));
 }
 
-static inline unsigned int
-queue_zone_write_granularity(const struct request_queue *q)
-{
-	return q->limits.zone_write_granularity;
-}
-
-static inline unsigned int
-bdev_zone_write_granularity(struct block_device *bdev)
-{
-	return queue_zone_write_granularity(bdev_get_queue(bdev));
-}
-
 static inline int queue_alignment_offset(const struct request_queue *q)
 {
 	if (q->limits.misaligned)
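These inline helpers were the in-kernel query side of the limit. A minimal sketch of a hypothetical consumer, not a caller that exists in this patch:

/* Hypothetical consumer: check that a write offset honors the
 * (now removed) zone write granularity of the underlying device.
 */
static bool example_write_is_aligned(struct block_device *bdev, loff_t pos)
{
        return IS_ALIGNED(pos, bdev_zone_write_granularity(bdev));
}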