ANDROID: block: Support configuring limits below the page size

Allow block drivers to configure the following:
* A maximum number of hardware sectors (max_hw_sectors) below
  PAGE_SIZE >> SECTOR_SHIFT. For PAGE_SIZE = 4096 this means that values
  below 8 become supported.
* A maximum segment size below the page size. This is most useful
  for page sizes above 4096 bytes (see the example after this list).
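For illustration, a minimal sketch (the driver function name is
hypothetical) of how a driver whose hardware only handles single
512-byte sectors could configure these limits:

    #include <linux/blkdev.h>

    /* Hypothetical driver setup: the hardware transfers at most one
     * 512-byte sector per request and cannot handle segments larger
     * than 512 bytes. With this patch, the sub-page values are kept
     * instead of being rounded up to page-based minimums. */
    static void foo_configure_queue(struct request_queue *q)
    {
            blk_queue_max_hw_sectors(q, 1);             /* 1 sector = 512 bytes */
            blk_queue_max_segment_size(q, SECTOR_SIZE); /* 512-byte segments */
    }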

The blk_sub_page_limits static branch will be used in later patches to
avoid affecting the performance of block drivers that support segment
sizes >= PAGE_SIZE and max_hw_sectors >= PAGE_SIZE >> SECTOR_SHIFT.
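As a minimal sketch of the pattern (this mirrors the
blk_queue_sub_page_limits() helper added to block/blk.h below): while
the static key is disabled, static_branch_unlikely() compiles to a
patched-out jump, so queues with default limits pay no extra cost:

    DEFINE_STATIC_KEY_FALSE(blk_sub_page_limits);

    static inline bool queue_has_sub_page_limits(const struct queue_limits *lim)
    {
            /* Compiles to a no-op until the first queue enables sub-page
             * limits; only then is the per-queue flag consulted. */
            return static_branch_unlikely(&blk_sub_page_limits) &&
                   lim->sub_page_limits;
    }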

This patch may change the behavior of existing block drivers from not
working into working. If a block driver calls
blk_queue_max_hw_sectors() or blk_queue_max_segment_size(), this is
usually done to configure the maximum limits supported by the hardware.
Before this patch, an attempt to configure a limit below what the block
layer supports caused the block layer to select a larger value. If that
value is not supported by the block driver, this may cause data other
than the requested data to be transferred, a kernel crash, or other
undesirable behavior.
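As a concrete example, simplified from the pre-patch clamping code that
this patch modifies:

    /* Pre-patch behavior of blk_queue_max_segment_size(), simplified:
     * a limit below PAGE_SIZE was silently raised, so a driver that can
     * only handle 512-byte segments could still be handed page-sized
     * segments. */
    if (max_size < PAGE_SIZE) {
            max_size = PAGE_SIZE;
            pr_info("set to minimum %u\n", max_size);
    }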

This patch keeps the ABI stable by placing the new member in an
existing hole in struct queue_limits.
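To illustrate the idea with a generic sketch (not the actual
queue_limits layout): a one-byte bool can occupy alignment padding that
already exists between members, so the size of the structure and the
offsets of all existing members stay unchanged:

    struct example {
            unsigned char a, b, c; /* offsets 0..2 */
            /* 1 byte of alignment padding here: room for the new flag */
            bool new_flag;         /* fills the hole; no layout change */
            int aligned_member;    /* still at offset 4 */
    };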

Bug: 308663717
Bug: 319125789
Bug: 324152549

Change-Id: I7358f3e16aa0c80a6d345cb7887fbe9276e52912
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
[jyescas@google.com: disable subpage limits in block/blk-sysfs.c
                     instead of block/blk-core.c because the function
                     blk_free_queue() is not defined in the 6.1 kernel]
Signed-off-by: Juan Yescas <jyescas@google.com>
Commit: 4fd9a46455 (parent: 65bdaa7543)
Author: Bart Van Assche, 2023-06-12 13:33:09 -07:00
Committer: Juan Yescas
5 changed files with 83 additions and 0 deletions


@@ -185601,6 +185601,12 @@ member {
type_id: 0xa7c362b0
offset: 1088
}
member {
id: 0x4c1b044f
name: "sub_page_limits"
type_id: 0x6d7f5ff6
offset: 840
}
member {
id: 0xedd64f59
name: "sub_reg_offsets"
@@ -250162,6 +250168,7 @@ struct_union {
member_id: 0x06473753
member_id: 0x1bdd5453
member_id: 0x26582f94
member_id: 0x4c1b044f
member_id: 0xaf3e33dd
member_id: 0x8d05d4ec
member_id: 0xd671ce1e


@@ -21,6 +21,11 @@
#include "blk.h"
#include "blk-wbt.h"

/* Protects blk_nr_sub_page_limit_queues and blk_sub_page_limits changes. */
static DEFINE_MUTEX(blk_sub_page_limit_lock);
static uint32_t blk_nr_sub_page_limit_queues;
DEFINE_STATIC_KEY_FALSE(blk_sub_page_limits);

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
        q->rq_timeout = timeout;
@@ -61,6 +66,7 @@ void blk_set_default_limits(struct queue_limits *lim)
        lim->zoned = BLK_ZONED_NONE;
        lim->zone_write_granularity = 0;
        lim->dma_alignment = 511;
        lim->sub_page_limits = false;
}

/**
@@ -103,6 +109,50 @@ void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
}
EXPORT_SYMBOL(blk_queue_bounce_limit);

/**
 * blk_enable_sub_page_limits - enable support for limits below the page size
 * @lim: request queue limits for which to enable support of these features.
 *
 * Enable support for max_segment_size values smaller than PAGE_SIZE and for
 * max_hw_sectors values below PAGE_SIZE >> SECTOR_SHIFT. Support for these
 * features is not enabled all the time because of the runtime overhead of these
 * features.
 */
static void blk_enable_sub_page_limits(struct queue_limits *lim)
{
        if (lim->sub_page_limits)
                return;

        lim->sub_page_limits = true;

        mutex_lock(&blk_sub_page_limit_lock);
        if (++blk_nr_sub_page_limit_queues == 1)
                static_branch_enable(&blk_sub_page_limits);
        mutex_unlock(&blk_sub_page_limit_lock);
}

/**
 * blk_disable_sub_page_limits - disable support for limits below the page size
 * @lim: request queue limits for which to disable support of these features.
 *
 * Disable support for max_segment_size values smaller than PAGE_SIZE and for
 * max_hw_sectors values below PAGE_SIZE >> SECTOR_SHIFT. Support for these
 * features is not enabled all the time because of the runtime overhead of these
 * features.
 */
void blk_disable_sub_page_limits(struct queue_limits *lim)
{
        if (!lim->sub_page_limits)
                return;

        lim->sub_page_limits = false;

        mutex_lock(&blk_sub_page_limit_lock);
        WARN_ON_ONCE(blk_nr_sub_page_limit_queues <= 0);
        if (--blk_nr_sub_page_limit_queues == 0)
                static_branch_disable(&blk_sub_page_limits);
        mutex_unlock(&blk_sub_page_limit_lock);
}

/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q: the request queue for the device
@@ -128,6 +178,11 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
        unsigned int min_max_hw_sectors = PAGE_SIZE >> SECTOR_SHIFT;
        unsigned int max_sectors;

        if (max_hw_sectors < min_max_hw_sectors) {
                blk_enable_sub_page_limits(limits);
                min_max_hw_sectors = 1;
        }

        if (max_hw_sectors < min_max_hw_sectors) {
                max_hw_sectors = min_max_hw_sectors;
                pr_info("set to minimum %u\n", max_hw_sectors);
@@ -281,6 +336,11 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
        unsigned int min_max_segment_size = PAGE_SIZE;

        if (max_size < min_max_segment_size) {
                blk_enable_sub_page_limits(&q->limits);
                min_max_segment_size = SECTOR_SIZE;
        }

        if (max_size < min_max_segment_size) {
                max_size = min_max_segment_size;
                pr_info("set to minimum %u\n", max_size);
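A summary of the resulting behavior change, assuming PAGE_SIZE == 4096
(the values follow from the hunks above):

    /* Before this patch: */
    blk_queue_max_hw_sectors(q, 4);     /* raised to 8: "set to minimum 8" */
    blk_queue_max_segment_size(q, 512); /* raised to 4096: "set to minimum 4096" */

    /* After this patch: */
    blk_queue_max_hw_sectors(q, 4);     /* kept; sub-page limits enabled, minimum is 1 */
    blk_queue_max_segment_size(q, 512); /* kept; minimum becomes SECTOR_SIZE */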


@@ -770,6 +770,8 @@ static void blk_release_queue(struct kobject *kobj)
        blk_free_queue_stats(q->stats);
        kfree(q->poll_stat);

        blk_disable_sub_page_limits(&q->limits);

        if (queue_is_mq(q))
                blk_mq_release(q);


@@ -13,6 +13,7 @@ struct elevator_type;
#define BLK_MAX_TIMEOUT (5 * HZ)

extern struct dentry *blk_debugfs_root;
DECLARE_STATIC_KEY_FALSE(blk_sub_page_limits);

struct blk_flush_queue {
        unsigned int flush_pending_idx:1;
@@ -37,6 +38,14 @@ struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
                gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

static inline bool blk_queue_sub_page_limits(const struct queue_limits *lim)
{
        return static_branch_unlikely(&blk_sub_page_limits) &&
               lim->sub_page_limits;
}

void blk_disable_sub_page_limits(struct queue_limits *q);

void blk_freeze_queue(struct request_queue *q);
void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
void blk_queue_start_drain(struct request_queue *q);
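For context, a hypothetical sketch (not part of this patch) of how the
later patches mentioned in the commit message might use this helper so
that only opted-in queues pay for the extra check:

    unsigned int max_len = PAGE_SIZE;

    /* Queues that never enabled sub-page limits skip the comparison
     * entirely via the patched-out static branch. */
    if (blk_queue_sub_page_limits(lim))
            max_len = min(max_len, lim->max_segment_size);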


@@ -319,6 +319,11 @@ struct queue_limits {
        unsigned char           misaligned;
        unsigned char           discard_misaligned;
        unsigned char           raid_partial_stripes_expensive;
#ifndef __GENKSYMS__
        bool                    sub_page_limits;
#endif
        enum blk_zoned_model    zoned;

        /*