ANDROID: incremental fs: Move throttling to outside page lock

Bug: 241479010
Test: incfs_test passes, play confirm behavior in bug is fixed
Signed-off-by: Paul Lawrence <paullawrence@google.com>
Change-Id: Ie51f2b76d0873057f54fecf7fcc793c66df20969
This commit is contained in:
Paul Lawrence 2022-07-07 08:24:12 -07:00 committed by Treehugger Robot
parent 96e377f2cd
commit ae068d4b32
7 changed files with 51 additions and 36 deletions

View File

@ -15,6 +15,12 @@ Contact: Paul Lawrence <paullawrence@google.com>
Description: Reads 'supported'. Present if zstd compression is supported
for data blocks.
What: /sys/fs/incremental-fs/features/bugfix_throttling
Date: January 2023
Contact: Paul Lawrence <paullawrence@google.com>
Description: Reads 'supported'. Present if the throttling lock bug is fixed
https://android-review.googlesource.com/c/kernel/common/+/2381827
What: /sys/fs/incremental-fs/instances/[name]
Date: April 2021
Contact: Paul Lawrence <paullawrence@google.com>

View File

@ -35,6 +35,9 @@ Features
/sys/fs/incremental-fs/features/zstd
Reads 'supported'. Present if zstd compression is supported for data blocks.
/sys/fs/incremental-fs/features/bugfix_throttling
Reads 'supported'. Present if the throttling lock bug is fixed
Optional per mount
------------------

View File

@ -3,7 +3,6 @@
* Copyright 2019 Google LLC
*/
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/file.h>
#include <linux/fsverity.h>
#include <linux/gfp.h>
@ -1106,25 +1105,10 @@ static void notify_pending_reads(struct mount_info *mi,
wake_up_all(&mi->mi_blocks_written_notif_wq);
}
static int usleep_interruptible(u32 us)
{
/* See:
* https://www.kernel.org/doc/Documentation/timers/timers-howto.txt
* for explanation
*/
if (us < 10) {
udelay(us);
return 0;
} else if (us < 20000) {
usleep_range(us, us + us / 10);
return 0;
} else
return msleep_interruptible(us / 1000);
}
static int wait_for_data_block(struct data_file *df, int block_index,
struct data_file_block *res_block,
struct incfs_read_data_file_timeouts *timeouts)
struct incfs_read_data_file_timeouts *timeouts,
unsigned int *delayed_min_us)
{
struct data_file_block block = {};
struct data_file_segment *segment = NULL;
@ -1132,7 +1116,7 @@ static int wait_for_data_block(struct data_file *df, int block_index,
struct mount_info *mi = NULL;
int error;
int wait_res = 0;
unsigned int delayed_pending_us = 0, delayed_min_us = 0;
unsigned int delayed_pending_us = 0;
bool delayed_pending = false;
if (!df || !res_block)
@ -1163,8 +1147,7 @@ static int wait_for_data_block(struct data_file *df, int block_index,
if (is_data_block_present(&block)) {
*res_block = block;
if (timeouts && timeouts->min_time_us) {
delayed_min_us = timeouts->min_time_us;
error = usleep_interruptible(delayed_min_us);
*delayed_min_us = timeouts->min_time_us;
goto out;
}
return 0;
@ -1211,13 +1194,9 @@ static int wait_for_data_block(struct data_file *df, int block_index,
delayed_pending = true;
delayed_pending_us = timeouts->max_pending_time_us -
jiffies_to_usecs(wait_res);
if (timeouts->min_pending_time_us > delayed_pending_us) {
delayed_min_us = timeouts->min_pending_time_us -
if (timeouts->min_pending_time_us > delayed_pending_us)
*delayed_min_us = timeouts->min_pending_time_us -
delayed_pending_us;
error = usleep_interruptible(delayed_min_us);
if (error)
return error;
}
error = down_read_killable(&segment->rwsem);
if (error)
@ -1252,9 +1231,9 @@ static int wait_for_data_block(struct data_file *df, int block_index,
delayed_pending_us;
}
if (delayed_min_us) {
if (delayed_min_us && *delayed_min_us) {
mi->mi_reads_delayed_min++;
mi->mi_reads_delayed_min_us += delayed_min_us;
mi->mi_reads_delayed_min_us += *delayed_min_us;
}
return 0;
@ -1284,7 +1263,8 @@ static int incfs_update_sysfs_error(struct file *file, int index, int result,
ssize_t incfs_read_data_file_block(struct mem_range dst, struct file *f,
int index, struct mem_range tmp,
struct incfs_read_data_file_timeouts *timeouts)
struct incfs_read_data_file_timeouts *timeouts,
unsigned int *delayed_min_us)
{
loff_t pos;
ssize_t result;
@ -1303,7 +1283,8 @@ ssize_t incfs_read_data_file_block(struct mem_range dst, struct file *f,
mi = df->df_mount_info;
bfc = df->df_backing_file_context;
result = wait_for_data_block(df, index, &block, timeouts);
result = wait_for_data_block(df, index, &block, timeouts,
delayed_min_us);
if (result < 0)
goto out;

View File

@ -429,7 +429,8 @@ struct incfs_read_data_file_timeouts {
ssize_t incfs_read_data_file_block(struct mem_range dst, struct file *f,
int index, struct mem_range tmp,
struct incfs_read_data_file_timeouts *timeouts);
struct incfs_read_data_file_timeouts *timeouts,
unsigned int *delayed_min_us);
ssize_t incfs_read_merkle_tree_blocks(struct mem_range dst,
struct data_file *df, size_t offset);

View File

@ -33,11 +33,13 @@ static struct kobj_attribute name##_attr = __ATTR_RO(name)
DECLARE_FEATURE_FLAG(corefs);
DECLARE_FEATURE_FLAG(zstd);
DECLARE_FEATURE_FLAG(v2);
DECLARE_FEATURE_FLAG(bugfix_throttling);
static struct attribute *attributes[] = {
&corefs_attr.attr,
&zstd_attr.attr,
&v2_attr.attr,
&bugfix_throttling_attr.attr,
NULL,
};

View File

@ -323,7 +323,7 @@ static int incfs_build_merkle_tree(struct file *f, struct data_file *df,
if (lvl == 0)
result = incfs_read_data_file_block(partial_buf,
f, i, tmp, NULL);
f, i, tmp, NULL, NULL);
else {
hash_level_offset = hash_offset +
hash_tree->hash_level_suboffset[lvl - 1];

View File

@ -5,6 +5,7 @@
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fs_stack.h>
@ -483,7 +484,8 @@ static struct dentry *open_or_create_special_dir(struct dentry *backing_dir,
static int read_single_page_timeouts(struct data_file *df, struct file *f,
int block_index, struct mem_range range,
struct mem_range tmp)
struct mem_range tmp,
unsigned int *delayed_min_us)
{
struct mount_info *mi = df->df_mount_info;
struct incfs_read_data_file_timeouts timeouts = {
@ -515,7 +517,23 @@ static int read_single_page_timeouts(struct data_file *df, struct file *f,
}
return incfs_read_data_file_block(range, f, block_index, tmp,
&timeouts);
&timeouts, delayed_min_us);
}
/*
 * Sleep for approximately 'us' microseconds, picking the delay mechanism
 * by magnitude as recommended by the kernel timers-howto document.
 * Returns 0 for the short-delay paths; for delays >= 20 ms it returns
 * whatever msleep_interruptible() returns (nonzero if interrupted by a
 * signal before the full period elapsed).
 *
 * NOTE(review): despite the name, only the >= 20 ms path is actually
 * interruptible — udelay() and usleep_range() cannot be broken by a
 * signal. Callers (read_folio) invoke this after unlock_page(), which is
 * the point of this change: the throttling sleep no longer holds the
 * page lock.
 */
static int usleep_interruptible(u32 us)
{
/* See:
 * https://www.kernel.org/doc/Documentation/timers/timers-howto.txt
 * for explanation
 */
/* < 10 us: too short for a hrtimer; busy-wait instead */
if (us < 10) {
udelay(us);
return 0;
/* 10 us .. 20 ms: hrtimer-based sleep with 10% slack to allow coalescing */
} else if (us < 20000) {
usleep_range(us, us + us / 10);
return 0;
/* >= 20 ms: jiffy-granularity sleep, interruptible by signals */
} else
return msleep_interruptible(us / 1000);
}
static int read_folio(struct file *f, struct folio *folio)
@ -529,6 +547,7 @@ static int read_folio(struct file *f, struct folio *folio)
int result = 0;
void *page_start;
int block_index;
unsigned int delayed_min_us = 0;
if (!df) {
SetPageError(page);
@ -554,7 +573,8 @@ static int read_folio(struct file *f, struct folio *folio)
bytes_to_read = min_t(loff_t, size - offset, PAGE_SIZE);
read_result = read_single_page_timeouts(df, f, block_index,
range(page_start, bytes_to_read), tmp);
range(page_start, bytes_to_read), tmp,
&delayed_min_us);
free_pages((unsigned long)tmp.data, get_order(tmp.len));
} else {
@ -576,6 +596,8 @@ static int read_folio(struct file *f, struct folio *folio)
flush_dcache_page(page);
kunmap(page);
unlock_page(page);
if (delayed_min_us)
usleep_interruptible(delayed_min_us);
return result;
}