ANDROID: fscrypt: handle direct I/O with IV_INO_LBLK_32

With the existing fscrypt IV generation methods, each file's data blocks
have contiguous DUNs.  Therefore the direct I/O code "just worked"
because it only submits logically contiguous bios.  But with
IV_INO_LBLK_32, the direct I/O code breaks because the DUN can wrap from
0xffffffff to 0.  We can't submit bios across such boundaries.

This is especially difficult to handle when block_size != PAGE_SIZE,
since in that case the DUN can wrap in the middle of a page.  Punt on
this case for now and just handle block_size == PAGE_SIZE.
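
For illustration, with IV_INO_LBLK_32 the DUN of a block is essentially
(u32)(hashed_ino + lblk), so a file whose hashed inode number lands near
U32_MAX hits the wraparound within its first few blocks (made-up values):

    hashed_ino = 0xfffffffe
    lblk 0  =>  DUN 0xfffffffe
    lblk 1  =>  DUN 0xffffffff
    lblk 2  =>  DUN 0x00000000   <-- discontinuity; can't share a bio with lblk 1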

Add and use a new function fscrypt_dio_supported() to check whether a
direct I/O request is supported, i.e. not ruled out by encryption
constraints.

Then, update fs/direct-io.c (used by f2fs, and by ext4 in kernel v5.4
and earlier) and fs/iomap/direct-io.c (used by ext4 in kernel v5.5 and
later) to avoid submitting I/O across a DUN discontinuity.

(This is needed in ACK now because ACK already supports direct I/O with
inline crypto.  I'll be sending this upstream along with the encrypted
direct I/O support itself once its prerequisites are closer to landing.)

Test: For now, just manually tested direct I/O on ext4 and f2fs in the
      DUN discontinuity case.
Bug: 144046242
Change-Id: I0c0b0b20a73ade35c3660cc6f9c09d49d3853ba5
Signed-off-by: Eric Biggers <ebiggers@google.com>

@@ -68,6 +68,14 @@ void fscrypt_free_bounce_page(struct page *bounce_page)
}
EXPORT_SYMBOL(fscrypt_free_bounce_page);

/*
 * Generate the IV for the given logical block number within the given file.
 * For filenames encryption, lblk_num == 0.
 *
 * Keep this in sync with fscrypt_limit_dio_pages().  fscrypt_limit_dio_pages()
 * needs to know about any IV generation methods where the low bits of IV don't
 * simply contain the lblk_num (e.g., IV_INO_LBLK_32).
 */
void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,
                         const struct fscrypt_info *ci)
{
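
For context, a rough sketch of the IV_INO_LBLK_32 branch that the comment
above refers to (paraphrased from the existing fscrypt_generate_iv() body,
not part of this diff; "flags" stands for fscrypt_policy_flags(&ci->ci_policy)):

    if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) {
        /* 32-bit truncation: the DUN of the next block can wrap to 0 */
        lblk_num = (u32)(ci->ci_hashed_ino + lblk_num);
    }
    iv->lblk_num = cpu_to_le64(lblk_num);

This truncated hashed_ino + lblk_num sum is what fscrypt_limit_dio_pages()
below recomputes to find the distance to the wraparound point.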


@@ -16,6 +16,7 @@
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/keyslot-manager.h>
#include <linux/uio.h>
#include "fscrypt_private.h"
@@ -428,3 +429,84 @@ bool fscrypt_mergeable_bio_bh(struct bio *bio,
        return fscrypt_mergeable_bio(bio, inode, next_lblk);
}
EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio_bh);

/**
 * fscrypt_dio_supported() - check whether a direct I/O request is supported
 *                           as far as encryption is concerned
 * @iocb: the file and position the I/O is targeting
 * @iter: the I/O data segment(s)
 *
 * Return: %true if direct I/O is supported
 */
bool fscrypt_dio_supported(struct kiocb *iocb, struct iov_iter *iter)
{
        const struct inode *inode = file_inode(iocb->ki_filp);
        const struct fscrypt_info *ci = inode->i_crypt_info;
        const unsigned int blocksize = i_blocksize(inode);

        /* If the file is unencrypted, no veto from us. */
        if (!fscrypt_needs_contents_encryption(inode))
                return true;

        /* We only support direct I/O with inline crypto, not fs-layer crypto */
        if (!fscrypt_inode_uses_inline_crypto(inode))
                return false;

        /*
         * Since the granularity of encryption is filesystem blocks, the I/O
         * must be block aligned -- not just disk sector aligned.
         */
        if (!IS_ALIGNED(iocb->ki_pos | iov_iter_alignment(iter), blocksize))
                return false;

        /*
         * With IV_INO_LBLK_32 and sub-page blocks, the DUN can wrap around in
         * the middle of a page.  This isn't handled by the direct I/O code yet.
         */
        if (blocksize != PAGE_SIZE &&
            (fscrypt_policy_flags(&ci->ci_policy) &
             FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32))
                return false;

        return true;
}
EXPORT_SYMBOL_GPL(fscrypt_dio_supported);

/**
 * fscrypt_limit_dio_pages() - limit I/O pages to avoid discontiguous DUNs
 * @inode: the file on which I/O is being done
 * @pos: the file position (in bytes) at which the I/O is being done
 * @nr_pages: the number of pages we want to submit starting at @pos
 *
 * For direct I/O: limit the number of pages that will be submitted in the bio
 * targeting @pos, in order to avoid crossing a data unit number (DUN)
 * discontinuity.  This is only needed for certain IV generation methods.
 *
 * This assumes block_size == PAGE_SIZE; see fscrypt_dio_supported().
 *
 * Return: the actual number of pages that can be submitted
 */
int fscrypt_limit_dio_pages(const struct inode *inode, loff_t pos, int nr_pages)
{
        const struct fscrypt_info *ci = inode->i_crypt_info;
        u32 dun;

        if (!fscrypt_inode_uses_inline_crypto(inode))
                return nr_pages;

        if (nr_pages <= 1)
                return nr_pages;

        if (!(fscrypt_policy_flags(&ci->ci_policy) &
              FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32))
                return nr_pages;

        if (WARN_ON_ONCE(i_blocksize(inode) != PAGE_SIZE))
                return 1;

        /* With IV_INO_LBLK_32, the DUN can wrap around from U32_MAX to 0. */
        dun = ci->ci_hashed_ino + (pos >> inode->i_blkbits);

        return min_t(u64, nr_pages, (u64)U32_MAX + 1 - dun);
}
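
To make the computation above concrete, a worked example with made-up values
(ci_hashed_ino and pos are hypothetical, chosen just to show the wraparound math):

    ci_hashed_ino = 0xfffffff0, pos >> blkbits = 12
    dun   = 0xfffffff0 + 12           = 0xfffffffc
    limit = 0x100000000 - 0xfffffffc  = 4 pages (DUNs 0xfffffffc..0xffffffff)

So even a 64-page request is capped at 4 pages here; the next page would have
DUN 0 and has to start a new bio.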


@@ -787,9 +787,17 @@ static inline int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio,
                 * current logical offset in the file does not equal what would
                 * be the next logical offset in the bio, submit the bio we
                 * have.
                 *
                 * When fscrypt inline encryption is used, data unit number
                 * (DUN) contiguity is also required.  Normally that's implied
                 * by logical contiguity.  However, certain IV generation
                 * methods (e.g. IV_INO_LBLK_32) don't guarantee it.  So, we
                 * must explicitly check fscrypt_mergeable_bio() too.
                 */
                if (sdio->final_block_in_bio != sdio->cur_page_block ||
                    cur_offset != bio_next_offset)
                    cur_offset != bio_next_offset ||
                    !fscrypt_mergeable_bio(sdio->bio, dio->inode,
                                           cur_offset >> dio->inode->i_blkbits))
                        dio_bio_submit(dio, sdio);
        }


@@ -40,12 +40,8 @@ static bool ext4_dio_supported(struct kiocb *iocb, struct iov_iter *iter)
{
        struct inode *inode = file_inode(iocb->ki_filp);

        if (IS_ENABLED(CONFIG_FS_ENCRYPTION) && IS_ENCRYPTED(inode)) {
                if (!fscrypt_inode_uses_inline_crypto(inode) ||
                    !IS_ALIGNED(iocb->ki_pos | iov_iter_alignment(iter),
                                i_blocksize(inode)))
                        return false;
        }
        if (!fscrypt_dio_supported(iocb, iter))
                return false;
        if (fsverity_active(inode))
                return false;
        if (ext4_should_journal_data(inode))


@@ -4000,12 +4000,8 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        int rw = iov_iter_rw(iter);

        if (IS_ENABLED(CONFIG_FS_ENCRYPTION) && f2fs_encrypted_file(inode)) {
                if (!fscrypt_inode_uses_inline_crypto(inode) ||
                    !IS_ALIGNED(iocb->ki_pos | iov_iter_alignment(iter),
                                F2FS_BLKSIZE))
                        return true;
        }
        if (!fscrypt_dio_supported(iocb, iter))
                return true;
        if (fsverity_active(inode))
                return true;
        if (f2fs_is_multi_device(sbi))


@@ -252,6 +252,7 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
                ret = nr_pages;
                goto out;
        }
        nr_pages = fscrypt_limit_dio_pages(inode, pos, nr_pages);

        if (need_zeroout) {
                /* zero out from the start of the block to the write offset */
@@ -309,6 +310,7 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
                copied += n;
                nr_pages = iov_iter_npages(dio->submit.iter, BIO_MAX_PAGES);
                nr_pages = fscrypt_limit_dio_pages(inode, pos, nr_pages);
                iomap_dio_submit_bio(dio, iomap, bio);
        } while (nr_pages);


@@ -542,6 +542,11 @@ extern bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
extern bool fscrypt_mergeable_bio_bh(struct bio *bio,
                                     const struct buffer_head *next_bh);

bool fscrypt_dio_supported(struct kiocb *iocb, struct iov_iter *iter);

int fscrypt_limit_dio_pages(const struct inode *inode, loff_t pos,
                            int nr_pages);

#else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */
static inline bool fscrypt_inode_uses_inline_crypto(const struct inode *inode)
{
@@ -574,6 +579,20 @@ static inline bool fscrypt_mergeable_bio_bh(struct bio *bio,
{
        return true;
}

static inline bool fscrypt_dio_supported(struct kiocb *iocb,
                                         struct iov_iter *iter)
{
        const struct inode *inode = file_inode(iocb->ki_filp);

        return !fscrypt_needs_contents_encryption(inode);
}

static inline int fscrypt_limit_dio_pages(const struct inode *inode, loff_t pos,
                                          int nr_pages)
{
        return nr_pages;
}
#endif /* !CONFIG_FS_ENCRYPTION_INLINE_CRYPT */
#if IS_ENABLED(CONFIG_FS_ENCRYPTION) && IS_ENABLED(CONFIG_DM_DEFAULT_KEY)