From: John Garry <john.g.garry@oracle.com>
To: axboe@kernel.dk, kbusch@kernel.org, hch@lst.de, sagi@grimberg.me,
martin.petersen@oracle.com, djwong@kernel.org,
viro@zeniv.linux.org.uk, brauner@kernel.org, dchinner@redhat.com,
jejb@linux.ibm.com
Cc: linux-block@vger.kernel.org, linux-kernel@vger.kernel.org,
linux-nvme@lists.infradead.org, linux-scsi@vger.kernel.org,
linux-xfs@vger.kernel.org, linux-fsdevel@vger.kernel.org,
linux-security-module@vger.kernel.org, paul@paul-moore.com,
jmorris@namei.org, serge@hallyn.com,
John Garry <john.g.garry@oracle.com>
Subject: [PATCH RFC 11/16] fs: iomap: Atomic write support
Date: Wed, 3 May 2023 18:38:16 +0000 [thread overview]
Message-ID: <20230503183821.1473305-12-john.g.garry@oracle.com> (raw)
In-Reply-To: <20230503183821.1473305-1-john.g.garry@oracle.com>
Add support to create bios whose bi_sector is aligned to atomic_write_unit and
whose bi_size is a multiple of atomic_write_unit.
When we call iomap_dio_bio_iter() -> bio_iov_iter_get_pages() ->
__bio_iov_iter_get_pages(), we trim the bio to a multiple of
atomic_write_unit.
As such, we expect the iomi start and length to have the same size and
alignment requirements per iomap_dio_bio_iter() call.
In iomap_dio_bio_iter(), ensure that for a non-dsync iocb the mapping
is neither dirty nor unmapped.
Signed-off-by: John Garry <john.g.garry@oracle.com>
---
fs/iomap/direct-io.c | 72 ++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 70 insertions(+), 2 deletions(-)
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index f771001574d0..37c3c926dfd8 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -36,6 +36,8 @@ struct iomap_dio {
size_t done_before;
bool wait_for_completion;
+ unsigned int atomic_write_unit;
+
union {
/* used during submission and for synchronous completion: */
struct {
@@ -229,9 +231,21 @@ static inline blk_opf_t iomap_dio_bio_opflags(struct iomap_dio *dio,
return opflags;
}
+
+/*
+ * Note: For atomic writes, each bio which we create when we iter should have
+ * bi_sector aligned to atomic_write_unit and also its bi_size should be
+ * a multiple of atomic_write_unit.
+ * The call to bio_iov_iter_get_pages() -> __bio_iov_iter_get_pages()
+ * should trim the length to a multiple of atomic_write_unit for us.
+ * This allows us to split each bio later in the block layer to fit
+ * request_queue limit.
+ */
static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
struct iomap_dio *dio)
{
+ bool atomic_write = (dio->iocb->ki_flags & IOCB_ATOMIC) &&
+ (dio->flags & IOMAP_DIO_WRITE);
const struct iomap *iomap = &iter->iomap;
struct inode *inode = iter->inode;
unsigned int fs_block_size = i_blocksize(inode), pad;
@@ -249,6 +263,14 @@ static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
!bdev_iter_is_aligned(iomap->bdev, dio->submit.iter))
return -EINVAL;
+
+ if (atomic_write && !iocb_is_dsync(dio->iocb)) {
+ if (iomap->flags & IOMAP_F_DIRTY)
+ return -EIO;
+ if (iomap->type != IOMAP_MAPPED)
+ return -EIO;
+ }
+
if (iomap->type == IOMAP_UNWRITTEN) {
dio->flags |= IOMAP_DIO_UNWRITTEN;
need_zeroout = true;
@@ -318,6 +340,10 @@ static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
GFP_KERNEL);
bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
bio->bi_ioprio = dio->iocb->ki_ioprio;
+ if (atomic_write) {
+ bio->bi_opf |= REQ_ATOMIC;
+ bio->atomic_write_unit = dio->atomic_write_unit;
+ }
bio->bi_private = dio;
bio->bi_end_io = iomap_dio_bio_end_io;
@@ -492,6 +518,8 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
is_sync_kiocb(iocb) || (dio_flags & IOMAP_DIO_FORCE_WAIT);
struct blk_plug plug;
struct iomap_dio *dio;
+ bool is_read = iov_iter_rw(iter) == READ;
+ bool atomic_write = (iocb->ki_flags & IOCB_ATOMIC) && !is_read;
if (!iomi.len)
return NULL;
@@ -500,6 +528,20 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
if (!dio)
return ERR_PTR(-ENOMEM);
+ if (atomic_write) {
+ /*
+ * Note: This lookup is not proper for a multi-device scenario,
+ * however for current iomap users, the bdev per iter
+ * will be fixed, so "works" for now.
+ */
+ struct super_block *i_sb = inode->i_sb;
+ struct block_device *bdev = i_sb->s_bdev;
+
+ dio->atomic_write_unit =
+ bdev_find_max_atomic_write_alignment(bdev,
+ iomi.pos, iomi.len);
+ }
+
dio->iocb = iocb;
atomic_set(&dio->ref, 1);
dio->size = 0;
@@ -513,7 +555,7 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
dio->submit.waiter = current;
dio->submit.poll_bio = NULL;
- if (iov_iter_rw(iter) == READ) {
+ if (is_read) {
if (iomi.pos >= dio->i_size)
goto out_free_dio;
@@ -567,7 +609,7 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
if (ret)
goto out_free_dio;
- if (iov_iter_rw(iter) == WRITE) {
+ if (!is_read) {
/*
* Try to invalidate cache pages for the range we are writing.
* If this invalidation fails, let the caller fall back to
@@ -592,6 +634,32 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
blk_start_plug(&plug);
while ((ret = iomap_iter(&iomi, ops)) > 0) {
+ if (atomic_write) {
+ const struct iomap *_iomap = &iomi.iomap;
+ loff_t iomi_length = iomap_length(&iomi);
+
+ /*
+ * Ensure length and start address is a multiple of
+ * atomic_write_unit - this is critical. If the length
+ * is not a multiple of atomic_write_unit, then we
+ * cannot create a set of bio's in iomap_dio_bio_iter()
+ * who are each a length which is a multiple of
+ * atomic_write_unit.
+ *
+			 * Note: It may be more appropriate to have this check
+ * in iomap_dio_bio_iter()
+ */
+ if ((iomap_sector(_iomap, iomi.pos) << SECTOR_SHIFT) %
+ dio->atomic_write_unit) {
+ ret = -EIO;
+ break;
+ }
+
+ if (iomi_length % dio->atomic_write_unit) {
+ ret = -EIO;
+ break;
+ }
+ }
iomi.processed = iomap_dio_iter(&iomi, dio);
/*
--
2.31.1
next prev parent reply other threads:[~2023-05-03 18:40 UTC|newest]
Thread overview: 50+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-05-03 18:38 [PATCH RFC 00/16] block atomic writes John Garry
2023-05-03 18:38 ` [PATCH RFC 01/16] block: Add atomic write operations to request_queue limits John Garry
2023-05-03 21:39 ` Dave Chinner
2023-05-04 18:14 ` John Garry
2023-05-04 22:26 ` Dave Chinner
2023-05-05 7:54 ` John Garry
2023-05-05 22:00 ` Darrick J. Wong
2023-05-07 1:59 ` Martin K. Petersen
2023-05-05 23:18 ` Dave Chinner
2023-05-06 9:38 ` John Garry
2023-05-07 2:35 ` Martin K. Petersen
2023-05-05 22:47 ` Eric Biggers
2023-05-05 23:31 ` Dave Chinner
2023-05-06 0:08 ` Eric Biggers
2023-05-09 0:19 ` Mike Snitzer
2023-05-17 17:02 ` John Garry
2023-05-03 18:38 ` [PATCH RFC 02/16] fs/bdev: Add atomic write support info to statx John Garry
2023-05-03 21:58 ` Dave Chinner
2023-05-04 8:45 ` John Garry
2023-05-04 22:40 ` Dave Chinner
2023-05-05 8:01 ` John Garry
2023-05-05 22:04 ` Darrick J. Wong
2023-05-03 18:38 ` [PATCH RFC 03/16] xfs: Support atomic write for statx John Garry
2023-05-03 22:17 ` Dave Chinner
2023-05-05 22:10 ` Darrick J. Wong
2023-05-03 18:38 ` [PATCH RFC 04/16] fs: Add RWF_ATOMIC and IOCB_ATOMIC flags for atomic write support John Garry
2023-05-03 18:38 ` [PATCH RFC 05/16] block: Add REQ_ATOMIC flag John Garry
2023-05-03 18:38 ` [PATCH RFC 06/16] block: Limit atomic writes according to bio and queue limits John Garry
2023-05-03 18:53 ` Keith Busch
2023-05-04 8:24 ` John Garry
2023-05-03 18:38 ` [PATCH RFC 07/16] block: Add bdev_find_max_atomic_write_alignment() John Garry
2023-05-03 18:38 ` [PATCH RFC 08/16] block: Add support for atomic_write_unit John Garry
2023-05-03 18:38 ` [PATCH RFC 09/16] block: Add blk_validate_atomic_write_op() John Garry
2023-05-03 18:38 ` [PATCH RFC 10/16] block: Add fops atomic write support John Garry
2023-05-03 18:38 ` John Garry [this message]
2023-05-04 5:00 ` [PATCH RFC 11/16] fs: iomap: Atomic " Dave Chinner
2023-05-05 21:19 ` Darrick J. Wong
2023-05-05 23:56 ` Dave Chinner
2023-05-03 18:38 ` [PATCH RFC 12/16] xfs: Add support for fallocate2 John Garry
2023-05-03 23:26 ` Dave Chinner
2023-05-05 22:23 ` Darrick J. Wong
2023-05-05 23:42 ` Dave Chinner
2023-05-03 18:38 ` [PATCH RFC 13/16] scsi: sd: Support reading atomic properties from block limits VPD John Garry
2023-05-03 18:38 ` [PATCH RFC 14/16] scsi: sd: Add WRITE_ATOMIC_16 support John Garry
2023-05-03 18:48 ` Bart Van Assche
2023-05-04 8:17 ` John Garry
2023-05-03 18:38 ` [PATCH RFC 15/16] scsi: scsi_debug: Atomic write support John Garry
2023-05-03 18:38 ` [PATCH RFC 16/16] nvme: Support atomic writes John Garry
2023-05-03 18:49 ` Bart Van Assche
2023-05-04 8:19 ` John Garry
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230503183821.1473305-12-john.g.garry@oracle.com \
--to=john.g.garry@oracle.com \
--cc=axboe@kernel.dk \
--cc=brauner@kernel.org \
--cc=dchinner@redhat.com \
--cc=djwong@kernel.org \
--cc=hch@lst.de \
--cc=jejb@linux.ibm.com \
--cc=jmorris@namei.org \
--cc=kbusch@kernel.org \
--cc=linux-block@vger.kernel.org \
--cc=linux-fsdevel@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-nvme@lists.infradead.org \
--cc=linux-scsi@vger.kernel.org \
--cc=linux-security-module@vger.kernel.org \
--cc=linux-xfs@vger.kernel.org \
--cc=martin.petersen@oracle.com \
--cc=paul@paul-moore.com \
--cc=sagi@grimberg.me \
--cc=serge@hallyn.com \
--cc=viro@zeniv.linux.org.uk \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).