Mirror of https://github.com/torvalds/linux.git (synced 2025-08-15)
vfs-6.17-rc1.iomap
-----BEGIN PGP SIGNATURE-----

iHUEABYKAB0WIQRAhzRXHqcMeLMyaSiRxhvAZXjcogUCaINCtwAKCRCRxhvAZXjc
ogPuAQChc4tCjlNp+yAwbSmuzWooKTN8PHI6v+3ftjdaKSy9AgD/Yya1i8aBYBA8
9HBtIKGAqvcgNB3por7yN+GJ8fxb/Ag=
=YmLL
-----END PGP SIGNATURE-----

Merge tag 'vfs-6.17-rc1.iomap' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs

Pull vfs iomap updates from Christian Brauner:

 - Refactor the iomap writeback code and split the generic and
   ioend/bio based writeback code. There are two methods that define
   the split between the generic writeback code and the implementation
   of it, and all knowledge of ioends and bios now sits below that
   layer.

 - Add fuse iomap support for buffered writes and dirty folio
   writeback. This is needed so that granular uptodate and dirty
   tracking can be used in fuse when large folios are enabled. This has
   two big advantages. For writes, instead of the entire folio needing
   to be read into the page cache, only the relevant portions need to
   be. For writeback, only the dirty portions need to be written back
   instead of the entire folio.

* tag 'vfs-6.17-rc1.iomap' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs:
  fuse: refactor writeback to use iomap_writepage_ctx inode
  fuse: hook into iomap for invalidating and checking partial uptodateness
  fuse: use iomap for folio laundering
  fuse: use iomap for writeback
  fuse: use iomap for buffered writes
  iomap: build the writeback code without CONFIG_BLOCK
  iomap: add read_folio_range() handler for buffered writes
  iomap: improve argument passing to iomap_read_folio_sync
  iomap: replace iomap_folio_ops with iomap_write_ops
  iomap: export iomap_writeback_folio
  iomap: move folio_unlock out of iomap_writeback_folio
  iomap: rename iomap_writepage_map to iomap_writeback_folio
  iomap: move all ioend handling to ioend.c
  iomap: add public helpers for uptodate state manipulation
  iomap: hide ioends from the generic writeback code
  iomap: refactor the writeback interface
  iomap: cleanup the pending writeback tracking in iomap_writepage_map_blocks
  iomap: pass more arguments using the iomap writeback context
  iomap: header diet
This commit is contained in commit b5d760d53a.

27 changed files with 860 additions and 806 deletions
Documentation/filesystems/iomap/design.rst

@@ -167,7 +167,6 @@ structure below:

     struct dax_device       *dax_dev;
     void                    *inline_data;
     void                    *private;
-    const struct iomap_folio_ops *folio_ops;
     u64                     validity_cookie;
 };
@@ -292,8 +291,6 @@ The fields are as follows:

   <https://lore.kernel.org/all/20180619164137.13720-7-hch@lst.de/>`_.
   This value will be passed unchanged to ``->iomap_end``.

-* ``folio_ops`` will be covered in the section on pagecache operations.
-
 * ``validity_cookie`` is a magic freshness value set by the filesystem
   that should be used to detect stale mappings.
   For pagecache operations this is critical for correct operation
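With ``folio_ops`` gone from ``struct iomap``, the pagecache hooks are no longer carried in the mapping itself; the operations.rst hunks that follow move them into a ``struct iomap_write_ops`` that callers pass directly to the buffered-write entry points.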
Documentation/filesystems/iomap/operations.rst

@@ -57,21 +57,19 @@ The following address space operations can be wrapped easily:

 * ``bmap``
 * ``swap_activate``

-``struct iomap_folio_ops``
+``struct iomap_write_ops``
 --------------------------

-The ``->iomap_begin`` function for pagecache operations may set the
-``struct iomap::folio_ops`` field to an ops structure to override
-default behaviors of iomap:
-
 .. code-block:: c

- struct iomap_folio_ops {
+ struct iomap_write_ops {
     struct folio *(*get_folio)(struct iomap_iter *iter, loff_t pos,
             unsigned len);
     void (*put_folio)(struct inode *inode, loff_t pos, unsigned copied,
             struct folio *folio);
     bool (*iomap_valid)(struct inode *inode, const struct iomap *iomap);
+    int (*read_folio_range)(const struct iomap_iter *iter,
+            struct folio *folio, loff_t pos, size_t len);
 };

 iomap calls these functions:
@@ -127,6 +125,10 @@ iomap calls these functions:

   ``->iomap_valid``, then the iomap should be considered stale and the
   validation failed.

+- ``read_folio_range``: Called to synchronously read in the range that will
+  be written to. If this function is not provided, iomap will default to
+  submitting a bio read request.
+
 These ``struct kiocb`` flags are significant for buffered I/O with iomap:

 * ``IOCB_NOWAIT``: Turns on ``IOMAP_NOWAIT``.
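A minimal sketch of a filesystem wiring up the ``read_folio_range`` hook documented above (``myfs_read_into_folio()`` is a hypothetical transport helper, not part of this series; fuse, converted later in this series, follows the same shape):

	/* Sketch only: myfs_read_into_folio() is a hypothetical helper. */
	static int myfs_read_folio_range(const struct iomap_iter *iter,
			struct folio *folio, loff_t pos, size_t len)
	{
		/* Read [pos, pos + len) into the folio before it is written to. */
		return myfs_read_into_folio(iter->inode, folio,
					    offset_in_folio(folio, pos), len);
	}

	static const struct iomap_write_ops myfs_write_ops = {
		.read_folio_range = myfs_read_folio_range,
	};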
@@ -271,7 +273,7 @@ writeback.

 It does not lock ``i_rwsem`` or ``invalidate_lock``.

 The dirty bit will be cleared for all folios run through the
-``->map_blocks`` machinery described below even if the writeback fails.
+``->writeback_range`` machinery described below even if the writeback fails.
 This is to prevent dirty folio clots when storage devices fail; an
 ``-EIO`` is recorded for userspace to collect via ``fsync``.
@@ -283,15 +285,14 @@ The ``ops`` structure must be specified and is as follows:

 .. code-block:: c

  struct iomap_writeback_ops {
-    int (*map_blocks)(struct iomap_writepage_ctx *wpc, struct inode *inode,
-                      loff_t offset, unsigned len);
-    int (*submit_ioend)(struct iomap_writepage_ctx *wpc, int status);
-    void (*discard_folio)(struct folio *folio, loff_t pos);
+    int (*writeback_range)(struct iomap_writepage_ctx *wpc,
+                      struct folio *folio, u64 pos, unsigned int len, u64 end_pos);
+    int (*writeback_submit)(struct iomap_writepage_ctx *wpc, int error);
  };

 The fields are as follows:

-- ``map_blocks``: Sets ``wpc->iomap`` to the space mapping of the file
+- ``writeback_range``: Sets ``wpc->iomap`` to the space mapping of the file
   range (in bytes) given by ``offset`` and ``len``.
   iomap calls this function for each dirty fs block in each dirty folio,
   though it will `reuse mappings
@@ -306,27 +307,26 @@ The fields are as follows:

   This revalidation must be open-coded by the filesystem; it is
   unclear if ``iomap::validity_cookie`` can be reused for this
   purpose.
-  This function must be supplied by the filesystem.
-
-- ``submit_ioend``: Allows the file systems to hook into writeback bio
-  submission.
-  This might include pre-write space accounting updates, or installing
-  a custom ``->bi_end_io`` function for internal purposes, such as
-  deferring the ioend completion to a workqueue to run metadata update
-  transactions from process context before submitting the bio.
-  This function is optional.
-
-- ``discard_folio``: iomap calls this function after ``->map_blocks``
-  fails to schedule I/O for any part of a dirty folio.
-  The function should throw away any reservations that may have been
-  made for the write.
+  If this method fails to schedule I/O for any part of a dirty folio, it
+  should throw away any reservations that may have been made for the write.
   The folio will be marked clean and an ``-EIO`` recorded in the
   pagecache.
   Filesystems can use this callback to `remove
   <https://lore.kernel.org/all/20201029163313.1766967-1-bfoster@redhat.com/>`_
   delalloc reservations to avoid having delalloc reservations for
   clean pagecache.
-  This function is optional.
+  This function must be supplied by the filesystem.
+
+- ``writeback_submit``: Submit the previously built writeback context.
+  Block based file systems should use the iomap_ioend_writeback_submit
+  helper; other file systems can implement their own.
+  File systems can optionally hook into writeback bio submission.
+  This might include pre-write space accounting updates, or installing
+  a custom ``->bi_end_io`` function for internal purposes, such as
+  deferring the ioend completion to a workqueue to run metadata update
+  transactions from process context before submitting the bio.
+  This function must be supplied by the filesystem.

 Pagecache Writeback Completion
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -340,10 +340,9 @@ If the write failed, it will also set the error bits on the folios and
 the address space.
 This can happen in interrupt or process context, depending on the
 storage device.

 Filesystems that need to update internal bookkeeping (e.g. unwritten
-extent conversions) should provide a ``->submit_ioend`` function to
-set ``struct iomap_end::bio::bi_end_io`` to its own function.
+extent conversions) should set their own bi_end_io on the bios
+submitted by ``->writeback_submit``.
 This function should call ``iomap_finish_ioends`` after finishing its
 own work (e.g. unwritten extent conversion).
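Putting the two new methods together, a minimal ioend/bio-based wiring would look roughly like the sketch below (``myfs_map_range()`` is a hypothetical mapping helper; this mirrors the block and gfs2 conversions later in this diff):

	/* Sketch only: myfs_map_range() is a hypothetical helper. */
	static ssize_t myfs_writeback_range(struct iomap_writepage_ctx *wpc,
			struct folio *folio, u64 pos, unsigned int len, u64 end_pos)
	{
		/* Look up a new mapping only if the cached one does not cover pos. */
		if (pos < wpc->iomap.offset ||
		    pos >= wpc->iomap.offset + wpc->iomap.length) {
			int error = myfs_map_range(wpc->inode, pos, &wpc->iomap);

			if (error)
				return error;
		}

		/* Queue the range on the current ioend; iomap submits it later. */
		return iomap_add_to_ioend(wpc, folio, pos, end_pos, len);
	}

	static const struct iomap_writeback_ops myfs_writeback_ops = {
		.writeback_range	= myfs_writeback_range,
		.writeback_submit	= iomap_ioend_writeback_submit,
	};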
block/fops.c (37 changed lines)

@@ -540,30 +540,42 @@ static void blkdev_readahead(struct readahead_control *rac)
 	iomap_readahead(rac, &blkdev_iomap_ops);
 }

-static int blkdev_map_blocks(struct iomap_writepage_ctx *wpc,
-		struct inode *inode, loff_t offset, unsigned int len)
+static ssize_t blkdev_writeback_range(struct iomap_writepage_ctx *wpc,
+		struct folio *folio, u64 offset, unsigned int len, u64 end_pos)
 {
-	loff_t isize = i_size_read(inode);
+	loff_t isize = i_size_read(wpc->inode);

 	if (WARN_ON_ONCE(offset >= isize))
 		return -EIO;
-	if (offset >= wpc->iomap.offset &&
-	    offset < wpc->iomap.offset + wpc->iomap.length)
-		return 0;
-	return blkdev_iomap_begin(inode, offset, isize - offset,
-				  IOMAP_WRITE, &wpc->iomap, NULL);
+
+	if (offset < wpc->iomap.offset ||
+	    offset >= wpc->iomap.offset + wpc->iomap.length) {
+		int error;
+
+		error = blkdev_iomap_begin(wpc->inode, offset, isize - offset,
+					   IOMAP_WRITE, &wpc->iomap, NULL);
+		if (error)
+			return error;
+	}
+
+	return iomap_add_to_ioend(wpc, folio, offset, end_pos, len);
 }

 static const struct iomap_writeback_ops blkdev_writeback_ops = {
-	.map_blocks		= blkdev_map_blocks,
+	.writeback_range	= blkdev_writeback_range,
+	.writeback_submit	= iomap_ioend_writeback_submit,
 };

 static int blkdev_writepages(struct address_space *mapping,
 		struct writeback_control *wbc)
 {
-	struct iomap_writepage_ctx wpc = { };
+	struct iomap_writepage_ctx wpc = {
+		.inode		= mapping->host,
+		.wbc		= wbc,
+		.ops		= &blkdev_writeback_ops
+	};

-	return iomap_writepages(mapping, wbc, &wpc, &blkdev_writeback_ops);
+	return iomap_writepages(&wpc);
 }

 const struct address_space_operations def_blk_aops = {

@@ -714,7 +726,8 @@ blkdev_direct_write(struct kiocb *iocb, struct iov_iter *from)

 static ssize_t blkdev_buffered_write(struct kiocb *iocb, struct iov_iter *from)
 {
-	return iomap_file_buffered_write(iocb, from, &blkdev_iomap_ops, NULL);
+	return iomap_file_buffered_write(iocb, from, &blkdev_iomap_ops, NULL,
+					 NULL);
 }

 /*
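The same calling convention applies to every converted filesystem: the writeback context is filled in up front and ``iomap_writepages()`` takes only that context. A sketch, with ``myfs_writeback_ops`` standing in for the filesystem's ops:

	/* Sketch only: myfs_writeback_ops stands in for the filesystem's ops. */
	static int myfs_writepages(struct address_space *mapping,
			struct writeback_control *wbc)
	{
		struct iomap_writepage_ctx wpc = {
			.inode	= mapping->host,
			.wbc	= wbc,
			.ops	= &myfs_writeback_ops,
		};

		return iomap_writepages(&wpc);
	}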
fs/fuse/Kconfig

@@ -2,6 +2,7 @@
 config FUSE_FS
 	tristate "FUSE (Filesystem in Userspace) support"
 	select FS_POSIX_ACL
+	select FS_IOMAP
 	help
 	  With FUSE it is possible to implement a fully functional filesystem
 	  in a userspace program.
fs/fuse/file.c (349 changed lines)

@@ -21,6 +21,7 @@
 #include <linux/filelock.h>
 #include <linux/splice.h>
 #include <linux/task_io_accounting_ops.h>
+#include <linux/iomap.h>

 static int fuse_send_open(struct fuse_mount *fm, u64 nodeid,
 			  unsigned int open_flags, int opcode,

@@ -788,12 +789,16 @@ static void fuse_short_read(struct inode *inode, u64 attr_ver, size_t num_read,
 	}
 }

-static int fuse_do_readfolio(struct file *file, struct folio *folio)
+static int fuse_do_readfolio(struct file *file, struct folio *folio,
+			     size_t off, size_t len)
 {
 	struct inode *inode = folio->mapping->host;
 	struct fuse_mount *fm = get_fuse_mount(inode);
-	loff_t pos = folio_pos(folio);
-	struct fuse_folio_desc desc = { .length = folio_size(folio) };
+	loff_t pos = folio_pos(folio) + off;
+	struct fuse_folio_desc desc = {
+		.offset = off,
+		.length = len,
+	};
 	struct fuse_io_args ia = {
 		.ap.args.page_zeroing = true,
 		.ap.args.out_pages = true,

@@ -820,8 +825,6 @@ static int fuse_do_readfolio(struct file *file, struct folio *folio)
 	if (res < desc.length)
 		fuse_short_read(inode, attr_ver, res, &ia.ap);

-	folio_mark_uptodate(folio);
-
 	return 0;
 }

@@ -834,13 +837,26 @@ static int fuse_read_folio(struct file *file, struct folio *folio)
 	if (fuse_is_bad(inode))
 		goto out;

-	err = fuse_do_readfolio(file, folio);
+	err = fuse_do_readfolio(file, folio, 0, folio_size(folio));
+	if (!err)
+		folio_mark_uptodate(folio);
+
 	fuse_invalidate_atime(inode);
 out:
 	folio_unlock(folio);
 	return err;
 }

+static int fuse_iomap_read_folio_range(const struct iomap_iter *iter,
+				       struct folio *folio, loff_t pos,
+				       size_t len)
+{
+	struct file *file = iter->private;
+	size_t off = offset_in_folio(folio, pos);
+
+	return fuse_do_readfolio(file, folio, off, len);
+}
+
 static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args,
 			       int err)
 {
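With the offset/length parameters in place, ``fuse_do_readfolio()`` can read just the sub-folio range that iomap asks for via ``->read_folio_range``; this is what enables granular uptodate tracking once large folios are enabled.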
@@ -1374,6 +1390,24 @@ static void fuse_dio_unlock(struct kiocb *iocb, bool exclusive)
 	}
 }

+static const struct iomap_write_ops fuse_iomap_write_ops = {
+	.read_folio_range = fuse_iomap_read_folio_range,
+};
+
+static int fuse_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
+			    unsigned int flags, struct iomap *iomap,
+			    struct iomap *srcmap)
+{
+	iomap->type = IOMAP_MAPPED;
+	iomap->length = length;
+	iomap->offset = offset;
+	return 0;
+}
+
+static const struct iomap_ops fuse_iomap_ops = {
+	.iomap_begin = fuse_iomap_begin,
+};
+
 static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
 	struct file *file = iocb->ki_filp;
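``fuse_iomap_begin()`` is deliberately trivial: fuse has no block mapping to look up, so every queried range is reported back as ``IOMAP_MAPPED`` and the real work happens in the write and writeback hooks.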
@@ -1383,6 +1417,7 @@ static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	struct inode *inode = mapping->host;
 	ssize_t err, count;
 	struct fuse_conn *fc = get_fuse_conn(inode);
+	bool writeback = false;

 	if (fc->writeback_cache) {
 		/* Update size (EOF optimization) and mode (SUID clearing) */

@@ -1391,16 +1426,11 @@ static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
 		if (err)
 			return err;

-		if (fc->handle_killpriv_v2 &&
-		    setattr_should_drop_suidgid(idmap,
-						file_inode(file))) {
-			goto writethrough;
-		}
-
-		return generic_file_write_iter(iocb, from);
+		if (!fc->handle_killpriv_v2 ||
+		    !setattr_should_drop_suidgid(idmap, file_inode(file)))
+			writeback = true;
 	}

-writethrough:
 	inode_lock(inode);

 	err = count = generic_write_checks(iocb, from);

@@ -1419,6 +1449,15 @@ writethrough:
 			goto out;
 		written = direct_write_fallback(iocb, from, written,
 				fuse_perform_write(iocb, from));
+	} else if (writeback) {
+		/*
+		 * Use iomap so that we can do granular uptodate reads
+		 * and granular dirty tracking for large folios.
+		 */
+		written = iomap_file_buffered_write(iocb, from,
+						    &fuse_iomap_ops,
+						    &fuse_iomap_write_ops,
+						    file);
 	} else {
 		written = fuse_perform_write(iocb, from);
 	}
@@ -1793,7 +1832,7 @@ static void fuse_writepage_finish(struct fuse_writepage_args *wpa)
 		 * scope of the fi->lock alleviates xarray lock
 		 * contention and noticeably improves performance.
 		 */
-		folio_end_writeback(ap->folios[i]);
+		iomap_finish_folio_write(inode, ap->folios[i], 1);
 		dec_wb_stat(&bdi->wb, WB_WRITEBACK);
 		wb_writeout_inc(&bdi->wb);
 	}

@@ -1980,19 +2019,20 @@ static void fuse_writepage_add_to_bucket(struct fuse_conn *fc,
 }

 static void fuse_writepage_args_page_fill(struct fuse_writepage_args *wpa, struct folio *folio,
-					  uint32_t folio_index)
+					  uint32_t folio_index, loff_t offset, unsigned len)
 {
 	struct inode *inode = folio->mapping->host;
 	struct fuse_args_pages *ap = &wpa->ia.ap;

 	ap->folios[folio_index] = folio;
-	ap->descs[folio_index].offset = 0;
-	ap->descs[folio_index].length = folio_size(folio);
+	ap->descs[folio_index].offset = offset;
+	ap->descs[folio_index].length = len;

 	inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
 }

 static struct fuse_writepage_args *fuse_writepage_args_setup(struct folio *folio,
+							     size_t offset,
 							     struct fuse_file *ff)
 {
 	struct inode *inode = folio->mapping->host;

@@ -2005,7 +2045,7 @@ static struct fuse_writepage_args *fuse_writepage_args_setup(struct folio *folio
 		return NULL;

 	fuse_writepage_add_to_bucket(fc, wpa);
-	fuse_write_args_fill(&wpa->ia, ff, folio_pos(folio), 0);
+	fuse_write_args_fill(&wpa->ia, ff, folio_pos(folio) + offset, 0);
 	wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
 	wpa->inode = inode;
 	wpa->ia.ff = ff;
@@ -2017,63 +2057,28 @@ static struct fuse_writepage_args *fuse_writepage_args_setup(struct folio *folio
 	return wpa;
 }

-static int fuse_writepage_locked(struct folio *folio)
-{
-	struct address_space *mapping = folio->mapping;
-	struct inode *inode = mapping->host;
-	struct fuse_inode *fi = get_fuse_inode(inode);
-	struct fuse_writepage_args *wpa;
-	struct fuse_args_pages *ap;
-	struct fuse_file *ff;
-	int error = -EIO;
-
-	ff = fuse_write_file_get(fi);
-	if (!ff)
-		goto err;
-
-	wpa = fuse_writepage_args_setup(folio, ff);
-	error = -ENOMEM;
-	if (!wpa)
-		goto err_writepage_args;
-
-	ap = &wpa->ia.ap;
-	ap->num_folios = 1;
-
-	folio_start_writeback(folio);
-	fuse_writepage_args_page_fill(wpa, folio, 0);
-
-	spin_lock(&fi->lock);
-	list_add_tail(&wpa->queue_entry, &fi->queued_writes);
-	fuse_flush_writepages(inode);
-	spin_unlock(&fi->lock);
-
-	return 0;
-
-err_writepage_args:
-	fuse_file_put(ff, false);
-err:
-	mapping_set_error(folio->mapping, error);
-	return error;
-}
-
 struct fuse_fill_wb_data {
 	struct fuse_writepage_args *wpa;
 	struct fuse_file *ff;
-	struct inode *inode;
 	unsigned int max_folios;
-	unsigned int nr_pages;
+	/*
+	 * nr_bytes won't overflow since fuse_writepage_need_send() caps
+	 * wb requests to never exceed fc->max_pages (which has an upper bound
+	 * of U16_MAX).
+	 */
+	unsigned int nr_bytes;
 };

-static bool fuse_pages_realloc(struct fuse_fill_wb_data *data)
+static bool fuse_pages_realloc(struct fuse_fill_wb_data *data,
+			       unsigned int max_pages)
 {
 	struct fuse_args_pages *ap = &data->wpa->ia.ap;
-	struct fuse_conn *fc = get_fuse_conn(data->inode);
 	struct folio **folios;
 	struct fuse_folio_desc *descs;
 	unsigned int nfolios = min_t(unsigned int,
 				     max_t(unsigned int, data->max_folios * 2,
 					   FUSE_DEFAULT_MAX_PAGES_PER_REQ),
-				     fc->max_pages);
+				     max_pages);
 	WARN_ON(nfolios <= data->max_folios);

 	folios = fuse_folios_alloc(nfolios, GFP_NOFS, &descs);

@@ -2090,10 +2095,10 @@ static bool fuse_pages_realloc(struct fuse_fill_wb_data *data)
 	return true;
 }

-static void fuse_writepages_send(struct fuse_fill_wb_data *data)
+static void fuse_writepages_send(struct inode *inode,
+				 struct fuse_fill_wb_data *data)
 {
 	struct fuse_writepage_args *wpa = data->wpa;
-	struct inode *inode = data->inode;
 	struct fuse_inode *fi = get_fuse_inode(inode);

 	spin_lock(&fi->lock);
@@ -2102,199 +2107,150 @@ static void fuse_writepages_send(struct fuse_fill_wb_data *data)
 	spin_unlock(&fi->lock);
 }

-static bool fuse_writepage_need_send(struct fuse_conn *fc, struct folio *folio,
-				     struct fuse_args_pages *ap,
+static bool fuse_writepage_need_send(struct fuse_conn *fc, loff_t pos,
+				     unsigned len, struct fuse_args_pages *ap,
 				     struct fuse_fill_wb_data *data)
 {
+	struct folio *prev_folio;
+	struct fuse_folio_desc prev_desc;
+	unsigned bytes = data->nr_bytes + len;
+	loff_t prev_pos;
+
 	WARN_ON(!ap->num_folios);

 	/* Reached max pages */
-	if (data->nr_pages + folio_nr_pages(folio) > fc->max_pages)
+	if ((bytes + PAGE_SIZE - 1) >> PAGE_SHIFT > fc->max_pages)
 		return true;

 	/* Reached max write bytes */
-	if ((data->nr_pages * PAGE_SIZE) + folio_size(folio) > fc->max_write)
+	if (bytes > fc->max_write)
 		return true;

 	/* Discontinuity */
-	if (folio_next_index(ap->folios[ap->num_folios - 1]) != folio->index)
+	prev_folio = ap->folios[ap->num_folios - 1];
+	prev_desc = ap->descs[ap->num_folios - 1];
+	prev_pos = folio_pos(prev_folio) + prev_desc.offset + prev_desc.length;
+	if (prev_pos != pos)
 		return true;

 	/* Need to grow the pages array?  If so, did the expansion fail? */
-	if (ap->num_folios == data->max_folios && !fuse_pages_realloc(data))
+	if (ap->num_folios == data->max_folios &&
+	    !fuse_pages_realloc(data, fc->max_pages))
 		return true;

 	return false;
 }

-static int fuse_writepages_fill(struct folio *folio,
-		struct writeback_control *wbc, void *_data)
+static ssize_t fuse_iomap_writeback_range(struct iomap_writepage_ctx *wpc,
+					  struct folio *folio, u64 pos,
+					  unsigned len, u64 end_pos)
 {
-	struct fuse_fill_wb_data *data = _data;
+	struct fuse_fill_wb_data *data = wpc->wb_ctx;
 	struct fuse_writepage_args *wpa = data->wpa;
 	struct fuse_args_pages *ap = &wpa->ia.ap;
-	struct inode *inode = data->inode;
+	struct inode *inode = wpc->inode;
 	struct fuse_inode *fi = get_fuse_inode(inode);
 	struct fuse_conn *fc = get_fuse_conn(inode);
-	int err;
+	loff_t offset = offset_in_folio(folio, pos);

-	WARN_ON_ONCE(!data);
+	/* len will always be page aligned */
+	WARN_ON_ONCE(len & (PAGE_SIZE - 1));

 	if (!data->ff) {
-		err = -EIO;
 		data->ff = fuse_write_file_get(fi);
 		if (!data->ff)
-			goto out_unlock;
+			return -EIO;
 	}

-	if (wpa && fuse_writepage_need_send(fc, folio, ap, data)) {
-		fuse_writepages_send(data);
+	if (wpa && fuse_writepage_need_send(fc, pos, len, ap, data)) {
+		fuse_writepages_send(inode, data);
 		data->wpa = NULL;
-		data->nr_pages = 0;
+		data->nr_bytes = 0;
 	}

 	if (data->wpa == NULL) {
-		err = -ENOMEM;
-		wpa = fuse_writepage_args_setup(folio, data->ff);
+		wpa = fuse_writepage_args_setup(folio, offset, data->ff);
 		if (!wpa)
-			goto out_unlock;
+			return -ENOMEM;
 		fuse_file_get(wpa->ia.ff);
 		data->max_folios = 1;
 		ap = &wpa->ia.ap;
 	}
-	folio_start_writeback(folio);

-	fuse_writepage_args_page_fill(wpa, folio, ap->num_folios);
-	data->nr_pages += folio_nr_pages(folio);
+	iomap_start_folio_write(inode, folio, 1);
+	fuse_writepage_args_page_fill(wpa, folio, ap->num_folios,
+				      offset, len);
+	data->nr_bytes += len;

-	err = 0;
 	ap->num_folios++;
 	if (!data->wpa)
 		data->wpa = wpa;
-out_unlock:
-	folio_unlock(folio);

-	return err;
+	return len;
 }

+static int fuse_iomap_writeback_submit(struct iomap_writepage_ctx *wpc,
+				       int error)
+{
+	struct fuse_fill_wb_data *data = wpc->wb_ctx;
+
+	WARN_ON_ONCE(!data);
+
+	if (data->wpa) {
+		WARN_ON(!data->wpa->ia.ap.num_folios);
+		fuse_writepages_send(wpc->inode, data);
+	}
+
+	if (data->ff)
+		fuse_file_put(data->ff, false);
+
+	return error;
+}
+
+static const struct iomap_writeback_ops fuse_writeback_ops = {
+	.writeback_range	= fuse_iomap_writeback_range,
+	.writeback_submit	= fuse_iomap_writeback_submit,
+};
+
 static int fuse_writepages(struct address_space *mapping,
 			   struct writeback_control *wbc)
 {
 	struct inode *inode = mapping->host;
 	struct fuse_conn *fc = get_fuse_conn(inode);
-	struct fuse_fill_wb_data data;
-	int err;
+	struct fuse_fill_wb_data data = {};
+	struct iomap_writepage_ctx wpc = {
+		.inode = inode,
+		.iomap.type = IOMAP_MAPPED,
+		.wbc = wbc,
+		.ops = &fuse_writeback_ops,
+		.wb_ctx = &data,
+	};

-	err = -EIO;
 	if (fuse_is_bad(inode))
-		goto out;
+		return -EIO;

 	if (wbc->sync_mode == WB_SYNC_NONE &&
 	    fc->num_background >= fc->congestion_threshold)
 		return 0;

-	data.inode = inode;
-	data.wpa = NULL;
-	data.ff = NULL;
-	data.nr_pages = 0;
-
-	err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data);
-	if (data.wpa) {
-		WARN_ON(!data.wpa->ia.ap.num_folios);
-		fuse_writepages_send(&data);
-	}
-	if (data.ff)
-		fuse_file_put(data.ff, false);
-
-out:
-	return err;
-}
-
-/*
- * It's worthy to make sure that space is reserved on disk for the write,
- * but how to implement it without killing performance need more thinking.
- */
-static int fuse_write_begin(const struct kiocb *iocb,
-			    struct address_space *mapping,
-			    loff_t pos, unsigned len, struct folio **foliop,
-			    void **fsdata)
-{
-	pgoff_t index = pos >> PAGE_SHIFT;
-	struct file *file = iocb->ki_filp;
-	struct fuse_conn *fc = get_fuse_conn(file_inode(file));
-	struct folio *folio;
-	loff_t fsize;
-	int err = -ENOMEM;
-
-	WARN_ON(!fc->writeback_cache);
-
-	folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
-				    mapping_gfp_mask(mapping));
-	if (IS_ERR(folio))
-		goto error;
-
-	if (folio_test_uptodate(folio) || len >= folio_size(folio))
-		goto success;
-	/*
-	 * Check if the start of this folio comes after the end of file,
-	 * in which case the readpage can be optimized away.
-	 */
-	fsize = i_size_read(mapping->host);
-	if (fsize <= folio_pos(folio)) {
-		size_t off = offset_in_folio(folio, pos);
-		if (off)
-			folio_zero_segment(folio, 0, off);
-		goto success;
-	}
-	err = fuse_do_readfolio(file, folio);
-	if (err)
-		goto cleanup;
-success:
-	*foliop = folio;
-	return 0;
-
-cleanup:
-	folio_unlock(folio);
-	folio_put(folio);
-error:
-	return err;
-}
-
-static int fuse_write_end(const struct kiocb *iocb,
-			  struct address_space *mapping,
-			  loff_t pos, unsigned len, unsigned copied,
-			  struct folio *folio, void *fsdata)
-{
-	struct inode *inode = folio->mapping->host;
-
-	/* Haven't copied anything?  Skip zeroing, size extending, dirtying. */
-	if (!copied)
-		goto unlock;
-
-	pos += copied;
-	if (!folio_test_uptodate(folio)) {
-		/* Zero any unwritten bytes at the end of the page */
-		size_t endoff = pos & ~PAGE_MASK;
-		if (endoff)
-			folio_zero_segment(folio, endoff, PAGE_SIZE);
-		folio_mark_uptodate(folio);
-	}
-
-	if (pos > inode->i_size)
-		i_size_write(inode, pos);
-
-	folio_mark_dirty(folio);
-
-unlock:
-	folio_unlock(folio);
-	folio_put(folio);
-
-	return copied;
+	return iomap_writepages(&wpc);
 }

 static int fuse_launder_folio(struct folio *folio)
 {
 	int err = 0;
+	struct fuse_fill_wb_data data = {};
+	struct iomap_writepage_ctx wpc = {
+		.inode = folio->mapping->host,
+		.iomap.type = IOMAP_MAPPED,
+		.ops = &fuse_writeback_ops,
+		.wb_ctx = &data,
+	};
+
 	if (folio_clear_dirty_for_io(folio)) {
-		err = fuse_writepage_locked(folio);
+		err = iomap_writeback_folio(&wpc, folio);
+		err = fuse_iomap_writeback_submit(&wpc, err);
 		if (!err)
 			folio_wait_writeback(folio);
 	}
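The net effect of this hunk: request batching is now byte-granular (``nr_bytes``) rather than folio-granular (``nr_pages``), folio locking and writeback-state transitions are handled by the iomap core, and the old ``fuse_write_begin``/``fuse_write_end``/``fuse_writepage_locked`` paths are gone, with folio laundering now going through ``iomap_writeback_folio()``.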
@@ -3147,12 +3103,13 @@ static const struct address_space_operations fuse_file_aops = {
 	.readahead	= fuse_readahead,
 	.writepages	= fuse_writepages,
 	.launder_folio	= fuse_launder_folio,
-	.dirty_folio	= filemap_dirty_folio,
+	.dirty_folio	= iomap_dirty_folio,
+	.release_folio	= iomap_release_folio,
+	.invalidate_folio = iomap_invalidate_folio,
+	.is_partially_uptodate = iomap_is_partially_uptodate,
 	.migrate_folio	= filemap_migrate_folio,
 	.bmap		= fuse_bmap,
 	.direct_IO	= fuse_direct_IO,
-	.write_begin	= fuse_write_begin,
-	.write_end	= fuse_write_end,
 };

 void fuse_init_file_inode(struct inode *inode, unsigned int flags)
fs/gfs2/aops.c

@@ -159,7 +159,11 @@ static int gfs2_writepages(struct address_space *mapping,
 			   struct writeback_control *wbc)
 {
 	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
-	struct iomap_writepage_ctx wpc = { };
+	struct iomap_writepage_ctx wpc = {
+		.inode = mapping->host,
+		.wbc = wbc,
+		.ops = &gfs2_writeback_ops,
+	};
 	int ret;

 	/*

@@ -168,7 +172,7 @@ static int gfs2_writepages(struct address_space *mapping,
 	 * want balance_dirty_pages() to loop indefinitely trying to write out
 	 * pages held in the ail that it can't find.
 	 */
-	ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
+	ret = iomap_writepages(&wpc);
 	if (ret == 0 && wbc->nr_to_write > 0)
 		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
 	return ret;
fs/gfs2/bmap.c

@@ -963,12 +963,16 @@ static struct folio *
 gfs2_iomap_get_folio(struct iomap_iter *iter, loff_t pos, unsigned len)
 {
 	struct inode *inode = iter->inode;
+	struct gfs2_inode *ip = GFS2_I(inode);
 	unsigned int blockmask = i_blocksize(inode) - 1;
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
 	unsigned int blocks;
 	struct folio *folio;
 	int status;

+	if (!gfs2_is_jdata(ip) && !gfs2_is_stuffed(ip))
+		return iomap_get_folio(iter, pos, len);
+
 	blocks = ((pos & blockmask) + len + blockmask) >> inode->i_blkbits;
 	status = gfs2_trans_begin(sdp, RES_DINODE + blocks, 0);
 	if (status)

@@ -987,7 +991,7 @@ static void gfs2_iomap_put_folio(struct inode *inode, loff_t pos,
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_sbd *sdp = GFS2_SB(inode);

-	if (!gfs2_is_stuffed(ip))
+	if (gfs2_is_jdata(ip) && !gfs2_is_stuffed(ip))
 		gfs2_trans_add_databufs(ip->i_gl, folio,
 					offset_in_folio(folio, pos),
 					copied);

@@ -995,13 +999,14 @@ static void gfs2_iomap_put_folio(struct inode *inode, loff_t pos,
 	folio_unlock(folio);
 	folio_put(folio);

-	if (tr->tr_num_buf_new)
-		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
-
-	gfs2_trans_end(sdp);
+	if (gfs2_is_jdata(ip) || gfs2_is_stuffed(ip)) {
+		if (tr->tr_num_buf_new)
+			__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+		gfs2_trans_end(sdp);
+	}
 }

-static const struct iomap_folio_ops gfs2_iomap_folio_ops = {
+const struct iomap_write_ops gfs2_iomap_write_ops = {
 	.get_folio = gfs2_iomap_get_folio,
 	.put_folio = gfs2_iomap_put_folio,
 };

@@ -1078,8 +1083,6 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
 		gfs2_trans_end(sdp);
 	}

-	if (gfs2_is_stuffed(ip) || gfs2_is_jdata(ip))
-		iomap->folio_ops = &gfs2_iomap_folio_ops;
 	return 0;

 out_trans_end:

@@ -1304,7 +1307,7 @@ static int gfs2_block_zero_range(struct inode *inode, loff_t from, loff_t length
 		return 0;
 	length = min(length, inode->i_size - from);
 	return iomap_zero_range(inode, from, length, NULL, &gfs2_iomap_ops,
-			NULL);
+			&gfs2_iomap_write_ops, NULL);
 }

 #define GFS2_JTRUNC_REVOKES 8192

@@ -2469,23 +2472,26 @@ out:
 	return error;
 }

-static int gfs2_map_blocks(struct iomap_writepage_ctx *wpc, struct inode *inode,
-		loff_t offset, unsigned int len)
+static ssize_t gfs2_writeback_range(struct iomap_writepage_ctx *wpc,
+		struct folio *folio, u64 offset, unsigned int len, u64 end_pos)
 {
-	int ret;
-
-	if (WARN_ON_ONCE(gfs2_is_stuffed(GFS2_I(inode))))
+	if (WARN_ON_ONCE(gfs2_is_stuffed(GFS2_I(wpc->inode))))
 		return -EIO;

-	if (offset >= wpc->iomap.offset &&
-	    offset < wpc->iomap.offset + wpc->iomap.length)
-		return 0;
+	if (offset < wpc->iomap.offset ||
+	    offset >= wpc->iomap.offset + wpc->iomap.length) {
+		int ret;

-	memset(&wpc->iomap, 0, sizeof(wpc->iomap));
-	ret = gfs2_iomap_get(inode, offset, INT_MAX, &wpc->iomap);
-	return ret;
+		memset(&wpc->iomap, 0, sizeof(wpc->iomap));
+		ret = gfs2_iomap_get(wpc->inode, offset, INT_MAX, &wpc->iomap);
+		if (ret)
+			return ret;
+	}
+
+	return iomap_add_to_ioend(wpc, folio, offset, end_pos, len);
 }

 const struct iomap_writeback_ops gfs2_writeback_ops = {
-	.map_blocks		= gfs2_map_blocks,
+	.writeback_range	= gfs2_writeback_range,
+	.writeback_submit	= iomap_ioend_writeback_submit,
 };
fs/gfs2/bmap.h

@@ -44,6 +44,7 @@ static inline void gfs2_write_calc_reserv(const struct gfs2_inode *ip,
 }

 extern const struct iomap_ops gfs2_iomap_ops;
+extern const struct iomap_write_ops gfs2_iomap_write_ops;
 extern const struct iomap_writeback_ops gfs2_writeback_ops;

 int gfs2_unstuff_dinode(struct gfs2_inode *ip);
fs/gfs2/file.c

@@ -1058,7 +1058,8 @@ retry:
 	}

 	pagefault_disable();
-	ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops, NULL);
+	ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops,
+					&gfs2_iomap_write_ops, NULL);
 	pagefault_enable();
 	if (ret > 0)
 		written += ret;
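Because iomap no longer discovers the pagecache hooks through ``iomap->folio_ops``, gfs2 exports ``gfs2_iomap_write_ops`` and threads it explicitly into ``iomap_file_buffered_write()`` and ``iomap_zero_range()``.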
fs/iomap/Makefile

@@ -9,9 +9,9 @@ ccflags-y += -I $(src) # needed for trace events
 obj-$(CONFIG_FS_IOMAP)		+= iomap.o

 iomap-y				+= trace.o \
-				   iter.o
-iomap-$(CONFIG_BLOCK)		+= buffered-io.o \
-				   direct-io.o \
+				   iter.o \
+				   buffered-io.o
+iomap-$(CONFIG_BLOCK)		+= direct-io.o \
 				   ioend.o \
 				   fiemap.o \
 				   seek.o
fs/iomap/buffered-io.c

@@ -3,20 +3,11 @@
  * Copyright (C) 2010 Red Hat, Inc.
  * Copyright (C) 2016-2023 Christoph Hellwig.
  */
-#include <linux/module.h>
-#include <linux/compiler.h>
-#include <linux/fs.h>
 #include <linux/iomap.h>
 #include <linux/pagemap.h>
 #include <linux/uio.h>
 #include <linux/buffer_head.h>
-#include <linux/dax.h>
 #include <linux/writeback.h>
-#include <linux/swap.h>
-#include <linux/bio.h>
-#include <linux/sched/signal.h>
 #include <linux/migrate.h>
 #include "internal.h"
 #include "trace.h"
-
-#include "../internal.h"
@@ -287,6 +278,46 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
 	*lenp = plen;
 }

+static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
+		loff_t pos)
+{
+	const struct iomap *srcmap = iomap_iter_srcmap(iter);
+
+	return srcmap->type != IOMAP_MAPPED ||
+		(srcmap->flags & IOMAP_F_NEW) ||
+		pos >= i_size_read(iter->inode);
+}
+
+/**
+ * iomap_read_inline_data - copy inline data into the page cache
+ * @iter: iteration structure
+ * @folio: folio to copy to
+ *
+ * Copy the inline data in @iter into @folio and zero out the rest of the folio.
+ * Only a single IOMAP_INLINE extent is allowed at the end of each file.
+ * Returns zero for success to complete the read, or the usual negative errno.
+ */
+static int iomap_read_inline_data(const struct iomap_iter *iter,
+		struct folio *folio)
+{
+	const struct iomap *iomap = iomap_iter_srcmap(iter);
+	size_t size = i_size_read(iter->inode) - iomap->offset;
+	size_t offset = offset_in_folio(folio, iomap->offset);
+
+	if (folio_test_uptodate(folio))
+		return 0;
+
+	if (WARN_ON_ONCE(size > iomap->length))
+		return -EIO;
+	if (offset > 0)
+		ifs_alloc(iter->inode, folio, iter->flags);
+
+	folio_fill_tail(folio, offset, iomap->inline_data, size);
+	iomap_set_range_uptodate(folio, offset, folio_size(folio) - offset);
+	return 0;
+}
+
+#ifdef CONFIG_BLOCK
 static void iomap_finish_folio_read(struct folio *folio, size_t off,
 		size_t len, int error)
 {
@@ -326,45 +357,6 @@ struct iomap_readpage_ctx {
 	struct readahead_control *rac;
 };

-/**
- * iomap_read_inline_data - copy inline data into the page cache
- * @iter: iteration structure
- * @folio: folio to copy to
- *
- * Copy the inline data in @iter into @folio and zero out the rest of the folio.
- * Only a single IOMAP_INLINE extent is allowed at the end of each file.
- * Returns zero for success to complete the read, or the usual negative errno.
- */
-static int iomap_read_inline_data(const struct iomap_iter *iter,
-		struct folio *folio)
-{
-	const struct iomap *iomap = iomap_iter_srcmap(iter);
-	size_t size = i_size_read(iter->inode) - iomap->offset;
-	size_t offset = offset_in_folio(folio, iomap->offset);
-
-	if (folio_test_uptodate(folio))
-		return 0;
-
-	if (WARN_ON_ONCE(size > iomap->length))
-		return -EIO;
-	if (offset > 0)
-		ifs_alloc(iter->inode, folio, iter->flags);
-
-	folio_fill_tail(folio, offset, iomap->inline_data, size);
-	iomap_set_range_uptodate(folio, offset, folio_size(folio) - offset);
-	return 0;
-}
-
-static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
-		loff_t pos)
-{
-	const struct iomap *srcmap = iomap_iter_srcmap(iter);
-
-	return srcmap->type != IOMAP_MAPPED ||
-		(srcmap->flags & IOMAP_F_NEW) ||
-		pos >= i_size_read(iter->inode);
-}
-
 static int iomap_readpage_iter(struct iomap_iter *iter,
 		struct iomap_readpage_ctx *ctx)
 {
@@ -557,6 +549,27 @@ void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
 }
 EXPORT_SYMBOL_GPL(iomap_readahead);

+static int iomap_read_folio_range(const struct iomap_iter *iter,
+		struct folio *folio, loff_t pos, size_t len)
+{
+	const struct iomap *srcmap = iomap_iter_srcmap(iter);
+	struct bio_vec bvec;
+	struct bio bio;
+
+	bio_init(&bio, srcmap->bdev, &bvec, 1, REQ_OP_READ);
+	bio.bi_iter.bi_sector = iomap_sector(srcmap, pos);
+	bio_add_folio_nofail(&bio, folio, len, offset_in_folio(folio, pos));
+	return submit_bio_wait(&bio);
+}
+#else
+static int iomap_read_folio_range(const struct iomap_iter *iter,
+		struct folio *folio, loff_t pos, size_t len)
+{
+	WARN_ON_ONCE(1);
+	return -EIO;
+}
+#endif /* CONFIG_BLOCK */
+
 /*
  * iomap_is_partially_uptodate checks whether blocks within a folio are
  * uptodate or not.
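The ``#else`` stub exists because the buffered-write code is now built even without ``CONFIG_BLOCK``; in that configuration a filesystem must supply ``->read_folio_range``, as there is no bio path to fall back to, so reaching the stub is a bug.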
@@ -670,22 +683,10 @@ iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
 			 pos + len - 1);
 }

-static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
-		size_t poff, size_t plen, const struct iomap *iomap)
-{
-	struct bio_vec bvec;
-	struct bio bio;
-
-	bio_init(&bio, iomap->bdev, &bvec, 1, REQ_OP_READ);
-	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
-	bio_add_folio_nofail(&bio, folio, plen, poff);
-	return submit_bio_wait(&bio);
-}
-
-static int __iomap_write_begin(const struct iomap_iter *iter, size_t len,
+static int __iomap_write_begin(const struct iomap_iter *iter,
+		const struct iomap_write_ops *write_ops, size_t len,
 		struct folio *folio)
 {
 	const struct iomap *srcmap = iomap_iter_srcmap(iter);
 	struct iomap_folio_state *ifs;
 	loff_t pos = iter->pos;
 	loff_t block_size = i_blocksize(iter->inode);

@@ -734,8 +735,12 @@ static int __iomap_write_begin(const struct iomap_iter *iter, size_t len,
 			if (iter->flags & IOMAP_NOWAIT)
 				return -EAGAIN;

-			status = iomap_read_folio_sync(block_start, folio,
-					poff, plen, srcmap);
+			if (write_ops && write_ops->read_folio_range)
+				status = write_ops->read_folio_range(iter,
+						folio, block_start, plen);
+			else
+				status = iomap_read_folio_range(iter,
+						folio, block_start, plen);
 			if (status)
 				return status;
 		}

@@ -745,28 +750,27 @@ static int __iomap_write_begin(const struct iomap_iter *iter, size_t len,
 	return 0;
 }

-static struct folio *__iomap_get_folio(struct iomap_iter *iter, size_t len)
+static struct folio *__iomap_get_folio(struct iomap_iter *iter,
+		const struct iomap_write_ops *write_ops, size_t len)
 {
-	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
 	loff_t pos = iter->pos;

 	if (!mapping_large_folio_support(iter->inode->i_mapping))
 		len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));

-	if (folio_ops && folio_ops->get_folio)
-		return folio_ops->get_folio(iter, pos, len);
-	else
-		return iomap_get_folio(iter, pos, len);
+	if (write_ops && write_ops->get_folio)
+		return write_ops->get_folio(iter, pos, len);
+	return iomap_get_folio(iter, pos, len);
 }

-static void __iomap_put_folio(struct iomap_iter *iter, size_t ret,
+static void __iomap_put_folio(struct iomap_iter *iter,
+		const struct iomap_write_ops *write_ops, size_t ret,
 		struct folio *folio)
 {
-	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
 	loff_t pos = iter->pos;

-	if (folio_ops && folio_ops->put_folio) {
-		folio_ops->put_folio(iter->inode, pos, ret, folio);
+	if (write_ops && write_ops->put_folio) {
+		write_ops->put_folio(iter->inode, pos, ret, folio);
 	} else {
 		folio_unlock(folio);
 		folio_put(folio);

@@ -803,10 +807,10 @@ static int iomap_write_begin_inline(const struct iomap_iter *iter,
 * offset, and length. Callers can optionally pass a max length *plen,
 * otherwise init to zero.
 */
-static int iomap_write_begin(struct iomap_iter *iter, struct folio **foliop,
+static int iomap_write_begin(struct iomap_iter *iter,
+		const struct iomap_write_ops *write_ops, struct folio **foliop,
 		size_t *poffset, u64 *plen)
 {
-	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
 	const struct iomap *srcmap = iomap_iter_srcmap(iter);
 	loff_t pos = iter->pos;
 	u64 len = min_t(u64, SIZE_MAX, iomap_length(iter));

@@ -821,7 +825,7 @@ static int iomap_write_begin(struct iomap_iter *iter, struct folio **foliop,
 	if (fatal_signal_pending(current))
 		return -EINTR;

-	folio = __iomap_get_folio(iter, len);
+	folio = __iomap_get_folio(iter, write_ops, len);
 	if (IS_ERR(folio))
 		return PTR_ERR(folio);

@@ -835,8 +839,8 @@ static int iomap_write_begin(struct iomap_iter *iter, struct folio **foliop,
 	 * could do the wrong thing here (zero a page range incorrectly or fail
 	 * to zero) and corrupt data.
 	 */
-	if (folio_ops && folio_ops->iomap_valid) {
-		bool iomap_valid = folio_ops->iomap_valid(iter->inode,
+	if (write_ops && write_ops->iomap_valid) {
+		bool iomap_valid = write_ops->iomap_valid(iter->inode,
 							  &iter->iomap);
 		if (!iomap_valid) {
 			iter->iomap.flags |= IOMAP_F_STALE;

@@ -852,7 +856,7 @@ static int iomap_write_begin(struct iomap_iter *iter, struct folio **foliop,
 	else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
 		status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
 	else
-		status = __iomap_write_begin(iter, len, folio);
+		status = __iomap_write_begin(iter, write_ops, len, folio);

 	if (unlikely(status))
 		goto out_unlock;

@@ -862,8 +866,7 @@ static int iomap_write_begin(struct iomap_iter *iter, struct folio **foliop,
 	return 0;

 out_unlock:
-	__iomap_put_folio(iter, 0, folio);
-
+	__iomap_put_folio(iter, write_ops, 0, folio);
 	return status;
 }
@@ -934,7 +937,8 @@ static bool iomap_write_end(struct iomap_iter *iter, size_t len, size_t copied,
 	return __iomap_write_end(iter->inode, pos, len, copied, folio);
 }

-static int iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
+static int iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i,
+		const struct iomap_write_ops *write_ops)
 {
 	ssize_t total_written = 0;
 	int status = 0;

@@ -978,7 +982,8 @@ retry:
 			break;
 		}

-		status = iomap_write_begin(iter, &folio, &offset, &bytes);
+		status = iomap_write_begin(iter, write_ops, &folio, &offset,
+				&bytes);
 		if (unlikely(status)) {
 			iomap_write_failed(iter->inode, iter->pos, bytes);
 			break;

@@ -1007,7 +1012,7 @@ retry:
 			i_size_write(iter->inode, pos + written);
 			iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
 		}
-		__iomap_put_folio(iter, written, folio);
+		__iomap_put_folio(iter, write_ops, written, folio);

 		if (old_size < pos)
 			pagecache_isize_extended(iter->inode, old_size, pos);

@@ -1040,7 +1045,8 @@ retry:

 ssize_t
 iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
-		const struct iomap_ops *ops, void *private)
+		const struct iomap_ops *ops,
+		const struct iomap_write_ops *write_ops, void *private)
 {
 	struct iomap_iter iter = {
 		.inode		= iocb->ki_filp->f_mapping->host,

@@ -1057,7 +1063,7 @@ iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
 		iter.flags |= IOMAP_DONTCACHE;

 	while ((ret = iomap_iter(&iter, ops)) > 0)
-		iter.status = iomap_write_iter(&iter, i);
+		iter.status = iomap_write_iter(&iter, i, write_ops);

 	if (unlikely(iter.pos == iocb->ki_pos))
 		return ret;
@@ -1291,7 +1297,8 @@ void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
 }
 EXPORT_SYMBOL_GPL(iomap_write_delalloc_release);

-static int iomap_unshare_iter(struct iomap_iter *iter)
+static int iomap_unshare_iter(struct iomap_iter *iter,
+		const struct iomap_write_ops *write_ops)
 {
 	struct iomap *iomap = &iter->iomap;
 	u64 bytes = iomap_length(iter);

@@ -1306,14 +1313,15 @@ static int iomap_unshare_iter(struct iomap_iter *iter)
 		bool ret;

 		bytes = min_t(u64, SIZE_MAX, bytes);
-		status = iomap_write_begin(iter, &folio, &offset, &bytes);
+		status = iomap_write_begin(iter, write_ops, &folio, &offset,
+				&bytes);
 		if (unlikely(status))
 			return status;
 		if (iomap->flags & IOMAP_F_STALE)
 			break;

 		ret = iomap_write_end(iter, bytes, bytes, folio);
-		__iomap_put_folio(iter, bytes, folio);
+		__iomap_put_folio(iter, write_ops, bytes, folio);
 		if (WARN_ON_ONCE(!ret))
 			return -EIO;

@@ -1331,7 +1339,8 @@ static int iomap_unshare_iter(struct iomap_iter *iter)

 int
 iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
-		const struct iomap_ops *ops)
+		const struct iomap_ops *ops,
+		const struct iomap_write_ops *write_ops)
 {
 	struct iomap_iter iter = {
 		.inode		= inode,

@@ -1346,7 +1355,7 @@ iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,

 	iter.len = min(len, size - pos);
 	while ((ret = iomap_iter(&iter, ops)) > 0)
-		iter.status = iomap_unshare_iter(&iter);
+		iter.status = iomap_unshare_iter(&iter, write_ops);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(iomap_file_unshare);
@@ -1365,7 +1374,8 @@ static inline int iomap_zero_iter_flush_and_stale(struct iomap_iter *i)
 	return filemap_write_and_wait_range(mapping, i->pos, end);
 }

-static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
+static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero,
+		const struct iomap_write_ops *write_ops)
 {
 	u64 bytes = iomap_length(iter);
 	int status;

@@ -1376,7 +1386,8 @@ static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
 		bool ret;

 		bytes = min_t(u64, SIZE_MAX, bytes);
-		status = iomap_write_begin(iter, &folio, &offset, &bytes);
+		status = iomap_write_begin(iter, write_ops, &folio, &offset,
+				&bytes);
 		if (status)
 			return status;
 		if (iter->iomap.flags & IOMAP_F_STALE)

@@ -1389,7 +1400,7 @@ static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
 		folio_mark_accessed(folio);

 		ret = iomap_write_end(iter, bytes, bytes, folio);
-		__iomap_put_folio(iter, bytes, folio);
+		__iomap_put_folio(iter, write_ops, bytes, folio);
 		if (WARN_ON_ONCE(!ret))
 			return -EIO;

@@ -1405,7 +1416,8 @@ static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)

 int
 iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
-		const struct iomap_ops *ops, void *private)
+		const struct iomap_ops *ops,
+		const struct iomap_write_ops *write_ops, void *private)
 {
 	struct iomap_iter iter = {
 		.inode		= inode,

@@ -1435,7 +1447,8 @@ iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
 	    filemap_range_needs_writeback(mapping, pos, pos + plen - 1)) {
 		iter.len = plen;
 		while ((ret = iomap_iter(&iter, ops)) > 0)
-			iter.status = iomap_zero_iter(&iter, did_zero);
+			iter.status = iomap_zero_iter(&iter, did_zero,
+					write_ops);

 		iter.len = len - (iter.pos - pos);
 		if (ret || !iter.len)

@@ -1466,7 +1479,7 @@ iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
 			continue;
 		}

-		iter.status = iomap_zero_iter(&iter, did_zero);
+		iter.status = iomap_zero_iter(&iter, did_zero, write_ops);
 	}
 	return ret;
 }

@@ -1474,7 +1487,8 @@ EXPORT_SYMBOL_GPL(iomap_zero_range);

 int
 iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
-		const struct iomap_ops *ops, void *private)
+		const struct iomap_ops *ops,
+		const struct iomap_write_ops *write_ops, void *private)
 {
 	unsigned int blocksize = i_blocksize(inode);
 	unsigned int off = pos & (blocksize - 1);

@@ -1483,7 +1497,7 @@ iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
 	if (!off)
 		return 0;
 	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops,
-			private);
+			write_ops, private);
 }
 EXPORT_SYMBOL_GPL(iomap_truncate_page);
@@ -1537,7 +1551,18 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(iomap_page_mkwrite);

-static void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
+void iomap_start_folio_write(struct inode *inode, struct folio *folio,
+		size_t len)
+{
+	struct iomap_folio_state *ifs = folio->private;
+
+	WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs);
+	if (ifs)
+		atomic_add(len, &ifs->write_bytes_pending);
+}
+EXPORT_SYMBOL_GPL(iomap_start_folio_write);
+
+void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
 		size_t len)
 {
 	struct iomap_folio_state *ifs = folio->private;
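These helpers expose the per-folio ``write_bytes_pending`` accounting to filesystems that submit writeback I/O themselves instead of through iomap-built bios (fuse, above, is the first user). A rough sketch of the pairing (``myfs_queue_write()`` and the completion path are hypothetical):

	/* Sketch only: myfs_queue_write() is a hypothetical helper. */
	static ssize_t myfs_writeback_range(struct iomap_writepage_ctx *wpc,
			struct folio *folio, u64 pos, unsigned int len, u64 end_pos)
	{
		/* Account the range as in flight before queueing it. */
		iomap_start_folio_write(wpc->inode, folio, len);
		myfs_queue_write(wpc, folio, pos, len);
		return len;
	}

	/* Called from the filesystem's own I/O completion path: */
	static void myfs_write_done(struct inode *inode, struct folio *folio,
			size_t len)
	{
		/* Drops the pending count; ends folio writeback when it hits zero. */
		iomap_finish_folio_write(inode, folio, len);
	}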
@ -1548,269 +1573,32 @@ static void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
|
|||
if (!ifs || atomic_sub_and_test(len, &ifs->write_bytes_pending))
|
||||
folio_end_writeback(folio);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(iomap_finish_folio_write);
|
||||
|
||||
/*
 * We're now finished for good with this ioend structure. Update the page
 * state, release holds on bios, and finally free up memory. Do not use the
 * ioend after this.
 */
u32 iomap_finish_ioend_buffered(struct iomap_ioend *ioend)
static int iomap_writeback_range(struct iomap_writepage_ctx *wpc,
                struct folio *folio, u64 pos, u32 rlen, u64 end_pos,
                bool *wb_pending)
{
        struct inode *inode = ioend->io_inode;
        struct bio *bio = &ioend->io_bio;
        struct folio_iter fi;
        u32 folio_count = 0;

        if (ioend->io_error) {
                mapping_set_error(inode->i_mapping, ioend->io_error);
                if (!bio_flagged(bio, BIO_QUIET)) {
                        pr_err_ratelimited(
"%s: writeback error on inode %lu, offset %lld, sector %llu",
                                inode->i_sb->s_id, inode->i_ino,
                                ioend->io_offset, ioend->io_sector);
                }
        }

        /* walk all folios in bio, ending page IO on them */
        bio_for_each_folio_all(fi, bio) {
                iomap_finish_folio_write(inode, fi.folio, fi.length);
                folio_count++;
        }

        bio_put(bio);   /* frees the ioend */
        return folio_count;
}

static void iomap_writepage_end_bio(struct bio *bio)
{
        struct iomap_ioend *ioend = iomap_ioend_from_bio(bio);

        ioend->io_error = blk_status_to_errno(bio->bi_status);
        iomap_finish_ioend_buffered(ioend);
}

/*
 * Submit an ioend.
 *
 * If @error is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we've marked pages for writeback.
 * We cannot cancel ioend directly in that case, so call the bio end I/O handler
 * with the error status here to run the normal I/O completion handler to clear
 * the writeback bit and let the file system process the errors.
 */
static int iomap_submit_ioend(struct iomap_writepage_ctx *wpc, int error)
{
        if (!wpc->ioend)
                return error;

        /*
         * Let the file systems prepare the I/O submission and hook in an I/O
         * completion handler. This also needs to happen after a failure so
         * that the file system end I/O handler gets called to clean up.
         */
        if (wpc->ops->submit_ioend) {
                error = wpc->ops->submit_ioend(wpc, error);
        } else {
                if (WARN_ON_ONCE(wpc->iomap.flags & IOMAP_F_ANON_WRITE))
                        error = -EIO;
                if (!error)
                        submit_bio(&wpc->ioend->io_bio);
        }

        if (error) {
                wpc->ioend->io_bio.bi_status = errno_to_blk_status(error);
                bio_endio(&wpc->ioend->io_bio);
        }

        wpc->ioend = NULL;
        return error;
}

static struct iomap_ioend *iomap_alloc_ioend(struct iomap_writepage_ctx *wpc,
                struct writeback_control *wbc, struct inode *inode, loff_t pos,
                u16 ioend_flags)
{
        struct bio *bio;

        bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS,
                        REQ_OP_WRITE | wbc_to_write_flags(wbc),
                        GFP_NOFS, &iomap_ioend_bioset);
        bio->bi_iter.bi_sector = iomap_sector(&wpc->iomap, pos);
        bio->bi_end_io = iomap_writepage_end_bio;
        bio->bi_write_hint = inode->i_write_hint;
        wbc_init_bio(wbc, bio);
        wpc->nr_folios = 0;
        return iomap_init_ioend(inode, bio, pos, ioend_flags);
}

static bool iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t pos,
                u16 ioend_flags)
{
        if (ioend_flags & IOMAP_IOEND_BOUNDARY)
                return false;
        if ((ioend_flags & IOMAP_IOEND_NOMERGE_FLAGS) !=
            (wpc->ioend->io_flags & IOMAP_IOEND_NOMERGE_FLAGS))
                return false;
        if (pos != wpc->ioend->io_offset + wpc->ioend->io_size)
                return false;
        if (!(wpc->iomap.flags & IOMAP_F_ANON_WRITE) &&
            iomap_sector(&wpc->iomap, pos) !=
            bio_end_sector(&wpc->ioend->io_bio))
                return false;
        /*
         * Limit ioend bio chain lengths to minimise IO completion latency. This
         * also prevents long tight loops ending page writeback on all the
         * folios in the ioend.
         */
        if (wpc->nr_folios >= IOEND_BATCH_SIZE)
                return false;
        return true;
}

/*
 * Test to see if we have an existing ioend structure that we could append to
 * first; otherwise finish off the current ioend and start another.
 *
 * If a new ioend is created and cached, the old ioend is submitted to the block
 * layer instantly. Batching optimisations are provided by higher level block
 * plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on the
 * writepage context that the caller will need to submit.
 */
static int iomap_add_to_ioend(struct iomap_writepage_ctx *wpc,
                struct writeback_control *wbc, struct folio *folio,
                struct inode *inode, loff_t pos, loff_t end_pos,
                unsigned len)
{
        struct iomap_folio_state *ifs = folio->private;
        size_t poff = offset_in_folio(folio, pos);
        unsigned int ioend_flags = 0;
        int error;

        if (wpc->iomap.type == IOMAP_UNWRITTEN)
                ioend_flags |= IOMAP_IOEND_UNWRITTEN;
        if (wpc->iomap.flags & IOMAP_F_SHARED)
                ioend_flags |= IOMAP_IOEND_SHARED;
        if (folio_test_dropbehind(folio))
                ioend_flags |= IOMAP_IOEND_DONTCACHE;
        if (pos == wpc->iomap.offset && (wpc->iomap.flags & IOMAP_F_BOUNDARY))
                ioend_flags |= IOMAP_IOEND_BOUNDARY;

        if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos, ioend_flags)) {
new_ioend:
                error = iomap_submit_ioend(wpc, 0);
                if (error)
                        return error;
                wpc->ioend = iomap_alloc_ioend(wpc, wbc, inode, pos,
                                ioend_flags);
        }

        if (!bio_add_folio(&wpc->ioend->io_bio, folio, len, poff))
                goto new_ioend;

        if (ifs)
                atomic_add(len, &ifs->write_bytes_pending);

        /*
         * Clamp io_offset and io_size to the incore EOF so that ondisk
         * file size updates in the ioend completion are byte-accurate.
         * This avoids recovering files with zeroed tail regions when
         * writeback races with appending writes:
         *
         * Thread 1:                  Thread 2:
         * ------------               -----------
         * write [A, A+B]
         * update inode size to A+B
         * submit I/O [A, A+BS]
         *                            write [A+B, A+B+C]
         *                            update inode size to A+B+C
         * <I/O completes, updates disk size to min(A+B+C, A+BS)>
         * <power failure>
         *
         * After reboot:
         *   1) with A+B+C < A+BS, the file has zero padding in range
         *      [A+B, A+B+C]
         *
         *    |<     Block Size (BS)   >|
         *    |DDDDDDDDDDDD0000000000000|
         *    ^           ^        ^
         *    A          A+B     A+B+C
         *                       (EOF)
         *
         *   2) with A+B+C > A+BS, the file has zero padding in range
         *      [A+B, A+BS]
         *
         *    |<     Block Size (BS)   >|<     Block Size (BS)    >|
         *    |DDDDDDDDDDDD0000000000000|00000000000000000000000000|
         *    ^           ^      ^           ^
         *    A          A+B    A+BS       A+B+C
         *                      (EOF)
         *
         *    D = Valid Data
         *    0 = Zero Padding
         *
         * Note that this defeats the ability to chain the ioends of
         * appending writes.
         */
        wpc->ioend->io_size += len;
        if (wpc->ioend->io_offset + wpc->ioend->io_size > end_pos)
                wpc->ioend->io_size = end_pos - wpc->ioend->io_offset;

        wbc_account_cgroup_owner(wbc, folio, len);
        return 0;
}

static int iomap_writepage_map_blocks(struct iomap_writepage_ctx *wpc,
                struct writeback_control *wbc, struct folio *folio,
                struct inode *inode, u64 pos, u64 end_pos,
                unsigned dirty_len, unsigned *count)
{
        int error;

        do {
                unsigned map_len;
                ssize_t ret;

                error = wpc->ops->map_blocks(wpc, inode, pos, dirty_len);
                if (error)
                        break;
                trace_iomap_writepage_map(inode, pos, dirty_len, &wpc->iomap);
                ret = wpc->ops->writeback_range(wpc, folio, pos, rlen, end_pos);
                if (WARN_ON_ONCE(ret == 0 || ret > rlen))
                        return -EIO;
                if (ret < 0)
                        return ret;
                rlen -= ret;
                pos += ret;

                map_len = min_t(u64, dirty_len,
                                wpc->iomap.offset + wpc->iomap.length - pos);
                WARN_ON_ONCE(!folio->private && map_len < dirty_len);
                /*
                 * Holes are not written back by ->writeback_range, so track
                 * if we did handle anything that is not a hole here.
                 */
                if (wpc->iomap.type != IOMAP_HOLE)
                        *wb_pending = true;
        } while (rlen);

                switch (wpc->iomap.type) {
                case IOMAP_INLINE:
                        WARN_ON_ONCE(1);
                        error = -EIO;
                        break;
                case IOMAP_HOLE:
                        break;
                default:
                        error = iomap_add_to_ioend(wpc, wbc, folio, inode, pos,
                                        end_pos, map_len);
                        if (!error)
                                (*count)++;
                        break;
                }
                dirty_len -= map_len;
                pos += map_len;
        } while (dirty_len && !error);

        /*
         * We cannot cancel the ioend directly here on error. We may have
         * already set other pages under writeback and hence we have to run I/O
         * completion to mark the error state of the pages under writeback
         * appropriately.
         *
         * Just let the file system know what portion of the folio failed to
         * map.
         */
        if (error && wpc->ops->discard_folio)
                wpc->ops->discard_folio(folio, pos);
        return error;
        return 0;
}
/*

@@ -1819,7 +1607,7 @@ static int iomap_writepage_map_blocks(struct iomap_writepage_ctx *wpc,
 * If the folio is entirely beyond i_size, return false. If it straddles
 * i_size, adjust end_pos and zero all data beyond i_size.
 */
static bool iomap_writepage_handle_eof(struct folio *folio, struct inode *inode,
static bool iomap_writeback_handle_eof(struct folio *folio, struct inode *inode,
                u64 *end_pos)
{
        u64 isize = i_size_read(inode);

@@ -1871,15 +1659,14 @@ static bool iomap_writepage_handle_eof(struct folio *folio, struct inode *inode,
        return true;
}

static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
                struct writeback_control *wbc, struct folio *folio)
int iomap_writeback_folio(struct iomap_writepage_ctx *wpc, struct folio *folio)
{
        struct iomap_folio_state *ifs = folio->private;
        struct inode *inode = folio->mapping->host;
        struct inode *inode = wpc->inode;
        u64 pos = folio_pos(folio);
        u64 end_pos = pos + folio_size(folio);
        u64 end_aligned = 0;
        unsigned count = 0;
        bool wb_pending = false;
        int error = 0;
        u32 rlen;

@@ -1887,12 +1674,10 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
        WARN_ON_ONCE(folio_test_dirty(folio));
        WARN_ON_ONCE(folio_test_writeback(folio));

        trace_iomap_writepage(inode, pos, folio_size(folio));
        trace_iomap_writeback_folio(inode, pos, folio_size(folio));

        if (!iomap_writepage_handle_eof(folio, inode, &end_pos)) {
                folio_unlock(folio);
        if (!iomap_writeback_handle_eof(folio, inode, &end_pos))
                return 0;
        }
        WARN_ON_ONCE(end_pos <= pos);

        if (i_blocks_per_folio(inode, folio) > 1) {

@@ -1908,7 +1693,7 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
                 * all blocks.
                 */
                WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending) != 0);
                atomic_inc(&ifs->write_bytes_pending);
                iomap_start_folio_write(inode, folio, 1);
        }

        /*

@@ -1922,14 +1707,14 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
         */
        end_aligned = round_up(end_pos, i_blocksize(inode));
        while ((rlen = iomap_find_dirty_range(folio, &pos, end_aligned))) {
                error = iomap_writepage_map_blocks(wpc, wbc, folio, inode,
                                pos, end_pos, rlen, &count);
                error = iomap_writeback_range(wpc, folio, pos, rlen, end_pos,
                                &wb_pending);
                if (error)
                        break;
                pos += rlen;
        }

        if (count)
        if (wb_pending)
                wpc->nr_folios++;

        /*

@@ -1946,23 +1731,22 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
         * already at this point. In that case we need to clear the writeback
         * bit ourselves right after unlocking the page.
         */
        folio_unlock(folio);
        if (ifs) {
                if (atomic_dec_and_test(&ifs->write_bytes_pending))
                        folio_end_writeback(folio);
        } else {
                if (!count)
                if (!wb_pending)
                        folio_end_writeback(folio);
        }
        mapping_set_error(inode->i_mapping, error);
        return error;
}
EXPORT_SYMBOL_GPL(iomap_writeback_folio);

int
iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
                struct iomap_writepage_ctx *wpc,
                const struct iomap_writeback_ops *ops)
iomap_writepages(struct iomap_writepage_ctx *wpc)
{
        struct address_space *mapping = wpc->inode->i_mapping;
        struct folio *folio = NULL;
        int error;

@@ -1974,9 +1758,22 @@ iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
                        PF_MEMALLOC))
                return -EIO;

        wpc->ops = ops;
        while ((folio = writeback_iter(mapping, wbc, folio, &error)))
                error = iomap_writepage_map(wpc, wbc, folio);
        return iomap_submit_ioend(wpc, error);
        while ((folio = writeback_iter(mapping, wpc->wbc, folio, &error))) {
                error = iomap_writeback_folio(wpc, folio);
                folio_unlock(folio);
        }

        /*
         * If @error is non-zero, it means that we have a situation where some
         * part of the submission process has failed after we've marked pages
         * for writeback.
         *
         * We cannot cancel the writeback directly in that case, so always call
         * ->writeback_submit to run the I/O completion handler to clear the
         * writeback bit and let the file system process the errors.
         */
        if (wpc->wb_ctx)
                return wpc->ops->writeback_submit(wpc, error);
        return error;
}
EXPORT_SYMBOL_GPL(iomap_writepages);
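Since the writepage context now carries the inode, the writeback_control and the ops, the caller side shrinks to filling in one structure. A sketch of the new convention for a hypothetical filesystem; my_fs_writeback_ops stands in for the filesystem's ops table, and the xfs and zonefs conversions further down in this diff follow the same shape:

.. code-block:: c

    static int my_fs_writepages(struct address_space *mapping,
                    struct writeback_control *wbc)
    {
            struct iomap_writepage_ctx wpc = {
                    .inode  = mapping->host,
                    .wbc    = wbc,
                    .ops    = &my_fs_writeback_ops,
            };

            return iomap_writepages(&wpc);
    }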
@@ -3,14 +3,9 @@
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016-2025 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/fscrypt.h>
#include <linux/pagemap.h>
#include <linux/iomap.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"
#include "trace.h"

@@ -2,9 +2,6 @@
/*
 * Copyright (c) 2016-2021 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/fiemap.h>
#include <linux/pagemap.h>

@@ -4,7 +4,6 @@

#define IOEND_BATCH_SIZE        4096

u32 iomap_finish_ioend_buffered(struct iomap_ioend *ioend);
u32 iomap_finish_ioend_direct(struct iomap_ioend *ioend);

#endif /* _IOMAP_INTERNAL_H */

fs/iomap/ioend.c
@@ -1,10 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2024-2025 Christoph Hellwig.
 * Copyright (c) 2016-2025 Christoph Hellwig.
 */
#include <linux/iomap.h>
#include <linux/list_sort.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include "internal.h"
#include "trace.h"

struct bio_set iomap_ioend_bioset;
EXPORT_SYMBOL_GPL(iomap_ioend_bioset);

@@ -28,6 +31,221 @@ struct iomap_ioend *iomap_init_ioend(struct inode *inode,
}
EXPORT_SYMBOL_GPL(iomap_init_ioend);

/*
 * We're now finished for good with this ioend structure. Update the folio
 * state, release holds on bios, and finally free up memory. Do not use the
 * ioend after this.
 */
static u32 iomap_finish_ioend_buffered(struct iomap_ioend *ioend)
{
        struct inode *inode = ioend->io_inode;
        struct bio *bio = &ioend->io_bio;
        struct folio_iter fi;
        u32 folio_count = 0;

        if (ioend->io_error) {
                mapping_set_error(inode->i_mapping, ioend->io_error);
                if (!bio_flagged(bio, BIO_QUIET)) {
                        pr_err_ratelimited(
"%s: writeback error on inode %lu, offset %lld, sector %llu",
                                inode->i_sb->s_id, inode->i_ino,
                                ioend->io_offset, ioend->io_sector);
                }
        }

        /* walk all folios in bio, ending page IO on them */
        bio_for_each_folio_all(fi, bio) {
                iomap_finish_folio_write(inode, fi.folio, fi.length);
                folio_count++;
        }

        bio_put(bio);   /* frees the ioend */
        return folio_count;
}

static void ioend_writeback_end_bio(struct bio *bio)
{
        struct iomap_ioend *ioend = iomap_ioend_from_bio(bio);

        ioend->io_error = blk_status_to_errno(bio->bi_status);
        iomap_finish_ioend_buffered(ioend);
}

/*
 * We cannot cancel the ioend directly in case of an error, so call the bio end
 * I/O handler with the error status here to run the normal I/O completion
 * handler.
 */
int iomap_ioend_writeback_submit(struct iomap_writepage_ctx *wpc, int error)
{
        struct iomap_ioend *ioend = wpc->wb_ctx;

        if (!ioend->io_bio.bi_end_io)
                ioend->io_bio.bi_end_io = ioend_writeback_end_bio;

        if (WARN_ON_ONCE(wpc->iomap.flags & IOMAP_F_ANON_WRITE))
                error = -EIO;

        if (error) {
                ioend->io_bio.bi_status = errno_to_blk_status(error);
                bio_endio(&ioend->io_bio);
                return error;
        }

        submit_bio(&ioend->io_bio);
        return 0;
}
EXPORT_SYMBOL_GPL(iomap_ioend_writeback_submit);
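A filesystem that needs its own bio completion handling can still lean on this helper, as xfs does further down in this diff. A sketch, where my_fs_end_bio is an assumed bi_end_io handler supplied by the filesystem:

.. code-block:: c

    static int my_fs_writeback_submit(struct iomap_writepage_ctx *wpc, int error)
    {
            struct iomap_ioend *ioend = wpc->wb_ctx;

            /*
             * Install the custom completion first; the helper only sets its
             * default handler when bi_end_io is still unset.
             */
            ioend->io_bio.bi_end_io = my_fs_end_bio;
            return iomap_ioend_writeback_submit(wpc, error);
    }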

static struct iomap_ioend *iomap_alloc_ioend(struct iomap_writepage_ctx *wpc,
                loff_t pos, u16 ioend_flags)
{
        struct bio *bio;

        bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS,
                        REQ_OP_WRITE | wbc_to_write_flags(wpc->wbc),
                        GFP_NOFS, &iomap_ioend_bioset);
        bio->bi_iter.bi_sector = iomap_sector(&wpc->iomap, pos);
        bio->bi_write_hint = wpc->inode->i_write_hint;
        wbc_init_bio(wpc->wbc, bio);
        wpc->nr_folios = 0;
        return iomap_init_ioend(wpc->inode, bio, pos, ioend_flags);
}

static bool iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t pos,
                u16 ioend_flags)
{
        struct iomap_ioend *ioend = wpc->wb_ctx;

        if (ioend_flags & IOMAP_IOEND_BOUNDARY)
                return false;
        if ((ioend_flags & IOMAP_IOEND_NOMERGE_FLAGS) !=
            (ioend->io_flags & IOMAP_IOEND_NOMERGE_FLAGS))
                return false;
        if (pos != ioend->io_offset + ioend->io_size)
                return false;
        if (!(wpc->iomap.flags & IOMAP_F_ANON_WRITE) &&
            iomap_sector(&wpc->iomap, pos) != bio_end_sector(&ioend->io_bio))
                return false;
        /*
         * Limit ioend bio chain lengths to minimise IO completion latency. This
         * also prevents long tight loops ending page writeback on all the
         * folios in the ioend.
         */
        if (wpc->nr_folios >= IOEND_BATCH_SIZE)
                return false;
        return true;
}

/*
 * Test to see if we have an existing ioend structure that we could append to
 * first; otherwise finish off the current ioend and start another.
 *
 * If a new ioend is created and cached, the old ioend is submitted to the block
 * layer instantly. Batching optimisations are provided by higher level block
 * plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on the
 * writepage context that the caller will need to submit.
 */
ssize_t iomap_add_to_ioend(struct iomap_writepage_ctx *wpc, struct folio *folio,
                loff_t pos, loff_t end_pos, unsigned int dirty_len)
{
        struct iomap_ioend *ioend = wpc->wb_ctx;
        size_t poff = offset_in_folio(folio, pos);
        unsigned int ioend_flags = 0;
        unsigned int map_len = min_t(u64, dirty_len,
                        wpc->iomap.offset + wpc->iomap.length - pos);
        int error;

        trace_iomap_add_to_ioend(wpc->inode, pos, dirty_len, &wpc->iomap);

        WARN_ON_ONCE(!folio->private && map_len < dirty_len);

        switch (wpc->iomap.type) {
        case IOMAP_INLINE:
                WARN_ON_ONCE(1);
                return -EIO;
        case IOMAP_HOLE:
                return map_len;
        default:
                break;
        }

        if (wpc->iomap.type == IOMAP_UNWRITTEN)
                ioend_flags |= IOMAP_IOEND_UNWRITTEN;
        if (wpc->iomap.flags & IOMAP_F_SHARED)
                ioend_flags |= IOMAP_IOEND_SHARED;
        if (folio_test_dropbehind(folio))
                ioend_flags |= IOMAP_IOEND_DONTCACHE;
        if (pos == wpc->iomap.offset && (wpc->iomap.flags & IOMAP_F_BOUNDARY))
                ioend_flags |= IOMAP_IOEND_BOUNDARY;

        if (!ioend || !iomap_can_add_to_ioend(wpc, pos, ioend_flags)) {
new_ioend:
                if (ioend) {
                        error = wpc->ops->writeback_submit(wpc, 0);
                        if (error)
                                return error;
                }
                wpc->wb_ctx = ioend = iomap_alloc_ioend(wpc, pos, ioend_flags);
        }

        if (!bio_add_folio(&ioend->io_bio, folio, map_len, poff))
                goto new_ioend;

        iomap_start_folio_write(wpc->inode, folio, map_len);

        /*
         * Clamp io_offset and io_size to the incore EOF so that ondisk
         * file size updates in the ioend completion are byte-accurate.
         * This avoids recovering files with zeroed tail regions when
         * writeback races with appending writes:
         *
         * Thread 1:                  Thread 2:
         * ------------               -----------
         * write [A, A+B]
         * update inode size to A+B
         * submit I/O [A, A+BS]
         *                            write [A+B, A+B+C]
         *                            update inode size to A+B+C
         * <I/O completes, updates disk size to min(A+B+C, A+BS)>
         * <power failure>
         *
         * After reboot:
         *   1) with A+B+C < A+BS, the file has zero padding in range
         *      [A+B, A+B+C]
         *
         *    |<     Block Size (BS)   >|
         *    |DDDDDDDDDDDD0000000000000|
         *    ^           ^        ^
         *    A          A+B     A+B+C
         *                       (EOF)
         *
         *   2) with A+B+C > A+BS, the file has zero padding in range
         *      [A+B, A+BS]
         *
         *    |<     Block Size (BS)   >|<     Block Size (BS)    >|
         *    |DDDDDDDDDDDD0000000000000|00000000000000000000000000|
         *    ^           ^      ^           ^
         *    A          A+B    A+BS       A+B+C
         *                      (EOF)
         *
         *    D = Valid Data
         *    0 = Zero Padding
         *
         * Note that this defeats the ability to chain the ioends of
         * appending writes.
         */
        ioend->io_size += map_len;
        if (ioend->io_offset + ioend->io_size > end_pos)
                ioend->io_size = end_pos - ioend->io_offset;

        wbc_account_cgroup_owner(wpc->wbc, folio, map_len);
        return map_len;
}
EXPORT_SYMBOL_GPL(iomap_add_to_ioend);

static u32 iomap_finish_ioend(struct iomap_ioend *ioend, int error)
{
        if (ioend->io_parent) {
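To make the io_size clamp in iomap_add_to_ioend above concrete, a small worked example, assuming a 4096-byte block size and an in-core EOF of 6000 bytes:

.. code-block:: c

    /*
     * io_offset = 0, end_pos (in-core EOF) = 6000, BS = 4096
     *
     * first  iomap_add_to_ioend(): io_size = 4096, 0 + 4096 <= 6000, kept
     * second iomap_add_to_ioend(): io_size = 8192, 0 + 8192 >  6000,
     *                              clamped back to end_pos = 6000
     *
     * so a completion that updates the on-disk file size sees 6000, never
     * 8192, and a crash cannot promote the zeroed tail of the last block
     * to valid data.
     */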
@@ -3,7 +3,6 @@
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016-2021 Christoph Hellwig.
 */
#include <linux/fs.h>
#include <linux/iomap.h>
#include "trace.h"

@@ -3,12 +3,8 @@
 * Copyright (C) 2017 Red Hat, Inc.
 * Copyright (c) 2018-2021 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>

static int iomap_seek_hole_iter(struct iomap_iter *iter,
                loff_t *hole_pos)

@@ -3,9 +3,6 @@
 * Copyright (C) 2018 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/swap.h>

@@ -3,7 +3,6 @@
 * Copyright (c) 2019 Christoph Hellwig
 */
#include <linux/iomap.h>
#include <linux/uio.h>

/*
 * We include this last to have the helpers above available for the trace
@@ -79,7 +79,7 @@ DECLARE_EVENT_CLASS(iomap_range_class,
DEFINE_EVENT(iomap_range_class, name, \
        TP_PROTO(struct inode *inode, loff_t off, u64 len),\
        TP_ARGS(inode, off, len))
DEFINE_RANGE_EVENT(iomap_writepage);
DEFINE_RANGE_EVENT(iomap_writeback_folio);
DEFINE_RANGE_EVENT(iomap_release_folio);
DEFINE_RANGE_EVENT(iomap_invalidate_folio);
DEFINE_RANGE_EVENT(iomap_dio_invalidate_fail);

@@ -169,7 +169,7 @@ DEFINE_EVENT(iomap_class, name, \
DEFINE_IOMAP_EVENT(iomap_iter_dstmap);
DEFINE_IOMAP_EVENT(iomap_iter_srcmap);

TRACE_EVENT(iomap_writepage_map,
TRACE_EVENT(iomap_add_to_ioend,
        TP_PROTO(struct inode *inode, u64 pos, unsigned int dirty_len,
                struct iomap *iomap),
        TP_ARGS(inode, pos, dirty_len, iomap),
@@ -233,6 +233,47 @@ xfs_end_bio(
        spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
}

/*
 * We cannot cancel the ioend directly on error. We may have already set other
 * pages under writeback and hence we have to run I/O completion to mark the
 * error state of the pages under writeback appropriately.
 *
 * If the folio has delalloc blocks on it, the caller is asking us to punch them
 * out. If we don't, we can leave a stale delalloc mapping covered by a clean
 * page that needs to be dirtied again before the delalloc mapping can be
 * converted. This stale delalloc mapping can trip up a later direct I/O read
 * operation on the same region.
 *
 * We prevent this by truncating away the delalloc regions on the folio. Because
 * they are delalloc, we can do this without needing a transaction. Indeed - if
 * we get ENOSPC errors, we have to be able to do this truncation without a
 * transaction as there is no space left for block reservation (typically why
 * we see an ENOSPC in writeback).
 */
static void
xfs_discard_folio(
        struct folio            *folio,
        loff_t                  pos)
{
        struct xfs_inode        *ip = XFS_I(folio->mapping->host);
        struct xfs_mount        *mp = ip->i_mount;

        if (xfs_is_shutdown(mp))
                return;

        xfs_alert_ratelimited(mp,
                "page discard on page "PTR_FMT", inode 0x%llx, pos %llu.",
                        folio, ip->i_ino, pos);

        /*
         * The end of the punch range is always the offset of the first
         * byte of the next folio. Hence the end offset is only dependent on the
         * folio itself and not the start offset that is passed in.
         */
        xfs_bmap_punch_delalloc_range(ip, XFS_DATA_FORK, pos,
                        folio_pos(folio) + folio_size(folio), NULL);
}

/*
 * Fast revalidation of the cached writeback mapping. Return true if the current
 * mapping is valid, false otherwise.

@@ -278,13 +319,12 @@ xfs_imap_valid(
static int
xfs_map_blocks(
        struct iomap_writepage_ctx *wpc,
        struct inode            *inode,
        loff_t                  offset,
        unsigned int            len)
{
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_inode        *ip = XFS_I(wpc->inode);
        struct xfs_mount        *mp = ip->i_mount;
        ssize_t                 count = i_blocksize(inode);
        ssize_t                 count = i_blocksize(wpc->inode);
        xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
        xfs_fileoff_t           end_fsb = XFS_B_TO_FSB(mp, offset + count);
        xfs_fileoff_t           cow_fsb;

@@ -436,6 +476,24 @@ allocate_blocks:
        return 0;
}

static ssize_t
xfs_writeback_range(
        struct iomap_writepage_ctx *wpc,
        struct folio            *folio,
        u64                     offset,
        unsigned int            len,
        u64                     end_pos)
{
        ssize_t                 ret;

        ret = xfs_map_blocks(wpc, offset, len);
        if (!ret)
                ret = iomap_add_to_ioend(wpc, folio, offset, end_pos, len);
        if (ret < 0)
                xfs_discard_folio(folio, offset);
        return ret;
}

static bool
xfs_ioend_needs_wq_completion(
        struct iomap_ioend      *ioend)

@@ -456,79 +514,40 @@ xfs_ioend_needs_wq_completion(
}

static int
xfs_submit_ioend(
        struct iomap_writepage_ctx *wpc,
        int                     status)
xfs_writeback_submit(
        struct iomap_writepage_ctx      *wpc,
        int                             error)
{
        struct iomap_ioend      *ioend = wpc->ioend;
        unsigned int            nofs_flag;
        struct iomap_ioend              *ioend = wpc->wb_ctx;

        /*
         * We can allocate memory here while doing writeback on behalf of
         * memory reclaim. To avoid memory allocation deadlocks set the
         * task-wide nofs context for the following operations.
         * Convert CoW extents to regular.
         *
         * We can allocate memory here while doing writeback on behalf of memory
         * reclaim. To avoid memory allocation deadlocks, set the task-wide
         * nofs context.
         */
        nofs_flag = memalloc_nofs_save();
        if (!error && (ioend->io_flags & IOMAP_IOEND_SHARED)) {
                unsigned int nofs_flag;

        /* Convert CoW extents to regular */
        if (!status && (ioend->io_flags & IOMAP_IOEND_SHARED)) {
                status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
                nofs_flag = memalloc_nofs_save();
                error = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
                                ioend->io_offset, ioend->io_size);
                memalloc_nofs_restore(nofs_flag);
        }

        memalloc_nofs_restore(nofs_flag);

        /* send ioends that might require a transaction to the completion wq */
        /*
         * Send ioends that might require a transaction to the completion wq.
         */
        if (xfs_ioend_needs_wq_completion(ioend))
                ioend->io_bio.bi_end_io = xfs_end_bio;

        if (status)
                return status;
        submit_bio(&ioend->io_bio);
        return 0;
}

/*
 * If the folio has delalloc blocks on it, the caller is asking us to punch them
 * out. If we don't, we can leave a stale delalloc mapping covered by a clean
 * page that needs to be dirtied again before the delalloc mapping can be
 * converted. This stale delalloc mapping can trip up a later direct I/O read
 * operation on the same region.
 *
 * We prevent this by truncating away the delalloc regions on the folio. Because
 * they are delalloc, we can do this without needing a transaction. Indeed - if
 * we get ENOSPC errors, we have to be able to do this truncation without a
 * transaction as there is no space left for block reservation (typically why
 * we see an ENOSPC in writeback).
 */
static void
xfs_discard_folio(
        struct folio            *folio,
        loff_t                  pos)
{
        struct xfs_inode        *ip = XFS_I(folio->mapping->host);
        struct xfs_mount        *mp = ip->i_mount;

        if (xfs_is_shutdown(mp))
                return;

        xfs_alert_ratelimited(mp,
                "page discard on page "PTR_FMT", inode 0x%llx, pos %llu.",
                        folio, ip->i_ino, pos);

        /*
         * The end of the punch range is always the offset of the first
         * byte of the next folio. Hence the end offset is only dependent on the
         * folio itself and not the start offset that is passed in.
         */
        xfs_bmap_punch_delalloc_range(ip, XFS_DATA_FORK, pos,
                        folio_pos(folio) + folio_size(folio), NULL);
        return iomap_ioend_writeback_submit(wpc, error);
}

static const struct iomap_writeback_ops xfs_writeback_ops = {
        .map_blocks             = xfs_map_blocks,
        .submit_ioend           = xfs_submit_ioend,
        .discard_folio          = xfs_discard_folio,
        .writeback_range        = xfs_writeback_range,
        .writeback_submit       = xfs_writeback_submit,
};

struct xfs_zoned_writepage_ctx {

@@ -545,11 +564,10 @@ XFS_ZWPC(struct iomap_writepage_ctx *ctx)
static int
xfs_zoned_map_blocks(
        struct iomap_writepage_ctx *wpc,
        struct inode            *inode,
        loff_t                  offset,
        unsigned int            len)
{
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_inode        *ip = XFS_I(wpc->inode);
        struct xfs_mount        *mp = ip->i_mount;
        xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
        xfs_fileoff_t           end_fsb = XFS_B_TO_FSB(mp, offset + len);

@@ -608,22 +626,44 @@ xfs_zoned_map_blocks(
        return 0;
}

static int
xfs_zoned_submit_ioend(
static ssize_t
xfs_zoned_writeback_range(
        struct iomap_writepage_ctx *wpc,
        int                     status)
        struct folio            *folio,
        u64                     offset,
        unsigned int            len,
        u64                     end_pos)
{
        wpc->ioend->io_bio.bi_end_io = xfs_end_bio;
        if (status)
                return status;
        xfs_zone_alloc_and_submit(wpc->ioend, &XFS_ZWPC(wpc)->open_zone);
        ssize_t                 ret;

        ret = xfs_zoned_map_blocks(wpc, offset, len);
        if (!ret)
                ret = iomap_add_to_ioend(wpc, folio, offset, end_pos, len);
        if (ret < 0)
                xfs_discard_folio(folio, offset);
        return ret;
}

static int
xfs_zoned_writeback_submit(
        struct iomap_writepage_ctx      *wpc,
        int                             error)
{
        struct iomap_ioend              *ioend = wpc->wb_ctx;

        ioend->io_bio.bi_end_io = xfs_end_bio;
        if (error) {
                ioend->io_bio.bi_status = errno_to_blk_status(error);
                bio_endio(&ioend->io_bio);
                return error;
        }
        xfs_zone_alloc_and_submit(ioend, &XFS_ZWPC(wpc)->open_zone);
        return 0;
}

static const struct iomap_writeback_ops xfs_zoned_writeback_ops = {
        .map_blocks             = xfs_zoned_map_blocks,
        .submit_ioend           = xfs_zoned_submit_ioend,
        .discard_folio          = xfs_discard_folio,
        .writeback_range        = xfs_zoned_writeback_range,
        .writeback_submit       = xfs_zoned_writeback_submit,
};

STATIC int

@@ -636,19 +676,29 @@ xfs_vm_writepages(
        xfs_iflags_clear(ip, XFS_ITRUNCATED);

        if (xfs_is_zoned_inode(ip)) {
                struct xfs_zoned_writepage_ctx  xc = { };
                struct xfs_zoned_writepage_ctx  xc = {
                        .ctx = {
                                .inode  = mapping->host,
                                .wbc    = wbc,
                                .ops    = &xfs_zoned_writeback_ops
                        },
                };
                int                             error;

                error = iomap_writepages(mapping, wbc, &xc.ctx,
                                &xfs_zoned_writeback_ops);
                error = iomap_writepages(&xc.ctx);
                if (xc.open_zone)
                        xfs_open_zone_put(xc.open_zone);
                return error;
        } else {
                struct xfs_writepage_ctx        wpc = { };
                struct xfs_writepage_ctx        wpc = {
                        .ctx = {
                                .inode  = mapping->host,
                                .wbc    = wbc,
                                .ops    = &xfs_writeback_ops
                        },
                };

                return iomap_writepages(mapping, wbc, &wpc.ctx,
                                &xfs_writeback_ops);
                return iomap_writepages(&wpc.ctx);
        }
}
@@ -979,7 +979,8 @@ write_retry:

        trace_xfs_file_buffered_write(iocb, from);
        ret = iomap_file_buffered_write(iocb, from,
                        &xfs_buffered_write_iomap_ops, NULL);
                        &xfs_buffered_write_iomap_ops, &xfs_iomap_write_ops,
                        NULL);

        /*
         * If we hit a space limit, try to free up some lingering preallocated

@@ -1059,7 +1060,8 @@ xfs_file_buffered_write_zoned(
retry:
        trace_xfs_file_buffered_write(iocb, from);
        ret = iomap_file_buffered_write(iocb, from,
                        &xfs_buffered_write_iomap_ops, &ac);
                        &xfs_buffered_write_iomap_ops, &xfs_iomap_write_ops,
                        &ac);
        if (ret == -ENOSPC && !cleared_space) {
                /*
                 * Kick off writeback to convert delalloc space and release the
@@ -79,6 +79,9 @@ xfs_iomap_valid(
{
        struct xfs_inode        *ip = XFS_I(inode);

        if (iomap->type == IOMAP_HOLE)
                return true;

        if (iomap->validity_cookie !=
                        xfs_iomap_inode_sequence(ip, iomap->flags)) {
                trace_xfs_iomap_invalid(ip, iomap);

@@ -89,7 +92,7 @@ xfs_iomap_valid(
        return true;
}

static const struct iomap_folio_ops xfs_iomap_folio_ops = {
const struct iomap_write_ops xfs_iomap_write_ops = {
        .iomap_valid            = xfs_iomap_valid,
};

@@ -151,7 +154,6 @@ xfs_bmbt_to_iomap(
                iomap->flags |= IOMAP_F_DIRTY;

        iomap->validity_cookie = sequence_cookie;
        iomap->folio_ops = &xfs_iomap_folio_ops;
        return 0;
}

@@ -2198,7 +2200,8 @@ xfs_zero_range(
                return dax_zero_range(inode, pos, len, did_zero,
                                &xfs_dax_write_iomap_ops);
        return iomap_zero_range(inode, pos, len, did_zero,
                        &xfs_buffered_write_iomap_ops, ac);
                        &xfs_buffered_write_iomap_ops, &xfs_iomap_write_ops,
                        ac);
}

int

@@ -2214,5 +2217,6 @@ xfs_truncate_page(
                return dax_truncate_page(inode, pos, did_zero,
                                &xfs_dax_write_iomap_ops);
        return iomap_truncate_page(inode, pos, did_zero,
                        &xfs_buffered_write_iomap_ops, ac);
                        &xfs_buffered_write_iomap_ops, &xfs_iomap_write_ops,
                        ac);
}
@@ -57,5 +57,6 @@ extern const struct iomap_ops xfs_seek_iomap_ops;
extern const struct iomap_ops xfs_xattr_iomap_ops;
extern const struct iomap_ops xfs_dax_write_iomap_ops;
extern const struct iomap_ops xfs_atomic_write_cow_iomap_ops;
extern const struct iomap_write_ops xfs_iomap_write_ops;

#endif /* __XFS_IOMAP_H__*/
@@ -1881,7 +1881,8 @@ xfs_reflink_unshare(
                                &xfs_dax_write_iomap_ops);
        else
                error = iomap_file_unshare(inode, offset, len,
                                &xfs_buffered_write_iomap_ops);
                                &xfs_buffered_write_iomap_ops,
                                &xfs_iomap_write_ops);
        if (error)
                goto out;
@@ -124,37 +124,46 @@ static void zonefs_readahead(struct readahead_control *rac)
 * Map blocks for page writeback. This is used only on conventional zone files,
 * which implies that the page range can only be within the fixed inode size.
 */
static int zonefs_write_map_blocks(struct iomap_writepage_ctx *wpc,
                                   struct inode *inode, loff_t offset,
                                   unsigned int len)
static ssize_t zonefs_writeback_range(struct iomap_writepage_ctx *wpc,
                struct folio *folio, u64 offset, unsigned len, u64 end_pos)
{
        struct zonefs_zone *z = zonefs_inode_zone(inode);
        struct zonefs_zone *z = zonefs_inode_zone(wpc->inode);

        if (WARN_ON_ONCE(zonefs_zone_is_seq(z)))
                return -EIO;
        if (WARN_ON_ONCE(offset >= i_size_read(inode)))
        if (WARN_ON_ONCE(offset >= i_size_read(wpc->inode)))
                return -EIO;

        /* If the mapping is already OK, nothing needs to be done */
        if (offset >= wpc->iomap.offset &&
            offset < wpc->iomap.offset + wpc->iomap.length)
                return 0;
        if (offset < wpc->iomap.offset ||
            offset >= wpc->iomap.offset + wpc->iomap.length) {
                int error;

        return zonefs_write_iomap_begin(inode, offset,
                                        z->z_capacity - offset,
                                        IOMAP_WRITE, &wpc->iomap, NULL);
                error = zonefs_write_iomap_begin(wpc->inode, offset,
                                z->z_capacity - offset, IOMAP_WRITE,
                                &wpc->iomap, NULL);
                if (error)
                        return error;
        }

        return iomap_add_to_ioend(wpc, folio, offset, end_pos, len);
}

static const struct iomap_writeback_ops zonefs_writeback_ops = {
        .map_blocks             = zonefs_write_map_blocks,
        .writeback_range        = zonefs_writeback_range,
        .writeback_submit       = iomap_ioend_writeback_submit,
};

static int zonefs_writepages(struct address_space *mapping,
                             struct writeback_control *wbc)
{
        struct iomap_writepage_ctx wpc = { };
        struct iomap_writepage_ctx wpc = {
                .inode          = mapping->host,
                .wbc            = wbc,
                .ops            = &zonefs_writeback_ops,
        };

        return iomap_writepages(mapping, wbc, &wpc, &zonefs_writeback_ops);
        return iomap_writepages(&wpc);
}

static int zonefs_swap_activate(struct swap_info_struct *sis,

@@ -565,7 +574,8 @@ static ssize_t zonefs_file_buffered_write(struct kiocb *iocb,
        if (ret <= 0)
                goto inode_unlock;

        ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops, NULL);
        ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops,
                        NULL, NULL);
        if (ret == -EIO)
                zonefs_io_error(inode, true);
@@ -101,8 +101,6 @@ struct vm_fault;
 */
#define IOMAP_NULL_ADDR -1ULL   /* addr is not valid */

struct iomap_folio_ops;

struct iomap {
        u64                     addr; /* disk offset of mapping, bytes */
        loff_t                  offset; /* file offset of mapping, bytes */

@@ -113,7 +111,6 @@ struct iomap {
        struct dax_device       *dax_dev; /* dax_dev for dax operations */
        void                    *inline_data;
        void                    *private; /* filesystem private */
        const struct iomap_folio_ops *folio_ops;
        u64                     validity_cookie; /* used with .iomap_valid() */
};

@@ -143,16 +140,11 @@ static inline bool iomap_inline_data_valid(const struct iomap *iomap)
}

/*
 * When a filesystem sets folio_ops in an iomap mapping it returns, get_folio
 * and put_folio will be called for each folio written to. This only applies
 * to buffered writes as unbuffered writes will not typically have folios
 * associated with them.
 *
 * When get_folio succeeds, put_folio will always be called to do any
 * cleanup work necessary. put_folio is responsible for unlocking and putting
 * @folio.
 */
struct iomap_folio_ops {
struct iomap_write_ops {
        struct folio *(*get_folio)(struct iomap_iter *iter, loff_t pos,
                        unsigned len);
        void (*put_folio)(struct inode *inode, loff_t pos, unsigned copied,

@@ -174,6 +166,16 @@ struct iomap_folio_ops {
         * locked by the iomap code.
         */
        bool (*iomap_valid)(struct inode *inode, const struct iomap *iomap);

        /*
         * Optional if the filesystem wishes to provide a custom handler for
         * reading in the contents of a folio, otherwise iomap will default to
         * submitting a bio read request.
         *
         * The read must be done synchronously.
         */
        int (*read_folio_range)(const struct iomap_iter *iter,
                        struct folio *folio, loff_t pos, size_t len);
};
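A sketch of what a ->read_folio_range handler might look like for a filesystem that reads over its own transport rather than via a bio; my_fs_read_range() is a hypothetical synchronous helper, and this sketch assumes the iomap core takes care of the uptodate bookkeeping when the handler returns success:

.. code-block:: c

    static int my_fs_read_folio_range(const struct iomap_iter *iter,
                    struct folio *folio, loff_t pos, size_t len)
    {
            /* hypothetical: fill folio bytes [pos, pos + len) synchronously */
            return my_fs_read_range(iter->inode, folio, pos, len);
    }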
/*

@@ -335,7 +337,8 @@ static inline bool iomap_want_unshare_iter(const struct iomap_iter *iter)
}

ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
                const struct iomap_ops *ops, void *private);
                const struct iomap_ops *ops,
                const struct iomap_write_ops *write_ops, void *private);
int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops);
void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);

@@ -344,11 +347,14 @@ bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags);
void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len);
bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio);
int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
                const struct iomap_ops *ops);
                const struct iomap_ops *ops,
                const struct iomap_write_ops *write_ops);
int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
                bool *did_zero, const struct iomap_ops *ops, void *private);
                bool *did_zero, const struct iomap_ops *ops,
                const struct iomap_write_ops *write_ops, void *private);
int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
                const struct iomap_ops *ops, void *private);
                const struct iomap_ops *ops,
                const struct iomap_write_ops *write_ops, void *private);
vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops,
                void *private);
typedef void (*iomap_punch_t)(struct inode *inode, loff_t offset, loff_t length,

@@ -391,8 +397,7 @@ sector_t iomap_bmap(struct address_space *mapping, sector_t bno,
/*
 * Structure for writeback I/O completions.
 *
 * File systems implementing ->submit_ioend (for buffered I/O) or ->submit_io
 * (for direct I/O) can split a bio generated by iomap. In that case the parent
 * File systems can split a bio generated by iomap. In that case the parent
 * ioend it was split from is recorded in ioend->io_parent.
 */
struct iomap_ioend {

@@ -416,41 +421,38 @@ static inline struct iomap_ioend *iomap_ioend_from_bio(struct bio *bio)

struct iomap_writeback_ops {
        /*
         * Required, maps the blocks so that writeback can be performed on
         * the range starting at offset.
         * Performs writeback on the passed in range.
         *
         * Can return arbitrarily large regions, but we need to call into it at
         * Can map arbitrarily large regions, but we need to call into it at
         * least once per folio to allow the file systems to synchronize with
         * the write path that could be invalidating mappings.
         *
         * An existing mapping from a previous call to this method can be reused
         * by the file system if it is still valid.
         */
        int (*map_blocks)(struct iomap_writepage_ctx *wpc, struct inode *inode,
                        loff_t offset, unsigned len);

        /*
         * Optional, allows the file systems to hook into bio submission,
         * including overriding the bi_end_io handler.
         *
         * Returns 0 if the bio was successfully submitted, or a negative
         * error code if status was non-zero or another error happened and
         * the bio could not be submitted.
         * Returns the number of bytes processed or a negative errno.
         */
        int (*submit_ioend)(struct iomap_writepage_ctx *wpc, int status);
        ssize_t (*writeback_range)(struct iomap_writepage_ctx *wpc,
                        struct folio *folio, u64 pos, unsigned int len,
                        u64 end_pos);

        /*
         * Optional, allows the file system to discard state on a page where
         * we failed to submit any I/O.
         * Submit a writeback context previously built up by ->writeback_range.
         *
         * Returns 0 if the context was successfully submitted, or a negative
         * error code if not. If @error is non-zero a failure occurred, and
         * the writeback context should be completed with an error.
         */
        void (*discard_folio)(struct folio *folio, loff_t pos);
        int (*writeback_submit)(struct iomap_writepage_ctx *wpc, int error);
};

struct iomap_writepage_ctx {
        struct iomap            iomap;
        struct iomap_ioend      *ioend;
        struct inode            *inode;
        struct writeback_control *wbc;
        const struct iomap_writeback_ops *ops;
        u32                     nr_folios;      /* folios added to the ioend */
        void                    *wb_ctx;        /* pending writeback context */
};
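Putting the two methods together, a minimal bio-based implementation could look like the sketch below; my_fs_map_blocks() is a stand-in for the filesystem's block mapping, and the shape mirrors the xfs and zonefs conversions earlier in this diff:

.. code-block:: c

    static ssize_t my_fs_writeback_range(struct iomap_writepage_ctx *wpc,
                    struct folio *folio, u64 pos, unsigned int len, u64 end_pos)
    {
            ssize_t ret;

            /* refresh wpc->iomap if it no longer covers pos */
            ret = my_fs_map_blocks(wpc, pos, len);
            if (!ret)
                    ret = iomap_add_to_ioend(wpc, folio, pos, end_pos, len);
            return ret;
    }

    static const struct iomap_writeback_ops my_fs_writeback_ops = {
            .writeback_range        = my_fs_writeback_range,
            .writeback_submit       = iomap_ioend_writeback_submit,
    };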

struct iomap_ioend *iomap_init_ioend(struct inode *inode, struct bio *bio,

@@ -461,9 +463,17 @@ void iomap_finish_ioends(struct iomap_ioend *ioend, int error);
void iomap_ioend_try_merge(struct iomap_ioend *ioend,
                struct list_head *more_ioends);
void iomap_sort_ioends(struct list_head *ioend_list);
int iomap_writepages(struct address_space *mapping,
                struct writeback_control *wbc, struct iomap_writepage_ctx *wpc,
                const struct iomap_writeback_ops *ops);
ssize_t iomap_add_to_ioend(struct iomap_writepage_ctx *wpc, struct folio *folio,
                loff_t pos, loff_t end_pos, unsigned int dirty_len);
int iomap_ioend_writeback_submit(struct iomap_writepage_ctx *wpc, int error);

void iomap_start_folio_write(struct inode *inode, struct folio *folio,
                size_t len);
void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
                size_t len);

int iomap_writeback_folio(struct iomap_writepage_ctx *wpc, struct folio *folio);
int iomap_writepages(struct iomap_writepage_ctx *wpc);

/*
 * Flags for direct I/O ->end_io: