mirror of https://github.com/torvalds/linux.git, synced 2025-08-15 14:11:42 +02:00
vfs-6.17-rc1.mmap_prepare
-----BEGIN PGP SIGNATURE-----

iHUEABYKAB0WIQRAhzRXHqcMeLMyaSiRxhvAZXjcogUCaINCgQAKCRCRxhvAZXjc
os+nAP9LFHUwWO6EBzHJJGEVjJvvzsbzqeYrRFamYiMc5ulPJwD+KW4RIgJa/MWO
pcYE40CacaekD8rFWwYUyszpgmv6ewc=
=wCwp
-----END PGP SIGNATURE-----

Merge tag 'vfs-6.17-rc1.mmap_prepare' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs

Pull mmap_prepare updates from Christian Brauner:

 "Last cycle we introduced f_op->mmap_prepare() in c84bf6dd2b ("mm:
  introduce new .mmap_prepare() file callback"). This is preferred to
  the existing f_op->mmap() hook as it does not require a VMA to be
  established yet, thus allowing the mmap logic to invoke this hook
  far, far earlier, prior to inserting a VMA into the virtual address
  space, or performing any other heavy handed operations.

  This allows for much simpler unwinding on error, and for there to be
  a single attempt at merging a VMA rather than having to possibly
  reattempt a merge based on potentially altered VMA state.

  Far more importantly, it prevents inappropriate manipulation of
  incompletely initialised VMA state, which is something that has been
  the cause of bugs and complexity in the past.

  The intent is to gradually deprecate f_op->mmap, and in that vein
  this series converts the majority of file systems to using
  f_op->mmap_prepare.

  Prerequisite steps are taken - firstly ensuring all checks for mmap
  capabilities use the file_has_valid_mmap_hooks() helper rather than
  directly checking for f_op->mmap (which is now not a valid check),
  and secondly updating daxdev_mapping_supported() to not require a
  VMA parameter, to allow ext4 and xfs to be converted.

  Commit bb666b7c27 ("mm: add mmap_prepare() compatibility layer for
  nested file systems") handles the nasty edge case of nested file
  systems like overlayfs by introducing a compatibility shim that
  allows f_op->mmap_prepare() to be invoked from an f_op->mmap()
  callback. This allows nested filesystems to continue to function
  correctly with all file systems regardless of which callback is
  used. Once we finally convert all file systems, this shim can be
  removed.

  As a result, ecryptfs, fuse, and overlayfs remain unaltered so they
  can nest all other file systems.

  We additionally do not update resctrl - as this requires an update
  to remap_pfn_range() (or an alternative to it) which we defer to a
  later series - and equally we do not update cramfs, which needs a
  mixed mapping insertion with the same issue; nor do we update
  procfs, hugetlbfs, sysfs or kernfs, all of which require VMAs for
  internal state and hooks. We shall return to all of these later"

* tag 'vfs-6.17-rc1.mmap_prepare' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs:
  doc: update porting, vfs documentation to describe mmap_prepare()
  fs: replace mmap hook with .mmap_prepare for simple mappings
  fs: convert most other generic_file_*mmap() users to .mmap_prepare()
  fs: convert simple use of generic_file_*_mmap() to .mmap_prepare()
  mm/filemap: introduce generic_file_*_mmap_prepare() helpers
  fs/xfs: transition from deprecated .mmap hook to .mmap_prepare
  fs/ext4: transition from deprecated .mmap hook to .mmap_prepare
  fs/dax: make it possible to check dev dax support without a VMA
  fs: consistently use can_mmap_file() helper
  mm/nommu: use file_has_valid_mmap_hooks() helper
  mm: rename call_mmap/mmap_prepare to vfs_mmap/mmap_prepare
commit 7031769e10

64 changed files with 281 additions and 187 deletions
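Illustrative aside: to make the shape of the conversions in the diff below concrete, here is a minimal, hypothetical sketch of moving a simple filesystem from .mmap to .mmap_prepare, based on the vm_area_desc fields and helpers this series uses (desc->file, desc->vm_ops, generic page-cache vm_ops). The foofs names are placeholders, not code from this series; the two hooks are shown side by side only for comparison, since a file may provide exactly one of them.

/* Hypothetical sketch of a simple .mmap -> .mmap_prepare conversion. */
#include <linux/fs.h>
#include <linux/mm.h>

static const struct vm_operations_struct foofs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
};

/* Old style: operates on a fully constructed, already-inserted VMA. */
static int foofs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &foofs_file_vm_ops;
	return 0;
}

/* New style: operates on the lightweight descriptor, before any VMA exists. */
static int foofs_file_mmap_prepare(struct vm_area_desc *desc)
{
	file_accessed(desc->file);
	desc->vm_ops = &foofs_file_vm_ops;	/* output parameter */
	return 0;
}

const struct file_operations foofs_file_operations = {
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	/* .mmap = foofs_file_mmap,  <- replaced; the hooks are mutually exclusive */
	.mmap_prepare	= foofs_file_mmap_prepare,
};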
@@ -1273,3 +1273,15 @@ to have them set. Better yet, think hard whether you need different
->d_op for different dentries - if not, just use set_default_d_op()
at mount time and be done with that. Currently procfs is the only
thing that really needs ->d_op varying between dentries.

---

**highly recommended**

The file operations mmap() callback is deprecated in favour of
mmap_prepare(). This passes a pointer to a vm_area_desc to the callback
rather than a VMA, as the VMA at this stage is not yet valid.

The vm_area_desc provides the minimum required information for a filesystem
to initialise state upon memory mapping of a file-backed region, and output
parameters for the file system to set this state.

@@ -1072,12 +1072,14 @@ This describes how the VFS can manipulate an open file. As of kernel

	struct file_operations {
		struct module *owner;
		fop_flags_t fop_flags;
		loff_t (*llseek) (struct file *, loff_t, int);
		ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
		ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
		ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
		ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
		int (*iopoll)(struct kiocb *kiocb, bool spin);
		int (*iopoll)(struct kiocb *kiocb, struct io_comp_batch *,
			      unsigned int flags);
		int (*iterate_shared) (struct file *, struct dir_context *);
		__poll_t (*poll) (struct file *, struct poll_table_struct *);
		long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);

@@ -1094,18 +1096,24 @@ This describes how the VFS can manipulate an open file. As of kernel
		int (*flock) (struct file *, int, struct file_lock *);
		ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
		ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
		int (*setlease)(struct file *, long, struct file_lock **, void **);
		void (*splice_eof)(struct file *file);
		int (*setlease)(struct file *, int, struct file_lease **, void **);
		long (*fallocate)(struct file *file, int mode, loff_t offset,
				  loff_t len);
		void (*show_fdinfo)(struct seq_file *m, struct file *f);
	#ifndef CONFIG_MMU
		unsigned (*mmap_capabilities)(struct file *);
	#endif
		ssize_t (*copy_file_range)(struct file *, loff_t, struct file *, loff_t, size_t, unsigned int);
		ssize_t (*copy_file_range)(struct file *, loff_t, struct file *,
					   loff_t, size_t, unsigned int);
		loff_t (*remap_file_range)(struct file *file_in, loff_t pos_in,
					   struct file *file_out, loff_t pos_out,
					   loff_t len, unsigned int remap_flags);
		int (*fadvise)(struct file *, loff_t, loff_t, int);
		int (*uring_cmd)(struct io_uring_cmd *ioucmd, unsigned int issue_flags);
		int (*uring_cmd_iopoll)(struct io_uring_cmd *, struct io_comp_batch *,
					unsigned int poll_flags);
		int (*mmap_prepare)(struct vm_area_desc *);
	};

Again, all methods are called without any locks being held, unless

@@ -1145,7 +1153,8 @@ otherwise noted.
	used on 64 bit kernels.

``mmap``
	called by the mmap(2) system call
	called by the mmap(2) system call. Deprecated in favour of
	``mmap_prepare``.

``open``
	called by the VFS when an inode should be opened. When the VFS

@@ -1222,6 +1231,11 @@ otherwise noted.
``fadvise``
	possibly called by the fadvise64() system call.

``mmap_prepare``
	Called by the mmap(2) system call. Allows a VFS to set up a
	file-backed memory mapping, most notably establishing relevant
	private state and VMA callbacks.

Note that the file operations are implemented by the specific
filesystem in which the inode resides. When opening a device node
(character or block special) most filesystems will call special

12
block/fops.c
12
block/fops.c
|
@ -920,14 +920,14 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
|
|||
return error;
|
||||
}
|
||||
|
||||
static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
static int blkdev_mmap_prepare(struct vm_area_desc *desc)
|
||||
{
|
||||
struct inode *bd_inode = bdev_file_inode(file);
|
||||
struct file *file = desc->file;
|
||||
|
||||
if (bdev_read_only(I_BDEV(bd_inode)))
|
||||
return generic_file_readonly_mmap(file, vma);
|
||||
if (bdev_read_only(I_BDEV(bdev_file_inode(file))))
|
||||
return generic_file_readonly_mmap_prepare(desc);
|
||||
|
||||
return generic_file_mmap(file, vma);
|
||||
return generic_file_mmap_prepare(desc);
|
||||
}
|
||||
|
||||
const struct file_operations def_blk_fops = {
|
||||
|
@ -937,7 +937,7 @@ const struct file_operations def_blk_fops = {
|
|||
.read_iter = blkdev_read_iter,
|
||||
.write_iter = blkdev_write_iter,
|
||||
.iopoll = iocb_bio_iopoll,
|
||||
.mmap = blkdev_mmap,
|
||||
.mmap_prepare = blkdev_mmap_prepare,
|
||||
.fsync = blkdev_fsync,
|
||||
.unlocked_ioctl = blkdev_ioctl,
|
||||
#ifdef CONFIG_COMPAT
|
||||
|
|
|
@ -105,7 +105,7 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *
|
|||
if (!obj->base.filp)
|
||||
return -ENODEV;
|
||||
|
||||
ret = call_mmap(obj->base.filp, vma);
|
||||
ret = vfs_mmap(obj->base.filp, vma);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
|
|
@ -454,9 +454,10 @@ int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end,
|
|||
}
|
||||
|
||||
static int
|
||||
v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma)
|
||||
v9fs_file_mmap_prepare(struct vm_area_desc *desc)
|
||||
{
|
||||
int retval;
|
||||
struct file *filp = desc->file;
|
||||
struct inode *inode = file_inode(filp);
|
||||
struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode);
|
||||
|
||||
|
@ -464,12 +465,12 @@ v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma)
|
|||
|
||||
if (!(v9ses->cache & CACHE_WRITEBACK)) {
|
||||
p9_debug(P9_DEBUG_CACHE, "(read-only mmap mode)");
|
||||
return generic_file_readonly_mmap(filp, vma);
|
||||
return generic_file_readonly_mmap_prepare(desc);
|
||||
}
|
||||
|
||||
retval = generic_file_mmap(filp, vma);
|
||||
retval = generic_file_mmap_prepare(desc);
|
||||
if (!retval)
|
||||
vma->vm_ops = &v9fs_mmap_file_vm_ops;
|
||||
desc->vm_ops = &v9fs_mmap_file_vm_ops;
|
||||
|
||||
return retval;
|
||||
}
|
||||
|
@ -516,7 +517,7 @@ const struct file_operations v9fs_file_operations = {
|
|||
.open = v9fs_file_open,
|
||||
.release = v9fs_dir_release,
|
||||
.lock = v9fs_file_lock,
|
||||
.mmap = generic_file_readonly_mmap,
|
||||
.mmap_prepare = generic_file_readonly_mmap_prepare,
|
||||
.splice_read = v9fs_file_splice_read,
|
||||
.splice_write = iter_file_splice_write,
|
||||
.fsync = v9fs_file_fsync,
|
||||
|
@ -531,7 +532,7 @@ const struct file_operations v9fs_file_operations_dotl = {
|
|||
.release = v9fs_dir_release,
|
||||
.lock = v9fs_file_lock_dotl,
|
||||
.flock = v9fs_file_flock_dotl,
|
||||
.mmap = v9fs_file_mmap,
|
||||
.mmap_prepare = v9fs_file_mmap_prepare,
|
||||
.splice_read = v9fs_file_splice_read,
|
||||
.splice_write = iter_file_splice_write,
|
||||
.fsync = v9fs_file_fsync_dotl,
|
||||
|
|
|
@ -25,7 +25,7 @@
|
|||
const struct file_operations adfs_file_operations = {
|
||||
.llseek = generic_file_llseek,
|
||||
.read_iter = generic_file_read_iter,
|
||||
.mmap = generic_file_mmap,
|
||||
.mmap_prepare = generic_file_mmap_prepare,
|
||||
.fsync = generic_file_fsync,
|
||||
.write_iter = generic_file_write_iter,
|
||||
.splice_read = filemap_splice_read,
|
||||
|
|
|
@ -1003,7 +1003,7 @@ const struct file_operations affs_file_operations = {
|
|||
.llseek = generic_file_llseek,
|
||||
.read_iter = generic_file_read_iter,
|
||||
.write_iter = generic_file_write_iter,
|
||||
.mmap = generic_file_mmap,
|
||||
.mmap_prepare = generic_file_mmap_prepare,
|
||||
.open = affs_file_open,
|
||||
.release = affs_file_release,
|
||||
.fsync = affs_file_fsync,
|
||||
|
|
|
@ -19,7 +19,7 @@
|
|||
#include <trace/events/netfs.h>
|
||||
#include "internal.h"
|
||||
|
||||
static int afs_file_mmap(struct file *file, struct vm_area_struct *vma);
|
||||
static int afs_file_mmap_prepare(struct vm_area_desc *desc);
|
||||
|
||||
static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
|
||||
static ssize_t afs_file_splice_read(struct file *in, loff_t *ppos,
|
||||
|
@ -35,7 +35,7 @@ const struct file_operations afs_file_operations = {
|
|||
.llseek = generic_file_llseek,
|
||||
.read_iter = afs_file_read_iter,
|
||||
.write_iter = netfs_file_write_iter,
|
||||
.mmap = afs_file_mmap,
|
||||
.mmap_prepare = afs_file_mmap_prepare,
|
||||
.splice_read = afs_file_splice_read,
|
||||
.splice_write = iter_file_splice_write,
|
||||
.fsync = afs_fsync,
|
||||
|
@ -492,16 +492,16 @@ static void afs_drop_open_mmap(struct afs_vnode *vnode)
|
|||
/*
|
||||
* Handle setting up a memory mapping on an AFS file.
|
||||
*/
|
||||
static int afs_file_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
static int afs_file_mmap_prepare(struct vm_area_desc *desc)
|
||||
{
|
||||
struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
|
||||
struct afs_vnode *vnode = AFS_FS_I(file_inode(desc->file));
|
||||
int ret;
|
||||
|
||||
afs_add_open_mmap(vnode);
|
||||
|
||||
ret = generic_file_mmap(file, vma);
|
||||
ret = generic_file_mmap_prepare(desc);
|
||||
if (ret == 0)
|
||||
vma->vm_ops = &afs_vm_ops;
|
||||
desc->vm_ops = &afs_vm_ops;
|
||||
else
|
||||
afs_drop_open_mmap(vnode);
|
||||
return ret;
|
||||
|
|
8
fs/aio.c
8
fs/aio.c
|
@ -392,15 +392,15 @@ static const struct vm_operations_struct aio_ring_vm_ops = {
|
|||
#endif
|
||||
};
|
||||
|
||||
static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
static int aio_ring_mmap_prepare(struct vm_area_desc *desc)
|
||||
{
|
||||
vm_flags_set(vma, VM_DONTEXPAND);
|
||||
vma->vm_ops = &aio_ring_vm_ops;
|
||||
desc->vm_flags |= VM_DONTEXPAND;
|
||||
desc->vm_ops = &aio_ring_vm_ops;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct file_operations aio_ring_fops = {
|
||||
.mmap = aio_ring_mmap,
|
||||
.mmap_prepare = aio_ring_mmap_prepare,
|
||||
};
|
||||
|
||||
#if IS_ENABLED(CONFIG_MIGRATION)
|
||||
|
|
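Illustrative aside: a pattern worth noting in the aio conversion above, and in the orangefs and erofs conversions further down, is that flag updates which previously went through vm_flags_set()/vm_flags_mod() on a live VMA become plain bit operations on desc->vm_flags, since no VMA (and therefore no VMA locking) exists yet at .mmap_prepare time. A hedged sketch of that pattern follows; the foofs names are placeholders.

/* Hypothetical sketch: adjusting mapping flags through the descriptor. */
static int foofs_ring_mmap_prepare(struct vm_area_desc *desc)
{
	/* Plain bitwise updates; there is no VMA to lock at this point. */
	desc->vm_flags |= VM_DONTEXPAND;	/* the mapping must not grow */
	desc->vm_flags &= ~VM_RAND_READ;	/* prefer sequential readahead */
	desc->vm_flags |= VM_SEQ_READ;

	desc->vm_ops = &foofs_ring_vm_ops;	/* placeholder vm_operations_struct */
	return 0;
}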
|
@ -333,13 +333,13 @@ int backing_file_mmap(struct file *file, struct vm_area_struct *vma,
|
|||
if (WARN_ON_ONCE(!(file->f_mode & FMODE_BACKING)))
|
||||
return -EIO;
|
||||
|
||||
if (!file->f_op->mmap)
|
||||
if (!can_mmap_file(file))
|
||||
return -ENODEV;
|
||||
|
||||
vma_set_file(vma, file);
|
||||
|
||||
old_cred = override_creds(ctx->cred);
|
||||
ret = call_mmap(vma->vm_file, vma);
|
||||
ret = vfs_mmap(vma->vm_file, vma);
|
||||
revert_creds(old_cred);
|
||||
|
||||
if (ctx->accessed)
|
||||
|
|
|
@ -1551,11 +1551,11 @@ static const struct vm_operations_struct bch_vm_ops = {
|
|||
.page_mkwrite = bch2_page_mkwrite,
|
||||
};
|
||||
|
||||
static int bch2_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
static int bch2_mmap_prepare(struct vm_area_desc *desc)
|
||||
{
|
||||
file_accessed(file);
|
||||
file_accessed(desc->file);
|
||||
|
||||
vma->vm_ops = &bch_vm_ops;
|
||||
desc->vm_ops = &bch_vm_ops;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1739,7 +1739,7 @@ static const struct file_operations bch_file_operations = {
|
|||
.llseek = bch2_llseek,
|
||||
.read_iter = bch2_read_iter,
|
||||
.write_iter = bch2_write_iter,
|
||||
.mmap = bch2_mmap,
|
||||
.mmap_prepare = bch2_mmap_prepare,
|
||||
.get_unmapped_area = thp_get_unmapped_area,
|
||||
.fsync = bch2_fsync,
|
||||
.splice_read = filemap_splice_read,
|
||||
|
|
|
@ -27,7 +27,7 @@ const struct file_operations bfs_file_operations = {
|
|||
.llseek = generic_file_llseek,
|
||||
.read_iter = generic_file_read_iter,
|
||||
.write_iter = generic_file_write_iter,
|
||||
.mmap = generic_file_mmap,
|
||||
.mmap_prepare = generic_file_mmap_prepare,
|
||||
.splice_read = filemap_splice_read,
|
||||
};
|
||||
|
||||
|
|
|
@ -646,7 +646,7 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
|
|||
if (!elf_check_arch(interp_elf_ex) ||
|
||||
elf_check_fdpic(interp_elf_ex))
|
||||
goto out;
|
||||
if (!interpreter->f_op->mmap)
|
||||
if (!can_mmap_file(interpreter))
|
||||
goto out;
|
||||
|
||||
total_size = total_mapping_size(interp_elf_phdata,
|
||||
|
@ -848,7 +848,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
|
|||
goto out;
|
||||
if (elf_check_fdpic(elf_ex))
|
||||
goto out;
|
||||
if (!bprm->file->f_op->mmap)
|
||||
if (!can_mmap_file(bprm->file))
|
||||
goto out;
|
||||
|
||||
elf_phdata = load_elf_phdrs(elf_ex, bprm->file);
|
||||
|
|
|
@ -109,7 +109,7 @@ static int is_elf(struct elfhdr *hdr, struct file *file)
|
|||
return 0;
|
||||
if (!elf_check_arch(hdr))
|
||||
return 0;
|
||||
if (!file->f_op->mmap)
|
||||
if (!can_mmap_file(file))
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
|
|
|
@ -2039,15 +2039,16 @@ static const struct vm_operations_struct btrfs_file_vm_ops = {
|
|||
.page_mkwrite = btrfs_page_mkwrite,
|
||||
};
|
||||
|
||||
static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
|
||||
static int btrfs_file_mmap_prepare(struct vm_area_desc *desc)
|
||||
{
|
||||
struct file *filp = desc->file;
|
||||
struct address_space *mapping = filp->f_mapping;
|
||||
|
||||
if (!mapping->a_ops->read_folio)
|
||||
return -ENOEXEC;
|
||||
|
||||
file_accessed(filp);
|
||||
vma->vm_ops = &btrfs_file_vm_ops;
|
||||
desc->vm_ops = &btrfs_file_vm_ops;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -3821,7 +3822,7 @@ const struct file_operations btrfs_file_operations = {
|
|||
.splice_read = filemap_splice_read,
|
||||
.write_iter = btrfs_file_write_iter,
|
||||
.splice_write = iter_file_splice_write,
|
||||
.mmap = btrfs_file_mmap,
|
||||
.mmap_prepare = btrfs_file_mmap_prepare,
|
||||
.open = btrfs_file_open,
|
||||
.release = btrfs_release_file,
|
||||
.get_unmapped_area = thp_get_unmapped_area,
|
||||
|
|
|
@ -2334,13 +2334,13 @@ static const struct vm_operations_struct ceph_vmops = {
|
|||
.page_mkwrite = ceph_page_mkwrite,
|
||||
};
|
||||
|
||||
int ceph_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
int ceph_mmap_prepare(struct vm_area_desc *desc)
|
||||
{
|
||||
struct address_space *mapping = file->f_mapping;
|
||||
struct address_space *mapping = desc->file->f_mapping;
|
||||
|
||||
if (!mapping->a_ops->read_folio)
|
||||
return -ENOEXEC;
|
||||
vma->vm_ops = &ceph_vmops;
|
||||
desc->vm_ops = &ceph_vmops;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -3171,7 +3171,7 @@ const struct file_operations ceph_file_fops = {
|
|||
.llseek = ceph_llseek,
|
||||
.read_iter = ceph_read_iter,
|
||||
.write_iter = ceph_write_iter,
|
||||
.mmap = ceph_mmap,
|
||||
.mmap_prepare = ceph_mmap_prepare,
|
||||
.fsync = ceph_fsync,
|
||||
.lock = ceph_lock,
|
||||
.setlease = simple_nosetlease,
|
||||
|
|
|
@ -1286,7 +1286,7 @@ extern void __ceph_touch_fmode(struct ceph_inode_info *ci,
|
|||
/* addr.c */
|
||||
extern const struct address_space_operations ceph_aops;
|
||||
extern const struct netfs_request_ops ceph_netfs_ops;
|
||||
extern int ceph_mmap(struct file *file, struct vm_area_struct *vma);
|
||||
int ceph_mmap_prepare(struct vm_area_desc *desc);
|
||||
extern int ceph_uninline_data(struct file *file);
|
||||
extern int ceph_pool_perm_check(struct inode *inode, int need);
|
||||
extern void ceph_pool_perm_destroy(struct ceph_mds_client* mdsc);
|
||||
|
|
|
@ -160,7 +160,7 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
|
|||
size_t count;
|
||||
int ret;
|
||||
|
||||
if (!host_file->f_op->mmap)
|
||||
if (!can_mmap_file(host_file))
|
||||
return -ENODEV;
|
||||
|
||||
if (WARN_ON(coda_file != vma->vm_file))
|
||||
|
@ -199,10 +199,10 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
|
|||
spin_unlock(&cii->c_lock);
|
||||
|
||||
vma->vm_file = get_file(host_file);
|
||||
ret = call_mmap(vma->vm_file, vma);
|
||||
ret = vfs_mmap(vma->vm_file, vma);
|
||||
|
||||
if (ret) {
|
||||
/* if call_mmap fails, our caller will put host_file so we
|
||||
/* if vfs_mmap fails, our caller will put host_file so we
|
||||
* should drop the reference to the coda_file that we got.
|
||||
*/
|
||||
fput(coda_file);
|
||||
|
|
|
@ -193,7 +193,7 @@ static int ecryptfs_mmap(struct file *file, struct vm_area_struct *vma)
|
|||
* natively. If FILESYSTEM_MAX_STACK_DEPTH > 2 or ecryptfs
|
||||
* allows recursive mounting, this will need to be extended.
|
||||
*/
|
||||
if (!lower_file->f_op->mmap)
|
||||
if (!can_mmap_file(lower_file))
|
||||
return -ENODEV;
|
||||
return generic_file_mmap(file, vma);
|
||||
}
|
||||
|
|
|
@ -432,20 +432,20 @@ static const struct vm_operations_struct erofs_dax_vm_ops = {
|
|||
.huge_fault = erofs_dax_huge_fault,
|
||||
};
|
||||
|
||||
static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
static int erofs_file_mmap_prepare(struct vm_area_desc *desc)
|
||||
{
|
||||
if (!IS_DAX(file_inode(file)))
|
||||
return generic_file_readonly_mmap(file, vma);
|
||||
if (!IS_DAX(file_inode(desc->file)))
|
||||
return generic_file_readonly_mmap_prepare(desc);
|
||||
|
||||
if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
|
||||
if ((desc->vm_flags & VM_SHARED) && (desc->vm_flags & VM_MAYWRITE))
|
||||
return -EINVAL;
|
||||
|
||||
vma->vm_ops = &erofs_dax_vm_ops;
|
||||
vm_flags_set(vma, VM_HUGEPAGE);
|
||||
desc->vm_ops = &erofs_dax_vm_ops;
|
||||
desc->vm_flags |= VM_HUGEPAGE;
|
||||
return 0;
|
||||
}
|
||||
#else
|
||||
#define erofs_file_mmap generic_file_readonly_mmap
|
||||
#define erofs_file_mmap_prepare generic_file_readonly_mmap_prepare
|
||||
#endif
|
||||
|
||||
static loff_t erofs_file_llseek(struct file *file, loff_t offset, int whence)
|
||||
|
@ -475,7 +475,7 @@ static loff_t erofs_file_llseek(struct file *file, loff_t offset, int whence)
|
|||
const struct file_operations erofs_file_fops = {
|
||||
.llseek = erofs_file_llseek,
|
||||
.read_iter = erofs_file_read_iter,
|
||||
.mmap = erofs_file_mmap,
|
||||
.mmap_prepare = erofs_file_mmap_prepare,
|
||||
.get_unmapped_area = thp_get_unmapped_area,
|
||||
.splice_read = filemap_splice_read,
|
||||
};
|
||||
|
|
|
@ -682,13 +682,15 @@ static const struct vm_operations_struct exfat_file_vm_ops = {
|
|||
.page_mkwrite = exfat_page_mkwrite,
|
||||
};
|
||||
|
||||
static int exfat_file_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
static int exfat_file_mmap_prepare(struct vm_area_desc *desc)
|
||||
{
|
||||
if (unlikely(exfat_forced_shutdown(file_inode(file)->i_sb)))
|
||||
struct file *file = desc->file;
|
||||
|
||||
if (unlikely(exfat_forced_shutdown(file_inode(desc->file)->i_sb)))
|
||||
return -EIO;
|
||||
|
||||
file_accessed(file);
|
||||
vma->vm_ops = &exfat_file_vm_ops;
|
||||
desc->vm_ops = &exfat_file_vm_ops;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -709,7 +711,7 @@ const struct file_operations exfat_file_operations = {
|
|||
#ifdef CONFIG_COMPAT
|
||||
.compat_ioctl = exfat_compat_ioctl,
|
||||
#endif
|
||||
.mmap = exfat_file_mmap,
|
||||
.mmap_prepare = exfat_file_mmap_prepare,
|
||||
.fsync = exfat_file_fsync,
|
||||
.splice_read = exfat_splice_read,
|
||||
.splice_write = iter_file_splice_write,
|
||||
|
|
|
@ -122,17 +122,19 @@ static const struct vm_operations_struct ext2_dax_vm_ops = {
|
|||
.pfn_mkwrite = ext2_dax_fault,
|
||||
};
|
||||
|
||||
static int ext2_file_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
static int ext2_file_mmap_prepare(struct vm_area_desc *desc)
|
||||
{
|
||||
struct file *file = desc->file;
|
||||
|
||||
if (!IS_DAX(file_inode(file)))
|
||||
return generic_file_mmap(file, vma);
|
||||
return generic_file_mmap_prepare(desc);
|
||||
|
||||
file_accessed(file);
|
||||
vma->vm_ops = &ext2_dax_vm_ops;
|
||||
desc->vm_ops = &ext2_dax_vm_ops;
|
||||
return 0;
|
||||
}
|
||||
#else
|
||||
#define ext2_file_mmap generic_file_mmap
|
||||
#define ext2_file_mmap_prepare generic_file_mmap_prepare
|
||||
#endif
|
||||
|
||||
/*
|
||||
|
@ -316,7 +318,7 @@ const struct file_operations ext2_file_operations = {
|
|||
#ifdef CONFIG_COMPAT
|
||||
.compat_ioctl = ext2_compat_ioctl,
|
||||
#endif
|
||||
.mmap = ext2_file_mmap,
|
||||
.mmap_prepare = ext2_file_mmap_prepare,
|
||||
.open = ext2_file_open,
|
||||
.release = ext2_release_file,
|
||||
.fsync = ext2_fsync,
|
||||
|
|
|
@ -804,9 +804,10 @@ static const struct vm_operations_struct ext4_file_vm_ops = {
|
|||
.page_mkwrite = ext4_page_mkwrite,
|
||||
};
|
||||
|
||||
static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
static int ext4_file_mmap_prepare(struct vm_area_desc *desc)
|
||||
{
|
||||
int ret;
|
||||
struct file *file = desc->file;
|
||||
struct inode *inode = file->f_mapping->host;
|
||||
struct dax_device *dax_dev = EXT4_SB(inode->i_sb)->s_daxdev;
|
||||
|
||||
|
@ -821,15 +822,15 @@ static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
|
|||
* We don't support synchronous mappings for non-DAX files and
|
||||
* for DAX files if underneath dax_device is not synchronous.
|
||||
*/
|
||||
if (!daxdev_mapping_supported(vma, dax_dev))
|
||||
if (!daxdev_mapping_supported(desc->vm_flags, file_inode(file), dax_dev))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
file_accessed(file);
|
||||
if (IS_DAX(file_inode(file))) {
|
||||
vma->vm_ops = &ext4_dax_vm_ops;
|
||||
vm_flags_set(vma, VM_HUGEPAGE);
|
||||
desc->vm_ops = &ext4_dax_vm_ops;
|
||||
desc->vm_flags |= VM_HUGEPAGE;
|
||||
} else {
|
||||
vma->vm_ops = &ext4_file_vm_ops;
|
||||
desc->vm_ops = &ext4_file_vm_ops;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -968,7 +969,7 @@ const struct file_operations ext4_file_operations = {
|
|||
#ifdef CONFIG_COMPAT
|
||||
.compat_ioctl = ext4_compat_ioctl,
|
||||
#endif
|
||||
.mmap = ext4_file_mmap,
|
||||
.mmap_prepare = ext4_file_mmap_prepare,
|
||||
.open = ext4_file_open,
|
||||
.release = ext4_release_file,
|
||||
.fsync = ext4_sync_file,
|
||||
|
|
|
@ -548,8 +548,9 @@ static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
static int f2fs_file_mmap_prepare(struct vm_area_desc *desc)
|
||||
{
|
||||
struct file *file = desc->file;
|
||||
struct inode *inode = file_inode(file);
|
||||
|
||||
if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
|
||||
|
@ -559,7 +560,7 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
|
|||
return -EOPNOTSUPP;
|
||||
|
||||
file_accessed(file);
|
||||
vma->vm_ops = &f2fs_file_vm_ops;
|
||||
desc->vm_ops = &f2fs_file_vm_ops;
|
||||
|
||||
f2fs_down_read(&F2FS_I(inode)->i_sem);
|
||||
set_inode_flag(inode, FI_MMAP_FILE);
|
||||
|
@ -5414,7 +5415,7 @@ const struct file_operations f2fs_file_operations = {
|
|||
.iopoll = iocb_bio_iopoll,
|
||||
.open = f2fs_file_open,
|
||||
.release = f2fs_release_file,
|
||||
.mmap = f2fs_file_mmap,
|
||||
.mmap_prepare = f2fs_file_mmap_prepare,
|
||||
.flush = f2fs_file_flush,
|
||||
.fsync = f2fs_sync_file,
|
||||
.fallocate = f2fs_fallocate,
|
||||
|
|
|
@ -204,7 +204,7 @@ const struct file_operations fat_file_operations = {
|
|||
.llseek = generic_file_llseek,
|
||||
.read_iter = generic_file_read_iter,
|
||||
.write_iter = generic_file_write_iter,
|
||||
.mmap = generic_file_mmap,
|
||||
.mmap_prepare = generic_file_mmap_prepare,
|
||||
.release = fat_file_release,
|
||||
.unlocked_ioctl = fat_generic_ioctl,
|
||||
.compat_ioctl = compat_ptr_ioctl,
|
||||
|
|
|
@ -690,7 +690,7 @@ static const struct file_operations hfs_file_operations = {
|
|||
.llseek = generic_file_llseek,
|
||||
.read_iter = generic_file_read_iter,
|
||||
.write_iter = generic_file_write_iter,
|
||||
.mmap = generic_file_mmap,
|
||||
.mmap_prepare = generic_file_mmap_prepare,
|
||||
.splice_read = filemap_splice_read,
|
||||
.fsync = hfs_file_fsync,
|
||||
.open = hfs_file_open,
|
||||
|
|
|
@ -368,7 +368,7 @@ static const struct file_operations hfsplus_file_operations = {
|
|||
.llseek = generic_file_llseek,
|
||||
.read_iter = generic_file_read_iter,
|
||||
.write_iter = generic_file_write_iter,
|
||||
.mmap = generic_file_mmap,
|
||||
.mmap_prepare = generic_file_mmap_prepare,
|
||||
.splice_read = filemap_splice_read,
|
||||
.fsync = hfsplus_file_fsync,
|
||||
.open = hfsplus_file_open,
|
||||
|
|
|
@ -382,7 +382,7 @@ static const struct file_operations hostfs_file_fops = {
|
|||
.splice_write = iter_file_splice_write,
|
||||
.read_iter = generic_file_read_iter,
|
||||
.write_iter = generic_file_write_iter,
|
||||
.mmap = generic_file_mmap,
|
||||
.mmap_prepare = generic_file_mmap_prepare,
|
||||
.open = hostfs_open,
|
||||
.release = hostfs_file_release,
|
||||
.fsync = hostfs_fsync,
|
||||
|
|
|
@ -257,7 +257,7 @@ const struct file_operations hpfs_file_ops =
|
|||
.llseek = generic_file_llseek,
|
||||
.read_iter = generic_file_read_iter,
|
||||
.write_iter = generic_file_write_iter,
|
||||
.mmap = generic_file_mmap,
|
||||
.mmap_prepare = generic_file_mmap_prepare,
|
||||
.release = hpfs_file_release,
|
||||
.fsync = hpfs_file_fsync,
|
||||
.splice_read = filemap_splice_read,
|
||||
|
|
|
@ -56,7 +56,7 @@ const struct file_operations jffs2_file_operations =
|
|||
.read_iter = generic_file_read_iter,
|
||||
.write_iter = generic_file_write_iter,
|
||||
.unlocked_ioctl=jffs2_ioctl,
|
||||
.mmap = generic_file_readonly_mmap,
|
||||
.mmap_prepare = generic_file_readonly_mmap_prepare,
|
||||
.fsync = jffs2_fsync,
|
||||
.splice_read = filemap_splice_read,
|
||||
.splice_write = iter_file_splice_write,
|
||||
|
|
|
@ -143,7 +143,7 @@ const struct file_operations jfs_file_operations = {
|
|||
.llseek = generic_file_llseek,
|
||||
.read_iter = generic_file_read_iter,
|
||||
.write_iter = generic_file_write_iter,
|
||||
.mmap = generic_file_mmap,
|
||||
.mmap_prepare = generic_file_mmap_prepare,
|
||||
.splice_read = filemap_splice_read,
|
||||
.splice_write = iter_file_splice_write,
|
||||
.fsync = jfs_fsync,
|
||||
|
|
|
@ -17,7 +17,7 @@ const struct file_operations minix_file_operations = {
|
|||
.llseek = generic_file_llseek,
|
||||
.read_iter = generic_file_read_iter,
|
||||
.write_iter = generic_file_write_iter,
|
||||
.mmap = generic_file_mmap,
|
||||
.mmap_prepare = generic_file_mmap_prepare,
|
||||
.fsync = generic_file_fsync,
|
||||
.splice_read = filemap_splice_read,
|
||||
};
|
||||
|
|
|
@ -207,24 +207,25 @@ nfs_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe
|
|||
EXPORT_SYMBOL_GPL(nfs_file_splice_read);
|
||||
|
||||
int
|
||||
nfs_file_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
nfs_file_mmap_prepare(struct vm_area_desc *desc)
|
||||
{
|
||||
struct file *file = desc->file;
|
||||
struct inode *inode = file_inode(file);
|
||||
int status;
|
||||
|
||||
dprintk("NFS: mmap(%pD2)\n", file);
|
||||
|
||||
/* Note: generic_file_mmap() returns ENOSYS on nommu systems
|
||||
/* Note: generic_file_mmap_prepare() returns ENOSYS on nommu systems
|
||||
* so we call that before revalidating the mapping
|
||||
*/
|
||||
status = generic_file_mmap(file, vma);
|
||||
status = generic_file_mmap_prepare(desc);
|
||||
if (!status) {
|
||||
vma->vm_ops = &nfs_file_vm_ops;
|
||||
desc->vm_ops = &nfs_file_vm_ops;
|
||||
status = nfs_revalidate_mapping(inode, file->f_mapping);
|
||||
}
|
||||
return status;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(nfs_file_mmap);
|
||||
EXPORT_SYMBOL_GPL(nfs_file_mmap_prepare);
|
||||
|
||||
/*
|
||||
* Flush any dirty pages for this process, and check for write errors.
|
||||
|
@ -903,7 +904,7 @@ const struct file_operations nfs_file_operations = {
|
|||
.llseek = nfs_file_llseek,
|
||||
.read_iter = nfs_file_read,
|
||||
.write_iter = nfs_file_write,
|
||||
.mmap = nfs_file_mmap,
|
||||
.mmap_prepare = nfs_file_mmap_prepare,
|
||||
.open = nfs_file_open,
|
||||
.flush = nfs_file_flush,
|
||||
.release = nfs_file_release,
|
||||
|
|
|
@ -432,7 +432,7 @@ loff_t nfs_file_llseek(struct file *, loff_t, int);
|
|||
ssize_t nfs_file_read(struct kiocb *, struct iov_iter *);
|
||||
ssize_t nfs_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe,
|
||||
size_t len, unsigned int flags);
|
||||
int nfs_file_mmap(struct file *, struct vm_area_struct *);
|
||||
int nfs_file_mmap_prepare(struct vm_area_desc *);
|
||||
ssize_t nfs_file_write(struct kiocb *, struct iov_iter *);
|
||||
int nfs_file_release(struct inode *, struct file *);
|
||||
int nfs_lock(struct file *, int, struct file_lock *);
|
||||
|
|
|
@ -456,7 +456,7 @@ static int nfs4_setlease(struct file *file, int arg, struct file_lease **lease,
|
|||
const struct file_operations nfs4_file_operations = {
|
||||
.read_iter = nfs_file_read,
|
||||
.write_iter = nfs_file_write,
|
||||
.mmap = nfs_file_mmap,
|
||||
.mmap_prepare = nfs_file_mmap_prepare,
|
||||
.open = nfs4_file_open,
|
||||
.flush = nfs4_file_flush,
|
||||
.release = nfs_file_release,
|
||||
|
|
|
@ -125,10 +125,10 @@ static const struct vm_operations_struct nilfs_file_vm_ops = {
|
|||
.page_mkwrite = nilfs_page_mkwrite,
|
||||
};
|
||||
|
||||
static int nilfs_file_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
static int nilfs_file_mmap_prepare(struct vm_area_desc *desc)
|
||||
{
|
||||
file_accessed(file);
|
||||
vma->vm_ops = &nilfs_file_vm_ops;
|
||||
file_accessed(desc->file);
|
||||
desc->vm_ops = &nilfs_file_vm_ops;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -144,7 +144,7 @@ const struct file_operations nilfs_file_operations = {
|
|||
#ifdef CONFIG_COMPAT
|
||||
.compat_ioctl = nilfs_compat_ioctl,
|
||||
#endif /* CONFIG_COMPAT */
|
||||
.mmap = nilfs_file_mmap,
|
||||
.mmap_prepare = nilfs_file_mmap_prepare,
|
||||
.open = generic_file_open,
|
||||
/* .release = nilfs_release_file, */
|
||||
.fsync = nilfs_sync_file,
|
||||
|
|
|
@ -269,14 +269,15 @@ out:
|
|||
}
|
||||
|
||||
/*
|
||||
* ntfs_file_mmap - file_operations::mmap
|
||||
* ntfs_file_mmap_prepare - file_operations::mmap_prepare
|
||||
*/
|
||||
static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
static int ntfs_file_mmap_prepare(struct vm_area_desc *desc)
|
||||
{
|
||||
struct file *file = desc->file;
|
||||
struct inode *inode = file_inode(file);
|
||||
struct ntfs_inode *ni = ntfs_i(inode);
|
||||
u64 from = ((u64)vma->vm_pgoff << PAGE_SHIFT);
|
||||
bool rw = vma->vm_flags & VM_WRITE;
|
||||
u64 from = ((u64)desc->pgoff << PAGE_SHIFT);
|
||||
bool rw = desc->vm_flags & VM_WRITE;
|
||||
int err;
|
||||
|
||||
/* Avoid any operation if inode is bad. */
|
||||
|
@ -303,7 +304,7 @@ static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
|
|||
|
||||
if (rw) {
|
||||
u64 to = min_t(loff_t, i_size_read(inode),
|
||||
from + vma->vm_end - vma->vm_start);
|
||||
from + desc->end - desc->start);
|
||||
|
||||
if (is_sparsed(ni)) {
|
||||
/* Allocate clusters for rw map. */
|
||||
|
@ -334,7 +335,7 @@ static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
|
|||
}
|
||||
}
|
||||
|
||||
err = generic_file_mmap(file, vma);
|
||||
err = generic_file_mmap_prepare(desc);
|
||||
out:
|
||||
return err;
|
||||
}
|
||||
|
@ -1366,7 +1367,7 @@ const struct file_operations ntfs_file_operations = {
|
|||
#endif
|
||||
.splice_read = ntfs_file_splice_read,
|
||||
.splice_write = ntfs_file_splice_write,
|
||||
.mmap = ntfs_file_mmap,
|
||||
.mmap_prepare = ntfs_file_mmap_prepare,
|
||||
.open = ntfs_file_open,
|
||||
.fsync = generic_file_fsync,
|
||||
.fallocate = ntfs_fallocate,
|
||||
|
|
|
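Illustrative aside: the ntfs conversion above also shows that the descriptor carries the proposed mapping geometry (desc->pgoff, desc->start, desc->end), so size-dependent decisions that used to read vma->vm_pgoff and vma->vm_start/vm_end can be made before any VMA exists. A hedged sketch of that pattern, with foofs and the range-preparation helper as placeholders:

/* Hypothetical sketch: use the mapping geometry carried by the descriptor. */
static int foofs_file_mmap_prepare(struct vm_area_desc *desc)
{
	struct inode *inode = file_inode(desc->file);
	loff_t from = (loff_t)desc->pgoff << PAGE_SHIFT;
	loff_t to = min_t(loff_t, i_size_read(inode),
			  from + desc->end - desc->start);

	if (desc->vm_flags & VM_WRITE) {
		/* e.g. preallocate blocks for [from, to); placeholder helper */
		int err = foofs_prepare_writable_range(inode, from, to);

		if (err)
			return err;
	}

	return generic_file_mmap_prepare(desc);
}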
@ -2800,7 +2800,7 @@ const struct inode_operations ocfs2_special_file_iops = {
|
|||
*/
|
||||
const struct file_operations ocfs2_fops = {
|
||||
.llseek = ocfs2_file_llseek,
|
||||
.mmap = ocfs2_mmap,
|
||||
.mmap_prepare = ocfs2_mmap_prepare,
|
||||
.fsync = ocfs2_sync_file,
|
||||
.release = ocfs2_file_release,
|
||||
.open = ocfs2_file_open,
|
||||
|
@ -2850,7 +2850,7 @@ const struct file_operations ocfs2_dops = {
|
|||
*/
|
||||
const struct file_operations ocfs2_fops_no_plocks = {
|
||||
.llseek = ocfs2_file_llseek,
|
||||
.mmap = ocfs2_mmap,
|
||||
.mmap_prepare = ocfs2_mmap_prepare,
|
||||
.fsync = ocfs2_sync_file,
|
||||
.release = ocfs2_file_release,
|
||||
.open = ocfs2_file_open,
|
||||
|
|
|
@ -159,8 +159,9 @@ static const struct vm_operations_struct ocfs2_file_vm_ops = {
|
|||
.page_mkwrite = ocfs2_page_mkwrite,
|
||||
};
|
||||
|
||||
int ocfs2_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
int ocfs2_mmap_prepare(struct vm_area_desc *desc)
|
||||
{
|
||||
struct file *file = desc->file;
|
||||
int ret = 0, lock_level = 0;
|
||||
|
||||
ret = ocfs2_inode_lock_atime(file_inode(file),
|
||||
|
@ -171,7 +172,7 @@ int ocfs2_mmap(struct file *file, struct vm_area_struct *vma)
|
|||
}
|
||||
ocfs2_inode_unlock(file_inode(file), lock_level);
|
||||
out:
|
||||
vma->vm_ops = &ocfs2_file_vm_ops;
|
||||
desc->vm_ops = &ocfs2_file_vm_ops;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -2,6 +2,6 @@
|
|||
#ifndef OCFS2_MMAP_H
|
||||
#define OCFS2_MMAP_H
|
||||
|
||||
int ocfs2_mmap(struct file *file, struct vm_area_struct *vma);
|
||||
int ocfs2_mmap_prepare(struct vm_area_desc *desc);
|
||||
|
||||
#endif /* OCFS2_MMAP_H */
|
||||
|
|
|
@ -333,7 +333,7 @@ const struct file_operations omfs_file_operations = {
|
|||
.llseek = generic_file_llseek,
|
||||
.read_iter = generic_file_read_iter,
|
||||
.write_iter = generic_file_write_iter,
|
||||
.mmap = generic_file_mmap,
|
||||
.mmap_prepare = generic_file_mmap_prepare,
|
||||
.fsync = generic_file_fsync,
|
||||
.splice_read = filemap_splice_read,
|
||||
};
|
||||
|
|
|
@ -398,8 +398,9 @@ static const struct vm_operations_struct orangefs_file_vm_ops = {
|
|||
/*
|
||||
* Memory map a region of a file.
|
||||
*/
|
||||
static int orangefs_file_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
static int orangefs_file_mmap_prepare(struct vm_area_desc *desc)
|
||||
{
|
||||
struct file *file = desc->file;
|
||||
int ret;
|
||||
|
||||
ret = orangefs_revalidate_mapping(file_inode(file));
|
||||
|
@ -410,10 +411,11 @@ static int orangefs_file_mmap(struct file *file, struct vm_area_struct *vma)
|
|||
"orangefs_file_mmap: called on %pD\n", file);
|
||||
|
||||
/* set the sequential readahead hint */
|
||||
vm_flags_mod(vma, VM_SEQ_READ, VM_RAND_READ);
|
||||
desc->vm_flags |= VM_SEQ_READ;
|
||||
desc->vm_flags &= ~VM_RAND_READ;
|
||||
|
||||
file_accessed(file);
|
||||
vma->vm_ops = &orangefs_file_vm_ops;
|
||||
desc->vm_ops = &orangefs_file_vm_ops;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -574,7 +576,7 @@ const struct file_operations orangefs_file_operations = {
|
|||
.read_iter = orangefs_file_read_iter,
|
||||
.write_iter = orangefs_file_write_iter,
|
||||
.lock = orangefs_lock,
|
||||
.mmap = orangefs_file_mmap,
|
||||
.mmap_prepare = orangefs_file_mmap_prepare,
|
||||
.open = generic_file_open,
|
||||
.splice_read = orangefs_file_splice_read,
|
||||
.splice_write = iter_file_splice_write,
|
||||
|
|
|
@ -41,7 +41,7 @@ static unsigned long ramfs_mmu_get_unmapped_area(struct file *file,
|
|||
const struct file_operations ramfs_file_operations = {
|
||||
.read_iter = generic_file_read_iter,
|
||||
.write_iter = generic_file_write_iter,
|
||||
.mmap = generic_file_mmap,
|
||||
.mmap_prepare = generic_file_mmap_prepare,
|
||||
.fsync = noop_fsync,
|
||||
.splice_read = filemap_splice_read,
|
||||
.splice_write = iter_file_splice_write,
|
||||
|
|
|
@ -28,7 +28,7 @@ static unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
|
|||
unsigned long len,
|
||||
unsigned long pgoff,
|
||||
unsigned long flags);
|
||||
static int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma);
|
||||
static int ramfs_nommu_mmap_prepare(struct vm_area_desc *desc);
|
||||
|
||||
static unsigned ramfs_mmap_capabilities(struct file *file)
|
||||
{
|
||||
|
@ -38,7 +38,7 @@ static unsigned ramfs_mmap_capabilities(struct file *file)
|
|||
|
||||
const struct file_operations ramfs_file_operations = {
|
||||
.mmap_capabilities = ramfs_mmap_capabilities,
|
||||
.mmap = ramfs_nommu_mmap,
|
||||
.mmap_prepare = ramfs_nommu_mmap_prepare,
|
||||
.get_unmapped_area = ramfs_nommu_get_unmapped_area,
|
||||
.read_iter = generic_file_read_iter,
|
||||
.write_iter = generic_file_write_iter,
|
||||
|
@ -262,12 +262,12 @@ out:
|
|||
/*
|
||||
* set up a mapping for shared memory segments
|
||||
*/
|
||||
static int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
static int ramfs_nommu_mmap_prepare(struct vm_area_desc *desc)
|
||||
{
|
||||
if (!is_nommu_shared_mapping(vma->vm_flags))
|
||||
if (!is_nommu_shared_mapping(desc->vm_flags))
|
||||
return -ENOSYS;
|
||||
|
||||
file_accessed(file);
|
||||
vma->vm_ops = &generic_file_vm_ops;
|
||||
file_accessed(desc->file);
|
||||
desc->vm_ops = &generic_file_vm_ops;
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -28,7 +28,7 @@
|
|||
const struct file_operations generic_ro_fops = {
|
||||
.llseek = generic_file_llseek,
|
||||
.read_iter = generic_file_read_iter,
|
||||
.mmap = generic_file_readonly_mmap,
|
||||
.mmap_prepare = generic_file_readonly_mmap_prepare,
|
||||
.splice_read = filemap_splice_read,
|
||||
};
|
||||
|
||||
|
|
|
@ -61,9 +61,9 @@ static unsigned long romfs_get_unmapped_area(struct file *file,
|
|||
* permit a R/O mapping to be made directly through onto an MTD device if
|
||||
* possible
|
||||
*/
|
||||
static int romfs_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
static int romfs_mmap_prepare(struct vm_area_desc *desc)
|
||||
{
|
||||
return is_nommu_shared_mapping(vma->vm_flags) ? 0 : -ENOSYS;
|
||||
return is_nommu_shared_mapping(desc->vm_flags) ? 0 : -ENOSYS;
|
||||
}
|
||||
|
||||
static unsigned romfs_mmap_capabilities(struct file *file)
|
||||
|
@ -79,7 +79,7 @@ const struct file_operations romfs_ro_fops = {
|
|||
.llseek = generic_file_llseek,
|
||||
.read_iter = generic_file_read_iter,
|
||||
.splice_read = filemap_splice_read,
|
||||
.mmap = romfs_mmap,
|
||||
.mmap_prepare = romfs_mmap_prepare,
|
||||
.get_unmapped_area = romfs_get_unmapped_area,
|
||||
.mmap_capabilities = romfs_mmap_capabilities,
|
||||
};
|
||||
|
|
|
@ -1525,7 +1525,7 @@ const struct file_operations cifs_file_ops = {
|
|||
.flock = cifs_flock,
|
||||
.fsync = cifs_fsync,
|
||||
.flush = cifs_flush,
|
||||
.mmap = cifs_file_mmap,
|
||||
.mmap_prepare = cifs_file_mmap_prepare,
|
||||
.splice_read = filemap_splice_read,
|
||||
.splice_write = iter_file_splice_write,
|
||||
.llseek = cifs_llseek,
|
||||
|
@ -1545,7 +1545,7 @@ const struct file_operations cifs_file_strict_ops = {
|
|||
.flock = cifs_flock,
|
||||
.fsync = cifs_strict_fsync,
|
||||
.flush = cifs_flush,
|
||||
.mmap = cifs_file_strict_mmap,
|
||||
.mmap_prepare = cifs_file_strict_mmap_prepare,
|
||||
.splice_read = filemap_splice_read,
|
||||
.splice_write = iter_file_splice_write,
|
||||
.llseek = cifs_llseek,
|
||||
|
@ -1565,7 +1565,7 @@ const struct file_operations cifs_file_direct_ops = {
|
|||
.flock = cifs_flock,
|
||||
.fsync = cifs_fsync,
|
||||
.flush = cifs_flush,
|
||||
.mmap = cifs_file_mmap,
|
||||
.mmap_prepare = cifs_file_mmap_prepare,
|
||||
.splice_read = copy_splice_read,
|
||||
.splice_write = iter_file_splice_write,
|
||||
.unlocked_ioctl = cifs_ioctl,
|
||||
|
@ -1583,7 +1583,7 @@ const struct file_operations cifs_file_nobrl_ops = {
|
|||
.release = cifs_close,
|
||||
.fsync = cifs_fsync,
|
||||
.flush = cifs_flush,
|
||||
.mmap = cifs_file_mmap,
|
||||
.mmap_prepare = cifs_file_mmap_prepare,
|
||||
.splice_read = filemap_splice_read,
|
||||
.splice_write = iter_file_splice_write,
|
||||
.llseek = cifs_llseek,
|
||||
|
@ -1601,7 +1601,7 @@ const struct file_operations cifs_file_strict_nobrl_ops = {
|
|||
.release = cifs_close,
|
||||
.fsync = cifs_strict_fsync,
|
||||
.flush = cifs_flush,
|
||||
.mmap = cifs_file_strict_mmap,
|
||||
.mmap_prepare = cifs_file_strict_mmap_prepare,
|
||||
.splice_read = filemap_splice_read,
|
||||
.splice_write = iter_file_splice_write,
|
||||
.llseek = cifs_llseek,
|
||||
|
@ -1619,7 +1619,7 @@ const struct file_operations cifs_file_direct_nobrl_ops = {
|
|||
.release = cifs_close,
|
||||
.fsync = cifs_fsync,
|
||||
.flush = cifs_flush,
|
||||
.mmap = cifs_file_mmap,
|
||||
.mmap_prepare = cifs_file_mmap_prepare,
|
||||
.splice_read = copy_splice_read,
|
||||
.splice_write = iter_file_splice_write,
|
||||
.unlocked_ioctl = cifs_ioctl,
|
||||
|
|
|
@ -103,8 +103,8 @@ extern int cifs_lock(struct file *, int, struct file_lock *);
|
|||
extern int cifs_fsync(struct file *, loff_t, loff_t, int);
|
||||
extern int cifs_strict_fsync(struct file *, loff_t, loff_t, int);
|
||||
extern int cifs_flush(struct file *, fl_owner_t id);
|
||||
extern int cifs_file_mmap(struct file *file, struct vm_area_struct *vma);
|
||||
extern int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma);
|
||||
int cifs_file_mmap_prepare(struct vm_area_desc *desc);
|
||||
int cifs_file_strict_mmap_prepare(struct vm_area_desc *desc);
|
||||
extern const struct file_operations cifs_dir_ops;
|
||||
extern int cifs_readdir(struct file *file, struct dir_context *ctx);
|
||||
|
||||
|
|
|
@ -2999,38 +2999,38 @@ static const struct vm_operations_struct cifs_file_vm_ops = {
|
|||
.page_mkwrite = cifs_page_mkwrite,
|
||||
};
|
||||
|
||||
int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
int cifs_file_strict_mmap_prepare(struct vm_area_desc *desc)
|
||||
{
|
||||
int xid, rc = 0;
|
||||
struct inode *inode = file_inode(file);
|
||||
struct inode *inode = file_inode(desc->file);
|
||||
|
||||
xid = get_xid();
|
||||
|
||||
if (!CIFS_CACHE_READ(CIFS_I(inode)))
|
||||
rc = cifs_zap_mapping(inode);
|
||||
if (!rc)
|
||||
rc = generic_file_mmap(file, vma);
|
||||
rc = generic_file_mmap_prepare(desc);
|
||||
if (!rc)
|
||||
vma->vm_ops = &cifs_file_vm_ops;
|
||||
desc->vm_ops = &cifs_file_vm_ops;
|
||||
|
||||
free_xid(xid);
|
||||
return rc;
|
||||
}
|
||||
|
||||
int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
int cifs_file_mmap_prepare(struct vm_area_desc *desc)
|
||||
{
|
||||
int rc, xid;
|
||||
|
||||
xid = get_xid();
|
||||
|
||||
rc = cifs_revalidate_file(file);
|
||||
rc = cifs_revalidate_file(desc->file);
|
||||
if (rc)
|
||||
cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
|
||||
rc);
|
||||
if (!rc)
|
||||
rc = generic_file_mmap(file, vma);
|
||||
rc = generic_file_mmap_prepare(desc);
|
||||
if (!rc)
|
||||
vma->vm_ops = &cifs_file_vm_ops;
|
||||
desc->vm_ops = &cifs_file_vm_ops;
|
||||
|
||||
free_xid(xid);
|
||||
return rc;
|
||||
|
|
|
@ -1581,17 +1581,17 @@ static const struct vm_operations_struct ubifs_file_vm_ops = {
|
|||
.page_mkwrite = ubifs_vm_page_mkwrite,
|
||||
};
|
||||
|
||||
static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
static int ubifs_file_mmap_prepare(struct vm_area_desc *desc)
|
||||
{
|
||||
int err;
|
||||
|
||||
err = generic_file_mmap(file, vma);
|
||||
err = generic_file_mmap_prepare(desc);
|
||||
if (err)
|
||||
return err;
|
||||
vma->vm_ops = &ubifs_file_vm_ops;
|
||||
desc->vm_ops = &ubifs_file_vm_ops;
|
||||
|
||||
if (IS_ENABLED(CONFIG_UBIFS_ATIME_SUPPORT))
|
||||
file_accessed(file);
|
||||
file_accessed(desc->file);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1654,7 +1654,7 @@ const struct file_operations ubifs_file_operations = {
|
|||
.llseek = generic_file_llseek,
|
||||
.read_iter = generic_file_read_iter,
|
||||
.write_iter = ubifs_write_iter,
|
||||
.mmap = ubifs_file_mmap,
|
||||
.mmap_prepare = ubifs_file_mmap_prepare,
|
||||
.fsync = ubifs_fsync,
|
||||
.unlocked_ioctl = ubifs_ioctl,
|
||||
.splice_read = filemap_splice_read,
|
||||
|
|
|
@ -38,7 +38,7 @@ const struct file_operations ufs_file_operations = {
|
|||
.llseek = generic_file_llseek,
|
||||
.read_iter = generic_file_read_iter,
|
||||
.write_iter = generic_file_write_iter,
|
||||
.mmap = generic_file_mmap,
|
||||
.mmap_prepare = generic_file_mmap_prepare,
|
||||
.open = generic_file_open,
|
||||
.fsync = generic_file_fsync,
|
||||
.splice_read = filemap_splice_read,
|
||||
|
|
|
@ -165,13 +165,13 @@ static const struct vm_operations_struct vboxsf_file_vm_ops = {
|
|||
.map_pages = filemap_map_pages,
|
||||
};
|
||||
|
||||
static int vboxsf_file_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
static int vboxsf_file_mmap_prepare(struct vm_area_desc *desc)
|
||||
{
|
||||
int err;
|
||||
|
||||
err = generic_file_mmap(file, vma);
|
||||
err = generic_file_mmap_prepare(desc);
|
||||
if (!err)
|
||||
vma->vm_ops = &vboxsf_file_vm_ops;
|
||||
desc->vm_ops = &vboxsf_file_vm_ops;
|
||||
|
||||
return err;
|
||||
}
|
||||
|
@ -213,7 +213,7 @@ const struct file_operations vboxsf_reg_fops = {
|
|||
.llseek = generic_file_llseek,
|
||||
.read_iter = generic_file_read_iter,
|
||||
.write_iter = generic_file_write_iter,
|
||||
.mmap = vboxsf_file_mmap,
|
||||
.mmap_prepare = vboxsf_file_mmap_prepare,
|
||||
.open = vboxsf_file_open,
|
||||
.release = vboxsf_file_release,
|
||||
.fsync = noop_fsync,
|
||||
|
|
|
@ -1914,10 +1914,10 @@ static const struct vm_operations_struct xfs_file_vm_ops = {
|
|||
};
|
||||
|
||||
STATIC int
|
||||
xfs_file_mmap(
|
||||
struct file *file,
|
||||
struct vm_area_struct *vma)
|
||||
xfs_file_mmap_prepare(
|
||||
struct vm_area_desc *desc)
|
||||
{
|
||||
struct file *file = desc->file;
|
||||
struct inode *inode = file_inode(file);
|
||||
struct xfs_buftarg *target = xfs_inode_buftarg(XFS_I(inode));
|
||||
|
||||
|
@ -1925,13 +1925,14 @@ xfs_file_mmap(
|
|||
* We don't support synchronous mappings for non-DAX files and
|
||||
* for DAX files if underneath dax_device is not synchronous.
|
||||
*/
|
||||
if (!daxdev_mapping_supported(vma, target->bt_daxdev))
|
||||
if (!daxdev_mapping_supported(desc->vm_flags, file_inode(file),
|
||||
target->bt_daxdev))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
file_accessed(file);
|
||||
vma->vm_ops = &xfs_file_vm_ops;
|
||||
desc->vm_ops = &xfs_file_vm_ops;
|
||||
if (IS_DAX(inode))
|
||||
vm_flags_set(vma, VM_HUGEPAGE);
|
||||
desc->vm_flags |= VM_HUGEPAGE;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1946,7 +1947,7 @@ const struct file_operations xfs_file_operations = {
|
|||
#ifdef CONFIG_COMPAT
|
||||
.compat_ioctl = xfs_file_compat_ioctl,
|
||||
#endif
|
||||
.mmap = xfs_file_mmap,
|
||||
.mmap_prepare = xfs_file_mmap_prepare,
|
||||
.open = xfs_file_open,
|
||||
.release = xfs_file_release,
|
||||
.fsync = xfs_file_fsync,
|
||||
|
|
|
@ -312,8 +312,10 @@ static const struct vm_operations_struct zonefs_file_vm_ops = {
|
|||
.page_mkwrite = zonefs_filemap_page_mkwrite,
|
||||
};
|
||||
|
||||
static int zonefs_file_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
static int zonefs_file_mmap_prepare(struct vm_area_desc *desc)
|
||||
{
|
||||
struct file *file = desc->file;
|
||||
|
||||
/*
|
||||
* Conventional zones accept random writes, so their files can support
|
||||
* shared writable mappings. For sequential zone files, only read
|
||||
|
@ -321,11 +323,11 @@ static int zonefs_file_mmap(struct file *file, struct vm_area_struct *vma)
|
|||
* ordering between msync() and page cache writeback.
|
||||
*/
|
||||
if (zonefs_inode_is_seq(file_inode(file)) &&
|
||||
(vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
|
||||
(desc->vm_flags & VM_SHARED) && (desc->vm_flags & VM_MAYWRITE))
|
||||
return -EINVAL;
|
||||
|
||||
file_accessed(file);
|
||||
vma->vm_ops = &zonefs_file_vm_ops;
|
||||
desc->vm_ops = &zonefs_file_vm_ops;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -850,7 +852,7 @@ const struct file_operations zonefs_file_operations = {
|
|||
.open = zonefs_file_open,
|
||||
.release = zonefs_file_release,
|
||||
.fsync = zonefs_file_fsync,
|
||||
.mmap = zonefs_file_mmap,
|
||||
.mmap_prepare = zonefs_file_mmap_prepare,
|
||||
.llseek = zonefs_file_llseek,
|
||||
.read_iter = zonefs_file_read_iter,
|
||||
.write_iter = zonefs_file_write_iter,
|
||||
|
|
|
@@ -65,12 +65,13 @@ size_t dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
/*
 * Check if given mapping is supported by the file / underlying device.
 */
static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
					    struct dax_device *dax_dev)
static inline bool daxdev_mapping_supported(vm_flags_t vm_flags,
					    const struct inode *inode,
					    struct dax_device *dax_dev)
{
	if (!(vma->vm_flags & VM_SYNC))
	if (!(vm_flags & VM_SYNC))
		return true;
	if (!IS_DAX(file_inode(vma->vm_file)))
	if (!IS_DAX(inode))
		return false;
	return dax_synchronous(dax_dev);
}

@@ -110,10 +111,11 @@ static inline void set_dax_nomc(struct dax_device *dax_dev)
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
}
static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
					    struct dax_device *dax_dev)
static inline bool daxdev_mapping_supported(vm_flags_t vm_flags,
					    const struct inode *inode,
					    struct dax_device *dax_dev)
{
	return !(vma->vm_flags & VM_SYNC);
	return !(vm_flags & VM_SYNC);
}
static inline size_t dax_recovery_write(struct dax_device *dax_dev,
		pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)

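Illustrative aside: because daxdev_mapping_supported() now takes the proposed flags and the inode rather than a VMA, the ext4 and xfs hooks earlier in this diff can run the check from .mmap_prepare. A hedged sketch of the calling pattern, closely modelled on the ext4 conversion; foofs, its vm_ops structures and the dax_device lookup helper are placeholders:

/* Hypothetical sketch: DAX support check from .mmap_prepare, no VMA needed. */
static int foofs_file_mmap_prepare(struct vm_area_desc *desc)
{
	struct file *file = desc->file;
	struct inode *inode = file_inode(file);
	struct dax_device *dax_dev = foofs_inode_dax_dev(inode); /* placeholder lookup */

	/* MAP_SYNC is only honoured on synchronous DAX-backed files. */
	if (!daxdev_mapping_supported(desc->vm_flags, inode, dax_dev))
		return -EOPNOTSUPP;

	file_accessed(file);
	if (IS_DAX(inode)) {
		desc->vm_ops = &foofs_dax_vm_ops;	/* placeholder vm_ops */
		desc->vm_flags |= VM_HUGEPAGE;
	} else {
		desc->vm_ops = &foofs_file_vm_ops;	/* placeholder vm_ops */
	}
	return 0;
}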
@@ -2263,7 +2263,7 @@ struct inode_operations {
} ____cacheline_aligned;

/* Did the driver provide valid mmap hook configuration? */
static inline bool file_has_valid_mmap_hooks(struct file *file)
static inline bool can_mmap_file(struct file *file)
{
	bool has_mmap = file->f_op->mmap;
	bool has_mmap_prepare = file->f_op->mmap_prepare;

@@ -2279,7 +2279,7 @@ static inline bool file_has_valid_mmap_hooks(struct file *file)

int compat_vma_mmap_prepare(struct file *file, struct vm_area_struct *vma);

static inline int call_mmap(struct file *file, struct vm_area_struct *vma)
static inline int vfs_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (file->f_op->mmap_prepare)
		return compat_vma_mmap_prepare(file, vma);

@@ -2287,8 +2287,7 @@ static inline int call_mmap(struct file *file, struct vm_area_struct *vma)
	return file->f_op->mmap(file, vma);
}

static inline int __call_mmap_prepare(struct file *file,
				      struct vm_area_desc *desc)
static inline int vfs_mmap_prepare(struct file *file, struct vm_area_desc *desc)
{
	return file->f_op->mmap_prepare(desc);
}

@@ -3413,8 +3412,10 @@ extern void inode_add_lru(struct inode *inode);
extern int sb_set_blocksize(struct super_block *, int);
extern int sb_min_blocksize(struct super_block *, int);

extern int generic_file_mmap(struct file *, struct vm_area_struct *);
extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
int generic_file_mmap(struct file *, struct vm_area_struct *);
int generic_file_mmap_prepare(struct vm_area_desc *desc);
int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
int generic_file_readonly_mmap_prepare(struct vm_area_desc *desc);
extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *);
int generic_write_checks_count(struct kiocb *iocb, loff_t *count);
extern int generic_write_check_limits(struct file *file, loff_t pos,

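Illustrative aside: on the caller side, the series replaces direct f_op->mmap checks and call_mmap() invocations with the can_mmap_file() and vfs_mmap() helpers declared above, which work regardless of whether the target file implements .mmap or .mmap_prepare. A hedged sketch of a caller following that pattern, loosely modelled on the backing-file and coda changes earlier in the diff; the wrapper function itself is hypothetical:

/* Hypothetical sketch: mapping another file on a caller's behalf. */
static int foofs_map_lower_file(struct file *lower, struct vm_area_struct *vma)
{
	/* True only if the file provides exactly one of .mmap / .mmap_prepare. */
	if (!can_mmap_file(lower))
		return -ENODEV;

	vma_set_file(vma, lower);

	/*
	 * vfs_mmap() dispatches to ->mmap(), or bridges to ->mmap_prepare()
	 * through the compatibility shim for nested file systems.
	 */
	return vfs_mmap(vma->vm_file, vma);
}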
@ -602,7 +602,7 @@ static int shm_mmap(struct file *file, struct vm_area_struct *vma)
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = call_mmap(sfd->file, vma);
|
||||
ret = vfs_mmap(sfd->file, vma);
|
||||
if (ret) {
|
||||
__shm_close(sfd);
|
||||
return ret;
|
||||
|
|
29
mm/filemap.c
29
mm/filemap.c
|
@ -3814,6 +3814,18 @@ int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
|
|||
return 0;
|
||||
}
|
||||
|
||||
int generic_file_mmap_prepare(struct vm_area_desc *desc)
|
||||
{
|
||||
struct file *file = desc->file;
|
||||
struct address_space *mapping = file->f_mapping;
|
||||
|
||||
if (!mapping->a_ops->read_folio)
|
||||
return -ENOEXEC;
|
||||
file_accessed(file);
|
||||
desc->vm_ops = &generic_file_vm_ops;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* This is for filesystems which do not implement ->writepage.
|
||||
*/
|
||||
|
@ -3823,6 +3835,13 @@ int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
|
|||
return -EINVAL;
|
||||
return generic_file_mmap(file, vma);
|
||||
}
|
||||
|
||||
int generic_file_readonly_mmap_prepare(struct vm_area_desc *desc)
|
||||
{
|
||||
if (is_shared_maywrite(desc->vm_flags))
|
||||
return -EINVAL;
|
||||
return generic_file_mmap_prepare(desc);
|
||||
}
|
||||
#else
|
||||
vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
|
||||
{
|
||||
|
@ -3832,15 +3851,25 @@ int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
|
|||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
int generic_file_mmap_prepare(struct vm_area_desc *desc)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
int generic_file_readonly_mmap_prepare(struct vm_area_desc *desc)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
#endif /* CONFIG_MMU */
|
||||
|
||||
EXPORT_SYMBOL(filemap_page_mkwrite);
|
||||
EXPORT_SYMBOL(generic_file_mmap);
|
||||
EXPORT_SYMBOL(generic_file_mmap_prepare);
|
||||
EXPORT_SYMBOL(generic_file_readonly_mmap);
|
||||
EXPORT_SYMBOL(generic_file_readonly_mmap_prepare);
|
||||
|
||||
static struct folio *do_read_cache_folio(struct address_space *mapping,
|
||||
pgoff_t index, filler_t filler, struct file *file, gfp_t gfp)
|
||||
|
|
|
@ -164,7 +164,7 @@ static inline void *folio_raw_mapping(const struct folio *folio)
|
|||
*/
|
||||
static inline int mmap_file(struct file *file, struct vm_area_struct *vma)
|
||||
{
|
||||
int err = call_mmap(file, vma);
|
||||
int err = vfs_mmap(file, vma);
|
||||
|
||||
if (likely(!err))
|
||||
return 0;
|
||||
|
|
|
@ -475,7 +475,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
|
|||
vm_flags &= ~VM_MAYEXEC;
|
||||
}
|
||||
|
||||
if (!file_has_valid_mmap_hooks(file))
|
||||
if (!can_mmap_file(file))
|
||||
return -ENODEV;
|
||||
if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
|
||||
return -EINVAL;
|
||||
|
|
|
@ -719,7 +719,7 @@ static int validate_mmap_request(struct file *file,
|
|||
|
||||
if (file) {
|
||||
/* files must support mmap */
|
||||
if (!file->f_op->mmap)
|
||||
if (!can_mmap_file(file))
|
||||
return -ENODEV;
|
||||
|
||||
/* work out if what we've got could possibly be shared
|
||||
|
|
2
mm/vma.c
2
mm/vma.c
|
@ -2569,7 +2569,7 @@ static int call_mmap_prepare(struct mmap_state *map)
|
|||
};
|
||||
|
||||
/* Invoke the hook. */
|
||||
err = __call_mmap_prepare(map->file, &desc);
|
||||
err = vfs_mmap_prepare(map->file, &desc);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
|
|
|
@ -1442,8 +1442,29 @@ static inline void free_anon_vma_name(struct vm_area_struct *vma)
|
|||
(void)vma;
|
||||
}
|
||||
|
||||
/* Declared in vma.h. */
|
||||
static inline void set_vma_from_desc(struct vm_area_struct *vma,
|
||||
struct vm_area_desc *desc);
|
||||
|
||||
static inline struct vm_area_desc *vma_to_desc(struct vm_area_struct *vma,
|
||||
struct vm_area_desc *desc);
|
||||
|
||||
static int compat_vma_mmap_prepare(struct file *file,
|
||||
struct vm_area_struct *vma)
|
||||
{
|
||||
struct vm_area_desc desc;
|
||||
int err;
|
||||
|
||||
err = file->f_op->mmap_prepare(vma_to_desc(vma, &desc));
|
||||
if (err)
|
||||
return err;
|
||||
set_vma_from_desc(vma, &desc);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Did the driver provide valid mmap hook configuration? */
|
||||
static inline bool file_has_valid_mmap_hooks(struct file *file)
|
||||
static inline bool can_mmap_file(struct file *file)
|
||||
{
|
||||
bool has_mmap = file->f_op->mmap;
|
||||
bool has_mmap_prepare = file->f_op->mmap_prepare;
|
||||
|
@ -1451,22 +1472,21 @@ static inline bool file_has_valid_mmap_hooks(struct file *file)
|
|||
/* Hooks are mutually exclusive. */
|
||||
if (WARN_ON_ONCE(has_mmap && has_mmap_prepare))
|
||||
return false;
|
||||
if (WARN_ON_ONCE(!has_mmap && !has_mmap_prepare))
|
||||
if (!has_mmap && !has_mmap_prepare)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline int call_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
static inline int vfs_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
{
|
||||
if (WARN_ON_ONCE(file->f_op->mmap_prepare))
|
||||
return -EINVAL;
|
||||
if (file->f_op->mmap_prepare)
|
||||
return compat_vma_mmap_prepare(file, vma);
|
||||
|
||||
return file->f_op->mmap(file, vma);
|
||||
}
|
||||
|
||||
static inline int __call_mmap_prepare(struct file *file,
|
||||
struct vm_area_desc *desc)
|
||||
static inline int vfs_mmap_prepare(struct file *file, struct vm_area_desc *desc)
|
||||
{
|
||||
return file->f_op->mmap_prepare(desc);
|
||||
}
|
||||
|
|
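Illustrative aside: the compat_vma_mmap_prepare() path above is what lets an unconverted, nested .mmap implementation sit on top of file systems that only provide .mmap_prepare. A hedged sketch of how a stacking filesystem's legacy hook ends up exercising it; the stackfs names and the lower-file accessor are placeholders:

/* Hypothetical sketch: a stacking filesystem's legacy ->mmap hook. */
static int stackfs_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct file *lower = stackfs_lower_file(file);	/* placeholder accessor */

	if (!can_mmap_file(lower))
		return -ENODEV;

	/*
	 * If the lower filesystem has been converted to .mmap_prepare,
	 * vfs_mmap() builds a vm_area_desc from the VMA, calls the new
	 * hook, and copies the results back via compat_vma_mmap_prepare();
	 * otherwise it calls ->mmap() directly.
	 */
	return vfs_mmap(lower, vma);
}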