Mirror of https://github.com/torvalds/linux.git (synced 2025-08-15 14:11:42 +02:00)
mm: remove callers of pfn_t functionality

All PFN_* pfn_t flags have been removed.  Therefore there is no longer
a need for the pfn_t type and all uses can be replaced with normal
pfns.

Link: https://lkml.kernel.org/r/bbedfa576c9822f8032494efbe43544628698b1f.1750323463.git-series.apopple@nvidia.com
Signed-off-by: Alistair Popple <apopple@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Balbir Singh <balbirs@nvidia.com>
Cc: Björn Töpel <bjorn@kernel.org>
Cc: Björn Töpel <bjorn@rivosinc.com>
Cc: Chunyan Zhang <zhang.lyra@gmail.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Deepak Gupta <debug@rivosinc.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Inki Dae <m.szyprowski@samsung.com>
Cc: John Groves <john@groves.net>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 984921edea
commit 21aa65bf82

43 changed files with 109 additions and 235 deletions
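
Before the per-file hunks: the conversion pattern is the same everywhere. pfn_t was a struct wrapping a raw page frame number whose flag bits had all been removed by earlier patches in this series, so each call site swaps the wrapper and its conversion helpers for a bare unsigned long. A minimal before/after sketch of the typical fault-handler change (vmf_insert_page_mkwrite(), PHYS_PFN() and the pfn_t helpers mirror the kernel APIs appearing in the hunks below; the surrounding example functions are illustrative only):

	/* Before: the pfn travels wrapped in a pfn_t. */
	static vm_fault_t example_fault_old(struct vm_fault *vmf, phys_addr_t phys)
	{
		pfn_t pfn = phys_to_pfn_t(phys, 0);	/* flags argument was always 0 */

		return vmf_insert_page_mkwrite(vmf, pfn_t_to_page(pfn),
					       vmf->flags & FAULT_FLAG_WRITE);
	}

	/* After: a plain pfn; the conversion helpers disappear. */
	static vm_fault_t example_fault_new(struct vm_fault *vmf, phys_addr_t phys)
	{
		unsigned long pfn = PHYS_PFN(phys);

		return vmf_insert_page_mkwrite(vmf, pfn_to_page(pfn),
					       vmf->flags & FAULT_FLAG_WRITE);
	}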
@@ -36,7 +36,6 @@
 #include <linux/debugfs.h>
 #include <linux/ioport.h>
 #include <linux/kernel.h>
-#include <linux/pfn_t.h>
 #include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/mm.h>

@@ -4,7 +4,6 @@
 #include <linux/pagemap.h>
 #include <linux/module.h>
 #include <linux/device.h>
-#include <linux/pfn_t.h>
 #include <linux/cdev.h>
 #include <linux/slab.h>
 #include <linux/dax.h>
@@ -73,7 +72,7 @@ __weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
 	return -1;
 }
 
-static void dax_set_mapping(struct vm_fault *vmf, pfn_t pfn,
+static void dax_set_mapping(struct vm_fault *vmf, unsigned long pfn,
 			    unsigned long fault_size)
 {
 	unsigned long i, nr_pages = fault_size / PAGE_SIZE;
@@ -89,7 +88,7 @@ static void dax_set_mapping(struct vm_fault *vmf, pfn_t pfn,
 			ALIGN_DOWN(vmf->address, fault_size));
 
 	for (i = 0; i < nr_pages; i++) {
-		struct folio *folio = pfn_folio(pfn_t_to_pfn(pfn) + i);
+		struct folio *folio = pfn_folio(pfn + i);
 
 		if (folio->mapping)
 			continue;
@@ -104,7 +103,7 @@ static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
 {
 	struct device *dev = &dev_dax->dev;
 	phys_addr_t phys;
-	pfn_t pfn;
+	unsigned long pfn;
 	unsigned int fault_size = PAGE_SIZE;
 
 	if (check_vma(dev_dax, vmf->vma, __func__))
@@ -125,11 +124,11 @@ static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
 		return VM_FAULT_SIGBUS;
 	}
 
-	pfn = phys_to_pfn_t(phys, 0);
+	pfn = PHYS_PFN(phys);
 
 	dax_set_mapping(vmf, pfn, fault_size);
 
-	return vmf_insert_page_mkwrite(vmf, pfn_t_to_page(pfn),
+	return vmf_insert_page_mkwrite(vmf, pfn_to_page(pfn),
 				       vmf->flags & FAULT_FLAG_WRITE);
 }
 
@@ -140,7 +139,7 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
 	struct device *dev = &dev_dax->dev;
 	phys_addr_t phys;
 	pgoff_t pgoff;
-	pfn_t pfn;
+	unsigned long pfn;
 	unsigned int fault_size = PMD_SIZE;
 
 	if (check_vma(dev_dax, vmf->vma, __func__))
@@ -169,11 +168,11 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
 		return VM_FAULT_SIGBUS;
 	}
 
-	pfn = phys_to_pfn_t(phys, 0);
+	pfn = PHYS_PFN(phys);
 
 	dax_set_mapping(vmf, pfn, fault_size);
 
-	return vmf_insert_folio_pmd(vmf, page_folio(pfn_t_to_page(pfn)),
+	return vmf_insert_folio_pmd(vmf, page_folio(pfn_to_page(pfn)),
 				    vmf->flags & FAULT_FLAG_WRITE);
 }
 
@@ -185,7 +184,7 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
 	struct device *dev = &dev_dax->dev;
 	phys_addr_t phys;
 	pgoff_t pgoff;
-	pfn_t pfn;
+	unsigned long pfn;
 	unsigned int fault_size = PUD_SIZE;
 
 
@@ -215,11 +214,11 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
 		return VM_FAULT_SIGBUS;
 	}
 
-	pfn = phys_to_pfn_t(phys, 0);
+	pfn = PHYS_PFN(phys);
 
 	dax_set_mapping(vmf, pfn, fault_size);
 
-	return vmf_insert_folio_pud(vmf, page_folio(pfn_t_to_page(pfn)),
+	return vmf_insert_folio_pud(vmf, page_folio(pfn_to_page(pfn)),
 				    vmf->flags & FAULT_FLAG_WRITE);
 }
 #else

@@ -2,7 +2,6 @@
 #include <linux/platform_device.h>
 #include <linux/memregion.h>
 #include <linux/module.h>
-#include <linux/pfn_t.h>
 #include <linux/dax.h>
 #include "../bus.h"
 

@@ -5,7 +5,6 @@
 #include <linux/memory.h>
 #include <linux/module.h>
 #include <linux/device.h>
-#include <linux/pfn_t.h>
 #include <linux/slab.h>
 #include <linux/dax.h>
 #include <linux/fs.h>

@@ -2,7 +2,6 @@
 /* Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. */
 #include <linux/memremap.h>
 #include <linux/module.h>
-#include <linux/pfn_t.h>
 #include "../nvdimm/pfn.h"
 #include "../nvdimm/nd.h"
 #include "bus.h"

@@ -7,7 +7,6 @@
 #include <linux/mount.h>
 #include <linux/pseudo_fs.h>
 #include <linux/magic.h>
-#include <linux/pfn_t.h>
 #include <linux/cdev.h>
 #include <linux/slab.h>
 #include <linux/uio.h>
@@ -148,7 +147,7 @@ enum dax_device_flags {
  * pages accessible at the device relative @pgoff.
  */
 long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
-		enum dax_access_mode mode, void **kaddr, pfn_t *pfn)
+		enum dax_access_mode mode, void **kaddr, unsigned long *pfn)
 {
 	long avail;
 

@@ -7,7 +7,6 @@
 
 
 #include <linux/dma-buf.h>
-#include <linux/pfn_t.h>
 #include <linux/shmem_fs.h>
 #include <linux/module.h>
 

@@ -6,7 +6,6 @@
  **************************************************************************/
 
 #include <linux/fb.h>
-#include <linux/pfn_t.h>
 
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_drv.h>
@@ -33,7 +32,7 @@ static vm_fault_t psb_fbdev_vm_fault(struct vm_fault *vmf)
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
 	for (i = 0; i < page_num; ++i) {
-		err = vmf_insert_mixed(vma, address, __pfn_to_pfn_t(pfn, 0));
+		err = vmf_insert_mixed(vma, address, pfn);
 		if (unlikely(err & VM_FAULT_ERROR))
 			break;
 		address += PAGE_SIZE;

@@ -5,7 +5,6 @@
 
 #include <linux/anon_inodes.h>
 #include <linux/mman.h>
-#include <linux/pfn_t.h>
 #include <linux/sizes.h>
 
 #include <drm/drm_cache.h>

@@ -9,7 +9,6 @@
 #include <linux/spinlock.h>
 #include <linux/shmem_fs.h>
 #include <linux/dma-buf.h>
-#include <linux/pfn_t.h>
 
 #include <drm/drm_prime.h>
 #include <drm/drm_file.h>

@@ -8,7 +8,6 @@
 #include <linux/seq_file.h>
 #include <linux/shmem_fs.h>
 #include <linux/spinlock.h>
-#include <linux/pfn_t.h>
 #include <linux/vmalloc.h>
 
 #include <drm/drm_prime.h>
@@ -371,7 +370,7 @@ static vm_fault_t omap_gem_fault_1d(struct drm_gem_object *obj,
 	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
 	     pfn, pfn << PAGE_SHIFT);
 
-	return vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, 0));
+	return vmf_insert_mixed(vma, vmf->address, pfn);
 }
 
 /* Special handling for the case of faulting in 2d tiled buffers */
@@ -466,8 +465,7 @@ static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj,
 	     pfn, pfn << PAGE_SHIFT);
 
 	for (i = n; i > 0; i--) {
-		ret = vmf_insert_mixed(vma,
-				vaddr, __pfn_to_pfn_t(pfn, 0));
+		ret = vmf_insert_mixed(vma, vaddr, pfn);
 		if (ret & VM_FAULT_ERROR)
 			break;
 		pfn += priv->usergart[fmt].stride_pfn;

@@ -16,7 +16,6 @@
  */
 
 #include <linux/dma-buf.h>
-#include <linux/pfn_t.h>
 #include <linux/vmalloc.h>
 
 #include "v3d_drv.h"

@@ -19,7 +19,6 @@
 #include <linux/io.h>
 #include <linux/workqueue.h>
 #include <linux/dma-mapping.h>
-#include <linux/pfn_t.h>
 
 #ifdef CONFIG_X86
 #include <asm/set_memory.h>
@@ -1618,7 +1617,7 @@ static vm_fault_t msc_mmap_fault(struct vm_fault *vmf)
 		return VM_FAULT_SIGBUS;
 
 	get_page(page);
-	return vmf_insert_mixed(vmf->vma, vmf->address, page_to_pfn_t(page));
+	return vmf_insert_mixed(vmf->vma, vmf->address, page_to_pfn(page));
 }
 
 static const struct vm_operations_struct msc_mmap_ops = {

@@ -170,7 +170,7 @@ static struct dax_device *linear_dax_pgoff(struct dm_target *ti, pgoff_t *pgoff)
 
 static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
 		long nr_pages, enum dax_access_mode mode, void **kaddr,
-		pfn_t *pfn)
+		unsigned long *pfn)
 {
 	struct dax_device *dax_dev = linear_dax_pgoff(ti, &pgoff);
 

@@ -893,7 +893,7 @@ static struct dax_device *log_writes_dax_pgoff(struct dm_target *ti,
 
 static long log_writes_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
 		long nr_pages, enum dax_access_mode mode, void **kaddr,
-		pfn_t *pfn)
+		unsigned long *pfn)
 {
 	struct dax_device *dax_dev = log_writes_dax_pgoff(ti, &pgoff);
 

@@ -316,7 +316,7 @@ static struct dax_device *stripe_dax_pgoff(struct dm_target *ti, pgoff_t *pgoff)
 
 static long stripe_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
 		long nr_pages, enum dax_access_mode mode, void **kaddr,
-		pfn_t *pfn)
+		unsigned long *pfn)
 {
 	struct dax_device *dax_dev = stripe_dax_pgoff(ti, &pgoff);
 

@@ -255,7 +255,7 @@ static void io_err_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static long io_err_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
 		long nr_pages, enum dax_access_mode mode, void **kaddr,
-		pfn_t *pfn)
+		unsigned long *pfn)
 {
 	return -EIO;
 }

@@ -13,7 +13,6 @@
 #include <linux/dm-io.h>
 #include <linux/dm-kcopyd.h>
 #include <linux/dax.h>
-#include <linux/pfn_t.h>
 #include <linux/libnvdimm.h>
 #include <linux/delay.h>
 #include "dm-io-tracker.h"
@@ -256,7 +255,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
 	int r;
 	loff_t s;
 	long p, da;
-	pfn_t pfn;
+	unsigned long pfn;
 	int id;
 	struct page **pages;
 	sector_t offset;
@@ -290,7 +289,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
 		r = da;
 		goto err2;
 	}
-	if (!pfn_t_has_page(pfn)) {
+	if (!pfn_valid(pfn)) {
 		wc->memory_map = NULL;
 		r = -EOPNOTSUPP;
 		goto err2;
@@ -314,13 +313,13 @@ static int persistent_memory_claim(struct dm_writecache *wc)
 				r = daa ? daa : -EINVAL;
 				goto err3;
 			}
-			if (!pfn_t_has_page(pfn)) {
+			if (!pfn_valid(pfn)) {
 				r = -EOPNOTSUPP;
 				goto err3;
 			}
 			while (daa-- && i < p) {
-				pages[i++] = pfn_t_to_page(pfn);
-				pfn.val++;
+				pages[i++] = pfn_to_page(pfn);
+				pfn++;
 				if (!(i & 15))
 					cond_resched();
 			}
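
A note on the dm-writecache hunks just above: pfn_t_has_page() had collapsed to an unconditional true (see the removed include/linux/pfn_t.h near the end of this diff), so its replacement with pfn_valid() is the one conversion that adds a real check rather than merely renaming one — pfn_valid() genuinely verifies that a memmap entry exists for the pfn. A hedged sketch of the difference (illustrative, not part of the commit):

	/* Before: could never fail, since no pfn_t flag survived to clear it. */
	if (!pfn_t_has_page(pfn))	/* never taken: branch was dead code */
		return -EOPNOTSUPP;

	/* After: actually checks the memmap for this pfn. */
	if (!pfn_valid(pfn))
		return -EOPNOTSUPP;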
@@ -1218,7 +1218,7 @@ static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
 
 static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
 		long nr_pages, enum dax_access_mode mode, void **kaddr,
-		pfn_t *pfn)
+		unsigned long *pfn)
 {
 	struct mapped_device *md = dax_get_private(dax_dev);
 	sector_t sector = pgoff * PAGE_SECTORS;

@@ -20,7 +20,6 @@
 #include <linux/kstrtox.h>
 #include <linux/vmalloc.h>
 #include <linux/blk-mq.h>
-#include <linux/pfn_t.h>
 #include <linux/slab.h>
 #include <linux/uio.h>
 #include <linux/dax.h>
@@ -242,7 +241,7 @@ static void pmem_submit_bio(struct bio *bio)
 /* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
 __weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
 		long nr_pages, enum dax_access_mode mode, void **kaddr,
-		pfn_t *pfn)
+		unsigned long *pfn)
 {
 	resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;
 	sector_t sector = PFN_PHYS(pgoff) >> SECTOR_SHIFT;
@@ -254,7 +253,7 @@ __weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
 	if (kaddr)
 		*kaddr = pmem->virt_addr + offset;
 	if (pfn)
-		*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
+		*pfn = PHYS_PFN(pmem->phys_addr + offset);
 
 	if (bb->count &&
 	    badblocks_check(bb, sector, num, &first_bad, &num_bad)) {
@@ -303,7 +302,7 @@ static int pmem_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
 
 static long pmem_dax_direct_access(struct dax_device *dax_dev,
 		pgoff_t pgoff, long nr_pages, enum dax_access_mode mode,
-		void **kaddr, pfn_t *pfn)
+		void **kaddr, unsigned long *pfn)
 {
 	struct pmem_device *pmem = dax_get_private(dax_dev);
 
@@ -513,7 +512,6 @@ static int pmem_attach_disk(struct device *dev,
 
 	pmem->disk = disk;
 	pmem->pgmap.owner = pmem;
-	pmem->pfn_flags = 0;
 	if (is_nd_pfn(dev)) {
 		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
 		pmem->pgmap.ops = &fsdax_pagemap_ops;

@@ -5,7 +5,6 @@
 #include <linux/badblocks.h>
 #include <linux/memremap.h>
 #include <linux/types.h>
-#include <linux/pfn_t.h>
 #include <linux/fs.h>
 
 enum dax_access_mode;
@@ -16,7 +15,6 @@ struct pmem_device {
 	phys_addr_t		phys_addr;
 	/* when non-zero this device is hosting a 'pfn' instance */
 	phys_addr_t		data_offset;
-	u64			pfn_flags;
 	void			*virt_addr;
 	/* immutable base size of the namespace */
 	size_t			size;
@@ -31,7 +29,7 @@ struct pmem_device {
 
 long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
 		long nr_pages, enum dax_access_mode mode, void **kaddr,
-		pfn_t *pfn);
+		unsigned long *pfn);
 
 #ifdef CONFIG_MEMORY_FAILURE
 static inline bool test_and_clear_pmem_poison(struct page *page)

@@ -17,7 +17,6 @@
 #include <linux/blkdev.h>
 #include <linux/completion.h>
 #include <linux/interrupt.h>
-#include <linux/pfn_t.h>
 #include <linux/uio.h>
 #include <linux/dax.h>
 #include <linux/io.h>
@@ -33,7 +32,7 @@ static void dcssblk_release(struct gendisk *disk);
 static void dcssblk_submit_bio(struct bio *bio);
 static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
 		long nr_pages, enum dax_access_mode mode, void **kaddr,
-		pfn_t *pfn);
+		unsigned long *pfn);
 
 static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0";
 
@@ -914,7 +913,7 @@ fail:
 
 static long
 __dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff,
-		long nr_pages, void **kaddr, pfn_t *pfn)
+		long nr_pages, void **kaddr, unsigned long *pfn)
 {
 	resource_size_t offset = pgoff * PAGE_SIZE;
 	unsigned long dev_sz;
@@ -923,7 +922,7 @@ __dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff,
 	if (kaddr)
 		*kaddr = __va(dev_info->start + offset);
 	if (pfn)
-		*pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset), 0);
+		*pfn = PFN_DOWN(dev_info->start + offset);
 
 	return (dev_sz - offset) / PAGE_SIZE;
 }
@@ -931,7 +930,7 @@ __dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff,
 
 static long
 dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
 		long nr_pages, enum dax_access_mode mode, void **kaddr,
-		pfn_t *pfn)
+		unsigned long *pfn)
 {
 	struct dcssblk_dev_info *dev_info = dax_get_private(dax_dev);
 

@@ -20,7 +20,6 @@
 #include <linux/mutex.h>
 #include <linux/notifier.h>
 #include <linux/pci.h>
-#include <linux/pfn_t.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/types.h>
@@ -1669,12 +1668,12 @@ static vm_fault_t vfio_pci_mmap_huge_fault(struct vm_fault *vmf,
 		break;
 #ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
 	case PMD_ORDER:
-		ret = vmf_insert_pfn_pmd(vmf, __pfn_to_pfn_t(pfn, 0), false);
+		ret = vmf_insert_pfn_pmd(vmf, pfn, false);
 		break;
 #endif
 #ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
 	case PUD_ORDER:
-		ret = vmf_insert_pfn_pud(vmf, __pfn_to_pfn_t(pfn, 0), false);
+		ret = vmf_insert_pfn_pud(vmf, pfn, false);
 		break;
 #endif
 	default:

@@ -17,7 +17,6 @@
 #include <linux/fs.h>
 #include <linux/file.h>
 #include <linux/pagemap.h>
-#include <linux/pfn_t.h>
 #include <linux/ramfs.h>
 #include <linux/init.h>
 #include <linux/string.h>
@@ -412,8 +411,8 @@ static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)
 		for (i = 0; i < pages && !ret; i++) {
 			vm_fault_t vmf;
 			unsigned long off = i * PAGE_SIZE;
-			pfn_t pfn = phys_to_pfn_t(address + off, 0);
-			vmf = vmf_insert_mixed(vma, vma->vm_start + off, pfn);
+			vmf = vmf_insert_mixed(vma, vma->vm_start + off,
+					       address + off);
 			if (vmf & VM_FAULT_ERROR)
 				ret = vm_fault_to_errno(vmf, 0);
 		}

fs/dax.c (50 lines changed):
@@ -20,7 +20,6 @@
 #include <linux/sched/signal.h>
 #include <linux/uio.h>
 #include <linux/vmstat.h>
-#include <linux/pfn_t.h>
 #include <linux/sizes.h>
 #include <linux/mmu_notifier.h>
 #include <linux/iomap.h>
@@ -76,9 +75,9 @@ static struct folio *dax_to_folio(void *entry)
 	return page_folio(pfn_to_page(dax_to_pfn(entry)));
 }
 
-static void *dax_make_entry(pfn_t pfn, unsigned long flags)
+static void *dax_make_entry(unsigned long pfn, unsigned long flags)
 {
-	return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
+	return xa_mk_value(flags | (pfn << DAX_SHIFT));
 }
 
 static bool dax_is_locked(void *entry)
@@ -713,7 +712,7 @@ retry:
 
 		if (order > 0)
 			flags |= DAX_PMD;
-		entry = dax_make_entry(pfn_to_pfn_t(0), flags);
+		entry = dax_make_entry(0, flags);
 		dax_lock_entry(xas, entry);
 		if (xas_error(xas))
 			goto out_unlock;
@@ -1041,7 +1040,7 @@ static bool dax_fault_is_synchronous(const struct iomap_iter *iter,
  * appropriate.
  */
 static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
-		const struct iomap_iter *iter, void *entry, pfn_t pfn,
+		const struct iomap_iter *iter, void *entry, unsigned long pfn,
 		unsigned long flags)
 {
 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
@@ -1239,7 +1238,7 @@ int dax_writeback_mapping_range(struct address_space *mapping,
 EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
 
 static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos,
-		size_t size, void **kaddr, pfn_t *pfnp)
+		size_t size, void **kaddr, unsigned long *pfnp)
 {
 	pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
 	int id, rc = 0;
@@ -1257,7 +1256,7 @@ static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos,
 	rc = -EINVAL;
 	if (PFN_PHYS(length) < size)
 		goto out;
-	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
+	if (*pfnp & (PHYS_PFN(size)-1))
 		goto out;
 
 	rc = 0;
@@ -1361,12 +1360,12 @@ static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf,
 {
 	struct inode *inode = iter->inode;
 	unsigned long vaddr = vmf->address;
-	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
+	unsigned long pfn = my_zero_pfn(vaddr);
 	vm_fault_t ret;
 
 	*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE);
 
-	ret = vmf_insert_page_mkwrite(vmf, pfn_t_to_page(pfn), false);
+	ret = vmf_insert_page_mkwrite(vmf, pfn_to_page(pfn), false);
 	trace_dax_load_hole(inode, vmf, ret);
 	return ret;
 }
@@ -1383,14 +1382,14 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
 	struct folio *zero_folio;
 	spinlock_t *ptl;
 	pmd_t pmd_entry;
-	pfn_t pfn;
+	unsigned long pfn;
 
 	zero_folio = mm_get_huge_zero_folio(vmf->vma->vm_mm);
 
 	if (unlikely(!zero_folio))
 		goto fallback;
 
-	pfn = page_to_pfn_t(&zero_folio->page);
+	pfn = page_to_pfn(&zero_folio->page);
 	*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn,
 				  DAX_PMD | DAX_ZERO_PAGE);
 
@@ -1779,7 +1778,8 @@ static vm_fault_t dax_fault_return(int error)
  * insertion for now and return the pfn so that caller can insert it after the
  * fsync is done.
  */
-static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn)
+static vm_fault_t dax_fault_synchronous_pfnp(unsigned long *pfnp,
+					     unsigned long pfn)
 {
 	if (WARN_ON_ONCE(!pfnp))
 		return VM_FAULT_SIGBUS;
@@ -1827,7 +1827,7 @@ static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf,
  * @pmd: distinguish whether it is a pmd fault
  */
 static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
-		const struct iomap_iter *iter, pfn_t *pfnp,
+		const struct iomap_iter *iter, unsigned long *pfnp,
 		struct xa_state *xas, void **entry, bool pmd)
 {
 	const struct iomap *iomap = &iter->iomap;
@@ -1838,7 +1838,7 @@ static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
 	unsigned long entry_flags = pmd ? DAX_PMD : 0;
 	struct folio *folio;
 	int ret, err = 0;
-	pfn_t pfn;
+	unsigned long pfn;
 	void *kaddr;
 
 	if (!pmd && vmf->cow_page)
@@ -1875,16 +1875,15 @@ static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
 
 	folio_ref_inc(folio);
 	if (pmd)
-		ret = vmf_insert_folio_pmd(vmf, pfn_folio(pfn_t_to_pfn(pfn)),
-					   write);
+		ret = vmf_insert_folio_pmd(vmf, pfn_folio(pfn), write);
 	else
-		ret = vmf_insert_page_mkwrite(vmf, pfn_t_to_page(pfn), write);
+		ret = vmf_insert_page_mkwrite(vmf, pfn_to_page(pfn), write);
 	folio_put(folio);
 
 	return ret;
 }
 
-static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
+static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, unsigned long *pfnp,
 		int *iomap_errp, const struct iomap_ops *ops)
 {
 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
@@ -1996,7 +1995,7 @@ static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas,
 	return false;
 }
 
-static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
+static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, unsigned long *pfnp,
 		const struct iomap_ops *ops)
 {
 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
@@ -2077,7 +2076,7 @@ out:
 	return ret;
 }
 #else
-static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
+static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, unsigned long *pfnp,
 		const struct iomap_ops *ops)
 {
 	return VM_FAULT_FALLBACK;
@@ -2098,7 +2097,8 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 * successfully.
 */
 vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order,
-		pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
+		unsigned long *pfnp, int *iomap_errp,
+		const struct iomap_ops *ops)
 {
 	if (order == 0)
 		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
@@ -2118,8 +2118,8 @@ EXPORT_SYMBOL_GPL(dax_iomap_fault);
 * This function inserts a writeable PTE or PMD entry into the page tables
 * for an mmaped DAX file. It also marks the page cache entry as dirty.
 */
-static vm_fault_t
-dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
+static vm_fault_t dax_insert_pfn_mkwrite(struct vm_fault *vmf,
+		unsigned long pfn, unsigned int order)
 {
 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
 	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
@@ -2141,7 +2141,7 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
 	xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
 	dax_lock_entry(&xas, entry);
 	xas_unlock_irq(&xas);
-	folio = pfn_folio(pfn_t_to_pfn(pfn));
+	folio = pfn_folio(pfn);
 	folio_ref_inc(folio);
 	if (order == 0)
 		ret = vmf_insert_page_mkwrite(vmf, &folio->page, true);
@@ -2168,7 +2168,7 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
 * table entry.
 */
 vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, unsigned int order,
-		pfn_t pfn)
+		unsigned long pfn)
 {
 	int err;
 	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;

@@ -747,7 +747,7 @@ static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf, unsigned int order)
 	bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
 		(vmf->vma->vm_flags & VM_SHARED);
 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
-	pfn_t pfn;
+	unsigned long pfn;
 
 	if (write) {
 		sb_start_pagefault(sb);

@@ -10,7 +10,6 @@
 #include <linux/dax.h>
 #include <linux/uio.h>
 #include <linux/pagemap.h>
-#include <linux/pfn_t.h>
 #include <linux/iomap.h>
 #include <linux/interval_tree.h>
 
@@ -757,7 +756,7 @@ static vm_fault_t __fuse_dax_fault(struct vm_fault *vmf, unsigned int order,
 	vm_fault_t ret;
 	struct inode *inode = file_inode(vmf->vma->vm_file);
 	struct super_block *sb = inode->i_sb;
-	pfn_t pfn;
+	unsigned long pfn;
 	int error = 0;
 	struct fuse_conn *fc = get_fuse_conn(inode);
 	struct fuse_conn_dax *fcd = fc->dax;

@@ -9,7 +9,6 @@
 #include <linux/pci.h>
 #include <linux/interrupt.h>
 #include <linux/group_cpus.h>
-#include <linux/pfn_t.h>
 #include <linux/memremap.h>
 #include <linux/module.h>
 #include <linux/virtio.h>
@@ -1008,7 +1007,7 @@ static void virtio_fs_cleanup_vqs(struct virtio_device *vdev)
 */
 static long virtio_fs_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
 		long nr_pages, enum dax_access_mode mode,
-		void **kaddr, pfn_t *pfn)
+		void **kaddr, unsigned long *pfn)
 {
 	struct virtio_fs *fs = dax_get_private(dax_dev);
 	phys_addr_t offset = PFN_PHYS(pgoff);
@@ -1017,7 +1016,7 @@ static long virtio_fs_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
 	if (kaddr)
 		*kaddr = fs->window_kaddr + offset;
 	if (pfn)
-		*pfn = phys_to_pfn_t(fs->window_phys_addr + offset, 0);
+		*pfn = fs->window_phys_addr + offset;
 	return nr_pages > max_nr_pages ? max_nr_pages : nr_pages;
 }
 

@@ -1730,7 +1730,7 @@ xfs_dax_fault_locked(
 	bool		write_fault)
 {
 	vm_fault_t	ret;
-	pfn_t		pfn;
+	unsigned long	pfn;
 
 	if (!IS_ENABLED(CONFIG_FS_DAX)) {
 		ASSERT(0);

@@ -26,7 +26,7 @@ struct dax_operations {
	 * number of pages available for DAX at that pfn.
	 */
	long (*direct_access)(struct dax_device *, pgoff_t, long,
-			enum dax_access_mode, void **, pfn_t *);
+			enum dax_access_mode, void **, unsigned long *);
	/* zero_page_range: required operation. Zero page range */
	int (*zero_page_range)(struct dax_device *, pgoff_t, size_t);
	/*
@@ -241,7 +241,7 @@ static inline void dax_break_layout_final(struct inode *inode)
 bool dax_alive(struct dax_device *dax_dev);
 void *dax_get_private(struct dax_device *dax_dev);
 long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
-		enum dax_access_mode mode, void **kaddr, pfn_t *pfn);
+		enum dax_access_mode mode, void **kaddr, unsigned long *pfn);
 size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i);
 size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
@@ -255,9 +255,10 @@ void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
 ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops);
 vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order,
-		pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
+		unsigned long *pfnp, int *errp,
+		const struct iomap_ops *ops);
 vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
-		unsigned int order, pfn_t pfn);
+		unsigned int order, unsigned long pfn);
 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
 void dax_delete_mapping_range(struct address_space *mapping,
		loff_t start, loff_t end);

@@ -156,7 +156,7 @@ typedef int (*dm_busy_fn) (struct dm_target *ti);
 */
 typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode node, void **kaddr,
-		pfn_t *pfn);
+		unsigned long *pfn);
 typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
		size_t nr_pages);
 

@@ -37,8 +37,10 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		    pmd_t *pmd, unsigned long addr, pgprot_t newprot,
		    unsigned long cp_flags);
 
-vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
-vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
+vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn,
+			      bool write);
+vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, unsigned long pfn,
+			      bool write);
 vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio,
				bool write);
 vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio,

@@ -3522,9 +3522,9 @@ vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn, pgprot_t pgprot);
 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
-			pfn_t pfn);
+			unsigned long pfn);
 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
-			unsigned long addr, pfn_t pfn);
+			unsigned long addr, unsigned long pfn);
 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
 
 static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,

@@ -4,15 +4,6 @@
 
 #ifndef __ASSEMBLY__
 #include <linux/types.h>
-
-/*
- * pfn_t: encapsulates a page-frame number that is optionally backed
- * by memmap (struct page). Whether a pfn_t has a 'struct page'
- * backing is indicated by flags in the high bits of the value.
- */
-typedef struct {
-	u64 val;
-} pfn_t;
 #endif
 
 #define PFN_ALIGN(x)	(((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)

@@ -1,85 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_PFN_T_H_
-#define _LINUX_PFN_T_H_
-#include <linux/mm.h>
-
-/*
- * PFN_FLAGS_MASK - mask of all the possible valid pfn_t flags
- */
-#define PFN_FLAGS_MASK (((u64) (~PAGE_MASK)) << (BITS_PER_LONG_LONG - PAGE_SHIFT))
-
-#define PFN_FLAGS_TRACE \
-	{ }
-
-static inline pfn_t __pfn_to_pfn_t(unsigned long pfn, u64 flags)
-{
-	pfn_t pfn_t = { .val = pfn | (flags & PFN_FLAGS_MASK), };
-
-	return pfn_t;
-}
-
-/* a default pfn to pfn_t conversion assumes that @pfn is pfn_valid() */
-static inline pfn_t pfn_to_pfn_t(unsigned long pfn)
-{
-	return __pfn_to_pfn_t(pfn, 0);
-}
-
-static inline pfn_t phys_to_pfn_t(phys_addr_t addr, u64 flags)
-{
-	return __pfn_to_pfn_t(addr >> PAGE_SHIFT, flags);
-}
-
-static inline bool pfn_t_has_page(pfn_t pfn)
-{
-	return true;
-}
-
-static inline unsigned long pfn_t_to_pfn(pfn_t pfn)
-{
-	return pfn.val & ~PFN_FLAGS_MASK;
-}
-
-static inline struct page *pfn_t_to_page(pfn_t pfn)
-{
-	if (pfn_t_has_page(pfn))
-		return pfn_to_page(pfn_t_to_pfn(pfn));
-	return NULL;
-}
-
-static inline phys_addr_t pfn_t_to_phys(pfn_t pfn)
-{
-	return PFN_PHYS(pfn_t_to_pfn(pfn));
-}
-
-static inline pfn_t page_to_pfn_t(struct page *page)
-{
-	return pfn_to_pfn_t(page_to_pfn(page));
-}
-
-static inline int pfn_t_valid(pfn_t pfn)
-{
-	return pfn_valid(pfn_t_to_pfn(pfn));
-}
-
-#ifdef CONFIG_MMU
-static inline pte_t pfn_t_pte(pfn_t pfn, pgprot_t pgprot)
-{
-	return pfn_pte(pfn_t_to_pfn(pfn), pgprot);
-}
-#endif
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline pmd_t pfn_t_pmd(pfn_t pfn, pgprot_t pgprot)
-{
-	return pfn_pmd(pfn_t_to_pfn(pfn), pgprot);
-}
-
-#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
-static inline pud_t pfn_t_pud(pfn_t pfn, pgprot_t pgprot)
-{
-	return pfn_pud(pfn_t_to_pfn(pfn), pgprot);
-}
-#endif
-#endif
-
-#endif /* _LINUX_PFN_T_H_ */
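
The deleted header above shows why the conversion is mechanical: with every PFN_* flag already gone, each helper reduces to an identity wrapper around the raw pfn. A standalone userspace model of that collapse (a simplification — the real PFN_FLAGS_MASK covered the high PAGE_SHIFT bits, which no valid pfn and no surviving flag ever set):

	#include <assert.h>
	#include <stdint.h>

	/* Simplified model of the removed pfn_t: no flag bits remain, so the
	 * wrap/unwrap round trip is the identity for any valid pfn. */
	typedef struct { uint64_t val; } pfn_t;
	#define PFN_FLAGS_MASK 0ULL	/* stand-in: all PFN_* flags were removed */

	static pfn_t __pfn_to_pfn_t(unsigned long pfn, uint64_t flags)
	{
		pfn_t p = { .val = pfn | (flags & PFN_FLAGS_MASK) };
		return p;
	}

	static unsigned long pfn_t_to_pfn(pfn_t p)
	{
		return p.val & ~PFN_FLAGS_MASK;
	}

	int main(void)
	{
		unsigned long raw = 0x12345;

		/* The identity that makes s/pfn_t/unsigned long/ a safe rewrite. */
		assert(pfn_t_to_pfn(__pfn_to_pfn_t(raw, 0)) == raw);
		return 0;
	}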
@ -20,7 +20,6 @@
|
||||||
#include <linux/mman.h>
|
#include <linux/mman.h>
|
||||||
#include <linux/mm_types.h>
|
#include <linux/mm_types.h>
|
||||||
#include <linux/module.h>
|
#include <linux/module.h>
|
||||||
#include <linux/pfn_t.h>
|
|
||||||
#include <linux/printk.h>
|
#include <linux/printk.h>
|
||||||
#include <linux/pgtable.h>
|
#include <linux/pgtable.h>
|
||||||
#include <linux/random.h>
|
#include <linux/random.h>
|
||||||
|
|
|
@ -22,7 +22,6 @@
|
||||||
#include <linux/mm_types.h>
|
#include <linux/mm_types.h>
|
||||||
#include <linux/khugepaged.h>
|
#include <linux/khugepaged.h>
|
||||||
#include <linux/freezer.h>
|
#include <linux/freezer.h>
|
||||||
#include <linux/pfn_t.h>
|
|
||||||
#include <linux/mman.h>
|
#include <linux/mman.h>
|
||||||
#include <linux/memremap.h>
|
#include <linux/memremap.h>
|
||||||
#include <linux/pagemap.h>
|
#include <linux/pagemap.h>
|
||||||
|
@ -1375,7 +1374,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
|
||||||
struct folio_or_pfn {
|
struct folio_or_pfn {
|
||||||
union {
|
union {
|
||||||
struct folio *folio;
|
struct folio *folio;
|
||||||
pfn_t pfn;
|
unsigned long pfn;
|
||||||
};
|
};
|
||||||
bool is_folio;
|
bool is_folio;
|
||||||
};
|
};
|
||||||
|
@ -1391,7 +1390,7 @@ static int insert_pmd(struct vm_area_struct *vma, unsigned long addr,
|
||||||
|
|
||||||
if (!pmd_none(*pmd)) {
|
if (!pmd_none(*pmd)) {
|
||||||
const unsigned long pfn = fop.is_folio ? folio_pfn(fop.folio) :
|
const unsigned long pfn = fop.is_folio ? folio_pfn(fop.folio) :
|
||||||
pfn_t_to_pfn(fop.pfn);
|
fop.pfn;
|
||||||
|
|
||||||
if (write) {
|
if (write) {
|
||||||
if (pmd_pfn(*pmd) != pfn) {
|
if (pmd_pfn(*pmd) != pfn) {
|
||||||
|
@ -1414,7 +1413,7 @@ static int insert_pmd(struct vm_area_struct *vma, unsigned long addr,
|
||||||
folio_add_file_rmap_pmd(fop.folio, &fop.folio->page, vma);
|
 		folio_add_file_rmap_pmd(fop.folio, &fop.folio->page, vma);
 		add_mm_counter(mm, mm_counter_file(fop.folio), HPAGE_PMD_NR);
 	} else {
-		entry = pmd_mkhuge(pfn_t_pmd(fop.pfn, prot));
+		entry = pmd_mkhuge(pfn_pmd(fop.pfn, prot));
 		entry = pmd_mkspecial(entry);
 	}
 	if (write) {
@@ -1442,7 +1441,8 @@ static int insert_pmd(struct vm_area_struct *vma, unsigned long addr,
  *
  * Return: vm_fault_t value.
  */
-vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
+vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn,
+		bool write)
 {
 	unsigned long addr = vmf->address & PMD_MASK;
 	struct vm_area_struct *vma = vmf->vma;
@@ -1473,7 +1473,7 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
 			return VM_FAULT_OOM;
 	}
 
-	pfnmap_setup_cachemode_pfn(pfn_t_to_pfn(pfn), &pgprot);
+	pfnmap_setup_cachemode_pfn(pfn, &pgprot);
 
 	ptl = pmd_lock(vma->vm_mm, vmf->pmd);
 	error = insert_pmd(vma, addr, vmf->pmd, fop, pgprot, write,
@@ -1539,7 +1539,7 @@ static void insert_pud(struct vm_area_struct *vma, unsigned long addr,
 
 	if (!pud_none(*pud)) {
 		const unsigned long pfn = fop.is_folio ? folio_pfn(fop.folio) :
-					  pfn_t_to_pfn(fop.pfn);
+					  fop.pfn;
 
 		if (write) {
 			if (WARN_ON_ONCE(pud_pfn(*pud) != pfn))
@@ -1559,7 +1559,7 @@ static void insert_pud(struct vm_area_struct *vma, unsigned long addr,
 		folio_add_file_rmap_pud(fop.folio, &fop.folio->page, vma);
 		add_mm_counter(mm, mm_counter_file(fop.folio), HPAGE_PUD_NR);
 	} else {
-		entry = pud_mkhuge(pfn_t_pud(fop.pfn, prot));
+		entry = pud_mkhuge(pfn_pud(fop.pfn, prot));
 		entry = pud_mkspecial(entry);
 	}
 	if (write) {
@@ -1580,7 +1580,8 @@ static void insert_pud(struct vm_area_struct *vma, unsigned long addr,
  *
  * Return: vm_fault_t value.
  */
-vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
+vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, unsigned long pfn,
+		bool write)
 {
 	unsigned long addr = vmf->address & PUD_MASK;
 	struct vm_area_struct *vma = vmf->vma;
@@ -1603,7 +1604,7 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
 	if (addr < vma->vm_start || addr >= vma->vm_end)
 		return VM_FAULT_SIGBUS;
 
-	pfnmap_setup_cachemode_pfn(pfn_t_to_pfn(pfn), &pgprot);
+	pfnmap_setup_cachemode_pfn(pfn, &pgprot);
 
 	ptl = pud_lock(vma->vm_mm, vmf->pud);
 	insert_pud(vma, addr, vmf->pud, fop, pgprot, write);

mm/memory.c (31 changes)
@@ -57,7 +57,6 @@
 #include <linux/export.h>
 #include <linux/delayacct.h>
 #include <linux/init.h>
-#include <linux/pfn_t.h>
 #include <linux/writeback.h>
 #include <linux/memcontrol.h>
 #include <linux/mmu_notifier.h>
@@ -2435,7 +2434,7 @@ int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
 EXPORT_SYMBOL(vm_map_pages_zero);
 
 static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
-			pfn_t pfn, pgprot_t prot, bool mkwrite)
+			unsigned long pfn, pgprot_t prot, bool mkwrite)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	pte_t *pte, entry;
@@ -2457,7 +2456,7 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 			 * allocation and mapping invalidation so just skip the
 			 * update.
 			 */
-			if (pte_pfn(entry) != pfn_t_to_pfn(pfn)) {
+			if (pte_pfn(entry) != pfn) {
 				WARN_ON_ONCE(!is_zero_pfn(pte_pfn(entry)));
 				goto out_unlock;
 			}
@@ -2470,7 +2469,7 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 	}
 
 	/* Ok, finally just insert the thing.. */
-	entry = pte_mkspecial(pfn_t_pte(pfn, prot));
+	entry = pte_mkspecial(pfn_pte(pfn, prot));
 
 	if (mkwrite) {
 		entry = pte_mkyoung(entry);
@@ -2541,8 +2540,7 @@ vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
 
 	pfnmap_setup_cachemode_pfn(pfn, &pgprot);
 
-	return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, 0), pgprot,
-			false);
+	return insert_pfn(vma, addr, pfn, pgprot, false);
 }
 EXPORT_SYMBOL(vmf_insert_pfn_prot);
 
@@ -2573,21 +2571,22 @@ vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 }
 EXPORT_SYMBOL(vmf_insert_pfn);
 
-static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn, bool mkwrite)
+static bool vm_mixed_ok(struct vm_area_struct *vma, unsigned long pfn,
+		bool mkwrite)
 {
-	if (unlikely(is_zero_pfn(pfn_t_to_pfn(pfn))) &&
+	if (unlikely(is_zero_pfn(pfn)) &&
 	    (mkwrite || !vm_mixed_zeropage_allowed(vma)))
 		return false;
 	/* these checks mirror the abort conditions in vm_normal_page */
 	if (vma->vm_flags & VM_MIXEDMAP)
 		return true;
-	if (is_zero_pfn(pfn_t_to_pfn(pfn)))
+	if (is_zero_pfn(pfn))
 		return true;
 	return false;
 }
 
 static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
-		unsigned long addr, pfn_t pfn, bool mkwrite)
+		unsigned long addr, unsigned long pfn, bool mkwrite)
 {
 	pgprot_t pgprot = vma->vm_page_prot;
 	int err;
@@ -2598,9 +2597,9 @@ static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
 	if (addr < vma->vm_start || addr >= vma->vm_end)
 		return VM_FAULT_SIGBUS;
 
-	pfnmap_setup_cachemode_pfn(pfn_t_to_pfn(pfn), &pgprot);
+	pfnmap_setup_cachemode_pfn(pfn, &pgprot);
 
-	if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
+	if (!pfn_modify_allowed(pfn, pgprot))
 		return VM_FAULT_SIGBUS;
 
 	/*
@@ -2610,7 +2609,7 @@ static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
 	 * than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP
 	 * without pte special, it would there be refcounted as a normal page.
 	 */
-	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pfn_t_valid(pfn)) {
+	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pfn_valid(pfn)) {
 		struct page *page;
 
 		/*
@@ -2618,7 +2617,7 @@ static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
 		 * regardless of whether the caller specified flags that
 		 * result in pfn_t_has_page() == false.
 		 */
-		page = pfn_to_page(pfn_t_to_pfn(pfn));
+		page = pfn_to_page(pfn);
 		err = insert_page(vma, addr, page, pgprot, mkwrite);
 	} else {
 		return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
@@ -2653,7 +2652,7 @@ vm_fault_t vmf_insert_page_mkwrite(struct vm_fault *vmf, struct page *page,
 EXPORT_SYMBOL_GPL(vmf_insert_page_mkwrite);
 
 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
-		pfn_t pfn)
+		unsigned long pfn)
 {
 	return __vm_insert_mixed(vma, addr, pfn, false);
 }
@@ -2665,7 +2664,7 @@ EXPORT_SYMBOL(vmf_insert_mixed);
  * the same entry was actually inserted.
  */
 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
-		unsigned long addr, pfn_t pfn)
+		unsigned long addr, unsigned long pfn)
 {
 	return __vm_insert_mixed(vma, addr, pfn, true);
 }

@@ -5,7 +5,6 @@
 #include <linux/kasan.h>
 #include <linux/memory_hotplug.h>
 #include <linux/memremap.h>
-#include <linux/pfn_t.h>
 #include <linux/swap.h>
 #include <linux/mm.h>
 #include <linux/mmzone.h>

@@ -35,7 +35,6 @@
 #include <linux/compat.h>
 #include <linux/hugetlb.h>
 #include <linux/gfp.h>
-#include <linux/pfn_t.h>
 #include <linux/page_idle.h>
 #include <linux/page_owner.h>
 #include <linux/sched/mm.h>

@@ -10,7 +10,7 @@
 
 long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
 		long nr_pages, enum dax_access_mode mode, void **kaddr,
-		pfn_t *pfn)
+		unsigned long *pfn)
 {
 	resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;
 
@@ -29,7 +29,7 @@ long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
 			*kaddr = pmem->virt_addr + offset;
 		page = vmalloc_to_page(pmem->virt_addr + offset);
 		if (pfn)
-			*pfn = page_to_pfn_t(page);
+			*pfn = page_to_pfn(page);
 		pr_debug_ratelimited("%s: pmem: %p pgoff: %#lx pfn: %#lx\n",
 				__func__, pmem, pgoff, page_to_pfn(page));
 
@@ -39,7 +39,7 @@ long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
 	if (kaddr)
 		*kaddr = pmem->virt_addr + offset;
 	if (pfn)
-		*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
+		*pfn = PHYS_PFN(pmem->phys_addr + offset);
 
 	/*
 	 * If badblocks are present, limit known good range to the
@@ -8,7 +8,6 @@
 #include <linux/ioport.h>
 #include <linux/module.h>
 #include <linux/types.h>
-#include <linux/pfn_t.h>
 #include <linux/acpi.h>
 #include <linux/io.h>
 #include <linux/mm.h>
@@ -135,12 +134,6 @@ void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 }
 EXPORT_SYMBOL_GPL(__wrap_devm_memremap_pages);
 
-pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags)
-{
-	return phys_to_pfn_t(addr, flags);
-}
-EXPORT_SYMBOL(__wrap_phys_to_pfn_t);
-
 void *__wrap_memremap(resource_size_t offset, size_t size,
 		unsigned long flags)
 {

@@ -212,7 +212,6 @@ void __iomem *__wrap_devm_ioremap(struct device *dev,
 void *__wrap_devm_memremap(struct device *dev, resource_size_t offset,
 		size_t size, unsigned long flags);
 void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
-pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags);
 void *__wrap_memremap(resource_size_t offset, size_t size,
 		unsigned long flags);
 void __wrap_devm_memunmap(struct device *dev, void *addr);