/*
* linux/mm/fremap.c
*
* Explicit pagetable population and nonlinear (random) mappings support.
*
* started by Ingo Molnar, Copyright (C) 2002, 2003
*/
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/rmap.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include "internal.h"
static int mm_counter(struct page *page)
{
return PageAnon(page) ? MM_ANONPAGES : MM_FILEPAGES;
}
static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
pte_t pte = *ptep;
struct page *page;
swp_entry_t entry;
if (pte_present(pte)) {
flush_cache_page(vma, addr, pte_pfn(pte));
pte = ptep_clear_flush(vma, addr, ptep);
page = vm_normal_page(vma, addr, pte);
if (page) {
if (pte_dirty(pte))
set_page_dirty(page);
update_hiwater_rss(mm);
dec_mm_counter(mm, mm_counter(page));
page_remove_rmap(page);
page_cache_release(page);
}
} else { /* zap_pte() is not called when pte_none() */
if (!pte_file(pte)) {
update_hiwater_rss(mm);
entry = pte_to_swp_entry(pte);
if (non_swap_entry(entry)) {
if (is_migration_entry(entry)) {
page = migration_entry_to_page(entry);
dec_mm_counter(mm, mm_counter(page));
}
} else {
free_swap_and_cache(entry);
dec_mm_counter(mm, MM_SWAPENTS);
}
}
pte_clear_not_present_full(mm, addr, ptep, 0);
}
}
/*
* Install a file pte to a given virtual memory address, release any
* previously existing mapping.
*/
static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, unsigned long pgoff, pgprot_t prot)
{
int err = -ENOMEM;
pte_t *pte, ptfile;
spinlock_t *ptl;
pte = get_locked_pte(mm, addr, &ptl);
if (!pte)
goto out;
ptfile = pgoff_to_pte(pgoff);
if (!pte_none(*pte))
zap_pte(mm, vma, addr, pte);
set_pte_at(mm, addr, pte, pte_file_mksoft_dirty(ptfile));
/*
* We don't need to run update_mmu_cache() here because the "file pte"
* being installed by install_file_pte() is not a real pte - it's a
* non-present entry (like a swap entry), noting what file offset should
* be mapped there when there's a fault (in a non-linear vma where
* that's not obvious).
*/
pte_unmap_unlock(pte, ptl);
err = 0;
out:
return err;
}
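/*
 * Illustrative round trip (a sketch, not any particular arch's code):
 * the file pte installed above is a non-present entry that merely
 * encodes a file offset, and the arch helpers invert each other, so
 * the fault path can recover the offset later:
 *
 *	pte_t ptfile = pgoff_to_pte(pgoff);
 *	BUG_ON(!pte_file(ptfile));
 *	BUG_ON(pte_to_pgoff(ptfile) != pgoff);
 */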
int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
unsigned long size, pgoff_t pgoff)
{
struct mm_struct *mm = vma->vm_mm;
int err;
do {
err = install_file_pte(mm, vma, addr, pgoff, vma->vm_page_prot);
if (err)
return err;
size -= PAGE_SIZE;
addr += PAGE_SIZE;
pgoff++;
} while (size);
return 0;
}
EXPORT_SYMBOL(generic_file_remap_pages);
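/*
 * A filesystem opts in to nonlinear remapping by pointing its
 * vm_operations_struct at this helper; mm/filemap.c wires it up
 * roughly like this for the generic page cache (other handlers
 * elided):
 *
 *	const struct vm_operations_struct generic_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.remap_pages	= generic_file_remap_pages,
 *	};
 */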
/**
* sys_remap_file_pages - remap arbitrary pages of an existing VM_SHARED vma
* @start: start of the remapped virtual memory range
* @size: size of the remapped virtual memory range
* @prot: new protection bits of the range (see NOTE)
* @pgoff: to-be-mapped page of the backing store file
* @flags: 0 or MAP_NONBLOCK - the latter will cause no IO.
*
* sys_remap_file_pages remaps arbitrary pages of an existing VM_SHARED vma
* (shared backing store file).
*
* This syscall works purely via pagetables, so it's the most efficient
* way to map the same (large) file into a given virtual window. Unlike
* mmap()/mremap() it does not create any new vmas. The new mappings are
* also safe across swapout.
*
* NOTE: the @prot parameter right now is ignored (but must be zero),
* and the vma's default protection is used. Arbitrary protections
* might be implemented in the future.
*/
SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
{
struct mm_struct *mm = current->mm;
struct address_space *mapping;
struct vm_area_struct *vma;
int err = -EINVAL;
int has_write_lock = 0;
vm_flags_t vm_flags = 0;
pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. "
"See Documentation/vm/remap_file_pages.txt.\n",
current->comm, current->pid);
if (prot)
return err;
/*
* Sanitize the syscall parameters:
*/
start = start & PAGE_MASK;
size = size & PAGE_MASK;
/* Does the address range wrap, or is the span zero-sized? */
if (start + size <= start)
return err;
/* Does pgoff wrap? */
if (pgoff + (size >> PAGE_SHIFT) < pgoff)
return err;
/* Can we represent this offset inside this architecture's pte's? */
#if PTE_FILE_MAX_BITS < BITS_PER_LONG
if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
return err;
#endif
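/*
 * Worked example with illustrative numbers: with 4 KiB pages and
 * PTE_FILE_MAX_BITS == 32, a file pte can encode 2^32 distinct page
 * offsets, so a range whose last page sits at or beyond file offset
 * 2^32 pages (16 TiB) cannot be represented and is rejected above.
 */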
/* Take mmap_sem for reading first; we upgrade to down_write() below only if vma->vm_flags needs to change. */
down_read(&mm->mmap_sem);
retry:
vma = find_vma(mm, start);
/*
* Make sure the vma is shared, that it supports prefaulting,
* and that the remapped range is valid and fully within
* the single existing vma.
*/
if (!vma || !(vma->vm_flags & VM_SHARED))
goto out;
if (!vma->vm_ops || !vma->vm_ops->remap_pages)
goto out;
if (start < vma->vm_start || start + size > vma->vm_end)
goto out;
/* Must set VM_NONLINEAR before any pages are populated. */
if (!(vma->vm_flags & VM_NONLINEAR)) {
/*
* vm_private_data is used as a swapout cursor
* in a VM_NONLINEAR vma.
*/
if (vma->vm_private_data)
goto out;
/* No nonlinear mapping is needed; exit with success */
if (pgoff == linear_page_index(vma, start)) {
err = 0;
goto out;
}
if (!has_write_lock) {
get_write_lock:
up_read(&mm->mmap_sem);
down_write(&mm->mmap_sem);
has_write_lock = 1;
goto retry;
}
mapping = vma->vm_file->f_mapping;
/*
* page_mkclean doesn't work on nonlinear vmas, so if
* dirty pages need to be accounted, emulate with linear
* vmas.
*/
if (mapping_cap_account_dirty(mapping)) {
unsigned long addr;
struct file *file = get_file(vma->vm_file);
/* mmap_region may free vma; grab the info now */
vm_flags = vma->vm_flags;
addr = mmap_region(file, start, size, vm_flags, pgoff);
fput(file);
if (IS_ERR_VALUE(addr)) {
err = addr;
} else {
BUG_ON(addr != start);
err = 0;
}
goto out_freed;
}
mutex_lock(&mapping->i_mmap_mutex);
flush_dcache_mmap_lock(mapping);
vma->vm_flags |= VM_NONLINEAR;
vma_interval_tree_remove(vma, &mapping->i_mmap);
vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
flush_dcache_mmap_unlock(mapping);
mutex_unlock(&mapping->i_mmap_mutex);
}
if (vma->vm_flags & VM_LOCKED) {
/*
* drop the PG_mlocked flag for the over-mapped range
*/
if (!has_write_lock)
goto get_write_lock;
vm_flags = vma->vm_flags;
munlock_vma_pages_range(vma, start, start + size);
vma->vm_flags = vm_flags;
}
mmu_notifier_invalidate_range_start(mm, start, start + size);
err = vma->vm_ops->remap_pages(vma, start, size, pgoff);
mmu_notifier_invalidate_range_end(mm, start, start + size);
/*
* We can't clear VM_NONLINEAR because we'd have to do
* it after ->remap_pages completes, and that would prevent
* downgrading the lock. (Locks can't be upgraded).
*/
out:
if (vma)
vm_flags = vma->vm_flags;
out_freed:
if (likely(!has_write_lock))
up_read(&mm->mmap_sem);
else
up_write(&mm->mmap_sem);
if (!err && ((vm_flags & VM_LOCKED) || !(flags & MAP_NONBLOCK)))
mm_populate(start, size);
return err;
}
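/*
 * Illustrative userspace usage (a sketch with error handling omitted;
 * fd is assumed to be an open file at least four pages long): map a
 * shared window, then rewire single pages nonlinearly. Note that prot
 * must be 0 and pgoff counts pages, not bytes:
 *
 *	size_t psz = sysconf(_SC_PAGESIZE);
 *	char *win = mmap(NULL, 4 * psz, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *	remap_file_pages(win, psz, 0, 3, 0);
 *	remap_file_pages(win + psz, psz, 0, 2, 0);
 *
 * After the two calls, virtual page 0 of the window shows file page 3
 * and virtual page 1 shows file page 2, all within the original vma.
 */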