M7350/kernel/fs/proc/task_mmu.c

#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/mm_inline.h>
#include <linux/ctype.h>
#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
unsigned long data, text, lib, swap;
unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
/*
* Note: to minimize their overhead, mm maintains hiwater_vm and
* hiwater_rss only when about to *lower* total_vm or rss. Any
* collector of these hiwater stats must therefore get total_vm
* and rss too, which will usually be the higher. Barriers? not
* worth the effort, such snapshots can always be inconsistent.
*/
hiwater_vm = total_vm = mm->total_vm;
if (hiwater_vm < mm->hiwater_vm)
hiwater_vm = mm->hiwater_vm;
hiwater_rss = total_rss = get_mm_rss(mm);
if (hiwater_rss < mm->hiwater_rss)
hiwater_rss = mm->hiwater_rss;
data = mm->total_vm - mm->shared_vm - mm->stack_vm;
text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
swap = get_mm_counter(mm, MM_SWAPENTS);
seq_printf(m,
"VmPeak:\t%8lu kB\n"
"VmSize:\t%8lu kB\n"
"VmLck:\t%8lu kB\n"
"VmPin:\t%8lu kB\n"
"VmHWM:\t%8lu kB\n"
"VmRSS:\t%8lu kB\n"
"VmData:\t%8lu kB\n"
"VmStk:\t%8lu kB\n"
"VmExe:\t%8lu kB\n"
"VmLib:\t%8lu kB\n"
"VmPTE:\t%8lu kB\n"
"VmSwap:\t%8lu kB\n",
hiwater_vm << (PAGE_SHIFT-10),
total_vm << (PAGE_SHIFT-10),
mm->locked_vm << (PAGE_SHIFT-10),
mm->pinned_vm << (PAGE_SHIFT-10),
hiwater_rss << (PAGE_SHIFT-10),
total_rss << (PAGE_SHIFT-10),
data << (PAGE_SHIFT-10),
mm->stack_vm << (PAGE_SHIFT-10), text, lib,
(PTRS_PER_PTE * sizeof(pte_t) *
atomic_long_read(&mm->nr_ptes)) >> 10,
swap << (PAGE_SHIFT-10));
}
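
/*
 * Illustrative only: task_mem() above emits the memory section of
 * /proc/<pid>/status. With hypothetical values it reads:
 *
 *	VmPeak:	    8764 kB
 *	VmSize:	    8360 kB
 *	VmLck:	       0 kB
 *	VmPin:	       0 kB
 *	VmHWM:	    1716 kB
 *	VmRSS:	    1716 kB
 *	VmData:	     348 kB
 *	VmStk:	     136 kB
 *	VmExe:	     748 kB
 *	VmLib:	    1824 kB
 *	VmPTE:	      40 kB
 *	VmSwap:	       0 kB
 */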
unsigned long task_vsize(struct mm_struct *mm)
{
return PAGE_SIZE * mm->total_vm;
}
unsigned long task_statm(struct mm_struct *mm,
unsigned long *shared, unsigned long *text,
unsigned long *data, unsigned long *resident)
{
*shared = get_mm_counter(mm, MM_FILEPAGES);
*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
>> PAGE_SHIFT;
*data = mm->total_vm - mm->shared_vm;
*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
return mm->total_vm;
}
#ifdef CONFIG_NUMA
/*
* Save get_task_policy() for show_numa_map().
*/
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
struct task_struct *task = priv->task;
task_lock(task);
priv->task_mempolicy = get_task_policy(task);
mpol_get(priv->task_mempolicy);
task_unlock(task);
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
mpol_put(priv->task_mempolicy);
}
#else
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
}
#endif
static void seq_print_vma_name(struct seq_file *m, struct vm_area_struct *vma)
{
const char __user *name = vma_get_anon_name(vma);
struct mm_struct *mm = vma->vm_mm;
unsigned long page_start_vaddr;
unsigned long page_offset;
unsigned long num_pages;
unsigned long max_len = NAME_MAX;
int i;
page_start_vaddr = (unsigned long)name & PAGE_MASK;
page_offset = (unsigned long)name - page_start_vaddr;
num_pages = DIV_ROUND_UP(page_offset + max_len, PAGE_SIZE);
seq_puts(m, "[anon:");
for (i = 0; i < num_pages; i++) {
int len;
int write_len;
const char *kaddr;
long pages_pinned;
struct page *page;
pages_pinned = get_user_pages(current, mm, page_start_vaddr,
1, 0, 0, &page, NULL);
if (pages_pinned < 1) {
seq_puts(m, "<fault>]");
return;
}
kaddr = (const char *)kmap(page);
len = min(max_len, PAGE_SIZE - page_offset);
write_len = strnlen(kaddr + page_offset, len);
seq_write(m, kaddr + page_offset, write_len);
kunmap(page);
put_page(page);
/* if strnlen hit a null terminator then we're done */
if (write_len != len)
break;
max_len -= len;
page_offset = 0;
page_start_vaddr += PAGE_SIZE;
}
seq_putc(m, ']');
}
static void vma_stop(struct proc_maps_private *priv)
{
struct mm_struct *mm = priv->mm;
release_task_mempolicy(priv);
up_read(&mm->mmap_sem);
mmput(mm);
}
static struct vm_area_struct *
m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
if (vma == priv->tail_vma)
return NULL;
return vma->vm_next ?: priv->tail_vma;
}
static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma)
{
if (m->count < m->size) /* vma is copied successfully */
m->version = m_next_vma(m->private, vma) ? vma->vm_start : -1UL;
}
static void *m_start(struct seq_file *m, loff_t *ppos)
{
struct proc_maps_private *priv = m->private;
unsigned long last_addr = m->version;
struct mm_struct *mm;
struct vm_area_struct *vma;
unsigned int pos = *ppos;
/* See m_cache_vma(). Zero at the start or after lseek. */
if (last_addr == -1UL)
return NULL;
priv->task = get_proc_task(priv->inode);
if (!priv->task)
return ERR_PTR(-ESRCH);
mm = priv->mm;
if (!mm || !atomic_inc_not_zero(&mm->mm_users))
return NULL;
down_read(&mm->mmap_sem);
hold_task_mempolicy(priv);
priv->tail_vma = get_gate_vma(mm);
if (last_addr) {
vma = find_vma(mm, last_addr);
if (vma && (vma = m_next_vma(priv, vma)))
return vma;
}
m->version = 0;
if (pos < mm->map_count) {
for (vma = mm->mmap; pos; pos--) {
m->version = vma->vm_start;
vma = vma->vm_next;
}
return vma;
}
/* we do not bother to update m->version in this case */
if (pos == mm->map_count && priv->tail_vma)
return priv->tail_vma;
vma_stop(priv);
return NULL;
}
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
struct proc_maps_private *priv = m->private;
struct vm_area_struct *next;
(*pos)++;
next = m_next_vma(priv, v);
if (!next)
vma_stop(priv);
return next;
}
static void m_stop(struct seq_file *m, void *v)
{
struct proc_maps_private *priv = m->private;
if (!IS_ERR_OR_NULL(v))
vma_stop(priv);
if (priv->task) {
put_task_struct(priv->task);
priv->task = NULL;
}
}
static int proc_maps_open(struct inode *inode, struct file *file,
const struct seq_operations *ops, int psize)
{
struct proc_maps_private *priv = __seq_open_private(file, ops, psize);
if (!priv)
return -ENOMEM;
priv->inode = inode;
priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
if (IS_ERR(priv->mm)) {
int err = PTR_ERR(priv->mm);
seq_release_private(inode, file);
return err;
}
return 0;
}
static int proc_map_release(struct inode *inode, struct file *file)
{
struct seq_file *seq = file->private_data;
struct proc_maps_private *priv = seq->private;
if (priv->mm)
mmdrop(priv->mm);
return seq_release_private(inode, file);
}
static int do_maps_open(struct inode *inode, struct file *file,
const struct seq_operations *ops)
{
return proc_maps_open(inode, file, ops,
sizeof(struct proc_maps_private));
}
static pid_t pid_of_stack(struct proc_maps_private *priv,
struct vm_area_struct *vma, bool is_pid)
{
struct inode *inode = priv->inode;
struct task_struct *task;
pid_t ret = 0;
rcu_read_lock();
task = pid_task(proc_pid(inode), PIDTYPE_PID);
if (task) {
task = task_of_stack(task, vma, is_pid);
if (task)
ret = task_pid_nr_ns(task, inode->i_sb->s_fs_info);
}
rcu_read_unlock();
return ret;
}
static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
{
struct mm_struct *mm = vma->vm_mm;
struct file *file = vma->vm_file;
struct proc_maps_private *priv = m->private;
vm_flags_t flags = vma->vm_flags;
unsigned long ino = 0;
unsigned long long pgoff = 0;
unsigned long start, end;
dev_t dev = 0;
const char *name = NULL;
if (file) {
struct inode *inode = file_inode(vma->vm_file);
dev = inode->i_sb->s_dev;
ino = inode->i_ino;
pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
}
/* We don't show the stack guard page in /proc/maps */
start = vma->vm_start;
if (stack_guard_page_start(vma, start))
start += PAGE_SIZE;
end = vma->vm_end;
if (stack_guard_page_end(vma, end))
end -= PAGE_SIZE;
seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
start,
end,
flags & VM_READ ? 'r' : '-',
flags & VM_WRITE ? 'w' : '-',
flags & VM_EXEC ? 'x' : '-',
flags & VM_MAYSHARE ? 's' : 'p',
pgoff,
MAJOR(dev), MINOR(dev), ino);
/*
* Print the dentry name for named mappings, and a
* special [heap] marker for the heap:
*/
if (file) {
seq_pad(m, ' ');
seq_path(m, &file->f_path, "\n");
goto done;
}
if (vma->vm_ops && vma->vm_ops->name) {
name = vma->vm_ops->name(vma);
if (name)
goto done;
}
name = arch_vma_name(vma);
if (!name) {
pid_t tid;
if (!mm) {
name = "[vdso]";
goto done;
}
if (vma->vm_start <= mm->brk &&
vma->vm_end >= mm->start_brk) {
name = "[heap]";
goto done;
}
tid = pid_of_stack(priv, vma, is_pid);
if (tid != 0) {
/*
* Thread stack in /proc/PID/task/TID/maps or
* the main process stack.
*/
if (!is_pid || (vma->vm_start <= mm->start_stack &&
vma->vm_end >= mm->start_stack)) {
name = "[stack]";
} else {
/* Thread stack in /proc/PID/maps */
seq_pad(m, ' ');
seq_printf(m, "[stack:%d]", tid);
}
goto done;
}
if (vma_get_anon_name(vma)) {
seq_pad(m, ' ');
seq_print_vma_name(m, vma);
}
}
done:
if (name) {
seq_pad(m, ' ');
seq_puts(m, name);
}
seq_putc(m, '\n');
}
static int show_map(struct seq_file *m, void *v, int is_pid)
{
show_map_vma(m, v, is_pid);
m_cache_vma(m, v);
return 0;
}
static int show_pid_map(struct seq_file *m, void *v)
{
return show_map(m, v, 1);
}
static int show_tid_map(struct seq_file *m, void *v)
{
return show_map(m, v, 0);
}
static const struct seq_operations proc_pid_maps_op = {
.start = m_start,
.next = m_next,
.stop = m_stop,
.show = show_pid_map
};
static const struct seq_operations proc_tid_maps_op = {
.start = m_start,
.next = m_next,
.stop = m_stop,
.show = show_tid_map
};
static int pid_maps_open(struct inode *inode, struct file *file)
{
return do_maps_open(inode, file, &proc_pid_maps_op);
}
static int tid_maps_open(struct inode *inode, struct file *file)
{
return do_maps_open(inode, file, &proc_tid_maps_op);
}
const struct file_operations proc_pid_maps_operations = {
.open = pid_maps_open,
.read = seq_read,
.llseek = seq_lseek,
.release = proc_map_release,
};
const struct file_operations proc_tid_maps_operations = {
.open = tid_maps_open,
.read = seq_read,
.llseek = seq_lseek,
.release = proc_map_release,
};
/*
 * Proportional Set Size (PSS): my share of RSS.
*
* PSS of a process is the count of pages it has in memory, where each
* page is divided by the number of processes sharing it. So if a
* process has 1000 pages all to itself, and 1000 shared with one other
* process, its PSS will be 1500.
*
 * To keep accumulated division errors low, we maintain pss as a 64-bit
 * fixed-point counter, so (pss >> PSS_SHIFT) is the real byte count.
*
* A shift of 12 before division means (assuming 4K page size):
* - 1M 3-user-pages add up to 8KB errors;
* - supports mapcount up to 2^24, or 16M;
* - supports PSS up to 2^52 bytes, or 4PB.
*/
#define PSS_SHIFT 12
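
/*
 * Worked example of the fixed-point scheme above (a sketch, not extra
 * kernel logic): with 4K pages, a process mapping 1000 private pages
 * and 1000 pages shared with one other process accumulates
 *
 *	pss = 1000 * (4096 << PSS_SHIFT)	(mapcount == 1)
 *	    + 1000 * (4096 << PSS_SHIFT) / 2	(mapcount == 2)
 *
 * so (pss >> PSS_SHIFT) == 1500 * 4096 bytes - the "1500 pages" from
 * the comment above. show_smap() later shifts by (10 + PSS_SHIFT) to
 * report the same quantity in kB.
 */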
#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
struct vm_area_struct *vma;
unsigned long resident;
unsigned long shared_clean;
unsigned long shared_dirty;
unsigned long private_clean;
unsigned long private_dirty;
unsigned long referenced;
unsigned long anonymous;
unsigned long anonymous_thp;
unsigned long swap;
unsigned long nonlinear;
u64 pss;
};
static void smaps_pte_entry(pte_t ptent, unsigned long addr,
unsigned long ptent_size, struct mm_walk *walk)
{
struct mem_size_stats *mss = walk->private;
struct vm_area_struct *vma = mss->vma;
pgoff_t pgoff = linear_page_index(vma, addr);
struct page *page = NULL;
int mapcount;
if (pte_present(ptent)) {
page = vm_normal_page(vma, addr, ptent);
} else if (is_swap_pte(ptent)) {
swp_entry_t swpent = pte_to_swp_entry(ptent);
if (!non_swap_entry(swpent))
mss->swap += ptent_size;
else if (is_migration_entry(swpent))
page = migration_entry_to_page(swpent);
} else if (pte_file(ptent)) {
if (pte_to_pgoff(ptent) != pgoff)
mss->nonlinear += ptent_size;
}
if (!page)
return;
if (PageAnon(page))
mss->anonymous += ptent_size;
if (page->index != pgoff)
mss->nonlinear += ptent_size;
mss->resident += ptent_size;
/* Accumulate the size in pages that have been accessed. */
if (pte_young(ptent) || PageReferenced(page))
mss->referenced += ptent_size;
mapcount = page_mapcount(page);
if (mapcount >= 2) {
if (pte_dirty(ptent) || PageDirty(page))
mss->shared_dirty += ptent_size;
else
mss->shared_clean += ptent_size;
mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
} else {
if (pte_dirty(ptent) || PageDirty(page))
mss->private_dirty += ptent_size;
else
mss->private_clean += ptent_size;
mss->pss += (ptent_size << PSS_SHIFT);
}
}
static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
struct mem_size_stats *mss = walk->private;
struct vm_area_struct *vma = mss->vma;
pte_t *pte;
spinlock_t *ptl;
if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
smaps_pte_entry(*(pte_t *)pmd, addr, HPAGE_PMD_SIZE, walk);
spin_unlock(ptl);
mss->anonymous_thp += HPAGE_PMD_SIZE;
return 0;
}
if (pmd_trans_unstable(pmd))
return 0;
/*
* The mmap_sem held all the way back in m_start() is what
* keeps khugepaged out of here and from collapsing things
* in here.
*/
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE)
smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
pte_unmap_unlock(pte - 1, ptl);
cond_resched();
return 0;
}
static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
{
/*
* Don't forget to update Documentation/ on changes.
*/
static const char mnemonics[BITS_PER_LONG][2] = {
/*
		 * In case we meet a flag we don't know about.
*/
[0 ... (BITS_PER_LONG-1)] = "??",
[ilog2(VM_READ)] = "rd",
[ilog2(VM_WRITE)] = "wr",
[ilog2(VM_EXEC)] = "ex",
[ilog2(VM_SHARED)] = "sh",
[ilog2(VM_MAYREAD)] = "mr",
[ilog2(VM_MAYWRITE)] = "mw",
[ilog2(VM_MAYEXEC)] = "me",
[ilog2(VM_MAYSHARE)] = "ms",
[ilog2(VM_GROWSDOWN)] = "gd",
[ilog2(VM_PFNMAP)] = "pf",
[ilog2(VM_DENYWRITE)] = "dw",
[ilog2(VM_LOCKED)] = "lo",
[ilog2(VM_IO)] = "io",
[ilog2(VM_SEQ_READ)] = "sr",
[ilog2(VM_RAND_READ)] = "rr",
[ilog2(VM_DONTCOPY)] = "dc",
[ilog2(VM_DONTEXPAND)] = "de",
[ilog2(VM_ACCOUNT)] = "ac",
[ilog2(VM_NORESERVE)] = "nr",
[ilog2(VM_HUGETLB)] = "ht",
[ilog2(VM_NONLINEAR)] = "nl",
[ilog2(VM_ARCH_1)] = "ar",
[ilog2(VM_DONTDUMP)] = "dd",
#ifdef CONFIG_MEM_SOFT_DIRTY
[ilog2(VM_SOFTDIRTY)] = "sd",
#endif
[ilog2(VM_MIXEDMAP)] = "mm",
[ilog2(VM_HUGEPAGE)] = "hg",
[ilog2(VM_NOHUGEPAGE)] = "nh",
[ilog2(VM_MERGEABLE)] = "mg",
};
size_t i;
seq_puts(m, "VmFlags: ");
for (i = 0; i < BITS_PER_LONG; i++) {
if (vma->vm_flags & (1UL << i)) {
seq_printf(m, "%c%c ",
mnemonics[i][0], mnemonics[i][1]);
}
}
seq_putc(m, '\n');
}
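
/*
 * Example (illustrative values): for a typical private, read-execute
 * file mapping the loop above would emit
 *
 *	VmFlags: rd ex mr mw me dw
 *
 * one two-letter mnemonic per set flag, in bit order.
 */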
static int show_smap(struct seq_file *m, void *v, int is_pid)
{
struct vm_area_struct *vma = v;
struct mem_size_stats mss;
struct mm_walk smaps_walk = {
.pmd_entry = smaps_pte_range,
.mm = vma->vm_mm,
.private = &mss,
};
memset(&mss, 0, sizeof mss);
mss.vma = vma;
/* mmap_sem is held in m_start */
if (vma->vm_mm && !is_vm_hugetlb_page(vma))
walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
show_map_vma(m, vma, is_pid);
if (vma_get_anon_name(vma)) {
seq_puts(m, "Name: ");
seq_print_vma_name(m, vma);
seq_putc(m, '\n');
}
seq_printf(m,
"Size: %8lu kB\n"
"Rss: %8lu kB\n"
"Pss: %8lu kB\n"
"Shared_Clean: %8lu kB\n"
"Shared_Dirty: %8lu kB\n"
"Private_Clean: %8lu kB\n"
"Private_Dirty: %8lu kB\n"
"Referenced: %8lu kB\n"
"Anonymous: %8lu kB\n"
"AnonHugePages: %8lu kB\n"
"Swap: %8lu kB\n"
"KernelPageSize: %8lu kB\n"
"MMUPageSize: %8lu kB\n"
"Locked: %8lu kB\n",
(vma->vm_end - vma->vm_start) >> 10,
mss.resident >> 10,
(unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
mss.shared_clean >> 10,
mss.shared_dirty >> 10,
mss.private_clean >> 10,
mss.private_dirty >> 10,
mss.referenced >> 10,
mss.anonymous >> 10,
mss.anonymous_thp >> 10,
mss.swap >> 10,
vma_kernel_pagesize(vma) >> 10,
vma_mmu_pagesize(vma) >> 10,
(vma->vm_flags & VM_LOCKED) ?
(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);
if (vma->vm_flags & VM_NONLINEAR)
seq_printf(m, "Nonlinear: %8lu kB\n",
mss.nonlinear >> 10);
show_smap_vma_flags(m, vma);
m_cache_vma(m, vma);
return 0;
}
static int show_pid_smap(struct seq_file *m, void *v)
{
return show_smap(m, v, 1);
}
static int show_tid_smap(struct seq_file *m, void *v)
{
return show_smap(m, v, 0);
}
static const struct seq_operations proc_pid_smaps_op = {
.start = m_start,
.next = m_next,
.stop = m_stop,
.show = show_pid_smap
};
static const struct seq_operations proc_tid_smaps_op = {
.start = m_start,
.next = m_next,
.stop = m_stop,
.show = show_tid_smap
};
static int pid_smaps_open(struct inode *inode, struct file *file)
{
return do_maps_open(inode, file, &proc_pid_smaps_op);
}
static int tid_smaps_open(struct inode *inode, struct file *file)
{
return do_maps_open(inode, file, &proc_tid_smaps_op);
}
const struct file_operations proc_pid_smaps_operations = {
.open = pid_smaps_open,
.read = seq_read,
.llseek = seq_lseek,
.release = proc_map_release,
};
const struct file_operations proc_tid_smaps_operations = {
.open = tid_smaps_open,
.read = seq_read,
.llseek = seq_lseek,
.release = proc_map_release,
};
/*
* We do not want to have constant page-shift bits sitting in
* pagemap entries and are about to reuse them some time soon.
*
* Here's the "migration strategy":
 * 1. when the system boots, these bits remain what they are,
 *    but a warning about the future change is printed in the log;
 * 2. once anyone clears soft-dirty bits via the clear_refs file,
 *    this flag is set to denote that the user is aware of the
 *    new API and the page-shift bits change their meaning.
 *    The respective warning is printed in dmesg;
* 3. In a couple of releases we will remove all the mentions
* of page-shift in pagemap entries.
*/
static bool soft_dirty_cleared __read_mostly;
enum clear_refs_types {
CLEAR_REFS_ALL = 1,
CLEAR_REFS_ANON,
CLEAR_REFS_MAPPED,
CLEAR_REFS_SOFT_DIRTY,
CLEAR_REFS_MM_HIWATER_RSS,
CLEAR_REFS_LAST,
};
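
/*
 * The integer written to /proc/<pid>/clear_refs selects one of the
 * values above: "1" clears the referenced bits on all pages, "2" only
 * on anonymous pages, "3" only on file-mapped pages, "4" clears the
 * soft-dirty bits, and "5" resets the peak RSS (VmHWM) to the current
 * RSS; see clear_refs_write() below.
 */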
struct clear_refs_private {
struct vm_area_struct *vma;
enum clear_refs_types type;
};
static inline void clear_soft_dirty(struct vm_area_struct *vma,
unsigned long addr, pte_t *pte)
{
#ifdef CONFIG_MEM_SOFT_DIRTY
/*
* The soft-dirty tracker uses #PF-s to catch writes
* to pages, so write-protect the pte as well. See the
* Documentation/vm/soft-dirty.txt for full description
* of how soft-dirty works.
*/
pte_t ptent = *pte;
if (pte_present(ptent)) {
ptent = pte_wrprotect(ptent);
ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
} else if (is_swap_pte(ptent)) {
ptent = pte_swp_clear_soft_dirty(ptent);
} else if (pte_file(ptent)) {
ptent = pte_file_clear_soft_dirty(ptent);
}
set_pte_at(vma->vm_mm, addr, pte, ptent);
#endif
}
static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
unsigned long end, struct mm_walk *walk)
{
struct clear_refs_private *cp = walk->private;
struct vm_area_struct *vma = cp->vma;
pte_t *pte, ptent;
spinlock_t *ptl;
struct page *page;
split_huge_page_pmd(vma, addr, pmd);
if (pmd_trans_unstable(pmd))
return 0;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE) {
ptent = *pte;
if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
clear_soft_dirty(vma, addr, pte);
continue;
}
if (!pte_present(ptent))
continue;
page = vm_normal_page(vma, addr, ptent);
if (!page)
continue;
/* Clear accessed and referenced bits. */
ptep_test_and_clear_young(vma, addr, pte);
ClearPageReferenced(page);
}
pte_unmap_unlock(pte - 1, ptl);
cond_resched();
return 0;
}
static ssize_t clear_refs_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct task_struct *task;
char buffer[PROC_NUMBUF];
struct mm_struct *mm;
struct vm_area_struct *vma;
enum clear_refs_types type;
int itype;
int rv;
memset(buffer, 0, sizeof(buffer));
if (count > sizeof(buffer) - 1)
count = sizeof(buffer) - 1;
if (copy_from_user(buffer, buf, count))
return -EFAULT;
rv = kstrtoint(strstrip(buffer), 10, &itype);
if (rv < 0)
return rv;
type = (enum clear_refs_types)itype;
if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
return -EINVAL;
if (type == CLEAR_REFS_SOFT_DIRTY) {
soft_dirty_cleared = true;
pr_warn_once("The pagemap bits 55-60 has changed their meaning!"
" See the linux/Documentation/vm/pagemap.txt for "
"details.\n");
}
task = get_proc_task(file_inode(file));
if (!task)
return -ESRCH;
mm = get_task_mm(task);
if (mm) {
struct clear_refs_private cp = {
.type = type,
};
struct mm_walk clear_refs_walk = {
.pmd_entry = clear_refs_pte_range,
.mm = mm,
.private = &cp,
};
if (type == CLEAR_REFS_MM_HIWATER_RSS) {
/*
* Writing 5 to /proc/pid/clear_refs resets the peak
* resident set size to this mm's current rss value.
*/
down_write(&mm->mmap_sem);
reset_mm_hiwater_rss(mm);
up_write(&mm->mmap_sem);
goto out_mm;
}
down_read(&mm->mmap_sem);
if (type == CLEAR_REFS_SOFT_DIRTY) {
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (!(vma->vm_flags & VM_SOFTDIRTY))
continue;
up_read(&mm->mmap_sem);
down_write(&mm->mmap_sem);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
vma->vm_flags &= ~VM_SOFTDIRTY;
vma_set_page_prot(vma);
}
downgrade_write(&mm->mmap_sem);
break;
}
mmu_notifier_invalidate_range_start(mm, 0, -1);
}
for (vma = mm->mmap; vma; vma = vma->vm_next) {
cp.vma = vma;
if (is_vm_hugetlb_page(vma))
continue;
/*
* Writing 1 to /proc/pid/clear_refs affects all pages.
*
* Writing 2 to /proc/pid/clear_refs only affects
* Anonymous pages.
*
* Writing 3 to /proc/pid/clear_refs only affects file
* mapped pages.
*
* Writing 4 to /proc/pid/clear_refs affects all pages.
*/
if (type == CLEAR_REFS_ANON && vma->vm_file)
continue;
if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
continue;
walk_page_range(vma->vm_start, vma->vm_end,
&clear_refs_walk);
}
if (type == CLEAR_REFS_SOFT_DIRTY)
mmu_notifier_invalidate_range_end(mm, 0, -1);
flush_tlb_mm(mm);
up_read(&mm->mmap_sem);
out_mm:
mmput(mm);
}
put_task_struct(task);
return count;
}
const struct file_operations proc_clear_refs_operations = {
.write = clear_refs_write,
.llseek = noop_llseek,
};
typedef struct {
u64 pme;
} pagemap_entry_t;
struct pagemapread {
int pos, len; /* units: PM_ENTRY_BYTES, not bytes */
pagemap_entry_t *buffer;
bool v2;
};
#define PAGEMAP_WALK_SIZE (PMD_SIZE)
#define PAGEMAP_WALK_MASK (PMD_MASK)
#define PM_ENTRY_BYTES sizeof(pagemap_entry_t)
#define PM_STATUS_BITS 3
#define PM_STATUS_OFFSET (64 - PM_STATUS_BITS)
#define PM_STATUS_MASK (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
#define PM_STATUS(nr) (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
#define PM_PSHIFT_BITS 6
#define PM_PSHIFT_OFFSET (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
#define PM_PSHIFT_MASK (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
#define __PM_PSHIFT(x) (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
#define PM_PFRAME_MASK ((1LL << PM_PSHIFT_OFFSET) - 1)
#define PM_PFRAME(x) ((x) & PM_PFRAME_MASK)
/* in "new" pagemap pshift bits are occupied with more status bits */
#define PM_STATUS2(v2, x) (__PM_PSHIFT(v2 ? x : PAGE_SHIFT))
#define __PM_SOFT_DIRTY (1LL)
#define PM_PRESENT PM_STATUS(4LL)
#define PM_SWAP PM_STATUS(2LL)
#define PM_FILE PM_STATUS(1LL)
#define PM_NOT_PRESENT(v2) PM_STATUS2(v2, 0)
#define PM_END_OF_BUFFER 1
static inline pagemap_entry_t make_pme(u64 val)
{
return (pagemap_entry_t) { .pme = val };
}
static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
struct pagemapread *pm)
{
pm->buffer[pm->pos++] = *pme;
if (pm->pos >= pm->len)
return PM_END_OF_BUFFER;
return 0;
}
static int pagemap_pte_hole(unsigned long start, unsigned long end,
struct mm_walk *walk)
{
struct pagemapread *pm = walk->private;
unsigned long addr = start;
int err = 0;
while (addr < end) {
struct vm_area_struct *vma = find_vma(walk->mm, addr);
pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
/* End of address space hole, which we mark as non-present. */
unsigned long hole_end;
if (vma)
hole_end = min(end, vma->vm_start);
else
hole_end = end;
for (; addr < hole_end; addr += PAGE_SIZE) {
err = add_to_pagemap(addr, &pme, pm);
if (err)
goto out;
}
if (!vma)
break;
/* Addresses in the VMA. */
if (vma->vm_flags & VM_SOFTDIRTY)
pme.pme |= PM_STATUS2(pm->v2, __PM_SOFT_DIRTY);
for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
err = add_to_pagemap(addr, &pme, pm);
if (err)
goto out;
}
}
out:
return err;
}
static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
u64 frame, flags;
struct page *page = NULL;
int flags2 = 0;
if (pte_present(pte)) {
frame = pte_pfn(pte);
flags = PM_PRESENT;
page = vm_normal_page(vma, addr, pte);
if (pte_soft_dirty(pte))
flags2 |= __PM_SOFT_DIRTY;
} else if (is_swap_pte(pte)) {
swp_entry_t entry;
if (pte_swp_soft_dirty(pte))
flags2 |= __PM_SOFT_DIRTY;
entry = pte_to_swp_entry(pte);
frame = swp_type(entry) |
(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
flags = PM_SWAP;
if (is_migration_entry(entry))
page = migration_entry_to_page(entry);
} else {
if (vma->vm_flags & VM_SOFTDIRTY)
flags2 |= __PM_SOFT_DIRTY;
*pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, flags2));
return;
}
if (page && !PageAnon(page))
flags |= PM_FILE;
if ((vma->vm_flags & VM_SOFTDIRTY))
flags2 |= __PM_SOFT_DIRTY;
*pme = make_pme(PM_PFRAME(frame) | PM_STATUS2(pm->v2, flags2) | flags);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
pmd_t pmd, int offset, int pmd_flags2)
{
/*
	 * Currently the pmd for a thp is always present, because a thp cannot be
	 * swapped out, migrated, or HWPOISONed (it is split in such cases instead).
* This if-check is just to prepare for future implementation.
*/
if (pmd_present(pmd))
*pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset)
| PM_STATUS2(pm->v2, pmd_flags2) | PM_PRESENT);
else
*pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, pmd_flags2));
}
#else
static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
pmd_t pmd, int offset, int pmd_flags2)
{
}
#endif
static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
struct vm_area_struct *vma;
struct pagemapread *pm = walk->private;
spinlock_t *ptl;
pte_t *pte, *orig_pte;
int err = 0;
/* find the first VMA at or above 'addr' */
vma = find_vma(walk->mm, addr);
if (vma && pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
int pmd_flags2;
if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(*pmd))
pmd_flags2 = __PM_SOFT_DIRTY;
else
pmd_flags2 = 0;
for (; addr != end; addr += PAGE_SIZE) {
unsigned long offset;
pagemap_entry_t pme;
offset = (addr & ~PAGEMAP_WALK_MASK) >>
PAGE_SHIFT;
thp_pmd_to_pagemap_entry(&pme, pm, *pmd, offset, pmd_flags2);
err = add_to_pagemap(addr, &pme, pm);
if (err)
break;
}
spin_unlock(ptl);
return err;
}
if (pmd_trans_unstable(pmd))
return 0;
while (1) {
/* End of address space hole, which we mark as non-present. */
unsigned long hole_end;
if (vma)
hole_end = min(end, vma->vm_start);
else
hole_end = end;
for (; addr < hole_end; addr += PAGE_SIZE) {
pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
err = add_to_pagemap(addr, &pme, pm);
if (err)
return err;
}
if (!vma || vma->vm_start >= end)
break;
/*
* We can't possibly be in a hugetlb VMA. In general,
* for a mm_walk with a pmd_entry and a hugetlb_entry,
* the pmd_entry can only be called on addresses in a
* hugetlb if the walk starts in a non-hugetlb VMA and
* spans a hugepage VMA. Since pagemap_read walks are
* PMD-sized and PMD-aligned, this will never be true.
*/
BUG_ON(is_vm_hugetlb_page(vma));
/* Addresses in the VMA. */
orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
for (; addr < min(end, vma->vm_end); pte++, addr += PAGE_SIZE) {
pagemap_entry_t pme;
pte_to_pagemap_entry(&pme, pm, vma, addr, *pte);
err = add_to_pagemap(addr, &pme, pm);
if (err)
break;
}
pte_unmap_unlock(orig_pte, ptl);
if (err)
return err;
if (addr == end)
break;
vma = find_vma(walk->mm, addr);
}
cond_resched();
return err;
}
#ifdef CONFIG_HUGETLB_PAGE
static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
pte_t pte, int offset, int flags2)
{
if (pte_present(pte))
*pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset) |
PM_STATUS2(pm->v2, flags2) |
PM_PRESENT);
else
*pme = make_pme(PM_NOT_PRESENT(pm->v2) |
PM_STATUS2(pm->v2, flags2));
}
/* This function walks within one hugetlb entry in a single call */
static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
struct pagemapread *pm = walk->private;
struct vm_area_struct *vma;
int err = 0;
int flags2;
pagemap_entry_t pme;
vma = find_vma(walk->mm, addr);
WARN_ON_ONCE(!vma);
if (vma && (vma->vm_flags & VM_SOFTDIRTY))
flags2 = __PM_SOFT_DIRTY;
else
flags2 = 0;
for (; addr != end; addr += PAGE_SIZE) {
int offset = (addr & ~hmask) >> PAGE_SHIFT;
huge_pte_to_pagemap_entry(&pme, pm, *pte, offset, flags2);
err = add_to_pagemap(addr, &pme, pm);
if (err)
return err;
}
cond_resched();
return err;
}
#endif /* HUGETLB_PAGE */
/*
* /proc/pid/pagemap - an array mapping virtual pages to pfns
*
* For each page in the address space, this file contains one 64-bit entry
* consisting of the following:
*
* Bits 0-54 page frame number (PFN) if present
* Bits 0-4 swap type if swapped
* Bits 5-54 swap offset if swapped
* Bits 55-60 page shift (page size = 1<<page shift)
* Bit 61 page is file-page or shared-anon
* Bit 62 page swapped
* Bit 63 page present
*
* If the page is not present but in swap, then the PFN contains an
* encoding of the swap file number and the page's offset into the
* swap. Unmapped pages return a null PFN. This allows determining
* precisely which pages are mapped (or in swap) and comparing mapped
* pages between processes.
*
* Efficient users of this interface will use /proc/pid/maps to
* determine which areas of memory are actually mapped and llseek to
* skip over unmapped regions.
*/
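
/*
 * Minimal user-space decoding sketch for the layout above (assumes the
 * "new" interpretation in which bits 55-60 carry status bits rather
 * than a page shift):
 *
 *	uint64_t ent;
 *	off_t off = (vaddr / page_size) * sizeof(ent);
 *
 *	if (pread(fd, &ent, sizeof(ent), off) == sizeof(ent)) {
 *		if (ent & (1ULL << 63))			<- page present
 *			pfn = ent & ((1ULL << 55) - 1);	<- bits 0-54
 *		else if (ent & (1ULL << 62))		<- page swapped
 *			swap_type = ent & 0x1f;		<- bits 0-4
 *	}
 */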
static ssize_t pagemap_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct task_struct *task = get_proc_task(file_inode(file));
struct mm_struct *mm;
struct pagemapread pm;
int ret = -ESRCH;
struct mm_walk pagemap_walk = {};
unsigned long src;
unsigned long svpfn;
unsigned long start_vaddr;
unsigned long end_vaddr;
int copied = 0;
if (!task)
goto out;
ret = -EINVAL;
/* file position must be aligned */
if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
goto out_task;
ret = 0;
if (!count)
goto out_task;
pm.v2 = soft_dirty_cleared;
pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
ret = -ENOMEM;
if (!pm.buffer)
goto out_task;
mm = mm_access(task, PTRACE_MODE_READ);
ret = PTR_ERR(mm);
if (!mm || IS_ERR(mm))
goto out_free;
pagemap_walk.pmd_entry = pagemap_pte_range;
pagemap_walk.pte_hole = pagemap_pte_hole;
#ifdef CONFIG_HUGETLB_PAGE
pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
#endif
pagemap_walk.mm = mm;
pagemap_walk.private = &pm;
src = *ppos;
svpfn = src / PM_ENTRY_BYTES;
start_vaddr = svpfn << PAGE_SHIFT;
end_vaddr = TASK_SIZE_OF(task);
/* watch out for wraparound */
if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
start_vaddr = end_vaddr;
/*
* The odds are that this will stop walking way
* before end_vaddr, because the length of the
* user buffer is tracked in "pm", and the walk
* will stop when we hit the end of the buffer.
*/
ret = 0;
while (count && (start_vaddr < end_vaddr)) {
int len;
unsigned long end;
pm.pos = 0;
end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
/* overflow ? */
if (end < start_vaddr || end > end_vaddr)
end = end_vaddr;
down_read(&mm->mmap_sem);
ret = walk_page_range(start_vaddr, end, &pagemap_walk);
up_read(&mm->mmap_sem);
start_vaddr = end;
len = min(count, PM_ENTRY_BYTES * pm.pos);
if (copy_to_user(buf, pm.buffer, len)) {
ret = -EFAULT;
goto out_mm;
}
copied += len;
buf += len;
count -= len;
}
*ppos += copied;
if (!ret || ret == PM_END_OF_BUFFER)
ret = copied;
out_mm:
mmput(mm);
out_free:
kfree(pm.buffer);
out_task:
put_task_struct(task);
out:
return ret;
}
static int pagemap_open(struct inode *inode, struct file *file)
{
/* do not disclose physical addresses: attack vector */
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
pr_warn_once("Bits 55-60 of /proc/PID/pagemap entries are about "
"to stop being page-shift some time soon. See the "
"linux/Documentation/vm/pagemap.txt for details.\n");
return 0;
}
const struct file_operations proc_pagemap_operations = {
.llseek = mem_lseek, /* borrow this */
.read = pagemap_read,
.open = pagemap_open,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */
#ifdef CONFIG_PROCESS_RECLAIM
static int reclaim_pte_range(pmd_t *pmd, unsigned long addr,
unsigned long end, struct mm_walk *walk)
{
struct reclaim_param *rp = walk->private;
struct vm_area_struct *vma = rp->vma;
pte_t *pte, ptent;
spinlock_t *ptl;
struct page *page;
LIST_HEAD(page_list);
int isolated;
int reclaimed;
split_huge_page_pmd(vma, addr, pmd);
if (pmd_trans_unstable(pmd) || !rp->nr_to_reclaim)
return 0;
cont:
isolated = 0;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE) {
ptent = *pte;
if (!pte_present(ptent))
continue;
page = vm_normal_page(vma, addr, ptent);
if (!page)
continue;
if (isolate_lru_page(page))
continue;
list_add(&page->lru, &page_list);
inc_zone_page_state(page, NR_ISOLATED_ANON +
page_is_file_cache(page));
isolated++;
rp->nr_scanned++;
if ((isolated >= SWAP_CLUSTER_MAX) || !rp->nr_to_reclaim)
break;
}
pte_unmap_unlock(pte - 1, ptl);
reclaimed = reclaim_pages_from_list(&page_list, vma);
rp->nr_reclaimed += reclaimed;
rp->nr_to_reclaim -= reclaimed;
if (rp->nr_to_reclaim < 0)
rp->nr_to_reclaim = 0;
if (rp->nr_to_reclaim && (addr != end))
goto cont;
cond_resched();
return 0;
}
enum reclaim_type {
RECLAIM_FILE,
RECLAIM_ANON,
RECLAIM_ALL,
RECLAIM_RANGE,
};
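
/*
 * Illustrative usage of the interface implemented by reclaim_write()
 * below (assumes CONFIG_PROCESS_RECLAIM):
 *
 *	echo file > /proc/<pid>/reclaim		    - file-backed pages
 *	echo anon > /proc/<pid>/reclaim		    - anonymous pages
 *	echo all > /proc/<pid>/reclaim		    - both
 *	echo "0x40000000 1M" > /proc/<pid>/reclaim  - the given range
 *
 * The range form takes a page-aligned start address and a length; both
 * go through memparse(), so size suffixes such as K, M and G work.
 */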
struct reclaim_param reclaim_task_anon(struct task_struct *task,
int nr_to_reclaim)
{
struct mm_struct *mm;
struct vm_area_struct *vma;
struct mm_walk reclaim_walk = {};
struct reclaim_param rp;
rp.nr_reclaimed = 0;
rp.nr_scanned = 0;
get_task_struct(task);
mm = get_task_mm(task);
if (!mm)
goto out;
reclaim_walk.mm = mm;
reclaim_walk.pmd_entry = reclaim_pte_range;
rp.nr_to_reclaim = nr_to_reclaim;
reclaim_walk.private = &rp;
down_read(&mm->mmap_sem);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (is_vm_hugetlb_page(vma))
continue;
if (vma->vm_file)
continue;
if (!rp.nr_to_reclaim)
break;
rp.vma = vma;
walk_page_range(vma->vm_start, vma->vm_end,
&reclaim_walk);
}
flush_tlb_mm(mm);
up_read(&mm->mmap_sem);
mmput(mm);
out:
put_task_struct(task);
return rp;
}
static ssize_t reclaim_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct task_struct *task;
char buffer[200];
struct mm_struct *mm;
struct vm_area_struct *vma;
enum reclaim_type type;
char *type_buf;
struct mm_walk reclaim_walk = {};
unsigned long start = 0;
unsigned long end = 0;
struct reclaim_param rp;
memset(buffer, 0, sizeof(buffer));
if (count > sizeof(buffer) - 1)
count = sizeof(buffer) - 1;
if (copy_from_user(buffer, buf, count))
return -EFAULT;
type_buf = strstrip(buffer);
if (!strcmp(type_buf, "file"))
type = RECLAIM_FILE;
else if (!strcmp(type_buf, "anon"))
type = RECLAIM_ANON;
else if (!strcmp(type_buf, "all"))
type = RECLAIM_ALL;
else if (isdigit(*type_buf))
type = RECLAIM_RANGE;
else
goto out_err;
if (type == RECLAIM_RANGE) {
char *token;
unsigned long long len, len_in, tmp;
token = strsep(&type_buf, " ");
if (!token)
goto out_err;
tmp = memparse(token, &token);
if (tmp & ~PAGE_MASK || tmp > ULONG_MAX)
goto out_err;
start = tmp;
token = strsep(&type_buf, " ");
if (!token)
goto out_err;
len_in = memparse(token, &token);
len = (len_in + ~PAGE_MASK) & PAGE_MASK;
if (len > ULONG_MAX)
goto out_err;
/*
		 * Check whether len was rounded up from a small negative
		 * value to zero.
*/
if (len_in && !len)
goto out_err;
end = start + len;
if (end < start)
goto out_err;
}
task = get_proc_task(file->f_path.dentry->d_inode);
if (!task)
return -ESRCH;
mm = get_task_mm(task);
if (!mm)
goto out;
reclaim_walk.mm = mm;
reclaim_walk.pmd_entry = reclaim_pte_range;
rp.nr_to_reclaim = ~0;
rp.nr_reclaimed = 0;
reclaim_walk.private = &rp;
down_read(&mm->mmap_sem);
if (type == RECLAIM_RANGE) {
vma = find_vma(mm, start);
		while (vma) {
			if (vma->vm_start > end)
				break;
			/*
			 * Advance to the next VMA unconditionally: a bare
			 * "continue" here would never move past a hugetlb
			 * VMA and would spin forever.
			 */
			if (!is_vm_hugetlb_page(vma)) {
				rp.vma = vma;
				walk_page_range(max(vma->vm_start, start),
						min(vma->vm_end, end),
						&reclaim_walk);
			}
			vma = vma->vm_next;
		}
} else {
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (is_vm_hugetlb_page(vma))
continue;
if (type == RECLAIM_ANON && vma->vm_file)
continue;
if (type == RECLAIM_FILE && !vma->vm_file)
continue;
rp.vma = vma;
walk_page_range(vma->vm_start, vma->vm_end,
&reclaim_walk);
}
}
flush_tlb_mm(mm);
up_read(&mm->mmap_sem);
mmput(mm);
out:
put_task_struct(task);
return count;
out_err:
return -EINVAL;
}
const struct file_operations proc_reclaim_operations = {
.write = reclaim_write,
.llseek = noop_llseek,
};
#endif
#ifdef CONFIG_NUMA
struct numa_maps {
struct vm_area_struct *vma;
unsigned long pages;
unsigned long anon;
unsigned long active;
unsigned long writeback;
unsigned long mapcount_max;
unsigned long dirty;
unsigned long swapcache;
unsigned long node[MAX_NUMNODES];
};
struct numa_maps_private {
struct proc_maps_private proc_maps;
struct numa_maps md;
};
static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
unsigned long nr_pages)
{
int count = page_mapcount(page);
md->pages += nr_pages;
if (pte_dirty || PageDirty(page))
md->dirty += nr_pages;
if (PageSwapCache(page))
md->swapcache += nr_pages;
if (PageActive(page) || PageUnevictable(page))
md->active += nr_pages;
if (PageWriteback(page))
md->writeback += nr_pages;
if (PageAnon(page))
md->anon += nr_pages;
if (count > md->mapcount_max)
md->mapcount_max = count;
md->node[page_to_nid(page)] += nr_pages;
}
static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
unsigned long addr)
{
struct page *page;
int nid;
if (!pte_present(pte))
return NULL;
page = vm_normal_page(vma, addr, pte);
if (!page)
return NULL;
if (PageReserved(page))
return NULL;
nid = page_to_nid(page);
if (!node_isset(nid, node_states[N_MEMORY]))
return NULL;
return page;
}
static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
unsigned long end, struct mm_walk *walk)
{
struct numa_maps *md;
spinlock_t *ptl;
pte_t *orig_pte;
pte_t *pte;
md = walk->private;
if (pmd_trans_huge_lock(pmd, md->vma, &ptl) == 1) {
pte_t huge_pte = *(pte_t *)pmd;
struct page *page;
page = can_gather_numa_stats(huge_pte, md->vma, addr);
if (page)
gather_stats(page, md, pte_dirty(huge_pte),
HPAGE_PMD_SIZE/PAGE_SIZE);
spin_unlock(ptl);
return 0;
}
if (pmd_trans_unstable(pmd))
return 0;
orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
do {
struct page *page = can_gather_numa_stats(*pte, md->vma, addr);
if (!page)
continue;
gather_stats(page, md, pte_dirty(*pte), 1);
} while (pte++, addr += PAGE_SIZE, addr != end);
pte_unmap_unlock(orig_pte, ptl);
return 0;
}
#ifdef CONFIG_HUGETLB_PAGE
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
unsigned long addr, unsigned long end, struct mm_walk *walk)
{
struct numa_maps *md;
struct page *page;
if (!pte_present(*pte))
return 0;
page = pte_page(*pte);
if (!page)
return 0;
md = walk->private;
gather_stats(page, md, pte_dirty(*pte), 1);
return 0;
}
#else
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
unsigned long addr, unsigned long end, struct mm_walk *walk)
{
return 0;
}
#endif
/*
* Display pages allocated per node and memory policy via /proc.
*/
static int show_numa_map(struct seq_file *m, void *v, int is_pid)
{
struct numa_maps_private *numa_priv = m->private;
struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
struct vm_area_struct *vma = v;
struct numa_maps *md = &numa_priv->md;
struct file *file = vma->vm_file;
struct mm_struct *mm = vma->vm_mm;
struct mm_walk walk = {};
struct mempolicy *pol;
char buffer[64];
int nid;
if (!mm)
return 0;
/* Ensure we start with an empty set of numa_maps statistics. */
memset(md, 0, sizeof(*md));
md->vma = vma;
	walk.hugetlb_entry = gather_hugetlb_stats;
walk.pmd_entry = gather_pte_stats;
walk.private = md;
walk.mm = mm;
pol = __get_vma_policy(vma, vma->vm_start);
if (pol) {
mpol_to_str(buffer, sizeof(buffer), pol);
mpol_cond_put(pol);
} else {
mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
}
seq_printf(m, "%08lx %s", vma->vm_start, buffer);
if (file) {
seq_puts(m, " file=");
seq_path(m, &file->f_path, "\n\t= ");
} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
seq_puts(m, " heap");
} else {
pid_t tid = pid_of_stack(proc_priv, vma, is_pid);
if (tid != 0) {
/*
* Thread stack in /proc/PID/task/TID/maps or
* the main process stack.
*/
if (!is_pid || (vma->vm_start <= mm->start_stack &&
vma->vm_end >= mm->start_stack))
seq_puts(m, " stack");
else
seq_printf(m, " stack:%d", tid);
}
}
if (is_vm_hugetlb_page(vma))
seq_puts(m, " huge");
walk_page_range(vma->vm_start, vma->vm_end, &walk);
if (!md->pages)
goto out;
if (md->anon)
seq_printf(m, " anon=%lu", md->anon);
if (md->dirty)
seq_printf(m, " dirty=%lu", md->dirty);
if (md->pages != md->anon && md->pages != md->dirty)
seq_printf(m, " mapped=%lu", md->pages);
if (md->mapcount_max > 1)
seq_printf(m, " mapmax=%lu", md->mapcount_max);
if (md->swapcache)
seq_printf(m, " swapcache=%lu", md->swapcache);
if (md->active < md->pages && !is_vm_hugetlb_page(vma))
seq_printf(m, " active=%lu", md->active);
if (md->writeback)
seq_printf(m, " writeback=%lu", md->writeback);
for_each_node_state(nid, N_MEMORY)
if (md->node[nid])
seq_printf(m, " N%d=%lu", nid, md->node[nid]);
out:
seq_putc(m, '\n');
m_cache_vma(m, vma);
return 0;
}
static int show_pid_numa_map(struct seq_file *m, void *v)
{
return show_numa_map(m, v, 1);
}
static int show_tid_numa_map(struct seq_file *m, void *v)
{
return show_numa_map(m, v, 0);
}
static const struct seq_operations proc_pid_numa_maps_op = {
.start = m_start,
.next = m_next,
.stop = m_stop,
.show = show_pid_numa_map,
};
static const struct seq_operations proc_tid_numa_maps_op = {
.start = m_start,
.next = m_next,
.stop = m_stop,
.show = show_tid_numa_map,
};
static int numa_maps_open(struct inode *inode, struct file *file,
const struct seq_operations *ops)
{
return proc_maps_open(inode, file, ops,
sizeof(struct numa_maps_private));
}
static int pid_numa_maps_open(struct inode *inode, struct file *file)
{
return numa_maps_open(inode, file, &proc_pid_numa_maps_op);
}
static int tid_numa_maps_open(struct inode *inode, struct file *file)
{
return numa_maps_open(inode, file, &proc_tid_numa_maps_op);
}
const struct file_operations proc_pid_numa_maps_operations = {
.open = pid_numa_maps_open,
.read = seq_read,
.llseek = seq_lseek,
.release = proc_map_release,
};
const struct file_operations proc_tid_numa_maps_operations = {
.open = tid_numa_maps_open,
.read = seq_read,
.llseek = seq_lseek,
.release = proc_map_release,
};
#endif /* CONFIG_NUMA */