M7350/kernel/include/linux/mempolicy.h

/*
* NUMA memory policies for Linux.
* Copyright 2003,2004 Andi Kleen SuSE Labs
*/
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>

#include <uapi/linux/mempolicy.h>

struct mm_struct;
#ifdef CONFIG_NUMA
/*
* Describe a memory policy.
*
* A mempolicy can be either associated with a process or with a VMA.
* For VMA related allocations the VMA policy is preferred, otherwise
* the process policy is used. Interrupts ignore the memory policy
* of the current process.
*
* Locking policy for interleave:
* In process context there is no locking because only the process accesses
* its own state. All vma manipulation is somewhat protected by a down_read on
* mmap_sem.
*
* Freeing policy:
* Mempolicy objects are reference counted. A mempolicy will be freed when
* mpol_put() decrements the reference count to zero.
*
* Duplicating policy objects:
* mpol_dup() allocates a new mempolicy and copies the specified mempolicy
* to the new storage. The reference count of the new object is initialized
* to 1, representing the caller of mpol_dup().
*/
struct mempolicy {
	atomic_t refcnt;
	unsigned short mode;	/* See MPOL_* above */
	unsigned short flags;	/* See set_mempolicy() MPOL_F_* above */
	union {
		short preferred_node;		/* preferred */
		nodemask_t nodes;		/* interleave/bind */
		/* undefined for default */
	} v;
	union {
		nodemask_t cpuset_mems_allowed;	/* relative to these nodes */
		nodemask_t user_nodemask;	/* nodemask passed by user */
	} w;
};
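
/*
 * Example: a minimal sketch of the refcount lifecycle described above,
 * assuming the caller starts from a policy pointer it is allowed to copy:
 *
 *	struct mempolicy *new = mpol_dup(old);	// new starts with refcnt == 1
 *	if (IS_ERR(new))
 *		return PTR_ERR(new);		// duplication may fail with ERR_PTR
 *	// ... use "new" for allocations ...
 *	mpol_put(new);				// drop the reference; freed at zero
 */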
/*
* Support for managing mempolicy data objects (clone, copy, destroy)
* The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
*/
extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
	if (pol)
		__mpol_put(pol);
}

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
	return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
	if (mpol_needs_cond_ref(pol))
		__mpol_put(pol);
}
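
/*
 * Example: the lookup/use/conditional-drop pattern mpol_cond_put() exists
 * for, sketched on the assumption that the lookup (here the shared-policy
 * lookup declared below) returns an MPOL_F_SHARED policy with an extra
 * reference held:
 *
 *	struct mempolicy *pol = mpol_shared_policy_lookup(sp, idx);
 *	// ... pick node/zonelist based on pol ...
 *	mpol_cond_put(pol);	// drops the ref only for shared policies
 */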
extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_dup(pol);
	return pol;
}

#define vma_policy(vma) ((vma)->vm_policy)

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}

extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return true;
	return __mpol_equal(a, b);
}
/*
* Tree of shared policies for a shared memory region.
* Maintain the policies in a pseudo mm that contains vmas. The vmas
* carry the policy. As a special twist the pseudo mm is indexed in pages, not
* bytes, so that we can work with shared memory segments bigger than
* unsigned long.
*/
struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	spinlock_t lock;
};
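
/*
 * Example: a sketch of how the page-indexed tree above is typically driven
 * by a shmem-style object (assuming "mpol" is an optional initial policy
 * and "pgoff" is an offset into the object in pages):
 *
 *	struct shared_policy sp;
 *
 *	mpol_shared_policy_init(&sp, mpol);		// seed with mpol, if any
 *	// ...
 *	pol = mpol_shared_policy_lookup(&sp, pgoff);	// range keys are in pages
 *	mpol_cond_put(pol);
 *	// ...
 *	mpol_free_shared_policy(&sp);			// tear down all sp_nodes
 */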

int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);

void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);

struct mempolicy *get_task_policy(struct task_struct *p);
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
		unsigned long addr);
bool vma_policy_mof(struct vm_area_struct *vma);

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
				enum mpol_rebind_step step);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask);
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
				const nodemask_t *mask);

extern unsigned int mempolicy_slab_node(void);

extern enum zone_type policy_zone;
static inline void check_highest_zone(enum zone_type k)
{
	if (k > policy_zone && k != ZONE_MOVABLE)
		policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags);

#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, struct mempolicy **mpol);
#endif

extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);

/* Check if a vma is migratable */
static inline int vma_migratable(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
		return 0;

#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
	if (vma->vm_flags & VM_HUGETLB)
		return 0;
#endif

	/*
	 * Migration allocates pages in the highest zone. If we cannot
	 * do so then migration (at least from node to node) is not
	 * possible.
	 */
	if (vma->vm_file &&
		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
								< policy_zone)
		return 0;
	return 1;
}
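
/*
 * Example: a sketch of how a VMA walk would typically consult the check
 * above before queueing pages for node-to-node migration (the helper name
 * is purely illustrative):
 *
 *	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 *		if (!vma_migratable(vma))
 *			continue;			// skip VM_IO/VM_PFNMAP etc.
 *		queue_pages_for_migration(vma);		// hypothetical helper
 *	}
 */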

extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);

#else
struct mempolicy {};

static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return true;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

struct shared_policy {};

static inline void mpol_shared_policy_init(struct shared_policy *sp,
						struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

#define vma_policy(vma) NULL
static inline int
vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
	return 0;
}

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
				const nodemask_t *new,
				enum mpol_rebind_step step)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask)
{
	*mpol = NULL;
	*nodemask = NULL;
	return node_zonelist(0, gfp_flags);
}

static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
{
	return false;
}

static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
				   const nodemask_t *to, int flags)
{
	return 0;
}

static inline void check_highest_zone(int k)
{
}
#ifdef CONFIG_TMPFS

static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
{
	return 1;	/* error */
}
#endif

static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
				 unsigned long address)
{
	return -1; /* no node preference */
}

#endif /* CONFIG_NUMA */
#endif