#ifndef _SPARC64_TLBFLUSH_H
#define _SPARC64_TLBFLUSH_H

#include <asm/mmu_context.h>

/* TSB flush operations. */

#define TLB_BATCH_NR	192
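/*
 * A tlb_batch collects user virtual addresses whose translations have been
 * changed so that their TLB and TSB entries can be shot down in one pass
 * rather than one at a time.  tlb_nr counts the addresses queued in
 * vaddrs[] (the batch is flushed once it reaches TLB_BATCH_NR), and
 * active is non-zero while the batch is being filled under lazy MMU mode.
 */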
struct tlb_batch {
	struct mm_struct *mm;
	unsigned long tlb_nr;
	unsigned long active;
	unsigned long vaddrs[TLB_BATCH_NR];
};
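/*
 * The TSB (Translation Storage Buffer) is the software-managed translation
 * table that the TLB miss handlers consult before walking the page tables,
 * so it must be purged alongside the TLB whenever mappings are removed.
 */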
void flush_tsb_kernel_range(unsigned long start, unsigned long end);
void flush_tsb_user(struct tlb_batch *tb);
void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);

/* TLB flush operations. */
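/*
 * The generic mm/page/range hooks below are intentionally empty: user TLB
 * and TSB flushes on sparc64 are driven from the page table update paths
 * through the tlb_batch / lazy MMU machinery declared further down, not
 * through these entry points.
 */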
static inline void flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end);
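/*
 * sparc64 implements its own lazy MMU mode: arch_enter_lazy_mmu_mode()
 * marks the per-cpu tlb_batch active, subsequent page table updates queue
 * their addresses into it, and arch_leave_lazy_mmu_mode() drains whatever
 * is still pending via flush_tlb_pending().  Rough, illustrative sequence:
 *
 *	arch_enter_lazy_mmu_mode();
 *	...page table updates queue addresses into the batch...
 *	arch_leave_lazy_mmu_mode();	(drains via flush_tlb_pending())
 */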
#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

void flush_tlb_pending(void);
void arch_enter_lazy_mmu_mode(void);
void arch_leave_lazy_mmu_mode(void);
#define arch_flush_lazy_mmu_mode()	do {} while (0)

/* Local cpu only. */
void __flush_tlb_all(void);
void __flush_tlb_page(unsigned long context, unsigned long vaddr);
void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
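/*
 * global_flush_tlb_page() removes one user translation everywhere it may
 * be cached: on UP only the local TLB needs flushing (keyed by the mm's
 * hardware context number), while on SMP the request is cross-called to
 * the other cpus via smp_flush_tlb_page().
 */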
#ifndef CONFIG_SMP

static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
	__flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
}
#else /* CONFIG_SMP */

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);

#define global_flush_tlb_page(mm, vaddr) \
	smp_flush_tlb_page(mm, vaddr)

#endif /* ! CONFIG_SMP */

#endif /* _SPARC64_TLBFLUSH_H */