M7350v1_en_gpl

2024-09-09 08:52:07 +00:00
commit f9cc65cfda
65988 changed files with 26357421 additions and 0 deletions

arch/unicore32/mm/Kconfig

@@ -0,0 +1,50 @@
comment "Processor Type"
# Select CPU types depending on the architecture selected. This selects
# which CPUs we support in the kernel image, and the compiler instruction
# optimiser behaviour.
config CPU_UCV2
def_bool y
comment "Processor Features"
config CPU_ICACHE_DISABLE
bool "Disable I-Cache (I-bit)"
help
Say Y here to disable the processor instruction cache. Unless
you have a reason not to or are unsure, say N.
config CPU_DCACHE_DISABLE
bool "Disable D-Cache (D-bit)"
help
Say Y here to disable the processor data cache. Unless
you have a reason not to or are unsure, say N.
config CPU_DCACHE_WRITETHROUGH
bool "Force write through D-cache"
help
Say Y here to use the data cache in writethrough mode. Unless you
specifically require this or are unsure, say N.
config CPU_DCACHE_LINE_DISABLE
bool "Disable D-cache line ops"
default y
help
Say Y here to disable the data cache line operations.
config CPU_TLB_SINGLE_ENTRY_DISABLE
bool "Disable TLB single entry ops"
default y
help
Say Y here to disable the TLB single entry operations.
config SWIOTLB
def_bool y
config IOMMU_HELPER
def_bool SWIOTLB
config NEED_SG_DMA_LENGTH
def_bool SWIOTLB

arch/unicore32/mm/Makefile

@@ -0,0 +1,15 @@
#
# Makefile for the linux unicore-specific parts of the memory manager.
#
obj-y := extable.o fault.o init.o pgd.o mmu.o
obj-y += flush.o ioremap.o
obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o
obj-$(CONFIG_MODULES) += proc-syms.o
obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o
obj-$(CONFIG_CPU_UCV2) += cache-ucv2.o tlb-ucv2.o proc-ucv2.o

arch/unicore32/mm/alignment.c

@@ -0,0 +1,525 @@
/*
* linux/arch/unicore32/mm/alignment.c
*
* Code specific to PKUnity SoC and UniCore ISA
*
* Copyright (C) 2001-2010 GUAN Xue-tao
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
* TODO:
* FPU ldm/stm not handled yet
*/
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/unaligned.h>
#include "mm.h"
#define CODING_BITS(i) (i & 0xe0000120)
#define LDST_P_BIT(i) (i & (1 << 28)) /* Preindex */
#define LDST_U_BIT(i) (i & (1 << 27)) /* Add offset */
#define LDST_W_BIT(i) (i & (1 << 25)) /* Writeback */
#define LDST_L_BIT(i) (i & (1 << 24)) /* Load */
#define LDST_P_EQ_U(i) ((((i) ^ ((i) >> 1)) & (1 << 27)) == 0)
#define LDSTH_I_BIT(i) (i & (1 << 26)) /* half-word immed */
#define LDM_S_BIT(i) (i & (1 << 26)) /* write ASR from BSR */
#define LDM_H_BIT(i) (i & (1 << 6)) /* select r0-r15 or r16-r31 */
#define RN_BITS(i) ((i >> 19) & 31) /* Rn */
#define RD_BITS(i) ((i >> 14) & 31) /* Rd */
#define RM_BITS(i) (i & 31) /* Rm */
#define REGMASK_BITS(i) (((i & 0x7fe00) >> 3) | (i & 0x3f))
#define OFFSET_BITS(i) (i & 0x03fff)
#define SHIFT_BITS(i) ((i >> 9) & 0x1f)
#define SHIFT_TYPE(i) (i & 0xc0)
#define SHIFT_LSL 0x00
#define SHIFT_LSR 0x40
#define SHIFT_ASR 0x80
#define SHIFT_RORRRX 0xc0
union offset_union {
unsigned long un;
signed long sn;
};
#define TYPE_ERROR 0
#define TYPE_FAULT 1
#define TYPE_LDST 2
#define TYPE_DONE 3
#define TYPE_SWAP 4
#define TYPE_COLS 5 /* Coprocessor load/store */
#define get8_unaligned_check(val, addr, err) \
__asm__( \
"1: ldb.u %1, [%2], #1\n" \
"2:\n" \
" .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %0, #1\n" \
" b 2b\n" \
" .popsection\n" \
" .pushsection __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 3b\n" \
" .popsection\n" \
: "=r" (err), "=&r" (val), "=r" (addr) \
: "0" (err), "2" (addr))
#define get8t_unaligned_check(val, addr, err) \
__asm__( \
"1: ldb.u %1, [%2], #1\n" \
"2:\n" \
" .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %0, #1\n" \
" b 2b\n" \
" .popsection\n" \
" .pushsection __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 3b\n" \
" .popsection\n" \
: "=r" (err), "=&r" (val), "=r" (addr) \
: "0" (err), "2" (addr))
#define get16_unaligned_check(val, addr) \
do { \
unsigned int err = 0, v, a = addr; \
get8_unaligned_check(val, a, err); \
get8_unaligned_check(v, a, err); \
val |= v << 8; \
if (err) \
goto fault; \
} while (0)
#define put16_unaligned_check(val, addr) \
do { \
unsigned int err = 0, v = val, a = addr; \
__asm__( \
"1: stb.u %1, [%2], #1\n" \
" mov %1, %1 >> #8\n" \
"2: stb.u %1, [%2]\n" \
"3:\n" \
" .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
"4: mov %0, #1\n" \
" b 3b\n" \
" .popsection\n" \
" .pushsection __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 4b\n" \
" .long 2b, 4b\n" \
" .popsection\n" \
: "=r" (err), "=&r" (v), "=&r" (a) \
: "0" (err), "1" (v), "2" (a)); \
if (err) \
goto fault; \
} while (0)
#define __put32_unaligned_check(ins, val, addr) \
do { \
unsigned int err = 0, v = val, a = addr; \
__asm__( \
"1: "ins" %1, [%2], #1\n" \
" mov %1, %1 >> #8\n" \
"2: "ins" %1, [%2], #1\n" \
" mov %1, %1 >> #8\n" \
"3: "ins" %1, [%2], #1\n" \
" mov %1, %1 >> #8\n" \
"4: "ins" %1, [%2]\n" \
"5:\n" \
" .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
"6: mov %0, #1\n" \
" b 5b\n" \
" .popsection\n" \
" .pushsection __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 6b\n" \
" .long 2b, 6b\n" \
" .long 3b, 6b\n" \
" .long 4b, 6b\n" \
" .popsection\n" \
: "=r" (err), "=&r" (v), "=&r" (a) \
: "0" (err), "1" (v), "2" (a)); \
if (err) \
goto fault; \
} while (0)
#define get32_unaligned_check(val, addr) \
do { \
unsigned int err = 0, v, a = addr; \
get8_unaligned_check(val, a, err); \
get8_unaligned_check(v, a, err); \
val |= v << 8; \
get8_unaligned_check(v, a, err); \
val |= v << 16; \
get8_unaligned_check(v, a, err); \
val |= v << 24; \
if (err) \
goto fault; \
} while (0)
#define put32_unaligned_check(val, addr) \
__put32_unaligned_check("stb.u", val, addr)
#define get32t_unaligned_check(val, addr) \
do { \
unsigned int err = 0, v, a = addr; \
get8t_unaligned_check(val, a, err); \
get8t_unaligned_check(v, a, err); \
val |= v << 8; \
get8t_unaligned_check(v, a, err); \
val |= v << 16; \
get8t_unaligned_check(v, a, err); \
val |= v << 24; \
if (err) \
goto fault; \
} while (0)
#define put32t_unaligned_check(val, addr) \
__put32_unaligned_check("stb.u", val, addr)
static void
do_alignment_finish_ldst(unsigned long addr, unsigned long instr,
struct pt_regs *regs, union offset_union offset)
{
if (!LDST_U_BIT(instr))
offset.un = -offset.un;
if (!LDST_P_BIT(instr))
addr += offset.un;
if (!LDST_P_BIT(instr) || LDST_W_BIT(instr))
regs->uregs[RN_BITS(instr)] = addr;
}
static int
do_alignment_ldrhstrh(unsigned long addr, unsigned long instr,
struct pt_regs *regs)
{
unsigned int rd = RD_BITS(instr);
/* the old mask value 0x40002120 could not identify the swap instr correctly */
if ((instr & 0x4b003fe0) == 0x40000120)
goto swp;
if (LDST_L_BIT(instr)) {
unsigned long val;
get16_unaligned_check(val, addr);
/* signed half-word? */
if (instr & 0x80)
val = (signed long)((signed short)val);
regs->uregs[rd] = val;
} else
put16_unaligned_check(regs->uregs[rd], addr);
return TYPE_LDST;
swp:
/* only handle swap word;
* a swap byte should not activate this alignment exception */
get32_unaligned_check(regs->uregs[RD_BITS(instr)], addr);
put32_unaligned_check(regs->uregs[RM_BITS(instr)], addr);
return TYPE_SWAP;
fault:
return TYPE_FAULT;
}
static int
do_alignment_ldrstr(unsigned long addr, unsigned long instr,
struct pt_regs *regs)
{
unsigned int rd = RD_BITS(instr);
if (!LDST_P_BIT(instr) && LDST_W_BIT(instr))
goto trans;
if (LDST_L_BIT(instr))
get32_unaligned_check(regs->uregs[rd], addr);
else
put32_unaligned_check(regs->uregs[rd], addr);
return TYPE_LDST;
trans:
if (LDST_L_BIT(instr))
get32t_unaligned_check(regs->uregs[rd], addr);
else
put32t_unaligned_check(regs->uregs[rd], addr);
return TYPE_LDST;
fault:
return TYPE_FAULT;
}
/*
* LDM/STM alignment handler.
*
* There are 4 variants of this instruction:
*
* B = rn pointer before instruction, A = rn pointer after instruction
* ------ increasing address ----->
* | | r0 | r1 | ... | rx | |
* PU = 01 B A
* PU = 11 B A
* PU = 00 A B
* PU = 10 A B
*/
static int
do_alignment_ldmstm(unsigned long addr, unsigned long instr,
struct pt_regs *regs)
{
unsigned int rd, rn, pc_correction, reg_correction, nr_regs, regbits;
unsigned long eaddr, newaddr;
if (LDM_S_BIT(instr))
goto bad;
pc_correction = 4; /* processor implementation defined */
/* count the number of registers in the mask to be transferred */
nr_regs = hweight16(REGMASK_BITS(instr)) * 4;
rn = RN_BITS(instr);
newaddr = eaddr = regs->uregs[rn];
if (!LDST_U_BIT(instr))
nr_regs = -nr_regs;
newaddr += nr_regs;
if (!LDST_U_BIT(instr))
eaddr = newaddr;
if (LDST_P_EQ_U(instr)) /* U = P */
eaddr += 4;
/*
* This is a "hint" - we already have eaddr worked out by the
* processor for us.
*/
if (addr != eaddr) {
printk(KERN_ERR "LDMSTM: PC = %08lx, instr = %08lx, "
"addr = %08lx, eaddr = %08lx\n",
instruction_pointer(regs), instr, addr, eaddr);
show_regs(regs);
}
if (LDM_H_BIT(instr))
reg_correction = 0x10;
else
reg_correction = 0x00;
for (regbits = REGMASK_BITS(instr), rd = 0; regbits;
regbits >>= 1, rd += 1)
if (regbits & 1) {
if (LDST_L_BIT(instr))
get32_unaligned_check(regs->
uregs[rd + reg_correction], eaddr);
else
put32_unaligned_check(regs->
uregs[rd + reg_correction], eaddr);
eaddr += 4;
}
if (LDST_W_BIT(instr))
regs->uregs[rn] = newaddr;
return TYPE_DONE;
fault:
regs->UCreg_pc -= pc_correction;
return TYPE_FAULT;
bad:
printk(KERN_ERR "Alignment trap: not handling ldm with s-bit set\n");
return TYPE_ERROR;
}
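/*
 * Worked example (editorial illustration, not in the original source):
 * for an ldm that transfers r0-r2, REGMASK_BITS(instr) has three bits
 * set, so nr_regs = hweight16(0x7) * 4 = 12 bytes. With the U bit clear
 * the transfer runs below the base, giving newaddr = eaddr - 12, and
 * with P == U the first access is additionally offset by 4 (see the PU
 * diagram above do_alignment_ldmstm).
 */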
static int
do_alignment(unsigned long addr, unsigned int error_code, struct pt_regs *regs)
{
union offset_union offset;
unsigned long instr, instrptr;
int (*handler) (unsigned long addr, unsigned long instr,
struct pt_regs *regs);
unsigned int type;
instrptr = instruction_pointer(regs);
if (instrptr >= PAGE_OFFSET)
instr = *(unsigned long *)instrptr;
else {
__asm__ __volatile__(
"ldw.u %0, [%1]\n"
: "=&r"(instr)
: "r"(instrptr));
}
regs->UCreg_pc += 4;
switch (CODING_BITS(instr)) {
case 0x40000120: /* ldrh or strh */
if (LDSTH_I_BIT(instr))
offset.un = (instr & 0x3e00) >> 4 | (instr & 31);
else
offset.un = regs->uregs[RM_BITS(instr)];
handler = do_alignment_ldrhstrh;
break;
case 0x60000000: /* ldr or str immediate */
case 0x60000100: /* ldr or str immediate */
case 0x60000020: /* ldr or str immediate */
case 0x60000120: /* ldr or str immediate */
offset.un = OFFSET_BITS(instr);
handler = do_alignment_ldrstr;
break;
case 0x40000000: /* ldr or str register */
offset.un = regs->uregs[RM_BITS(instr)];
{
unsigned int shiftval = SHIFT_BITS(instr);
switch (SHIFT_TYPE(instr)) {
case SHIFT_LSL:
offset.un <<= shiftval;
break;
case SHIFT_LSR:
offset.un >>= shiftval;
break;
case SHIFT_ASR:
offset.sn >>= shiftval;
break;
case SHIFT_RORRRX:
if (shiftval == 0) {
offset.un >>= 1;
if (regs->UCreg_asr & PSR_C_BIT)
offset.un |= 1 << 31;
} else
offset.un = offset.un >> shiftval |
offset.un << (32 - shiftval);
break;
}
}
handler = do_alignment_ldrstr;
break;
case 0x80000000: /* ldm or stm */
case 0x80000020: /* ldm or stm */
handler = do_alignment_ldmstm;
break;
default:
goto bad;
}
type = handler(addr, instr, regs);
if (type == TYPE_ERROR || type == TYPE_FAULT)
goto bad_or_fault;
if (type == TYPE_LDST)
do_alignment_finish_ldst(addr, instr, regs, offset);
return 0;
bad_or_fault:
if (type == TYPE_ERROR)
goto bad;
regs->UCreg_pc -= 4;
/*
* We got a fault - fix it up, or die.
*/
do_bad_area(addr, error_code, regs);
return 0;
bad:
/*
* Oops, we didn't handle the instruction.
* However, we must handle FPU instructions first.
*/
#ifdef CONFIG_UNICORE_FPU_F64
/* handle co.load/store */
#define CODING_COLS 0xc0000000
#define COLS_OFFSET_BITS(i) (i & 0x1FF)
#define COLS_L_BITS(i) (i & (1<<24))
#define COLS_FN_BITS(i) ((i>>14) & 31)
if ((instr & 0xe0000000) == CODING_COLS) {
unsigned int fn = COLS_FN_BITS(instr);
unsigned long val = 0;
if (COLS_L_BITS(instr)) {
get32t_unaligned_check(val, addr);
switch (fn) {
#define ASM_MTF(n) case n: \
__asm__ __volatile__("MTF %0, F" __stringify(n) \
: : "r"(val)); \
break;
ASM_MTF(0); ASM_MTF(1); ASM_MTF(2); ASM_MTF(3);
ASM_MTF(4); ASM_MTF(5); ASM_MTF(6); ASM_MTF(7);
ASM_MTF(8); ASM_MTF(9); ASM_MTF(10); ASM_MTF(11);
ASM_MTF(12); ASM_MTF(13); ASM_MTF(14); ASM_MTF(15);
ASM_MTF(16); ASM_MTF(17); ASM_MTF(18); ASM_MTF(19);
ASM_MTF(20); ASM_MTF(21); ASM_MTF(22); ASM_MTF(23);
ASM_MTF(24); ASM_MTF(25); ASM_MTF(26); ASM_MTF(27);
ASM_MTF(28); ASM_MTF(29); ASM_MTF(30); ASM_MTF(31);
#undef ASM_MTF
}
} else {
switch (fn) {
/* MFF reads FPU register Fn into val, so val must be an output operand */
#define ASM_MFF(n) case n: \
__asm__ __volatile__("MFF %0, F" __stringify(n) \
: "=r"(val)); \
break;
ASM_MFF(0); ASM_MFF(1); ASM_MFF(2); ASM_MFF(3);
ASM_MFF(4); ASM_MFF(5); ASM_MFF(6); ASM_MFF(7);
ASM_MFF(8); ASM_MFF(9); ASM_MFF(10); ASM_MFF(11);
ASM_MFF(12); ASM_MFF(13); ASM_MFF(14); ASM_MFF(15);
ASM_MFF(16); ASM_MFF(17); ASM_MFF(18); ASM_MFF(19);
ASM_MFF(20); ASM_MFF(21); ASM_MFF(22); ASM_MFF(23);
ASM_MFF(24); ASM_MFF(25); ASM_MFF(26); ASM_MFF(27);
ASM_MFF(28); ASM_MFF(29); ASM_MFF(30); ASM_MFF(31);
#undef ASM_MFF
}
put32t_unaligned_check(val, addr);
}
return TYPE_COLS;
}
fault:
return TYPE_FAULT;
#endif
printk(KERN_ERR "Alignment trap: not handling instruction "
"%08lx at [<%08lx>]\n", instr, instrptr);
return 1;
}
/*
* This needs to be done after sysctl_init, otherwise sys/ will be
* overwritten. Actually, this shouldn't be in sys/ at all since
* it isn't a sysctl, and it doesn't contain sysctl information.
*/
static int __init alignment_init(void)
{
hook_fault_code(1, do_alignment, SIGBUS, BUS_ADRALN,
"alignment exception");
return 0;
}
fs_initcall(alignment_init);

arch/unicore32/mm/cache-ucv2.S

@@ -0,0 +1,212 @@
/*
* linux/arch/unicore32/mm/cache-ucv2.S
*
* Code specific to PKUnity SoC and UniCore ISA
*
* Copyright (C) 2001-2010 GUAN Xue-tao
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This is the "shell" of the UniCore-v2 processor support.
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include "proc-macros.S"
/*
* __cpuc_flush_icache_all()
* __cpuc_flush_kern_all()
* __cpuc_flush_user_all()
*
* Flush the entire cache.
*/
ENTRY(__cpuc_flush_icache_all)
/*FALLTHROUGH*/
ENTRY(__cpuc_flush_kern_all)
/*FALLTHROUGH*/
ENTRY(__cpuc_flush_user_all)
mov r0, #0
movc p0.c5, r0, #14 @ Dcache flush all
nop8
mov r0, #0
movc p0.c5, r0, #20 @ Icache invalidate all
nop8
mov pc, lr
/*
* __cpuc_flush_user_range(start, end, flags)
*
* Flush a range of cache entries in the specified address space.
*
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
* - flags - vm_area_struct flags describing address space
*/
ENTRY(__cpuc_flush_user_range)
cxor.a r2, #0
beq __cpuc_dma_flush_range
#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
andn r0, r0, #CACHE_LINESIZE - 1 @ Safety check
sub r1, r1, r0
csub.a r1, #MAX_AREA_SIZE
bsg 2f
andn r1, r1, #CACHE_LINESIZE - 1
add r1, r1, #CACHE_LINESIZE
101: dcacheline_flush r0, r11, r12
add r0, r0, #CACHE_LINESIZE
sub.a r1, r1, #CACHE_LINESIZE
bns 101b
b 3f
#endif
2: mov ip, #0
movc p0.c5, ip, #14 @ Dcache flush all
nop8
3: mov ip, #0
movc p0.c5, ip, #20 @ Icache invalidate all
nop8
mov pc, lr
/*
* __cpuc_coherent_kern_range(start,end)
* __cpuc_coherent_user_range(start,end)
*
* Ensure that the I and D caches are coherent within specified
* region. This is typically used when code has been written to
* a memory region, and will be executed.
*
* - start - virtual start address of region
* - end - virtual end address of region
*/
ENTRY(__cpuc_coherent_kern_range)
/* FALLTHROUGH */
ENTRY(__cpuc_coherent_user_range)
#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
andn r0, r0, #CACHE_LINESIZE - 1 @ Safety check
sub r1, r1, r0
csub.a r1, #MAX_AREA_SIZE
bsg 2f
andn r1, r1, #CACHE_LINESIZE - 1
add r1, r1, #CACHE_LINESIZE
@ r0 va2pa r10
mov r9, #PAGE_SZ
sub r9, r9, #1 @ PAGE_MASK
101: va2pa r0, r10, r11, r12, r13, 2f @ r10 is PA
b 103f
102: cand.a r0, r9
beq 101b
103: movc p0.c5, r10, #11 @ Dcache clean line of R10
nop8
add r0, r0, #CACHE_LINESIZE
add r10, r10, #CACHE_LINESIZE
sub.a r1, r1, #CACHE_LINESIZE
bns 102b
b 3f
#endif
2: mov ip, #0
movc p0.c5, ip, #10 @ Dcache clean all
nop8
3: mov ip, #0
movc p0.c5, ip, #20 @ Icache invalidate all
nop8
mov pc, lr
/*
* __cpuc_flush_kern_dcache_area(void *addr, size_t size)
*
* - addr - kernel address
* - size - region size
*/
ENTRY(__cpuc_flush_kern_dcache_area)
mov ip, #0
movc p0.c5, ip, #14 @ Dcache flush all
nop8
mov pc, lr
/*
* __cpuc_dma_clean_range(start,end)
* - start - virtual start address of region
* - end - virtual end address of region
*/
ENTRY(__cpuc_dma_clean_range)
#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
andn r0, r0, #CACHE_LINESIZE - 1
sub r1, r1, r0
andn r1, r1, #CACHE_LINESIZE - 1
add r1, r1, #CACHE_LINESIZE
csub.a r1, #MAX_AREA_SIZE
bsg 2f
@ r0 va2pa r10
mov r9, #PAGE_SZ
sub r9, r9, #1 @ PAGE_MASK
101: va2pa r0, r10, r11, r12, r13, 2f @ r10 is PA
b 1f
102: cand.a r0, r9
beq 101b
1: movc p0.c5, r10, #11 @ Dcache clean line of R10
nop8
add r0, r0, #CACHE_LINESIZE
add r10, r10, #CACHE_LINESIZE
sub.a r1, r1, #CACHE_LINESIZE
bns 102b
mov pc, lr
#endif
2: mov ip, #0
movc p0.c5, ip, #10 @ Dcache clean all
nop8
mov pc, lr
/*
* __cpuc_dma_inv_range(start,end)
* __cpuc_dma_flush_range(start,end)
* - start - virtual start address of region
* - end - virtual end address of region
*/
__cpuc_dma_inv_range:
/* FALLTHROUGH */
ENTRY(__cpuc_dma_flush_range)
#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
andn r0, r0, #CACHE_LINESIZE - 1
sub r1, r1, r0
andn r1, r1, #CACHE_LINESIZE - 1
add r1, r1, #CACHE_LINESIZE
csub.a r1, #MAX_AREA_SIZE
bsg 2f
@ r0 va2pa r10
101: dcacheline_flush r0, r11, r12
add r0, r0, #CACHE_LINESIZE
sub.a r1, r1, #CACHE_LINESIZE
bns 101b
mov pc, lr
#endif
2: mov ip, #0
movc p0.c5, ip, #14 @ Dcache flush all
nop8
mov pc, lr

arch/unicore32/mm/dma-swiotlb.c

@@ -0,0 +1,48 @@
/*
* Contains routines needed to support swiotlb for UniCore32.
*
* Copyright (C) 2010 Guan Xuetao
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/pci.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>
#include <linux/bootmem.h>
#include <asm/dma.h>
static void *unicore_swiotlb_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
struct dma_attrs *attrs)
{
return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
}
static void unicore_swiotlb_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_addr,
struct dma_attrs *attrs)
{
swiotlb_free_coherent(dev, size, vaddr, dma_addr);
}
struct dma_map_ops swiotlb_dma_map_ops = {
.alloc = unicore_swiotlb_alloc_coherent,
.free = unicore_swiotlb_free_coherent,
.map_sg = swiotlb_map_sg_attrs,
.unmap_sg = swiotlb_unmap_sg_attrs,
.dma_supported = swiotlb_dma_supported,
.map_page = swiotlb_map_page,
.unmap_page = swiotlb_unmap_page,
.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
.sync_single_for_device = swiotlb_sync_single_for_device,
.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
.sync_sg_for_device = swiotlb_sync_sg_for_device,
.mapping_error = swiotlb_dma_mapping_error,
};
EXPORT_SYMBOL(swiotlb_dma_map_ops);
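/*
 * Minimal usage sketch (editorial illustration, not part of the original
 * file): a driver allocates a DMA-coherent buffer through the generic DMA
 * API, which dispatches to the .alloc/.free hooks above. The `dev` pointer
 * is assumed to come from a hypothetical driver's probe() routine.
 */
static int __maybe_unused example_dma_usage(struct device *dev)
{
	dma_addr_t handle;
	void *cpu = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);

	if (!cpu)
		return -ENOMEM;
	/* CPU accesses go through `cpu`; the device is handed `handle` */
	dma_free_coherent(dev, PAGE_SIZE, cpu, handle);
	return 0;
}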

arch/unicore32/mm/extable.c

@@ -0,0 +1,24 @@
/*
* linux/arch/unicore32/mm/extable.c
*
* Code specific to PKUnity SoC and UniCore ISA
*
* Copyright (C) 2001-2010 GUAN Xue-tao
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/uaccess.h>
int fixup_exception(struct pt_regs *regs)
{
const struct exception_table_entry *fixup;
fixup = search_exception_tables(instruction_pointer(regs));
if (fixup)
regs->UCreg_pc = fixup->fixup;
return fixup != NULL;
}
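/*
 * Editorial note: this is the consumer side of the __ex_table entries
 * emitted by the unaligned-access macros in alignment.c. Each
 * ".long 1b, 3b" pair there records (faulting insn, fixup stub); when
 * such an insn faults, the code above rewrites the PC to the stub,
 * which sets the macro's `err` flag and resumes after the access.
 */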

arch/unicore32/mm/fault.c

@@ -0,0 +1,478 @@
/*
* linux/arch/unicore32/mm/fault.c
*
* Code specific to PKUnity SoC and UniCore ISA
*
* Copyright (C) 2001-2010 GUAN Xue-tao
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/page-flags.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
/*
* Fault status register encodings. We steal bit 31 for our own purposes.
*/
#define FSR_LNX_PF (1 << 31)
static inline int fsr_fs(unsigned int fsr)
{
/* fold the 7-bit status xyabcde to abcde + xy */
return (fsr & 31) + ((fsr & (3 << 5)) >> 5);
}
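/*
 * Example (editorial): fsr = 0b01_00101 gives (fsr & 31) = 5 plus
 * xy = 1, so fsr_fs() = 6, which indexes the fsr_info[] table below.
 */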
/*
* This is useful to dump out the page tables associated with
* 'addr' in mm 'mm'.
*/
void show_pte(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
if (!mm)
mm = &init_mm;
printk(KERN_ALERT "pgd = %p\n", mm->pgd);
pgd = pgd_offset(mm, addr);
printk(KERN_ALERT "[%08lx] *pgd=%08lx", addr, pgd_val(*pgd));
do {
pmd_t *pmd;
pte_t *pte;
if (pgd_none(*pgd))
break;
if (pgd_bad(*pgd)) {
printk("(bad)");
break;
}
pmd = pmd_offset((pud_t *) pgd, addr);
if (PTRS_PER_PMD != 1)
printk(", *pmd=%08lx", pmd_val(*pmd));
if (pmd_none(*pmd))
break;
if (pmd_bad(*pmd)) {
printk("(bad)");
break;
}
/* We must not map this if we have highmem enabled */
if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
break;
pte = pte_offset_map(pmd, addr);
printk(", *pte=%08lx", pte_val(*pte));
pte_unmap(pte);
} while (0);
printk("\n");
}
/*
* Oops. The kernel tried to access some page that wasn't present.
*/
static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr,
unsigned int fsr, struct pt_regs *regs)
{
/*
* Are we prepared to handle this kernel fault?
*/
if (fixup_exception(regs))
return;
/*
* No handler, we'll have to terminate things with extreme prejudice.
*/
bust_spinlocks(1);
printk(KERN_ALERT
"Unable to handle kernel %s at virtual address %08lx\n",
(addr < PAGE_SIZE) ? "NULL pointer dereference" :
"paging request", addr);
show_pte(mm, addr);
die("Oops", regs, fsr);
bust_spinlocks(0);
do_exit(SIGKILL);
}
/*
* Something tried to access memory that isn't in our memory map..
* User mode accesses just cause a SIGSEGV
*/
static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
unsigned int fsr, unsigned int sig, int code,
struct pt_regs *regs)
{
struct siginfo si;
tsk->thread.address = addr;
tsk->thread.error_code = fsr;
tsk->thread.trap_no = 14;
si.si_signo = sig;
si.si_errno = 0;
si.si_code = code;
si.si_addr = (void __user *)addr;
force_sig_info(sig, &si, tsk);
}
void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->active_mm;
/*
* If we are in kernel mode at this point, we
* have no context to handle this fault with.
*/
if (user_mode(regs))
__do_user_fault(tsk, addr, fsr, SIGSEGV, SEGV_MAPERR, regs);
else
__do_kernel_fault(mm, addr, fsr, regs);
}
#define VM_FAULT_BADMAP 0x010000
#define VM_FAULT_BADACCESS 0x020000
/*
* Check that the permissions on the VMA allow for the fault which occurred.
* If we encountered a write fault, we must have write permission, otherwise
* we allow any permission.
*/
static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
{
unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
if (!(fsr ^ 0x12)) /* FSR == 0x12: write fault, see fsr_info[18] */
mask = VM_WRITE;
if (fsr & FSR_LNX_PF)
mask = VM_EXEC;
return vma->vm_flags & mask ? false : true;
}
static int __do_pf(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
struct task_struct *tsk)
{
struct vm_area_struct *vma;
int fault;
vma = find_vma(mm, addr);
fault = VM_FAULT_BADMAP;
if (unlikely(!vma))
goto out;
if (unlikely(vma->vm_start > addr))
goto check_stack;
/*
* Ok, we have a good vm_area for this
* memory access, so we can handle it.
*/
good_area:
if (access_error(fsr, vma)) {
fault = VM_FAULT_BADACCESS;
goto out;
}
/*
* If for any reason at all we couldn't handle the fault, make
* sure we exit gracefully rather than endlessly redo the fault.
*/
fault = handle_mm_fault(mm, vma, addr & PAGE_MASK,
(!(fsr ^ 0x12)) ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR))
return fault;
if (fault & VM_FAULT_MAJOR)
tsk->maj_flt++;
else
tsk->min_flt++;
return fault;
check_stack:
if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
goto good_area;
out:
return fault;
}
static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
struct task_struct *tsk;
struct mm_struct *mm;
int fault, sig, code;
tsk = current;
mm = tsk->mm;
/*
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
if (in_atomic() || !mm)
goto no_context;
/*
* As per x86, we may deadlock here. However, since the kernel only
* validly references user space from well defined areas of the code,
* we can bug out early if this is from code which shouldn't.
*/
if (!down_read_trylock(&mm->mmap_sem)) {
if (!user_mode(regs)
&& !search_exception_tables(regs->UCreg_pc))
goto no_context;
down_read(&mm->mmap_sem);
} else {
/*
* The above down_read_trylock() might have succeeded in
* which case, we'll have missed the might_sleep() from
* down_read()
*/
might_sleep();
#ifdef CONFIG_DEBUG_VM
if (!user_mode(regs) &&
!search_exception_tables(regs->UCreg_pc))
goto no_context;
#endif
}
fault = __do_pf(mm, addr, fsr, tsk);
up_read(&mm->mmap_sem);
/*
* Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
*/
if (likely(!(fault &
(VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
return 0;
if (fault & VM_FAULT_OOM) {
/*
* We ran out of memory, call the OOM killer, and return to
* userspace (which will retry the fault, or kill us if we
* got oom-killed)
*/
pagefault_out_of_memory();
return 0;
}
/*
* If we are in kernel mode at this point, we
* have no context to handle this fault with.
*/
if (!user_mode(regs))
goto no_context;
if (fault & VM_FAULT_SIGBUS) {
/*
* We had some memory, but were unable to
* successfully fix up this page fault.
*/
sig = SIGBUS;
code = BUS_ADRERR;
} else {
/*
* Something tried to access memory that
* isn't in our memory map..
*/
sig = SIGSEGV;
code = fault == VM_FAULT_BADACCESS ? SEGV_ACCERR : SEGV_MAPERR;
}
__do_user_fault(tsk, addr, fsr, sig, code, regs);
return 0;
no_context:
__do_kernel_fault(mm, addr, fsr, regs);
return 0;
}
/*
* First Level Translation Fault Handler
*
* We enter here because the first level page table doesn't contain
* a valid entry for the address.
*
* If the address is in kernel space (>= TASK_SIZE), then we are
* probably faulting in the vmalloc() area.
*
* If the init_task's first level page table contains the relevant
* entry, we copy it to this task. If not, we send the process
* a signal, fixup the exception, or oops the kernel.
*
* NOTE! We MUST NOT take any locks for this case. We may be in an
* interrupt or a critical region, and should only copy the information
* from the master page table, nothing more.
*/
static int do_ifault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
unsigned int index;
pgd_t *pgd, *pgd_k;
pmd_t *pmd, *pmd_k;
if (addr < TASK_SIZE)
return do_pf(addr, fsr, regs);
if (user_mode(regs))
goto bad_area;
index = pgd_index(addr);
pgd = cpu_get_pgd() + index;
pgd_k = init_mm.pgd + index;
if (pgd_none(*pgd_k))
goto bad_area;
pmd_k = pmd_offset((pud_t *) pgd_k, addr);
pmd = pmd_offset((pud_t *) pgd, addr);
if (pmd_none(*pmd_k))
goto bad_area;
set_pmd(pmd, *pmd_k);
flush_pmd_entry(pmd);
return 0;
bad_area:
do_bad_area(addr, fsr, regs);
return 0;
}
/*
* This abort handler always returns "fault".
*/
static int do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
return 1;
}
static int do_good(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
unsigned int res1, res2;
printk("dabt exception but no error!\n");
__asm__ __volatile__(
"mff %0,f0\n"
"mff %1,f1\n"
: "=r"(res1), "=r"(res2)
:
: "memory");
printk(KERN_EMERG "r0 :%08x r1 :%08x\n", res1, res2);
panic("shut up\n");
return 0;
}
static struct fsr_info {
int (*fn) (unsigned long addr, unsigned int fsr, struct pt_regs *regs);
int sig;
int code;
const char *name;
} fsr_info[] = {
/*
* The following are the standard Unicore-I and UniCore-II aborts.
*/
{ do_good, SIGBUS, 0, "no error" },
{ do_bad, SIGBUS, BUS_ADRALN, "alignment exception" },
{ do_bad, SIGBUS, BUS_OBJERR, "external exception" },
{ do_bad, SIGBUS, 0, "burst operation" },
{ do_bad, SIGBUS, 0, "unknown 00100" },
{ do_ifault, SIGSEGV, SEGV_MAPERR, "2nd level pt non-exist"},
{ do_bad, SIGBUS, 0, "2nd lvl large pt non-exist" },
{ do_bad, SIGBUS, 0, "invalid pte" },
{ do_pf, SIGSEGV, SEGV_MAPERR, "page miss" },
{ do_bad, SIGBUS, 0, "middle page miss" },
{ do_bad, SIGBUS, 0, "large page miss" },
{ do_pf, SIGSEGV, SEGV_MAPERR, "super page (section) miss" },
{ do_bad, SIGBUS, 0, "unknown 01100" },
{ do_bad, SIGBUS, 0, "unknown 01101" },
{ do_bad, SIGBUS, 0, "unknown 01110" },
{ do_bad, SIGBUS, 0, "unknown 01111" },
{ do_bad, SIGBUS, 0, "addr: up 3G or IO" },
{ do_pf, SIGSEGV, SEGV_ACCERR, "read unreadable addr" },
{ do_pf, SIGSEGV, SEGV_ACCERR, "write unwriteable addr"},
{ do_pf, SIGSEGV, SEGV_ACCERR, "exec unexecutable addr"},
{ do_bad, SIGBUS, 0, "unknown 10100" },
{ do_bad, SIGBUS, 0, "unknown 10101" },
{ do_bad, SIGBUS, 0, "unknown 10110" },
{ do_bad, SIGBUS, 0, "unknown 10111" },
{ do_bad, SIGBUS, 0, "unknown 11000" },
{ do_bad, SIGBUS, 0, "unknown 11001" },
{ do_bad, SIGBUS, 0, "unknown 11010" },
{ do_bad, SIGBUS, 0, "unknown 11011" },
{ do_bad, SIGBUS, 0, "unknown 11100" },
{ do_bad, SIGBUS, 0, "unknown 11101" },
{ do_bad, SIGBUS, 0, "unknown 11110" },
{ do_bad, SIGBUS, 0, "unknown 11111" }
};
void __init hook_fault_code(int nr,
int (*fn) (unsigned long, unsigned int, struct pt_regs *),
int sig, int code, const char *name)
{
if (nr < 0 || nr >= ARRAY_SIZE(fsr_info))
BUG();
fsr_info[nr].fn = fn;
fsr_info[nr].sig = sig;
fsr_info[nr].code = code;
fsr_info[nr].name = name;
}
/*
* Dispatch a data abort to the relevant handler.
*/
asmlinkage void do_DataAbort(unsigned long addr, unsigned int fsr,
struct pt_regs *regs)
{
const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
struct siginfo info;
if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
return;
printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
inf->name, fsr, addr);
info.si_signo = inf->sig;
info.si_errno = 0;
info.si_code = inf->code;
info.si_addr = (void __user *)addr;
uc32_notify_die("", regs, &info, fsr, 0);
}
asmlinkage void do_PrefetchAbort(unsigned long addr,
unsigned int ifsr, struct pt_regs *regs)
{
const struct fsr_info *inf = fsr_info + fsr_fs(ifsr);
struct siginfo info;
if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
return;
printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
inf->name, ifsr, addr);
info.si_signo = inf->sig;
info.si_errno = 0;
info.si_code = inf->code;
info.si_addr = (void __user *)addr;
uc32_notify_die("", regs, &info, ifsr, 0);
}

arch/unicore32/mm/flush.c

@@ -0,0 +1,97 @@
/*
* linux/arch/unicore32/mm/flush.c
*
* Code specific to PKUnity SoC and UniCore ISA
*
* Copyright (C) 2001-2010 GUAN Xue-tao
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
void flush_cache_mm(struct mm_struct *mm)
{
}
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
if (vma->vm_flags & VM_EXEC)
__flush_icache_all();
}
void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr,
unsigned long pfn)
{
}
static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
unsigned long uaddr, void *kaddr, unsigned long len)
{
/* VIPT non-aliasing D-cache */
if (vma->vm_flags & VM_EXEC) {
unsigned long addr = (unsigned long)kaddr;
__cpuc_coherent_kern_range(addr, addr + len);
}
}
/*
* Copy user data from/to a page which is mapped into a different
* process's address space. Really, we want to allow our "user
* space" model to handle this.
*
* Note that this code needs to run on the current CPU.
*/
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long uaddr, void *dst, const void *src,
unsigned long len)
{
memcpy(dst, src, len);
flush_ptrace_access(vma, page, uaddr, dst, len);
}
void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
/*
* Writeback any data associated with the kernel mapping of this
* page. This ensures that data in the physical page is mutually
* coherent with the kernel's mapping.
*/
__cpuc_flush_kern_dcache_area(page_address(page), PAGE_SIZE);
}
/*
* Ensure cache coherency between kernel mapping and userspace mapping
* of this page.
*/
void flush_dcache_page(struct page *page)
{
struct address_space *mapping;
/*
* The zero page is never written to, so never has any dirty
* cache lines, and therefore never needs to be flushed.
*/
if (page == ZERO_PAGE(0))
return;
mapping = page_mapping(page);
if (mapping && !mapping_mapped(mapping))
clear_bit(PG_dcache_clean, &page->flags);
else {
__flush_dcache_page(mapping, page);
if (mapping)
__flush_icache_all();
set_bit(PG_dcache_clean, &page->flags);
}
}
EXPORT_SYMBOL(flush_dcache_page);

arch/unicore32/mm/init.c

@@ -0,0 +1,518 @@
/*
* linux/arch/unicore32/mm/init.c
*
* Copyright (C) 2010 GUAN Xue-tao
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <mach/map.h>
#include "mm.h"
static unsigned long phys_initrd_start __initdata = 0x01000000;
static unsigned long phys_initrd_size __initdata = SZ_8M;
static int __init early_initrd(char *p)
{
unsigned long start, size;
char *endp;
start = memparse(p, &endp);
if (*endp == ',') {
size = memparse(endp + 1, NULL);
phys_initrd_start = start;
phys_initrd_size = size;
}
return 0;
}
early_param("initrd", early_initrd);
/*
* This keeps memory configuration data used by a couple of memory
* initialization functions, as well as show_mem() for the skipping
* of holes in the memory map. It is populated by uc32_add_memory().
*/
struct meminfo meminfo;
void show_mem(unsigned int filter)
{
int free = 0, total = 0, reserved = 0;
int shared = 0, cached = 0, slab = 0, i;
struct meminfo *mi = &meminfo;
printk(KERN_DEFAULT "Mem-info:\n");
show_free_areas(filter);
for_each_bank(i, mi) {
struct membank *bank = &mi->bank[i];
unsigned int pfn1, pfn2;
struct page *page, *end;
pfn1 = bank_pfn_start(bank);
pfn2 = bank_pfn_end(bank);
page = pfn_to_page(pfn1);
end = pfn_to_page(pfn2 - 1) + 1;
do {
total++;
if (PageReserved(page))
reserved++;
else if (PageSwapCache(page))
cached++;
else if (PageSlab(page))
slab++;
else if (!page_count(page))
free++;
else
shared += page_count(page) - 1;
page++;
} while (page < end);
}
printk(KERN_DEFAULT "%d pages of RAM\n", total);
printk(KERN_DEFAULT "%d free pages\n", free);
printk(KERN_DEFAULT "%d reserved pages\n", reserved);
printk(KERN_DEFAULT "%d slab pages\n", slab);
printk(KERN_DEFAULT "%d pages shared\n", shared);
printk(KERN_DEFAULT "%d pages swap cached\n", cached);
}
static void __init find_limits(unsigned long *min, unsigned long *max_low,
unsigned long *max_high)
{
struct meminfo *mi = &meminfo;
int i;
*min = -1UL;
*max_low = *max_high = 0;
for_each_bank(i, mi) {
struct membank *bank = &mi->bank[i];
unsigned long start, end;
start = bank_pfn_start(bank);
end = bank_pfn_end(bank);
if (*min > start)
*min = start;
if (*max_high < end)
*max_high = end;
if (bank->highmem)
continue;
if (*max_low < end)
*max_low = end;
}
}
static void __init uc32_bootmem_init(unsigned long start_pfn,
unsigned long end_pfn)
{
struct memblock_region *reg;
unsigned int boot_pages;
phys_addr_t bitmap;
pg_data_t *pgdat;
/*
* Allocate the bootmem bitmap page. This must be in a region
* of memory which has already been mapped.
*/
boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
__pfn_to_phys(end_pfn));
/*
* Initialise the bootmem allocator, handing the
* memory banks over to bootmem.
*/
node_set_online(0);
pgdat = NODE_DATA(0);
init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);
/* Free the lowmem regions from memblock into bootmem. */
for_each_memblock(memory, reg) {
unsigned long start = memblock_region_memory_base_pfn(reg);
unsigned long end = memblock_region_memory_end_pfn(reg);
if (end >= end_pfn)
end = end_pfn;
if (start >= end)
break;
free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
}
/* Reserve the lowmem memblock reserved regions in bootmem. */
for_each_memblock(reserved, reg) {
unsigned long start = memblock_region_reserved_base_pfn(reg);
unsigned long end = memblock_region_reserved_end_pfn(reg);
if (end >= end_pfn)
end = end_pfn;
if (start >= end)
break;
reserve_bootmem(__pfn_to_phys(start),
(end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);
}
}
static void __init uc32_bootmem_free(unsigned long min, unsigned long max_low,
unsigned long max_high)
{
unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
struct memblock_region *reg;
/*
* initialise the zones.
*/
memset(zone_size, 0, sizeof(zone_size));
/*
* The memory size has already been determined. If we need
* to do anything fancy with the allocation of this memory
* to the zones, now is the time to do it.
*/
zone_size[0] = max_low - min;
/*
* Calculate the size of the holes.
* holes = node_size - sum(bank_sizes)
*/
memcpy(zhole_size, zone_size, sizeof(zhole_size));
for_each_memblock(memory, reg) {
unsigned long start = memblock_region_memory_base_pfn(reg);
unsigned long end = memblock_region_memory_end_pfn(reg);
if (start < max_low) {
unsigned long low_end = min(end, max_low);
zhole_size[0] -= low_end - start;
}
}
/*
* Adjust the sizes according to any special requirements for
* this machine type.
*/
arch_adjust_zones(zone_size, zhole_size);
free_area_init_node(0, zone_size, min, zhole_size);
}
int pfn_valid(unsigned long pfn)
{
return memblock_is_memory(pfn << PAGE_SHIFT);
}
EXPORT_SYMBOL(pfn_valid);
static void uc32_memory_present(void)
{
}
static int __init meminfo_cmp(const void *_a, const void *_b)
{
const struct membank *a = _a, *b = _b;
long cmp = bank_pfn_start(a) - bank_pfn_start(b);
return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}
void __init uc32_memblock_init(struct meminfo *mi)
{
int i;
sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]),
meminfo_cmp, NULL);
for (i = 0; i < mi->nr_banks; i++)
memblock_add(mi->bank[i].start, mi->bank[i].size);
/* Register the kernel text, kernel data and initrd with memblock. */
memblock_reserve(__pa(_text), _end - _text);
#ifdef CONFIG_BLK_DEV_INITRD
if (phys_initrd_size) {
memblock_reserve(phys_initrd_start, phys_initrd_size);
/* Now convert initrd to virtual addresses */
initrd_start = __phys_to_virt(phys_initrd_start);
initrd_end = initrd_start + phys_initrd_size;
}
#endif
uc32_mm_memblock_reserve();
memblock_allow_resize();
memblock_dump_all();
}
void __init bootmem_init(void)
{
unsigned long min, max_low, max_high;
max_low = max_high = 0;
find_limits(&min, &max_low, &max_high);
uc32_bootmem_init(min, max_low);
#ifdef CONFIG_SWIOTLB
swiotlb_init(1);
#endif
/*
* Sparsemem tries to allocate bootmem in memory_present(),
* so must be done after the fixed reservations
*/
uc32_memory_present();
/*
* sparse_init() needs the bootmem allocator up and running.
*/
sparse_init();
/*
* Now free the memory - free_area_init_node needs
* the sparse mem_map arrays initialized by sparse_init()
* for memmap_init_zone(), otherwise all PFNs are invalid.
*/
uc32_bootmem_free(min, max_low, max_high);
high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;
/*
* This doesn't seem to be used by the Linux memory manager any
* more, but is used by ll_rw_block. If we can get rid of it, we
* also get rid of some of the stuff above as well.
*
* Note: max_low_pfn and max_pfn reflect the number of _pages_ in
* the system, not the maximum PFN.
*/
max_low_pfn = max_low - PHYS_PFN_OFFSET;
max_pfn = max_high - PHYS_PFN_OFFSET;
}
static inline int free_area(unsigned long pfn, unsigned long end, char *s)
{
unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);
for (; pfn < end; pfn++) {
struct page *page = pfn_to_page(pfn);
ClearPageReserved(page);
init_page_count(page);
__free_page(page);
pages++;
}
if (size && s)
printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
return pages;
}
static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
struct page *start_pg, *end_pg;
unsigned long pg, pgend;
/*
* Convert start_pfn/end_pfn to a struct page pointer.
*/
start_pg = pfn_to_page(start_pfn - 1) + 1;
end_pg = pfn_to_page(end_pfn);
/*
* Convert to physical addresses, and
* round start upwards and end downwards.
*/
pg = PAGE_ALIGN(__pa(start_pg));
pgend = __pa(end_pg) & PAGE_MASK;
/*
* If there are free pages between these,
* free the section of the memmap array.
*/
if (pg < pgend)
free_bootmem(pg, pgend - pg);
}
/*
* The mem_map array can get very big. Free the unused area of the memory map.
*/
static void __init free_unused_memmap(struct meminfo *mi)
{
unsigned long bank_start, prev_bank_end = 0;
unsigned int i;
/*
* This relies on each bank being in address order.
* The banks are sorted previously in bootmem_init().
*/
for_each_bank(i, mi) {
struct membank *bank = &mi->bank[i];
bank_start = bank_pfn_start(bank);
/*
* If we had a previous bank, and there is a space
* between the current bank and the previous, free it.
*/
if (prev_bank_end && prev_bank_end < bank_start)
free_memmap(prev_bank_end, bank_start);
/*
* Align up here since the VM subsystem insists that the
* memmap entries are valid from the bank end aligned to
* MAX_ORDER_NR_PAGES.
*/
prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
}
}
/*
* mem_init() marks the free areas in the mem_map and tells us how much
* memory is free. This is done after various parts of the system have
* claimed their memory after the kernel image.
*/
void __init mem_init(void)
{
unsigned long reserved_pages, free_pages;
struct memblock_region *reg;
int i;
max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
/* this will put all unused low memory onto the freelists */
free_unused_memmap(&meminfo);
totalram_pages += free_all_bootmem();
reserved_pages = free_pages = 0;
for_each_bank(i, &meminfo) {
struct membank *bank = &meminfo.bank[i];
unsigned int pfn1, pfn2;
struct page *page, *end;
pfn1 = bank_pfn_start(bank);
pfn2 = bank_pfn_end(bank);
page = pfn_to_page(pfn1);
end = pfn_to_page(pfn2 - 1) + 1;
do {
if (PageReserved(page))
reserved_pages++;
else if (!page_count(page))
free_pages++;
page++;
} while (page < end);
}
/*
* Since our memory may not be contiguous, calculate the
* real number of pages we have in this system
*/
printk(KERN_INFO "Memory:");
num_physpages = 0;
for_each_memblock(memory, reg) {
unsigned long pages = memblock_region_memory_end_pfn(reg) -
memblock_region_memory_base_pfn(reg);
num_physpages += pages;
printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
}
printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
nr_free_pages() << (PAGE_SHIFT-10),
free_pages << (PAGE_SHIFT-10),
reserved_pages << (PAGE_SHIFT-10),
totalhigh_pages << (PAGE_SHIFT-10));
printk(KERN_NOTICE "Virtual kernel memory layout:\n"
" vector : 0x%08lx - 0x%08lx (%4ld kB)\n"
" vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n"
" lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n"
" modules : 0x%08lx - 0x%08lx (%4ld MB)\n"
" .init : 0x%p" " - 0x%p" " (%4d kB)\n"
" .text : 0x%p" " - 0x%p" " (%4d kB)\n"
" .data : 0x%p" " - 0x%p" " (%4d kB)\n",
VECTORS_BASE, VECTORS_BASE + PAGE_SIZE,
DIV_ROUND_UP(PAGE_SIZE, SZ_1K),
VMALLOC_START, VMALLOC_END,
DIV_ROUND_UP((VMALLOC_END - VMALLOC_START), SZ_1M),
PAGE_OFFSET, (unsigned long)high_memory,
DIV_ROUND_UP(((unsigned long)high_memory - PAGE_OFFSET), SZ_1M),
MODULES_VADDR, MODULES_END,
DIV_ROUND_UP((MODULES_END - MODULES_VADDR), SZ_1M),
__init_begin, __init_end,
DIV_ROUND_UP((__init_end - __init_begin), SZ_1K),
_stext, _etext,
DIV_ROUND_UP((_etext - _stext), SZ_1K),
_sdata, _edata,
DIV_ROUND_UP((_edata - _sdata), SZ_1K));
BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
BUG_ON(TASK_SIZE > MODULES_VADDR);
if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
/*
* On a machine this small we won't get
* anywhere without overcommit, so turn
* it on by default.
*/
sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
}
}
void free_initmem(void)
{
totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
__phys_to_pfn(__pa(__init_end)),
"init");
}
#ifdef CONFIG_BLK_DEV_INITRD
static int keep_initrd;
void free_initrd_mem(unsigned long start, unsigned long end)
{
if (!keep_initrd)
totalram_pages += free_area(__phys_to_pfn(__pa(start)),
__phys_to_pfn(__pa(end)),
"initrd");
}
static int __init keepinitrd_setup(char *__unused)
{
keep_initrd = 1;
return 1;
}
__setup("keepinitrd", keepinitrd_setup);
#endif

arch/unicore32/mm/ioremap.c

@@ -0,0 +1,261 @@
/*
* linux/arch/unicore32/mm/ioremap.c
*
* Code specific to PKUnity SoC and UniCore ISA
*
* Copyright (C) 2001-2010 GUAN Xue-tao
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* Re-map IO memory to kernel address space so that we can access it.
*
* This allows a driver to remap an arbitrary region of bus memory into
* virtual space. One should *only* use readl, writel, memcpy_toio and
* so on with such remapped areas.
*
* Because UniCore only has a 32-bit address space we can't address the
* whole of the (physical) PCI space at once. PCI huge-mode addressing
* allows us to circumvent this restriction by splitting PCI space into
* two 2GB chunks and mapping only one at a time into processor memory.
* We use MMU protection domains to trap any attempt to access the bank
* that is not currently mapped. (This isn't fully implemented yet.)
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>
#include <mach/map.h>
#include "mm.h"
/*
* Used by ioremap() and iounmap() code to mark (super)section-mapped
* I/O regions in vm_struct->flags field.
*/
#define VM_UNICORE_SECTION_MAPPING 0x80000000
int ioremap_page(unsigned long virt, unsigned long phys,
const struct mem_type *mtype)
{
return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
__pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);
/*
* Section support is unsafe on SMP - If you iounmap and ioremap a region,
* the other CPUs will not see this change until their next context switch.
* Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
* which requires the new ioremap'd region to be referenced, the CPU will
* reference the _old_ region.
*
* Note that get_vm_area_caller() allocates a guard 4K page, so we need to
* mask the size back to 4MB aligned or we will overflow in the loop below.
*/
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
unsigned long addr = virt, end = virt + (size & ~(SZ_4M - 1));
pgd_t *pgd;
flush_cache_vunmap(addr, end);
pgd = pgd_offset_k(addr);
do {
pmd_t pmd, *pmdp = pmd_offset((pud_t *)pgd, addr);
pmd = *pmdp;
if (!pmd_none(pmd)) {
/*
* Clear the PMD from the page table, and
* increment the kvm sequence so others
* notice this change.
*
* Note: this is still racy on SMP machines.
*/
pmd_clear(pmdp);
/*
* Free the page table, if there was one.
*/
if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
}
addr += PGDIR_SIZE;
pgd++;
} while (addr < end);
flush_tlb_kernel_range(virt, end);
}
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
size_t size, const struct mem_type *type)
{
unsigned long addr = virt, end = virt + size;
pgd_t *pgd;
/*
* Remove and free any PTE-based mapping, and
* sync the current kernel mapping.
*/
unmap_area_sections(virt, size);
pgd = pgd_offset_k(addr);
do {
pmd_t *pmd = pmd_offset((pud_t *)pgd, addr);
set_pmd(pmd, __pmd(__pfn_to_phys(pfn) | type->prot_sect));
pfn += SZ_4M >> PAGE_SHIFT;
flush_pmd_entry(pmd);
addr += PGDIR_SIZE;
pgd++;
} while (addr < end);
return 0;
}
void __iomem *__uc32_ioremap_pfn_caller(unsigned long pfn,
unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
const struct mem_type *type;
int err;
unsigned long addr;
struct vm_struct *area;
/*
* High mappings must be section aligned
*/
if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SECTION_MASK))
return NULL;
/*
* Don't allow RAM to be mapped
*/
if (pfn_valid(pfn)) {
printk(KERN_WARNING "BUG: Your driver calls ioremap() on\n"
"system memory. This leads to architecturally\n"
"unpredictable behaviour, and ioremap() will fail in\n"
"the next kernel release. Please fix your driver.\n");
WARN_ON(1);
}
type = get_mem_type(mtype);
if (!type)
return NULL;
/*
* Page align the mapping size, taking account of any offset.
*/
size = PAGE_ALIGN(offset + size);
area = get_vm_area_caller(size, VM_IOREMAP, caller);
if (!area)
return NULL;
addr = (unsigned long)area->addr;
if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
area->flags |= VM_UNICORE_SECTION_MAPPING;
err = remap_area_sections(addr, pfn, size, type);
} else
err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
__pgprot(type->prot_pte));
if (err) {
vunmap((void *)addr);
return NULL;
}
flush_cache_vmap(addr, addr + size);
return (void __iomem *) (offset + addr);
}
void __iomem *__uc32_ioremap_caller(unsigned long phys_addr, size_t size,
unsigned int mtype, void *caller)
{
unsigned long last_addr;
unsigned long offset = phys_addr & ~PAGE_MASK;
unsigned long pfn = __phys_to_pfn(phys_addr);
/*
* Don't allow wraparound or zero size
*/
last_addr = phys_addr + size - 1;
if (!size || last_addr < phys_addr)
return NULL;
return __uc32_ioremap_pfn_caller(pfn, offset, size, mtype, caller);
}
/*
* Remap an arbitrary physical address space into the kernel virtual
* address space. Needed when the kernel wants to access high addresses
* directly.
*
* NOTE! We need to allow non-page-aligned mappings too: we will obviously
* have to convert them into an offset in a page-aligned mapping, but the
* caller shouldn't need to know that small detail.
*/
void __iomem *
__uc32_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
unsigned int mtype)
{
return __uc32_ioremap_pfn_caller(pfn, offset, size, mtype,
__builtin_return_address(0));
}
EXPORT_SYMBOL(__uc32_ioremap_pfn);
void __iomem *
__uc32_ioremap(unsigned long phys_addr, size_t size)
{
return __uc32_ioremap_caller(phys_addr, size, MT_DEVICE,
__builtin_return_address(0));
}
EXPORT_SYMBOL(__uc32_ioremap);
void __iomem *
__uc32_ioremap_cached(unsigned long phys_addr, size_t size)
{
return __uc32_ioremap_caller(phys_addr, size, MT_DEVICE_CACHED,
__builtin_return_address(0));
}
EXPORT_SYMBOL(__uc32_ioremap_cached);
void __uc32_iounmap(volatile void __iomem *io_addr)
{
void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
struct vm_struct **p, *tmp;
/*
* If this is a section based mapping we need to handle it
* specially as the VM subsystem does not know how to handle
* such a beast. We need the lock here because we need to clear
* all the mappings before the area can be reclaimed
* by someone else.
*/
write_lock(&vmlist_lock);
for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
if (tmp->flags & VM_UNICORE_SECTION_MAPPING) {
unmap_area_sections((unsigned long)tmp->addr,
tmp->size);
}
break;
}
}
write_unlock(&vmlist_lock);
vunmap(addr);
}
EXPORT_SYMBOL(__uc32_iounmap);
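/*
 * Usage sketch (editorial illustration, not part of the original file):
 * a driver maps a device register window and tears it down again. The
 * physical base address and register offset are hypothetical placeholders.
 */
static int __maybe_unused example_ioremap_usage(void)
{
	void __iomem *regs = __uc32_ioremap(0xee000000, SZ_4K);

	if (!regs)
		return -ENOMEM;
	writel(0x1, regs + 0x04);	/* hypothetical control register */
	__uc32_iounmap(regs);
	return 0;
}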

arch/unicore32/mm/mm.h

@@ -0,0 +1,44 @@
/*
* linux/arch/unicore32/mm/mm.h
*
* Code specific to PKUnity SoC and UniCore ISA
*
* Copyright (C) 2001-2010 GUAN Xue-tao
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <asm/hwdef-copro.h>
/* the upper-most page table pointer */
extern pmd_t *top_pmd;
extern int sysctl_overcommit_memory;
#define TOP_PTE(x) pte_offset_kernel(top_pmd, x)
static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
{
return pmd_offset((pud_t *)pgd, virt);
}
static inline pmd_t *pmd_off_k(unsigned long virt)
{
return pmd_off(pgd_offset_k(virt), virt);
}
struct mem_type {
unsigned int prot_pte;
unsigned int prot_l1;
unsigned int prot_sect;
};
const struct mem_type *get_mem_type(unsigned int type);
extern void __flush_dcache_page(struct address_space *, struct page *);
extern void hook_fault_code(int nr, int (*fn)
(unsigned long, unsigned int, struct pt_regs *),
int sig, int code, const char *name);
void __init bootmem_init(void);
void uc32_mm_memblock_reserve(void);

arch/unicore32/mm/mmu.c

@@ -0,0 +1,512 @@
/*
* linux/arch/unicore32/mm/mmu.c
*
* Code specific to PKUnity SoC and UniCore ISA
*
* Copyright (C) 2001-2010 GUAN Xue-tao
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <linux/io.h>
#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <mach/map.h>
#include "mm.h"
/*
* empty_zero_page is a special page that is used for
* zero-initialized data and COW.
*/
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);
/*
* The pmd table for the upper-most set of pages.
*/
pmd_t *top_pmd;
pgprot_t pgprot_user;
EXPORT_SYMBOL(pgprot_user);
pgprot_t pgprot_kernel;
EXPORT_SYMBOL(pgprot_kernel);
static int __init noalign_setup(char *__unused)
{
cr_alignment &= ~CR_A;
cr_no_alignment &= ~CR_A;
set_cr(cr_alignment);
return 1;
}
__setup("noalign", noalign_setup);
void adjust_cr(unsigned long mask, unsigned long set)
{
unsigned long flags;
mask &= ~CR_A;
set &= mask;
local_irq_save(flags);
cr_no_alignment = (cr_no_alignment & ~mask) | set;
cr_alignment = (cr_alignment & ~mask) | set;
set_cr((get_cr() & ~mask) | set);
local_irq_restore(flags);
}
struct map_desc {
unsigned long virtual;
unsigned long pfn;
unsigned long length;
unsigned int type;
};
#define PROT_PTE_DEVICE (PTE_PRESENT | PTE_YOUNG | \
PTE_DIRTY | PTE_READ | PTE_WRITE)
#define PROT_SECT_DEVICE (PMD_TYPE_SECT | PMD_PRESENT | \
PMD_SECT_READ | PMD_SECT_WRITE)
static struct mem_type mem_types[] = {
[MT_DEVICE] = { /* Strongly ordered */
.prot_pte = PROT_PTE_DEVICE,
.prot_l1 = PMD_TYPE_TABLE | PMD_PRESENT,
.prot_sect = PROT_SECT_DEVICE,
},
/*
* MT_KUSER: pte for vecpage -- cacheable,
* and sect for unigfx mmap -- noncacheable
*/
[MT_KUSER] = {
.prot_pte = PTE_PRESENT | PTE_YOUNG | PTE_DIRTY |
PTE_CACHEABLE | PTE_READ | PTE_EXEC,
.prot_l1 = PMD_TYPE_TABLE | PMD_PRESENT,
.prot_sect = PROT_SECT_DEVICE,
},
[MT_HIGH_VECTORS] = {
.prot_pte = PTE_PRESENT | PTE_YOUNG | PTE_DIRTY |
PTE_CACHEABLE | PTE_READ | PTE_WRITE |
PTE_EXEC,
.prot_l1 = PMD_TYPE_TABLE | PMD_PRESENT,
},
[MT_MEMORY] = {
.prot_pte = PTE_PRESENT | PTE_YOUNG | PTE_DIRTY |
PTE_WRITE | PTE_EXEC,
.prot_l1 = PMD_TYPE_TABLE | PMD_PRESENT,
.prot_sect = PMD_TYPE_SECT | PMD_PRESENT | PMD_SECT_CACHEABLE |
PMD_SECT_READ | PMD_SECT_WRITE | PMD_SECT_EXEC,
},
[MT_ROM] = {
.prot_sect = PMD_TYPE_SECT | PMD_PRESENT | PMD_SECT_CACHEABLE |
PMD_SECT_READ,
},
};
const struct mem_type *get_mem_type(unsigned int type)
{
return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}
EXPORT_SYMBOL(get_mem_type);
/*
* Adjust the PMD section entries according to the CPU in use.
*/
static void __init build_mem_type_table(void)
{
pgprot_user = __pgprot(PTE_PRESENT | PTE_YOUNG | PTE_CACHEABLE);
pgprot_kernel = __pgprot(PTE_PRESENT | PTE_YOUNG |
PTE_DIRTY | PTE_READ | PTE_WRITE |
PTE_EXEC | PTE_CACHEABLE);
}
#define vectors_base() (vectors_high() ? 0xffff0000 : 0)
static void __init *early_alloc(unsigned long sz)
{
void *ptr = __va(memblock_alloc(sz, sz));
memset(ptr, 0, sz);
return ptr;
}
static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr,
unsigned long prot)
{
if (pmd_none(*pmd)) {
pte_t *pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t));
__pmd_populate(pmd, __pa(pte) | prot);
}
BUG_ON(pmd_bad(*pmd));
return pte_offset_kernel(pmd, addr);
}
static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
unsigned long end, unsigned long pfn,
const struct mem_type *type)
{
pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
do {
set_pte(pte, pfn_pte(pfn, __pgprot(type->prot_pte)));
pfn++;
} while (pte++, addr += PAGE_SIZE, addr != end);
}
static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
unsigned long end, unsigned long phys,
const struct mem_type *type)
{
pmd_t *pmd = pmd_offset((pud_t *)pgd, addr);
/*
* Try a section mapping - end, addr and phys must all be aligned
* to a section boundary.
*/
if (((addr | end | phys) & ~SECTION_MASK) == 0) {
pmd_t *p = pmd;
do {
set_pmd(pmd, __pmd(phys | type->prot_sect));
phys += SECTION_SIZE;
} while (pmd++, addr += SECTION_SIZE, addr != end);
flush_pmd_entry(p);
} else {
/*
* No need to loop; PTEs aren't interested in the
* individual L1 entries.
*/
alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
}
}
/*
* Create the page directory entries and any necessary
* page tables for the mapping specified by `md'. We
* are able to cope here with varying sizes and address
* offsets, and we take full advantage of sections.
*/
static void __init create_mapping(struct map_desc *md)
{
unsigned long phys, addr, length, end;
const struct mem_type *type;
pgd_t *pgd;
if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
printk(KERN_WARNING "BUG: not creating mapping for "
"0x%08llx at 0x%08lx in user region\n",
__pfn_to_phys((u64)md->pfn), md->virtual);
return;
}
if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx "
"overlaps vmalloc space\n",
__pfn_to_phys((u64)md->pfn), md->virtual);
}
type = &mem_types[md->type];
addr = md->virtual & PAGE_MASK;
phys = (unsigned long)__pfn_to_phys(md->pfn);
length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
"be mapped using pages, ignoring.\n",
__pfn_to_phys(md->pfn), addr);
return;
}
pgd = pgd_offset_k(addr);
end = addr + length;
do {
unsigned long next = pgd_addr_end(addr, end);
alloc_init_section(pgd, addr, next, phys, type);
phys += next - addr;
addr = next;
} while (pgd++, addr != end);
}
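/*
 * Illustrative sketch (editor's addition, not upstream code): a board
 * file could describe one device page with a map_desc like the one
 * below; the physical and virtual addresses are made-up placeholders.
 *
 *	struct map_desc map;
 *
 *	map.pfn     = __phys_to_pfn(0xee000000);  (hypothetical device base)
 *	map.virtual = 0xf4000000;                 (hypothetical virtual slot)
 *	map.length  = PAGE_SIZE;
 *	map.type    = MT_DEVICE;
 *	create_mapping(&map);
 */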
static void * __initdata vmalloc_min = (void *)(VMALLOC_END - SZ_128M);
/*
* vmalloc=size forces the vmalloc area to be exactly 'size'
* bytes. This can be used to increase (or decrease) the vmalloc
* area - the default is 128MB.
*/
static int __init early_vmalloc(char *arg)
{
unsigned long vmalloc_reserve = memparse(arg, NULL);
if (vmalloc_reserve < SZ_16M) {
vmalloc_reserve = SZ_16M;
printk(KERN_WARNING
"vmalloc area too small, limiting to %luMB\n",
vmalloc_reserve >> 20);
}
if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
printk(KERN_WARNING
"vmalloc area is too big, limiting to %luMB\n",
vmalloc_reserve >> 20);
}
vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
return 0;
}
early_param("vmalloc", early_vmalloc);
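/*
 * Worked example (editor's addition): booting with "vmalloc=256M"
 * makes memparse() return SZ_256M; assuming that fits the upper bound
 * check above, vmalloc_min ends up at (void *)(VMALLOC_END - SZ_256M),
 * doubling the default 128MB vmalloc area.
 */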
static phys_addr_t lowmem_limit __initdata = SZ_1G;
static void __init sanity_check_meminfo(void)
{
int i, j;
lowmem_limit = __pa(vmalloc_min - 1) + 1;
memblock_set_current_limit(lowmem_limit);
for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
struct membank *bank = &meminfo.bank[j];
*bank = meminfo.bank[i];
j++;
}
meminfo.nr_banks = j;
}
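/*
 * Worked example (editor's addition): with the default vmalloc_min of
 * VMALLOC_END - SZ_128M, lowmem_limit becomes
 * __pa(VMALLOC_END - SZ_128M - 1) + 1, i.e. the highest physical
 * address that may still be covered by the kernel's linear mapping.
 */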
static inline void prepare_page_table(void)
{
unsigned long addr;
phys_addr_t end;
/*
* Clear out all the mappings below the kernel image.
*/
for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE)
pmd_clear(pmd_off_k(addr));
for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
pmd_clear(pmd_off_k(addr));
/*
* Find the end of the first block of lowmem.
*/
end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
if (end >= lowmem_limit)
end = lowmem_limit;
/*
* Clear out all the kernel space mappings, except for the first
* memory bank, up to the end of the vmalloc region.
*/
for (addr = __phys_to_virt(end);
addr < VMALLOC_END; addr += PGDIR_SIZE)
pmd_clear(pmd_off_k(addr));
}
/*
* Reserve the special regions of memory
*/
void __init uc32_mm_memblock_reserve(void)
{
/*
* Reserve the page tables. These are already in use,
* and can only be in node 0.
*/
memblock_reserve(__pa(swapper_pg_dir), PTRS_PER_PGD * sizeof(pgd_t));
}
/*
* Set up the device mappings. Since we clear out the page tables for all
* mappings above VMALLOC_END, we will remove any debug device mappings.
* This means you have to be careful how you debug this function, or any
* called function: you can't use any function or debugging method which
* may touch any device, otherwise the kernel _will_ crash.
*/
static void __init devicemaps_init(void)
{
struct map_desc map;
unsigned long addr;
void *vectors;
/*
* Allocate the vector page early.
*/
vectors = early_alloc(PAGE_SIZE);
for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
pmd_clear(pmd_off_k(addr));
/*
* Create a mapping for the machine vectors at the high-vectors
* location (0xffff0000). If we aren't using high-vectors, also
* create a mapping at the low-vectors virtual address.
*/
map.pfn = __phys_to_pfn(virt_to_phys(vectors));
map.virtual = VECTORS_BASE;
map.length = PAGE_SIZE;
map.type = MT_HIGH_VECTORS;
create_mapping(&map);
/*
* Create a mapping for the kuser page at the special
* location (0xbfff0000) to the same vectors location.
*/
map.pfn = __phys_to_pfn(virt_to_phys(vectors));
map.virtual = KUSER_VECPAGE_BASE;
map.length = PAGE_SIZE;
map.type = MT_KUSER;
create_mapping(&map);
/*
* Finally flush the caches and tlb to ensure that we're in a
* consistent state wrt the writebuffer. This also ensures that
* any write-allocated cache lines in the vector page are written
* back. After this point, we can start to touch devices again.
*/
local_flush_tlb_all();
flush_cache_all();
}
static void __init map_lowmem(void)
{
struct memblock_region *reg;
/* Map all the lowmem memory banks. */
for_each_memblock(memory, reg) {
phys_addr_t start = reg->base;
phys_addr_t end = start + reg->size;
struct map_desc map;
if (end > lowmem_limit)
end = lowmem_limit;
if (start >= end)
break;
map.pfn = __phys_to_pfn(start);
map.virtual = __phys_to_virt(start);
map.length = end - start;
map.type = MT_MEMORY;
create_mapping(&map);
}
}
/*
* paging_init() sets up the page tables, initialises the zone memory
* maps, and sets up the zero page, bad page and bad page tables.
*/
void __init paging_init(void)
{
void *zero_page;
build_mem_type_table();
sanity_check_meminfo();
prepare_page_table();
map_lowmem();
devicemaps_init();
top_pmd = pmd_off_k(0xffff0000);
/* allocate the zero page. */
zero_page = early_alloc(PAGE_SIZE);
bootmem_init();
empty_zero_page = virt_to_page(zero_page);
__flush_dcache_page(NULL, empty_zero_page);
}
/*
* In order to soft-boot, we need to insert a 1:1 mapping in place of
* the user-mode pages. This will then ensure that we have predictable
* results when turning the MMU off.
*/
void setup_mm_for_reboot(char mode)
{
unsigned long base_pmdval;
pgd_t *pgd;
int i;
/*
* We need access to user-mode page tables here. For kernel threads
* we don't have any user-mode mappings so we use the context that we
* "borrowed".
*/
pgd = current->active_mm->pgd;
base_pmdval = PMD_SECT_WRITE | PMD_SECT_READ | PMD_TYPE_SECT;
for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
pmd_t *pmd;
pmd = pmd_off(pgd, i << PGDIR_SHIFT);
set_pmd(pmd, __pmd(pmdval));
flush_pmd_entry(pmd);
}
local_flush_tlb_all();
}
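/*
 * Worked example (editor's addition, assuming 4MB sections so that
 * PGDIR_SHIFT == 22): for i == 1 the loop above writes
 * pmdval == 0x00400000 | PMD_SECT_WRITE | PMD_SECT_READ | PMD_TYPE_SECT,
 * i.e. virtual 0x00400000 mapped 1:1 onto physical 0x00400000.
 */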
/*
* Take care of architecture specific things when placing a new PTE into
* a page table, or changing an existing PTE. Basically, there are two
* things that we need to take care of:
*
* 1. If PG_dcache_clean is not set for the page, we need to ensure
* that any cache entries for the kernel's virtual memory
* range are written back to the page.
* 2. If we have multiple shared mappings of the same space in
* an object, we need to deal with the cache aliasing issues.
*
* Note that the pte lock will be held.
*/
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
pte_t *ptep)
{
unsigned long pfn = pte_pfn(*ptep);
struct address_space *mapping;
struct page *page;
if (!pfn_valid(pfn))
return;
/*
* The zero page is never written to, so never has any dirty
* cache lines, and therefore never needs to be flushed.
*/
page = pfn_to_page(pfn);
if (page == ZERO_PAGE(0))
return;
mapping = page_mapping(page);
if (!test_and_set_bit(PG_dcache_clean, &page->flags))
__flush_dcache_page(mapping, page);
if (mapping)
if (vma->vm_flags & VM_EXEC)
__flush_icache_all();
}
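/*
 * Worked example (editor's addition): on the first fault-in of an
 * executable file page, PG_dcache_clean is initially clear, so the
 * test_and_set_bit() above both marks the page clean and triggers
 * __flush_dcache_page(); the VM_EXEC check then forces an I-cache
 * flush so stale instructions cannot be executed.
 */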

View File

@ -0,0 +1,102 @@
/*
* linux/arch/unicore32/mm/pgd.c
*
* Code specific to PKUnity SoC and UniCore ISA
*
* Copyright (C) 2001-2010 GUAN Xue-tao
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include "mm.h"
#define FIRST_KERNEL_PGD_NR (FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
/*
* We need to get a 4K page for the level 1 page table.
*/
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
pgd_t *new_pgd, *init_pgd;
pmd_t *new_pmd, *init_pmd;
pte_t *new_pte, *init_pte;
new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 0);
if (!new_pgd)
goto no_pgd;
memset(new_pgd, 0, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));
/*
* Copy over the kernel and IO PGD entries
*/
init_pgd = pgd_offset_k(0);
memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
(PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
if (!vectors_high()) {
/*
* On UniCore, the first page must always be allocated since it
* contains the machine vectors.
*/
new_pmd = pmd_alloc(mm, (pud_t *)new_pgd, 0);
if (!new_pmd)
goto no_pmd;
new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
if (!new_pte)
goto no_pte;
init_pmd = pmd_offset((pud_t *)init_pgd, 0);
init_pte = pte_offset_map(init_pmd, 0);
set_pte(new_pte, *init_pte);
pte_unmap(init_pte);
pte_unmap(new_pte);
}
return new_pgd;
no_pte:
pmd_free(mm, new_pmd);
no_pmd:
free_pages((unsigned long)new_pgd, 0);
no_pgd:
return NULL;
}
void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd)
{
pmd_t *pmd;
pgtable_t pte;
if (!pgd)
return;
/* pgd is always present and good */
pmd = pmd_off(pgd, 0);
if (pmd_none(*pmd))
goto free;
if (pmd_bad(*pmd)) {
pmd_ERROR(*pmd);
pmd_clear(pmd);
goto free;
}
pte = pmd_pgtable(*pmd);
pmd_clear(pmd);
pte_free(mm, pte);
pmd_free(mm, pmd);
free:
free_pages((unsigned long) pgd, 0);
}
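/*
 * Illustrative note (editor's assumption, mirroring the ARM port): the
 * architecture's pgalloc.h is expected to route the generic hooks to
 * these helpers, roughly as
 *
 *	#define pgd_alloc(mm)		get_pgd_slow(mm)
 *	#define pgd_free(mm, pgd)	free_pgd_slow(mm, pgd)
 */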

View File

@ -0,0 +1,145 @@
/*
* linux/arch/unicore32/mm/proc-macros.S
*
* Code specific to PKUnity SoC and UniCore ISA
*
* Copyright (C) 2001-2010 GUAN Xue-tao
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* We need constants.h for:
* VMA_VM_MM
* VMA_VM_FLAGS
* VM_EXEC
*/
#include <generated/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
/*
* The I-cache and D-cache line sizes are the same.
*/
#define CACHE_LINESIZE 32
/*
* This is the maximum size of an area which will be invalidated
* using the single invalidate entry instructions. Anything larger
* than this, and we go for the whole cache.
*
* This value should be chosen such that we choose the cheapest
* alternative.
*/
#ifdef CONFIG_CPU_UCV2
#define MAX_AREA_SIZE 0x800 /* 64 cache lines */
#endif
/*
* vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
*/
.macro vma_vm_mm, rd, rn
ldw \rd, [\rn+], #VMA_VM_MM
.endm
/*
* vma_vm_flags - get vma->vm_flags
*/
.macro vma_vm_flags, rd, rn
ldw \rd, [\rn+], #VMA_VM_FLAGS
.endm
.macro tsk_mm, rd, rn
ldw \rd, [\rn+], #TI_TASK
ldw \rd, [\rd+], #TSK_ACTIVE_MM
.endm
/*
* act_mm - get current->active_mm
*/
.macro act_mm, rd
andn \rd, sp, #8128
andn \rd, \rd, #63
ldw \rd, [\rd+], #TI_TASK
ldw \rd, [\rd+], #TSK_ACTIVE_MM
.endm
/*
* mmid - get context id from mm pointer (mm->context.id)
*/
.macro mmid, rd, rn
ldw \rd, [\rn+], #MM_CONTEXT_ID
.endm
/*
* asid - extract the ASID from the context ID
*/
.macro asid, rd, rn
and \rd, \rn, #255
.endm
.macro crval, clear, mmuset, ucset
.word \clear
.word \mmuset
.endm
#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
/*
* va2pa va, pa, tbl, msk, off, err
* This macro translates a virtual address to its physical address.
*
* va: virtual address
* pa: physical address, result is stored in this register
* tbl, msk, off: temp registers, will be destroyed
* err: label to jump to if the physical address does not exist
* NOTE: all regs must be different
*/
.macro va2pa, va, pa, tbl, msk, off, err=990f
movc \pa, p0.c2, #0
mov \off, \va >> #22 @ off <- index of 1st page table
adr \tbl, 910f @ tbl <- table of 1st page table
900: @ ---- handle 1, 2 page table
add \pa, \pa, #PAGE_OFFSET @ pa <- virt addr of page table
ldw \pa, [\pa+], \off << #2 @ pa <- the content of pt
cand.a \pa, #4 @ test the exist bit
beq \err @ branch if it is not set
and \off, \pa, #3 @ off <- the last 2 bits
add \tbl, \tbl, \off << #3 @ advance table pointer
ldw \msk, [\tbl+], #0 @ get the mask
ldw pc, [\tbl+], #4
930: @ ---- handle 2nd page table
and \pa, \pa, \msk @ pa <- phys addr of 2nd pt
mov \off, \va << #10
cntlo \tbl, \msk @ use tbl as temp reg
mov \off, \off >> \tbl
mov \off, \off >> #2 @ off <- index of 2nd pt
adr \tbl, 920f @ tbl <- table of 2nd pt
b 900b
910: @ 1st level page table
.word 0xfffff000, 930b @ second level page table
.word 0xfffffc00, 930b @ second level large page table
.word 0x00000000, \err @ invalid
.word 0xffc00000, 980f @ super page
920: @ 2nd level page table
.word 0xfffff000, 980f @ page
.word 0xffffc000, 980f @ middle page
.word 0xffff0000, 980f @ large page
.word 0x00000000, \err @ invalid
980:
andn \tbl, \va, \msk
and \pa, \pa, \msk
or \pa, \pa, \tbl
990:
.endm
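@ Illustrative usage (editor's sketch, mirroring cpu_dcache_clean_area in
@ proc-ucv2.S): translate the VA in r0 into r10, clobbering r11-r13:
@	va2pa	r0, r10, r11, r12, r13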
#endif
.macro dcacheline_flush, addr, t1, t2
mov \t1, \addr << #20
ldw \t2, =_stext @ _stext must be ALIGN(4096)
add \t2, \t2, \t1 >> #20
ldw \t1, [\t2+], #0x0000
ldw \t1, [\t2+], #0x1000
ldw \t1, [\t2+], #0x2000
ldw \t1, [\t2+], #0x3000
.endm

View File

@ -0,0 +1,23 @@
/*
* linux/arch/unicore32/mm/proc-syms.c
*
* Code specific to PKUnity SoC and UniCore ISA
*
* Copyright (C) 2001-2010 GUAN Xue-tao
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
EXPORT_SYMBOL(cpu_dcache_clean_area);
EXPORT_SYMBOL(cpu_set_pte);
EXPORT_SYMBOL(__cpuc_dma_flush_range);
EXPORT_SYMBOL(__cpuc_dma_clean_range);

View File

@ -0,0 +1,134 @@
/*
* linux/arch/unicore32/mm/proc-ucv2.S
*
* Code specific to PKUnity SoC and UniCore ISA
*
* Copyright (C) 2001-2010 GUAN Xue-tao
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include "proc-macros.S"
ENTRY(cpu_proc_fin)
stm.w (lr), [sp-]
mov ip, #PSR_R_BIT | PSR_I_BIT | PRIV_MODE
mov.a asr, ip
b.l __cpuc_flush_kern_all
ldm.w (pc), [sp]+
/*
* cpu_reset(loc)
*
* Perform a soft reset of the system. Put the CPU into the
* same state as it would be if it had been reset, and branch
* to what would be the reset vector.
*
* - loc - location to jump to for soft reset
*/
.align 5
ENTRY(cpu_reset)
mov ip, #0
movc p0.c5, ip, #28 @ Cache invalidate all
nop8
movc p0.c6, ip, #6 @ TLB invalidate all
nop8
movc ip, p0.c1, #0 @ ctrl register
or ip, ip, #0x2000 @ vector base address
andn ip, ip, #0x000f @ ............idam
movc p0.c1, ip, #0 @ disable caches and mmu
nop
mov pc, r0 @ jump to loc
nop8
/*
* cpu_do_idle()
*
* Idle the processor (e.g., wait for interrupt).
*
* IRQs are already disabled.
*/
ENTRY(cpu_do_idle)
mov r0, #0 @ PCI address
.rept 8
ldw r1, [r0]
.endr
mov pc, lr
ENTRY(cpu_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
csub.a r1, #MAX_AREA_SIZE
bsg 101f
mov r9, #PAGE_SZ
sub r9, r9, #1 @ PAGE_MASK
1: va2pa r0, r10, r11, r12, r13 @ r10 is PA
b 3f
2: cand.a r0, r9
beq 1b
3: movc p0.c5, r10, #11 @ clean D entry
nop8
add r0, r0, #CACHE_LINESIZE
add r10, r10, #CACHE_LINESIZE
sub.a r1, r1, #CACHE_LINESIZE
bua 2b
mov pc, lr
#endif
101: mov ip, #0
movc p0.c5, ip, #10 @ Dcache clean all
nop8
mov pc, lr
/*
* cpu_do_switch_mm(pgd_phys)
*
* Set the translation table base pointer to be pgd_phys
*
* - pgd_phys - physical address of new pgd
*
* It is assumed that:
* - we are not using split page tables
*/
.align 5
ENTRY(cpu_do_switch_mm)
movc p0.c2, r0, #0 @ update page table ptr
nop8
movc p0.c6, ip, #6 @ TLB invalidate all
nop8
mov pc, lr
/*
* cpu_set_pte(ptep, pte)
*
* Set a level 2 translation table entry.
*
* - ptep - pointer to level 2 translation table entry
* - pte - PTE value to store
*/
.align 5
ENTRY(cpu_set_pte)
stw r1, [r0]
#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
sub r2, r0, #PAGE_OFFSET
movc p0.c5, r2, #11 @ Dcache clean line
nop8
#else
mov ip, #0
movc p0.c5, ip, #10 @ Dcache clean all
nop8
@dcacheline_flush r0, r2, ip
#endif
mov pc, lr

View File

@ -0,0 +1,89 @@
/*
* linux/arch/unicore32/mm/tlb-ucv2.S
*
* Code specific to PKUnity SoC and UniCore ISA
*
* Copyright (C) 2001-2010 GUAN Xue-tao
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include "proc-macros.S"
/*
* __cpu_flush_user_tlb_range(start, end, vma)
*
* Invalidate a range of TLB entries in the specified address space.
*
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
* - vma - vma_struct describing address range
*/
ENTRY(__cpu_flush_user_tlb_range)
#ifndef CONFIG_CPU_TLB_SINGLE_ENTRY_DISABLE
mov r0, r0 >> #PAGE_SHIFT @ align address
mov r0, r0 << #PAGE_SHIFT
vma_vm_flags r2, r2 @ get vma->vm_flags
1:
movc p0.c6, r0, #3
nop8
cand.a r2, #VM_EXEC @ Executable area ?
beq 2f
movc p0.c6, r0, #5
nop8
2:
add r0, r0, #PAGE_SZ
csub.a r0, r1
beb 1b
#else
movc p0.c6, r0, #2
nop8
cand.a r2, #VM_EXEC @ Executable area ?
beq 2f
movc p0.c6, r0, #4
nop8
2:
#endif
mov pc, lr
/*
* __cpu_flush_kern_tlb_range(start,end)
*
* Invalidate a range of kernel TLB entries
*
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
*/
ENTRY(__cpu_flush_kern_tlb_range)
#ifndef CONFIG_CPU_TLB_SINGLE_ENTRY_DISABLE
mov r0, r0 >> #PAGE_SHIFT @ align address
mov r0, r0 << #PAGE_SHIFT
1:
movc p0.c6, r0, #3
nop8
movc p0.c6, r0, #5
nop8
add r0, r0, #PAGE_SZ
csub.a r0, r1
beb 1b
#else
movc p0.c6, r0, #2
nop8
movc p0.c6, r0, #4
nop8
#endif
mov pc, lr