M7350v1_en_gpl

This commit is contained in:
T
2024-09-09 08:52:07 +00:00
commit f9cc65cfda
65988 changed files with 26357421 additions and 0 deletions

View File

@ -0,0 +1,44 @@
include include/asm-generic/Kbuild.asm
header-y += ../arch/
header-y += ucontext.h
header-y += hardwall.h
generic-y += bug.h
generic-y += bugs.h
generic-y += cputime.h
generic-y += device.h
generic-y += div64.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += fb.h
generic-y += fcntl.h
generic-y += ioctl.h
generic-y += ioctls.h
generic-y += ipc.h
generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += kdebug.h
generic-y += local.h
generic-y += module.h
generic-y += msgbuf.h
generic-y += mutex.h
generic-y += param.h
generic-y += parport.h
generic-y += poll.h
generic-y += posix_types.h
generic-y += resource.h
generic-y += scatterlist.h
generic-y += sembuf.h
generic-y += serial.h
generic-y += shmbuf.h
generic-y += shmparam.h
generic-y += socket.h
generic-y += sockios.h
generic-y += statfs.h
generic-y += termbits.h
generic-y += termios.h
generic-y += types.h
generic-y += ucontext.h
generic-y += xor.h

View File

@ -0,0 +1 @@
#include <generated/asm-offsets.h>

View File

@ -0,0 +1,134 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
* Atomic primitives.
*/
#ifndef _ASM_TILE_ATOMIC_H
#define _ASM_TILE_ATOMIC_H
#include <asm/cmpxchg.h>
#ifndef __ASSEMBLY__
#include <linux/compiler.h>
#include <linux/types.h>
#define ATOMIC_INIT(i) { (i) }
/**
* atomic_read - read atomic variable
* @v: pointer of type atomic_t
*
* Atomically reads the value of @v.
*/
static inline int atomic_read(const atomic_t *v)
{
return ACCESS_ONCE(v->counter);
}
/**
* atomic_sub_return - subtract integer and return
* @v: pointer of type atomic_t
* @i: integer value to subtract
*
* Atomically subtracts @i from @v and returns @v - @i
*/
#define atomic_sub_return(i, v) atomic_add_return((int)(-(i)), (v))
/**
* atomic_sub - subtract integer from atomic variable
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
* Atomically subtracts @i from @v.
*/
#define atomic_sub(i, v) atomic_add((int)(-(i)), (v))
/**
* atomic_sub_and_test - subtract value from variable and test result
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
* Atomically subtracts @i from @v and returns true if the result is
* zero, or false for all other cases.
*/
#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
/**
* atomic_inc_return - increment memory and return
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1 and returns the new value.
*/
#define atomic_inc_return(v) atomic_add_return(1, (v))
/**
* atomic_dec_return - decrement memory and return
* @v: pointer of type atomic_t
*
* Atomically decrements @v by 1 and returns the new value.
*/
#define atomic_dec_return(v) atomic_sub_return(1, (v))
/**
* atomic_inc - increment atomic variable
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1.
*/
#define atomic_inc(v) atomic_add(1, (v))
/**
* atomic_dec - decrement atomic variable
* @v: pointer of type atomic_t
*
* Atomically decrements @v by 1.
*/
#define atomic_dec(v) atomic_sub(1, (v))
/**
* atomic_dec_and_test - decrement and test
* @v: pointer of type atomic_t
*
* Atomically decrements @v by 1 and returns true if the result is 0.
*/
#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
/**
* atomic_inc_and_test - increment and test
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1 and returns true if the result is 0.
*/
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
/**
* atomic_add_negative - add and test if negative
* @v: pointer of type atomic_t
* @i: integer value to add
*
* Atomically adds @i to @v and returns true if the result is
* negative, or false when result is greater than or equal to zero.
*/
#define atomic_add_negative(i, v) (atomic_add_return((i), (v)) < 0)
#endif /* __ASSEMBLY__ */
#ifndef __tilegx__
#include <asm/atomic_32.h>
#else
#include <asm/atomic_64.h>
#endif
#endif /* _ASM_TILE_ATOMIC_H */

View File

@ -0,0 +1,324 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
* Do not include directly; use <linux/atomic.h>.
*/
#ifndef _ASM_TILE_ATOMIC_32_H
#define _ASM_TILE_ATOMIC_32_H
#include <asm/barrier.h>
#include <arch/chip.h>
#ifndef __ASSEMBLY__
/* Tile-specific routines to support <linux/atomic.h>. */
int _atomic_xchg(atomic_t *v, int n);
int _atomic_xchg_add(atomic_t *v, int i);
int _atomic_xchg_add_unless(atomic_t *v, int a, int u);
int _atomic_cmpxchg(atomic_t *v, int o, int n);
/**
* atomic_xchg - atomically exchange contents of memory with a new value
* @v: pointer of type atomic_t
* @i: integer value to store in memory
*
* Atomically sets @v to @i and returns old @v
*/
static inline int atomic_xchg(atomic_t *v, int n)
{
smp_mb(); /* barrier for proper semantics */
return _atomic_xchg(v, n);
}
/**
* atomic_cmpxchg - atomically exchange contents of memory if it matches
* @v: pointer of type atomic_t
* @o: old value that memory should have
* @n: new value to write to memory if it matches
*
* Atomically checks if @v holds @o and replaces it with @n if so.
* Returns the old value at @v.
*/
static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
{
smp_mb(); /* barrier for proper semantics */
return _atomic_cmpxchg(v, o, n);
}
/**
* atomic_add - add integer to atomic variable
* @i: integer value to add
* @v: pointer of type atomic_t
*
* Atomically adds @i to @v.
*/
static inline void atomic_add(int i, atomic_t *v)
{
_atomic_xchg_add(v, i);
}
/**
* atomic_add_return - add integer and return
* @v: pointer of type atomic_t
* @i: integer value to add
*
* Atomically adds @i to @v and returns @i + @v
*/
static inline int atomic_add_return(int i, atomic_t *v)
{
smp_mb(); /* barrier for proper semantics */
return _atomic_xchg_add(v, i) + i;
}
/**
* __atomic_add_unless - add unless the number is already a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as @v was not already @u.
* Returns the old value of @v.
*/
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
smp_mb(); /* barrier for proper semantics */
return _atomic_xchg_add_unless(v, a, u);
}
/**
* atomic_set - set atomic variable
* @v: pointer of type atomic_t
* @i: required value
*
* Atomically sets the value of @v to @i.
*
* atomic_set() can't be just a raw store, since it would be lost if it
* fell between the load and store of one of the other atomic ops.
*/
static inline void atomic_set(atomic_t *v, int n)
{
_atomic_xchg(v, n);
}
/* A 64bit atomic type */
typedef struct {
u64 __aligned(8) counter;
} atomic64_t;
#define ATOMIC64_INIT(val) { (val) }
u64 _atomic64_xchg(atomic64_t *v, u64 n);
u64 _atomic64_xchg_add(atomic64_t *v, u64 i);
u64 _atomic64_xchg_add_unless(atomic64_t *v, u64 a, u64 u);
u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n);
/**
* atomic64_read - read atomic variable
* @v: pointer of type atomic64_t
*
* Atomically reads the value of @v.
*/
static inline u64 atomic64_read(const atomic64_t *v)
{
/*
* Requires an atomic op to read both 32-bit parts consistently.
* Casting away const is safe since the atomic support routines
* do not write to memory if the value has not been modified.
*/
return _atomic64_xchg_add((atomic64_t *)v, 0);
}
/**
* atomic64_xchg - atomically exchange contents of memory with a new value
* @v: pointer of type atomic64_t
* @i: integer value to store in memory
*
* Atomically sets @v to @i and returns old @v
*/
static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
{
smp_mb(); /* barrier for proper semantics */
return _atomic64_xchg(v, n);
}
/**
* atomic64_cmpxchg - atomically exchange contents of memory if it matches
* @v: pointer of type atomic64_t
* @o: old value that memory should have
* @n: new value to write to memory if it matches
*
* Atomically checks if @v holds @o and replaces it with @n if so.
* Returns the old value at @v.
*/
static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
{
smp_mb(); /* barrier for proper semantics */
return _atomic64_cmpxchg(v, o, n);
}
/**
* atomic64_add - add integer to atomic variable
* @i: integer value to add
* @v: pointer of type atomic64_t
*
* Atomically adds @i to @v.
*/
static inline void atomic64_add(u64 i, atomic64_t *v)
{
_atomic64_xchg_add(v, i);
}
/**
* atomic64_add_return - add integer and return
* @v: pointer of type atomic64_t
* @i: integer value to add
*
* Atomically adds @i to @v and returns @i + @v
*/
static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
{
smp_mb(); /* barrier for proper semantics */
return _atomic64_xchg_add(v, i) + i;
}
/**
* atomic64_add_unless - add unless the number is already a given value
* @v: pointer of type atomic64_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as @v was not already @u.
* Returns non-zero if @v was not @u, and zero otherwise.
*/
static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
{
smp_mb(); /* barrier for proper semantics */
return _atomic64_xchg_add_unless(v, a, u) != u;
}
/**
* atomic64_set - set atomic variable
* @v: pointer of type atomic64_t
* @i: required value
*
* Atomically sets the value of @v to @i.
*
* atomic64_set() can't be just a raw store, since it would be lost if it
* fell between the load and store of one of the other atomic ops.
*/
static inline void atomic64_set(atomic64_t *v, u64 n)
{
_atomic64_xchg(v, n);
}
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v) atomic64_add(1LL, (v))
#define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
#define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v))
#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
#define atomic64_sub(i, v) atomic64_add(-(i), (v))
#define atomic64_dec(v) atomic64_sub(1LL, (v))
#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
/*
* We need to barrier before modifying the word, since the _atomic_xxx()
* routines just tns the lock and then read/modify/write of the word.
* But after the word is updated, the routine issues an "mf" before returning,
* and since it's a function call, we don't even need a compiler barrier.
*/
#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_dec() do { } while (0)
#define smp_mb__after_atomic_inc() do { } while (0)
#endif /* !__ASSEMBLY__ */
/*
* Internal definitions only beyond this point.
*/
#define ATOMIC_LOCKS_FOUND_VIA_TABLE() \
(!CHIP_HAS_CBOX_HOME_MAP() && defined(CONFIG_SMP))
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
/* Number of entries in atomic_lock_ptr[]. */
#define ATOMIC_HASH_L1_SHIFT 6
#define ATOMIC_HASH_L1_SIZE (1 << ATOMIC_HASH_L1_SHIFT)
/* Number of locks in each struct pointed to by atomic_lock_ptr[]. */
#define ATOMIC_HASH_L2_SHIFT (CHIP_L2_LOG_LINE_SIZE() - 2)
#define ATOMIC_HASH_L2_SIZE (1 << ATOMIC_HASH_L2_SHIFT)
#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
/*
* Number of atomic locks in atomic_locks[]. Must be a power of two.
* There is no reason for more than PAGE_SIZE / 8 entries, since that
* is the maximum number of pointer bits we can use to index this.
* And we cannot have more than PAGE_SIZE / 4, since this has to
* fit on a single page and each entry takes 4 bytes.
*/
#define ATOMIC_HASH_SHIFT (PAGE_SHIFT - 3)
#define ATOMIC_HASH_SIZE (1 << ATOMIC_HASH_SHIFT)
#ifndef __ASSEMBLY__
extern int atomic_locks[];
#endif
#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
/*
* All the code that may fault while holding an atomic lock must
* place the pointer to the lock in ATOMIC_LOCK_REG so the fault code
* can correctly release and reacquire the lock. Note that we
* mention the register number in a comment in "lib/atomic_asm.S" to help
* assembly coders from using this register by mistake, so if it
* is changed here, change that comment as well.
*/
#define ATOMIC_LOCK_REG 20
#define ATOMIC_LOCK_REG_NAME r20
#ifndef __ASSEMBLY__
/* Called from setup to initialize a hash table to point to per_cpu locks. */
void __init_atomic_per_cpu(void);
#ifdef CONFIG_SMP
/* Support releasing the atomic lock in do_page_fault_ics(). */
void __atomic_fault_unlock(int *lock_ptr);
#endif
/* Private helper routines in lib/atomic_asm_32.S */
extern struct __get_user __atomic_cmpxchg(volatile int *p,
int *lock, int o, int n);
extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
int *lock, int o, int n);
extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n);
extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n);
extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
int *lock, u64 o, u64 n);
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_TILE_ATOMIC_32_H */

View File

@ -0,0 +1,157 @@
/*
* Copyright 2011 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
* Do not include directly; use <linux/atomic.h>.
*/
#ifndef _ASM_TILE_ATOMIC_64_H
#define _ASM_TILE_ATOMIC_64_H
#ifndef __ASSEMBLY__
#include <asm/barrier.h>
#include <arch/spr_def.h>
/* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */
#define atomic_set(v, i) ((v)->counter = (i))
/*
* The smp_mb() operations throughout are to support the fact that
* Linux requires memory barriers before and after the operation,
* on any routine which updates memory and returns a value.
*/
static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
{
int val;
__insn_mtspr(SPR_CMPEXCH_VALUE, o);
smp_mb(); /* barrier for proper semantics */
val = __insn_cmpexch4((void *)&v->counter, n);
smp_mb(); /* barrier for proper semantics */
return val;
}
static inline int atomic_xchg(atomic_t *v, int n)
{
int val;
smp_mb(); /* barrier for proper semantics */
val = __insn_exch4((void *)&v->counter, n);
smp_mb(); /* barrier for proper semantics */
return val;
}
static inline void atomic_add(int i, atomic_t *v)
{
__insn_fetchadd4((void *)&v->counter, i);
}
static inline int atomic_add_return(int i, atomic_t *v)
{
int val;
smp_mb(); /* barrier for proper semantics */
val = __insn_fetchadd4((void *)&v->counter, i) + i;
barrier(); /* the "+ i" above will wait on memory */
return val;
}
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
int guess, oldval = v->counter;
do {
if (oldval == u)
break;
guess = oldval;
oldval = atomic_cmpxchg(v, guess, guess + a);
} while (guess != oldval);
return oldval;
}
/* Now the true 64-bit operations. */
#define ATOMIC64_INIT(i) { (i) }
#define atomic64_read(v) ((v)->counter)
#define atomic64_set(v, i) ((v)->counter = (i))
static inline long atomic64_cmpxchg(atomic64_t *v, long o, long n)
{
long val;
smp_mb(); /* barrier for proper semantics */
__insn_mtspr(SPR_CMPEXCH_VALUE, o);
val = __insn_cmpexch((void *)&v->counter, n);
smp_mb(); /* barrier for proper semantics */
return val;
}
static inline long atomic64_xchg(atomic64_t *v, long n)
{
long val;
smp_mb(); /* barrier for proper semantics */
val = __insn_exch((void *)&v->counter, n);
smp_mb(); /* barrier for proper semantics */
return val;
}
static inline void atomic64_add(long i, atomic64_t *v)
{
__insn_fetchadd((void *)&v->counter, i);
}
static inline long atomic64_add_return(long i, atomic64_t *v)
{
int val;
smp_mb(); /* barrier for proper semantics */
val = __insn_fetchadd((void *)&v->counter, i) + i;
barrier(); /* the "+ i" above will wait on memory */
return val;
}
static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
{
long guess, oldval = v->counter;
do {
if (oldval == u)
break;
guess = oldval;
oldval = atomic64_cmpxchg(v, guess, guess + a);
} while (guess != oldval);
return oldval != u;
}
#define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v))
#define atomic64_sub(i, v) atomic64_add(-(i), (v))
#define atomic64_inc_return(v) atomic64_add_return(1, (v))
#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
#define atomic64_inc(v) atomic64_add(1, (v))
#define atomic64_dec(v) atomic64_sub(1, (v))
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v) (atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0)
#define atomic64_add_negative(i, v) (atomic64_add_return((i), (v)) < 0)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
/* Atomic dec and inc don't implement barrier, so provide them if needed. */
#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__after_atomic_dec() smp_mb()
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_inc() smp_mb()
/* Define this to indicate that cmpxchg is an efficient operation. */
#define __HAVE_ARCH_CMPXCHG
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_TILE_ATOMIC_64_H */

View File

@ -0,0 +1,20 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_AUXVEC_H
#define _ASM_TILE_AUXVEC_H
/* No extensions to auxvec */
#endif /* _ASM_TILE_AUXVEC_H */

View File

@ -0,0 +1,162 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_BACKTRACE_H
#define _ASM_TILE_BACKTRACE_H
#include <linux/types.h>
/* Reads 'size' bytes from 'address' and writes the data to 'result'.
* Returns true if successful, else false (e.g. memory not readable).
*/
typedef bool (*BacktraceMemoryReader)(void *result,
unsigned long address,
unsigned int size,
void *extra);
typedef struct {
/* Current PC. */
unsigned long pc;
/* Current stack pointer value. */
unsigned long sp;
/* Current frame pointer value (i.e. caller's stack pointer) */
unsigned long fp;
/* Internal use only: caller's PC for first frame. */
unsigned long initial_frame_caller_pc;
/* Internal use only: callback to read memory. */
BacktraceMemoryReader read_memory_func;
/* Internal use only: arbitrary argument to read_memory_func. */
void *read_memory_func_extra;
} BacktraceIterator;
typedef enum {
/* We have no idea what the caller's pc is. */
PC_LOC_UNKNOWN,
/* The caller's pc is currently in lr. */
PC_LOC_IN_LR,
/* The caller's pc can be found by dereferencing the caller's sp. */
PC_LOC_ON_STACK
} CallerPCLocation;
typedef enum {
/* We have no idea what the caller's sp is. */
SP_LOC_UNKNOWN,
/* The caller's sp is currently in r52. */
SP_LOC_IN_R52,
/* The caller's sp can be found by adding a certain constant
* to the current value of sp.
*/
SP_LOC_OFFSET
} CallerSPLocation;
/* Bit values ORed into CALLER_* values for info ops. */
enum {
/* Setting the low bit on any of these values means the info op
* applies only to one bundle ago.
*/
ONE_BUNDLE_AGO_FLAG = 1,
/* Setting this bit on a CALLER_SP_* value means the PC is in LR.
* If not set, PC is on the stack.
*/
PC_IN_LR_FLAG = 2,
/* This many of the low bits of a CALLER_SP_* value are for the
* flag bits above.
*/
NUM_INFO_OP_FLAGS = 2,
/* We cannot have one in the memory pipe so this is the maximum. */
MAX_INFO_OPS_PER_BUNDLE = 2
};
/* Internal constants used to define 'info' operands. */
enum {
/* 0 and 1 are reserved, as are all negative numbers. */
CALLER_UNKNOWN_BASE = 2,
CALLER_SP_IN_R52_BASE = 4,
CALLER_SP_OFFSET_BASE = 8,
};
/* Current backtracer state describing where it thinks the caller is. */
typedef struct {
/*
* Public fields
*/
/* How do we find the caller's PC? */
CallerPCLocation pc_location : 8;
/* How do we find the caller's SP? */
CallerSPLocation sp_location : 8;
/* If sp_location == SP_LOC_OFFSET, then caller_sp == sp +
* loc->sp_offset. Else this field is undefined.
*/
uint16_t sp_offset;
/* In the most recently visited bundle a terminating bundle? */
bool at_terminating_bundle;
/*
* Private fields
*/
/* Will the forward scanner see someone clobbering sp
* (i.e. changing it with something other than addi sp, sp, N?)
*/
bool sp_clobber_follows;
/* Operand to next "visible" info op (no more than one bundle past
* the next terminating bundle), or -32768 if none.
*/
int16_t next_info_operand;
/* Is the info of in next_info_op in the very next bundle? */
bool is_next_info_operand_adjacent;
} CallerLocation;
extern void backtrace_init(BacktraceIterator *state,
BacktraceMemoryReader read_memory_func,
void *read_memory_func_extra,
unsigned long pc, unsigned long lr,
unsigned long sp, unsigned long r52);
extern bool backtrace_next(BacktraceIterator *state);
#endif /* _ASM_TILE_BACKTRACE_H */

View File

@ -0,0 +1,148 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_BARRIER_H
#define _ASM_TILE_BARRIER_H
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <arch/chip.h>
#include <arch/spr_def.h>
#include <asm/timex.h>
/*
* read_barrier_depends - Flush all pending reads that subsequents reads
* depend on.
*
* No data-dependent reads from memory-like regions are ever reordered
* over this barrier. All reads preceding this primitive are guaranteed
* to access memory (but not necessarily other CPUs' caches) before any
* reads following this primitive that depend on the data return by
* any of the preceding reads. This primitive is much lighter weight than
* rmb() on most CPUs, and is never heavier weight than is
* rmb().
*
* These ordering constraints are respected by both the local CPU
* and the compiler.
*
* Ordering is not guaranteed by anything other than these primitives,
* not even by data dependencies. See the documentation for
* memory_barrier() for examples and URLs to more information.
*
* For example, the following code would force ordering (the initial
* value of "a" is zero, "b" is one, and "p" is "&a"):
*
* <programlisting>
* CPU 0 CPU 1
*
* b = 2;
* memory_barrier();
* p = &b; q = p;
* read_barrier_depends();
* d = *q;
* </programlisting>
*
* because the read of "*q" depends on the read of "p" and these
* two reads are separated by a read_barrier_depends(). However,
* the following code, with the same initial values for "a" and "b":
*
* <programlisting>
* CPU 0 CPU 1
*
* a = 2;
* memory_barrier();
* b = 3; y = b;
* read_barrier_depends();
* x = a;
* </programlisting>
*
* does not enforce ordering, since there is no data dependency between
* the read of "a" and the read of "b". Therefore, on some CPUs, such
* as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
* in cases like this where there are no data dependencies.
*/
#define read_barrier_depends() do { } while (0)
#define __sync() __insn_mf()
#if !CHIP_HAS_MF_WAITS_FOR_VICTIMS()
#include <hv/syscall_public.h>
/*
* Issue an uncacheable load to each memory controller, then
* wait until those loads have completed.
*/
static inline void __mb_incoherent(void)
{
long clobber_r10;
asm volatile("swint2"
: "=R10" (clobber_r10)
: "R10" (HV_SYS_fence_incoherent)
: "r0", "r1", "r2", "r3", "r4",
"r5", "r6", "r7", "r8", "r9",
"r11", "r12", "r13", "r14",
"r15", "r16", "r17", "r18", "r19",
"r20", "r21", "r22", "r23", "r24",
"r25", "r26", "r27", "r28", "r29");
}
#endif
/* Fence to guarantee visibility of stores to incoherent memory. */
static inline void
mb_incoherent(void)
{
__insn_mf();
#if !CHIP_HAS_MF_WAITS_FOR_VICTIMS()
{
#if CHIP_HAS_TILE_WRITE_PENDING()
const unsigned long WRITE_TIMEOUT_CYCLES = 400;
unsigned long start = get_cycles_low();
do {
if (__insn_mfspr(SPR_TILE_WRITE_PENDING) == 0)
return;
} while ((get_cycles_low() - start) < WRITE_TIMEOUT_CYCLES);
#endif /* CHIP_HAS_TILE_WRITE_PENDING() */
(void) __mb_incoherent();
}
#endif /* CHIP_HAS_MF_WAITS_FOR_VICTIMS() */
}
#define fast_wmb() __sync()
#define fast_rmb() __sync()
#define fast_mb() __sync()
#define fast_iob() mb_incoherent()
#define wmb() fast_wmb()
#define rmb() fast_rmb()
#define mb() fast_mb()
#define iob() fast_iob()
#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() read_barrier_depends()
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while (0)
#endif
#define set_mb(var, value) \
do { var = value; mb(); } while (0)
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_TILE_BARRIER_H */

View File

@ -0,0 +1,128 @@
/*
* Copyright 1992, Linus Torvalds.
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_BITOPS_H
#define _ASM_TILE_BITOPS_H
#include <linux/types.h>
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
#ifdef __tilegx__
#include <asm/bitops_64.h>
#else
#include <asm/bitops_32.h>
#endif
/**
* __ffs - find first set bit in word
* @word: The word to search
*
* Undefined if no set bit exists, so code should check against 0 first.
*/
static inline unsigned long __ffs(unsigned long word)
{
return __builtin_ctzl(word);
}
/**
* ffz - find first zero bit in word
* @word: The word to search
*
* Undefined if no zero exists, so code should check against ~0UL first.
*/
static inline unsigned long ffz(unsigned long word)
{
return __builtin_ctzl(~word);
}
/**
* __fls - find last set bit in word
* @word: The word to search
*
* Undefined if no set bit exists, so code should check against 0 first.
*/
static inline unsigned long __fls(unsigned long word)
{
return (sizeof(word) * 8) - 1 - __builtin_clzl(word);
}
/**
* ffs - find first set bit in word
* @x: the word to search
*
* This is defined the same way as the libc and compiler builtin ffs
* routines, therefore differs in spirit from the other bitops.
*
* ffs(value) returns 0 if value is 0 or the position of the first
* set bit if value is nonzero. The first (least significant) bit
* is at position 1.
*/
static inline int ffs(int x)
{
return __builtin_ffs(x);
}
/**
* fls - find last set bit in word
* @x: the word to search
*
* This is defined in a similar way as the libc and compiler builtin
* ffs, but returns the position of the most significant set bit.
*
* fls(value) returns 0 if value is 0 or the position of the last
* set bit if value is nonzero. The last (most significant) bit is
* at position 32.
*/
static inline int fls(int x)
{
return (sizeof(int) * 8) - __builtin_clz(x);
}
static inline int fls64(__u64 w)
{
return (sizeof(__u64) * 8) - __builtin_clzll(w);
}
static inline unsigned int __arch_hweight32(unsigned int w)
{
return __builtin_popcount(w);
}
static inline unsigned int __arch_hweight16(unsigned int w)
{
return __builtin_popcount(w & 0xffff);
}
static inline unsigned int __arch_hweight8(unsigned int w)
{
return __builtin_popcount(w & 0xff);
}
static inline unsigned long __arch_hweight64(__u64 w)
{
return __builtin_popcountll(w);
}
#include <asm-generic/bitops/const_hweight.h>
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/le.h>
#endif /* _ASM_TILE_BITOPS_H */

View File

@ -0,0 +1,130 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_BITOPS_32_H
#define _ASM_TILE_BITOPS_32_H
#include <linux/compiler.h>
#include <linux/atomic.h>
/* Tile-specific routines to support <asm/bitops.h>. */
unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask);
unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask);
unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask);
/**
* set_bit - Atomically set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
* This function is atomic and may not be reordered.
* See __set_bit() if you do not require the atomic guarantees.
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
static inline void set_bit(unsigned nr, volatile unsigned long *addr)
{
_atomic_or(addr + BIT_WORD(nr), BIT_MASK(nr));
}
/**
* clear_bit - Clears a bit in memory
* @nr: Bit to clear
* @addr: Address to start counting from
*
* clear_bit() is atomic and may not be reordered.
* See __clear_bit() if you do not require the atomic guarantees.
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*
* clear_bit() may not contain a memory barrier, so if it is used for
* locking purposes, you should call smp_mb__before_clear_bit() and/or
* smp_mb__after_clear_bit() to ensure changes are visible on other cpus.
*/
static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
{
_atomic_andn(addr + BIT_WORD(nr), BIT_MASK(nr));
}
/**
* change_bit - Toggle a bit in memory
* @nr: Bit to change
* @addr: Address to start counting from
*
* change_bit() is atomic and may not be reordered.
* See __change_bit() if you do not require the atomic guarantees.
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
static inline void change_bit(unsigned nr, volatile unsigned long *addr)
{
_atomic_xor(addr + BIT_WORD(nr), BIT_MASK(nr));
}
/**
* test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
addr += BIT_WORD(nr);
smp_mb(); /* barrier for proper semantics */
return (_atomic_or(addr, mask) & mask) != 0;
}
/**
* test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
addr += BIT_WORD(nr);
smp_mb(); /* barrier for proper semantics */
return (_atomic_andn(addr, mask) & mask) != 0;
}
/**
* test_and_change_bit - Change a bit and return its old value
* @nr: Bit to change
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static inline int test_and_change_bit(unsigned nr,
volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
addr += BIT_WORD(nr);
smp_mb(); /* barrier for proper semantics */
return (_atomic_xor(addr, mask) & mask) != 0;
}
/* See discussion at smp_mb__before_atomic_dec() in <asm/atomic_32.h>. */
#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() do {} while (0)
#include <asm-generic/bitops/ext2-atomic.h>
#endif /* _ASM_TILE_BITOPS_32_H */

View File

@ -0,0 +1,101 @@
/*
* Copyright 2011 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_BITOPS_64_H
#define _ASM_TILE_BITOPS_64_H
#include <linux/compiler.h>
#include <linux/atomic.h>
/* See <asm/bitops.h> for API comments. */
static inline void set_bit(unsigned nr, volatile unsigned long *addr)
{
unsigned long mask = (1UL << (nr % BITS_PER_LONG));
__insn_fetchor((void *)(addr + nr / BITS_PER_LONG), mask);
}
static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
{
unsigned long mask = (1UL << (nr % BITS_PER_LONG));
__insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask);
}
#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() smp_mb()
static inline void change_bit(unsigned nr, volatile unsigned long *addr)
{
unsigned long mask = (1UL << (nr % BITS_PER_LONG));
unsigned long guess, oldval;
addr += nr / BITS_PER_LONG;
oldval = *addr;
do {
guess = oldval;
oldval = atomic64_cmpxchg((atomic64_t *)addr,
guess, guess ^ mask);
} while (guess != oldval);
}
/*
* The test_and_xxx_bit() routines require a memory fence before we
* start the operation, and after the operation completes. We use
* smp_mb() before, and rely on the "!= 0" comparison, plus a compiler
* barrier(), to block until the atomic op is complete.
*/
static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
{
int val;
unsigned long mask = (1UL << (nr % BITS_PER_LONG));
smp_mb(); /* barrier for proper semantics */
val = (__insn_fetchor((void *)(addr + nr / BITS_PER_LONG), mask)
& mask) != 0;
barrier();
return val;
}
static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
{
int val;
unsigned long mask = (1UL << (nr % BITS_PER_LONG));
smp_mb(); /* barrier for proper semantics */
val = (__insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask)
& mask) != 0;
barrier();
return val;
}
static inline int test_and_change_bit(unsigned nr,
volatile unsigned long *addr)
{
unsigned long mask = (1UL << (nr % BITS_PER_LONG));
unsigned long guess, oldval;
addr += nr / BITS_PER_LONG;
oldval = *addr;
do {
guess = oldval;
oldval = atomic64_cmpxchg((atomic64_t *)addr,
guess, guess ^ mask);
} while (guess != oldval);
return (oldval & mask) != 0;
}
#include <asm-generic/bitops/ext2-atomic-setbit.h>
#endif /* _ASM_TILE_BITOPS_64_H */

View File

@ -0,0 +1,26 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_BITSPERLONG_H
#define _ASM_TILE_BITSPERLONG_H
#ifdef __LP64__
# define __BITS_PER_LONG 64
#else
# define __BITS_PER_LONG 32
#endif
#include <asm-generic/bitsperlong.h>
#endif /* _ASM_TILE_BITSPERLONG_H */

View File

@ -0,0 +1 @@
#include <linux/byteorder/little_endian.h>

View File

@ -0,0 +1,51 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_CACHE_H
#define _ASM_TILE_CACHE_H
#include <arch/chip.h>
/* bytes per L1 data cache line */
#define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
/* bytes per L2 cache line */
#define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
#define L2_CACHE_BYTES (1 << L2_CACHE_SHIFT)
#define L2_CACHE_ALIGN(x) (((x)+(L2_CACHE_BYTES-1)) & -L2_CACHE_BYTES)
/*
* TILE-Gx is fully coherent so we don't need to define ARCH_DMA_MINALIGN.
*/
#ifndef __tilegx__
#define ARCH_DMA_MINALIGN L2_CACHE_BYTES
#endif
/* use the cache line size for the L2, which is where it counts */
#define SMP_CACHE_BYTES_SHIFT L2_CACHE_SHIFT
#define SMP_CACHE_BYTES L2_CACHE_BYTES
#define INTERNODE_CACHE_SHIFT L2_CACHE_SHIFT
#define INTERNODE_CACHE_BYTES L2_CACHE_BYTES
/* Group together read-mostly things to avoid cache false sharing */
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
/*
* Attribute for data that is kept read/write coherent until the end of
* initialization, then bumped to read/only incoherent for performance.
*/
#define __write_once __attribute__((__section__(".w1data")))
#endif /* _ASM_TILE_CACHE_H */

View File

@ -0,0 +1,164 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_CACHEFLUSH_H
#define _ASM_TILE_CACHEFLUSH_H
#include <arch/chip.h>
/* Keep includes the same across arches. */
#include <linux/mm.h>
#include <linux/cache.h>
#include <arch/icache.h>
/* Caches are physically-indexed and so don't need special treatment */
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_dup_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_page(page) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
#define flush_icache_page(vma, pg) do { } while (0)
#define flush_icache_user_range(vma, pg, adr, len) do { } while (0)
/* Flush the icache just on this cpu */
extern void __flush_icache_range(unsigned long start, unsigned long end);
/* Flush the entire icache on this cpu. */
#define __flush_icache() __flush_icache_range(0, CHIP_L1I_CACHE_SIZE())
#ifdef CONFIG_SMP
/*
* When the kernel writes to its own text we need to do an SMP
* broadcast to make the L1I coherent everywhere. This includes
* module load and single step.
*/
extern void flush_icache_range(unsigned long start, unsigned long end);
#else
#define flush_icache_range __flush_icache_range
#endif
/*
* An update to an executable user page requires icache flushing.
* We could carefully update only tiles that are running this process,
* and rely on the fact that we flush the icache on every context
* switch to avoid doing extra work here. But for now, I'll be
* conservative and just do a global icache flush.
*/
static inline void copy_to_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr,
void *dst, void *src, int len)
{
memcpy(dst, src, len);
if (vma->vm_flags & VM_EXEC) {
flush_icache_range((unsigned long) dst,
(unsigned long) dst + len);
}
}
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy((dst), (src), (len))
/*
* Invalidate a VA range; pads to L2 cacheline boundaries.
*
* Note that on TILE64, __inv_buffer() actually flushes modified
* cache lines in addition to invalidating them, i.e., it's the
* same as __finv_buffer().
*/
static inline void __inv_buffer(void *buffer, size_t size)
{
char *next = (char *)((long)buffer & -L2_CACHE_BYTES);
char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size);
while (next < finish) {
__insn_inv(next);
next += CHIP_INV_STRIDE();
}
}
/* Flush a VA range; pads to L2 cacheline boundaries. */
static inline void __flush_buffer(void *buffer, size_t size)
{
char *next = (char *)((long)buffer & -L2_CACHE_BYTES);
char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size);
while (next < finish) {
__insn_flush(next);
next += CHIP_FLUSH_STRIDE();
}
}
/* Flush & invalidate a VA range; pads to L2 cacheline boundaries. */
static inline void __finv_buffer(void *buffer, size_t size)
{
char *next = (char *)((long)buffer & -L2_CACHE_BYTES);
char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size);
while (next < finish) {
__insn_finv(next);
next += CHIP_FINV_STRIDE();
}
}
/* Invalidate a VA range and wait for it to be complete. */
static inline void inv_buffer(void *buffer, size_t size)
{
__inv_buffer(buffer, size);
mb();
}
/*
* Flush a locally-homecached VA range and wait for the evicted
* cachelines to hit memory.
*/
static inline void flush_buffer_local(void *buffer, size_t size)
{
__flush_buffer(buffer, size);
mb_incoherent();
}
/*
* Flush and invalidate a locally-homecached VA range and wait for the
* evicted cachelines to hit memory.
*/
static inline void finv_buffer_local(void *buffer, size_t size)
{
__finv_buffer(buffer, size);
mb_incoherent();
}
/*
* Flush and invalidate a VA range that is homed remotely, waiting
* until the memory controller holds the flushed values. If "hfh" is
* true, we will do a more expensive flush involving additional loads
* to make sure we have touched all the possible home cpus of a buffer
* that is homed with "hash for home".
*/
void finv_buffer_remote(void *buffer, size_t size, int hfh);
/*
* On SMP systems, when the scheduler does migration-cost autodetection,
* it needs a way to flush as much of the CPU's caches as possible:
*
* TODO: fill this in!
*/
static inline void sched_cacheflush(void)
{
}
#endif /* _ASM_TILE_CACHEFLUSH_H */

View File

@ -0,0 +1,24 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_CHECKSUM_H
#define _ASM_TILE_CHECKSUM_H
#include <asm-generic/checksum.h>
/* Allow us to provide a more optimized do_csum(). */
__wsum do_csum(const unsigned char *buff, int len);
#define do_csum do_csum
#endif /* _ASM_TILE_CHECKSUM_H */

View File

@ -0,0 +1,73 @@
/*
* cmpxchg.h -- forked from asm/atomic.h with this copyright:
*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
*/
#ifndef _ASM_TILE_CMPXCHG_H
#define _ASM_TILE_CMPXCHG_H
#ifndef __ASSEMBLY__
/* Nonexistent functions intended to cause link errors. */
extern unsigned long __xchg_called_with_bad_pointer(void);
extern unsigned long __cmpxchg_called_with_bad_pointer(void);
#define xchg(ptr, x) \
({ \
typeof(*(ptr)) __x; \
switch (sizeof(*(ptr))) { \
case 4: \
__x = (typeof(__x))(typeof(__x-__x))atomic_xchg( \
(atomic_t *)(ptr), \
(u32)(typeof((x)-(x)))(x)); \
break; \
case 8: \
__x = (typeof(__x))(typeof(__x-__x))atomic64_xchg( \
(atomic64_t *)(ptr), \
(u64)(typeof((x)-(x)))(x)); \
break; \
default: \
__xchg_called_with_bad_pointer(); \
} \
__x; \
})
#define cmpxchg(ptr, o, n) \
({ \
typeof(*(ptr)) __x; \
switch (sizeof(*(ptr))) { \
case 4: \
__x = (typeof(__x))(typeof(__x-__x))atomic_cmpxchg( \
(atomic_t *)(ptr), \
(u32)(typeof((o)-(o)))(o), \
(u32)(typeof((n)-(n)))(n)); \
break; \
case 8: \
__x = (typeof(__x))(typeof(__x-__x))atomic64_cmpxchg( \
(atomic64_t *)(ptr), \
(u64)(typeof((o)-(o)))(o), \
(u64)(typeof((n)-(n)))(n)); \
break; \
default: \
__cmpxchg_called_with_bad_pointer(); \
} \
__x; \
})
#define tas(ptr) (xchg((ptr), 1))
#endif /* __ASSEMBLY__ */
#endif /* _ASM_TILE_CMPXCHG_H */

View File

@ -0,0 +1,256 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_COMPAT_H
#define _ASM_TILE_COMPAT_H
/*
* Architecture specific compatibility types
*/
#include <linux/types.h>
#include <linux/sched.h>
#define COMPAT_USER_HZ 100
/* "long" and pointer-based types are different. */
typedef s32 compat_long_t;
typedef u32 compat_ulong_t;
typedef u32 compat_size_t;
typedef s32 compat_ssize_t;
typedef s32 compat_off_t;
typedef s32 compat_time_t;
typedef s32 compat_clock_t;
typedef u32 compat_ino_t;
typedef u32 compat_caddr_t;
typedef u32 compat_uptr_t;
/* Many types are "int" or otherwise the same. */
typedef __kernel_pid_t compat_pid_t;
typedef __kernel_uid_t __compat_uid_t;
typedef __kernel_gid_t __compat_gid_t;
typedef __kernel_uid32_t __compat_uid32_t;
typedef __kernel_uid32_t __compat_gid32_t;
typedef __kernel_mode_t compat_mode_t;
typedef __kernel_dev_t compat_dev_t;
typedef __kernel_loff_t compat_loff_t;
typedef __kernel_nlink_t compat_nlink_t;
typedef __kernel_ipc_pid_t compat_ipc_pid_t;
typedef __kernel_daddr_t compat_daddr_t;
typedef __kernel_fsid_t compat_fsid_t;
typedef __kernel_timer_t compat_timer_t;
typedef __kernel_key_t compat_key_t;
typedef int compat_int_t;
typedef s64 compat_s64;
typedef uint compat_uint_t;
typedef u64 compat_u64;
/* We use the same register dump format in 32-bit images. */
typedef unsigned long compat_elf_greg_t;
#define COMPAT_ELF_NGREG (sizeof(struct pt_regs) / sizeof(compat_elf_greg_t))
typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG];
struct compat_timespec {
compat_time_t tv_sec;
s32 tv_nsec;
};
struct compat_timeval {
compat_time_t tv_sec;
s32 tv_usec;
};
#define compat_stat stat
#define compat_statfs statfs
struct compat_sysctl {
unsigned int name;
int nlen;
unsigned int oldval;
unsigned int oldlenp;
unsigned int newval;
unsigned int newlen;
unsigned int __unused[4];
};
struct compat_flock {
short l_type;
short l_whence;
compat_off_t l_start;
compat_off_t l_len;
compat_pid_t l_pid;
};
#define F_GETLK64 12 /* using 'struct flock64' */
#define F_SETLK64 13
#define F_SETLKW64 14
struct compat_flock64 {
short l_type;
short l_whence;
compat_loff_t l_start;
compat_loff_t l_len;
compat_pid_t l_pid;
};
#define COMPAT_RLIM_INFINITY 0xffffffff
#define _COMPAT_NSIG 64
#define _COMPAT_NSIG_BPW 32
typedef u32 compat_sigset_word;
#define COMPAT_OFF_T_MAX 0x7fffffff
#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
struct compat_ipc64_perm {
compat_key_t key;
__compat_uid32_t uid;
__compat_gid32_t gid;
__compat_uid32_t cuid;
__compat_gid32_t cgid;
unsigned short mode;
unsigned short __pad1;
unsigned short seq;
unsigned short __pad2;
compat_ulong_t unused1;
compat_ulong_t unused2;
};
struct compat_semid64_ds {
struct compat_ipc64_perm sem_perm;
compat_time_t sem_otime;
compat_ulong_t __unused1;
compat_time_t sem_ctime;
compat_ulong_t __unused2;
compat_ulong_t sem_nsems;
compat_ulong_t __unused3;
compat_ulong_t __unused4;
};
struct compat_msqid64_ds {
struct compat_ipc64_perm msg_perm;
compat_time_t msg_stime;
compat_ulong_t __unused1;
compat_time_t msg_rtime;
compat_ulong_t __unused2;
compat_time_t msg_ctime;
compat_ulong_t __unused3;
compat_ulong_t msg_cbytes;
compat_ulong_t msg_qnum;
compat_ulong_t msg_qbytes;
compat_pid_t msg_lspid;
compat_pid_t msg_lrpid;
compat_ulong_t __unused4;
compat_ulong_t __unused5;
};
struct compat_shmid64_ds {
struct compat_ipc64_perm shm_perm;
compat_size_t shm_segsz;
compat_time_t shm_atime;
compat_ulong_t __unused1;
compat_time_t shm_dtime;
compat_ulong_t __unused2;
compat_time_t shm_ctime;
compat_ulong_t __unused3;
compat_pid_t shm_cpid;
compat_pid_t shm_lpid;
compat_ulong_t shm_nattch;
compat_ulong_t __unused4;
compat_ulong_t __unused5;
};
/*
* A pointer passed in from user mode. This should not
* be used for syscall parameters, just declare them
* as pointers because the syscall entry code will have
* appropriately converted them already.
*/
static inline void __user *compat_ptr(compat_uptr_t uptr)
{
return (void __user *)(long)(s32)uptr;
}
static inline compat_uptr_t ptr_to_compat(void __user *uptr)
{
return (u32)(unsigned long)uptr;
}
/* Sign-extend when storing a kernel pointer to a user's ptregs. */
static inline unsigned long ptr_to_compat_reg(void __user *uptr)
{
return (long)(int)(long __force)uptr;
}
static inline void __user *arch_compat_alloc_user_space(long len)
{
struct pt_regs *regs = task_pt_regs(current);
return (void __user *)regs->sp - len;
}
static inline int is_compat_task(void)
{
return current_thread_info()->status & TS_COMPAT;
}
extern int compat_setup_rt_frame(int sig, struct k_sigaction *ka,
siginfo_t *info, sigset_t *set,
struct pt_regs *regs);
/* Compat syscalls. */
struct compat_sigaction;
struct compat_siginfo;
struct compat_sigaltstack;
long compat_sys_execve(const char __user *path,
compat_uptr_t __user *argv,
compat_uptr_t __user *envp, struct pt_regs *);
long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act,
struct compat_sigaction __user *oact,
size_t sigsetsize);
long compat_sys_rt_sigqueueinfo(int pid, int sig,
struct compat_siginfo __user *uinfo);
long compat_sys_rt_sigreturn(struct pt_regs *);
long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
struct compat_sigaltstack __user *uoss_ptr,
struct pt_regs *);
long compat_sys_truncate64(char __user *filename, u32 dummy, u32 low, u32 high);
long compat_sys_ftruncate64(unsigned int fd, u32 dummy, u32 low, u32 high);
long compat_sys_pread64(unsigned int fd, char __user *ubuf, size_t count,
u32 dummy, u32 low, u32 high);
long compat_sys_pwrite64(unsigned int fd, char __user *ubuf, size_t count,
u32 dummy, u32 low, u32 high);
long compat_sys_lookup_dcookie(u32 low, u32 high, char __user *buf, size_t len);
long compat_sys_sync_file_range2(int fd, unsigned int flags,
u32 offset_lo, u32 offset_hi,
u32 nbytes_lo, u32 nbytes_hi);
long compat_sys_fallocate(int fd, int mode,
u32 offset_lo, u32 offset_hi,
u32 len_lo, u32 len_hi);
long compat_sys_sched_rr_get_interval(compat_pid_t pid,
struct compat_timespec __user *interval);
/* Tilera Linux syscalls that don't have "compat" versions. */
#define compat_sys_flush_cache sys_flush_cache
/* These are the intvec_64.S trampolines. */
long _compat_sys_execve(const char __user *path,
const compat_uptr_t __user *argv,
const compat_uptr_t __user *envp);
long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
struct compat_sigaltstack __user *uoss_ptr);
long _compat_sys_rt_sigreturn(void);
#endif /* _ASM_TILE_COMPAT_H */

View File

@ -0,0 +1,31 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_CURRENT_H
#define _ASM_TILE_CURRENT_H
#include <linux/thread_info.h>
struct task_struct;
static inline struct task_struct *get_current(void)
{
return current_thread_info()->task;
}
#define current get_current()
/* Return a usable "task_struct" pointer even if the real one is corrupt. */
struct task_struct *validate_current(void);
#endif /* _ASM_TILE_CURRENT_H */

View File

@ -0,0 +1,34 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_DELAY_H
#define _ASM_TILE_DELAY_H
/* Undefined functions to get compile-time errors. */
extern void __bad_udelay(void);
extern void __bad_ndelay(void);
extern void __udelay(unsigned long usecs);
extern void __ndelay(unsigned long nsecs);
extern void __delay(unsigned long loops);
#define udelay(n) (__builtin_constant_p(n) ? \
((n) > 20000 ? __bad_udelay() : __ndelay((n) * 1000)) : \
__udelay(n))
#define ndelay(n) (__builtin_constant_p(n) ? \
((n) > 20000 ? __bad_ndelay() : __ndelay(n)) : \
__ndelay(n))
#endif /* _ASM_TILE_DELAY_H */

View File

@ -0,0 +1,94 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_DMA_MAPPING_H
#define _ASM_TILE_DMA_MAPPING_H
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/cache.h>
#include <linux/io.h>
/*
* Note that on x86 and powerpc, there is a "struct dma_mapping_ops"
* that is used for all the DMA operations. For now, we don't have an
* equivalent on tile, because we only have a single way of doing DMA.
* (Tilera bug 7994 to use dma_mapping_ops.)
*/
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
extern dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
enum dma_data_direction);
extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction);
extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction);
extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int nhwentries, enum dma_data_direction);
extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction);
extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
size_t size, enum dma_data_direction);
extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
int nelems, enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
int nelems, enum dma_data_direction);
void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag);
void dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle);
extern void dma_sync_single_for_cpu(struct device *, dma_addr_t, size_t,
enum dma_data_direction);
extern void dma_sync_single_for_device(struct device *, dma_addr_t,
size_t, enum dma_data_direction);
extern void dma_sync_single_range_for_cpu(struct device *, dma_addr_t,
unsigned long offset, size_t,
enum dma_data_direction);
extern void dma_sync_single_range_for_device(struct device *, dma_addr_t,
unsigned long offset, size_t,
enum dma_data_direction);
extern void dma_cache_sync(struct device *dev, void *vaddr, size_t,
enum dma_data_direction);
static inline int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return 0;
}
static inline int
dma_supported(struct device *dev, u64 mask)
{
return 1;
}
static inline int
dma_set_mask(struct device *dev, u64 mask)
{
if (!dev->dma_mask || !dma_supported(dev, mask))
return -EIO;
*dev->dma_mask = mask;
return 0;
}
#endif /* _ASM_TILE_DMA_MAPPING_H */

View File

@ -0,0 +1,25 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_DMA_H
#define _ASM_TILE_DMA_H
#include <asm-generic/dma.h>
/* Needed by drivers/pci/quirks.c */
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#endif
#endif /* _ASM_TILE_DMA_H */

View File

@ -0,0 +1,29 @@
/*
* Copyright 2011 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_EDAC_H
#define _ASM_TILE_EDAC_H
/* ECC atomic, DMA, SMP and interrupt safe scrub function */
static inline void atomic_scrub(void *va, u32 size)
{
/*
* These is nothing to be done here because CE is
* corrected by the mshim.
*/
return;
}
#endif /* _ASM_TILE_EDAC_H */

View File

@ -0,0 +1,167 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_ELF_H
#define _ASM_TILE_ELF_H
/*
* ELF register definitions.
*/
#include <arch/chip.h>
#include <linux/ptrace.h>
#include <asm/byteorder.h>
#include <asm/page.h>
typedef unsigned long elf_greg_t;
#define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
#define EM_TILE64 187
#define EM_TILEPRO 188
#define EM_TILEGX 191
/* Provide a nominal data structure. */
#define ELF_NFPREG 0
typedef double elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#ifdef __tilegx__
#define ELF_CLASS ELFCLASS64
#else
#define ELF_CLASS ELFCLASS32
#endif
#define ELF_DATA ELFDATA2LSB
/*
* There seems to be a bug in how compat_binfmt_elf.c works: it
* #undefs ELF_ARCH, but it is then used in binfmt_elf.c for fill_note_info().
* Hack around this by providing an enum value of ELF_ARCH.
*/
enum { ELF_ARCH = CHIP_ELF_TYPE() };
#define ELF_ARCH ELF_ARCH
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(x) \
((x)->e_ident[EI_CLASS] == ELF_CLASS && \
(x)->e_machine == CHIP_ELF_TYPE())
/* The module loader only handles a few relocation types. */
#ifndef __tilegx__
#define R_TILE_32 1
#define R_TILE_JOFFLONG_X1 15
#define R_TILE_IMM16_X0_LO 25
#define R_TILE_IMM16_X1_LO 26
#define R_TILE_IMM16_X0_HA 29
#define R_TILE_IMM16_X1_HA 30
#else
#define R_TILEGX_64 1
#define R_TILEGX_JUMPOFF_X1 21
#define R_TILEGX_IMM16_X0_HW0 36
#define R_TILEGX_IMM16_X1_HW0 37
#define R_TILEGX_IMM16_X0_HW1 38
#define R_TILEGX_IMM16_X1_HW1 39
#define R_TILEGX_IMM16_X0_HW2_LAST 48
#define R_TILEGX_IMM16_X1_HW2_LAST 49
#endif
/* Use standard page size for core dumps. */
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/*
* This is the location that an ET_DYN program is loaded if exec'ed. Typical
* use of this is to invoke "./ld.so someprog" to test out a new version of
* the loader. We need to make sure that it is out of the way of the program
* that it will "exec", and that there is sufficient room for the brk.
*/
#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
#define ELF_CORE_COPY_REGS(_dest, _regs) \
memcpy((char *) &_dest, (char *) _regs, \
sizeof(struct pt_regs));
/* No additional FP registers to copy. */
#define ELF_CORE_COPY_FPREGS(t, fpu) 0
/*
* This yields a mask that user programs can use to figure out what
* instruction set this CPU supports. This could be done in user space,
* but it's not easy, and we've already done it here.
*/
#define ELF_HWCAP (0)
/*
* This yields a string that ld.so will use to load implementation
* specific libraries for optimization. This is more specific in
* intent than poking at uname or /proc/cpuinfo.
*/
#define ELF_PLATFORM (NULL)
extern void elf_plat_init(struct pt_regs *regs, unsigned long load_addr);
#define ELF_PLAT_INIT(_r, load_addr) elf_plat_init(_r, load_addr)
extern int dump_task_regs(struct task_struct *, elf_gregset_t *);
#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs)
/* Tilera Linux has no personalities currently, so no need to do anything. */
#define SET_PERSONALITY(ex) do { } while (0)
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
/* Support auto-mapping of the user interrupt vectors. */
struct linux_binprm;
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
int executable_stack);
#ifdef CONFIG_COMPAT
#define COMPAT_ELF_PLATFORM "tilegx-m32"
/*
* "Compat" binaries have the same machine type, but 32-bit class,
* since they're not a separate machine type, but just a 32-bit
* variant of the standard 64-bit architecture.
*/
#define compat_elf_check_arch(x) \
((x)->e_ident[EI_CLASS] == ELFCLASS32 && \
(x)->e_machine == CHIP_ELF_TYPE())
#define compat_start_thread(regs, ip, usp) do { \
regs->pc = ptr_to_compat_reg((void *)(ip)); \
regs->sp = ptr_to_compat_reg((void *)(usp)); \
} while (0)
/*
* Use SET_PERSONALITY to indicate compatibility via TS_COMPAT.
*/
#undef SET_PERSONALITY
#define SET_PERSONALITY(ex) \
do { \
current->personality = PER_LINUX; \
current_thread_info()->status &= ~TS_COMPAT; \
} while (0)
#define COMPAT_SET_PERSONALITY(ex) \
do { \
current->personality = PER_LINUX_32BIT; \
current_thread_info()->status |= TS_COMPAT; \
} while (0)
#define COMPAT_ELF_ET_DYN_BASE (0xffffffff / 3 * 2)
#endif /* CONFIG_COMPAT */
#endif /* _ASM_TILE_ELF_H */

View File

@ -0,0 +1,20 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_EXEC_H
#define _ASM_TILE_EXEC_H
#define arch_align_stack(x) (x)
#endif /* _ASM_TILE_EXEC_H */

View File

@ -0,0 +1,118 @@
/*
* Copyright (C) 1998 Ingo Molnar
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_FIXMAP_H
#define _ASM_TILE_FIXMAP_H
#include <asm/page.h>
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
#ifdef CONFIG_HIGHMEM
#include <linux/threads.h>
#include <asm/kmap_types.h>
#endif
#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
/*
* Here we define all the compile-time 'special' virtual
* addresses. The point is to have a constant address at
* compile time, but to set the physical address only
* in the boot process. We allocate these special addresses
* from the end of supervisor virtual memory backwards.
* Also this lets us do fail-safe vmalloc(), we
* can guarantee that these special addresses and
* vmalloc()-ed addresses never overlap.
*
* these 'compile-time allocated' memory buffers are
* fixed-size 4k pages. (or larger if used with an increment
* higher than 1) use fixmap_set(idx,phys) to associate
* physical memory with fixmap indices.
*
* TLB entries of such buffers will not be flushed across
* task switches.
*
* We don't bother with a FIX_HOLE since above the fixmaps
* is unmapped memory in any case.
*/
enum fixed_addresses {
#ifdef CONFIG_HIGHMEM
FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
#endif
__end_of_permanent_fixed_addresses,
/*
* Temporary boot-time mappings, used before ioremap() is functional.
* Not currently needed by the Tile architecture.
*/
#define NR_FIX_BTMAPS 0
#if NR_FIX_BTMAPS
FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1,
__end_of_fixed_addresses
#else
__end_of_fixed_addresses = __end_of_permanent_fixed_addresses
#endif
};
extern void __set_fixmap(enum fixed_addresses idx,
unsigned long phys, pgprot_t flags);
#define set_fixmap(idx, phys) \
__set_fixmap(idx, phys, PAGE_KERNEL)
#define clear_fixmap(idx) \
__set_fixmap(idx, 0, __pgprot(0))
#define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
#define __FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP + PAGE_SIZE - __FIXADDR_SIZE)
#define FIXADDR_BOOT_START (FIXADDR_TOP + PAGE_SIZE - __FIXADDR_BOOT_SIZE)
extern void __this_fixmap_does_not_exist(void);
/*
* 'index to address' translation. If anyone tries to use the idx
* directly without tranlation, we catch the bug with a NULL-deference
* kernel oops. Illegal ranges of incoming indices are caught too.
*/
static __always_inline unsigned long fix_to_virt(const unsigned int idx)
{
/*
* this branch gets completely eliminated after inlining,
* except when someone tries to use fixaddr indices in an
* illegal way. (such as mixing up address types or using
* out-of-range indices).
*
* If it doesn't get removed, the linker will complain
* loudly with a reasonably clear error message..
*/
if (idx >= __end_of_fixed_addresses)
__this_fixmap_does_not_exist();
return __fix_to_virt(idx);
}
static inline unsigned long virt_to_fix(const unsigned long vaddr)
{
BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
return __virt_to_fix(vaddr);
}
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_TILE_FIXMAP_H */

View File

@ -0,0 +1,20 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_FTRACE_H
#define _ASM_TILE_FTRACE_H
/* empty */
#endif /* _ASM_TILE_FTRACE_H */

View File

@ -0,0 +1,142 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
* These routines make two important assumptions:
*
* 1. atomic_t is really an int and can be freely cast back and forth
* (validated in __init_atomic_per_cpu).
*
* 2. userspace uses sys_cmpxchg() for all atomic operations, thus using
* the same locking convention that all the kernel atomic routines use.
*/
#ifndef _ASM_TILE_FUTEX_H
#define _ASM_TILE_FUTEX_H
#ifndef __ASSEMBLY__
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
extern struct __get_user futex_set(u32 __user *v, int i);
extern struct __get_user futex_add(u32 __user *v, int n);
extern struct __get_user futex_or(u32 __user *v, int n);
extern struct __get_user futex_andn(u32 __user *v, int n);
extern struct __get_user futex_cmpxchg(u32 __user *v, int o, int n);
#ifndef __tilegx__
extern struct __get_user futex_xor(u32 __user *v, int n);
#else
static inline struct __get_user futex_xor(u32 __user *uaddr, int n)
{
struct __get_user asm_ret = __get_user_4(uaddr);
if (!asm_ret.err) {
int oldval, newval;
do {
oldval = asm_ret.val;
newval = oldval ^ n;
asm_ret = futex_cmpxchg(uaddr, oldval, newval);
} while (asm_ret.err == 0 && oldval != asm_ret.val);
}
return asm_ret;
}
#endif
static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
int oparg = (encoded_op << 8) >> 20;
int cmparg = (encoded_op << 20) >> 20;
int ret;
struct __get_user asm_ret;
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
asm_ret = futex_set(uaddr, oparg);
break;
case FUTEX_OP_ADD:
asm_ret = futex_add(uaddr, oparg);
break;
case FUTEX_OP_OR:
asm_ret = futex_or(uaddr, oparg);
break;
case FUTEX_OP_ANDN:
asm_ret = futex_andn(uaddr, oparg);
break;
case FUTEX_OP_XOR:
asm_ret = futex_xor(uaddr, oparg);
break;
default:
asm_ret.err = -ENOSYS;
}
pagefault_enable();
ret = asm_ret.err;
if (!ret) {
switch (cmp) {
case FUTEX_OP_CMP_EQ:
ret = (asm_ret.val == cmparg);
break;
case FUTEX_OP_CMP_NE:
ret = (asm_ret.val != cmparg);
break;
case FUTEX_OP_CMP_LT:
ret = (asm_ret.val < cmparg);
break;
case FUTEX_OP_CMP_GE:
ret = (asm_ret.val >= cmparg);
break;
case FUTEX_OP_CMP_LE:
ret = (asm_ret.val <= cmparg);
break;
case FUTEX_OP_CMP_GT:
ret = (asm_ret.val > cmparg);
break;
default:
ret = -ENOSYS;
}
}
return ret;
}
static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
struct __get_user asm_ret;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
asm_ret = futex_cmpxchg(uaddr, oldval, newval);
*uval = asm_ret.val;
return asm_ret.err;
}
#ifndef __tilegx__
/* Return failure from the atomic wrappers. */
struct __get_user __atomic_bad_address(int __user *addr);
#endif
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_TILE_FUTEX_H */

View File

@ -0,0 +1,47 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_HARDIRQ_H
#define _ASM_TILE_HARDIRQ_H
#include <linux/threads.h>
#include <linux/cache.h>
#include <asm/irq.h>
typedef struct {
unsigned int __softirq_pending;
long idle_timestamp;
/* Hard interrupt statistics. */
unsigned int irq_timer_count;
unsigned int irq_syscall_count;
unsigned int irq_resched_count;
unsigned int irq_hv_flush_count;
unsigned int irq_call_count;
unsigned int irq_hv_msg_count;
unsigned int irq_dev_intr_count;
} ____cacheline_aligned irq_cpustat_t;
DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
#define __ARCH_IRQ_STAT
#define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member)
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
#define HARDIRQ_BITS 8
#endif /* _ASM_TILE_HARDIRQ_H */

View File

@ -0,0 +1,65 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
* Provide methods for the HARDWALL_FILE for accessing the UDN.
*/
#ifndef _ASM_TILE_HARDWALL_H
#define _ASM_TILE_HARDWALL_H
#include <linux/ioctl.h>
#define HARDWALL_IOCTL_BASE 0xa2
/*
* The HARDWALL_CREATE() ioctl is a macro with a "size" argument.
* The resulting ioctl value is passed to the kernel in conjunction
* with a pointer to a little-endian bitmask of cpus, which must be
* physically in a rectangular configuration on the chip.
* The "size" is the number of bytes of cpu mask data.
*/
#define _HARDWALL_CREATE 1
#define HARDWALL_CREATE(size) \
_IOC(_IOC_READ, HARDWALL_IOCTL_BASE, _HARDWALL_CREATE, (size))
#define _HARDWALL_ACTIVATE 2
#define HARDWALL_ACTIVATE \
_IO(HARDWALL_IOCTL_BASE, _HARDWALL_ACTIVATE)
#define _HARDWALL_DEACTIVATE 3
#define HARDWALL_DEACTIVATE \
_IO(HARDWALL_IOCTL_BASE, _HARDWALL_DEACTIVATE)
#define _HARDWALL_GET_ID 4
#define HARDWALL_GET_ID \
_IO(HARDWALL_IOCTL_BASE, _HARDWALL_GET_ID)
#ifndef __KERNEL__
/* This is the canonical name expected by userspace. */
#define HARDWALL_FILE "/dev/hardwall"
#else
/* /proc hooks for hardwall. */
struct proc_dir_entry;
#ifdef CONFIG_HARDWALL
void proc_tile_hardwall_init(struct proc_dir_entry *root);
int proc_pid_hardwall(struct task_struct *task, char *buffer);
#else
static inline void proc_tile_hardwall_init(struct proc_dir_entry *root) {}
#endif
#endif
#endif /* _ASM_TILE_HARDWALL_H */

View File

@ -0,0 +1,72 @@
/*
* Copyright (C) 1999 Gerhard Wichert, Siemens AG
* Gerhard.Wichert@pdb.siemens.de
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
* Used in CONFIG_HIGHMEM systems for memory pages which
* are not addressable by direct kernel virtual addresses.
*
*/
#ifndef _ASM_TILE_HIGHMEM_H
#define _ASM_TILE_HIGHMEM_H
#include <linux/interrupt.h>
#include <linux/threads.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>
/* declarations for highmem.c */
extern unsigned long highstart_pfn, highend_pfn;
extern pte_t *pkmap_page_table;
/*
* Ordering is:
*
* FIXADDR_TOP
* fixed_addresses
* FIXADDR_START
* temp fixed addresses
* FIXADDR_BOOT_START
* Persistent kmap area
* PKMAP_BASE
* VMALLOC_END
* Vmalloc area
* VMALLOC_START
* high_memory
*/
#define LAST_PKMAP_MASK (LAST_PKMAP-1)
#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
void *kmap_high(struct page *page);
void kunmap_high(struct page *page);
void *kmap(struct page *page);
void kunmap(struct page *page);
void *kmap_fix_kpte(struct page *page, int finished);
/* This macro is used only in map_new_virtual() to map "page". */
#define kmap_prot page_to_kpgprot(page)
void *kmap_atomic(struct page *page);
void __kunmap_atomic(void *kvaddr);
void *kmap_atomic_pfn(unsigned long pfn);
void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
struct page *kmap_atomic_to_page(void *ptr);
void *kmap_atomic_prot(struct page *page, pgprot_t prot);
void kmap_atomic_fix_kpte(struct page *page, int finished);
#define flush_cache_kmaps() do { } while (0)
#endif /* _ASM_TILE_HIGHMEM_H */

View File

@ -0,0 +1,125 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
* Handle issues around the Tile "home cache" model of coherence.
*/
#ifndef _ASM_TILE_HOMECACHE_H
#define _ASM_TILE_HOMECACHE_H
#include <asm/page.h>
#include <linux/cpumask.h>
struct page;
struct task_struct;
struct vm_area_struct;
struct zone;
/*
* Coherence point for the page is its memory controller.
* It is not present in any cache (L1 or L2).
*/
#define PAGE_HOME_UNCACHED -1
/*
* Is this page immutable (unwritable) and thus able to be cached more
* widely than would otherwise be possible? On tile64 this means we
* mark the PTE to cache locally; on tilepro it means we have "nc" set.
*/
#define PAGE_HOME_IMMUTABLE -2
/*
* Each cpu considers its own cache to be the home for the page,
* which makes it incoherent.
*/
#define PAGE_HOME_INCOHERENT -3
#if CHIP_HAS_CBOX_HOME_MAP()
/* Home for the page is distributed via hash-for-home. */
#define PAGE_HOME_HASH -4
#endif
/* Homing is unknown or unspecified. Not valid for page_home(). */
#define PAGE_HOME_UNKNOWN -5
/* Home on the current cpu. Not valid for page_home(). */
#define PAGE_HOME_HERE -6
/* Support wrapper to use instead of explicit hv_flush_remote(). */
extern void flush_remote(unsigned long cache_pfn, unsigned long cache_length,
const struct cpumask *cache_cpumask,
HV_VirtAddr tlb_va, unsigned long tlb_length,
unsigned long tlb_pgsize,
const struct cpumask *tlb_cpumask,
HV_Remote_ASID *asids, int asidcount);
/* Set homing-related bits in a PTE (can also pass a pgprot_t). */
extern pte_t pte_set_home(pte_t pte, int home);
/* Do a cache eviction on the specified cpus. */
extern void homecache_evict(const struct cpumask *mask);
/*
* Change a kernel page's homecache. It must not be mapped in user space.
* If !CONFIG_HOMECACHE, only usable on LOWMEM, and can only be called when
* no other cpu can reference the page, and causes a full-chip cache/TLB flush.
*/
extern void homecache_change_page_home(struct page *, int order, int home);
/*
* Flush a page out of whatever cache(s) it is in.
* This is more than just finv, since it properly handles waiting
* for the data to reach memory on tilepro, but it can be quite
* heavyweight, particularly on hash-for-home memory.
*/
extern void homecache_flush_cache(struct page *, int order);
/*
* Allocate a page with the given GFP flags, home, and optionally
* node. These routines are actually just wrappers around the normal
* alloc_pages() / alloc_pages_node() functions, which set and clear
* a per-cpu variable to communicate with homecache_new_kernel_page().
* If !CONFIG_HOMECACHE, uses homecache_change_page_home().
*/
extern struct page *homecache_alloc_pages(gfp_t gfp_mask,
unsigned int order, int home);
extern struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask,
unsigned int order, int home);
#define homecache_alloc_page(gfp_mask, home) \
homecache_alloc_pages(gfp_mask, 0, home)
/*
* These routines are just pass-throughs to free_pages() when
* we support full homecaching. If !CONFIG_HOMECACHE, then these
* routines use homecache_change_page_home() to reset the home
* back to the default before returning the page to the allocator.
*/
void homecache_free_pages(unsigned long addr, unsigned int order);
#define homecache_free_page(page) \
homecache_free_pages((page), 0)
/*
* Report the page home for LOWMEM pages by examining their kernel PTE,
* or for highmem pages as the default home.
*/
extern int page_home(struct page *);
#define homecache_migrate_kthread() do {} while (0)
#define homecache_kpte_lock() 0
#define homecache_kpte_unlock(flags) do {} while (0)
#endif /* _ASM_TILE_HOMECACHE_H */

View File

@ -0,0 +1,109 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_HUGETLB_H
#define _ASM_TILE_HUGETLB_H
#include <asm/page.h>
static inline int is_hugepage_only_range(struct mm_struct *mm,
unsigned long addr,
unsigned long len) {
return 0;
}
/*
* If the arch doesn't supply something else, assume that hugepage
* size aligned regions are ok without further preparation.
*/
static inline int prepare_hugepage_range(struct file *file,
unsigned long addr, unsigned long len)
{
struct hstate *h = hstate_file(file);
if (len & ~huge_page_mask(h))
return -EINVAL;
if (addr & ~huge_page_mask(h))
return -EINVAL;
return 0;
}
static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
}
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
unsigned long addr, unsigned long end,
unsigned long floor,
unsigned long ceiling)
{
free_pgd_range(tlb, addr, end, floor, ceiling);
}
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
set_pte(ptep, pte);
}
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
return ptep_get_and_clear(mm, addr, ptep);
}
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
ptep_clear_flush(vma, addr, ptep);
}
static inline int huge_pte_none(pte_t pte)
{
return pte_none(pte);
}
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
return pte_wrprotect(pte);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
ptep_set_wrprotect(mm, addr, ptep);
}
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty)
{
return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
}
static inline pte_t huge_ptep_get(pte_t *ptep)
{
return *ptep;
}
static inline int arch_prepare_hugepage(struct page *page)
{
return 0;
}
static inline void arch_release_hugepage(struct page *page)
{
}
#endif /* _ASM_TILE_HUGETLB_H */

View File

@ -0,0 +1,60 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
* This header defines a wrapper interface for managing hypervisor
* device calls that will result in an interrupt at some later time.
* In particular, this provides wrappers for hv_preada() and
* hv_pwritea().
*/
#ifndef _ASM_TILE_HV_DRIVER_H
#define _ASM_TILE_HV_DRIVER_H
#include <hv/hypervisor.h>
struct hv_driver_cb;
/* A callback to be invoked when an operation completes. */
typedef void hv_driver_callback_t(struct hv_driver_cb *cb, __hv32 result);
/*
* A structure to hold information about an outstanding call.
* The driver must allocate a separate structure for each call.
*/
struct hv_driver_cb {
hv_driver_callback_t *callback; /* Function to call on interrupt. */
void *dev; /* Driver-specific state variable. */
};
/* Wrapper for invoking hv_dev_preada(). */
static inline int
tile_hv_dev_preada(int devhdl, __hv32 flags, __hv32 sgl_len,
HV_SGL sgl[/* sgl_len */], __hv64 offset,
struct hv_driver_cb *callback)
{
return hv_dev_preada(devhdl, flags, sgl_len, sgl,
offset, (HV_IntArg)callback);
}
/* Wrapper for invoking hv_dev_pwritea(). */
static inline int
tile_hv_dev_pwritea(int devhdl, __hv32 flags, __hv32 sgl_len,
HV_SGL sgl[/* sgl_len */], __hv64 offset,
struct hv_driver_cb *callback)
{
return hv_dev_pwritea(devhdl, flags, sgl_len, sgl,
offset, (HV_IntArg)callback);
}
#endif /* _ASM_TILE_HV_DRIVER_H */

View File

@ -0,0 +1,18 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_HW_IRQ_H
#define _ASM_TILE_HW_IRQ_H
#endif /* _ASM_TILE_HW_IRQ_H */

View File

@ -0,0 +1,25 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_IDE_H
#define _ASM_TILE_IDE_H
/* For IDE on PCI */
#define MAX_HWIFS 10
#define ide_default_io_ctl(base) (0)
#include <asm-generic/ide_iops.h>
#endif /* _ASM_TILE_IDE_H */

View File

@ -0,0 +1,305 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_IO_H
#define _ASM_TILE_IO_H
#include <linux/kernel.h>
#include <linux/bug.h>
#include <asm/page.h>
#define IO_SPACE_LIMIT 0xfffffffful
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access.
*/
#define xlate_dev_mem_ptr(p) __va(p)
/*
* Convert a virtual cached pointer to an uncached pointer.
*/
#define xlate_dev_kmem_ptr(p) p
/*
* Change "struct page" to physical address.
*/
#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
/*
* Some places try to pass in an loff_t for PHYSADDR (?!), so we cast it to
* long before casting it to a pointer to avoid compiler warnings.
*/
#if CHIP_HAS_MMIO()
extern void __iomem *ioremap(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
pgprot_t pgprot);
extern void iounmap(volatile void __iomem *addr);
#else
#define ioremap(physaddr, size) ((void __iomem *)(unsigned long)(physaddr))
#define iounmap(addr) ((void)0)
#endif
#define ioremap_nocache(physaddr, size) ioremap(physaddr, size)
#define ioremap_wc(physaddr, size) ioremap(physaddr, size)
#define ioremap_writethrough(physaddr, size) ioremap(physaddr, size)
#define ioremap_fullcache(physaddr, size) ioremap(physaddr, size)
#define mmiowb()
/* Conversion between virtual and physical mappings. */
#define mm_ptov(addr) ((void *)phys_to_virt(addr))
#define mm_vtop(addr) ((unsigned long)virt_to_phys(addr))
#ifdef CONFIG_PCI
extern u8 _tile_readb(unsigned long addr);
extern u16 _tile_readw(unsigned long addr);
extern u32 _tile_readl(unsigned long addr);
extern u64 _tile_readq(unsigned long addr);
extern void _tile_writeb(u8 val, unsigned long addr);
extern void _tile_writew(u16 val, unsigned long addr);
extern void _tile_writel(u32 val, unsigned long addr);
extern void _tile_writeq(u64 val, unsigned long addr);
#else
/*
* The Tile architecture does not support IOMEM unless PCI is enabled.
* Unfortunately we can't yet simply not declare these methods,
* since some generic code that compiles into the kernel, but
* we never run, uses them unconditionally.
*/
static inline int iomem_panic(void)
{
panic("readb/writeb and friends do not exist on tile without PCI");
return 0;
}
static inline u8 _tile_readb(unsigned long addr)
{
return iomem_panic();
}
static inline u16 _tile_readw(unsigned long addr)
{
return iomem_panic();
}
static inline u32 _tile_readl(unsigned long addr)
{
return iomem_panic();
}
static inline u64 _tile_readq(unsigned long addr)
{
return iomem_panic();
}
static inline void _tile_writeb(u8 val, unsigned long addr)
{
iomem_panic();
}
static inline void _tile_writew(u16 val, unsigned long addr)
{
iomem_panic();
}
static inline void _tile_writel(u32 val, unsigned long addr)
{
iomem_panic();
}
static inline void _tile_writeq(u64 val, unsigned long addr)
{
iomem_panic();
}
#endif
#define readb(addr) _tile_readb((unsigned long)addr)
#define readw(addr) _tile_readw((unsigned long)addr)
#define readl(addr) _tile_readl((unsigned long)addr)
#define readq(addr) _tile_readq((unsigned long)addr)
#define writeb(val, addr) _tile_writeb(val, (unsigned long)addr)
#define writew(val, addr) _tile_writew(val, (unsigned long)addr)
#define writel(val, addr) _tile_writel(val, (unsigned long)addr)
#define writeq(val, addr) _tile_writeq(val, (unsigned long)addr)
#define __raw_readb readb
#define __raw_readw readw
#define __raw_readl readl
#define __raw_readq readq
#define __raw_writeb writeb
#define __raw_writew writew
#define __raw_writel writel
#define __raw_writeq writeq
#define readb_relaxed readb
#define readw_relaxed readw
#define readl_relaxed readl
#define readq_relaxed readq
#define ioread8 readb
#define ioread16 readw
#define ioread32 readl
#define ioread64 readq
#define iowrite8 writeb
#define iowrite16 writew
#define iowrite32 writel
#define iowrite64 writeq
static inline void memset_io(void *dst, int val, size_t len)
{
int x;
BUG_ON((unsigned long)dst & 0x3);
val = (val & 0xff) * 0x01010101;
for (x = 0; x < len; x += 4)
writel(val, dst + x);
}
static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
size_t len)
{
int x;
BUG_ON((unsigned long)src & 0x3);
for (x = 0; x < len; x += 4)
*(u32 *)(dst + x) = readl(src + x);
}
static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
size_t len)
{
int x;
BUG_ON((unsigned long)dst & 0x3);
for (x = 0; x < len; x += 4)
writel(*(u32 *)(src + x), dst + x);
}
/*
* The Tile architecture does not support IOPORT, even with PCI.
* Unfortunately we can't yet simply not declare these methods,
* since some generic code that compiles into the kernel, but
* we never run, uses them unconditionally.
*/
static inline long ioport_panic(void)
{
panic("inb/outb and friends do not exist on tile");
return 0;
}
static inline void __iomem *ioport_map(unsigned long port, unsigned int len)
{
pr_info("ioport_map: mapping IO resources is unsupported on tile.\n");
return NULL;
}
static inline void ioport_unmap(void __iomem *addr)
{
ioport_panic();
}
static inline u8 inb(unsigned long addr)
{
return ioport_panic();
}
static inline u16 inw(unsigned long addr)
{
return ioport_panic();
}
static inline u32 inl(unsigned long addr)
{
return ioport_panic();
}
static inline void outb(u8 b, unsigned long addr)
{
ioport_panic();
}
static inline void outw(u16 b, unsigned long addr)
{
ioport_panic();
}
static inline void outl(u32 b, unsigned long addr)
{
ioport_panic();
}
#define inb_p(addr) inb(addr)
#define inw_p(addr) inw(addr)
#define inl_p(addr) inl(addr)
#define outb_p(x, addr) outb((x), (addr))
#define outw_p(x, addr) outw((x), (addr))
#define outl_p(x, addr) outl((x), (addr))
static inline void insb(unsigned long addr, void *buffer, int count)
{
ioport_panic();
}
static inline void insw(unsigned long addr, void *buffer, int count)
{
ioport_panic();
}
static inline void insl(unsigned long addr, void *buffer, int count)
{
ioport_panic();
}
static inline void outsb(unsigned long addr, const void *buffer, int count)
{
ioport_panic();
}
static inline void outsw(unsigned long addr, const void *buffer, int count)
{
ioport_panic();
}
static inline void outsl(unsigned long addr, const void *buffer, int count)
{
ioport_panic();
}
#define ioread16be(addr) be16_to_cpu(ioread16(addr))
#define ioread32be(addr) be32_to_cpu(ioread32(addr))
#define iowrite16be(v, addr) iowrite16(be16_to_cpu(v), (addr))
#define iowrite32be(v, addr) iowrite32(be32_to_cpu(v), (addr))
#define ioread8_rep(p, dst, count) \
insb((unsigned long) (p), (dst), (count))
#define ioread16_rep(p, dst, count) \
insw((unsigned long) (p), (dst), (count))
#define ioread32_rep(p, dst, count) \
insl((unsigned long) (p), (dst), (count))
#define iowrite8_rep(p, src, count) \
outsb((unsigned long) (p), (src), (count))
#define iowrite16_rep(p, src, count) \
outsw((unsigned long) (p), (src), (count))
#define iowrite32_rep(p, src, count) \
outsl((unsigned long) (p), (src), (count))
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
#endif /* _ASM_TILE_IO_H */

View File

@ -0,0 +1,79 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_IRQ_H
#define _ASM_TILE_IRQ_H
#include <linux/hardirq.h>
/* The hypervisor interface provides 32 IRQs. */
#define NR_IRQS 32
/* IRQ numbers used for linux IPIs. */
#define IRQ_RESCHEDULE 0
#define irq_canonicalize(irq) (irq)
void ack_bad_irq(unsigned int irq);
/*
* Different ways of handling interrupts. Tile interrupts are always
* per-cpu; there is no global interrupt controller to implement
* enable/disable. Most onboard devices can send their interrupts to
* many tiles at the same time, and Tile-specific drivers know how to
* deal with this.
*
* However, generic devices (usually PCIE based, sometimes GPIO)
* expect that interrupts will fire on a single core at a time and
* that the irq can be enabled or disabled from any core at any time.
* We implement this by directing such interrupts to a single core.
*
* One added wrinkle is that PCI interrupts can be either
* hardware-cleared (legacy interrupts) or software cleared (MSI).
* Other generic device systems (GPIO) are always software-cleared.
*
* The enums below are used by drivers for onboard devices, including
* the internals of PCI root complex and GPIO. They allow the driver
* to tell the generic irq code what kind of interrupt is mapped to a
* particular IRQ number.
*/
enum {
/* per-cpu interrupt; use enable/disable_percpu_irq() to mask */
TILE_IRQ_PERCPU,
/* global interrupt, hardware responsible for clearing. */
TILE_IRQ_HW_CLEAR,
/* global interrupt, software responsible for clearing. */
TILE_IRQ_SW_CLEAR,
};
/*
* Paravirtualized drivers should call this when they dynamically
* allocate a new IRQ or discover an IRQ that was pre-allocated by the
* hypervisor for use with their particular device. This gives the
* IRQ subsystem an opportunity to do interrupt-type-specific
* initialization.
*
* ISSUE: We should modify this API so that registering anything
* except percpu interrupts also requires providing callback methods
* for enabling and disabling the interrupt. This would allow the
* generic IRQ code to proxy enable/disable_irq() calls back into the
* PCI subsystem, which in turn could enable or disable the interrupt
* at the PCI shim.
*/
void tile_irq_activate(unsigned int irq, int tile_irq_type);
void setup_irq_regs(void);
#endif /* _ASM_TILE_IRQ_H */

View File

@ -0,0 +1,282 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_IRQFLAGS_H
#define _ASM_TILE_IRQFLAGS_H
#include <arch/interrupts.h>
#include <arch/chip.h>
#if !defined(__tilegx__) && defined(__ASSEMBLY__)
/*
* The set of interrupts we want to allow when interrupts are nominally
* disabled. The remainder are effectively "NMI" interrupts from
* the point of view of the generic Linux code. Note that synchronous
* interrupts (aka "non-queued") are not blocked by the mask in any case.
*/
#if CHIP_HAS_AUX_PERF_COUNTERS()
#define LINUX_MASKABLE_INTERRUPTS_HI \
(~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT)))
#else
#define LINUX_MASKABLE_INTERRUPTS_HI \
(~(INT_MASK_HI(INT_PERF_COUNT)))
#endif
#else
#if CHIP_HAS_AUX_PERF_COUNTERS()
#define LINUX_MASKABLE_INTERRUPTS \
(~(INT_MASK(INT_PERF_COUNT) | INT_MASK(INT_AUX_PERF_COUNT)))
#else
#define LINUX_MASKABLE_INTERRUPTS \
(~(INT_MASK(INT_PERF_COUNT)))
#endif
#endif
#ifndef __ASSEMBLY__
/* NOTE: we can't include <linux/percpu.h> due to #include dependencies. */
#include <asm/percpu.h>
#include <arch/spr_def.h>
/* Set and clear kernel interrupt masks. */
#if CHIP_HAS_SPLIT_INTR_MASK()
#if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32
# error Fix assumptions about which word various interrupts are in
#endif
#define interrupt_mask_set(n) do { \
int __n = (n); \
int __mask = 1 << (__n & 0x1f); \
if (__n < 32) \
__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_0, __mask); \
else \
__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_1, __mask); \
} while (0)
#define interrupt_mask_reset(n) do { \
int __n = (n); \
int __mask = 1 << (__n & 0x1f); \
if (__n < 32) \
__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, __mask); \
else \
__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, __mask); \
} while (0)
#define interrupt_mask_check(n) ({ \
int __n = (n); \
(((__n < 32) ? \
__insn_mfspr(SPR_INTERRUPT_MASK_K_0) : \
__insn_mfspr(SPR_INTERRUPT_MASK_K_1)) \
>> (__n & 0x1f)) & 1; \
})
#define interrupt_mask_set_mask(mask) do { \
unsigned long long __m = (mask); \
__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_0, (unsigned long)(__m)); \
__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_1, (unsigned long)(__m>>32)); \
} while (0)
#define interrupt_mask_reset_mask(mask) do { \
unsigned long long __m = (mask); \
__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, (unsigned long)(__m)); \
__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, (unsigned long)(__m>>32)); \
} while (0)
#else
#define interrupt_mask_set(n) \
__insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (1UL << (n)))
#define interrupt_mask_reset(n) \
__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (1UL << (n)))
#define interrupt_mask_check(n) \
((__insn_mfspr(SPR_INTERRUPT_MASK_K) >> (n)) & 1)
#define interrupt_mask_set_mask(mask) \
__insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (mask))
#define interrupt_mask_reset_mask(mask) \
__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (mask))
#endif
/*
* The set of interrupts we want active if irqs are enabled.
* Note that in particular, the tile timer interrupt comes and goes
* from this set, since we have no other way to turn off the timer.
* Likewise, INTCTRL_K is removed and re-added during device
* interrupts, as is the the hardwall UDN_FIREWALL interrupt.
* We use a low bit (MEM_ERROR) as our sentinel value and make sure it
* is always claimed as an "active interrupt" so we can query that bit
* to know our current state.
*/
DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
#define INITIAL_INTERRUPTS_ENABLED INT_MASK(INT_MEM_ERROR)
/* Disable interrupts. */
#define arch_local_irq_disable() \
interrupt_mask_set_mask(LINUX_MASKABLE_INTERRUPTS)
/* Disable all interrupts, including NMIs. */
#define arch_local_irq_disable_all() \
interrupt_mask_set_mask(-1UL)
/* Re-enable all maskable interrupts. */
#define arch_local_irq_enable() \
interrupt_mask_reset_mask(__get_cpu_var(interrupts_enabled_mask))
/* Disable or enable interrupts based on flag argument. */
#define arch_local_irq_restore(disabled) do { \
if (disabled) \
arch_local_irq_disable(); \
else \
arch_local_irq_enable(); \
} while (0)
/* Return true if "flags" argument means interrupts are disabled. */
#define arch_irqs_disabled_flags(flags) ((flags) != 0)
/* Return true if interrupts are currently disabled. */
#define arch_irqs_disabled() interrupt_mask_check(INT_MEM_ERROR)
/* Save whether interrupts are currently disabled. */
#define arch_local_save_flags() arch_irqs_disabled()
/* Save whether interrupts are currently disabled, then disable them. */
#define arch_local_irq_save() ({ \
unsigned long __flags = arch_local_save_flags(); \
arch_local_irq_disable(); \
__flags; })
/* Prevent the given interrupt from being enabled next time we enable irqs. */
#define arch_local_irq_mask(interrupt) \
(__get_cpu_var(interrupts_enabled_mask) &= ~INT_MASK(interrupt))
/* Prevent the given interrupt from being enabled immediately. */
#define arch_local_irq_mask_now(interrupt) do { \
arch_local_irq_mask(interrupt); \
interrupt_mask_set(interrupt); \
} while (0)
/* Allow the given interrupt to be enabled next time we enable irqs. */
#define arch_local_irq_unmask(interrupt) \
(__get_cpu_var(interrupts_enabled_mask) |= INT_MASK(interrupt))
/* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */
#define arch_local_irq_unmask_now(interrupt) do { \
arch_local_irq_unmask(interrupt); \
if (!irqs_disabled()) \
interrupt_mask_reset(interrupt); \
} while (0)
#else /* __ASSEMBLY__ */
/* We provide a somewhat more restricted set for assembly. */
#ifdef __tilegx__
#if INT_MEM_ERROR != 0
# error Fix IRQ_DISABLED() macro
#endif
/* Return 0 or 1 to indicate whether interrupts are currently disabled. */
#define IRQS_DISABLED(tmp) \
mfspr tmp, SPR_INTERRUPT_MASK_K; \
andi tmp, tmp, 1
/* Load up a pointer to &interrupts_enabled_mask. */
#define GET_INTERRUPTS_ENABLED_MASK_PTR(reg) \
moveli reg, hw2_last(interrupts_enabled_mask); \
shl16insli reg, reg, hw1(interrupts_enabled_mask); \
shl16insli reg, reg, hw0(interrupts_enabled_mask); \
add reg, reg, tp
/* Disable interrupts. */
#define IRQ_DISABLE(tmp0, tmp1) \
moveli tmp0, hw2_last(LINUX_MASKABLE_INTERRUPTS); \
shl16insli tmp0, tmp0, hw1(LINUX_MASKABLE_INTERRUPTS); \
shl16insli tmp0, tmp0, hw0(LINUX_MASKABLE_INTERRUPTS); \
mtspr SPR_INTERRUPT_MASK_SET_K, tmp0
/* Disable ALL synchronous interrupts (used by NMI entry). */
#define IRQ_DISABLE_ALL(tmp) \
movei tmp, -1; \
mtspr SPR_INTERRUPT_MASK_SET_K, tmp
/* Enable interrupts. */
#define IRQ_ENABLE(tmp0, tmp1) \
GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \
ld tmp0, tmp0; \
mtspr SPR_INTERRUPT_MASK_RESET_K, tmp0
#else /* !__tilegx__ */
/*
* Return 0 or 1 to indicate whether interrupts are currently disabled.
* Note that it's important that we use a bit from the "low" mask word,
* since when we are enabling, that is the word we write first, so if we
* are interrupted after only writing half of the mask, the interrupt
* handler will correctly observe that we have interrupts enabled, and
* will enable interrupts itself on return from the interrupt handler
* (making the original code's write of the "high" mask word idempotent).
*/
#define IRQS_DISABLED(tmp) \
mfspr tmp, SPR_INTERRUPT_MASK_K_0; \
shri tmp, tmp, INT_MEM_ERROR; \
andi tmp, tmp, 1
/* Load up a pointer to &interrupts_enabled_mask. */
#define GET_INTERRUPTS_ENABLED_MASK_PTR(reg) \
moveli reg, lo16(interrupts_enabled_mask); \
auli reg, reg, ha16(interrupts_enabled_mask); \
add reg, reg, tp
/* Disable interrupts. */
#define IRQ_DISABLE(tmp0, tmp1) \
{ \
movei tmp0, -1; \
moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS_HI) \
}; \
{ \
mtspr SPR_INTERRUPT_MASK_SET_K_0, tmp0; \
auli tmp1, tmp1, ha16(LINUX_MASKABLE_INTERRUPTS_HI) \
}; \
mtspr SPR_INTERRUPT_MASK_SET_K_1, tmp1
/* Disable ALL synchronous interrupts (used by NMI entry). */
#define IRQ_DISABLE_ALL(tmp) \
movei tmp, -1; \
mtspr SPR_INTERRUPT_MASK_SET_K_0, tmp; \
mtspr SPR_INTERRUPT_MASK_SET_K_1, tmp
/* Enable interrupts. */
#define IRQ_ENABLE(tmp0, tmp1) \
GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \
{ \
lw tmp0, tmp0; \
addi tmp1, tmp0, 4 \
}; \
lw tmp1, tmp1; \
mtspr SPR_INTERRUPT_MASK_RESET_K_0, tmp0; \
mtspr SPR_INTERRUPT_MASK_RESET_K_1, tmp1
#endif
/*
* Do the CPU's IRQ-state tracing from assembly code. We call a
* C function, but almost everywhere we do, we don't mind clobbering
* all the caller-saved registers.
*/
#ifdef CONFIG_TRACE_IRQFLAGS
# define TRACE_IRQS_ON jal trace_hardirqs_on
# define TRACE_IRQS_OFF jal trace_hardirqs_off
#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_OFF
#endif
#endif /* __ASSEMBLY__ */
#endif /* _ASM_TILE_IRQFLAGS_H */

View File

@ -0,0 +1,53 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
* based on kexec.h from other architectures in linux-2.6.18
*/
#ifndef _ASM_TILE_KEXEC_H
#define _ASM_TILE_KEXEC_H
#include <asm/page.h>
/* Maximum physical address we can use pages from. */
#define KEXEC_SOURCE_MEMORY_LIMIT TASK_SIZE
/* Maximum address we can reach in physical address mode. */
#define KEXEC_DESTINATION_MEMORY_LIMIT TASK_SIZE
/* Maximum address we can use for the control code buffer. */
#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
#define KEXEC_CONTROL_PAGE_SIZE PAGE_SIZE
/*
* We don't bother to provide a unique identifier, since we can only
* reboot with a single type of kernel image anyway.
*/
#define KEXEC_ARCH KEXEC_ARCH_DEFAULT
/* Use the tile override for the page allocator. */
struct page *kimage_alloc_pages_arch(gfp_t gfp_mask, unsigned int order);
#define kimage_alloc_pages_arch kimage_alloc_pages_arch
#define MAX_NOTE_BYTES 1024
/* Defined in arch/tile/kernel/relocate_kernel.S */
extern const unsigned char relocate_new_kernel[];
extern const unsigned long relocate_new_kernel_size;
extern void relocate_new_kernel_end(void);
/* Provide a dummy definition to avoid build failures. */
static inline void crash_setup_regs(struct pt_regs *n, struct pt_regs *o)
{
}
#endif /* _ASM_TILE_KEXEC_H */

View File

@ -0,0 +1,57 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_KMAP_TYPES_H
#define _ASM_TILE_KMAP_TYPES_H
/*
* In 32-bit TILE Linux we have to balance the desire to have a lot of
* nested atomic mappings with the fact that large page sizes and many
* processors chew up address space quickly. In a typical
* 64-processor, 64KB-page layout build, making KM_TYPE_NR one larger
* adds 4MB of required address-space. For now we leave KM_TYPE_NR
* set to depth 8.
*/
enum km_type {
KM_TYPE_NR = 8
};
/*
* We provide dummy definitions of all the stray values that used to be
* required for kmap_atomic() and no longer are.
*/
enum {
KM_BOUNCE_READ,
KM_SKB_SUNRPC_DATA,
KM_SKB_DATA_SOFTIRQ,
KM_USER0,
KM_USER1,
KM_BIO_SRC_IRQ,
KM_BIO_DST_IRQ,
KM_PTE0,
KM_PTE1,
KM_IRQ0,
KM_IRQ1,
KM_SOFTIRQ0,
KM_SOFTIRQ1,
KM_SYNC_ICACHE,
KM_SYNC_DCACHE,
KM_UML_USERCOPY,
KM_IRQ_PTE,
KM_NMI,
KM_NMI_PTE,
KM_KDB
};
#endif /* _ASM_TILE_KMAP_TYPES_H */

View File

@ -0,0 +1,51 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_LINKAGE_H
#define _ASM_TILE_LINKAGE_H
#include <feedback.h>
#define __ALIGN .align 8
/*
* The STD_ENTRY and STD_ENDPROC macros put the function in a
* self-named .text.foo section, and if linker feedback collection
* is enabled, add a suitable call to the feedback collection code.
* STD_ENTRY_SECTION lets you specify a non-standard section name.
*/
#define STD_ENTRY(name) \
.pushsection .text.##name, "ax"; \
ENTRY(name); \
FEEDBACK_ENTER(name)
#define STD_ENTRY_SECTION(name, section) \
.pushsection section, "ax"; \
ENTRY(name); \
FEEDBACK_ENTER_EXPLICIT(name, section, .Lend_##name - name)
#define STD_ENDPROC(name) \
ENDPROC(name); \
.Lend_##name:; \
.popsection
/* Create a file-static function entry set up for feedback gathering. */
#define STD_ENTRY_LOCAL(name) \
.pushsection .text.##name, "ax"; \
ALIGN; \
name:; \
FEEDBACK_ENTER(name)
#endif /* _ASM_TILE_LINKAGE_H */

View File

@ -0,0 +1,33 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
* The hypervisor's memory controller profiling infrastructure allows
* the programmer to find out what fraction of the available memory
* bandwidth is being consumed at each memory controller. The
* profiler provides start, stop, and clear operations to allows
* profiling over a specific time window, as well as an interface for
* reading the most recent profile values.
*
* This header declares IOCTL codes necessary to control memprof.
*/
#ifndef _ASM_TILE_MEMPROF_H
#define _ASM_TILE_MEMPROF_H
#include <linux/ioctl.h>
#define MEMPROF_IOCTL_TYPE 0xB4
#define MEMPROF_IOCTL_START _IO(MEMPROF_IOCTL_TYPE, 0)
#define MEMPROF_IOCTL_STOP _IO(MEMPROF_IOCTL_TYPE, 1)
#define MEMPROF_IOCTL_CLEAR _IO(MEMPROF_IOCTL_TYPE, 2)
#endif /* _ASM_TILE_MEMPROF_H */

View File

@ -0,0 +1,41 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_MMAN_H
#define _ASM_TILE_MMAN_H
#include <asm-generic/mman-common.h>
#include <arch/chip.h>
/* Standard Linux flags */
#define MAP_POPULATE 0x0040 /* populate (prefault) pagetables */
#define MAP_NONBLOCK 0x0080 /* do not block on IO */
#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
#define MAP_STACK MAP_GROWSDOWN /* provide convenience alias */
#define MAP_LOCKED 0x0200 /* pages are locked */
#define MAP_NORESERVE 0x0400 /* don't check for reservations */
#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
#define MAP_HUGETLB 0x4000 /* create a huge page mapping */
/*
* Flags for mlockall
*/
#define MCL_CURRENT 1 /* lock all current mappings */
#define MCL_FUTURE 2 /* lock all future mappings */
#endif /* _ASM_TILE_MMAN_H */

View File

@ -0,0 +1,31 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_MMU_H
#define _ASM_TILE_MMU_H
/* Capture any arch- and mm-specific information. */
struct mm_context {
/*
* Written under the mmap_sem semaphore; read without the
* semaphore but atomically, but it is conservatively set.
*/
unsigned int priority_cached;
};
typedef struct mm_context mm_context_t;
void leave_mm(int cpu);
#endif /* _ASM_TILE_MMU_H */

View File

@ -0,0 +1,131 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_MMU_CONTEXT_H
#define _ASM_TILE_MMU_CONTEXT_H
#include <linux/smp.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>
#include <asm-generic/mm_hooks.h>
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
return 0;
}
/* Note that arch/tile/kernel/head.S also calls hv_install_context() */
static inline void __install_page_table(pgd_t *pgdir, int asid, pgprot_t prot)
{
/* FIXME: DIRECTIO should not always be set. FIXME. */
int rc = hv_install_context(__pa(pgdir), prot, asid, HV_CTX_DIRECTIO);
if (rc < 0)
panic("hv_install_context failed: %d", rc);
}
static inline void install_page_table(pgd_t *pgdir, int asid)
{
pte_t *ptep = virt_to_pte(NULL, (unsigned long)pgdir);
__install_page_table(pgdir, asid, *ptep);
}
/*
* "Lazy" TLB mode is entered when we are switching to a kernel task,
* which borrows the mm of the previous task. The goal of this
* optimization is to avoid having to install a new page table. On
* early x86 machines (where the concept originated) you couldn't do
* anything short of a full page table install for invalidation, so
* handling a remote TLB invalidate required doing a page table
* re-install. Someone clearly decided that it was silly to keep
* doing this while in "lazy" TLB mode, so the optimization involves
* installing the swapper page table instead the first time one
* occurs, and clearing the cpu out of cpu_vm_mask, so the cpu running
* the kernel task doesn't need to take any more interrupts. At that
* point it's then necessary to explicitly reinstall it when context
* switching back to the original mm.
*
* On Tile, we have to do a page-table install whenever DMA is enabled,
* so in that case lazy mode doesn't help anyway. And more generally,
* we have efficient per-page TLB shootdown, and don't expect to spend
* that much time in kernel tasks in general, so just leaving the
* kernel task borrowing the old page table, but handling TLB
* shootdowns, is a reasonable thing to do. And importantly, this
* lets us use the hypervisor's internal APIs for TLB shootdown, which
* means we don't have to worry about having TLB shootdowns blocked
* when Linux is disabling interrupts; see the page migration code for
* an example of where it's important for TLB shootdowns to complete
* even when interrupts are disabled at the Linux level.
*/
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *t)
{
#if CHIP_HAS_TILE_DMA()
/*
* We have to do an "identity" page table switch in order to
* clear any pending DMA interrupts.
*/
if (current->thread.tile_dma_state.enabled)
install_page_table(mm->pgd, __get_cpu_var(current_asid));
#endif
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
if (likely(prev != next)) {
int cpu = smp_processor_id();
/* Pick new ASID. */
int asid = __get_cpu_var(current_asid) + 1;
if (asid > max_asid) {
asid = min_asid;
local_flush_tlb();
}
__get_cpu_var(current_asid) = asid;
/* Clear cpu from the old mm, and set it in the new one. */
cpumask_clear_cpu(cpu, mm_cpumask(prev));
cpumask_set_cpu(cpu, mm_cpumask(next));
/* Re-load page tables */
install_page_table(next->pgd, asid);
/* See how we should set the red/black cache info */
check_mm_caching(prev, next);
/*
* Since we're changing to a new mm, we have to flush
* the icache in case some physical page now being mapped
* has subsequently been repurposed and has new code.
*/
__flush_icache();
}
}
static inline void activate_mm(struct mm_struct *prev_mm,
struct mm_struct *next_mm)
{
switch_mm(prev_mm, next_mm, NULL);
}
#define destroy_context(mm) do { } while (0)
#define deactivate_mm(tsk, mm) do { } while (0)
#endif /* _ASM_TILE_MMU_CONTEXT_H */

View File

@ -0,0 +1,70 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_MMZONE_H
#define _ASM_TILE_MMZONE_H
extern struct pglist_data node_data[];
#define NODE_DATA(nid) (&node_data[nid])
extern void get_memcfg_numa(void);
#ifdef CONFIG_DISCONTIGMEM
#include <asm/page.h>
/*
* Generally, memory ranges are always doled out by the hypervisor in
* fixed-size, power-of-two increments. That would make computing the node
* very easy. We could just take a couple high bits of the PA, which
* denote the memory shim, and we'd be done. However, when we're doing
* memory striping, this may not be true; PAs with different high bit
* values might be in the same node. Thus, we keep a lookup table to
* translate the high bits of the PFN to the node number.
*/
extern int highbits_to_node[];
static inline int pfn_to_nid(unsigned long pfn)
{
return highbits_to_node[__pfn_to_highbits(pfn)];
}
#define kern_addr_valid(kaddr) virt_addr_valid((void *)kaddr)
static inline int pfn_valid(int pfn)
{
int nid = pfn_to_nid(pfn);
if (nid >= 0)
return (pfn < node_end_pfn(nid));
return 0;
}
/* Information on the NUMA nodes that we compute early */
extern unsigned long node_start_pfn[];
extern unsigned long node_end_pfn[];
extern unsigned long node_memmap_pfn[];
extern unsigned long node_percpu_pfn[];
extern unsigned long node_free_pfn[];
#ifdef CONFIG_HIGHMEM
extern unsigned long node_lowmem_end_pfn[];
#endif
#ifdef CONFIG_PCI
extern unsigned long pci_reserve_start_pfn;
extern unsigned long pci_reserve_end_pfn;
#endif
#endif /* CONFIG_DISCONTIGMEM */
#endif /* _ASM_TILE_MMZONE_H */

View File

@ -0,0 +1,336 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_PAGE_H
#define _ASM_TILE_PAGE_H
#include <linux/const.h>
#include <hv/hypervisor.h>
#include <arch/chip.h>
/* PAGE_SHIFT and HPAGE_SHIFT determine the page sizes. */
#define PAGE_SHIFT HV_LOG2_PAGE_SIZE_SMALL
#define HPAGE_SHIFT HV_LOG2_PAGE_SIZE_LARGE
#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
/*
* If the Kconfig doesn't specify, set a maximum zone order that
* is enough so that we can create huge pages from small pages given
* the respective sizes of the two page types. See <linux/mmzone.h>.
*/
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define CONFIG_FORCE_MAX_ZONEORDER (HPAGE_SHIFT - PAGE_SHIFT + 1)
#endif
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/string.h>
struct page;
static inline void clear_page(void *page)
{
memset(page, 0, PAGE_SIZE);
}
static inline void copy_page(void *to, void *from)
{
memcpy(to, from, PAGE_SIZE);
}
static inline void clear_user_page(void *page, unsigned long vaddr,
struct page *pg)
{
clear_page(page);
}
static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
struct page *topage)
{
copy_page(to, from);
}
/*
* Hypervisor page tables are made of the same basic structure.
*/
typedef HV_PTE pte_t;
typedef HV_PTE pgd_t;
typedef HV_PTE pgprot_t;
/*
* User L2 page tables are managed as one L2 page table per page,
* because we use the page allocator for them. This keeps the allocation
* simple and makes it potentially useful to implement HIGHPTE at some point.
* However, it's also inefficient, since L2 page tables are much smaller
* than pages (currently 2KB vs 64KB). So we should revisit this.
*/
typedef struct page *pgtable_t;
/* Must be a macro since it is used to create constants. */
#define __pgprot(val) hv_pte(val)
/* Rarely-used initializers, typically with a "zero" value. */
#define __pte(x) hv_pte(x)
#define __pgd(x) hv_pte(x)
static inline u64 pgprot_val(pgprot_t pgprot)
{
return hv_pte_val(pgprot);
}
static inline u64 pte_val(pte_t pte)
{
return hv_pte_val(pte);
}
static inline u64 pgd_val(pgd_t pgd)
{
return hv_pte_val(pgd);
}
#ifdef __tilegx__
typedef HV_PTE pmd_t;
#define __pmd(x) hv_pte(x)
static inline u64 pmd_val(pmd_t pmd)
{
return hv_pte_val(pmd);
}
#endif
static inline __attribute_const__ int get_order(unsigned long size)
{
return BITS_PER_LONG - __builtin_clzl((size - 1) >> PAGE_SHIFT);
}
#endif /* !__ASSEMBLY__ */
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE 2
#ifdef CONFIG_HUGETLB_PAGE
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif
/* Each memory controller has PAs distinct in their high bits. */
#define NR_PA_HIGHBIT_SHIFT (CHIP_PA_WIDTH() - CHIP_LOG_NUM_MSHIMS())
#define NR_PA_HIGHBIT_VALUES (1 << CHIP_LOG_NUM_MSHIMS())
#define __pa_to_highbits(pa) ((phys_addr_t)(pa) >> NR_PA_HIGHBIT_SHIFT)
#define __pfn_to_highbits(pfn) ((pfn) >> (NR_PA_HIGHBIT_SHIFT - PAGE_SHIFT))
#ifdef __tilegx__
/*
* We reserve the lower half of memory for user-space programs, and the
* upper half for system code. We re-map all of physical memory in the
* upper half, which takes a quarter of our VA space. Then we have
* the vmalloc regions. The supervisor code lives at 0xfffffff700000000,
* with the hypervisor above that.
*
* Loadable kernel modules are placed immediately after the static
* supervisor code, with each being allocated a 256MB region of
* address space, so we don't have to worry about the range of "jal"
* and other branch instructions.
*
* For now we keep life simple and just allocate one pmd (4GB) for vmalloc.
* Similarly, for now we don't play any struct page mapping games.
*/
#if CHIP_PA_WIDTH() + 2 > CHIP_VA_WIDTH()
# error Too much PA to map with the VA available!
#endif
#define HALF_VA_SPACE (_AC(1, UL) << (CHIP_VA_WIDTH() - 1))
#define MEM_LOW_END (HALF_VA_SPACE - 1) /* low half */
#define MEM_HIGH_START (-HALF_VA_SPACE) /* high half */
#define PAGE_OFFSET MEM_HIGH_START
#define _VMALLOC_START _AC(0xfffffff500000000, UL) /* 4 GB */
#define HUGE_VMAP_BASE _AC(0xfffffff600000000, UL) /* 4 GB */
#define MEM_SV_START _AC(0xfffffff700000000, UL) /* 256 MB */
#define MEM_SV_INTRPT MEM_SV_START
#define MEM_MODULE_START _AC(0xfffffff710000000, UL) /* 256 MB */
#define MEM_MODULE_END (MEM_MODULE_START + (256*1024*1024))
#define MEM_HV_START _AC(0xfffffff800000000, UL) /* 32 GB */
/* Highest DTLB address we will use */
#define KERNEL_HIGH_VADDR MEM_SV_START
/* Since we don't currently provide any fixmaps, we use an impossible VA. */
#define FIXADDR_TOP MEM_HV_START
#else /* !__tilegx__ */
/*
* A PAGE_OFFSET of 0xC0000000 means that the kernel has
* a virtual address space of one gigabyte, which limits the
* amount of physical memory you can use to about 768MB.
* If you want more physical memory than this then see the CONFIG_HIGHMEM
* option in the kernel configuration.
*
* The top 16MB chunk in the table below is unavailable to Linux. Since
* the kernel interrupt vectors must live at ether 0xfe000000 or 0xfd000000
* (depending on whether the kernel is at PL2 or Pl1), we map all of the
* bottom of RAM at this address with a huge page table entry to minimize
* its ITLB footprint (as well as at PAGE_OFFSET). The last architected
* requirement is that user interrupt vectors live at 0xfc000000, so we
* make that range of memory available to user processes. The remaining
* regions are sized as shown; the first four addresses use the PL 1
* values, and after that, we show "typical" values, since the actual
* addresses depend on kernel #defines.
*
* MEM_HV_INTRPT 0xfe000000
* MEM_SV_INTRPT (kernel code) 0xfd000000
* MEM_USER_INTRPT (user vector) 0xfc000000
* FIX_KMAP_xxx 0xf8000000 (via NR_CPUS * KM_TYPE_NR)
* PKMAP_BASE 0xf7000000 (via LAST_PKMAP)
* HUGE_VMAP 0xf3000000 (via CONFIG_NR_HUGE_VMAPS)
* VMALLOC_START 0xf0000000 (via __VMALLOC_RESERVE)
* mapped LOWMEM 0xc0000000
*/
#define MEM_USER_INTRPT _AC(0xfc000000, UL)
#if CONFIG_KERNEL_PL == 1
#define MEM_SV_INTRPT _AC(0xfd000000, UL)
#define MEM_HV_INTRPT _AC(0xfe000000, UL)
#else
#define MEM_GUEST_INTRPT _AC(0xfd000000, UL)
#define MEM_SV_INTRPT _AC(0xfe000000, UL)
#define MEM_HV_INTRPT _AC(0xff000000, UL)
#endif
#define INTRPT_SIZE 0x4000
/* Tolerate page size larger than the architecture interrupt region size. */
#if PAGE_SIZE > INTRPT_SIZE
#undef INTRPT_SIZE
#define INTRPT_SIZE PAGE_SIZE
#endif
#define KERNEL_HIGH_VADDR MEM_USER_INTRPT
#define FIXADDR_TOP (KERNEL_HIGH_VADDR - PAGE_SIZE)
#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
/* On 32-bit architectures we mix kernel modules in with other vmaps. */
#define MEM_MODULE_START VMALLOC_START
#define MEM_MODULE_END VMALLOC_END
#endif /* __tilegx__ */
#ifndef __ASSEMBLY__
#ifdef CONFIG_HIGHMEM
/* Map kernel virtual addresses to page frames, in HPAGE_SIZE chunks. */
extern unsigned long pbase_map[];
extern void *vbase_map[];
static inline unsigned long kaddr_to_pfn(const volatile void *_kaddr)
{
unsigned long kaddr = (unsigned long)_kaddr;
return pbase_map[kaddr >> HPAGE_SHIFT] +
((kaddr & (HPAGE_SIZE - 1)) >> PAGE_SHIFT);
}
static inline void *pfn_to_kaddr(unsigned long pfn)
{
return vbase_map[__pfn_to_highbits(pfn)] + (pfn << PAGE_SHIFT);
}
static inline phys_addr_t virt_to_phys(const volatile void *kaddr)
{
unsigned long pfn = kaddr_to_pfn(kaddr);
return ((phys_addr_t)pfn << PAGE_SHIFT) +
((unsigned long)kaddr & (PAGE_SIZE-1));
}
static inline void *phys_to_virt(phys_addr_t paddr)
{
return pfn_to_kaddr(paddr >> PAGE_SHIFT) + (paddr & (PAGE_SIZE-1));
}
/* With HIGHMEM, we pack PAGE_OFFSET through high_memory with all valid VAs. */
static inline int virt_addr_valid(const volatile void *kaddr)
{
extern void *high_memory; /* copied from <linux/mm.h> */
return ((unsigned long)kaddr >= PAGE_OFFSET && kaddr < high_memory);
}
#else /* !CONFIG_HIGHMEM */
static inline unsigned long kaddr_to_pfn(const volatile void *kaddr)
{
return ((unsigned long)kaddr - PAGE_OFFSET) >> PAGE_SHIFT;
}
static inline void *pfn_to_kaddr(unsigned long pfn)
{
return (void *)((pfn << PAGE_SHIFT) + PAGE_OFFSET);
}
static inline phys_addr_t virt_to_phys(const volatile void *kaddr)
{
return (phys_addr_t)((unsigned long)kaddr - PAGE_OFFSET);
}
static inline void *phys_to_virt(phys_addr_t paddr)
{
return (void *)((unsigned long)paddr + PAGE_OFFSET);
}
/* Check that the given address is within some mapped range of PAs. */
#define virt_addr_valid(kaddr) pfn_valid(kaddr_to_pfn(kaddr))
#endif /* !CONFIG_HIGHMEM */
/* All callers are not consistent in how they call these functions. */
#define __pa(kaddr) virt_to_phys((void *)(unsigned long)(kaddr))
#define __va(paddr) phys_to_virt((phys_addr_t)(paddr))
extern int devmem_is_allowed(unsigned long pagenr);
#ifdef CONFIG_FLATMEM
static inline int pfn_valid(unsigned long pfn)
{
return pfn < max_mapnr;
}
#endif
/* Provide as macros since these require some other headers included. */
#define page_to_pa(page) ((phys_addr_t)(page_to_pfn(page)) << PAGE_SHIFT)
#define virt_to_page(kaddr) pfn_to_page(kaddr_to_pfn((void *)(kaddr)))
#define page_to_virt(page) pfn_to_kaddr(page_to_pfn(page))
struct mm_struct;
extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
#endif /* !__ASSEMBLY__ */
#define VM_DATA_DEFAULT_FLAGS \
(VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#include <asm-generic/memory_model.h>
#endif /* _ASM_TILE_PAGE_H */

View File

@ -0,0 +1,97 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_PCI_H
#define _ASM_TILE_PCI_H
#include <linux/pci.h>
#include <asm-generic/pci_iomap.h>
/*
* Structure of a PCI controller (host bridge)
*/
struct pci_controller {
int index; /* PCI domain number */
struct pci_bus *root_bus;
int first_busno;
int last_busno;
int hv_cfg_fd[2]; /* config{0,1} fds for this PCIe controller */
int hv_mem_fd; /* fd to Hypervisor for MMIO operations */
struct pci_ops *ops;
int irq_base; /* Base IRQ from the Hypervisor */
int plx_gen1; /* flag for PLX Gen 1 configuration */
/* Address ranges that are routed to this controller/bridge. */
struct resource mem_resources[3];
};
/*
* The hypervisor maps the entirety of CPA-space as bus addresses, so
* bus addresses are physical addresses. The networking and block
* device layers use this boolean for bounce buffer decisions.
*/
#define PCI_DMA_BUS_IS_PHYS 1
int __init tile_pci_init(void);
int __init pcibios_init(void);
static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {}
void __devinit pcibios_fixup_bus(struct pci_bus *bus);
#define TILE_NUM_PCIE 2
#define pci_domain_nr(bus) (((struct pci_controller *)(bus)->sysdata)->index)
/*
* This decides whether to display the domain number in /proc.
*/
static inline int pci_proc_domain(struct pci_bus *bus)
{
return 1;
}
/*
* pcibios_assign_all_busses() tells whether or not the bus numbers
* should be reassigned, in case the BIOS didn't do it correctly, or
* in case we don't have a BIOS and we want to let Linux do it.
*/
static inline int pcibios_assign_all_busses(void)
{
return 1;
}
#define PCIBIOS_MIN_MEM 0
#define PCIBIOS_MIN_IO 0
/*
* This flag tells if the platform is TILEmpower that needs
* special configuration for the PLX switch chip.
*/
extern int tile_plx_gen1;
/* Use any cpu for PCI. */
#define cpumask_of_pcibus(bus) cpu_online_mask
/* implement the pci_ DMA API in terms of the generic device dma_ one */
#include <asm-generic/pci-dma-compat.h>
/* generic pci stuff */
#include <asm-generic/pci.h>
#endif /* _ASM_TILE_PCI_H */

View File

@ -0,0 +1,24 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_PERCPU_H
#define _ASM_TILE_PERCPU_H
register unsigned long __my_cpu_offset __asm__("tp");
#define __my_cpu_offset __my_cpu_offset
#define set_my_cpu_offset(tp) (__my_cpu_offset = (tp))
#include <asm-generic/percpu.h>
#endif /* _ASM_TILE_PERCPU_H */

View File

@ -0,0 +1,122 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_PGALLOC_H
#define _ASM_TILE_PGALLOC_H
#include <linux/threads.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/fixmap.h>
#include <hv/hypervisor.h>
/* Bits for the size of the second-level page table. */
#define L2_KERNEL_PGTABLE_SHIFT \
(HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL + HV_LOG2_PTE_SIZE)
/* We currently allocate user L2 page tables by page (unlike kernel L2s). */
#if L2_KERNEL_PGTABLE_SHIFT < HV_LOG2_PAGE_SIZE_SMALL
#define L2_USER_PGTABLE_SHIFT HV_LOG2_PAGE_SIZE_SMALL
#else
#define L2_USER_PGTABLE_SHIFT L2_KERNEL_PGTABLE_SHIFT
#endif
/* How many pages do we need, as an "order", for a user L2 page table? */
#define L2_USER_PGTABLE_ORDER (L2_USER_PGTABLE_SHIFT - HV_LOG2_PAGE_SIZE_SMALL)
/* How big is a kernel L2 page table? */
#define L2_KERNEL_PGTABLE_SIZE (1 << L2_KERNEL_PGTABLE_SHIFT)
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_64BIT
set_pte(pmdp, pmd);
#else
set_pte(&pmdp->pud.pgd, pmd.pud.pgd);
#endif
}
static inline void pmd_populate_kernel(struct mm_struct *mm,
pmd_t *pmd, pte_t *ptep)
{
set_pmd(pmd, ptfn_pmd(__pa(ptep) >> HV_LOG2_PAGE_TABLE_ALIGN,
__pgprot(_PAGE_PRESENT)));
}
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
pgtable_t page)
{
set_pmd(pmd, ptfn_pmd(HV_PFN_TO_PTFN(page_to_pfn(page)),
__pgprot(_PAGE_PRESENT)));
}
/*
* Allocate and free page tables.
*/
extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address);
extern void pte_free(struct mm_struct *mm, struct page *pte);
#define pmd_pgtable(pmd) pmd_page(pmd)
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
return pfn_to_kaddr(page_to_pfn(pte_alloc_one(mm, address)));
}
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
pte_free(mm, virt_to_page(pte));
}
extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
unsigned long address);
#define check_pgt_cache() do { } while (0)
/*
* Get the small-page pte_t lowmem entry for a given pfn.
* This may or may not be in use, depending on whether the initial
* huge-page entry for the page has already been shattered.
*/
pte_t *get_prealloc_pte(unsigned long pfn);
/* During init, we can shatter kernel huge pages if needed. */
void shatter_pmd(pmd_t *pmd);
/* After init, a more complex technique is required. */
void shatter_huge_page(unsigned long addr);
#ifdef __tilegx__
/* We share a single page allocator for both L1 and L2 page tables. */
#if HV_L1_SIZE != HV_L2_SIZE
# error Rework assumption that L1 and L2 page tables are same size.
#endif
#define L1_USER_PGTABLE_ORDER L2_USER_PGTABLE_ORDER
#define pud_populate(mm, pud, pmd) \
pmd_populate_kernel((mm), (pmd_t *)(pud), (pte_t *)(pmd))
#define pmd_alloc_one(mm, addr) \
((pmd_t *)page_to_virt(pte_alloc_one((mm), (addr))))
#define pmd_free(mm, pmdp) \
pte_free((mm), virt_to_page(pmdp))
#define __pmd_free_tlb(tlb, pmdp, address) \
__pte_free_tlb((tlb), virt_to_page(pmdp), (address))
#endif
#endif /* _ASM_TILE_PGALLOC_H */

View File

@ -0,0 +1,465 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
* This file contains the functions and defines necessary to modify and use
* the TILE page table tree.
*/
#ifndef _ASM_TILE_PGTABLE_H
#define _ASM_TILE_PGTABLE_H
#include <hv/hypervisor.h>
#ifndef __ASSEMBLY__
#include <linux/bitops.h>
#include <linux/threads.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <asm/processor.h>
#include <asm/fixmap.h>
struct mm_struct;
struct vm_area_struct;
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
extern pgd_t swapper_pg_dir[];
extern pgprot_t swapper_pgprot;
extern struct kmem_cache *pgd_cache;
extern spinlock_t pgd_lock;
extern struct list_head pgd_list;
/*
* The very last slots in the pgd_t are for addresses unusable by Linux
* (pgd_addr_invalid() returns true). So we use them for the list structure.
* The x86 code we are modelled on uses the page->private/index fields
* (older 2.6 kernels) or the lru list (newer 2.6 kernels), but since
* our pgds are so much smaller than a page, it seems a waste to
* spend a whole page on each pgd.
*/
#define PGD_LIST_OFFSET \
((PTRS_PER_PGD * sizeof(pgd_t)) - sizeof(struct list_head))
#define pgd_to_list(pgd) \
((struct list_head *)((char *)(pgd) + PGD_LIST_OFFSET))
#define list_to_pgd(list) \
((pgd_t *)((char *)(list) - PGD_LIST_OFFSET))
extern void pgtable_cache_init(void);
extern void paging_init(void);
extern void set_page_homes(void);
#define FIRST_USER_ADDRESS 0
#define _PAGE_PRESENT HV_PTE_PRESENT
#define _PAGE_HUGE_PAGE HV_PTE_PAGE
#define _PAGE_READABLE HV_PTE_READABLE
#define _PAGE_WRITABLE HV_PTE_WRITABLE
#define _PAGE_EXECUTABLE HV_PTE_EXECUTABLE
#define _PAGE_ACCESSED HV_PTE_ACCESSED
#define _PAGE_DIRTY HV_PTE_DIRTY
#define _PAGE_GLOBAL HV_PTE_GLOBAL
#define _PAGE_USER HV_PTE_USER
/*
* All the "standard" bits. Cache-control bits are managed elsewhere.
* This is used to test for valid level-2 page table pointers by checking
* all the bits, and to mask away the cache control bits for mprotect.
*/
#define _PAGE_ALL (\
_PAGE_PRESENT | \
_PAGE_HUGE_PAGE | \
_PAGE_READABLE | \
_PAGE_WRITABLE | \
_PAGE_EXECUTABLE | \
_PAGE_ACCESSED | \
_PAGE_DIRTY | \
_PAGE_GLOBAL | \
_PAGE_USER \
)
#define PAGE_NONE \
__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
#define PAGE_SHARED \
__pgprot(_PAGE_PRESENT | _PAGE_READABLE | _PAGE_WRITABLE | \
_PAGE_USER | _PAGE_ACCESSED)
#define PAGE_SHARED_EXEC \
__pgprot(_PAGE_PRESENT | _PAGE_READABLE | _PAGE_WRITABLE | \
_PAGE_EXECUTABLE | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC \
__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_READABLE)
#define PAGE_COPY_EXEC \
__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | \
_PAGE_READABLE | _PAGE_EXECUTABLE)
#define PAGE_COPY \
PAGE_COPY_NOEXEC
#define PAGE_READONLY \
__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_READABLE)
#define PAGE_READONLY_EXEC \
__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | \
_PAGE_READABLE | _PAGE_EXECUTABLE)
#define _PAGE_KERNEL_RO \
(_PAGE_PRESENT | _PAGE_GLOBAL | _PAGE_READABLE | _PAGE_ACCESSED)
#define _PAGE_KERNEL \
(_PAGE_KERNEL_RO | _PAGE_WRITABLE | _PAGE_DIRTY)
#define _PAGE_KERNEL_EXEC (_PAGE_KERNEL_RO | _PAGE_EXECUTABLE)
#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
#define page_to_kpgprot(p) PAGE_KERNEL
/*
* We could tighten these up, but for now writable or executable
* implies readable.
*/
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_COPY /* this is write-only, which we won't support */
#define __P011 PAGE_COPY
#define __P100 PAGE_READONLY_EXEC
#define __P101 PAGE_READONLY_EXEC
#define __P110 PAGE_COPY_EXEC
#define __P111 PAGE_COPY_EXEC
#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
#define __S100 PAGE_READONLY_EXEC
#define __S101 PAGE_READONLY_EXEC
#define __S110 PAGE_SHARED_EXEC
#define __S111 PAGE_SHARED_EXEC
/*
* All the normal _PAGE_ALL bits are ignored for PMDs, except PAGE_PRESENT
* and PAGE_HUGE_PAGE, which must be one and zero, respectively.
* We set the ignored bits to zero.
*/
#define _PAGE_TABLE _PAGE_PRESENT
/* Inherit the caching flags from the old protection bits. */
#define pgprot_modify(oldprot, newprot) \
(pgprot_t) { ((oldprot).val & ~_PAGE_ALL) | (newprot).val }
/* Just setting the PFN to zero suffices. */
#define pte_pgprot(x) hv_pte_set_pfn((x), 0)
/*
* For PTEs and PDEs, we must clear the Present bit first when
* clearing a page table entry, so clear the bottom half first and
* enforce ordering with a barrier.
*/
static inline void __pte_clear(pte_t *ptep)
{
#ifdef __tilegx__
ptep->val = 0;
#else
u32 *tmp = (u32 *)ptep;
tmp[0] = 0;
barrier();
tmp[1] = 0;
#endif
}
#define pte_clear(mm, addr, ptep) __pte_clear(ptep)
/*
* The following only work if pte_present() is true.
* Undefined behaviour if not..
*/
#define pte_present hv_pte_get_present
#define pte_user hv_pte_get_user
#define pte_read hv_pte_get_readable
#define pte_dirty hv_pte_get_dirty
#define pte_young hv_pte_get_accessed
#define pte_write hv_pte_get_writable
#define pte_exec hv_pte_get_executable
#define pte_huge hv_pte_get_page
#define pte_rdprotect hv_pte_clear_readable
#define pte_exprotect hv_pte_clear_executable
#define pte_mkclean hv_pte_clear_dirty
#define pte_mkold hv_pte_clear_accessed
#define pte_wrprotect hv_pte_clear_writable
#define pte_mksmall hv_pte_clear_page
#define pte_mkread hv_pte_set_readable
#define pte_mkexec hv_pte_set_executable
#define pte_mkdirty hv_pte_set_dirty
#define pte_mkyoung hv_pte_set_accessed
#define pte_mkwrite hv_pte_set_writable
#define pte_mkhuge hv_pte_set_page
#define pte_special(pte) 0
#define pte_mkspecial(pte) (pte)
/*
* Use some spare bits in the PTE for user-caching tags.
*/
#define pte_set_forcecache hv_pte_set_client0
#define pte_get_forcecache hv_pte_get_client0
#define pte_clear_forcecache hv_pte_clear_client0
#define pte_set_anyhome hv_pte_set_client1
#define pte_get_anyhome hv_pte_get_client1
#define pte_clear_anyhome hv_pte_clear_client1
/*
* A migrating PTE has PAGE_PRESENT clear but all the other bits preserved.
*/
#define pte_migrating hv_pte_get_migrating
#define pte_mkmigrate(x) hv_pte_set_migrating(hv_pte_clear_present(x))
#define pte_donemigrate(x) hv_pte_set_present(hv_pte_clear_migrating(x))
#define pte_ERROR(e) \
pr_err("%s:%d: bad pte 0x%016llx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
pr_err("%s:%d: bad pgd 0x%016llx.\n", __FILE__, __LINE__, pgd_val(e))
/* Return PA and protection info for a given kernel VA. */
int va_to_cpa_and_pte(void *va, phys_addr_t *cpa, pte_t *pte);
/*
* __set_pte() ensures we write the 64-bit PTE with 32-bit words in
* the right order on 32-bit platforms and also allows us to write
* hooks to check valid PTEs, etc., if we want.
*/
void __set_pte(pte_t *ptep, pte_t pte);
/*
* set_pte() sets the given PTE and also sanity-checks the
* requested PTE against the page homecaching. Unspecified parts
* of the PTE are filled in when it is written to memory, i.e. all
* caching attributes if "!forcecache", or the home cpu if "anyhome".
*/
extern void set_pte(pte_t *ptep, pte_t pte);
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
#define set_pte_atomic(pteptr, pteval) set_pte(pteptr, pteval)
#define pte_page(x) pfn_to_page(pte_pfn(x))
static inline int pte_none(pte_t pte)
{
return !pte.val;
}
static inline unsigned long pte_pfn(pte_t pte)
{
return hv_pte_get_pfn(pte);
}
/* Set or get the remote cache cpu in a pgprot with remote caching. */
extern pgprot_t set_remote_cache_cpu(pgprot_t prot, int cpu);
extern int get_remote_cache_cpu(pgprot_t prot);
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
return hv_pte_set_pfn(prot, pfn);
}
/* Support for priority mappings. */
extern void start_mm_caching(struct mm_struct *mm);
extern void check_mm_caching(struct mm_struct *prev, struct mm_struct *next);
/*
* Support non-linear file mappings (see sys_remap_file_pages).
* This is defined by CLIENT1 set but CLIENT0 and _PAGE_PRESENT clear, and the
* file offset in the 32 high bits.
*/
#define _PAGE_FILE HV_PTE_CLIENT1
#define PTE_FILE_MAX_BITS 32
#define pte_file(pte) (hv_pte_get_client1(pte) && !hv_pte_get_client0(pte))
#define pte_to_pgoff(pte) ((pte).val >> 32)
#define pgoff_to_pte(off) ((pte_t) { (((long long)(off)) << 32) | _PAGE_FILE })
/*
* Encode and de-code a swap entry (see <linux/swapops.h>).
* We put the swap file type+offset in the 32 high bits;
* I believe we can just leave the low bits clear.
*/
#define __swp_type(swp) ((swp).val & 0x1f)
#define __swp_offset(swp) ((swp).val >> 5)
#define __swp_entry(type, off) ((swp_entry_t) { (type) | ((off) << 5) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).val >> 32 })
#define __swp_entry_to_pte(swp) ((pte_t) { (((long long) ((swp).val)) << 32) })
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
/*
* If we are doing an mprotect(), just accept the new vma->vm_page_prot
* value and combine it with the PFN from the old PTE to get a new PTE.
*/
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
return pfn_pte(hv_pte_get_pfn(pte), newprot);
}
/*
* The pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
*
* This macro returns the index of the entry in the pgd page which would
* control the given virtual address.
*/
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
/*
* pgd_offset() returns a (pgd_t *)
* pgd_index() is used get the offset into the pgd page's array of pgd_t's.
*/
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
/*
* A shortcut which implies the use of the kernel's pgd, instead
* of a process's.
*/
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
#if defined(CONFIG_HIGHPTE)
extern pte_t *pte_offset_map(pmd_t *, unsigned long address);
#define pte_unmap(pte) kunmap_atomic(pte)
#else
#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
#define pte_unmap(pte) do { } while (0)
#endif
/* Clear a non-executable kernel PTE and flush it from the TLB. */
#define kpte_clear_flush(ptep, vaddr) \
do { \
pte_clear(&init_mm, (vaddr), (ptep)); \
local_flush_tlb_page(FLUSH_NONEXEC, (vaddr), PAGE_SIZE); \
} while (0)
/*
* The kernel page tables contain what we need, and we flush when we
* change specific page table entries.
*/
#define update_mmu_cache(vma, address, pte) do { } while (0)
#ifdef CONFIG_FLATMEM
#define kern_addr_valid(addr) (1)
#endif /* CONFIG_FLATMEM */
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
extern void vmalloc_sync_all(void);
#endif /* !__ASSEMBLY__ */
#ifdef __tilegx__
#include <asm/pgtable_64.h>
#else
#include <asm/pgtable_32.h>
#endif
#ifndef __ASSEMBLY__
static inline int pmd_none(pmd_t pmd)
{
/*
* Only check low word on 32-bit platforms, since it might be
* out of sync with upper half.
*/
return (unsigned long)pmd_val(pmd) == 0;
}
static inline int pmd_present(pmd_t pmd)
{
return pmd_val(pmd) & _PAGE_PRESENT;
}
static inline int pmd_bad(pmd_t pmd)
{
return ((pmd_val(pmd) & _PAGE_ALL) != _PAGE_TABLE);
}
static inline unsigned long pages_to_mb(unsigned long npg)
{
return npg >> (20 - PAGE_SHIFT);
}
/*
* The pmd can be thought of an array like this: pmd_t[PTRS_PER_PMD]
*
* This function returns the index of the entry in the pmd which would
* control the given virtual address.
*/
static inline unsigned long pmd_index(unsigned long address)
{
return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}
/*
* A given kernel pmd_t maps to a specific virtual address (either a
* kernel huge page or a kernel pte_t table). Since kernel pte_t
* tables can be aligned at sub-page granularity, this function can
* return non-page-aligned pointers, despite its name.
*/
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
phys_addr_t pa =
(phys_addr_t)pmd_ptfn(pmd) << HV_LOG2_PAGE_TABLE_ALIGN;
return (unsigned long)__va(pa);
}
/*
* A pmd_t points to the base of a huge page or to a pte_t array.
* If a pte_t array, since we can have multiple per page, we don't
* have a one-to-one mapping of pmd_t's to pages. However, this is
* OK for pte_lockptr(), since we just end up with potentially one
* lock being used for several pte_t arrays.
*/
#define pmd_page(pmd) pfn_to_page(HV_PTFN_TO_PFN(pmd_ptfn(pmd)))
/*
* The pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
*
* This macro returns the index of the entry in the pte page which would
* control the given virtual address.
*/
static inline unsigned long pte_index(unsigned long address)
{
return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}
static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}
static inline int pmd_huge_page(pmd_t pmd)
{
return pmd_val(pmd) & _PAGE_HUGE_PAGE;
}
#include <asm-generic/pgtable.h>
/* Support /proc/NN/pgtable API. */
struct seq_file;
int arch_proc_pgtable_show(struct seq_file *m, struct mm_struct *mm,
unsigned long vaddr, pte_t *ptep, void **datap);
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_TILE_PGTABLE_H */

View File

@ -0,0 +1,135 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
*/
#ifndef _ASM_TILE_PGTABLE_32_H
#define _ASM_TILE_PGTABLE_32_H
/*
* The level-1 index is defined by the huge page size. A PGD is composed
* of PTRS_PER_PGD pgd_t's and is the top level of the page table.
*/
#define PGDIR_SHIFT HV_LOG2_PAGE_SIZE_LARGE
#define PGDIR_SIZE HV_PAGE_SIZE_LARGE
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT))
#define SIZEOF_PGD (PTRS_PER_PGD * sizeof(pgd_t))
/*
* The level-2 index is defined by the difference between the huge
* page size and the normal page size. A PTE is composed of
* PTRS_PER_PTE pte_t's and is the bottom level of the page table.
* Note that the hypervisor docs use PTE for what we call pte_t, so
* this nomenclature is somewhat confusing.
*/
#define PTRS_PER_PTE (1 << (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL))
#define SIZEOF_PTE (PTRS_PER_PTE * sizeof(pte_t))
#ifndef __ASSEMBLY__
/*
* Right now we initialize only a single pte table. It can be extended
* easily, subsequent pte tables have to be allocated in one physical
* chunk of RAM.
*
* HOWEVER, if we are using an allocation scheme with slop after the
* end of the page table (e.g. where our L2 page tables are 2KB but
* our pages are 64KB and we are allocating via the page allocator)
* we can't extend it easily.
*/
#define LAST_PKMAP PTRS_PER_PTE
#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE*LAST_PKMAP) & PGDIR_MASK)
#ifdef CONFIG_HIGHMEM
# define __VMAPPING_END (PKMAP_BASE & ~(HPAGE_SIZE-1))
#else
# define __VMAPPING_END (FIXADDR_START & ~(HPAGE_SIZE-1))
#endif
#ifdef CONFIG_HUGEVMAP
#define HUGE_VMAP_END __VMAPPING_END
#define HUGE_VMAP_BASE (HUGE_VMAP_END - CONFIG_NR_HUGE_VMAPS * HPAGE_SIZE)
#define _VMALLOC_END HUGE_VMAP_BASE
#else
#define _VMALLOC_END __VMAPPING_END
#endif
/*
* Align the vmalloc area to an L2 page table, and leave a guard page
* at the beginning and end. The vmalloc code also puts in an internal
* guard page between each allocation.
*/
#define VMALLOC_END (_VMALLOC_END - PAGE_SIZE)
extern unsigned long VMALLOC_RESERVE /* = CONFIG_VMALLOC_RESERVE */;
#define _VMALLOC_START (_VMALLOC_END - VMALLOC_RESERVE)
#define VMALLOC_START (_VMALLOC_START + PAGE_SIZE)
/* This is the maximum possible amount of lowmem. */
#define MAXMEM (_VMALLOC_START - PAGE_OFFSET)
/* We have no pmd or pud since we are strictly a two-level page table */
#include <asm-generic/pgtable-nopmd.h>
/* We don't define any pgds for these addresses. */
static inline int pgd_addr_invalid(unsigned long addr)
{
return addr >= MEM_HV_INTRPT;
}
/*
* Provide versions of these routines that can be used safely when
* the hypervisor may be asynchronously modifying dirty/accessed bits.
* ptep_get_and_clear() matches the generic one but we provide it to
* be parallel with the 64-bit code.
*/
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
extern int ptep_test_and_clear_young(struct vm_area_struct *,
unsigned long addr, pte_t *);
extern void ptep_set_wrprotect(struct mm_struct *,
unsigned long addr, pte_t *);
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
pte_t pte = *ptep;
pte_clear(_mm, addr, ptep);
return pte;
}
static inline void __set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
set_pte(&pmdp->pud.pgd, pmdval.pud.pgd);
}
/* Create a pmd from a PTFN. */
static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot)
{
return (pmd_t){ { hv_pte_set_ptfn(prot, ptfn) } };
}
/* Return the page-table frame number (ptfn) that a pmd_t points at. */
#define pmd_ptfn(pmd) hv_pte_get_ptfn((pmd).pud.pgd)
static inline void pmd_clear(pmd_t *pmdp)
{
__pte_clear(&pmdp->pud.pgd);
}
#endif /* __ASSEMBLY__ */
#endif /* _ASM_TILE_PGTABLE_32_H */

View File

@ -0,0 +1,175 @@
/*
* Copyright 2011 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
*/
#ifndef _ASM_TILE_PGTABLE_64_H
#define _ASM_TILE_PGTABLE_64_H
/* The level-0 page table breaks the address space into 32-bit chunks. */
#define PGDIR_SHIFT HV_LOG2_L1_SPAN
#define PGDIR_SIZE HV_L1_SPAN
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define PTRS_PER_PGD HV_L0_ENTRIES
#define SIZEOF_PGD (PTRS_PER_PGD * sizeof(pgd_t))
/*
* The level-1 index is defined by the huge page size. A PMD is composed
* of PTRS_PER_PMD pgd_t's and is the middle level of the page table.
*/
#define PMD_SHIFT HV_LOG2_PAGE_SIZE_LARGE
#define PMD_SIZE HV_PAGE_SIZE_LARGE
#define PMD_MASK (~(PMD_SIZE-1))
#define PTRS_PER_PMD (1 << (PGDIR_SHIFT - PMD_SHIFT))
#define SIZEOF_PMD (PTRS_PER_PMD * sizeof(pmd_t))
/*
* The level-2 index is defined by the difference between the huge
* page size and the normal page size. A PTE is composed of
* PTRS_PER_PTE pte_t's and is the bottom level of the page table.
* Note that the hypervisor docs use PTE for what we call pte_t, so
* this nomenclature is somewhat confusing.
*/
#define PTRS_PER_PTE (1 << (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL))
#define SIZEOF_PTE (PTRS_PER_PTE * sizeof(pte_t))
/*
* Align the vmalloc area to an L2 page table, and leave a guard page
* at the beginning and end. The vmalloc code also puts in an internal
* guard page between each allocation.
*/
#define _VMALLOC_END HUGE_VMAP_BASE
#define VMALLOC_END (_VMALLOC_END - PAGE_SIZE)
#define VMALLOC_START (_VMALLOC_START + PAGE_SIZE)
#define HUGE_VMAP_END (HUGE_VMAP_BASE + PGDIR_SIZE)
#ifndef __ASSEMBLY__
/* We have no pud since we are a three-level page table. */
#include <asm-generic/pgtable-nopud.h>
static inline int pud_none(pud_t pud)
{
return pud_val(pud) == 0;
}
static inline int pud_present(pud_t pud)
{
return pud_val(pud) & _PAGE_PRESENT;
}
#define pmd_ERROR(e) \
pr_err("%s:%d: bad pmd 0x%016llx.\n", __FILE__, __LINE__, pmd_val(e))
static inline void pud_clear(pud_t *pudp)
{
__pte_clear(&pudp->pgd);
}
static inline int pud_bad(pud_t pud)
{
return ((pud_val(pud) & _PAGE_ALL) != _PAGE_TABLE);
}
/* Return the page-table frame number (ptfn) that a pud_t points at. */
#define pud_ptfn(pud) hv_pte_get_ptfn((pud).pgd)
/*
* A given kernel pud_t maps to a kernel pmd_t table at a specific
* virtual address. Since kernel pmd_t tables can be aligned at
* sub-page granularity, this macro can return non-page-aligned
* pointers, despite its name.
*/
#define pud_page_vaddr(pud) \
(__va((phys_addr_t)pud_ptfn(pud) << HV_LOG2_PAGE_TABLE_ALIGN))
/*
* A pud_t points to a pmd_t array. Since we can have multiple per
* page, we don't have a one-to-one mapping of pud_t's to pages.
*/
#define pud_page(pud) pfn_to_page(HV_PTFN_TO_PFN(pud_ptfn(pud)))
static inline unsigned long pud_index(unsigned long address)
{
return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}
#define pmd_offset(pud, address) \
((pmd_t *)pud_page_vaddr(*(pud)) + pmd_index(address))
static inline void __set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
set_pte(pmdp, pmdval);
}
/* Create a pmd from a PTFN and pgprot. */
static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot)
{
return hv_pte_set_ptfn(prot, ptfn);
}
/* Return the page-table frame number (ptfn) that a pmd_t points at. */
static inline unsigned long pmd_ptfn(pmd_t pmd)
{
return hv_pte_get_ptfn(pmd);
}
static inline void pmd_clear(pmd_t *pmdp)
{
__pte_clear(pmdp);
}
/* Normalize an address to having the correct high bits set. */
#define pgd_addr_normalize pgd_addr_normalize
static inline unsigned long pgd_addr_normalize(unsigned long addr)
{
return ((long)addr << (CHIP_WORD_SIZE() - CHIP_VA_WIDTH())) >>
(CHIP_WORD_SIZE() - CHIP_VA_WIDTH());
}
/* We don't define any pgds for these addresses. */
static inline int pgd_addr_invalid(unsigned long addr)
{
return addr >= MEM_HV_START ||
(addr > MEM_LOW_END && addr < MEM_HIGH_START);
}
/*
* Use atomic instructions to provide atomicity against the hypervisor.
*/
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
return (__insn_fetchand(&ptep->val, ~HV_PTE_ACCESSED) >>
HV_PTE_INDEX_ACCESSED) & 0x1;
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
__insn_fetchand(&ptep->val, ~HV_PTE_WRITABLE);
}
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
return hv_pte(__insn_exch(&ptep->val, 0UL));
}
#endif /* __ASSEMBLY__ */
#endif /* _ASM_TILE_PGTABLE_64_H */

View File

@ -0,0 +1,357 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_PROCESSOR_H
#define _ASM_TILE_PROCESSOR_H
#ifndef __ASSEMBLY__
/*
* NOTE: we don't include <linux/ptrace.h> or <linux/percpu.h> as one
* normally would, due to #include dependencies.
*/
#include <linux/types.h>
#include <asm/ptrace.h>
#include <asm/percpu.h>
#include <arch/chip.h>
#include <arch/spr_def.h>
struct task_struct;
struct thread_struct;
typedef struct {
unsigned long seg;
} mm_segment_t;
/*
* Default implementation of macro that returns current
* instruction pointer ("program counter").
*/
void *current_text_addr(void);
#if CHIP_HAS_TILE_DMA()
/* Capture the state of a suspended DMA. */
struct tile_dma_state {
int enabled;
unsigned long src;
unsigned long dest;
unsigned long strides;
unsigned long chunk_size;
unsigned long src_chunk;
unsigned long dest_chunk;
unsigned long byte;
unsigned long status;
};
/*
* A mask of the DMA status register for selecting only the 'running'
* and 'done' bits.
*/
#define DMA_STATUS_MASK \
(SPR_DMA_STATUS__RUNNING_MASK | SPR_DMA_STATUS__DONE_MASK)
#endif
/*
* Track asynchronous TLB events (faults and access violations)
* that occur while we are in kernel mode from DMA or the SN processor.
*/
struct async_tlb {
short fault_num; /* original fault number; 0 if none */
char is_fault; /* was it a fault (vs an access violation) */
char is_write; /* for fault: was it caused by a write? */
unsigned long address; /* what address faulted? */
};
#ifdef CONFIG_HARDWALL
struct hardwall_info;
#endif
struct thread_struct {
/* kernel stack pointer */
unsigned long ksp;
/* kernel PC */
unsigned long pc;
/* starting user stack pointer (for page migration) */
unsigned long usp0;
/* pid of process that created this one */
pid_t creator_pid;
#if CHIP_HAS_TILE_DMA()
/* DMA info for suspended threads (byte == 0 means no DMA state) */
struct tile_dma_state tile_dma_state;
#endif
/* User EX_CONTEXT registers */
unsigned long ex_context[2];
/* User SYSTEM_SAVE registers */
unsigned long system_save[4];
/* User interrupt mask */
unsigned long long interrupt_mask;
/* User interrupt-control 0 state */
unsigned long intctrl_0;
#if CHIP_HAS_PROC_STATUS_SPR()
/* Any other miscellaneous processor state bits */
unsigned long proc_status;
#endif
#if !CHIP_HAS_FIXED_INTVEC_BASE()
/* Interrupt base for PL0 interrupts */
unsigned long interrupt_vector_base;
#endif
#if CHIP_HAS_TILE_RTF_HWM()
/* Tile cache retry fifo high-water mark */
unsigned long tile_rtf_hwm;
#endif
#if CHIP_HAS_DSTREAM_PF()
/* Data stream prefetch control */
unsigned long dstream_pf;
#endif
#ifdef CONFIG_HARDWALL
/* Is this task tied to an activated hardwall? */
struct hardwall_info *hardwall;
/* Chains this task into the list at hardwall->list. */
struct list_head hardwall_list;
#endif
#if CHIP_HAS_TILE_DMA()
/* Async DMA TLB fault information */
struct async_tlb dma_async_tlb;
#endif
#if CHIP_HAS_SN_PROC()
/* Was static network processor when we were switched out? */
int sn_proc_running;
/* Async SNI TLB fault information */
struct async_tlb sn_async_tlb;
#endif
};
#endif /* !__ASSEMBLY__ */
/*
* Start with "sp" this many bytes below the top of the kernel stack.
* This preserves the invariant that a called function may write to *sp.
*/
#define STACK_TOP_DELTA 8
/*
* When entering the kernel via a fault, start with the top of the
* pt_regs structure this many bytes below the top of the page.
* This aligns the pt_regs structure optimally for cache-line access.
*/
#ifdef __tilegx__
#define KSTK_PTREGS_GAP 48
#else
#define KSTK_PTREGS_GAP 56
#endif
#ifndef __ASSEMBLY__
#ifdef __tilegx__
#define TASK_SIZE_MAX (MEM_LOW_END + 1)
#else
#define TASK_SIZE_MAX PAGE_OFFSET
#endif
/* TASK_SIZE and related variables are always checked in "current" context. */
#ifdef CONFIG_COMPAT
#define COMPAT_TASK_SIZE (1UL << 31)
#define TASK_SIZE ((current_thread_info()->status & TS_COMPAT) ?\
COMPAT_TASK_SIZE : TASK_SIZE_MAX)
#else
#define TASK_SIZE TASK_SIZE_MAX
#endif
/* We provide a minimal "vdso" a la x86; just the sigreturn code for now. */
#define VDSO_BASE (TASK_SIZE - PAGE_SIZE)
#define STACK_TOP VDSO_BASE
/* STACK_TOP_MAX is used temporarily in execve and should not check COMPAT. */
#define STACK_TOP_MAX TASK_SIZE_MAX
/*
* This decides where the kernel will search for a free chunk of vm
* space during mmap's, if it is using bottom-up mapping.
*/
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
#define HAVE_ARCH_PICK_MMAP_LAYOUT
#define INIT_THREAD { \
.ksp = (unsigned long)init_stack + THREAD_SIZE - STACK_TOP_DELTA, \
.interrupt_mask = -1ULL \
}
/* Kernel stack top for the task that first boots on this cpu. */
DECLARE_PER_CPU(unsigned long, boot_sp);
/* PC to boot from on this cpu. */
DECLARE_PER_CPU(unsigned long, boot_pc);
/* Do necessary setup to start up a newly executed thread. */
static inline void start_thread(struct pt_regs *regs,
unsigned long pc, unsigned long usp)
{
regs->pc = pc;
regs->sp = usp;
}
/* Free all resources held by a thread. */
static inline void release_thread(struct task_struct *dead_task)
{
/* Nothing for now */
}
/* Prepare to copy thread state - unlazy all lazy status. */
#define prepare_to_copy(tsk) do { } while (0)
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
extern int do_work_pending(struct pt_regs *regs, u32 flags);
/*
* Return saved (kernel) PC of a blocked thread.
* Only used in a printk() in kernel/sched.c, so don't work too hard.
*/
#define thread_saved_pc(t) ((t)->thread.pc)
unsigned long get_wchan(struct task_struct *p);
/* Return initial ksp value for given task. */
#define task_ksp0(task) ((unsigned long)(task)->stack + THREAD_SIZE)
/* Return some info about the user process TASK. */
#define KSTK_TOP(task) (task_ksp0(task) - STACK_TOP_DELTA)
#define task_pt_regs(task) \
((struct pt_regs *)(task_ksp0(task) - KSTK_PTREGS_GAP) - 1)
#define task_sp(task) (task_pt_regs(task)->sp)
#define task_pc(task) (task_pt_regs(task)->pc)
/* Aliases for pc and sp (used in fs/proc/array.c) */
#define KSTK_EIP(task) task_pc(task)
#define KSTK_ESP(task) task_sp(task)
/* Standard format for printing registers and other word-size data. */
#ifdef __tilegx__
# define REGFMT "0x%016lx"
#else
# define REGFMT "0x%08lx"
#endif
/*
* Do some slow action (e.g. read a slow SPR).
* Note that this must also have compiler-barrier semantics since
* it may be used in a busy loop reading memory.
*/
static inline void cpu_relax(void)
{
__insn_mfspr(SPR_PASS);
barrier();
}
/* Info on this processor (see fs/proc/cpuinfo.c) */
struct seq_operations;
extern const struct seq_operations cpuinfo_op;
/* Provide information about the chip model. */
extern char chip_model[64];
/* Data on which physical memory controller corresponds to which NUMA node. */
extern int node_controller[];
#if CHIP_HAS_CBOX_HOME_MAP()
/* Does the heap allocator return hash-for-home pages by default? */
extern int hash_default;
/* Should kernel stack pages be hash-for-home? */
extern int kstack_hash;
/* Does MAP_ANONYMOUS return hash-for-home pages by default? */
#define uheap_hash hash_default
#else
#define hash_default 0
#define kstack_hash 0
#define uheap_hash 0
#endif
/* Are we using huge pages in the TLB for kernel data? */
extern int kdata_huge;
/* Support standard Linux prefetching. */
#define ARCH_HAS_PREFETCH
#define prefetch(x) __builtin_prefetch(x)
#define PREFETCH_STRIDE CHIP_L2_LINE_SIZE()
/* Bring a value into the L1D, faulting the TLB if necessary. */
#ifdef __tilegx__
#define prefetch_L1(x) __insn_prefetch_l1_fault((void *)(x))
#else
#define prefetch_L1(x) __insn_prefetch_L1((void *)(x))
#endif
#else /* __ASSEMBLY__ */
/* Do some slow action (e.g. read a slow SPR). */
#define CPU_RELAX mfspr zero, SPR_PASS
#endif /* !__ASSEMBLY__ */
/* Assembly code assumes that the PL is in the low bits. */
#if SPR_EX_CONTEXT_1_1__PL_SHIFT != 0
# error Fix assembly assumptions about PL
#endif
/* We sometimes use these macros for EX_CONTEXT_0_1 as well. */
#if SPR_EX_CONTEXT_1_1__PL_SHIFT != SPR_EX_CONTEXT_0_1__PL_SHIFT || \
SPR_EX_CONTEXT_1_1__PL_RMASK != SPR_EX_CONTEXT_0_1__PL_RMASK || \
SPR_EX_CONTEXT_1_1__ICS_SHIFT != SPR_EX_CONTEXT_0_1__ICS_SHIFT || \
SPR_EX_CONTEXT_1_1__ICS_RMASK != SPR_EX_CONTEXT_0_1__ICS_RMASK
# error Fix assumptions that EX1 macros work for both PL0 and PL1
#endif
/* Allow pulling apart and recombining the PL and ICS bits in EX_CONTEXT. */
#define EX1_PL(ex1) \
(((ex1) >> SPR_EX_CONTEXT_1_1__PL_SHIFT) & SPR_EX_CONTEXT_1_1__PL_RMASK)
#define EX1_ICS(ex1) \
(((ex1) >> SPR_EX_CONTEXT_1_1__ICS_SHIFT) & SPR_EX_CONTEXT_1_1__ICS_RMASK)
#define PL_ICS_EX1(pl, ics) \
(((pl) << SPR_EX_CONTEXT_1_1__PL_SHIFT) | \
((ics) << SPR_EX_CONTEXT_1_1__ICS_SHIFT))
/*
* Provide symbolic constants for PLs.
* Note that assembly code assumes that USER_PL is zero.
*/
#define USER_PL 0
#if CONFIG_KERNEL_PL == 2
#define GUEST_PL 1
#endif
#define KERNEL_PL CONFIG_KERNEL_PL
/* SYSTEM_SAVE_K_0 holds the current cpu number ORed with ksp0. */
#define CPU_LOG_MASK_VALUE 12
#define CPU_MASK_VALUE ((1 << CPU_LOG_MASK_VALUE) - 1)
#if CONFIG_NR_CPUS > CPU_MASK_VALUE
# error Too many cpus!
#endif
#define raw_smp_processor_id() \
((int)__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & CPU_MASK_VALUE)
#define get_current_ksp0() \
(__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & ~CPU_MASK_VALUE)
#define next_current_ksp0(task) ({ \
unsigned long __ksp0 = task_ksp0(task); \
int __cpu = raw_smp_processor_id(); \
BUG_ON(__ksp0 & CPU_MASK_VALUE); \
__ksp0 | __cpu; \
})
#endif /* _ASM_TILE_PROCESSOR_H */

View File

@ -0,0 +1,164 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_PTRACE_H
#define _ASM_TILE_PTRACE_H
#include <arch/chip.h>
#include <arch/abi.h>
/* These must match struct pt_regs, below. */
#if CHIP_WORD_SIZE() == 32
#define PTREGS_OFFSET_REG(n) ((n)*4)
#else
#define PTREGS_OFFSET_REG(n) ((n)*8)
#endif
#define PTREGS_OFFSET_BASE 0
#define PTREGS_OFFSET_TP PTREGS_OFFSET_REG(53)
#define PTREGS_OFFSET_SP PTREGS_OFFSET_REG(54)
#define PTREGS_OFFSET_LR PTREGS_OFFSET_REG(55)
#define PTREGS_NR_GPRS 56
#define PTREGS_OFFSET_PC PTREGS_OFFSET_REG(56)
#define PTREGS_OFFSET_EX1 PTREGS_OFFSET_REG(57)
#define PTREGS_OFFSET_FAULTNUM PTREGS_OFFSET_REG(58)
#define PTREGS_OFFSET_ORIG_R0 PTREGS_OFFSET_REG(59)
#define PTREGS_OFFSET_FLAGS PTREGS_OFFSET_REG(60)
#if CHIP_HAS_CMPEXCH()
#define PTREGS_OFFSET_CMPEXCH PTREGS_OFFSET_REG(61)
#endif
#define PTREGS_SIZE PTREGS_OFFSET_REG(64)
#ifndef __ASSEMBLY__
#ifdef __KERNEL__
/* Benefit from consistent use of "long" on all chips. */
typedef unsigned long pt_reg_t;
#else
/* Provide appropriate length type to userspace regardless of -m32/-m64. */
typedef uint_reg_t pt_reg_t;
#endif
/*
* This struct defines the way the registers are stored on the stack during a
* system call or exception. "struct sigcontext" has the same shape.
*/
struct pt_regs {
/* Saved main processor registers; 56..63 are special. */
/* tp, sp, and lr must immediately follow regs[] for aliasing. */
pt_reg_t regs[53];
pt_reg_t tp; /* aliases regs[TREG_TP] */
pt_reg_t sp; /* aliases regs[TREG_SP] */
pt_reg_t lr; /* aliases regs[TREG_LR] */
/* Saved special registers. */
pt_reg_t pc; /* stored in EX_CONTEXT_K_0 */
pt_reg_t ex1; /* stored in EX_CONTEXT_K_1 (PL and ICS bit) */
pt_reg_t faultnum; /* fault number (INT_SWINT_1 for syscall) */
pt_reg_t orig_r0; /* r0 at syscall entry, else zero */
pt_reg_t flags; /* flags (see below) */
#if !CHIP_HAS_CMPEXCH()
pt_reg_t pad[3];
#else
pt_reg_t cmpexch; /* value of CMPEXCH_VALUE SPR at interrupt */
pt_reg_t pad[2];
#endif
};
#endif /* __ASSEMBLY__ */
#define PTRACE_GETREGS 12
#define PTRACE_SETREGS 13
#define PTRACE_GETFPREGS 14
#define PTRACE_SETFPREGS 15
/* Support TILE-specific ptrace options, with events starting at 16. */
#define PTRACE_O_TRACEMIGRATE 0x00010000
#define PTRACE_EVENT_MIGRATE 16
#ifdef __KERNEL__
#define PTRACE_O_MASK_TILE (PTRACE_O_TRACEMIGRATE)
#define PT_TRACE_MIGRATE 0x00080000
#define PT_TRACE_MASK_TILE (PT_TRACE_MIGRATE)
#endif
#ifdef __KERNEL__
/* Flag bits in pt_regs.flags */
#define PT_FLAGS_DISABLE_IRQ 1 /* on return to kernel, disable irqs */
#define PT_FLAGS_CALLER_SAVES 2 /* caller-save registers are valid */
#define PT_FLAGS_RESTORE_REGS 4 /* restore callee-save regs on return */
#ifndef __ASSEMBLY__
#define instruction_pointer(regs) ((regs)->pc)
#define profile_pc(regs) instruction_pointer(regs)
/* Does the process account for user or for system time? */
#define user_mode(regs) (EX1_PL((regs)->ex1) == USER_PL)
/* Fill in a struct pt_regs with the current kernel registers. */
struct pt_regs *get_pt_regs(struct pt_regs *);
/* Trace the current syscall. */
extern void do_syscall_trace(void);
#define arch_has_single_step() (1)
/*
* A structure for all single-stepper state.
*
* Also update defines in assembler section if it changes
*/
struct single_step_state {
/* the page to which we will write hacked-up bundles */
void __user *buffer;
union {
int flags;
struct {
unsigned long is_enabled:1, update:1, update_reg:6;
};
};
unsigned long orig_pc; /* the original PC */
unsigned long next_pc; /* return PC if no branch (PC + 1) */
unsigned long branch_next_pc; /* return PC if we did branch/jump */
unsigned long update_value; /* value to restore to update_target */
};
/* Single-step the instruction at regs->pc */
extern void single_step_once(struct pt_regs *regs);
/* Clean up after execve(). */
extern void single_step_execve(void);
struct task_struct;
extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
int error_code);
#ifdef __tilegx__
/* We need this since sigval_t has a user pointer in it, for GETSIGINFO etc. */
#define __ARCH_WANT_COMPAT_SYS_PTRACE
#endif
#endif /* !__ASSEMBLY__ */
#define SINGLESTEP_STATE_MASK_IS_ENABLED 0x1
#define SINGLESTEP_STATE_MASK_UPDATE 0x2
#define SINGLESTEP_STATE_TARGET_LB 2
#define SINGLESTEP_STATE_TARGET_UB 7
#endif /* !__KERNEL__ */
#endif /* _ASM_TILE_PTRACE_H */

View File

@ -0,0 +1,44 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_SECTIONS_H
#define _ASM_TILE_SECTIONS_H
#define arch_is_kernel_data arch_is_kernel_data
#include <asm-generic/sections.h>
/* Text and data are at different areas in the kernel VA space. */
extern char _sinitdata[], _einitdata[];
/* Write-once data is writable only till the end of initialization. */
extern char __w1data_begin[], __w1data_end[];
/* Not exactly sections, but PC comparison points in the code. */
extern char __rt_sigreturn[], __rt_sigreturn_end[];
#ifndef __tilegx__
extern char sys_cmpxchg[], __sys_cmpxchg_end[];
extern char __sys_cmpxchg_grab_lock[];
extern char __start_atomic_asm_code[], __end_atomic_asm_code[];
#endif
/* Handle the discontiguity between _sdata and _stext. */
static inline int arch_is_kernel_data(unsigned long addr)
{
return addr >= (unsigned long)_sdata &&
addr < (unsigned long)_end;
}
#endif /* _ASM_TILE_SECTIONS_H */

View File

@ -0,0 +1,58 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_SETUP_H
#define _ASM_TILE_SETUP_H
#define COMMAND_LINE_SIZE 2048
#ifdef __KERNEL__
#include <linux/pfn.h>
#include <linux/init.h>
/*
* Reserved space for vmalloc and iomap - defined in asm/page.h
*/
#define MAXMEM_PFN PFN_DOWN(MAXMEM)
void early_panic(const char *fmt, ...);
void warn_early_printk(void);
void __init disable_early_printk(void);
/* Init-time routine to do tile-specific per-cpu setup. */
void setup_cpu(int boot);
/* User-level DMA management functions */
void grant_dma_mpls(void);
void restrict_dma_mpls(void);
#ifdef CONFIG_HARDWALL
/* User-level network management functions */
void reset_network_state(void);
void grant_network_mpls(void);
void restrict_network_mpls(void);
struct task_struct;
int hardwall_deactivate(struct task_struct *task);
/* Hook hardwall code into changes in affinity. */
#define arch_set_cpus_allowed(p, new_mask) do { \
if (p->thread.hardwall && !cpumask_equal(&p->cpus_allowed, new_mask)) \
hardwall_deactivate(p); \
} while (0)
#endif
#endif /* __KERNEL__ */
#endif /* _ASM_TILE_SETUP_H */

View File

@ -0,0 +1,37 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_SIGCONTEXT_H
#define _ASM_TILE_SIGCONTEXT_H
/* Don't pollute the namespace since <signal.h> includes this file. */
#define __need_int_reg_t
#include <arch/abi.h>
/*
* struct sigcontext has the same shape as struct pt_regs,
* but is simplified since we know the fault is from userspace.
*/
struct sigcontext {
__uint_reg_t gregs[53]; /* General-purpose registers. */
__uint_reg_t tp; /* Aliases gregs[TREG_TP]. */
__uint_reg_t sp; /* Aliases gregs[TREG_SP]. */
__uint_reg_t lr; /* Aliases gregs[TREG_LR]. */
__uint_reg_t pc; /* Program counter. */
__uint_reg_t ics; /* In Interrupt Critical Section? */
__uint_reg_t faultnum; /* Fault number. */
__uint_reg_t pad[5];
};
#endif /* _ASM_TILE_SIGCONTEXT_H */

View File

@ -0,0 +1,33 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_SIGFRAME_H
#define _ASM_TILE_SIGFRAME_H
/* Indicate that syscall return should not examine r0 */
#define INT_SWINT_1_SIGRETURN (~0)
#ifndef __ASSEMBLY__
#include <arch/abi.h>
struct rt_sigframe {
unsigned char save_area[C_ABI_SAVE_AREA_SIZE]; /* caller save area */
struct siginfo info;
struct ucontext uc;
};
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_TILE_SIGFRAME_H */

View File

@ -0,0 +1,34 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_SIGINFO_H
#define _ASM_TILE_SIGINFO_H
#define __ARCH_SI_TRAPNO
#ifdef __LP64__
# define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
#endif
#include <asm-generic/siginfo.h>
/*
* Additional Tile-specific SIGILL si_codes
*/
#define ILL_DBLFLT (__SI_FAULT|9) /* double fault */
#define ILL_HARDWALL (__SI_FAULT|10) /* user networks hardwall violation */
#undef NSIGILL
#define NSIGILL 10
#endif /* _ASM_TILE_SIGINFO_H */

View File

@ -0,0 +1,39 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_SIGNAL_H
#define _ASM_TILE_SIGNAL_H
/* Do not notify a ptracer when this signal is handled. */
#define SA_NOPTRACE 0x02000000u
/* Used in earlier Tilera releases, so keeping for binary compatibility. */
#define SA_RESTORER 0x04000000u
#include <asm-generic/signal.h>
#if defined(__KERNEL__)
#if !defined(__ASSEMBLY__)
struct pt_regs;
int restore_sigcontext(struct pt_regs *, struct sigcontext __user *);
int setup_sigcontext(struct sigcontext __user *, struct pt_regs *);
void do_signal(struct pt_regs *regs);
void signal_fault(const char *type, struct pt_regs *,
void __user *frame, int sig);
void trace_unhandled_signal(const char *type, struct pt_regs *regs,
unsigned long address, int signo);
#endif
#endif
#endif /* _ASM_TILE_SIGNAL_H */

View File

@ -0,0 +1,140 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_SMP_H
#define _ASM_TILE_SMP_H
#ifdef CONFIG_SMP
#include <asm/processor.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <hv/hypervisor.h>
/* Set up this tile to support receiving hypervisor messages */
void init_messaging(void);
/* Set up this tile to support receiving device interrupts and IPIs. */
void init_per_tile_IRQs(void);
/* Send a message to processors specified in mask */
void send_IPI_many(const struct cpumask *mask, int tag);
/* Send a message to all but the sending processor */
void send_IPI_allbutself(int tag);
/* Send a message to a specific processor */
void send_IPI_single(int dest, int tag);
/* Process an IPI message */
void evaluate_message(int tag);
/* Boot a secondary cpu */
void online_secondary(void);
/* Topology of the supervisor tile grid, and coordinates of boot processor */
extern HV_Topology smp_topology;
/* Accessors for grid size */
#define smp_height (smp_topology.height)
#define smp_width (smp_topology.width)
/* Convenience functions for converting cpu <-> coords. */
static inline int cpu_x(int cpu)
{
return cpu % smp_width;
}
static inline int cpu_y(int cpu)
{
return cpu / smp_width;
}
static inline int xy_to_cpu(int x, int y)
{
return y * smp_width + x;
}
/* Hypervisor message tags sent via the tile send_IPI*() routines. */
#define MSG_TAG_START_CPU 1
#define MSG_TAG_STOP_CPU 2
#define MSG_TAG_CALL_FUNCTION_MANY 3
#define MSG_TAG_CALL_FUNCTION_SINGLE 4
/* Hook for the generic smp_call_function_many() routine. */
static inline void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
send_IPI_many(mask, MSG_TAG_CALL_FUNCTION_MANY);
}
/* Hook for the generic smp_call_function_single() routine. */
static inline void arch_send_call_function_single_ipi(int cpu)
{
send_IPI_single(cpu, MSG_TAG_CALL_FUNCTION_SINGLE);
}
/* Print out the boot string describing which cpus were disabled. */
void print_disabled_cpus(void);
#else /* !CONFIG_SMP */
#define smp_master_cpu 0
#define smp_height 1
#define smp_width 1
#define cpu_x(cpu) 0
#define cpu_y(cpu) 0
#define xy_to_cpu(x, y) 0
#endif /* !CONFIG_SMP */
/* Which cpus may be used as the lotar in a page table entry. */
extern struct cpumask cpu_lotar_map;
#define cpu_is_valid_lotar(cpu) cpumask_test_cpu((cpu), &cpu_lotar_map)
#if CHIP_HAS_CBOX_HOME_MAP()
/* Which processors are used for hash-for-home mapping */
extern struct cpumask hash_for_home_map;
#endif
/* Which cpus can have their cache flushed by hv_flush_remote(). */
extern struct cpumask cpu_cacheable_map;
#define cpu_cacheable(cpu) cpumask_test_cpu((cpu), &cpu_cacheable_map)
/* Convert an HV_LOTAR value into a cpu. */
static inline int hv_lotar_to_cpu(HV_LOTAR lotar)
{
return HV_LOTAR_X(lotar) + (HV_LOTAR_Y(lotar) * smp_width);
}
/*
* Extension of <linux/cpumask.h> functionality when you just want
* to express a mask or suppression or inclusion region without
* being too concerned about exactly which cpus are valid in that region.
*/
int bitmap_parselist_crop(const char *bp, unsigned long *maskp, int nmaskbits);
#define cpulist_parse_crop(buf, dst) \
__cpulist_parse_crop((buf), (dst), NR_CPUS)
static inline int __cpulist_parse_crop(const char *buf, struct cpumask *dstp,
int nbits)
{
return bitmap_parselist_crop(buf, cpumask_bits(dstp), nbits);
}
/* Initialize the IPI subsystem. */
void ipi_init(void);
/* Function for start-cpu message to cause us to jump to. */
extern unsigned long start_cpu_function_addr;
#endif /* _ASM_TILE_SMP_H */

View File

@ -0,0 +1,24 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_SPINLOCK_H
#define _ASM_TILE_SPINLOCK_H
#ifdef __tilegx__
#include <asm/spinlock_64.h>
#else
#include <asm/spinlock_32.h>
#endif
#endif /* _ASM_TILE_SPINLOCK_H */

View File

@ -0,0 +1,129 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
* 32-bit SMP spinlocks.
*/
#ifndef _ASM_TILE_SPINLOCK_32_H
#define _ASM_TILE_SPINLOCK_32_H
#include <linux/atomic.h>
#include <asm/page.h>
#include <linux/compiler.h>
/*
* We only use even ticket numbers so the '1' inserted by a tns is
* an unambiguous "ticket is busy" flag.
*/
#define TICKET_QUANTUM 2
/*
* SMP ticket spinlocks, allowing only a single CPU anywhere
*
* (the type definitions are in asm/spinlock_types.h)
*/
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
/*
* Note that even if a new ticket is in the process of being
* acquired, so lock->next_ticket is 1, it's still reasonable
* to claim the lock is held, since it will be momentarily
* if not already. There's no need to wait for a "valid"
* lock->next_ticket to become available.
*/
return lock->next_ticket != lock->current_ticket;
}
void arch_spin_lock(arch_spinlock_t *lock);
/* We cannot take an interrupt after getting a ticket, so don't enable them. */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
int arch_spin_trylock(arch_spinlock_t *lock);
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
/* For efficiency, overlap fetching the old ticket with the wmb(). */
int old_ticket = lock->current_ticket;
wmb(); /* guarantee anything modified under the lock is visible */
lock->current_ticket = old_ticket + TICKET_QUANTUM;
}
void arch_spin_unlock_wait(arch_spinlock_t *lock);
/*
* Read-write spinlocks, allowing multiple readers
* but only one writer.
*
* We use a "tns/store-back" technique on a single word to manage
* the lock state, looping around to retry if the tns returns 1.
*/
/* Internal layout of the word; do not use. */
#define _WR_NEXT_SHIFT 8
#define _WR_CURR_SHIFT 16
#define _WR_WIDTH 8
#define _RD_COUNT_SHIFT 24
#define _RD_COUNT_WIDTH 8
/**
* arch_read_can_lock() - would read_trylock() succeed?
*/
static inline int arch_read_can_lock(arch_rwlock_t *rwlock)
{
return (rwlock->lock << _RD_COUNT_WIDTH) == 0;
}
/**
* arch_write_can_lock() - would write_trylock() succeed?
*/
static inline int arch_write_can_lock(arch_rwlock_t *rwlock)
{
return rwlock->lock == 0;
}
/**
* arch_read_lock() - acquire a read lock.
*/
void arch_read_lock(arch_rwlock_t *rwlock);
/**
* arch_write_lock() - acquire a write lock.
*/
void arch_write_lock(arch_rwlock_t *rwlock);
/**
* arch_read_trylock() - try to acquire a read lock.
*/
int arch_read_trylock(arch_rwlock_t *rwlock);
/**
* arch_write_trylock() - try to acquire a write lock.
*/
int arch_write_trylock(arch_rwlock_t *rwlock);
/**
* arch_read_unlock() - release a read lock.
*/
void arch_read_unlock(arch_rwlock_t *rwlock);
/**
* arch_write_unlock() - release a write lock.
*/
void arch_write_unlock(arch_rwlock_t *rwlock);
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
#endif /* _ASM_TILE_SPINLOCK_32_H */

View File

@ -0,0 +1,161 @@
/*
* Copyright 2011 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
* 64-bit SMP ticket spinlocks, allowing only a single CPU anywhere
* (the type definitions are in asm/spinlock_types.h)
*/
#ifndef _ASM_TILE_SPINLOCK_64_H
#define _ASM_TILE_SPINLOCK_64_H
/* Shifts and masks for the various fields in "lock". */
#define __ARCH_SPIN_CURRENT_SHIFT 17
#define __ARCH_SPIN_NEXT_MASK 0x7fff
#define __ARCH_SPIN_NEXT_OVERFLOW 0x8000
/*
* Return the "current" portion of a ticket lock value,
* i.e. the number that currently owns the lock.
*/
static inline int arch_spin_current(u32 val)
{
return val >> __ARCH_SPIN_CURRENT_SHIFT;
}
/*
* Return the "next" portion of a ticket lock value,
* i.e. the number that the next task to try to acquire the lock will get.
*/
static inline int arch_spin_next(u32 val)
{
return val & __ARCH_SPIN_NEXT_MASK;
}
/* The lock is locked if a task would have to wait to get it. */
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
u32 val = lock->lock;
return arch_spin_current(val) != arch_spin_next(val);
}
/* Bump the current ticket so the next task owns the lock. */
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
wmb(); /* guarantee anything modified under the lock is visible */
__insn_fetchadd4(&lock->lock, 1U << __ARCH_SPIN_CURRENT_SHIFT);
}
void arch_spin_unlock_wait(arch_spinlock_t *lock);
void arch_spin_lock_slow(arch_spinlock_t *lock, u32 val);
/* Grab the "next" ticket number and bump it atomically.
* If the current ticket is not ours, go to the slow path.
* We also take the slow path if the "next" value overflows.
*/
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
u32 val = __insn_fetchadd4(&lock->lock, 1);
u32 ticket = val & (__ARCH_SPIN_NEXT_MASK | __ARCH_SPIN_NEXT_OVERFLOW);
if (unlikely(arch_spin_current(val) != ticket))
arch_spin_lock_slow(lock, ticket);
}
/* Try to get the lock, and return whether we succeeded. */
int arch_spin_trylock(arch_spinlock_t *lock);
/* We cannot take an interrupt after getting a ticket, so don't enable them. */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
/*
* Read-write spinlocks, allowing multiple readers
* but only one writer.
*
* We use fetchadd() for readers, and fetchor() with the sign bit
* for writers.
*/
#define __WRITE_LOCK_BIT (1 << 31)
static inline int arch_write_val_locked(int val)
{
return val < 0; /* Optimize "val & __WRITE_LOCK_BIT". */
}
/**
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
static inline int arch_read_can_lock(arch_rwlock_t *rw)
{
return !arch_write_val_locked(rw->lock);
}
/**
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
static inline int arch_write_can_lock(arch_rwlock_t *rw)
{
return rw->lock == 0;
}
extern void __read_lock_failed(arch_rwlock_t *rw);
static inline void arch_read_lock(arch_rwlock_t *rw)
{
u32 val = __insn_fetchaddgez4(&rw->lock, 1);
if (unlikely(arch_write_val_locked(val)))
__read_lock_failed(rw);
}
extern void __write_lock_failed(arch_rwlock_t *rw, u32 val);
static inline void arch_write_lock(arch_rwlock_t *rw)
{
u32 val = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT);
if (unlikely(val != 0))
__write_lock_failed(rw, val);
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
__insn_mf();
__insn_fetchadd4(&rw->lock, -1);
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
__insn_mf();
__insn_exch4(&rw->lock, 0); /* Avoid waiting in the write buffer. */
}
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
return !arch_write_val_locked(__insn_fetchaddgez4(&rw->lock, 1));
}
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
u32 val = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT);
if (likely(val == 0))
return 1;
if (!arch_write_val_locked(val))
__insn_fetchand4(&rw->lock, ~__WRITE_LOCK_BIT);
return 0;
}
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
#endif /* _ASM_TILE_SPINLOCK_64_H */

View File

@ -0,0 +1,60 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_SPINLOCK_TYPES_H
#define _ASM_TILE_SPINLOCK_TYPES_H
#ifndef __LINUX_SPINLOCK_TYPES_H
# error "please don't include this file directly"
#endif
#ifdef __tilegx__
/* Low 15 bits are "next"; high 15 bits are "current". */
typedef struct arch_spinlock {
unsigned int lock;
} arch_spinlock_t;
#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
/* High bit is "writer owns"; low 31 bits are a count of readers. */
typedef struct arch_rwlock {
unsigned int lock;
} arch_rwlock_t;
#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#else
typedef struct arch_spinlock {
/* Next ticket number to hand out. */
int next_ticket;
/* The ticket number that currently owns this lock. */
int current_ticket;
} arch_spinlock_t;
#define __ARCH_SPIN_LOCK_UNLOCKED { 0, 0 }
/*
* Byte 0 for tns (only the low bit is used), byte 1 for ticket-lock "next",
* byte 2 for ticket-lock "current", byte 3 for reader count.
*/
typedef struct arch_rwlock {
unsigned int lock;
} arch_rwlock_t;
#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif
#endif /* _ASM_TILE_SPINLOCK_TYPES_H */

View File

@ -0,0 +1,74 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_STACK_H
#define _ASM_TILE_STACK_H
#include <linux/types.h>
#include <linux/sched.h>
#include <asm/backtrace.h>
#include <asm/page.h>
#include <hv/hypervisor.h>
/* Everything we need to keep track of a backtrace iteration */
struct KBacktraceIterator {
BacktraceIterator it;
struct task_struct *task; /* task we are backtracing */
int end; /* iteration complete. */
int new_context; /* new context is starting */
int profile; /* profiling, so stop on async intrpt */
int verbose; /* printk extra info (don't want to
* do this for profiling) */
int is_current; /* backtracing current task */
};
/* Iteration methods for kernel backtraces */
/*
* Initialize a KBacktraceIterator from a task_struct, and optionally from
* a set of registers. If the registers are omitted, the process is
* assumed to be descheduled, and registers are read from the process's
* thread_struct and stack. "verbose" means to printk some additional
* information about fault handlers as we pass them on the stack.
*/
extern void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
struct task_struct *, struct pt_regs *);
/* Initialize iterator based on current stack. */
extern void KBacktraceIterator_init_current(struct KBacktraceIterator *kbt);
/* Helper method for above. */
extern void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt,
ulong pc, ulong lr, ulong sp, ulong r52);
/* No more frames? */
extern int KBacktraceIterator_end(struct KBacktraceIterator *kbt);
/* Advance to the next frame. */
extern void KBacktraceIterator_next(struct KBacktraceIterator *kbt);
/*
* Dump stack given complete register info. Use only from the
* architecture-specific code; show_stack()
* and dump_stack() (in entry.S) are architecture-independent entry points.
*/
extern void tile_show_stack(struct KBacktraceIterator *, int headers);
/* Dump stack of current process, with registers to seed the backtrace. */
extern void dump_stack_regs(struct pt_regs *);
/* Helper method for assembly dump_stack(). */
extern void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52);
#endif /* _ASM_TILE_STACK_H */

View File

@ -0,0 +1,4 @@
#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
#define __ARCH_WANT_STAT64 /* Used for compat_sys_stat64() etc. */
#endif
#include <asm-generic/stat.h>

View File

@ -0,0 +1,32 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_STRING_H
#define _ASM_TILE_STRING_H
#define __HAVE_ARCH_MEMCHR
#define __HAVE_ARCH_MEMSET
#define __HAVE_ARCH_MEMCPY
#define __HAVE_ARCH_MEMMOVE
#define __HAVE_ARCH_STRCHR
#define __HAVE_ARCH_STRLEN
extern __kernel_size_t strlen(const char *);
extern char *strchr(const char *s, int c);
extern void *memchr(const void *s, int c, size_t n);
extern void *memset(void *, int, __kernel_size_t);
extern void *memcpy(void *, const void *, __kernel_size_t);
extern void *memmove(void *, const void *, __kernel_size_t);
#endif /* _ASM_TILE_STRING_H */

View File

@ -0,0 +1,23 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_SWAB_H
#define _ASM_TILE_SWAB_H
/* Tile gcc is always >= 4.3.0, so we use __builtin_bswap. */
#define __arch_swab32(x) __builtin_bswap32(x)
#define __arch_swab64(x) __builtin_bswap64(x)
#define __arch_swab16(x) (__builtin_bswap32(x) >> 16)
#endif /* _ASM_TILE_SWAB_H */

View File

@ -0,0 +1,76 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_SWITCH_TO_H
#define _ASM_TILE_SWITCH_TO_H
#include <arch/sim_def.h>
/*
* switch_to(n) should switch tasks to task nr n, first
* checking that n isn't the current task, in which case it does nothing.
* The number of callee-saved registers saved on the kernel stack
* is defined here for use in copy_thread() and must agree with __switch_to().
*/
#define CALLEE_SAVED_FIRST_REG 30
#define CALLEE_SAVED_REGS_COUNT 24 /* r30 to r52, plus an empty to align */
#ifndef __ASSEMBLY__
struct task_struct;
/*
* Pause the DMA engine and static network before task switching.
*/
#define prepare_arch_switch(next) _prepare_arch_switch(next)
void _prepare_arch_switch(struct task_struct *next);
struct task_struct;
#define switch_to(prev, next, last) ((last) = _switch_to((prev), (next)))
extern struct task_struct *_switch_to(struct task_struct *prev,
struct task_struct *next);
/* Helper function for _switch_to(). */
extern struct task_struct *__switch_to(struct task_struct *prev,
struct task_struct *next,
unsigned long new_system_save_k_0);
/* Address that switched-away from tasks are at. */
extern unsigned long get_switch_to_pc(void);
/*
* Kernel threads can check to see if they need to migrate their
* stack whenever they return from a context switch; for user
* threads, we defer until they are returning to user-space.
*/
#define finish_arch_switch(prev) do { \
if (unlikely((prev)->state == TASK_DEAD)) \
__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT | \
((prev)->pid << _SIM_CONTROL_OPERATOR_BITS)); \
__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_SWITCH | \
(current->pid << _SIM_CONTROL_OPERATOR_BITS)); \
if (current->mm == NULL && !kstack_hash && \
current_thread_info()->homecache_cpu != smp_processor_id()) \
homecache_migrate_kthread(); \
} while (0)
/* Support function for forking a new task. */
void ret_from_fork(void);
/* Called from ret_from_fork() when a new process starts up. */
struct task_struct *sim_notify_fork(struct task_struct *prev);
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_TILE_SWITCH_TO_H */

View File

@ -0,0 +1,79 @@
/*
* Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved.
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
* See asm-generic/syscall.h for descriptions of what we must do here.
*/
#ifndef _ASM_TILE_SYSCALL_H
#define _ASM_TILE_SYSCALL_H
#include <linux/sched.h>
#include <linux/err.h>
#include <arch/abi.h>
/*
* Only the low 32 bits of orig_r0 are meaningful, so we return int.
* This importantly ignores the high bits on 64-bit, so comparisons
* sign-extend the low 32 bits.
*/
static inline int syscall_get_nr(struct task_struct *t, struct pt_regs *regs)
{
return regs->regs[TREG_SYSCALL_NR];
}
static inline void syscall_rollback(struct task_struct *task,
struct pt_regs *regs)
{
regs->regs[0] = regs->orig_r0;
}
static inline long syscall_get_error(struct task_struct *task,
struct pt_regs *regs)
{
unsigned long error = regs->regs[0];
return IS_ERR_VALUE(error) ? error : 0;
}
static inline long syscall_get_return_value(struct task_struct *task,
struct pt_regs *regs)
{
return regs->regs[0];
}
static inline void syscall_set_return_value(struct task_struct *task,
struct pt_regs *regs,
int error, long val)
{
regs->regs[0] = (long) error ?: val;
}
static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs,
unsigned int i, unsigned int n,
unsigned long *args)
{
BUG_ON(i + n > 6);
memcpy(args, &regs[i], n * sizeof(args[0]));
}
static inline void syscall_set_arguments(struct task_struct *task,
struct pt_regs *regs,
unsigned int i, unsigned int n,
const unsigned long *args)
{
BUG_ON(i + n > 6);
memcpy(&regs[i], args, n * sizeof(args[0]));
}
#endif /* _ASM_TILE_SYSCALL_H */

View File

@ -0,0 +1,76 @@
/*
* syscalls.h - Linux syscall interfaces (arch-specific)
*
* Copyright (c) 2008 Jaswinder Singh Rajput
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_SYSCALLS_H
#define _ASM_TILE_SYSCALLS_H
#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/signal.h>
#include <linux/types.h>
#include <linux/compat.h>
/* The array of function pointers for syscalls. */
extern void *sys_call_table[];
#ifdef CONFIG_COMPAT
extern void *compat_sys_call_table[];
#endif
/*
* Note that by convention, any syscall which requires the current
* register set takes an additional "struct pt_regs *" pointer; a
* _sys_xxx() trampoline in intvec*.S just sets up the pointer and
* jumps to sys_xxx().
*/
/* kernel/sys.c */
ssize_t sys32_readahead(int fd, u32 offset_lo, u32 offset_hi, u32 count);
long sys32_fadvise64(int fd, u32 offset_lo, u32 offset_hi,
u32 len, int advice);
int sys32_fadvise64_64(int fd, u32 offset_lo, u32 offset_hi,
u32 len_lo, u32 len_hi, int advice);
long sys_flush_cache(void);
#ifndef __tilegx__ /* No mmap() in the 32-bit kernel. */
#define sys_mmap sys_mmap
#endif
#ifndef __tilegx__
/* mm/fault.c */
long sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *);
long _sys_cmpxchg_badaddr(unsigned long address);
#endif
#ifdef CONFIG_COMPAT
/* These four are not defined for 64-bit, but serve as "compat" syscalls. */
long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg);
long sys_fstat64(unsigned long fd, struct stat64 __user *statbuf);
long sys_truncate64(const char __user *path, loff_t length);
long sys_ftruncate64(unsigned int fd, loff_t length);
#endif
/* These are the intvec*.S trampolines. */
long _sys_sigaltstack(const stack_t __user *, stack_t __user *);
long _sys_rt_sigreturn(void);
long _sys_clone(unsigned long clone_flags, unsigned long newsp,
void __user *parent_tid, void __user *child_tid);
long _sys_execve(const char __user *filename,
const char __user *const __user *argv,
const char __user *const __user *envp);
#include <asm-generic/syscalls.h>
#endif /* _ASM_TILE_SYSCALLS_H */

View File

@ -0,0 +1,175 @@
/*
* Copyright (C) 2002 David Howells (dhowells@redhat.com)
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_THREAD_INFO_H
#define _ASM_TILE_THREAD_INFO_H
#include <asm/processor.h>
#include <asm/page.h>
#ifndef __ASSEMBLY__
/*
* Low level task data that assembly code needs immediate access to.
* The structure is placed at the bottom of the supervisor stack.
*/
struct thread_info {
struct task_struct *task; /* main task structure */
struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
unsigned long status; /* thread-synchronous flags */
__u32 homecache_cpu; /* CPU we are homecached on */
__u32 cpu; /* current CPU */
int preempt_count; /* 0 => preemptable,
<0 => BUG */
mm_segment_t addr_limit; /* thread address space
(KERNEL_DS or USER_DS) */
struct restart_block restart_block;
struct single_step_state *step_state; /* single step state
(if non-zero) */
};
/*
* macros/functions for gaining access to the thread information structure.
*/
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
.exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
.restart_block = { \
.fn = do_no_restart_syscall, \
}, \
.step_state = NULL, \
}
#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
#endif /* !__ASSEMBLY__ */
#if PAGE_SIZE < 8192
#define THREAD_SIZE_ORDER (13 - PAGE_SHIFT)
#else
#define THREAD_SIZE_ORDER (0)
#endif
#define THREAD_SIZE_PAGES (1 << THREAD_SIZE_ORDER)
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#define LOG2_THREAD_SIZE (PAGE_SHIFT + THREAD_SIZE_ORDER)
#define STACK_WARN (THREAD_SIZE/8)
#ifndef __ASSEMBLY__
/* How to get the thread information struct from C. */
register unsigned long stack_pointer __asm__("sp");
#define current_thread_info() \
((struct thread_info *)(stack_pointer & -THREAD_SIZE))
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
extern struct thread_info *alloc_thread_info_node(struct task_struct *task, int node);
extern void free_thread_info(struct thread_info *info);
/* Sit on a nap instruction until interrupted. */
extern void smp_nap(void);
/* Enable interrupts racelessly and nap forever: helper for cpu_idle(). */
extern void _cpu_idle(void);
/* Switch boot idle thread to a freshly-allocated stack and free old stack. */
extern void cpu_idle_on_new_stack(struct thread_info *old_ti,
unsigned long new_sp,
unsigned long new_ss10);
#else /* __ASSEMBLY__ */
/*
* How to get the thread information struct from assembly.
* Note that we use different macros since different architectures
* have different semantics in their "mm" instruction and we would
* like to guarantee that the macro expands to exactly one instruction.
*/
#ifdef __tilegx__
#define EXTRACT_THREAD_INFO(reg) mm reg, zero, LOG2_THREAD_SIZE, 63
#else
#define GET_THREAD_INFO(reg) mm reg, sp, zero, LOG2_THREAD_SIZE, 31
#endif
#endif /* !__ASSEMBLY__ */
#define PREEMPT_ACTIVE 0x10000000
/*
* Thread information flags that various assembly files may need to access.
* Keep flags accessed frequently in low bits, particular since it makes
* it easier to build constants in assembly.
*/
#define TIF_SIGPENDING 0 /* signal pending */
#define TIF_NEED_RESCHED 1 /* rescheduling necessary */
#define TIF_SINGLESTEP 2 /* restore singlestep on return to
user mode */
#define TIF_ASYNC_TLB 3 /* got an async TLB fault in kernel */
#define TIF_SYSCALL_TRACE 4 /* syscall trace active */
#define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */
#define TIF_SECCOMP 6 /* secure computing */
#define TIF_MEMDIE 7 /* OOM killer at work */
#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
#define _TIF_ASYNC_TLB (1<<TIF_ASYNC_TLB)
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1<<TIF_SECCOMP)
#define _TIF_MEMDIE (1<<TIF_MEMDIE)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
/* Work to do on any return to user space. */
#define _TIF_ALLWORK_MASK \
(_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SINGLESTEP|\
_TIF_ASYNC_TLB|_TIF_NOTIFY_RESUME)
/*
* Thread-synchronous status.
*
* This is different from the flags in that nobody else
* ever touches our thread-synchronous status, so we don't
* have to worry about atomic accesses.
*/
#ifdef __tilegx__
#define TS_COMPAT 0x0001 /* 32-bit compatibility mode */
#endif
#define TS_POLLING 0x0004 /* in idle loop but not sleeping */
#define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal */
#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
#ifndef __ASSEMBLY__
#define HAVE_SET_RESTORE_SIGMASK 1
static inline void set_restore_sigmask(void)
{
struct thread_info *ti = current_thread_info();
ti->status |= TS_RESTORE_SIGMASK;
set_bit(TIF_SIGPENDING, &ti->flags);
}
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_TILE_THREAD_INFO_H */

View File

@ -0,0 +1,19 @@
/*
* Copyright 2011 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef __tilegx__
#include <asm/tile-desc_32.h>
#else
#include <asm/tile-desc_64.h>
#endif

View File

@ -0,0 +1,553 @@
/* TILEPro opcode information.
*
* Copyright 2011 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
*
*
*
*
*/
#ifndef opcode_tilepro_h
#define opcode_tilepro_h
#include <arch/opcode.h>
enum
{
TILEPRO_MAX_OPERANDS = 5 /* mm */
};
typedef enum
{
TILEPRO_OPC_BPT,
TILEPRO_OPC_INFO,
TILEPRO_OPC_INFOL,
TILEPRO_OPC_J,
TILEPRO_OPC_JAL,
TILEPRO_OPC_MOVE,
TILEPRO_OPC_MOVE_SN,
TILEPRO_OPC_MOVEI,
TILEPRO_OPC_MOVEI_SN,
TILEPRO_OPC_MOVELI,
TILEPRO_OPC_MOVELI_SN,
TILEPRO_OPC_MOVELIS,
TILEPRO_OPC_PREFETCH,
TILEPRO_OPC_RAISE,
TILEPRO_OPC_ADD,
TILEPRO_OPC_ADD_SN,
TILEPRO_OPC_ADDB,
TILEPRO_OPC_ADDB_SN,
TILEPRO_OPC_ADDBS_U,
TILEPRO_OPC_ADDBS_U_SN,
TILEPRO_OPC_ADDH,
TILEPRO_OPC_ADDH_SN,
TILEPRO_OPC_ADDHS,
TILEPRO_OPC_ADDHS_SN,
TILEPRO_OPC_ADDI,
TILEPRO_OPC_ADDI_SN,
TILEPRO_OPC_ADDIB,
TILEPRO_OPC_ADDIB_SN,
TILEPRO_OPC_ADDIH,
TILEPRO_OPC_ADDIH_SN,
TILEPRO_OPC_ADDLI,
TILEPRO_OPC_ADDLI_SN,
TILEPRO_OPC_ADDLIS,
TILEPRO_OPC_ADDS,
TILEPRO_OPC_ADDS_SN,
TILEPRO_OPC_ADIFFB_U,
TILEPRO_OPC_ADIFFB_U_SN,
TILEPRO_OPC_ADIFFH,
TILEPRO_OPC_ADIFFH_SN,
TILEPRO_OPC_AND,
TILEPRO_OPC_AND_SN,
TILEPRO_OPC_ANDI,
TILEPRO_OPC_ANDI_SN,
TILEPRO_OPC_AULI,
TILEPRO_OPC_AVGB_U,
TILEPRO_OPC_AVGB_U_SN,
TILEPRO_OPC_AVGH,
TILEPRO_OPC_AVGH_SN,
TILEPRO_OPC_BBNS,
TILEPRO_OPC_BBNS_SN,
TILEPRO_OPC_BBNST,
TILEPRO_OPC_BBNST_SN,
TILEPRO_OPC_BBS,
TILEPRO_OPC_BBS_SN,
TILEPRO_OPC_BBST,
TILEPRO_OPC_BBST_SN,
TILEPRO_OPC_BGEZ,
TILEPRO_OPC_BGEZ_SN,
TILEPRO_OPC_BGEZT,
TILEPRO_OPC_BGEZT_SN,
TILEPRO_OPC_BGZ,
TILEPRO_OPC_BGZ_SN,
TILEPRO_OPC_BGZT,
TILEPRO_OPC_BGZT_SN,
TILEPRO_OPC_BITX,
TILEPRO_OPC_BITX_SN,
TILEPRO_OPC_BLEZ,
TILEPRO_OPC_BLEZ_SN,
TILEPRO_OPC_BLEZT,
TILEPRO_OPC_BLEZT_SN,
TILEPRO_OPC_BLZ,
TILEPRO_OPC_BLZ_SN,
TILEPRO_OPC_BLZT,
TILEPRO_OPC_BLZT_SN,
TILEPRO_OPC_BNZ,
TILEPRO_OPC_BNZ_SN,
TILEPRO_OPC_BNZT,
TILEPRO_OPC_BNZT_SN,
TILEPRO_OPC_BYTEX,
TILEPRO_OPC_BYTEX_SN,
TILEPRO_OPC_BZ,
TILEPRO_OPC_BZ_SN,
TILEPRO_OPC_BZT,
TILEPRO_OPC_BZT_SN,
TILEPRO_OPC_CLZ,
TILEPRO_OPC_CLZ_SN,
TILEPRO_OPC_CRC32_32,
TILEPRO_OPC_CRC32_32_SN,
TILEPRO_OPC_CRC32_8,
TILEPRO_OPC_CRC32_8_SN,
TILEPRO_OPC_CTZ,
TILEPRO_OPC_CTZ_SN,
TILEPRO_OPC_DRAIN,
TILEPRO_OPC_DTLBPR,
TILEPRO_OPC_DWORD_ALIGN,
TILEPRO_OPC_DWORD_ALIGN_SN,
TILEPRO_OPC_FINV,
TILEPRO_OPC_FLUSH,
TILEPRO_OPC_FNOP,
TILEPRO_OPC_ICOH,
TILEPRO_OPC_ILL,
TILEPRO_OPC_INTHB,
TILEPRO_OPC_INTHB_SN,
TILEPRO_OPC_INTHH,
TILEPRO_OPC_INTHH_SN,
TILEPRO_OPC_INTLB,
TILEPRO_OPC_INTLB_SN,
TILEPRO_OPC_INTLH,
TILEPRO_OPC_INTLH_SN,
TILEPRO_OPC_INV,
TILEPRO_OPC_IRET,
TILEPRO_OPC_JALB,
TILEPRO_OPC_JALF,
TILEPRO_OPC_JALR,
TILEPRO_OPC_JALRP,
TILEPRO_OPC_JB,
TILEPRO_OPC_JF,
TILEPRO_OPC_JR,
TILEPRO_OPC_JRP,
TILEPRO_OPC_LB,
TILEPRO_OPC_LB_SN,
TILEPRO_OPC_LB_U,
TILEPRO_OPC_LB_U_SN,
TILEPRO_OPC_LBADD,
TILEPRO_OPC_LBADD_SN,
TILEPRO_OPC_LBADD_U,
TILEPRO_OPC_LBADD_U_SN,
TILEPRO_OPC_LH,
TILEPRO_OPC_LH_SN,
TILEPRO_OPC_LH_U,
TILEPRO_OPC_LH_U_SN,
TILEPRO_OPC_LHADD,
TILEPRO_OPC_LHADD_SN,
TILEPRO_OPC_LHADD_U,
TILEPRO_OPC_LHADD_U_SN,
TILEPRO_OPC_LNK,
TILEPRO_OPC_LNK_SN,
TILEPRO_OPC_LW,
TILEPRO_OPC_LW_SN,
TILEPRO_OPC_LW_NA,
TILEPRO_OPC_LW_NA_SN,
TILEPRO_OPC_LWADD,
TILEPRO_OPC_LWADD_SN,
TILEPRO_OPC_LWADD_NA,
TILEPRO_OPC_LWADD_NA_SN,
TILEPRO_OPC_MAXB_U,
TILEPRO_OPC_MAXB_U_SN,
TILEPRO_OPC_MAXH,
TILEPRO_OPC_MAXH_SN,
TILEPRO_OPC_MAXIB_U,
TILEPRO_OPC_MAXIB_U_SN,
TILEPRO_OPC_MAXIH,
TILEPRO_OPC_MAXIH_SN,
TILEPRO_OPC_MF,
TILEPRO_OPC_MFSPR,
TILEPRO_OPC_MINB_U,
TILEPRO_OPC_MINB_U_SN,
TILEPRO_OPC_MINH,
TILEPRO_OPC_MINH_SN,
TILEPRO_OPC_MINIB_U,
TILEPRO_OPC_MINIB_U_SN,
TILEPRO_OPC_MINIH,
TILEPRO_OPC_MINIH_SN,
TILEPRO_OPC_MM,
TILEPRO_OPC_MNZ,
TILEPRO_OPC_MNZ_SN,
TILEPRO_OPC_MNZB,
TILEPRO_OPC_MNZB_SN,
TILEPRO_OPC_MNZH,
TILEPRO_OPC_MNZH_SN,
TILEPRO_OPC_MTSPR,
TILEPRO_OPC_MULHH_SS,
TILEPRO_OPC_MULHH_SS_SN,
TILEPRO_OPC_MULHH_SU,
TILEPRO_OPC_MULHH_SU_SN,
TILEPRO_OPC_MULHH_UU,
TILEPRO_OPC_MULHH_UU_SN,
TILEPRO_OPC_MULHHA_SS,
TILEPRO_OPC_MULHHA_SS_SN,
TILEPRO_OPC_MULHHA_SU,
TILEPRO_OPC_MULHHA_SU_SN,
TILEPRO_OPC_MULHHA_UU,
TILEPRO_OPC_MULHHA_UU_SN,
TILEPRO_OPC_MULHHSA_UU,
TILEPRO_OPC_MULHHSA_UU_SN,
TILEPRO_OPC_MULHL_SS,
TILEPRO_OPC_MULHL_SS_SN,
TILEPRO_OPC_MULHL_SU,
TILEPRO_OPC_MULHL_SU_SN,
TILEPRO_OPC_MULHL_US,
TILEPRO_OPC_MULHL_US_SN,
TILEPRO_OPC_MULHL_UU,
TILEPRO_OPC_MULHL_UU_SN,
TILEPRO_OPC_MULHLA_SS,
TILEPRO_OPC_MULHLA_SS_SN,
TILEPRO_OPC_MULHLA_SU,
TILEPRO_OPC_MULHLA_SU_SN,
TILEPRO_OPC_MULHLA_US,
TILEPRO_OPC_MULHLA_US_SN,
TILEPRO_OPC_MULHLA_UU,
TILEPRO_OPC_MULHLA_UU_SN,
TILEPRO_OPC_MULHLSA_UU,
TILEPRO_OPC_MULHLSA_UU_SN,
TILEPRO_OPC_MULLL_SS,
TILEPRO_OPC_MULLL_SS_SN,
TILEPRO_OPC_MULLL_SU,
TILEPRO_OPC_MULLL_SU_SN,
TILEPRO_OPC_MULLL_UU,
TILEPRO_OPC_MULLL_UU_SN,
TILEPRO_OPC_MULLLA_SS,
TILEPRO_OPC_MULLLA_SS_SN,
TILEPRO_OPC_MULLLA_SU,
TILEPRO_OPC_MULLLA_SU_SN,
TILEPRO_OPC_MULLLA_UU,
TILEPRO_OPC_MULLLA_UU_SN,
TILEPRO_OPC_MULLLSA_UU,
TILEPRO_OPC_MULLLSA_UU_SN,
TILEPRO_OPC_MVNZ,
TILEPRO_OPC_MVNZ_SN,
TILEPRO_OPC_MVZ,
TILEPRO_OPC_MVZ_SN,
TILEPRO_OPC_MZ,
TILEPRO_OPC_MZ_SN,
TILEPRO_OPC_MZB,
TILEPRO_OPC_MZB_SN,
TILEPRO_OPC_MZH,
TILEPRO_OPC_MZH_SN,
TILEPRO_OPC_NAP,
TILEPRO_OPC_NOP,
TILEPRO_OPC_NOR,
TILEPRO_OPC_NOR_SN,
TILEPRO_OPC_OR,
TILEPRO_OPC_OR_SN,
TILEPRO_OPC_ORI,
TILEPRO_OPC_ORI_SN,
TILEPRO_OPC_PACKBS_U,
TILEPRO_OPC_PACKBS_U_SN,
TILEPRO_OPC_PACKHB,
TILEPRO_OPC_PACKHB_SN,
TILEPRO_OPC_PACKHS,
TILEPRO_OPC_PACKHS_SN,
TILEPRO_OPC_PACKLB,
TILEPRO_OPC_PACKLB_SN,
TILEPRO_OPC_PCNT,
TILEPRO_OPC_PCNT_SN,
TILEPRO_OPC_RL,
TILEPRO_OPC_RL_SN,
TILEPRO_OPC_RLI,
TILEPRO_OPC_RLI_SN,
TILEPRO_OPC_S1A,
TILEPRO_OPC_S1A_SN,
TILEPRO_OPC_S2A,
TILEPRO_OPC_S2A_SN,
TILEPRO_OPC_S3A,
TILEPRO_OPC_S3A_SN,
TILEPRO_OPC_SADAB_U,
TILEPRO_OPC_SADAB_U_SN,
TILEPRO_OPC_SADAH,
TILEPRO_OPC_SADAH_SN,
TILEPRO_OPC_SADAH_U,
TILEPRO_OPC_SADAH_U_SN,
TILEPRO_OPC_SADB_U,
TILEPRO_OPC_SADB_U_SN,
TILEPRO_OPC_SADH,
TILEPRO_OPC_SADH_SN,
TILEPRO_OPC_SADH_U,
TILEPRO_OPC_SADH_U_SN,
TILEPRO_OPC_SB,
TILEPRO_OPC_SBADD,
TILEPRO_OPC_SEQ,
TILEPRO_OPC_SEQ_SN,
TILEPRO_OPC_SEQB,
TILEPRO_OPC_SEQB_SN,
TILEPRO_OPC_SEQH,
TILEPRO_OPC_SEQH_SN,
TILEPRO_OPC_SEQI,
TILEPRO_OPC_SEQI_SN,
TILEPRO_OPC_SEQIB,
TILEPRO_OPC_SEQIB_SN,
TILEPRO_OPC_SEQIH,
TILEPRO_OPC_SEQIH_SN,
TILEPRO_OPC_SH,
TILEPRO_OPC_SHADD,
TILEPRO_OPC_SHL,
TILEPRO_OPC_SHL_SN,
TILEPRO_OPC_SHLB,
TILEPRO_OPC_SHLB_SN,
TILEPRO_OPC_SHLH,
TILEPRO_OPC_SHLH_SN,
TILEPRO_OPC_SHLI,
TILEPRO_OPC_SHLI_SN,
TILEPRO_OPC_SHLIB,
TILEPRO_OPC_SHLIB_SN,
TILEPRO_OPC_SHLIH,
TILEPRO_OPC_SHLIH_SN,
TILEPRO_OPC_SHR,
TILEPRO_OPC_SHR_SN,
TILEPRO_OPC_SHRB,
TILEPRO_OPC_SHRB_SN,
TILEPRO_OPC_SHRH,
TILEPRO_OPC_SHRH_SN,
TILEPRO_OPC_SHRI,
TILEPRO_OPC_SHRI_SN,
TILEPRO_OPC_SHRIB,
TILEPRO_OPC_SHRIB_SN,
TILEPRO_OPC_SHRIH,
TILEPRO_OPC_SHRIH_SN,
TILEPRO_OPC_SLT,
TILEPRO_OPC_SLT_SN,
TILEPRO_OPC_SLT_U,
TILEPRO_OPC_SLT_U_SN,
TILEPRO_OPC_SLTB,
TILEPRO_OPC_SLTB_SN,
TILEPRO_OPC_SLTB_U,
TILEPRO_OPC_SLTB_U_SN,
TILEPRO_OPC_SLTE,
TILEPRO_OPC_SLTE_SN,
TILEPRO_OPC_SLTE_U,
TILEPRO_OPC_SLTE_U_SN,
TILEPRO_OPC_SLTEB,
TILEPRO_OPC_SLTEB_SN,
TILEPRO_OPC_SLTEB_U,
TILEPRO_OPC_SLTEB_U_SN,
TILEPRO_OPC_SLTEH,
TILEPRO_OPC_SLTEH_SN,
TILEPRO_OPC_SLTEH_U,
TILEPRO_OPC_SLTEH_U_SN,
TILEPRO_OPC_SLTH,
TILEPRO_OPC_SLTH_SN,
TILEPRO_OPC_SLTH_U,
TILEPRO_OPC_SLTH_U_SN,
TILEPRO_OPC_SLTI,
TILEPRO_OPC_SLTI_SN,
TILEPRO_OPC_SLTI_U,
TILEPRO_OPC_SLTI_U_SN,
TILEPRO_OPC_SLTIB,
TILEPRO_OPC_SLTIB_SN,
TILEPRO_OPC_SLTIB_U,
TILEPRO_OPC_SLTIB_U_SN,
TILEPRO_OPC_SLTIH,
TILEPRO_OPC_SLTIH_SN,
TILEPRO_OPC_SLTIH_U,
TILEPRO_OPC_SLTIH_U_SN,
TILEPRO_OPC_SNE,
TILEPRO_OPC_SNE_SN,
TILEPRO_OPC_SNEB,
TILEPRO_OPC_SNEB_SN,
TILEPRO_OPC_SNEH,
TILEPRO_OPC_SNEH_SN,
TILEPRO_OPC_SRA,
TILEPRO_OPC_SRA_SN,
TILEPRO_OPC_SRAB,
TILEPRO_OPC_SRAB_SN,
TILEPRO_OPC_SRAH,
TILEPRO_OPC_SRAH_SN,
TILEPRO_OPC_SRAI,
TILEPRO_OPC_SRAI_SN,
TILEPRO_OPC_SRAIB,
TILEPRO_OPC_SRAIB_SN,
TILEPRO_OPC_SRAIH,
TILEPRO_OPC_SRAIH_SN,
TILEPRO_OPC_SUB,
TILEPRO_OPC_SUB_SN,
TILEPRO_OPC_SUBB,
TILEPRO_OPC_SUBB_SN,
TILEPRO_OPC_SUBBS_U,
TILEPRO_OPC_SUBBS_U_SN,
TILEPRO_OPC_SUBH,
TILEPRO_OPC_SUBH_SN,
TILEPRO_OPC_SUBHS,
TILEPRO_OPC_SUBHS_SN,
TILEPRO_OPC_SUBS,
TILEPRO_OPC_SUBS_SN,
TILEPRO_OPC_SW,
TILEPRO_OPC_SWADD,
TILEPRO_OPC_SWINT0,
TILEPRO_OPC_SWINT1,
TILEPRO_OPC_SWINT2,
TILEPRO_OPC_SWINT3,
TILEPRO_OPC_TBLIDXB0,
TILEPRO_OPC_TBLIDXB0_SN,
TILEPRO_OPC_TBLIDXB1,
TILEPRO_OPC_TBLIDXB1_SN,
TILEPRO_OPC_TBLIDXB2,
TILEPRO_OPC_TBLIDXB2_SN,
TILEPRO_OPC_TBLIDXB3,
TILEPRO_OPC_TBLIDXB3_SN,
TILEPRO_OPC_TNS,
TILEPRO_OPC_TNS_SN,
TILEPRO_OPC_WH64,
TILEPRO_OPC_XOR,
TILEPRO_OPC_XOR_SN,
TILEPRO_OPC_XORI,
TILEPRO_OPC_XORI_SN,
TILEPRO_OPC_NONE
} tilepro_mnemonic;
typedef enum
{
TILEPRO_PIPELINE_X0,
TILEPRO_PIPELINE_X1,
TILEPRO_PIPELINE_Y0,
TILEPRO_PIPELINE_Y1,
TILEPRO_PIPELINE_Y2,
} tilepro_pipeline;
#define tilepro_is_x_pipeline(p) ((int)(p) <= (int)TILEPRO_PIPELINE_X1)
typedef enum
{
TILEPRO_OP_TYPE_REGISTER,
TILEPRO_OP_TYPE_IMMEDIATE,
TILEPRO_OP_TYPE_ADDRESS,
TILEPRO_OP_TYPE_SPR
} tilepro_operand_type;
struct tilepro_operand
{
/* Is this operand a register, immediate or address? */
tilepro_operand_type type;
/* The default relocation type for this operand. */
signed int default_reloc : 16;
/* How many bits is this value? (used for range checking) */
unsigned int num_bits : 5;
/* Is the value signed? (used for range checking) */
unsigned int is_signed : 1;
/* Is this operand a source register? */
unsigned int is_src_reg : 1;
/* Is this operand written? (i.e. is it a destination register) */
unsigned int is_dest_reg : 1;
/* Is this operand PC-relative? */
unsigned int is_pc_relative : 1;
/* By how many bits do we right shift the value before inserting? */
unsigned int rightshift : 2;
/* Return the bits for this operand to be ORed into an existing bundle. */
tilepro_bundle_bits (*insert) (int op);
/* Extract this operand and return it. */
unsigned int (*extract) (tilepro_bundle_bits bundle);
};
extern const struct tilepro_operand tilepro_operands[];
/* One finite-state machine per pipe for rapid instruction decoding. */
extern const unsigned short * const
tilepro_bundle_decoder_fsms[TILEPRO_NUM_PIPELINE_ENCODINGS];
struct tilepro_opcode
{
/* The opcode mnemonic, e.g. "add" */
const char *name;
/* The enum value for this mnemonic. */
tilepro_mnemonic mnemonic;
/* A bit mask of which of the five pipes this instruction
is compatible with:
X0 0x01
X1 0x02
Y0 0x04
Y1 0x08
Y2 0x10 */
unsigned char pipes;
/* How many operands are there? */
unsigned char num_operands;
/* Which register does this write implicitly, or TREG_ZERO if none? */
unsigned char implicitly_written_register;
/* Can this be bundled with other instructions (almost always true). */
unsigned char can_bundle;
/* The description of the operands. Each of these is an
* index into the tilepro_operands[] table. */
unsigned char operands[TILEPRO_NUM_PIPELINE_ENCODINGS][TILEPRO_MAX_OPERANDS];
};
extern const struct tilepro_opcode tilepro_opcodes[];
/* Used for non-textual disassembly into structs. */
struct tilepro_decoded_instruction
{
const struct tilepro_opcode *opcode;
const struct tilepro_operand *operands[TILEPRO_MAX_OPERANDS];
int operand_values[TILEPRO_MAX_OPERANDS];
};
/* Disassemble a bundle into a struct for machine processing. */
extern int parse_insn_tilepro(tilepro_bundle_bits bits,
unsigned int pc,
struct tilepro_decoded_instruction
decoded[TILEPRO_MAX_INSTRUCTIONS_PER_BUNDLE]);
/* Given a set of bundle bits and a specific pipe, returns which
* instruction the bundle contains in that pipe.
*/
extern const struct tilepro_opcode *
find_opcode(tilepro_bundle_bits bits, tilepro_pipeline pipe);
#endif /* opcode_tilepro_h */

View File

@ -0,0 +1,483 @@
/* TILE-Gx opcode information.
*
* Copyright 2011 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
*
*
*
*
*/
#ifndef opcode_tile_h
#define opcode_tile_h
#include <arch/opcode.h>
enum
{
TILEGX_MAX_OPERANDS = 4 /* bfexts */
};
typedef enum
{
TILEGX_OPC_BPT,
TILEGX_OPC_INFO,
TILEGX_OPC_INFOL,
TILEGX_OPC_MOVE,
TILEGX_OPC_MOVEI,
TILEGX_OPC_MOVELI,
TILEGX_OPC_PREFETCH,
TILEGX_OPC_PREFETCH_ADD_L1,
TILEGX_OPC_PREFETCH_ADD_L1_FAULT,
TILEGX_OPC_PREFETCH_ADD_L2,
TILEGX_OPC_PREFETCH_ADD_L2_FAULT,
TILEGX_OPC_PREFETCH_ADD_L3,
TILEGX_OPC_PREFETCH_ADD_L3_FAULT,
TILEGX_OPC_PREFETCH_L1,
TILEGX_OPC_PREFETCH_L1_FAULT,
TILEGX_OPC_PREFETCH_L2,
TILEGX_OPC_PREFETCH_L2_FAULT,
TILEGX_OPC_PREFETCH_L3,
TILEGX_OPC_PREFETCH_L3_FAULT,
TILEGX_OPC_RAISE,
TILEGX_OPC_ADD,
TILEGX_OPC_ADDI,
TILEGX_OPC_ADDLI,
TILEGX_OPC_ADDX,
TILEGX_OPC_ADDXI,
TILEGX_OPC_ADDXLI,
TILEGX_OPC_ADDXSC,
TILEGX_OPC_AND,
TILEGX_OPC_ANDI,
TILEGX_OPC_BEQZ,
TILEGX_OPC_BEQZT,
TILEGX_OPC_BFEXTS,
TILEGX_OPC_BFEXTU,
TILEGX_OPC_BFINS,
TILEGX_OPC_BGEZ,
TILEGX_OPC_BGEZT,
TILEGX_OPC_BGTZ,
TILEGX_OPC_BGTZT,
TILEGX_OPC_BLBC,
TILEGX_OPC_BLBCT,
TILEGX_OPC_BLBS,
TILEGX_OPC_BLBST,
TILEGX_OPC_BLEZ,
TILEGX_OPC_BLEZT,
TILEGX_OPC_BLTZ,
TILEGX_OPC_BLTZT,
TILEGX_OPC_BNEZ,
TILEGX_OPC_BNEZT,
TILEGX_OPC_CLZ,
TILEGX_OPC_CMOVEQZ,
TILEGX_OPC_CMOVNEZ,
TILEGX_OPC_CMPEQ,
TILEGX_OPC_CMPEQI,
TILEGX_OPC_CMPEXCH,
TILEGX_OPC_CMPEXCH4,
TILEGX_OPC_CMPLES,
TILEGX_OPC_CMPLEU,
TILEGX_OPC_CMPLTS,
TILEGX_OPC_CMPLTSI,
TILEGX_OPC_CMPLTU,
TILEGX_OPC_CMPLTUI,
TILEGX_OPC_CMPNE,
TILEGX_OPC_CMUL,
TILEGX_OPC_CMULA,
TILEGX_OPC_CMULAF,
TILEGX_OPC_CMULF,
TILEGX_OPC_CMULFR,
TILEGX_OPC_CMULH,
TILEGX_OPC_CMULHR,
TILEGX_OPC_CRC32_32,
TILEGX_OPC_CRC32_8,
TILEGX_OPC_CTZ,
TILEGX_OPC_DBLALIGN,
TILEGX_OPC_DBLALIGN2,
TILEGX_OPC_DBLALIGN4,
TILEGX_OPC_DBLALIGN6,
TILEGX_OPC_DRAIN,
TILEGX_OPC_DTLBPR,
TILEGX_OPC_EXCH,
TILEGX_OPC_EXCH4,
TILEGX_OPC_FDOUBLE_ADD_FLAGS,
TILEGX_OPC_FDOUBLE_ADDSUB,
TILEGX_OPC_FDOUBLE_MUL_FLAGS,
TILEGX_OPC_FDOUBLE_PACK1,
TILEGX_OPC_FDOUBLE_PACK2,
TILEGX_OPC_FDOUBLE_SUB_FLAGS,
TILEGX_OPC_FDOUBLE_UNPACK_MAX,
TILEGX_OPC_FDOUBLE_UNPACK_MIN,
TILEGX_OPC_FETCHADD,
TILEGX_OPC_FETCHADD4,
TILEGX_OPC_FETCHADDGEZ,
TILEGX_OPC_FETCHADDGEZ4,
TILEGX_OPC_FETCHAND,
TILEGX_OPC_FETCHAND4,
TILEGX_OPC_FETCHOR,
TILEGX_OPC_FETCHOR4,
TILEGX_OPC_FINV,
TILEGX_OPC_FLUSH,
TILEGX_OPC_FLUSHWB,
TILEGX_OPC_FNOP,
TILEGX_OPC_FSINGLE_ADD1,
TILEGX_OPC_FSINGLE_ADDSUB2,
TILEGX_OPC_FSINGLE_MUL1,
TILEGX_OPC_FSINGLE_MUL2,
TILEGX_OPC_FSINGLE_PACK1,
TILEGX_OPC_FSINGLE_PACK2,
TILEGX_OPC_FSINGLE_SUB1,
TILEGX_OPC_ICOH,
TILEGX_OPC_ILL,
TILEGX_OPC_INV,
TILEGX_OPC_IRET,
TILEGX_OPC_J,
TILEGX_OPC_JAL,
TILEGX_OPC_JALR,
TILEGX_OPC_JALRP,
TILEGX_OPC_JR,
TILEGX_OPC_JRP,
TILEGX_OPC_LD,
TILEGX_OPC_LD1S,
TILEGX_OPC_LD1S_ADD,
TILEGX_OPC_LD1U,
TILEGX_OPC_LD1U_ADD,
TILEGX_OPC_LD2S,
TILEGX_OPC_LD2S_ADD,
TILEGX_OPC_LD2U,
TILEGX_OPC_LD2U_ADD,
TILEGX_OPC_LD4S,
TILEGX_OPC_LD4S_ADD,
TILEGX_OPC_LD4U,
TILEGX_OPC_LD4U_ADD,
TILEGX_OPC_LD_ADD,
TILEGX_OPC_LDNA,
TILEGX_OPC_LDNA_ADD,
TILEGX_OPC_LDNT,
TILEGX_OPC_LDNT1S,
TILEGX_OPC_LDNT1S_ADD,
TILEGX_OPC_LDNT1U,
TILEGX_OPC_LDNT1U_ADD,
TILEGX_OPC_LDNT2S,
TILEGX_OPC_LDNT2S_ADD,
TILEGX_OPC_LDNT2U,
TILEGX_OPC_LDNT2U_ADD,
TILEGX_OPC_LDNT4S,
TILEGX_OPC_LDNT4S_ADD,
TILEGX_OPC_LDNT4U,
TILEGX_OPC_LDNT4U_ADD,
TILEGX_OPC_LDNT_ADD,
TILEGX_OPC_LNK,
TILEGX_OPC_MF,
TILEGX_OPC_MFSPR,
TILEGX_OPC_MM,
TILEGX_OPC_MNZ,
TILEGX_OPC_MTSPR,
TILEGX_OPC_MUL_HS_HS,
TILEGX_OPC_MUL_HS_HU,
TILEGX_OPC_MUL_HS_LS,
TILEGX_OPC_MUL_HS_LU,
TILEGX_OPC_MUL_HU_HU,
TILEGX_OPC_MUL_HU_LS,
TILEGX_OPC_MUL_HU_LU,
TILEGX_OPC_MUL_LS_LS,
TILEGX_OPC_MUL_LS_LU,
TILEGX_OPC_MUL_LU_LU,
TILEGX_OPC_MULA_HS_HS,
TILEGX_OPC_MULA_HS_HU,
TILEGX_OPC_MULA_HS_LS,
TILEGX_OPC_MULA_HS_LU,
TILEGX_OPC_MULA_HU_HU,
TILEGX_OPC_MULA_HU_LS,
TILEGX_OPC_MULA_HU_LU,
TILEGX_OPC_MULA_LS_LS,
TILEGX_OPC_MULA_LS_LU,
TILEGX_OPC_MULA_LU_LU,
TILEGX_OPC_MULAX,
TILEGX_OPC_MULX,
TILEGX_OPC_MZ,
TILEGX_OPC_NAP,
TILEGX_OPC_NOP,
TILEGX_OPC_NOR,
TILEGX_OPC_OR,
TILEGX_OPC_ORI,
TILEGX_OPC_PCNT,
TILEGX_OPC_REVBITS,
TILEGX_OPC_REVBYTES,
TILEGX_OPC_ROTL,
TILEGX_OPC_ROTLI,
TILEGX_OPC_SHL,
TILEGX_OPC_SHL16INSLI,
TILEGX_OPC_SHL1ADD,
TILEGX_OPC_SHL1ADDX,
TILEGX_OPC_SHL2ADD,
TILEGX_OPC_SHL2ADDX,
TILEGX_OPC_SHL3ADD,
TILEGX_OPC_SHL3ADDX,
TILEGX_OPC_SHLI,
TILEGX_OPC_SHLX,
TILEGX_OPC_SHLXI,
TILEGX_OPC_SHRS,
TILEGX_OPC_SHRSI,
TILEGX_OPC_SHRU,
TILEGX_OPC_SHRUI,
TILEGX_OPC_SHRUX,
TILEGX_OPC_SHRUXI,
TILEGX_OPC_SHUFFLEBYTES,
TILEGX_OPC_ST,
TILEGX_OPC_ST1,
TILEGX_OPC_ST1_ADD,
TILEGX_OPC_ST2,
TILEGX_OPC_ST2_ADD,
TILEGX_OPC_ST4,
TILEGX_OPC_ST4_ADD,
TILEGX_OPC_ST_ADD,
TILEGX_OPC_STNT,
TILEGX_OPC_STNT1,
TILEGX_OPC_STNT1_ADD,
TILEGX_OPC_STNT2,
TILEGX_OPC_STNT2_ADD,
TILEGX_OPC_STNT4,
TILEGX_OPC_STNT4_ADD,
TILEGX_OPC_STNT_ADD,
TILEGX_OPC_SUB,
TILEGX_OPC_SUBX,
TILEGX_OPC_SUBXSC,
TILEGX_OPC_SWINT0,
TILEGX_OPC_SWINT1,
TILEGX_OPC_SWINT2,
TILEGX_OPC_SWINT3,
TILEGX_OPC_TBLIDXB0,
TILEGX_OPC_TBLIDXB1,
TILEGX_OPC_TBLIDXB2,
TILEGX_OPC_TBLIDXB3,
TILEGX_OPC_V1ADD,
TILEGX_OPC_V1ADDI,
TILEGX_OPC_V1ADDUC,
TILEGX_OPC_V1ADIFFU,
TILEGX_OPC_V1AVGU,
TILEGX_OPC_V1CMPEQ,
TILEGX_OPC_V1CMPEQI,
TILEGX_OPC_V1CMPLES,
TILEGX_OPC_V1CMPLEU,
TILEGX_OPC_V1CMPLTS,
TILEGX_OPC_V1CMPLTSI,
TILEGX_OPC_V1CMPLTU,
TILEGX_OPC_V1CMPLTUI,
TILEGX_OPC_V1CMPNE,
TILEGX_OPC_V1DDOTPU,
TILEGX_OPC_V1DDOTPUA,
TILEGX_OPC_V1DDOTPUS,
TILEGX_OPC_V1DDOTPUSA,
TILEGX_OPC_V1DOTP,
TILEGX_OPC_V1DOTPA,
TILEGX_OPC_V1DOTPU,
TILEGX_OPC_V1DOTPUA,
TILEGX_OPC_V1DOTPUS,
TILEGX_OPC_V1DOTPUSA,
TILEGX_OPC_V1INT_H,
TILEGX_OPC_V1INT_L,
TILEGX_OPC_V1MAXU,
TILEGX_OPC_V1MAXUI,
TILEGX_OPC_V1MINU,
TILEGX_OPC_V1MINUI,
TILEGX_OPC_V1MNZ,
TILEGX_OPC_V1MULTU,
TILEGX_OPC_V1MULU,
TILEGX_OPC_V1MULUS,
TILEGX_OPC_V1MZ,
TILEGX_OPC_V1SADAU,
TILEGX_OPC_V1SADU,
TILEGX_OPC_V1SHL,
TILEGX_OPC_V1SHLI,
TILEGX_OPC_V1SHRS,
TILEGX_OPC_V1SHRSI,
TILEGX_OPC_V1SHRU,
TILEGX_OPC_V1SHRUI,
TILEGX_OPC_V1SUB,
TILEGX_OPC_V1SUBUC,
TILEGX_OPC_V2ADD,
TILEGX_OPC_V2ADDI,
TILEGX_OPC_V2ADDSC,
TILEGX_OPC_V2ADIFFS,
TILEGX_OPC_V2AVGS,
TILEGX_OPC_V2CMPEQ,
TILEGX_OPC_V2CMPEQI,
TILEGX_OPC_V2CMPLES,
TILEGX_OPC_V2CMPLEU,
TILEGX_OPC_V2CMPLTS,
TILEGX_OPC_V2CMPLTSI,
TILEGX_OPC_V2CMPLTU,
TILEGX_OPC_V2CMPLTUI,
TILEGX_OPC_V2CMPNE,
TILEGX_OPC_V2DOTP,
TILEGX_OPC_V2DOTPA,
TILEGX_OPC_V2INT_H,
TILEGX_OPC_V2INT_L,
TILEGX_OPC_V2MAXS,
TILEGX_OPC_V2MAXSI,
TILEGX_OPC_V2MINS,
TILEGX_OPC_V2MINSI,
TILEGX_OPC_V2MNZ,
TILEGX_OPC_V2MULFSC,
TILEGX_OPC_V2MULS,
TILEGX_OPC_V2MULTS,
TILEGX_OPC_V2MZ,
TILEGX_OPC_V2PACKH,
TILEGX_OPC_V2PACKL,
TILEGX_OPC_V2PACKUC,
TILEGX_OPC_V2SADAS,
TILEGX_OPC_V2SADAU,
TILEGX_OPC_V2SADS,
TILEGX_OPC_V2SADU,
TILEGX_OPC_V2SHL,
TILEGX_OPC_V2SHLI,
TILEGX_OPC_V2SHLSC,
TILEGX_OPC_V2SHRS,
TILEGX_OPC_V2SHRSI,
TILEGX_OPC_V2SHRU,
TILEGX_OPC_V2SHRUI,
TILEGX_OPC_V2SUB,
TILEGX_OPC_V2SUBSC,
TILEGX_OPC_V4ADD,
TILEGX_OPC_V4ADDSC,
TILEGX_OPC_V4INT_H,
TILEGX_OPC_V4INT_L,
TILEGX_OPC_V4PACKSC,
TILEGX_OPC_V4SHL,
TILEGX_OPC_V4SHLSC,
TILEGX_OPC_V4SHRS,
TILEGX_OPC_V4SHRU,
TILEGX_OPC_V4SUB,
TILEGX_OPC_V4SUBSC,
TILEGX_OPC_WH64,
TILEGX_OPC_XOR,
TILEGX_OPC_XORI,
TILEGX_OPC_NONE
} tilegx_mnemonic;
typedef enum
{
TILEGX_PIPELINE_X0,
TILEGX_PIPELINE_X1,
TILEGX_PIPELINE_Y0,
TILEGX_PIPELINE_Y1,
TILEGX_PIPELINE_Y2,
} tilegx_pipeline;
#define tilegx_is_x_pipeline(p) ((int)(p) <= (int)TILEGX_PIPELINE_X1)
typedef enum
{
TILEGX_OP_TYPE_REGISTER,
TILEGX_OP_TYPE_IMMEDIATE,
TILEGX_OP_TYPE_ADDRESS,
TILEGX_OP_TYPE_SPR
} tilegx_operand_type;
struct tilegx_operand
{
/* Is this operand a register, immediate or address? */
tilegx_operand_type type;
/* The default relocation type for this operand. */
signed int default_reloc : 16;
/* How many bits is this value? (used for range checking) */
unsigned int num_bits : 5;
/* Is the value signed? (used for range checking) */
unsigned int is_signed : 1;
/* Is this operand a source register? */
unsigned int is_src_reg : 1;
/* Is this operand written? (i.e. is it a destination register) */
unsigned int is_dest_reg : 1;
/* Is this operand PC-relative? */
unsigned int is_pc_relative : 1;
/* By how many bits do we right shift the value before inserting? */
unsigned int rightshift : 2;
/* Return the bits for this operand to be ORed into an existing bundle. */
tilegx_bundle_bits (*insert) (int op);
/* Extract this operand and return it. */
unsigned int (*extract) (tilegx_bundle_bits bundle);
};
extern const struct tilegx_operand tilegx_operands[];
/* One finite-state machine per pipe for rapid instruction decoding. */
extern const unsigned short * const
tilegx_bundle_decoder_fsms[TILEGX_NUM_PIPELINE_ENCODINGS];
struct tilegx_opcode
{
/* The opcode mnemonic, e.g. "add" */
const char *name;
/* The enum value for this mnemonic. */
tilegx_mnemonic mnemonic;
/* A bit mask of which of the five pipes this instruction
is compatible with:
X0 0x01
X1 0x02
Y0 0x04
Y1 0x08
Y2 0x10 */
unsigned char pipes;
/* How many operands are there? */
unsigned char num_operands;
/* Which register does this write implicitly, or TREG_ZERO if none? */
unsigned char implicitly_written_register;
/* Can this be bundled with other instructions (almost always true). */
unsigned char can_bundle;
/* The description of the operands. Each of these is an
* index into the tilegx_operands[] table. */
unsigned char operands[TILEGX_NUM_PIPELINE_ENCODINGS][TILEGX_MAX_OPERANDS];
};
extern const struct tilegx_opcode tilegx_opcodes[];
/* Used for non-textual disassembly into structs. */
struct tilegx_decoded_instruction
{
const struct tilegx_opcode *opcode;
const struct tilegx_operand *operands[TILEGX_MAX_OPERANDS];
long long operand_values[TILEGX_MAX_OPERANDS];
};
/* Disassemble a bundle into a struct for machine processing. */
extern int parse_insn_tilegx(tilegx_bundle_bits bits,
unsigned long long pc,
struct tilegx_decoded_instruction
decoded[TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE]);
#endif /* opcode_tilegx_h */

View File

@ -0,0 +1,52 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_TIMEX_H
#define _ASM_TILE_TIMEX_H
/*
* This rate should be a multiple of the possible HZ values (100, 250, 1000)
* and a fraction of the possible hardware timer frequencies. Our timer
* frequency is highly tunable but also quite precise, so for the primary use
* of this value (setting ACT_HZ from HZ) we just pick a value that causes
* ACT_HZ to be set to HZ. We make the value somewhat large just to be
* more robust in case someone tries out a new value of HZ.
*/
#define CLOCK_TICK_RATE 1000000
typedef unsigned long long cycles_t;
#if CHIP_HAS_SPLIT_CYCLE()
cycles_t get_cycles(void);
#define get_cycles_low() __insn_mfspr(SPR_CYCLE_LOW)
#else
static inline cycles_t get_cycles(void)
{
return __insn_mfspr(SPR_CYCLE);
}
#define get_cycles_low() __insn_mfspr(SPR_CYCLE) /* just get all 64 bits */
#endif
cycles_t get_clock_rate(void);
/* Convert nanoseconds to core clock cycles. */
cycles_t ns2cycles(unsigned long nsecs);
/* Called at cpu initialization to set some low-level constants. */
void setup_clock(void);
/* Called at cpu initialization to start the tile-timer clock device. */
void setup_tile_timer(void);
#endif /* _ASM_TILE_TIMEX_H */

View File

@ -0,0 +1,25 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_TLB_H
#define _ASM_TILE_TLB_H
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
#include <asm-generic/tlb.h>
#endif /* _ASM_TILE_TLB_H */

View File

@ -0,0 +1,128 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_TLBFLUSH_H
#define _ASM_TILE_TLBFLUSH_H
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/cacheflush.h>
#include <asm/page.h>
#include <hv/hypervisor.h>
/*
* Rather than associating each mm with its own ASID, we just use
* ASIDs to allow us to lazily flush the TLB when we switch mms.
* This way we only have to do an actual TLB flush on mm switch
* every time we wrap ASIDs, not every single time we switch.
*
* FIXME: We might improve performance by keeping ASIDs around
* properly, though since the hypervisor direct-maps VAs to TSB
* entries, we're likely to have lost at least the executable page
* mappings by the time we switch back to the original mm.
*/
DECLARE_PER_CPU(int, current_asid);
/* The hypervisor tells us what ASIDs are available to us. */
extern int min_asid, max_asid;
static inline unsigned long hv_page_size(const struct vm_area_struct *vma)
{
return (vma->vm_flags & VM_HUGETLB) ? HPAGE_SIZE : PAGE_SIZE;
}
/* Pass as vma pointer for non-executable mapping, if no vma available. */
#define FLUSH_NONEXEC ((const struct vm_area_struct *)-1UL)
/* Flush a single user page on this cpu. */
static inline void local_flush_tlb_page(const struct vm_area_struct *vma,
unsigned long addr,
unsigned long page_size)
{
int rc = hv_flush_page(addr, page_size);
if (rc < 0)
panic("hv_flush_page(%#lx,%#lx) failed: %d",
addr, page_size, rc);
if (!vma || (vma != FLUSH_NONEXEC && (vma->vm_flags & VM_EXEC)))
__flush_icache();
}
/* Flush range of user pages on this cpu. */
static inline void local_flush_tlb_pages(const struct vm_area_struct *vma,
unsigned long addr,
unsigned long page_size,
unsigned long len)
{
int rc = hv_flush_pages(addr, page_size, len);
if (rc < 0)
panic("hv_flush_pages(%#lx,%#lx,%#lx) failed: %d",
addr, page_size, len, rc);
if (!vma || (vma != FLUSH_NONEXEC && (vma->vm_flags & VM_EXEC)))
__flush_icache();
}
/* Flush all user pages on this cpu. */
static inline void local_flush_tlb(void)
{
int rc = hv_flush_all(1); /* preserve global mappings */
if (rc < 0)
panic("hv_flush_all(1) failed: %d", rc);
__flush_icache();
}
/*
* Global pages have to be flushed a bit differently. Not a real
* performance problem because this does not happen often.
*/
static inline void local_flush_tlb_all(void)
{
int i;
for (i = 0; ; ++i) {
HV_VirtAddrRange r = hv_inquire_virtual(i);
if (r.size == 0)
break;
local_flush_tlb_pages(NULL, r.start, PAGE_SIZE, r.size);
local_flush_tlb_pages(NULL, r.start, HPAGE_SIZE, r.size);
}
}
/*
* TLB flushing:
*
* - flush_tlb() flushes the current mm struct TLBs
* - flush_tlb_all() flushes all processes TLBs
* - flush_tlb_mm(mm) flushes the specified mm context TLB's
* - flush_tlb_page(vma, vmaddr) flushes one page
* - flush_tlb_range(vma, start, end) flushes a range of pages
* - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
* - flush_tlb_others(cpumask, mm, va) flushes TLBs on other cpus
*
* Here (as in vm_area_struct), "end" means the first byte after
* our end address.
*/
extern void flush_tlb_all(void);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
extern void flush_tlb_current_task(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(const struct vm_area_struct *, unsigned long);
extern void flush_tlb_page_mm(const struct vm_area_struct *,
struct mm_struct *, unsigned long);
extern void flush_tlb_range(const struct vm_area_struct *,
unsigned long start, unsigned long end);
#define flush_tlb() flush_tlb_current_task()
#endif /* _ASM_TILE_TLBFLUSH_H */

View File

@ -0,0 +1,124 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_TOPOLOGY_H
#define _ASM_TILE_TOPOLOGY_H
#ifdef CONFIG_NUMA
#include <linux/cpumask.h>
/* Mappings between logical cpu number and node number. */
extern struct cpumask node_2_cpu_mask[];
extern char cpu_2_node[];
/* Returns the number of the node containing CPU 'cpu'. */
static inline int cpu_to_node(int cpu)
{
return cpu_2_node[cpu];
}
/*
* Returns the number of the node containing Node 'node'.
* This architecture is flat, so it is a pretty simple function!
*/
#define parent_node(node) (node)
/* Returns a bitmask of CPUs on Node 'node'. */
static inline const struct cpumask *cpumask_of_node(int node)
{
return &node_2_cpu_mask[node];
}
/* For now, use numa node -1 for global allocation. */
#define pcibus_to_node(bus) ((void)(bus), -1)
/*
* TILE architecture has many cores integrated in one processor, so we need
* setup bigger balance_interval for both CPU/NODE scheduling domains to
* reduce process scheduling costs.
*/
/* sched_domains SD_CPU_INIT for TILE architecture */
#define SD_CPU_INIT (struct sched_domain) { \
.min_interval = 4, \
.max_interval = 128, \
.busy_factor = 64, \
.imbalance_pct = 125, \
.cache_nice_tries = 1, \
.busy_idx = 2, \
.idle_idx = 1, \
.newidle_idx = 0, \
.wake_idx = 0, \
.forkexec_idx = 0, \
\
.flags = 1*SD_LOAD_BALANCE \
| 1*SD_BALANCE_NEWIDLE \
| 1*SD_BALANCE_EXEC \
| 1*SD_BALANCE_FORK \
| 0*SD_BALANCE_WAKE \
| 0*SD_WAKE_AFFINE \
| 0*SD_PREFER_LOCAL \
| 0*SD_SHARE_CPUPOWER \
| 0*SD_SHARE_PKG_RESOURCES \
| 0*SD_SERIALIZE \
, \
.last_balance = jiffies, \
.balance_interval = 32, \
}
/* sched_domains SD_NODE_INIT for TILE architecture */
#define SD_NODE_INIT (struct sched_domain) { \
.min_interval = 16, \
.max_interval = 512, \
.busy_factor = 32, \
.imbalance_pct = 125, \
.cache_nice_tries = 1, \
.busy_idx = 3, \
.idle_idx = 1, \
.newidle_idx = 2, \
.wake_idx = 1, \
.flags = 1*SD_LOAD_BALANCE \
| 1*SD_BALANCE_NEWIDLE \
| 1*SD_BALANCE_EXEC \
| 1*SD_BALANCE_FORK \
| 0*SD_BALANCE_WAKE \
| 0*SD_WAKE_AFFINE \
| 0*SD_PREFER_LOCAL \
| 0*SD_SHARE_CPUPOWER \
| 0*SD_SHARE_PKG_RESOURCES \
| 1*SD_SERIALIZE \
, \
.last_balance = jiffies, \
.balance_interval = 128, \
}
/* By definition, we create nodes based on online memory. */
#define node_has_online_mem(nid) 1
#endif /* CONFIG_NUMA */
#include <asm-generic/topology.h>
#ifdef CONFIG_SMP
#define topology_physical_package_id(cpu) ((void)(cpu), 0)
#define topology_core_id(cpu) (cpu)
#define topology_core_cpumask(cpu) ((void)(cpu), cpu_online_mask)
#define topology_thread_cpumask(cpu) cpumask_of(cpu)
/* indicates that pointers to the topology struct cpumask maps are valid */
#define arch_provides_topology_pointers yes
#endif
#endif /* _ASM_TILE_TOPOLOGY_H */

View File

@ -0,0 +1,74 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_TRAPS_H
#define _ASM_TILE_TRAPS_H
#include <arch/chip.h>
/* mm/fault.c */
void do_page_fault(struct pt_regs *, int fault_num,
unsigned long address, unsigned long write);
#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
void do_async_page_fault(struct pt_regs *);
#endif
#ifndef __tilegx__
/*
* We return this structure in registers to avoid having to write
* additional save/restore code in the intvec.S caller.
*/
struct intvec_state {
void *handler;
unsigned long vecnum;
unsigned long fault_num;
unsigned long info;
unsigned long retval;
};
struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
unsigned long address,
unsigned long info);
#endif
/* kernel/traps.c */
void do_trap(struct pt_regs *, int fault_num, unsigned long reason);
void kernel_double_fault(int dummy, ulong pc, ulong lr, ulong sp, ulong r52);
/* kernel/time.c */
void do_timer_interrupt(struct pt_regs *, int fault_num);
/* kernel/messaging.c */
void hv_message_intr(struct pt_regs *, int intnum);
/* kernel/irq.c */
void tile_dev_intr(struct pt_regs *, int intnum);
#ifdef CONFIG_HARDWALL
/* kernel/hardwall.c */
void do_hardwall_trap(struct pt_regs *, int fault_num);
#endif
/* kernel/ptrace.c */
void do_breakpoint(struct pt_regs *, int fault_num);
#ifdef __tilegx__
/* kernel/single_step.c */
void gx_singlestep_handle(struct pt_regs *, int fault_num);
/* kernel/intvec_64.S */
void fill_ra_stack(void);
#endif
#endif /* _ASM_TILE_TRAPS_H */

View File

@ -0,0 +1,580 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_UACCESS_H
#define _ASM_TILE_UACCESS_H
/*
* User space memory access functions
*/
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm-generic/uaccess-unaligned.h>
#include <asm/processor.h>
#include <asm/page.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
/*
* The fs value determines whether argument validity checking should be
* performed or not. If get_fs() == USER_DS, checking is performed, with
* get_fs() == KERNEL_DS, checking is bypassed.
*
* For historical reasons, these macros are grossly misnamed.
*/
#define MAKE_MM_SEG(a) ((mm_segment_t) { (a) })
#define KERNEL_DS MAKE_MM_SEG(-1UL)
#define USER_DS MAKE_MM_SEG(PAGE_OFFSET)
#define get_ds() (KERNEL_DS)
#define get_fs() (current_thread_info()->addr_limit)
#define set_fs(x) (current_thread_info()->addr_limit = (x))
#define segment_eq(a, b) ((a).seg == (b).seg)
#ifndef __tilegx__
/*
* We could allow mapping all 16 MB at 0xfc000000, but we set up a
* special hack in arch_setup_additional_pages() to auto-create a mapping
* for the first 16 KB, and it would seem strange to have different
* user-accessible semantics for memory at 0xfc000000 and above 0xfc004000.
*/
static inline int is_arch_mappable_range(unsigned long addr,
unsigned long size)
{
return (addr >= MEM_USER_INTRPT &&
addr < (MEM_USER_INTRPT + INTRPT_SIZE) &&
size <= (MEM_USER_INTRPT + INTRPT_SIZE) - addr);
}
#define is_arch_mappable_range is_arch_mappable_range
#else
#define is_arch_mappable_range(addr, size) 0
#endif
/*
* Test whether a block of memory is a valid user space address.
* Returns 0 if the range is valid, nonzero otherwise.
*/
int __range_ok(unsigned long addr, unsigned long size);
/**
* access_ok: - Checks if a user space pointer is valid
* @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
* %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
* to write to a block, it is always safe to read from it.
* @addr: User space pointer to start of block to check
* @size: Size of block to check
*
* Context: User context only. This function may sleep.
*
* Checks if a pointer to a block of memory in user space is valid.
*
* Returns true (nonzero) if the memory block may be valid, false (zero)
* if it is definitely invalid.
*
* Note that, depending on architecture, this function probably just
* checks that the pointer is in the user space range - after calling
* this function, memory access functions may still return -EFAULT.
*/
#define access_ok(type, addr, size) ({ \
__chk_user_ptr(addr); \
likely(__range_ok((unsigned long)(addr), (size)) == 0); \
})
/*
* The exception table consists of pairs of addresses: the first is the
* address of an instruction that is allowed to fault, and the second is
* the address at which the program should continue. No registers are
* modified, so it is entirely up to the continuation code to figure out
* what to do.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
* we don't even have to jump over them. Further, they do not intrude
* on our cache or tlb entries.
*/
struct exception_table_entry {
unsigned long insn, fixup;
};
extern int fixup_exception(struct pt_regs *regs);
/*
* We return the __get_user_N function results in a structure,
* thus in r0 and r1. If "err" is zero, "val" is the result
* of the read; otherwise, "err" is -EFAULT.
*
* We rarely need 8-byte values on a 32-bit architecture, but
* we size the structure to accommodate. In practice, for the
* the smaller reads, we can zero the high word for free, and
* the caller will ignore it by virtue of casting anyway.
*/
struct __get_user {
unsigned long long val;
int err;
};
/*
* FIXME: we should express these as inline extended assembler, since
* they're fundamentally just a variable dereference and some
* supporting exception_table gunk. Note that (a la i386) we can
* extend the copy_to_user and copy_from_user routines to call into
* such extended assembler routines, though we will have to use a
* different return code in that case (1, 2, or 4, rather than -EFAULT).
*/
extern struct __get_user __get_user_1(const void __user *);
extern struct __get_user __get_user_2(const void __user *);
extern struct __get_user __get_user_4(const void __user *);
extern struct __get_user __get_user_8(const void __user *);
extern int __put_user_1(long, void __user *);
extern int __put_user_2(long, void __user *);
extern int __put_user_4(long, void __user *);
extern int __put_user_8(long long, void __user *);
/* Unimplemented routines to cause linker failures */
extern struct __get_user __get_user_bad(void);
extern int __put_user_bad(void);
/*
* Careful: we have to cast the result to the type of the pointer
* for sign reasons.
*/
/**
* __get_user: - Get a simple variable from user space, with less checking.
* @x: Variable to store result.
* @ptr: Source address, in user space.
*
* Context: User context only. This function may sleep.
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
* data types like structures or arrays.
*
* @ptr must have pointer-to-simple-variable type, and the result of
* dereferencing @ptr must be assignable to @x without a cast.
*
* Returns zero on success, or -EFAULT on error.
* On error, the variable @x is set to zero.
*
* Caller must check the pointer with access_ok() before calling this
* function.
*/
#define __get_user(x, ptr) \
({ struct __get_user __ret; \
__typeof__(*(ptr)) const __user *__gu_addr = (ptr); \
__chk_user_ptr(__gu_addr); \
switch (sizeof(*(__gu_addr))) { \
case 1: \
__ret = __get_user_1(__gu_addr); \
break; \
case 2: \
__ret = __get_user_2(__gu_addr); \
break; \
case 4: \
__ret = __get_user_4(__gu_addr); \
break; \
case 8: \
__ret = __get_user_8(__gu_addr); \
break; \
default: \
__ret = __get_user_bad(); \
break; \
} \
(x) = (__typeof__(*__gu_addr)) (__typeof__(*__gu_addr - *__gu_addr)) \
__ret.val; \
__ret.err; \
})
/**
* __put_user: - Write a simple value into user space, with less checking.
* @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
* Context: User context only. This function may sleep.
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
* data types like structures or arrays.
*
* @ptr must have pointer-to-simple-variable type, and @x must be assignable
* to the result of dereferencing @ptr.
*
* Caller must check the pointer with access_ok() before calling this
* function.
*
* Returns zero on success, or -EFAULT on error.
*
* Implementation note: The "case 8" logic of casting to the type of
* the result of subtracting the value from itself is basically a way
* of keeping all integer types the same, but casting any pointers to
* ptrdiff_t, i.e. also an integer type. This way there are no
* questionable casts seen by the compiler on an ILP32 platform.
*/
#define __put_user(x, ptr) \
({ \
int __pu_err = 0; \
__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
typeof(*__pu_addr) __pu_val = (x); \
__chk_user_ptr(__pu_addr); \
switch (sizeof(__pu_val)) { \
case 1: \
__pu_err = __put_user_1((long)__pu_val, __pu_addr); \
break; \
case 2: \
__pu_err = __put_user_2((long)__pu_val, __pu_addr); \
break; \
case 4: \
__pu_err = __put_user_4((long)__pu_val, __pu_addr); \
break; \
case 8: \
__pu_err = \
__put_user_8((__typeof__(__pu_val - __pu_val))__pu_val,\
__pu_addr); \
break; \
default: \
__pu_err = __put_user_bad(); \
break; \
} \
__pu_err; \
})
/*
* The versions of get_user and put_user without initial underscores
* check the address of their arguments to make sure they are not
* in kernel space.
*/
#define put_user(x, ptr) \
({ \
__typeof__(*(ptr)) __user *__Pu_addr = (ptr); \
access_ok(VERIFY_WRITE, (__Pu_addr), sizeof(*(__Pu_addr))) ? \
__put_user((x), (__Pu_addr)) : \
-EFAULT; \
})
#define get_user(x, ptr) \
({ \
__typeof__(*(ptr)) const __user *__Gu_addr = (ptr); \
access_ok(VERIFY_READ, (__Gu_addr), sizeof(*(__Gu_addr))) ? \
__get_user((x), (__Gu_addr)) : \
((x) = 0, -EFAULT); \
})
/**
* __copy_to_user() - copy data into user space, with less checking.
* @to: Destination address, in user space.
* @from: Source address, in kernel space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep.
*
* Copy data from kernel space to user space. Caller must check
* the specified block with access_ok() before calling this function.
*
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*
* An alternate version - __copy_to_user_inatomic() - is designed
* to be called from atomic context, typically bracketed by calls
* to pagefault_disable() and pagefault_enable().
*/
extern unsigned long __must_check __copy_to_user_inatomic(
void __user *to, const void *from, unsigned long n);
static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
return __copy_to_user_inatomic(to, from, n);
}
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
if (access_ok(VERIFY_WRITE, to, n))
n = __copy_to_user(to, from, n);
return n;
}
/**
* __copy_from_user() - copy data from user space, with less checking.
* @to: Destination address, in kernel space.
* @from: Source address, in user space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep.
*
* Copy data from user space to kernel space. Caller must check
* the specified block with access_ok() before calling this function.
*
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*
* If some data could not be copied, this function will pad the copied
* data to the requested size using zero bytes.
*
* An alternate version - __copy_from_user_inatomic() - is designed
* to be called from atomic context, typically bracketed by calls
* to pagefault_disable() and pagefault_enable(). This version
* does *NOT* pad with zeros.
*/
extern unsigned long __must_check __copy_from_user_inatomic(
void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __copy_from_user_zeroing(
void *to, const void __user *from, unsigned long n);
static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
might_fault();
return __copy_from_user_zeroing(to, from, n);
}
static inline unsigned long __must_check
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
if (access_ok(VERIFY_READ, from, n))
n = __copy_from_user(to, from, n);
else
memset(to, 0, n);
return n;
}
#ifdef CONFIG_DEBUG_COPY_FROM_USER
extern void copy_from_user_overflow(void)
__compiletime_warning("copy_from_user() size is not provably correct");
static inline unsigned long __must_check copy_from_user(void *to,
const void __user *from,
unsigned long n)
{
int sz = __compiletime_object_size(to);
if (likely(sz == -1 || sz >= n))
n = _copy_from_user(to, from, n);
else
copy_from_user_overflow();
return n;
}
#else
#define copy_from_user _copy_from_user
#endif
#ifdef __tilegx__
/**
* __copy_in_user() - copy data within user space, with less checking.
* @to: Destination address, in user space.
* @from: Source address, in kernel space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep.
*
* Copy data from user space to user space. Caller must check
* the specified blocks with access_ok() before calling this function.
*
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*/
extern unsigned long __copy_in_user_inatomic(
void __user *to, const void __user *from, unsigned long n);
static inline unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
might_sleep();
return __copy_in_user_inatomic(to, from, n);
}
static inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
n = __copy_in_user(to, from, n);
return n;
}
#endif
/**
* strlen_user: - Get the size of a string in user space.
* @str: The string to measure.
*
* Context: User context only. This function may sleep.
*
* Get the size of a NUL-terminated string in user space.
*
* Returns the size of the string INCLUDING the terminating NUL.
* On exception, returns 0.
*
* If there is a limit on the length of a valid string, you may wish to
* consider using strnlen_user() instead.
*/
extern long strnlen_user_asm(const char __user *str, long n);
static inline long __must_check strnlen_user(const char __user *str, long n)
{
might_fault();
return strnlen_user_asm(str, n);
}
#define strlen_user(str) strnlen_user(str, LONG_MAX)
/**
* strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
* @dst: Destination address, in kernel space. This buffer must be at
* least @count bytes long.
* @src: Source address, in user space.
* @count: Maximum number of bytes to copy, including the trailing NUL.
*
* Copies a NUL-terminated string from userspace to kernel space.
* Caller must check the specified block with access_ok() before calling
* this function.
*
* On success, returns the length of the string (not including the trailing
* NUL).
*
* If access to userspace fails, returns -EFAULT (some data may have been
* copied).
*
* If @count is smaller than the length of the string, copies @count bytes
* and returns @count.
*/
extern long strncpy_from_user_asm(char *dst, const char __user *src, long);
static inline long __must_check __strncpy_from_user(
char *dst, const char __user *src, long count)
{
might_fault();
return strncpy_from_user_asm(dst, src, count);
}
static inline long __must_check strncpy_from_user(
char *dst, const char __user *src, long count)
{
if (access_ok(VERIFY_READ, src, 1))
return __strncpy_from_user(dst, src, count);
return -EFAULT;
}
/**
* clear_user: - Zero a block of memory in user space.
* @mem: Destination address, in user space.
* @len: Number of bytes to zero.
*
* Zero a block of memory in user space.
*
* Returns number of bytes that could not be cleared.
* On success, this will be zero.
*/
extern unsigned long clear_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __clear_user(
void __user *mem, unsigned long len)
{
might_fault();
return clear_user_asm(mem, len);
}
static inline unsigned long __must_check clear_user(
void __user *mem, unsigned long len)
{
if (access_ok(VERIFY_WRITE, mem, len))
return __clear_user(mem, len);
return len;
}
/**
* flush_user: - Flush a block of memory in user space from cache.
* @mem: Destination address, in user space.
* @len: Number of bytes to flush.
*
* Returns number of bytes that could not be flushed.
* On success, this will be zero.
*/
extern unsigned long flush_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __flush_user(
void __user *mem, unsigned long len)
{
int retval;
might_fault();
retval = flush_user_asm(mem, len);
mb_incoherent();
return retval;
}
static inline unsigned long __must_check flush_user(
void __user *mem, unsigned long len)
{
if (access_ok(VERIFY_WRITE, mem, len))
return __flush_user(mem, len);
return len;
}
/**
* inv_user: - Invalidate a block of memory in user space from cache.
* @mem: Destination address, in user space.
* @len: Number of bytes to invalidate.
*
* Returns number of bytes that could not be invalidated.
* On success, this will be zero.
*
* Note that on Tile64, the "inv" operation is in fact a
* "flush and invalidate", so cache write-backs will occur prior
* to the cache being marked invalid.
*/
extern unsigned long inv_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __inv_user(
void __user *mem, unsigned long len)
{
int retval;
might_fault();
retval = inv_user_asm(mem, len);
mb_incoherent();
return retval;
}
static inline unsigned long __must_check inv_user(
void __user *mem, unsigned long len)
{
if (access_ok(VERIFY_WRITE, mem, len))
return __inv_user(mem, len);
return len;
}
/**
* finv_user: - Flush-inval a block of memory in user space from cache.
* @mem: Destination address, in user space.
* @len: Number of bytes to invalidate.
*
* Returns number of bytes that could not be flush-invalidated.
* On success, this will be zero.
*/
extern unsigned long finv_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __finv_user(
void __user *mem, unsigned long len)
{
int retval;
might_fault();
retval = finv_user_asm(mem, len);
mb_incoherent();
return retval;
}
static inline unsigned long __must_check finv_user(
void __user *mem, unsigned long len)
{
if (access_ok(VERIFY_WRITE, mem, len))
return __finv_user(mem, len);
return len;
}
#endif /* _ASM_TILE_UACCESS_H */

View File

@ -0,0 +1,39 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_UNALIGNED_H
#define _ASM_TILE_UNALIGNED_H
#include <linux/unaligned/le_struct.h>
#include <linux/unaligned/be_byteshift.h>
#include <linux/unaligned/generic.h>
#define get_unaligned __get_unaligned_le
#define put_unaligned __put_unaligned_le
/*
* Is the kernel doing fixups of unaligned accesses? If <0, no kernel
* intervention occurs and SIGBUS is delivered with no data address
* info. If 0, the kernel single-steps the instruction to discover
* the data address to provide with the SIGBUS. If 1, the kernel does
* a fixup.
*/
extern int unaligned_fixup;
/* Is the kernel printing on each unaligned fixup? */
extern int unaligned_printk;
/* Number of unaligned fixups performed */
extern unsigned int unaligned_fixup_count;
#endif /* _ASM_TILE_UNALIGNED_H */

View File

@ -0,0 +1,47 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#if !defined(_ASM_TILE_UNISTD_H) || defined(__SYSCALL)
#define _ASM_TILE_UNISTD_H
#if !defined(__LP64__) || defined(__SYSCALL_COMPAT)
/* Use the flavor of this syscall that matches the 32-bit API better. */
#define __ARCH_WANT_SYNC_FILE_RANGE2
#endif
/* Use the standard ABI for syscalls. */
#include <asm-generic/unistd.h>
/* Additional Tilera-specific syscalls. */
#define __NR_flush_cache (__NR_arch_specific_syscall + 1)
__SYSCALL(__NR_flush_cache, sys_flush_cache)
#ifndef __tilegx__
/* "Fast" syscalls provide atomic support for 32-bit chips. */
#define __NR_FAST_cmpxchg -1
#define __NR_FAST_atomic_update -2
#define __NR_FAST_cmpxchg64 -3
#define __NR_cmpxchg_badaddr (__NR_arch_specific_syscall + 0)
__SYSCALL(__NR_cmpxchg_badaddr, sys_cmpxchg_badaddr)
#endif
#ifdef __KERNEL__
/* In compat mode, we use sys_llseek() for compat_sys_llseek(). */
#ifdef CONFIG_COMPAT
#define __ARCH_WANT_SYS_LLSEEK
#endif
#define __ARCH_WANT_SYS_NEWFSTATAT
#endif
#endif /* _ASM_TILE_UNISTD_H */

View File

@ -0,0 +1,21 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
*/
#ifndef _ASM_TILE_USER_H
#define _ASM_TILE_USER_H
/* This header is for a.out file formats, which TILE does not support. */
#endif /* _ASM_TILE_USER_H */

View File

@ -0,0 +1,39 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
* Access to VGA videoram.
*/
#ifndef _ASM_TILE_VGA_H
#define _ASM_TILE_VGA_H
#include <asm/io.h>
#define VT_BUF_HAVE_RW
static inline void scr_writew(u16 val, volatile u16 *addr)
{
__raw_writew(val, (volatile u16 __iomem *) addr);
}
static inline u16 scr_readw(volatile const u16 *addr)
{
return __raw_readw((volatile const u16 __iomem *) addr);
}
#define vga_readb(a) readb((u8 __iomem *)(a))
#define vga_writeb(v,a) writeb(v, (u8 __iomem *)(a))
#define VGA_MAP_MEM(x,s) ((unsigned long) ioremap(x, s))
#endif