M7350v1_en_gpl

commit f9cc65cfda
Author: T
Date:   2024-09-09 08:52:07 +00:00

65988 changed files with 26357421 additions and 0 deletions


@@ -0,0 +1,13 @@
#
# arch/blackfin/mach-common/Makefile
#
obj-y := \
cache.o cache-c.o entry.o head.o \
interrupt.o arch_checks.o ints-priority.o
obj-$(CONFIG_PM) += pm.o dpmc_modes.o
obj-$(CONFIG_CPU_FREQ) += cpufreq.o
obj-$(CONFIG_CPU_VOLTAGE) += dpmc.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_BFIN_KERNEL_CLOCK) += clocks-init.o


@@ -0,0 +1,66 @@
/*
* Do some checking to make sure things are OK
*
* Copyright 2007-2010 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#include <asm/fixed_code.h>
#include <mach/anomaly.h>
#include <asm/clocks.h>
#ifdef CONFIG_BFIN_KERNEL_CLOCK
# if (CONFIG_VCO_HZ > CONFIG_MAX_VCO_HZ)
# error "VCO selected is more than maximum value. Please change the VCO multipler"
# endif
# if (CONFIG_SCLK_HZ > CONFIG_MAX_SCLK_HZ)
# error "Sclk value selected is more than maximum. Please select a proper value for SCLK multiplier"
# endif
# if (CONFIG_SCLK_HZ < CONFIG_MIN_SCLK_HZ)
# error "Sclk value selected is less than minimum. Please select a proper value for SCLK multiplier"
# endif
# if (ANOMALY_05000273) && (CONFIG_SCLK_HZ * 2 > CONFIG_CCLK_HZ)
# error "ANOMALY 05000273, please make sure CCLK is at least 2x SCLK"
# endif
# if (CONFIG_SCLK_HZ > CONFIG_CCLK_HZ) && (CONFIG_SCLK_HZ != CONFIG_CLKIN_HZ) && (CONFIG_CCLK_HZ != CONFIG_CLKIN_HZ)
# error "Please select sclk less than cclk"
# endif
#endif /* CONFIG_BFIN_KERNEL_CLOCK */
#if CONFIG_BOOT_LOAD < FIXED_CODE_END
# error "The kernel load address must be after the fixed code section"
#endif
#if (CONFIG_BOOT_LOAD & 0x3)
# error "The kernel load address must be 4 byte aligned"
#endif
/* The entire kernel must be able to make a 24bit pcrel call to start of L1 */
#if ((0xffffffff - L1_CODE_START + 1) + CONFIG_BOOT_LOAD) > 0x1000000
# error "The kernel load address is too high; keep it below 10meg for safety"
#endif
#if ANOMALY_05000263 && defined(CONFIG_MPU)
# error the MPU will not function safely while Anomaly 05000263 applies
#endif
#if ANOMALY_05000448
# error You are using a part with anomaly 05000448, this issue causes random memory read/write failures - that means random crashes.
#endif
/* If anomaly 05000220 applies, you cannot set External Memory to Write Back while L2 is not cached, nor External Memory to not cached while L2 is Write Back */
#if ANOMALY_05000220 && \
(defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK))
# error "Anomaly 05000220 does not allow you to use Write Back cache with L2 or External Memory"
#endif
#if ANOMALY_05000491 && !defined(CONFIG_ICACHE_FLUSH_L1)
# error You need IFLUSH in L1 inst while Anomaly 05000491 applies
#endif


@@ -0,0 +1,76 @@
/*
* Blackfin cache control code (simpler control-style functions)
*
* Copyright 2004-2009 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#include <linux/init.h>
#include <asm/blackfin.h>
#include <asm/cplbinit.h>
/* Invalidate the Entire Data cache by
* clearing DMC[1:0] bits
*/
void blackfin_invalidate_entire_dcache(void)
{
u32 dmem = bfin_read_DMEM_CONTROL();
bfin_write_DMEM_CONTROL(dmem & ~0xc);
SSYNC();
bfin_write_DMEM_CONTROL(dmem);
SSYNC();
}
/* Invalidate the Entire Instruction cache by
* clearing IMC bit
*/
void blackfin_invalidate_entire_icache(void)
{
u32 imem = bfin_read_IMEM_CONTROL();
bfin_write_IMEM_CONTROL(imem & ~0x4);
SSYNC();
bfin_write_IMEM_CONTROL(imem);
SSYNC();
}
#if defined(CONFIG_BFIN_ICACHE) || defined(CONFIG_BFIN_DCACHE)
static void
bfin_cache_init(struct cplb_entry *cplb_tbl, unsigned long cplb_addr,
unsigned long cplb_data, unsigned long mem_control,
unsigned long mem_mask)
{
int i;
for (i = 0; i < MAX_CPLBS; i++) {
bfin_write32(cplb_addr + i * 4, cplb_tbl[i].addr);
bfin_write32(cplb_data + i * 4, cplb_tbl[i].data);
}
_enable_cplb(mem_control, mem_mask);
}
#ifdef CONFIG_BFIN_ICACHE
void __cpuinit bfin_icache_init(struct cplb_entry *icplb_tbl)
{
bfin_cache_init(icplb_tbl, ICPLB_ADDR0, ICPLB_DATA0, IMEM_CONTROL,
(IMC | ENICPLB));
}
#endif
#ifdef CONFIG_BFIN_DCACHE
void __cpuinit bfin_dcache_init(struct cplb_entry *dcplb_tbl)
{
/*
* Anomaly notes:
* 05000287 - We implement workaround #2 - Change the DMEM_CONTROL
* register, so that the port preferences for DAG0 and DAG1 are set
* to port B
*/
bfin_cache_init(dcplb_tbl, DCPLB_ADDR0, DCPLB_DATA0, DMEM_CONTROL,
(DMEM_CNTR | PORT_PREF0 | (ANOMALY_05000287 ? PORT_PREF1 : 0)));
}
#endif
#endif


@@ -0,0 +1,124 @@
/*
* Blackfin cache control code
*
* Copyright 2004-2008 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#include <linux/linkage.h>
#include <asm/blackfin.h>
#include <asm/cache.h>
#include <asm/page.h>
/* 05000443 - IFLUSH cannot be last instruction in hardware loop */
#if ANOMALY_05000443
# define BROK_FLUSH_INST "IFLUSH"
#else
# define BROK_FLUSH_INST "no anomaly! yeah!"
#endif
/* Since all L1 caches work the same way, we use the same method for flushing
* them. Only the actual flush instruction differs. We write this in asm as
* GCC can be hard to coax into writing nice hardware loops.
*
* Also, we assume the following register setup:
* R0 = start address
* R1 = end address
*/
.macro do_flush flushins:req label
R2 = -L1_CACHE_BYTES;
/* start = (start & -L1_CACHE_BYTES) */
R0 = R0 & R2;
/* end = ((end - 1) & -L1_CACHE_BYTES) + L1_CACHE_BYTES; */
R1 += -1;
R1 = R1 & R2;
R1 += L1_CACHE_BYTES;
/* count = (end - start) >> L1_CACHE_SHIFT */
R2 = R1 - R0;
R2 >>= L1_CACHE_SHIFT;
P1 = R2;
.ifnb \label
\label :
.endif
P0 = R0;
LSETUP (1f, 2f) LC1 = P1;
1:
.ifeqs "\flushins", BROK_FLUSH_INST
\flushins [P0++];
nop;
nop;
2: nop;
.else
2: \flushins [P0++];
.endif
RTS;
.endm
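/* Worked example (added note, not part of the original source), assuming the
 * usual 32-byte Blackfin L1 line (L1_CACHE_BYTES = 32, L1_CACHE_SHIFT = 5):
 * flushing the range [0x1005, 0x1043) rounds start down to 0x1000 and end up
 * to 0x1060, so P1 = (0x1060 - 0x1000) >> 5 = 3 loop iterations.
 */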
#ifdef CONFIG_ICACHE_FLUSH_L1
.section .l1.text
#else
.text
#endif
/* Invalidate all instruction cache lines associated with this memory area */
#ifdef CONFIG_SMP
# define _blackfin_icache_flush_range _blackfin_icache_flush_range_l1
#endif
ENTRY(_blackfin_icache_flush_range)
do_flush IFLUSH
ENDPROC(_blackfin_icache_flush_range)
#ifdef CONFIG_SMP
.text
# undef _blackfin_icache_flush_range
ENTRY(_blackfin_icache_flush_range)
p0.L = LO(DSPID);
p0.H = HI(DSPID);
r3 = [p0];
r3 = r3.b (z);
p2 = r3;
p0.L = _blackfin_iflush_l1_entry;
p0.H = _blackfin_iflush_l1_entry;
p0 = p0 + (p2 << 2);
p1 = [p0];
jump (p1);
ENDPROC(_blackfin_icache_flush_range)
#endif
#ifdef CONFIG_DCACHE_FLUSH_L1
.section .l1.text
#else
.text
#endif
/* Throw away all D-cached data in specified region without any obligation to
* write them back. Since the Blackfin ISA does not have an "invalidate"
* instruction, we use flush/invalidate. Perhaps as a speed optimization we
* could bang on the DTEST MMRs ...
*/
ENTRY(_blackfin_dcache_invalidate_range)
do_flush FLUSHINV
ENDPROC(_blackfin_dcache_invalidate_range)
/* Flush all data cache lines associated with this memory area */
ENTRY(_blackfin_dcache_flush_range)
do_flush FLUSH, .Ldfr
ENDPROC(_blackfin_dcache_flush_range)
/* Our headers convert the page structure to an address, so we just need to
 * flush its contents like normal. We know the start address is page aligned
 * (which is greater than our cache alignment), as is the end address. So just jump into
* the middle of the dcache flush function.
*/
ENTRY(_blackfin_dflush_page)
P1 = 1 << (PAGE_SHIFT - L1_CACHE_SHIFT);
jump .Ldfr;
ENDPROC(_blackfin_dflush_page)


@@ -0,0 +1,96 @@
/*
* arch/blackfin/mach-common/clocks-init.c - reprogram clocks / memory
*
* Copyright 2004-2008 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/blackfin.h>
#include <asm/dma.h>
#include <asm/clocks.h>
#include <asm/mem_init.h>
#include <asm/dpmc.h>
#define SDGCTL_WIDTH (1 << 31) /* SDRAM external data path width */
#define PLL_CTL_VAL \
(((CONFIG_VCO_MULT & 63) << 9) | CLKIN_HALF | \
(PLL_BYPASS << 8) | (ANOMALY_05000305 ? 0 : 0x8000))
__attribute__((l1_text))
static void do_sync(void)
{
__builtin_bfin_ssync();
}
__attribute__((l1_text))
void init_clocks(void)
{
/* Kill any active DMAs as they may trigger external memory accesses
* in the middle of reprogramming things, and that'll screw us up.
* For example, any automatic DMAs left by U-Boot for splash screens.
*/
size_t i;
for (i = 0; i < MAX_DMA_CHANNELS; ++i) {
struct dma_register *dma = dma_io_base_addr[i];
dma->cfg = 0;
}
do_sync();
#ifdef SIC_IWR0
bfin_write_SIC_IWR0(IWR_ENABLE(0));
# ifdef SIC_IWR1
/* BF52x system reset does not properly reset SIC_IWR1 which
* will screw up the bootrom as it relies on MDMA0/1 waking it
* up from IDLE instructions. See this report for more info:
* http://blackfin.uclinux.org/gf/tracker/4323
*/
if (ANOMALY_05000435)
bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11));
else
bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
# endif
# ifdef SIC_IWR2
bfin_write_SIC_IWR2(IWR_DISABLE_ALL);
# endif
#else
bfin_write_SIC_IWR(IWR_ENABLE(0));
#endif
do_sync();
#ifdef EBIU_SDGCTL
bfin_write_EBIU_SDGCTL(bfin_read_EBIU_SDGCTL() | SRFS);
do_sync();
#endif
#ifdef CLKBUFOE
bfin_write16(VR_CTL, bfin_read_VR_CTL() | CLKBUFOE);
do_sync();
__asm__ __volatile__("IDLE;");
#endif
bfin_write_PLL_LOCKCNT(0x300);
do_sync();
/* We always write PLL_CTL thus avoiding Anomaly 05000242 */
bfin_write16(PLL_CTL, PLL_CTL_VAL);
__asm__ __volatile__("IDLE;");
bfin_write_PLL_DIV(CONFIG_CCLK_ACT_DIV | CONFIG_SCLK_DIV);
#ifdef EBIU_SDGCTL
bfin_write_EBIU_SDRRC(mem_SDRRC);
bfin_write_EBIU_SDGCTL((bfin_read_EBIU_SDGCTL() & SDGCTL_WIDTH) | mem_SDGCTL);
#else
bfin_write_EBIU_RSTCTL(bfin_read_EBIU_RSTCTL() & ~(SRREQ));
do_sync();
bfin_write_EBIU_RSTCTL(bfin_read_EBIU_RSTCTL() | 0x1);
bfin_write_EBIU_DDRCTL0(mem_DDRCTL0);
bfin_write_EBIU_DDRCTL1(mem_DDRCTL1);
bfin_write_EBIU_DDRCTL2(mem_DDRCTL2);
#ifdef CONFIG_MEM_EBIU_DDRQUE
bfin_write_EBIU_DDRQUE(CONFIG_MEM_EBIU_DDRQUE);
#endif
#endif
do_sync();
bfin_read16(0);
}


@@ -0,0 +1,220 @@
/*
* Blackfin core clock scaling
*
* Copyright 2008-2011 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/fs.h>
#include <linux/delay.h>
#include <asm/blackfin.h>
#include <asm/time.h>
#include <asm/dpmc.h>
/* this is the table of CCLK frequencies, in Hz */
/* .index is the entry in the auxiliary dpm_state_table[] */
static struct cpufreq_frequency_table bfin_freq_table[] = {
{
.frequency = CPUFREQ_TABLE_END,
.index = 0,
},
{
.frequency = CPUFREQ_TABLE_END,
.index = 1,
},
{
.frequency = CPUFREQ_TABLE_END,
.index = 2,
},
{
.frequency = CPUFREQ_TABLE_END,
.index = 0,
},
};
static struct bfin_dpm_state {
unsigned int csel; /* system clock divider */
unsigned int tscale; /* change the divider on the core timer interrupt */
} dpm_state_table[3];
#if defined(CONFIG_CYCLES_CLOCKSOURCE)
/*
* normalized to maximum frequency offset for CYCLES,
 * used in the time-ts cycles clock source, but could be used
 * elsewhere as well.
*/
unsigned long long __bfin_cycles_off;
unsigned int __bfin_cycles_mod;
#endif
/**************************************************************************/
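/* Added summary (not part of the original source): entry i of the frequency
 * table corresponds to a core clock of cclk >> i.  The loop below stops once
 * cclk >> i would fall below min_cclk, and records the matching CSEL divider
 * (already shifted into its PLL_DIV bit position) together with a TSCALE
 * value used later to compensate the cclk-driven core timer.
 */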
static void __init bfin_init_tables(unsigned long cclk, unsigned long sclk)
{
unsigned long csel, min_cclk;
int index;
/* Anomaly 273 seems to still exist on non-BF54x w/dcache turned on */
#if ANOMALY_05000273 || ANOMALY_05000274 || \
(!defined(CONFIG_BF54x) && defined(CONFIG_BFIN_EXTMEM_DCACHEABLE))
min_cclk = sclk * 2;
#else
min_cclk = sclk;
#endif
csel = ((bfin_read_PLL_DIV() & CSEL) >> 4);
for (index = 0; (cclk >> index) >= min_cclk && csel <= 3; index++, csel++) {
bfin_freq_table[index].frequency = cclk >> index;
dpm_state_table[index].csel = csel << 4; /* Shift now into PLL_DIV bitpos */
dpm_state_table[index].tscale = (TIME_SCALE / (1 << csel)) - 1;
pr_debug("cpufreq: freq:%d csel:0x%x tscale:%d\n",
bfin_freq_table[index].frequency,
dpm_state_table[index].csel,
dpm_state_table[index].tscale);
}
return;
}
static void bfin_adjust_core_timer(void *info)
{
unsigned int tscale;
unsigned int index = *(unsigned int *)info;
/* we have to adjust the core timer, because it is using cclk */
tscale = dpm_state_table[index].tscale;
bfin_write_TSCALE(tscale);
return;
}
static unsigned int bfin_getfreq_khz(unsigned int cpu)
{
/* Both CoreA/B have the same core clock */
return get_cclk() / 1000;
}
static int bfin_target(struct cpufreq_policy *poli,
unsigned int target_freq, unsigned int relation)
{
unsigned int index, plldiv, cpu;
unsigned long flags, cclk_hz;
struct cpufreq_freqs freqs;
static unsigned long lpj_ref;
static unsigned int lpj_ref_freq;
#if defined(CONFIG_CYCLES_CLOCKSOURCE)
cycles_t cycles;
#endif
for_each_online_cpu(cpu) {
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
if (!policy)
continue;
if (cpufreq_frequency_table_target(policy, bfin_freq_table,
target_freq, relation, &index))
return -EINVAL;
cclk_hz = bfin_freq_table[index].frequency;
freqs.old = bfin_getfreq_khz(0);
freqs.new = cclk_hz;
freqs.cpu = cpu;
pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n",
cclk_hz, target_freq, freqs.old);
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
if (cpu == CPUFREQ_CPU) {
flags = hard_local_irq_save();
plldiv = (bfin_read_PLL_DIV() & SSEL) |
dpm_state_table[index].csel;
bfin_write_PLL_DIV(plldiv);
on_each_cpu(bfin_adjust_core_timer, &index, 1);
#if defined(CONFIG_CYCLES_CLOCKSOURCE)
cycles = get_cycles();
SSYNC();
cycles += 10; /* ~10 cycles we lose after get_cycles() */
__bfin_cycles_off +=
(cycles << __bfin_cycles_mod) - (cycles << index);
__bfin_cycles_mod = index;
#endif
if (!lpj_ref_freq) {
lpj_ref = loops_per_jiffy;
lpj_ref_freq = freqs.old;
}
if (freqs.new != freqs.old) {
loops_per_jiffy = cpufreq_scale(lpj_ref,
lpj_ref_freq, freqs.new);
}
hard_local_irq_restore(flags);
}
/* TODO: just test case for cycles clock source, remove later */
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
pr_debug("cpufreq: done\n");
return 0;
}
static int bfin_verify_speed(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, bfin_freq_table);
}
static int __init __bfin_cpu_init(struct cpufreq_policy *policy)
{
unsigned long cclk, sclk;
cclk = get_cclk() / 1000;
sclk = get_sclk() / 1000;
if (policy->cpu == CPUFREQ_CPU)
bfin_init_tables(cclk, sclk);
policy->cpuinfo.transition_latency = 50000; /* 50us assumed */
policy->cur = cclk;
cpufreq_frequency_table_get_attr(bfin_freq_table, policy->cpu);
return cpufreq_frequency_table_cpuinfo(policy, bfin_freq_table);
}
static struct freq_attr *bfin_freq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver bfin_driver = {
.verify = bfin_verify_speed,
.target = bfin_target,
.get = bfin_getfreq_khz,
.init = __bfin_cpu_init,
.name = "bfin cpufreq",
.owner = THIS_MODULE,
.attr = bfin_freq_attr,
};
static int __init bfin_cpu_init(void)
{
return cpufreq_register_driver(&bfin_driver);
}
static void __exit bfin_cpu_exit(void)
{
cpufreq_unregister_driver(&bfin_driver);
}
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("cpufreq driver for Blackfin");
MODULE_LICENSE("GPL");
module_init(bfin_cpu_init);
module_exit(bfin_cpu_exit);


@@ -0,0 +1,181 @@
/*
* Copyright 2008 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/cpufreq.h>
#include <asm/delay.h>
#include <asm/dpmc.h>
#define DRIVER_NAME "bfin dpmc"
struct bfin_dpmc_platform_data *pdata;
/**
* bfin_set_vlev - Update VLEV field in VR_CTL Reg.
* Avoid BYPASS sequence
*/
static void bfin_set_vlev(unsigned int vlev)
{
unsigned pll_lcnt;
pll_lcnt = bfin_read_PLL_LOCKCNT();
bfin_write_PLL_LOCKCNT(1);
bfin_write_VR_CTL((bfin_read_VR_CTL() & ~VLEV) | vlev);
bfin_write_PLL_LOCKCNT(pll_lcnt);
}
/**
* bfin_get_vlev - Get CPU specific VLEV from platform device data
*/
static unsigned int bfin_get_vlev(unsigned int freq)
{
int i;
if (!pdata)
goto err_out;
freq >>= 16;
for (i = 0; i < pdata->tabsize; i++)
if (freq <= (pdata->tuple_tab[i] & 0xFFFF))
return pdata->tuple_tab[i] >> 16;
err_out:
printk(KERN_WARNING "DPMC: No suitable CCLK VDDINT voltage pair found\n");
return VLEV_120;
}
#ifdef CONFIG_CPU_FREQ
# ifdef CONFIG_SMP
static void bfin_idle_this_cpu(void *info)
{
unsigned long flags = 0;
unsigned long iwr0, iwr1, iwr2;
unsigned int cpu = smp_processor_id();
local_irq_save_hw(flags);
bfin_iwr_set_sup0(&iwr0, &iwr1, &iwr2);
platform_clear_ipi(cpu, IRQ_SUPPLE_0);
SSYNC();
asm("IDLE;");
bfin_iwr_restore(iwr0, iwr1, iwr2);
local_irq_restore_hw(flags);
}
static void bfin_idle_cpu(void)
{
smp_call_function(bfin_idle_this_cpu, NULL, 0);
}
static void bfin_wakeup_cpu(void)
{
unsigned int cpu;
unsigned int this_cpu = smp_processor_id();
cpumask_t mask;
cpumask_copy(&mask, cpu_online_mask);
cpumask_clear_cpu(this_cpu, &mask);
for_each_cpu(cpu, &mask)
platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0);
}
# else
static void bfin_idle_cpu(void) {}
static void bfin_wakeup_cpu(void) {}
# endif
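/* Added note (not part of the original source): the ordering below matters --
 * VDDINT is raised *before* a frequency increase (PRECHANGE with old < new)
 * and only lowered *after* a frequency decrease (POSTCHANGE with old > new),
 * so the core voltage is always sufficient for the CCLK currently in use.
 */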
static int
vreg_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
{
struct cpufreq_freqs *freq = data;
if (freq->cpu != CPUFREQ_CPU)
return 0;
if (val == CPUFREQ_PRECHANGE && freq->old < freq->new) {
bfin_idle_cpu();
bfin_set_vlev(bfin_get_vlev(freq->new));
udelay(pdata->vr_settling_time); /* Wait until the voltage has settled */
bfin_wakeup_cpu();
} else if (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) {
bfin_idle_cpu();
bfin_set_vlev(bfin_get_vlev(freq->new));
bfin_wakeup_cpu();
}
return 0;
}
static struct notifier_block vreg_cpufreq_notifier_block = {
.notifier_call = vreg_cpufreq_notifier
};
#endif /* CONFIG_CPU_FREQ */
/**
 * bfin_dpmc_probe - bind the platform data and register the cpufreq
 * transition notifier
*/
static int __devinit bfin_dpmc_probe(struct platform_device *pdev)
{
if (pdev->dev.platform_data)
pdata = pdev->dev.platform_data;
else
return -EINVAL;
return cpufreq_register_notifier(&vreg_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
}
/**
 * bfin_dpmc_remove - unregister the cpufreq transition notifier
*/
static int __devexit bfin_dpmc_remove(struct platform_device *pdev)
{
pdata = NULL;
return cpufreq_unregister_notifier(&vreg_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
}
struct platform_driver bfin_dpmc_device_driver = {
.probe = bfin_dpmc_probe,
.remove = __devexit_p(bfin_dpmc_remove),
.driver = {
.name = DRIVER_NAME,
}
};
/**
* bfin_dpmc_init - Init driver
*/
static int __init bfin_dpmc_init(void)
{
return platform_driver_register(&bfin_dpmc_device_driver);
}
module_init(bfin_dpmc_init);
/**
 * bfin_dpmc_exit - tear down driver
*/
static void __exit bfin_dpmc_exit(void)
{
platform_driver_unregister(&bfin_dpmc_device_driver);
}
module_exit(bfin_dpmc_exit);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("cpu power management driver for Blackfin");
MODULE_LICENSE("GPL");


@@ -0,0 +1,894 @@
/*
* Copyright 2004-2008 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#include <linux/linkage.h>
#include <asm/blackfin.h>
#include <mach/irq.h>
#include <asm/dpmc.h>
.section .l1.text
ENTRY(_sleep_mode)
[--SP] = (R7:4, P5:3);
[--SP] = RETS;
call _set_sic_iwr;
P0.H = hi(PLL_CTL);
P0.L = lo(PLL_CTL);
R1 = W[P0](z);
BITSET (R1, 3);
W[P0] = R1.L;
CLI R2;
SSYNC;
IDLE;
STI R2;
call _test_pll_locked;
R0 = IWR_ENABLE(0);
R1 = IWR_DISABLE_ALL;
R2 = IWR_DISABLE_ALL;
call _set_sic_iwr;
P0.H = hi(PLL_CTL);
P0.L = lo(PLL_CTL);
R7 = w[p0](z);
BITCLR (R7, 3);
BITCLR (R7, 5);
w[p0] = R7.L;
IDLE;
call _test_pll_locked;
RETS = [SP++];
(R7:4, P5:3) = [SP++];
RTS;
ENDPROC(_sleep_mode)
/*
* This func never returns as it puts the part into hibernate, and
* is only called from do_hibernate, so we don't bother saving or
* restoring any of the normal C runtime state. When we wake up,
* the entry point will be in do_hibernate and not here.
*
* We accept just one argument -- the value to write to VR_CTL.
*/
ENTRY(_hibernate_mode)
/* Save/setup the regs we need early for minor pipeline optimization */
R4 = R0;
P3.H = hi(VR_CTL);
P3.L = lo(VR_CTL);
/* Disable all wakeup sources */
R0 = IWR_DISABLE_ALL;
R1 = IWR_DISABLE_ALL;
R2 = IWR_DISABLE_ALL;
call _set_sic_iwr;
call _set_dram_srfs;
SSYNC;
/* Finally, we climb into our cave to hibernate */
W[P3] = R4.L;
CLI R2;
IDLE;
.Lforever:
jump .Lforever;
ENDPROC(_hibernate_mode)
ENTRY(_sleep_deeper)
[--SP] = (R7:4, P5:3);
[--SP] = RETS;
CLI R4;
P3 = R0;
P4 = R1;
P5 = R2;
R0 = IWR_ENABLE(0);
R1 = IWR_DISABLE_ALL;
R2 = IWR_DISABLE_ALL;
call _set_sic_iwr;
call _set_dram_srfs; /* Set SDRAM Self Refresh */
P0.H = hi(PLL_DIV);
P0.L = lo(PLL_DIV);
R6 = W[P0](z);
R0.L = 0xF;
W[P0] = R0.l; /* Set Max VCO to SCLK divider */
P0.H = hi(PLL_CTL);
P0.L = lo(PLL_CTL);
R5 = W[P0](z);
R0.L = (CONFIG_MIN_VCO_HZ/CONFIG_CLKIN_HZ) << 9;
W[P0] = R0.l; /* Set Min CLKIN to VCO multiplier */
SSYNC;
IDLE;
call _test_pll_locked;
P0.H = hi(VR_CTL);
P0.L = lo(VR_CTL);
R7 = W[P0](z);
R1 = 0x6;
R1 <<= 16;
R2 = 0x0404(Z);
R1 = R1|R2;
R2 = DEPOSIT(R7, R1);
W[P0] = R2; /* Set Min Core Voltage */
SSYNC;
IDLE;
call _test_pll_locked;
R0 = P3;
R1 = P4;
R3 = P5;
call _set_sic_iwr; /* Set Awake from IDLE */
P0.H = hi(PLL_CTL);
P0.L = lo(PLL_CTL);
R0 = W[P0](z);
BITSET (R0, 3);
W[P0] = R0.L; /* Turn CCLK OFF */
SSYNC;
IDLE;
call _test_pll_locked;
R0 = IWR_ENABLE(0);
R1 = IWR_DISABLE_ALL;
R2 = IWR_DISABLE_ALL;
call _set_sic_iwr; /* Set Awake from IDLE PLL */
P0.H = hi(VR_CTL);
P0.L = lo(VR_CTL);
W[P0]= R7;
SSYNC;
IDLE;
call _test_pll_locked;
P0.H = hi(PLL_DIV);
P0.L = lo(PLL_DIV);
W[P0]= R6; /* Restore CCLK and SCLK divider */
P0.H = hi(PLL_CTL);
P0.L = lo(PLL_CTL);
w[p0] = R5; /* Restore VCO multiplier */
IDLE;
call _test_pll_locked;
call _unset_dram_srfs; /* SDRAM Self Refresh Off */
STI R4;
RETS = [SP++];
(R7:4, P5:3) = [SP++];
RTS;
ENDPROC(_sleep_deeper)
ENTRY(_set_dram_srfs)
/* set the dram to self refresh mode */
SSYNC;
#if defined(EBIU_RSTCTL) /* DDR */
P0.H = hi(EBIU_RSTCTL);
P0.L = lo(EBIU_RSTCTL);
R2 = [P0];
BITSET(R2, 3); /* SRREQ enter self-refresh mode */
[P0] = R2;
SSYNC;
1:
R2 = [P0];
CC = BITTST(R2, 4);
if !CC JUMP 1b;
#else /* SDRAM */
P0.L = lo(EBIU_SDGCTL);
P0.H = hi(EBIU_SDGCTL);
P1.L = lo(EBIU_SDSTAT);
P1.H = hi(EBIU_SDSTAT);
R2 = [P0];
BITSET(R2, 24); /* SRFS enter self-refresh mode */
[P0] = R2;
SSYNC;
1:
R2 = w[P1];
SSYNC;
cc = BITTST(R2, 1); /* SDSRA poll self-refresh status */
if !cc jump 1b;
R2 = [P0];
BITCLR(R2, 0); /* SCTLE disable CLKOUT */
[P0] = R2;
#endif
RTS;
ENDPROC(_set_dram_srfs)
ENTRY(_unset_dram_srfs)
/* set the dram out of self refresh mode */
#if defined(EBIU_RSTCTL) /* DDR */
P0.H = hi(EBIU_RSTCTL);
P0.L = lo(EBIU_RSTCTL);
R2 = [P0];
BITCLR(R2, 3); /* clear SRREQ bit */
[P0] = R2;
#elif defined(EBIU_SDGCTL) /* SDRAM */
/* release CLKOUT from self-refresh */
P0.L = lo(EBIU_SDGCTL);
P0.H = hi(EBIU_SDGCTL);
R2 = [P0];
BITSET(R2, 0); /* SCTLE enable CLKOUT */
[P0] = R2;
SSYNC;
/* release SDRAM from self-refresh */
R2 = [P0];
BITCLR(R2, 24); /* clear SRFS bit */
[P0] = R2;
#endif
SSYNC;
RTS;
ENDPROC(_unset_dram_srfs)
ENTRY(_set_sic_iwr)
#ifdef SIC_IWR0
P0.H = hi(SYSMMR_BASE);
P0.L = lo(SYSMMR_BASE);
[P0 + (SIC_IWR0 - SYSMMR_BASE)] = R0;
[P0 + (SIC_IWR1 - SYSMMR_BASE)] = R1;
# ifdef SIC_IWR2
[P0 + (SIC_IWR2 - SYSMMR_BASE)] = R2;
# endif
#else
P0.H = hi(SIC_IWR);
P0.L = lo(SIC_IWR);
[P0] = R0;
#endif
SSYNC;
RTS;
ENDPROC(_set_sic_iwr)
ENTRY(_test_pll_locked)
P0.H = hi(PLL_STAT);
P0.L = lo(PLL_STAT);
1:
R0 = W[P0] (Z);
CC = BITTST(R0,5);
IF !CC JUMP 1b;
RTS;
ENDPROC(_test_pll_locked)
.section .text
#define PM_REG0 R7
#define PM_REG1 R6
#define PM_REG2 R5
#define PM_REG3 R4
#define PM_REG4 R3
#define PM_REG5 R2
#define PM_REG6 R1
#define PM_REG7 R0
#define PM_REG8 P5
#define PM_REG9 P4
#define PM_REG10 P3
#define PM_REG11 P2
#define PM_REG12 P1
#define PM_REG13 P0
#define PM_REGSET0 R7:7
#define PM_REGSET1 R7:6
#define PM_REGSET2 R7:5
#define PM_REGSET3 R7:4
#define PM_REGSET4 R7:3
#define PM_REGSET5 R7:2
#define PM_REGSET6 R7:1
#define PM_REGSET7 R7:0
#define PM_REGSET8 R7:0, P5:5
#define PM_REGSET9 R7:0, P5:4
#define PM_REGSET10 R7:0, P5:3
#define PM_REGSET11 R7:0, P5:2
#define PM_REGSET12 R7:0, P5:1
#define PM_REGSET13 R7:0, P5:0
#define _PM_PUSH(n, x, w, base) PM_REG##n = w[FP + ((x) - (base))];
#define _PM_POP(n, x, w, base) w[FP + ((x) - (base))] = PM_REG##n;
#define PM_PUSH_SYNC(n) [--sp] = (PM_REGSET##n);
#define PM_POP_SYNC(n) (PM_REGSET##n) = [sp++];
#define PM_PUSH(n, x) PM_REG##n = [FP++];
#define PM_POP(n, x) [FP--] = PM_REG##n;
#define PM_CORE_PUSH(n, x) _PM_PUSH(n, x, , COREMMR_BASE)
#define PM_CORE_POP(n, x) _PM_POP(n, x, , COREMMR_BASE)
#define PM_SYS_PUSH(n, x) _PM_PUSH(n, x, , SYSMMR_BASE)
#define PM_SYS_POP(n, x) _PM_POP(n, x, , SYSMMR_BASE)
#define PM_SYS_PUSH16(n, x) _PM_PUSH(n, x, w, SYSMMR_BASE)
#define PM_SYS_POP16(n, x) _PM_POP(n, x, w, SYSMMR_BASE)
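/* Illustrative expansion (added note, not part of the original source): with
 * FP holding SYSMMR_BASE, PM_SYS_PUSH(0, SIC_IMASK0) expands to
 *     R7 = [FP + ((SIC_IMASK0) - (SYSMMR_BASE))];
 * i.e. the MMR value is read into scratch register PM_REG0 (R7), and a later
 * PM_PUSH_SYNC(13) spills the whole scratch set to the stack as
 *     [--sp] = (R7:0, P5:0);
 */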
ENTRY(_do_hibernate)
/*
* Save the core regs early so we can blow them away when
* saving/restoring MMR states
*/
[--sp] = (R7:0, P5:0);
[--sp] = fp;
[--sp] = usp;
[--sp] = i0;
[--sp] = i1;
[--sp] = i2;
[--sp] = i3;
[--sp] = m0;
[--sp] = m1;
[--sp] = m2;
[--sp] = m3;
[--sp] = l0;
[--sp] = l1;
[--sp] = l2;
[--sp] = l3;
[--sp] = b0;
[--sp] = b1;
[--sp] = b2;
[--sp] = b3;
[--sp] = a0.x;
[--sp] = a0.w;
[--sp] = a1.x;
[--sp] = a1.w;
[--sp] = LC0;
[--sp] = LC1;
[--sp] = LT0;
[--sp] = LT1;
[--sp] = LB0;
[--sp] = LB1;
/* We can't push RETI directly as that'll change IPEND[4] */
r7 = RETI;
[--sp] = RETS;
[--sp] = ASTAT;
[--sp] = CYCLES;
[--sp] = CYCLES2;
[--sp] = SYSCFG;
[--sp] = RETX;
[--sp] = SEQSTAT;
[--sp] = r7;
/* Save first func arg in M3 */
M3 = R0;
/* Save system MMRs */
FP.H = hi(SYSMMR_BASE);
FP.L = lo(SYSMMR_BASE);
#ifdef SIC_IMASK0
PM_SYS_PUSH(0, SIC_IMASK0)
PM_SYS_PUSH(1, SIC_IMASK1)
# ifdef SIC_IMASK2
PM_SYS_PUSH(2, SIC_IMASK2)
# endif
#else
PM_SYS_PUSH(0, SIC_IMASK)
#endif
#ifdef SIC_IAR0
PM_SYS_PUSH(3, SIC_IAR0)
PM_SYS_PUSH(4, SIC_IAR1)
PM_SYS_PUSH(5, SIC_IAR2)
#endif
#ifdef SIC_IAR3
PM_SYS_PUSH(6, SIC_IAR3)
#endif
#ifdef SIC_IAR4
PM_SYS_PUSH(7, SIC_IAR4)
PM_SYS_PUSH(8, SIC_IAR5)
PM_SYS_PUSH(9, SIC_IAR6)
#endif
#ifdef SIC_IAR7
PM_SYS_PUSH(10, SIC_IAR7)
#endif
#ifdef SIC_IAR8
PM_SYS_PUSH(11, SIC_IAR8)
PM_SYS_PUSH(12, SIC_IAR9)
PM_SYS_PUSH(13, SIC_IAR10)
#endif
PM_PUSH_SYNC(13)
#ifdef SIC_IAR11
PM_SYS_PUSH(0, SIC_IAR11)
#endif
#ifdef SIC_IWR
PM_SYS_PUSH(1, SIC_IWR)
#endif
#ifdef SIC_IWR0
PM_SYS_PUSH(1, SIC_IWR0)
#endif
#ifdef SIC_IWR1
PM_SYS_PUSH(2, SIC_IWR1)
#endif
#ifdef SIC_IWR2
PM_SYS_PUSH(3, SIC_IWR2)
#endif
#ifdef PINT0_ASSIGN
PM_SYS_PUSH(4, PINT0_MASK_SET)
PM_SYS_PUSH(5, PINT1_MASK_SET)
PM_SYS_PUSH(6, PINT2_MASK_SET)
PM_SYS_PUSH(7, PINT3_MASK_SET)
PM_SYS_PUSH(8, PINT0_ASSIGN)
PM_SYS_PUSH(9, PINT1_ASSIGN)
PM_SYS_PUSH(10, PINT2_ASSIGN)
PM_SYS_PUSH(11, PINT3_ASSIGN)
PM_SYS_PUSH(12, PINT0_INVERT_SET)
PM_SYS_PUSH(13, PINT1_INVERT_SET)
PM_PUSH_SYNC(13)
PM_SYS_PUSH(0, PINT2_INVERT_SET)
PM_SYS_PUSH(1, PINT3_INVERT_SET)
PM_SYS_PUSH(2, PINT0_EDGE_SET)
PM_SYS_PUSH(3, PINT1_EDGE_SET)
PM_SYS_PUSH(4, PINT2_EDGE_SET)
PM_SYS_PUSH(5, PINT3_EDGE_SET)
#endif
PM_SYS_PUSH16(6, SYSCR)
PM_SYS_PUSH16(7, EBIU_AMGCTL)
PM_SYS_PUSH(8, EBIU_AMBCTL0)
PM_SYS_PUSH(9, EBIU_AMBCTL1)
#ifdef EBIU_FCTL
PM_SYS_PUSH(10, EBIU_MBSCTL)
PM_SYS_PUSH(11, EBIU_MODE)
PM_SYS_PUSH(12, EBIU_FCTL)
PM_PUSH_SYNC(12)
#else
PM_PUSH_SYNC(9)
#endif
/* Save Core MMRs */
I0.H = hi(COREMMR_BASE);
I0.L = lo(COREMMR_BASE);
I1 = I0;
I2 = I0;
I3 = I0;
B0 = I0;
B1 = I0;
B2 = I0;
B3 = I0;
I1.L = lo(DCPLB_ADDR0);
I2.L = lo(DCPLB_DATA0);
I3.L = lo(ICPLB_ADDR0);
B0.L = lo(ICPLB_DATA0);
B1.L = lo(EVT2);
B2.L = lo(IMASK);
B3.L = lo(TCNTL);
/* DCPLB Addr */
FP = I1;
PM_PUSH(0, DCPLB_ADDR0)
PM_PUSH(1, DCPLB_ADDR1)
PM_PUSH(2, DCPLB_ADDR2)
PM_PUSH(3, DCPLB_ADDR3)
PM_PUSH(4, DCPLB_ADDR4)
PM_PUSH(5, DCPLB_ADDR5)
PM_PUSH(6, DCPLB_ADDR6)
PM_PUSH(7, DCPLB_ADDR7)
PM_PUSH(8, DCPLB_ADDR8)
PM_PUSH(9, DCPLB_ADDR9)
PM_PUSH(10, DCPLB_ADDR10)
PM_PUSH(11, DCPLB_ADDR11)
PM_PUSH(12, DCPLB_ADDR12)
PM_PUSH(13, DCPLB_ADDR13)
PM_PUSH_SYNC(13)
PM_PUSH(0, DCPLB_ADDR14)
PM_PUSH(1, DCPLB_ADDR15)
/* DCPLB Data */
FP = I2;
PM_PUSH(2, DCPLB_DATA0)
PM_PUSH(3, DCPLB_DATA1)
PM_PUSH(4, DCPLB_DATA2)
PM_PUSH(5, DCPLB_DATA3)
PM_PUSH(6, DCPLB_DATA4)
PM_PUSH(7, DCPLB_DATA5)
PM_PUSH(8, DCPLB_DATA6)
PM_PUSH(9, DCPLB_DATA7)
PM_PUSH(10, DCPLB_DATA8)
PM_PUSH(11, DCPLB_DATA9)
PM_PUSH(12, DCPLB_DATA10)
PM_PUSH(13, DCPLB_DATA11)
PM_PUSH_SYNC(13)
PM_PUSH(0, DCPLB_DATA12)
PM_PUSH(1, DCPLB_DATA13)
PM_PUSH(2, DCPLB_DATA14)
PM_PUSH(3, DCPLB_DATA15)
/* ICPLB Addr */
FP = I3;
PM_PUSH(4, ICPLB_ADDR0)
PM_PUSH(5, ICPLB_ADDR1)
PM_PUSH(6, ICPLB_ADDR2)
PM_PUSH(7, ICPLB_ADDR3)
PM_PUSH(8, ICPLB_ADDR4)
PM_PUSH(9, ICPLB_ADDR5)
PM_PUSH(10, ICPLB_ADDR6)
PM_PUSH(11, ICPLB_ADDR7)
PM_PUSH(12, ICPLB_ADDR8)
PM_PUSH(13, ICPLB_ADDR9)
PM_PUSH_SYNC(13)
PM_PUSH(0, ICPLB_ADDR10)
PM_PUSH(1, ICPLB_ADDR11)
PM_PUSH(2, ICPLB_ADDR12)
PM_PUSH(3, ICPLB_ADDR13)
PM_PUSH(4, ICPLB_ADDR14)
PM_PUSH(5, ICPLB_ADDR15)
/* ICPLB Data */
FP = B0;
PM_PUSH(6, ICPLB_DATA0)
PM_PUSH(7, ICPLB_DATA1)
PM_PUSH(8, ICPLB_DATA2)
PM_PUSH(9, ICPLB_DATA3)
PM_PUSH(10, ICPLB_DATA4)
PM_PUSH(11, ICPLB_DATA5)
PM_PUSH(12, ICPLB_DATA6)
PM_PUSH(13, ICPLB_DATA7)
PM_PUSH_SYNC(13)
PM_PUSH(0, ICPLB_DATA8)
PM_PUSH(1, ICPLB_DATA9)
PM_PUSH(2, ICPLB_DATA10)
PM_PUSH(3, ICPLB_DATA11)
PM_PUSH(4, ICPLB_DATA12)
PM_PUSH(5, ICPLB_DATA13)
PM_PUSH(6, ICPLB_DATA14)
PM_PUSH(7, ICPLB_DATA15)
/* Event Vectors */
FP = B1;
PM_PUSH(8, EVT2)
PM_PUSH(9, EVT3)
FP += 4; /* EVT4 */
PM_PUSH(10, EVT5)
PM_PUSH(11, EVT6)
PM_PUSH(12, EVT7)
PM_PUSH(13, EVT8)
PM_PUSH_SYNC(13)
PM_PUSH(0, EVT9)
PM_PUSH(1, EVT10)
PM_PUSH(2, EVT11)
PM_PUSH(3, EVT12)
PM_PUSH(4, EVT13)
PM_PUSH(5, EVT14)
PM_PUSH(6, EVT15)
/* CEC */
FP = B2;
PM_PUSH(7, IMASK)
FP += 4; /* IPEND */
PM_PUSH(8, ILAT)
PM_PUSH(9, IPRIO)
/* Core Timer */
FP = B3;
PM_PUSH(10, TCNTL)
PM_PUSH(11, TPERIOD)
PM_PUSH(12, TSCALE)
PM_PUSH(13, TCOUNT)
PM_PUSH_SYNC(13)
/* Misc non-contiguous registers */
FP = I0;
PM_CORE_PUSH(0, DMEM_CONTROL);
PM_CORE_PUSH(1, IMEM_CONTROL);
PM_CORE_PUSH(2, TBUFCTL);
PM_PUSH_SYNC(2)
/* Setup args to hibernate mode early for pipeline optimization */
R0 = M3;
P1.H = _hibernate_mode;
P1.L = _hibernate_mode;
/* Save Magic, return address and Stack Pointer */
P0 = 0;
R1.H = 0xDEAD; /* Hibernate Magic */
R1.L = 0xBEEF;
R2.H = .Lpm_resume_here;
R2.L = .Lpm_resume_here;
[P0++] = R1; /* Store Hibernate Magic */
[P0++] = R2; /* Save Return Address */
[P0++] = SP; /* Save Stack Pointer */
/* Must use an indirect call as we need to jump to L1 */
call (P1); /* Goodbye */
.Lpm_resume_here:
/* Restore Core MMRs */
I0.H = hi(COREMMR_BASE);
I0.L = lo(COREMMR_BASE);
I1 = I0;
I2 = I0;
I3 = I0;
B0 = I0;
B1 = I0;
B2 = I0;
B3 = I0;
I1.L = lo(DCPLB_ADDR15);
I2.L = lo(DCPLB_DATA15);
I3.L = lo(ICPLB_ADDR15);
B0.L = lo(ICPLB_DATA15);
B1.L = lo(EVT15);
B2.L = lo(IPRIO);
B3.L = lo(TCOUNT);
/* Misc non-contiguous registers */
FP = I0;
PM_POP_SYNC(2)
PM_CORE_POP(2, TBUFCTL)
PM_CORE_POP(1, IMEM_CONTROL)
PM_CORE_POP(0, DMEM_CONTROL)
/* Core Timer */
PM_POP_SYNC(13)
FP = B3;
PM_POP(13, TCOUNT)
PM_POP(12, TSCALE)
PM_POP(11, TPERIOD)
PM_POP(10, TCNTL)
/* CEC */
FP = B2;
PM_POP(9, IPRIO)
PM_POP(8, ILAT)
FP += -4; /* IPEND */
PM_POP(7, IMASK)
/* Event Vectors */
FP = B1;
PM_POP(6, EVT15)
PM_POP(5, EVT14)
PM_POP(4, EVT13)
PM_POP(3, EVT12)
PM_POP(2, EVT11)
PM_POP(1, EVT10)
PM_POP(0, EVT9)
PM_POP_SYNC(13)
PM_POP(13, EVT8)
PM_POP(12, EVT7)
PM_POP(11, EVT6)
PM_POP(10, EVT5)
FP += -4; /* EVT4 */
PM_POP(9, EVT3)
PM_POP(8, EVT2)
/* ICPLB Data */
FP = B0;
PM_POP(7, ICPLB_DATA15)
PM_POP(6, ICPLB_DATA14)
PM_POP(5, ICPLB_DATA13)
PM_POP(4, ICPLB_DATA12)
PM_POP(3, ICPLB_DATA11)
PM_POP(2, ICPLB_DATA10)
PM_POP(1, ICPLB_DATA9)
PM_POP(0, ICPLB_DATA8)
PM_POP_SYNC(13)
PM_POP(13, ICPLB_DATA7)
PM_POP(12, ICPLB_DATA6)
PM_POP(11, ICPLB_DATA5)
PM_POP(10, ICPLB_DATA4)
PM_POP(9, ICPLB_DATA3)
PM_POP(8, ICPLB_DATA2)
PM_POP(7, ICPLB_DATA1)
PM_POP(6, ICPLB_DATA0)
/* ICPLB Addr */
FP = I3;
PM_POP(5, ICPLB_ADDR15)
PM_POP(4, ICPLB_ADDR14)
PM_POP(3, ICPLB_ADDR13)
PM_POP(2, ICPLB_ADDR12)
PM_POP(1, ICPLB_ADDR11)
PM_POP(0, ICPLB_ADDR10)
PM_POP_SYNC(13)
PM_POP(13, ICPLB_ADDR9)
PM_POP(12, ICPLB_ADDR8)
PM_POP(11, ICPLB_ADDR7)
PM_POP(10, ICPLB_ADDR6)
PM_POP(9, ICPLB_ADDR5)
PM_POP(8, ICPLB_ADDR4)
PM_POP(7, ICPLB_ADDR3)
PM_POP(6, ICPLB_ADDR2)
PM_POP(5, ICPLB_ADDR1)
PM_POP(4, ICPLB_ADDR0)
/* DCPLB Data */
FP = I2;
PM_POP(3, DCPLB_DATA15)
PM_POP(2, DCPLB_DATA14)
PM_POP(1, DCPLB_DATA13)
PM_POP(0, DCPLB_DATA12)
PM_POP_SYNC(13)
PM_POP(13, DCPLB_DATA11)
PM_POP(12, DCPLB_DATA10)
PM_POP(11, DCPLB_DATA9)
PM_POP(10, DCPLB_DATA8)
PM_POP(9, DCPLB_DATA7)
PM_POP(8, DCPLB_DATA6)
PM_POP(7, DCPLB_DATA5)
PM_POP(6, DCPLB_DATA4)
PM_POP(5, DCPLB_DATA3)
PM_POP(4, DCPLB_DATA2)
PM_POP(3, DCPLB_DATA1)
PM_POP(2, DCPLB_DATA0)
/* DCPLB Addr */
FP = I1;
PM_POP(1, DCPLB_ADDR15)
PM_POP(0, DCPLB_ADDR14)
PM_POP_SYNC(13)
PM_POP(13, DCPLB_ADDR13)
PM_POP(12, DCPLB_ADDR12)
PM_POP(11, DCPLB_ADDR11)
PM_POP(10, DCPLB_ADDR10)
PM_POP(9, DCPLB_ADDR9)
PM_POP(8, DCPLB_ADDR8)
PM_POP(7, DCPLB_ADDR7)
PM_POP(6, DCPLB_ADDR6)
PM_POP(5, DCPLB_ADDR5)
PM_POP(4, DCPLB_ADDR4)
PM_POP(3, DCPLB_ADDR3)
PM_POP(2, DCPLB_ADDR2)
PM_POP(1, DCPLB_ADDR1)
PM_POP(0, DCPLB_ADDR0)
/* Restore System MMRs */
FP.H = hi(SYSMMR_BASE);
FP.L = lo(SYSMMR_BASE);
#ifdef EBIU_FCTL
PM_POP_SYNC(12)
PM_SYS_POP(12, EBIU_FCTL)
PM_SYS_POP(11, EBIU_MODE)
PM_SYS_POP(10, EBIU_MBSCTL)
#else
PM_POP_SYNC(9)
#endif
PM_SYS_POP(9, EBIU_AMBCTL1)
PM_SYS_POP(8, EBIU_AMBCTL0)
PM_SYS_POP16(7, EBIU_AMGCTL)
PM_SYS_POP16(6, SYSCR)
#ifdef PINT0_ASSIGN
PM_SYS_POP(5, PINT3_EDGE_SET)
PM_SYS_POP(4, PINT2_EDGE_SET)
PM_SYS_POP(3, PINT1_EDGE_SET)
PM_SYS_POP(2, PINT0_EDGE_SET)
PM_SYS_POP(1, PINT3_INVERT_SET)
PM_SYS_POP(0, PINT2_INVERT_SET)
PM_POP_SYNC(13)
PM_SYS_POP(13, PINT1_INVERT_SET)
PM_SYS_POP(12, PINT0_INVERT_SET)
PM_SYS_POP(11, PINT3_ASSIGN)
PM_SYS_POP(10, PINT2_ASSIGN)
PM_SYS_POP(9, PINT1_ASSIGN)
PM_SYS_POP(8, PINT0_ASSIGN)
PM_SYS_POP(7, PINT3_MASK_SET)
PM_SYS_POP(6, PINT2_MASK_SET)
PM_SYS_POP(5, PINT1_MASK_SET)
PM_SYS_POP(4, PINT0_MASK_SET)
#endif
#ifdef SIC_IWR2
PM_SYS_POP(3, SIC_IWR2)
#endif
#ifdef SIC_IWR1
PM_SYS_POP(2, SIC_IWR1)
#endif
#ifdef SIC_IWR0
PM_SYS_POP(1, SIC_IWR0)
#endif
#ifdef SIC_IWR
PM_SYS_POP(1, SIC_IWR)
#endif
#ifdef SIC_IAR11
PM_SYS_POP(0, SIC_IAR11)
#endif
PM_POP_SYNC(13)
#ifdef SIC_IAR8
PM_SYS_POP(13, SIC_IAR10)
PM_SYS_POP(12, SIC_IAR9)
PM_SYS_POP(11, SIC_IAR8)
#endif
#ifdef SIC_IAR7
PM_SYS_POP(10, SIC_IAR7)
#endif
#ifdef SIC_IAR6
PM_SYS_POP(9, SIC_IAR6)
PM_SYS_POP(8, SIC_IAR5)
PM_SYS_POP(7, SIC_IAR4)
#endif
#ifdef SIC_IAR3
PM_SYS_POP(6, SIC_IAR3)
#endif
#ifdef SIC_IAR0
PM_SYS_POP(5, SIC_IAR2)
PM_SYS_POP(4, SIC_IAR1)
PM_SYS_POP(3, SIC_IAR0)
#endif
#ifdef SIC_IMASK0
# ifdef SIC_IMASK2
PM_SYS_POP(2, SIC_IMASK2)
# endif
PM_SYS_POP(1, SIC_IMASK1)
PM_SYS_POP(0, SIC_IMASK0)
#else
PM_SYS_POP(0, SIC_IMASK)
#endif
/* Restore Core Registers */
RETI = [sp++];
SEQSTAT = [sp++];
RETX = [sp++];
SYSCFG = [sp++];
CYCLES2 = [sp++];
CYCLES = [sp++];
ASTAT = [sp++];
RETS = [sp++];
LB1 = [sp++];
LB0 = [sp++];
LT1 = [sp++];
LT0 = [sp++];
LC1 = [sp++];
LC0 = [sp++];
a1.w = [sp++];
a1.x = [sp++];
a0.w = [sp++];
a0.x = [sp++];
b3 = [sp++];
b2 = [sp++];
b1 = [sp++];
b0 = [sp++];
l3 = [sp++];
l2 = [sp++];
l1 = [sp++];
l0 = [sp++];
m3 = [sp++];
m2 = [sp++];
m1 = [sp++];
m0 = [sp++];
i3 = [sp++];
i2 = [sp++];
i1 = [sp++];
i0 = [sp++];
usp = [sp++];
fp = [sp++];
(R7:0, P5:0) = [sp++];
[--sp] = RETI; /* Clear Global Interrupt Disable */
SP += 4;
RTS;
ENDPROC(_do_hibernate)

(File diff suppressed because it is too large.)


@@ -0,0 +1,231 @@
/*
* Common Blackfin startup code
*
* Copyright 2004-2008 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/blackfin.h>
#include <asm/thread_info.h>
#include <asm/trace.h>
#include <asm/asm-offsets.h>
__INIT
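/* Added note (not part of the original source): callers pass the fill value
 * in R0 (zero), the region start in R1 and the region end in R2; the loop
 * stores one 32-bit word per iteration, so the region must be word-aligned.
 */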
ENTRY(__init_clear_bss)
r2 = r2 - r1;
cc = r2 == 0;
if cc jump .L_bss_done;
r2 >>= 2;
p1 = r1;
p2 = r2;
lsetup (1f, 1f) lc0 = p2;
1: [p1++] = r0;
.L_bss_done:
rts;
ENDPROC(__init_clear_bss)
ENTRY(__start)
/* R0: argument of command line string, passed from uboot, save it */
R7 = R0;
/* Enable Cycle Counter and Nesting Of Interrupts */
#ifdef CONFIG_BFIN_SCRATCH_REG_CYCLES
R0 = SYSCFG_SNEN;
#else
R0 = SYSCFG_SNEN | SYSCFG_CCEN;
#endif
SYSCFG = R0;
/* Optimization register tricks: keep a base value in the
* reserved P registers so we use the load/store with an
* offset syntax. R0 = [P5 + <constant>];
* P5 - core MMR base
* R6 - 0
*/
r6 = 0;
p5.l = 0;
p5.h = hi(COREMMR_BASE);
/* Zero out registers required by Blackfin ABI */
/* Disable circular buffers */
L0 = r6;
L1 = r6;
L2 = r6;
L3 = r6;
/* Disable hardware loops in case we were started by 'go' */
LC0 = r6;
LC1 = r6;
/*
* Clear ITEST_COMMAND and DTEST_COMMAND registers,
* Leaving these as non-zero can confuse the emulator
*/
[p5 + (DTEST_COMMAND - COREMMR_BASE)] = r6;
[p5 + (ITEST_COMMAND - COREMMR_BASE)] = r6;
CSYNC;
trace_buffer_init(p0,r0);
/* Turn off the icache */
r1 = [p5 + (IMEM_CONTROL - COREMMR_BASE)];
BITCLR (r1, ENICPLB_P);
[p5 + (IMEM_CONTROL - COREMMR_BASE)] = r1;
SSYNC;
/* Turn off the dcache */
r1 = [p5 + (DMEM_CONTROL - COREMMR_BASE)];
BITCLR (r1, ENDCPLB_P);
[p5 + (DMEM_CONTROL - COREMMR_BASE)] = r1;
SSYNC;
/* in case of double faults, save a few things */
p1.l = _initial_pda;
p1.h = _initial_pda;
r4 = RETX;
#ifdef CONFIG_DEBUG_DOUBLEFAULT
/* Only save these if we are storing them,
* This happens here, since L1 gets clobbered
* below
*/
GET_PDA(p0, r0);
r0 = [p0 + PDA_DF_RETX];
r1 = [p0 + PDA_DF_DCPLB];
r2 = [p0 + PDA_DF_ICPLB];
r3 = [p0 + PDA_DF_SEQSTAT];
[p1 + PDA_INIT_DF_RETX] = r0;
[p1 + PDA_INIT_DF_DCPLB] = r1;
[p1 + PDA_INIT_DF_ICPLB] = r2;
[p1 + PDA_INIT_DF_SEQSTAT] = r3;
#endif
[p1 + PDA_INIT_RETX] = r4;
/* Initialize stack pointer */
sp.l = _init_thread_union + THREAD_SIZE;
sp.h = _init_thread_union + THREAD_SIZE;
fp = sp;
usp = sp;
#ifdef CONFIG_EARLY_PRINTK
call _init_early_exception_vectors;
r0 = (EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
sti r0;
#endif
r0 = r6;
/* Zero out all of the fun bss regions */
#if L1_DATA_A_LENGTH > 0
r1.l = __sbss_l1;
r1.h = __sbss_l1;
r2.l = __ebss_l1;
r2.h = __ebss_l1;
call __init_clear_bss
#endif
#if L1_DATA_B_LENGTH > 0
r1.l = __sbss_b_l1;
r1.h = __sbss_b_l1;
r2.l = __ebss_b_l1;
r2.h = __ebss_b_l1;
call __init_clear_bss
#endif
#if L2_LENGTH > 0
r1.l = __sbss_l2;
r1.h = __sbss_l2;
r2.l = __ebss_l2;
r2.h = __ebss_l2;
call __init_clear_bss
#endif
r1.l = ___bss_start;
r1.h = ___bss_start;
r2.l = ___bss_stop;
r2.h = ___bss_stop;
call __init_clear_bss
/* Put The Code for PLL Programming and SDRAM Programming in L1 ISRAM */
call _bfin_relocate_l1_mem;
#ifdef CONFIG_ROMKERNEL
call _bfin_relocate_xip_data;
#endif
#ifdef CONFIG_BFIN_KERNEL_CLOCK
/* Only use on-chip scratch space for stack when absolutely required
* to avoid Anomaly 05000227 ... we know the init_clocks() func only
* uses L1 text and stack space and no other memory region.
*/
# define KERNEL_CLOCK_STACK (L1_SCRATCH_START + L1_SCRATCH_LENGTH - 12)
sp.l = lo(KERNEL_CLOCK_STACK);
sp.h = hi(KERNEL_CLOCK_STACK);
call _init_clocks;
sp = usp; /* usp hasn't been touched, so restore from there */
#endif
/* This section keeps the processor in supervisor mode
* during kernel boot. Switches to user mode at end of boot.
* See page 3-9 of Hardware Reference manual for documentation.
*/
/* EVT15 = _real_start */
p1.l = _real_start;
p1.h = _real_start;
[p5 + (EVT15 - COREMMR_BASE)] = p1;
csync;
#ifdef CONFIG_EARLY_PRINTK
r0 = (EVT_IVG15 | EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU) (z);
#else
r0 = EVT_IVG15 (z);
#endif
sti r0;
raise 15;
#ifdef CONFIG_EARLY_PRINTK
p0.l = _early_trap;
p0.h = _early_trap;
#else
p0.l = .LWAIT_HERE;
p0.h = .LWAIT_HERE;
#endif
reti = p0;
#if ANOMALY_05000281
nop; nop; nop;
#endif
rti;
.LWAIT_HERE:
jump .LWAIT_HERE;
ENDPROC(__start)
/* A little BF561 glue ... */
#ifndef WDOG_CTL
# define WDOG_CTL WDOGA_CTL
#endif
ENTRY(_real_start)
/* Enable nested interrupts */
[--sp] = reti;
/* watchdog off for now */
p0.l = lo(WDOG_CTL);
p0.h = hi(WDOG_CTL);
r0 = 0xAD6(z);
w[p0] = r0;
ssync;
/* Pass the u-boot arguments to the global value command line */
R0 = R7;
call _cmdline_init;
sp += -12 + 4; /* +4 is for reti loading above */
call _init_pda
sp += 12;
jump.l _start_kernel;
ENDPROC(_real_start)
__FINIT


@@ -0,0 +1,326 @@
/*
* Interrupt Entries
*
* Copyright 2005-2009 Analog Devices Inc.
* D. Jeff Dionne <jeff@ryeham.ee.ryerson.ca>
* Kenneth Albanowski <kjahds@kjahds.com>
*
* Licensed under the GPL-2 or later.
*/
#include <asm/blackfin.h>
#include <mach/irq.h>
#include <linux/linkage.h>
#include <asm/entry.h>
#include <asm/asm-offsets.h>
#include <asm/trace.h>
#include <asm/traps.h>
#include <asm/thread_info.h>
#include <asm/context.S>
.extern _ret_from_exception
#ifdef CONFIG_I_ENTRY_L1
.section .l1.text
#else
.text
#endif
.align 4 /* just in case */
/* Common interrupt entry code. First we do CLI, then push
* RETI, to keep interrupts disabled, but to allow this state to be changed
* by local_bh_enable.
* R0 contains the interrupt number, while R1 may contain the value of IPEND,
* or garbage if IPEND won't be needed by the ISR. */
__common_int_entry:
[--sp] = fp;
[--sp] = usp;
[--sp] = i0;
[--sp] = i1;
[--sp] = i2;
[--sp] = i3;
[--sp] = m0;
[--sp] = m1;
[--sp] = m2;
[--sp] = m3;
[--sp] = l0;
[--sp] = l1;
[--sp] = l2;
[--sp] = l3;
[--sp] = b0;
[--sp] = b1;
[--sp] = b2;
[--sp] = b3;
[--sp] = a0.x;
[--sp] = a0.w;
[--sp] = a1.x;
[--sp] = a1.w;
[--sp] = LC0;
[--sp] = LC1;
[--sp] = LT0;
[--sp] = LT1;
[--sp] = LB0;
[--sp] = LB1;
[--sp] = ASTAT;
[--sp] = r0; /* Skip reserved */
[--sp] = RETS;
r2 = RETI;
[--sp] = r2;
[--sp] = RETX;
[--sp] = RETN;
[--sp] = RETE;
[--sp] = SEQSTAT;
[--sp] = r1; /* IPEND - R1 may or may not be set up before jumping here. */
/* Switch to other method of keeping interrupts disabled. */
#ifdef CONFIG_DEBUG_HWERR
r1 = 0x3f;
sti r1;
#else
cli r1;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
[--sp] = r0;
sp += -12;
call _trace_hardirqs_off;
sp += 12;
r0 = [sp++];
#endif
[--sp] = RETI; /* orig_pc */
/* Clear all L registers. */
r1 = 0 (x);
l0 = r1;
l1 = r1;
l2 = r1;
l3 = r1;
#ifdef CONFIG_FRAME_POINTER
fp = 0;
#endif
ANOMALY_283_315_WORKAROUND(p5, r7)
r1 = sp;
SP += -12;
#ifdef CONFIG_IPIPE
call ___ipipe_grab_irq
SP += 12;
cc = r0 == 0;
if cc jump .Lcommon_restore_context;
#else /* CONFIG_IPIPE */
#ifdef CONFIG_PREEMPT
r7 = sp;
r4.l = lo(ALIGN_PAGE_MASK);
r4.h = hi(ALIGN_PAGE_MASK);
r7 = r7 & r4;
p5 = r7;
r7 = [p5 + TI_PREEMPT]; /* get preempt count */
r7 += 1; /* increment it */
[p5 + TI_PREEMPT] = r7;
#endif
pseudo_long_call _do_irq, p2;
#ifdef CONFIG_PREEMPT
r7 += -1;
[p5 + TI_PREEMPT] = r7; /* restore preempt count */
#endif
SP += 12;
#endif /* CONFIG_IPIPE */
pseudo_long_call _return_from_int, p2;
.Lcommon_restore_context:
RESTORE_CONTEXT
rti;
/* interrupt routine for ivhw - 5 */
ENTRY(_evt_ivhw)
/* A single action can kick off multiple memory transactions (like a cache
 * line fetch), which in turn can cause multiple hardware errors; let's catch
 * them all. First, make sure all the actions are complete and the core sees
 * the hardware errors.
*/
SSYNC;
SSYNC;
SAVE_ALL_SYS
#ifdef CONFIG_FRAME_POINTER
fp = 0;
#endif
ANOMALY_283_315_WORKAROUND(p5, r7)
/* Handle all stacked hardware errors
* To make sure we don't hang forever, only do it 10 times
*/
R0 = 0;
R2 = 10;
1:
P0.L = LO(ILAT);
P0.H = HI(ILAT);
R1 = [P0];
CC = BITTST(R1, EVT_IVHW_P);
IF ! CC JUMP 2f;
/* OK a hardware error is pending - clear it */
R1 = EVT_IVHW_P;
[P0] = R1;
R0 += 1;
CC = R1 == R2;
if CC JUMP 2f;
JUMP 1b;
2:
# We are going to dump something out, so make sure we print IPEND properly
p2.l = lo(IPEND);
p2.h = hi(IPEND);
r0 = [p2];
[sp + PT_IPEND] = r0;
/* set the EXCAUSE to HWERR for trap_c */
r0 = [sp + PT_SEQSTAT];
R1.L = LO(VEC_HWERR);
R1.H = HI(VEC_HWERR);
R0 = R0 | R1;
[sp + PT_SEQSTAT] = R0;
r0 = sp; /* stack frame pt_regs pointer argument ==> r0 */
SP += -12;
pseudo_long_call _trap_c, p5;
SP += 12;
#ifdef EBIU_ERRMST
/* make sure EBIU_ERRMST is clear */
p0.l = LO(EBIU_ERRMST);
p0.h = HI(EBIU_ERRMST);
r0.l = (CORE_ERROR | CORE_MERROR);
w[p0] = r0.l;
#endif
pseudo_long_call _ret_from_exception, p2;
.Lcommon_restore_all_sys:
RESTORE_ALL_SYS
rti;
ENDPROC(_evt_ivhw)
/* Interrupt routine for evt2 (NMI).
* For inner circle type details, please see:
* http://docs.blackfin.uclinux.org/doku.php?id=linux-kernel:nmi
*/
ENTRY(_evt_nmi)
#ifndef CONFIG_NMI_WATCHDOG
.weak _evt_nmi
#else
/* Don't take account of CPLBs; this handler will not return */
SAVE_ALL_SYS
r0 = sp;
r1 = retn;
[sp + PT_PC] = r1;
trace_buffer_save(p4,r5);
ANOMALY_283_315_WORKAROUND(p4, r5)
SP += -12;
call _do_nmi;
SP += 12;
1:
jump 1b;
#endif
rtn;
ENDPROC(_evt_nmi)
/* interrupt routine for core timer - 6 */
ENTRY(_evt_timer)
TIMER_INTERRUPT_ENTRY(EVT_IVTMR_P)
/* interrupt routine for evt7 - 7 */
ENTRY(_evt_evt7)
INTERRUPT_ENTRY(EVT_IVG7_P)
ENTRY(_evt_evt8)
INTERRUPT_ENTRY(EVT_IVG8_P)
ENTRY(_evt_evt9)
INTERRUPT_ENTRY(EVT_IVG9_P)
ENTRY(_evt_evt10)
INTERRUPT_ENTRY(EVT_IVG10_P)
ENTRY(_evt_evt11)
INTERRUPT_ENTRY(EVT_IVG11_P)
ENTRY(_evt_evt12)
INTERRUPT_ENTRY(EVT_IVG12_P)
ENTRY(_evt_evt13)
INTERRUPT_ENTRY(EVT_IVG13_P)
/* interrupt routine for system_call - 15 */
ENTRY(_evt_system_call)
SAVE_CONTEXT_SYSCALL
#ifdef CONFIG_FRAME_POINTER
fp = 0;
#endif
pseudo_long_call _system_call, p2;
jump .Lcommon_restore_context;
ENDPROC(_evt_system_call)
#ifdef CONFIG_IPIPE
/*
* __ipipe_call_irqtail: lowers the current priority level to EVT15
* before running a user-defined routine, then raises the priority
* level to EVT14 to prepare the caller for a normal interrupt
* return through RTI.
*
* We currently use this feature in two occasions:
*
* - before branching to __ipipe_irq_tail_hook as requested by a high
* priority domain after the pipeline delivered an interrupt,
* e.g. such as Xenomai, in order to start its rescheduling
* procedure, since we may not switch tasks when IRQ levels are
* nested on the Blackfin, so we have to fake an interrupt return
* so that we may reschedule immediately.
*
* - before branching to __ipipe_sync_root(), in order to play any interrupt
* pending for the root domain (i.e. the Linux kernel). This lowers
* the core priority level enough so that Linux IRQ handlers may
* never delay interrupts handled by high priority domains; we defer
* those handlers until this point instead. This is a substitute
* to using a threaded interrupt model for the Linux kernel.
*
* r0: address of user-defined routine
* context: caller must have preempted EVT15, hw interrupts must be off.
*/
ENTRY(___ipipe_call_irqtail)
p0 = r0;
r0.l = 1f;
r0.h = 1f;
reti = r0;
rti;
1:
[--sp] = rets;
[--sp] = ( r7:4, p5:3 );
sp += -12;
call (p0);
sp += 12;
( r7:4, p5:3 ) = [sp++];
rets = [sp++];
#ifdef CONFIG_DEBUG_HWERR
/* enable irq14 & hwerr interrupt, until we transition to _evt_evt14 */
r0 = (EVT_IVG14 | EVT_IVHW | \
EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
#else
/* Only enable irq14 interrupt, until we transition to _evt_evt14 */
r0 = (EVT_IVG14 | \
EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
#endif
sti r0;
raise 14; /* Branches to _evt_evt14 */
2:
jump 2b; /* Likely paranoid. */
ENDPROC(___ipipe_call_irqtail)
#endif /* CONFIG_IPIPE */

(File diff suppressed because it is too large.)


@@ -0,0 +1,237 @@
/*
* Blackfin power management
*
* Copyright 2006-2009 Analog Devices Inc.
*
* Licensed under the GPL-2
* based on arm/mach-omap/pm.c
* Copyright 2001, Cliff Brake <cbrake@accelent.com> and others
*/
#include <linux/suspend.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <asm/cplb.h>
#include <asm/gpio.h>
#include <asm/dma.h>
#include <asm/dpmc.h>
void bfin_pm_suspend_standby_enter(void)
{
bfin_pm_standby_setup();
#ifdef CONFIG_PM_BFIN_SLEEP_DEEPER
sleep_deeper(bfin_sic_iwr[0], bfin_sic_iwr[1], bfin_sic_iwr[2]);
#else
sleep_mode(bfin_sic_iwr[0], bfin_sic_iwr[1], bfin_sic_iwr[2]);
#endif
bfin_pm_standby_restore();
#ifdef SIC_IWR0
bfin_write_SIC_IWR0(IWR_DISABLE_ALL);
# ifdef SIC_IWR1
/* BF52x system reset does not properly reset SIC_IWR1 which
* will screw up the bootrom as it relies on MDMA0/1 waking it
* up from IDLE instructions. See this report for more info:
* http://blackfin.uclinux.org/gf/tracker/4323
*/
if (ANOMALY_05000435)
bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11));
else
bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
# endif
# ifdef SIC_IWR2
bfin_write_SIC_IWR2(IWR_DISABLE_ALL);
# endif
#else
bfin_write_SIC_IWR(IWR_DISABLE_ALL);
#endif
}
int bf53x_suspend_l1_mem(unsigned char *memptr)
{
dma_memcpy_nocache(memptr, (const void *) L1_CODE_START,
L1_CODE_LENGTH);
dma_memcpy_nocache(memptr + L1_CODE_LENGTH,
(const void *) L1_DATA_A_START, L1_DATA_A_LENGTH);
dma_memcpy_nocache(memptr + L1_CODE_LENGTH + L1_DATA_A_LENGTH,
(const void *) L1_DATA_B_START, L1_DATA_B_LENGTH);
memcpy(memptr + L1_CODE_LENGTH + L1_DATA_A_LENGTH +
L1_DATA_B_LENGTH, (const void *) L1_SCRATCH_START,
L1_SCRATCH_LENGTH);
return 0;
}
int bf53x_resume_l1_mem(unsigned char *memptr)
{
dma_memcpy_nocache((void *) L1_CODE_START, memptr, L1_CODE_LENGTH);
dma_memcpy_nocache((void *) L1_DATA_A_START, memptr + L1_CODE_LENGTH,
L1_DATA_A_LENGTH);
dma_memcpy_nocache((void *) L1_DATA_B_START, memptr + L1_CODE_LENGTH +
L1_DATA_A_LENGTH, L1_DATA_B_LENGTH);
memcpy((void *) L1_SCRATCH_START, memptr + L1_CODE_LENGTH +
L1_DATA_A_LENGTH + L1_DATA_B_LENGTH, L1_SCRATCH_LENGTH);
return 0;
}
#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK)
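/* Added summary (not part of the original source): walk every way, bank,
 * subbank and set of the data cache through the DTEST MMRs and FLUSHINV each
 * line whose tag shows both the valid and dirty bits (status & 0x3), so that
 * external memory is coherent before the cache contents are lost in hibernate.
 */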
static void flushinv_all_dcache(void)
{
u32 way, bank, subbank, set;
u32 status, addr;
u32 dmem_ctl = bfin_read_DMEM_CONTROL();
for (bank = 0; bank < 2; ++bank) {
if (!(dmem_ctl & (1 << (DMC1_P - bank))))
continue;
for (way = 0; way < 2; ++way)
for (subbank = 0; subbank < 4; ++subbank)
for (set = 0; set < 64; ++set) {
bfin_write_DTEST_COMMAND(
way << 26 |
bank << 23 |
subbank << 16 |
set << 5
);
CSYNC();
status = bfin_read_DTEST_DATA0();
/* only worry about valid/dirty entries */
if ((status & 0x3) != 0x3)
continue;
/* construct the address using the tag */
addr = (status & 0xFFFFC800) | (subbank << 12) | (set << 5);
/* flush it */
__asm__ __volatile__("FLUSHINV[%0];" : : "a"(addr));
}
}
}
#endif
int bfin_pm_suspend_mem_enter(void)
{
int wakeup, ret;
unsigned char *memptr = kmalloc(L1_CODE_LENGTH + L1_DATA_A_LENGTH
+ L1_DATA_B_LENGTH + L1_SCRATCH_LENGTH,
GFP_KERNEL);
if (memptr == NULL) {
panic("bf53x_suspend_l1_mem malloc failed");
return -ENOMEM;
}
wakeup = bfin_read_VR_CTL() & ~FREQ;
wakeup |= SCKELOW;
#ifdef CONFIG_PM_BFIN_WAKE_PH6
wakeup |= PHYWE;
#endif
#ifdef CONFIG_PM_BFIN_WAKE_GP
wakeup |= GPWE;
#endif
ret = blackfin_dma_suspend();
if (ret) {
kfree(memptr);
return ret;
}
bfin_gpio_pm_hibernate_suspend();
#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK)
flushinv_all_dcache();
#endif
_disable_dcplb();
_disable_icplb();
bf53x_suspend_l1_mem(memptr);
do_hibernate(wakeup | vr_wakeup); /* See you later! */
bf53x_resume_l1_mem(memptr);
_enable_icplb();
_enable_dcplb();
bfin_gpio_pm_hibernate_restore();
blackfin_dma_resume();
kfree(memptr);
return 0;
}
/*
* bfin_pm_valid - Tell the PM core that we only support the standby sleep
* state
* @state: suspend state we're checking.
*
*/
static int bfin_pm_valid(suspend_state_t state)
{
return (state == PM_SUSPEND_STANDBY
#if !(defined(BF533_FAMILY) || defined(CONFIG_BF561))
/*
* On BF533/2/1:
* If we enter Hibernate the SCKE Pin is driven Low,
* so that the SDRAM enters Self Refresh Mode.
* However when the reset sequence that follows hibernate
* state is executed, SCKE is driven High, taking the
* SDRAM out of Self Refresh.
*
* If you reconfigure and access the SDRAM "very quickly",
 * you are likely to avoid errors; otherwise the SDRAM
 * starts losing its contents.
* An external HW workaround is possible using logic gates.
*/
|| state == PM_SUSPEND_MEM
#endif
);
}
/*
* bfin_pm_enter - Actually enter a sleep state.
* @state: State we're entering.
*
*/
static int bfin_pm_enter(suspend_state_t state)
{
switch (state) {
case PM_SUSPEND_STANDBY:
bfin_pm_suspend_standby_enter();
break;
case PM_SUSPEND_MEM:
bfin_pm_suspend_mem_enter();
break;
default:
return -EINVAL;
}
return 0;
}
static const struct platform_suspend_ops bfin_pm_ops = {
.enter = bfin_pm_enter,
.valid = bfin_pm_valid,
};
static int __init bfin_pm_init(void)
{
suspend_set_ops(&bfin_pm_ops);
return 0;
}
__initcall(bfin_pm_init);


@@ -0,0 +1,543 @@
/*
* IPI management based on arch/arm/kernel/smp.c (Copyright 2002 ARM Limited)
*
* Copyright 2007-2009 Analog Devices Inc.
* Philippe Gerum <rpm@xenomai.org>
*
* Licensed under the GPL-2.
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/clockchips.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/irq_handler.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/cpu.h>
#include <asm/time.h>
#include <linux/err.h>
/*
* Anomaly notes:
* 05000120 - we always define corelock as 32-bit integer in L2
*/
struct corelock_slot corelock __attribute__ ((__section__(".l2.bss")));
#ifdef CONFIG_ICACHE_FLUSH_L1
unsigned long blackfin_iflush_l1_entry[NR_CPUS];
#endif
struct blackfin_initial_pda __cpuinitdata initial_pda_coreb;
#define BFIN_IPI_TIMER 0
#define BFIN_IPI_RESCHEDULE 1
#define BFIN_IPI_CALL_FUNC 2
#define BFIN_IPI_CPU_STOP 3
struct blackfin_flush_data {
unsigned long start;
unsigned long end;
};
void *secondary_stack;
struct smp_call_struct {
void (*func)(void *info);
void *info;
int wait;
cpumask_t *waitmask;
};
static struct blackfin_flush_data smp_flush_data;
static DEFINE_SPINLOCK(stop_lock);
struct ipi_message {
unsigned long type;
struct smp_call_struct call_struct;
};
/* A magic number - stress test shows this is safe for common cases */
#define BFIN_IPI_MSGQ_LEN 5
/* Simple FIFO buffer, overflow leads to panic */
struct ipi_message_queue {
spinlock_t lock;
unsigned long count;
unsigned long head; /* head of the queue */
struct ipi_message ipi_message[BFIN_IPI_MSGQ_LEN];
};
static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue);
static void ipi_cpu_stop(unsigned int cpu)
{
spin_lock(&stop_lock);
printk(KERN_CRIT "CPU%u: stopping\n", cpu);
dump_stack();
spin_unlock(&stop_lock);
set_cpu_online(cpu, false);
local_irq_disable();
while (1)
SSYNC();
}
static void ipi_flush_icache(void *info)
{
struct blackfin_flush_data *fdata = info;
/* Invalidate the memory holding the bounds of the flushed region. */
blackfin_dcache_invalidate_range((unsigned long)fdata,
(unsigned long)fdata + sizeof(*fdata));
/* Make sure all write buffers in the data side of the core
* are flushed before trying to invalidate the icache. This
* needs to be after the data flush and before the icache
* flush so that the SSYNC does the right thing in preventing
* the instruction prefetcher from hitting things in cached
* memory at the wrong time -- it runs much further ahead than
* the pipeline.
*/
SSYNC();
/* ipi_flush_icache is invoked by the generic flush_icache_range,
* so call blackfin arch icache flush directly here.
*/
blackfin_icache_flush_range(fdata->start, fdata->end);
}
static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
{
int wait;
void (*func)(void *info);
void *info;
func = msg->call_struct.func;
info = msg->call_struct.info;
wait = msg->call_struct.wait;
func(info);
if (wait) {
#ifdef __ARCH_SYNC_CORE_DCACHE
/*
* 'wait' usually means synchronization between CPUs.
* Invalidate D cache in case shared data was changed
* by func() to ensure cache coherence.
*/
resync_core_dcache();
#endif
cpumask_clear_cpu(cpu, msg->call_struct.waitmask);
}
}
/* Use IRQ_SUPPLE_0 to request reschedule.
* When returning from interrupt to user space,
 * there is a chance to reschedule. */
static irqreturn_t ipi_handler_int0(int irq, void *dev_instance)
{
unsigned int cpu = smp_processor_id();
platform_clear_ipi(cpu, IRQ_SUPPLE_0);
return IRQ_HANDLED;
}
DECLARE_PER_CPU(struct clock_event_device, coretmr_events);
void ipi_timer(void)
{
int cpu = smp_processor_id();
struct clock_event_device *evt = &per_cpu(coretmr_events, cpu);
evt->event_handler(evt);
}
static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
{
struct ipi_message *msg;
struct ipi_message_queue *msg_queue;
unsigned int cpu = smp_processor_id();
unsigned long flags;
platform_clear_ipi(cpu, IRQ_SUPPLE_1);
msg_queue = &__get_cpu_var(ipi_msg_queue);
spin_lock_irqsave(&msg_queue->lock, flags);
while (msg_queue->count) {
msg = &msg_queue->ipi_message[msg_queue->head];
switch (msg->type) {
case BFIN_IPI_TIMER:
ipi_timer();
break;
case BFIN_IPI_RESCHEDULE:
scheduler_ipi();
break;
case BFIN_IPI_CALL_FUNC:
ipi_call_function(cpu, msg);
break;
case BFIN_IPI_CPU_STOP:
ipi_cpu_stop(cpu);
break;
default:
printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n",
cpu, msg->type);
break;
}
msg_queue->head++;
msg_queue->head %= BFIN_IPI_MSGQ_LEN;
msg_queue->count--;
}
spin_unlock_irqrestore(&msg_queue->lock, flags);
return IRQ_HANDLED;
}
static void ipi_queue_init(void)
{
unsigned int cpu;
struct ipi_message_queue *msg_queue;
for_each_possible_cpu(cpu) {
msg_queue = &per_cpu(ipi_msg_queue, cpu);
spin_lock_init(&msg_queue->lock);
msg_queue->count = 0;
msg_queue->head = 0;
}
}
static inline void smp_send_message(cpumask_t callmap, unsigned long type,
void (*func) (void *info), void *info, int wait)
{
unsigned int cpu;
struct ipi_message_queue *msg_queue;
struct ipi_message *msg;
unsigned long flags, next_msg;
cpumask_t waitmask; /* waitmask is shared by all cpus */
cpumask_copy(&waitmask, &callmap);
for_each_cpu(cpu, &callmap) {
msg_queue = &per_cpu(ipi_msg_queue, cpu);
spin_lock_irqsave(&msg_queue->lock, flags);
if (msg_queue->count < BFIN_IPI_MSGQ_LEN) {
next_msg = (msg_queue->head + msg_queue->count)
% BFIN_IPI_MSGQ_LEN;
msg = &msg_queue->ipi_message[next_msg];
msg->type = type;
if (type == BFIN_IPI_CALL_FUNC) {
msg->call_struct.func = func;
msg->call_struct.info = info;
msg->call_struct.wait = wait;
msg->call_struct.waitmask = &waitmask;
}
msg_queue->count++;
} else
panic("IPI message queue overflow\n");
spin_unlock_irqrestore(&msg_queue->lock, flags);
platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1);
}
if (wait) {
while (!cpumask_empty(&waitmask))
blackfin_dcache_invalidate_range(
(unsigned long)(&waitmask),
(unsigned long)(&waitmask));
#ifdef __ARCH_SYNC_CORE_DCACHE
/*
* Invalidate D cache in case shared data was changed by
* other processors to ensure cache coherence.
*/
resync_core_dcache();
#endif
}
}
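/*
 * Explanatory sketch of the wait handshake above: waitmask lives on the
 * sender's stack and is shared with the targets by address.  Because there
 * is no hardware coherence between the cores' L1 data caches, the sender
 * invalidates the line holding waitmask on every iteration of the spin loop
 * so it can observe the bits cleared remotely.  For example, with
 * callmap = {1} and wait = 1:
 *
 *	CPU0: queue msg for CPU1, raise IRQ_SUPPLE_1, spin on waitmask
 *	CPU1: take the IPI, run func(info), clear bit 1 of waitmask
 *	CPU0: sees the mask go empty and returns
 */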
int smp_call_function(void (*func)(void *info), void *info, int wait)
{
cpumask_t callmap;
preempt_disable();
cpumask_copy(&callmap, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &callmap);
if (!cpumask_empty(&callmap))
smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
preempt_enable();
return 0;
}
EXPORT_SYMBOL_GPL(smp_call_function);
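/*
 * Illustrative, hypothetical caller (names below are examples only): func
 * runs on every other online CPU with the given argument, and wait = 1
 * blocks the caller until all of them have finished.
 *
 *	static atomic_t hits = ATOMIC_INIT(0);
 *
 *	static void bump_counter(void *info)
 *	{
 *		atomic_inc((atomic_t *)info);
 *	}
 *	...
 *	smp_call_function(bump_counter, &hits, 1);
 */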
int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
int wait)
{
unsigned int cpu = cpuid;
cpumask_t callmap;
if (cpu_is_offline(cpu))
return 0;
cpumask_clear(&callmap);
cpumask_set_cpu(cpu, &callmap);
smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
return 0;
}
EXPORT_SYMBOL_GPL(smp_call_function_single);
void smp_send_reschedule(int cpu)
{
cpumask_t callmap;
/* simply trigger an ipi */
cpumask_clear(&callmap);
cpumask_set_cpu(cpu, &callmap);
smp_send_message(callmap, BFIN_IPI_RESCHEDULE, NULL, NULL, 0);
return;
}
void smp_send_msg(const struct cpumask *mask, unsigned long type)
{
smp_send_message(*mask, type, NULL, NULL, 0);
}
void smp_timer_broadcast(const struct cpumask *mask)
{
smp_send_msg(mask, BFIN_IPI_TIMER);
}
void smp_send_stop(void)
{
cpumask_t callmap;
preempt_disable();
cpumask_copy(&callmap, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &callmap);
if (!cpumask_empty(&callmap))
smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0);
preempt_enable();
return;
}
int __cpuinit __cpu_up(unsigned int cpu)
{
int ret;
struct blackfin_cpudata *ci = &per_cpu(cpu_data, cpu);
struct task_struct *idle = ci->idle;
	/* Discard any stale idle task left over from a previous bring-up
	 * and always fork a fresh one for this CPU.
	 */
	if (idle)
		free_task(idle);

	idle = fork_idle(cpu);
	if (IS_ERR(idle)) {
		printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
		return PTR_ERR(idle);
	}
	ci->idle = idle;
secondary_stack = task_stack_page(idle) + THREAD_SIZE;
ret = platform_boot_secondary(cpu, idle);
secondary_stack = NULL;
return ret;
}
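/*
 * Boot handshake note: __cpu_up publishes the top of the new idle thread's
 * stack through the global secondary_stack, then platform_boot_secondary()
 * releases the secondary core.  The secondary's early startup code is
 * expected to pick up that pointer before secondary_start_kernel() runs,
 * which is why the variable is cleared again once the platform call
 * returns.
 */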
static void __cpuinit setup_secondary(unsigned int cpu)
{
unsigned long ilat;
bfin_write_IMASK(0);
CSYNC();
ilat = bfin_read_ILAT();
CSYNC();
bfin_write_ILAT(ilat);
CSYNC();
	/* Enable interrupt levels IVG7-15. The IARs have already been
	 * programmed by the boot CPU. */
bfin_irq_flags |= IMASK_IVG15 |
IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
}
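/*
 * Explanatory note (hardware behaviour assumed here): with IMASK cleared,
 * reading ILAT and writing the same value back discards any interrupts
 * that were latched while the core was being brought up, so the secondary
 * starts with a clean slate before IVG7-15 are re-enabled.
 */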
void __cpuinit secondary_start_kernel(void)
{
unsigned int cpu = smp_processor_id();
struct mm_struct *mm = &init_mm;
if (_bfin_swrst & SWRST_DBL_FAULT_B) {
printk(KERN_EMERG "CoreB Recovering from DOUBLE FAULT event\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT
printk(KERN_EMERG " While handling exception (EXCAUSE = %#x) at %pF\n",
initial_pda_coreb.seqstat_doublefault & SEQSTAT_EXCAUSE,
initial_pda_coreb.retx_doublefault);
printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %pF\n",
initial_pda_coreb.dcplb_doublefault_addr);
printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %pF\n",
initial_pda_coreb.icplb_doublefault_addr);
#endif
printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
initial_pda_coreb.retx);
}
/*
* We want the D-cache to be enabled early, in case the atomic
* support code emulates cache coherence (see
* __ARCH_SYNC_CORE_DCACHE).
*/
init_exception_vectors();
local_irq_disable();
/* Attach the new idle task to the global mm. */
atomic_inc(&mm->mm_users);
atomic_inc(&mm->mm_count);
current->active_mm = mm;
preempt_disable();
setup_secondary(cpu);
platform_secondary_init(cpu);
/* setup local core timer */
bfin_local_timer_setup();
local_irq_enable();
bfin_setup_caches(cpu);
notify_cpu_starting(cpu);
	/*
	 * Calibrate the loops-per-jiffy value.
	 * IRQs need to be enabled here - the D-cache can be invalidated
	 * in the timer irq handler, so core B can read the correct jiffies.
	 */
calibrate_delay();
cpu_idle();
}
void __init smp_prepare_boot_cpu(void)
{
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
platform_prepare_cpus(max_cpus);
ipi_queue_init();
platform_request_ipi(IRQ_SUPPLE_0, ipi_handler_int0);
platform_request_ipi(IRQ_SUPPLE_1, ipi_handler_int1);
}
void __init smp_cpus_done(unsigned int max_cpus)
{
unsigned long bogosum = 0;
unsigned int cpu;
for_each_online_cpu(cpu)
bogosum += loops_per_jiffy;
printk(KERN_INFO "SMP: Total of %d processors activated "
"(%lu.%02lu BogoMIPS).\n",
num_online_cpus(),
bogosum / (500000/HZ),
(bogosum / (5000/HZ)) % 100);
}
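/*
 * Worked example for the BogoMIPS arithmetic above: each CPU contributes
 * loops_per_jiffy, and BogoMIPS = loops_per_jiffy * HZ / 500000.  The
 * printk splits bogosum into an integer part, bogosum / (500000/HZ), and
 * two decimals, (bogosum / (5000/HZ)) % 100.  With HZ = 100 and two cores
 * at loops_per_jiffy = 600000 each, bogosum = 1200000 and the message
 * reports "2 processors activated (240.00 BogoMIPS)".
 */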
void smp_icache_flush_range_others(unsigned long start, unsigned long end)
{
smp_flush_data.start = start;
smp_flush_data.end = end;
preempt_disable();
if (smp_call_function(&ipi_flush_icache, &smp_flush_data, 1))
printk(KERN_WARNING "SMP: failed to run I-cache flush request on other CPUs\n");
preempt_enable();
}
EXPORT_SYMBOL_GPL(smp_icache_flush_range_others);
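/*
 * Explanatory note: the range is stashed in the single, shared
 * smp_flush_data and broadcast with wait = 1, so smp_call_function() does
 * not return (and the structure is not reused) until every other online
 * CPU has finished its icache flush.
 */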
#ifdef __ARCH_SYNC_CORE_ICACHE
unsigned long icache_invld_count[NR_CPUS];
void resync_core_icache(void)
{
unsigned int cpu = get_cpu();
blackfin_invalidate_entire_icache();
icache_invld_count[cpu]++;
put_cpu();
}
EXPORT_SYMBOL(resync_core_icache);
#endif
#ifdef __ARCH_SYNC_CORE_DCACHE
unsigned long dcache_invld_count[NR_CPUS];
unsigned long barrier_mask __attribute__ ((__section__(".l2.bss")));
void resync_core_dcache(void)
{
unsigned int cpu = get_cpu();
blackfin_invalidate_entire_dcache();
dcache_invld_count[cpu]++;
put_cpu();
}
EXPORT_SYMBOL(resync_core_dcache);
#endif
#ifdef CONFIG_HOTPLUG_CPU
int __cpuexit __cpu_disable(void)
{
unsigned int cpu = smp_processor_id();
if (cpu == 0)
return -EPERM;
set_cpu_online(cpu, false);
return 0;
}
static DECLARE_COMPLETION(cpu_killed);
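/*
 * Note: the timeout passed to wait_for_completion_timeout() below is in
 * jiffies, so 5000 corresponds to roughly five seconds only when HZ = 1000;
 * msecs_to_jiffies(5000) would make the intended wall-clock timeout
 * explicit.
 */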
int __cpuexit __cpu_die(unsigned int cpu)
{
return wait_for_completion_timeout(&cpu_killed, 5000);
}
void cpu_die(void)
{
complete(&cpu_killed);
atomic_dec(&init_mm.mm_users);
atomic_dec(&init_mm.mm_count);
local_irq_disable();
platform_cpu_die();
}
#endif