M7350v1_en_gpl

T
2024-09-09 08:52:07 +00:00
commit f9cc65cfda
65988 changed files with 26357421 additions and 0 deletions

drivers/clocksource/Kconfig

@@ -0,0 +1,31 @@
config CLKSRC_I8253
bool
config CLKEVT_I8253
bool
config I8253_LOCK
bool
config CLKBLD_I8253
def_bool y if CLKSRC_I8253 || CLKEVT_I8253 || I8253_LOCK
config CLKSRC_MMIO
bool
config DW_APB_TIMER
bool
config CLKSRC_DBX500_PRCMU
bool "Clocksource PRCMU Timer"
depends on UX500_SOC_DB5500 || UX500_SOC_DB8500
default y
help
Use the always-on PRCMU Timer as clocksource
config CLKSRC_DBX500_PRCMU_SCHED_CLOCK
bool "Clocksource PRCMU Timer sched_clock"
depends on (CLKSRC_DBX500_PRCMU && !NOMADIK_MTU_SCHED_CLOCK)
default y
help
Use the always-on PRCMU Timer as sched_clock

drivers/clocksource/Makefile

@@ -0,0 +1,12 @@
obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o
obj-$(CONFIG_X86_CYCLONE_TIMER) += cyclone.o
obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o
obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o
obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o
obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o
obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o
obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o
obj-$(CONFIG_CLKBLD_I8253) += i8253.o
obj-$(CONFIG_CLKSRC_MMIO) += mmio.o
obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o

drivers/clocksource/acpi_pm.c

@@ -0,0 +1,250 @@
/*
* linux/drivers/clocksource/acpi_pm.c
*
* This file contains the ACPI PM based clocksource.
*
* This code was largely moved from the i386 timer_pm.c file
* which was (C) Dominik Brodowski <linux@brodo.de> 2003
* and contained the following comments:
*
* Driver to use the Power Management Timer (PMTMR) available in some
* southbridges as primary timing source for the Linux kernel.
*
* Based on parts of linux/drivers/acpi/hardware/hwtimer.c, timer_pit.c,
* timer_hpet.c, and on Arjan van de Ven's implementation for 2.4.
*
* This file is licensed under the GPL v2.
*/
#include <linux/acpi_pmtmr.h>
#include <linux/clocksource.h>
#include <linux/timex.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <asm/io.h>
/*
* The I/O port the PMTMR resides at.
* The location is detected during setup_arch(),
* in arch/i386/kernel/acpi/boot.c
*/
u32 pmtmr_ioport __read_mostly;
static inline u32 read_pmtmr(void)
{
/* mask the output to 24 bits */
return inl(pmtmr_ioport) & ACPI_PM_MASK;
}
u32 acpi_pm_read_verified(void)
{
u32 v1 = 0, v2 = 0, v3 = 0;
/*
* It has been reported that on various broken chipsets (ICH4,
* PIIX4 and PIIX4E), where the ACPI PM clock source is not
* latched, you must read it multiple times to ensure a safe
* value is read:
*/
do {
v1 = read_pmtmr();
v2 = read_pmtmr();
v3 = read_pmtmr();
} while (unlikely((v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1)
|| (v3 > v1 && v3 < v2)));
return v2;
}
static cycle_t acpi_pm_read(struct clocksource *cs)
{
return (cycle_t)read_pmtmr();
}
static struct clocksource clocksource_acpi_pm = {
.name = "acpi_pm",
.rating = 200,
.read = acpi_pm_read,
.mask = (cycle_t)ACPI_PM_MASK,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
#ifdef CONFIG_PCI
static int __devinitdata acpi_pm_good;
static int __init acpi_pm_good_setup(char *__str)
{
acpi_pm_good = 1;
return 1;
}
__setup("acpi_pm_good", acpi_pm_good_setup);
static cycle_t acpi_pm_read_slow(struct clocksource *cs)
{
return (cycle_t)acpi_pm_read_verified();
}
static inline void acpi_pm_need_workaround(void)
{
clocksource_acpi_pm.read = acpi_pm_read_slow;
clocksource_acpi_pm.rating = 120;
}
/*
* PIIX4 Errata:
*
* The power management timer may return improper results when read.
* Although the timer value settles properly after incrementing,
* while incrementing there is a 3 ns window every 69.8 ns where the
* timer value is indeterminate (a 4.2% chance that the data will be
* incorrect when read). As a result, the ACPI free running count up
* timer specification is violated due to erroneous reads.
*/
static void __devinit acpi_pm_check_blacklist(struct pci_dev *dev)
{
if (acpi_pm_good)
return;
/* the bug has been fixed in PIIX4M */
if (dev->revision < 3) {
printk(KERN_WARNING "* Found PM-Timer Bug on the chipset."
" Due to workarounds for a bug,\n"
"* this clock source is slow. Consider trying"
" other clock sources\n");
acpi_pm_need_workaround();
}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3,
acpi_pm_check_blacklist);
static void __devinit acpi_pm_check_graylist(struct pci_dev *dev)
{
if (acpi_pm_good)
return;
printk(KERN_WARNING "* The chipset may have PM-Timer Bug. Due to"
" workarounds for a bug,\n"
"* this clock source is slow. If you are sure your timer"
" does not have\n"
"* this bug, please use \"acpi_pm_good\" to disable the"
" workaround\n");
acpi_pm_need_workaround();
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0,
acpi_pm_check_graylist);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_LE,
acpi_pm_check_graylist);
#endif
#ifndef CONFIG_X86_64
#include <asm/mach_timer.h>
#define PMTMR_EXPECTED_RATE \
((CALIBRATE_LATCH * (PMTMR_TICKS_PER_SEC >> 10)) / (PIT_TICK_RATE>>10))
/*
* Some boards have the PMTMR running way too fast. We check
* the PMTMR rate against PIT channel 2 to catch these cases.
*/
static int verify_pmtmr_rate(void)
{
cycle_t value1, value2;
unsigned long count, delta;
mach_prepare_counter();
value1 = clocksource_acpi_pm.read(&clocksource_acpi_pm);
mach_countup(&count);
value2 = clocksource_acpi_pm.read(&clocksource_acpi_pm);
delta = (value2 - value1) & ACPI_PM_MASK;
/* Check that the PMTMR delta is within 5% of what we expect */
if (delta < (PMTMR_EXPECTED_RATE * 19) / 20 ||
delta > (PMTMR_EXPECTED_RATE * 21) / 20) {
printk(KERN_INFO "PM-Timer running at invalid rate: %lu%% "
"of normal - aborting.\n",
100UL * delta / PMTMR_EXPECTED_RATE);
return -1;
}
return 0;
}
#else
#define verify_pmtmr_rate() (0)
#endif
/* Number of monotonicity checks to perform during initialization */
#define ACPI_PM_MONOTONICITY_CHECKS 10
/* Number of reads we try to get two different values */
#define ACPI_PM_READ_CHECKS 10000
static int __init init_acpi_pm_clocksource(void)
{
cycle_t value1, value2;
unsigned int i, j = 0;
if (!pmtmr_ioport)
return -ENODEV;
/* "verify" this timing source: */
for (j = 0; j < ACPI_PM_MONOTONICITY_CHECKS; j++) {
udelay(100 * j);
value1 = clocksource_acpi_pm.read(&clocksource_acpi_pm);
for (i = 0; i < ACPI_PM_READ_CHECKS; i++) {
value2 = clocksource_acpi_pm.read(&clocksource_acpi_pm);
if (value2 == value1)
continue;
if (value2 > value1)
break;
if ((value2 < value1) && ((value2) < 0xFFF))
break;
printk(KERN_INFO "PM-Timer had inconsistent results:"
" 0x%#llx, 0x%#llx - aborting.\n",
value1, value2);
pmtmr_ioport = 0;
return -EINVAL;
}
if (i == ACPI_PM_READ_CHECKS) {
printk(KERN_INFO "PM-Timer failed consistency check "
" (0x%#llx) - aborting.\n", value1);
pmtmr_ioport = 0;
return -ENODEV;
}
}
if (verify_pmtmr_rate() != 0){
pmtmr_ioport = 0;
return -ENODEV;
}
return clocksource_register_hz(&clocksource_acpi_pm,
PMTMR_TICKS_PER_SEC);
}
/* We use fs_initcall because we want the PCI fixups to have run
* but we still need to load before device_initcall
*/
fs_initcall(init_acpi_pm_clocksource);
/*
* Allow an override of the IOPort. Stupid BIOSes do not tell us about
* the PMTimer, but we might know where it is.
*/
static int __init parse_pmtmr(char *arg)
{
unsigned long base;
if (strict_strtoul(arg, 16, &base))
return -EINVAL;
#ifdef CONFIG_X86_64
if (base > UINT_MAX)
return -ERANGE;
#endif
printk(KERN_INFO "PMTMR IOPort override: 0x%04x -> 0x%04lx\n",
pmtmr_ioport, base);
pmtmr_ioport = base;
return 1;
}
__setup("pmtmr=", parse_pmtmr);

drivers/clocksource/clksrc-dbx500-prcmu.c

@@ -0,0 +1,93 @@
/*
* Copyright (C) ST-Ericsson SA 2011
*
* License Terms: GNU General Public License v2
* Author: Mattias Wallin <mattias.wallin@stericsson.com> for ST-Ericsson
* Author: Sundar Iyer for ST-Ericsson
* sched_clock implementation is based on:
* plat-nomadik/timer.c Linus Walleij <linus.walleij@stericsson.com>
*
* DBx500-PRCMU Timer
* The PRCMU has 5 timers which are available in an always-on
* power domain. We use Timer 4 for our always-on clock
* source on DB8500 and Timer 3 on DB5500.
*/
#include <linux/clockchips.h>
#include <linux/clksrc-dbx500-prcmu.h>
#include <asm/sched_clock.h>
#include <mach/setup.h>
#include <mach/hardware.h>
#define RATE_32K 32768
#define TIMER_MODE_CONTINUOUS 0x1
#define TIMER_DOWNCOUNT_VAL 0xffffffff
#define PRCMU_TIMER_REF 0
#define PRCMU_TIMER_DOWNCOUNT 0x4
#define PRCMU_TIMER_MODE 0x8
#define SCHED_CLOCK_MIN_WRAP 131072 /* 2^32 / 32768 */
static void __iomem *clksrc_dbx500_timer_base;
static cycle_t clksrc_dbx500_prcmu_read(struct clocksource *cs)
{
u32 count, count2;
do {
count = readl(clksrc_dbx500_timer_base +
PRCMU_TIMER_DOWNCOUNT);
count2 = readl(clksrc_dbx500_timer_base +
PRCMU_TIMER_DOWNCOUNT);
} while (count2 != count);
/* Complement the count, since the timer is a decrementing counter */
return ~count;
}
static struct clocksource clocksource_dbx500_prcmu = {
.name = "dbx500-prcmu-timer",
.rating = 300,
.read = clksrc_dbx500_prcmu_read,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
#ifdef CONFIG_CLKSRC_DBX500_PRCMU_SCHED_CLOCK
static u32 notrace dbx500_prcmu_sched_clock_read(void)
{
if (unlikely(!clksrc_dbx500_timer_base))
return 0;
return clksrc_dbx500_prcmu_read(&clocksource_dbx500_prcmu);
}
#endif
void __init clksrc_dbx500_prcmu_init(void __iomem *base)
{
clksrc_dbx500_timer_base = base;
/*
* The A9 subsystem expects the timer to be configured as
* a continuous looping timer.
* The PRCMU should configure it, but if for some reason it
* doesn't, we do it here.
*/
if (readl(clksrc_dbx500_timer_base + PRCMU_TIMER_MODE) !=
TIMER_MODE_CONTINUOUS) {
writel(TIMER_MODE_CONTINUOUS,
clksrc_dbx500_timer_base + PRCMU_TIMER_MODE);
writel(TIMER_DOWNCOUNT_VAL,
clksrc_dbx500_timer_base + PRCMU_TIMER_REF);
}
#ifdef CONFIG_CLKSRC_DBX500_PRCMU_SCHED_CLOCK
setup_sched_clock(dbx500_prcmu_sched_clock_read,
32, RATE_32K);
#endif
clocksource_register_hz(&clocksource_dbx500_prcmu, RATE_32K);
}
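The SCHED_CLOCK_MIN_WRAP constant above is simply 2^32 / 32768: a 32-bit counter clocked at 32.768 kHz takes 131072 seconds, about 36 hours, to wrap. A standalone check of that arithmetic in plain userspace C:

#include <stdio.h>

int main(void)
{
	/* 32-bit down-counter clocked at RATE_32K (32768 Hz) */
	const double wrap = 4294967296.0 / 32768.0;	/* 2^32 / RATE_32K */

	/* prints: wraps every 131072 s (~36.4 h) */
	printf("wraps every %.0f s (~%.1f h)\n", wrap, wrap / 3600.0);
	return 0;
}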

drivers/clocksource/cs5535-clockevt.c

@@ -0,0 +1,198 @@
/*
* Clock event driver for the CS5535/CS5536
*
* Copyright (C) 2006, Advanced Micro Devices, Inc.
* Copyright (C) 2007 Andres Salomon <dilinger@debian.org>
* Copyright (C) 2009 Andres Salomon <dilinger@collabora.co.uk>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*
* The MFGPTs are documented in AMD Geode CS5536 Companion Device Data Book.
*/
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/cs5535.h>
#include <linux/clockchips.h>
#define DRV_NAME "cs5535-clockevt"
static int timer_irq;
module_param_named(irq, timer_irq, int, 0644);
MODULE_PARM_DESC(irq, "Which IRQ to use for the clock source MFGPT ticks.");
/*
* We are using the 32.768kHz input clock - it's the only one that has the
* ranges we find desirable. The following table lists the suitable
* divisors and the associated Hz, minimum interval and the maximum interval:
*
* Divisor Hz Min Delta (s) Max Delta (s)
* 1 32768 .00048828125 2.000
* 2 16384 .0009765625 4.000
* 4 8192 .001953125 8.000
* 8 4096 .00390625 16.000
* 16 2048 .0078125 32.000
* 32 1024 .015625 64.000
* 64 512 .03125 128.000
* 128 256 .0625 256.000
* 256 128 .125 512.000
*/
static unsigned int cs5535_tick_mode = CLOCK_EVT_MODE_SHUTDOWN;
static struct cs5535_mfgpt_timer *cs5535_event_clock;
/* Selected from the table above */
#define MFGPT_DIVISOR 16
#define MFGPT_SCALE 4 /* divisor = 2^(scale) */
#define MFGPT_HZ (32768 / MFGPT_DIVISOR)
#define MFGPT_PERIODIC (MFGPT_HZ / HZ)
/*
* The MFGPT timers on the CS5536 provide us with suitable timers to use
* as clock event sources - not as good as a HPET or APIC, but certainly
* better than the PIT. This isn't a general purpose MFGPT driver, but
* a simplified one designed specifically to act as a clock event source.
* For full details about the MFGPT, please consult the CS5536 data sheet.
*/
static void disable_timer(struct cs5535_mfgpt_timer *timer)
{
/* avoid races by clearing CMP1 and CMP2 unconditionally */
cs5535_mfgpt_write(timer, MFGPT_REG_SETUP,
(uint16_t) ~MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP1 |
MFGPT_SETUP_CMP2);
}
static void start_timer(struct cs5535_mfgpt_timer *timer, uint16_t delta)
{
cs5535_mfgpt_write(timer, MFGPT_REG_CMP2, delta);
cs5535_mfgpt_write(timer, MFGPT_REG_COUNTER, 0);
cs5535_mfgpt_write(timer, MFGPT_REG_SETUP,
MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);
}
static void mfgpt_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
disable_timer(cs5535_event_clock);
if (mode == CLOCK_EVT_MODE_PERIODIC)
start_timer(cs5535_event_clock, MFGPT_PERIODIC);
cs5535_tick_mode = mode;
}
static int mfgpt_next_event(unsigned long delta, struct clock_event_device *evt)
{
start_timer(cs5535_event_clock, delta);
return 0;
}
static struct clock_event_device cs5535_clockevent = {
.name = DRV_NAME,
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
.set_mode = mfgpt_set_mode,
.set_next_event = mfgpt_next_event,
.rating = 250,
.shift = 32
};
static irqreturn_t mfgpt_tick(int irq, void *dev_id)
{
uint16_t val = cs5535_mfgpt_read(cs5535_event_clock, MFGPT_REG_SETUP);
/* See if the interrupt was for us */
if (!(val & (MFGPT_SETUP_SETUP | MFGPT_SETUP_CMP2 | MFGPT_SETUP_CMP1)))
return IRQ_NONE;
/* Turn off the clock (and clear the event) */
disable_timer(cs5535_event_clock);
if (cs5535_tick_mode == CLOCK_EVT_MODE_SHUTDOWN)
return IRQ_HANDLED;
/* Clear the counter */
cs5535_mfgpt_write(cs5535_event_clock, MFGPT_REG_COUNTER, 0);
/* Restart the clock in periodic mode */
if (cs5535_tick_mode == CLOCK_EVT_MODE_PERIODIC)
cs5535_mfgpt_write(cs5535_event_clock, MFGPT_REG_SETUP,
MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);
cs5535_clockevent.event_handler(&cs5535_clockevent);
return IRQ_HANDLED;
}
static struct irqaction mfgptirq = {
.handler = mfgpt_tick,
.flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER | IRQF_SHARED,
.name = DRV_NAME,
};
static int __init cs5535_mfgpt_init(void)
{
struct cs5535_mfgpt_timer *timer;
int ret;
uint16_t val;
timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING);
if (!timer) {
printk(KERN_ERR DRV_NAME ": Could not allocate MFPGT timer\n");
return -ENODEV;
}
cs5535_event_clock = timer;
/* Set up the IRQ on the MFGPT side */
if (cs5535_mfgpt_setup_irq(timer, MFGPT_CMP2, &timer_irq)) {
printk(KERN_ERR DRV_NAME ": Could not set up IRQ %d\n",
timer_irq);
goto err_timer;
}
/* And register it with the kernel */
ret = setup_irq(timer_irq, &mfgptirq);
if (ret) {
printk(KERN_ERR DRV_NAME ": Unable to set up the interrupt.\n");
goto err_irq;
}
/* Set the clock scale and enable the event mode for CMP2 */
val = MFGPT_SCALE | (3 << 8);
cs5535_mfgpt_write(cs5535_event_clock, MFGPT_REG_SETUP, val);
/* Set up the clock event */
cs5535_clockevent.mult = div_sc(MFGPT_HZ, NSEC_PER_SEC,
cs5535_clockevent.shift);
cs5535_clockevent.min_delta_ns = clockevent_delta2ns(0xF,
&cs5535_clockevent);
cs5535_clockevent.max_delta_ns = clockevent_delta2ns(0xFFFE,
&cs5535_clockevent);
printk(KERN_INFO DRV_NAME
": Registering MFGPT timer as a clock event, using IRQ %d\n",
timer_irq);
clockevents_register_device(&cs5535_clockevent);
return 0;
err_irq:
cs5535_mfgpt_release_irq(cs5535_event_clock, MFGPT_CMP2, &timer_irq);
err_timer:
cs5535_mfgpt_free_timer(cs5535_event_clock);
printk(KERN_ERR DRV_NAME ": Unable to set up the MFGPT clock source\n");
return -EIO;
}
module_init(cs5535_mfgpt_init);
MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
MODULE_DESCRIPTION("CS5535/CS5536 MFGPT clock event driver");
MODULE_LICENSE("GPL");
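The divisor table near the top of this file follows directly from the 16-bit MFGPT counter: at 32768/divisor Hz the maximum interval is the full 65536-count span, and the listed minimum corresponds to 16 counts, matching (to within one count) the 0xF floor programmed into min_delta_ns above. A standalone reproduction of the table in plain userspace C; the 16-count minimum is an inference from the table itself:

#include <stdio.h>

int main(void)
{
	for (int div = 1; div <= 256; div <<= 1) {
		double hz = 32768.0 / div;

		/* min = 16 counts, max = full 16-bit span of 65536 counts */
		printf("div %3d: %5.0f Hz  min %.11f s  max %7.3f s\n",
		       div, hz, 16.0 / hz, 65536.0 / hz);
	}
	return 0;
}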

drivers/clocksource/cyclone.c

@@ -0,0 +1,113 @@
#include <linux/clocksource.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/mach_timer.h>
#define CYCLONE_CBAR_ADDR 0xFEB00CD0 /* base address ptr */
#define CYCLONE_PMCC_OFFSET 0x51A0 /* offset to control register */
#define CYCLONE_MPCS_OFFSET 0x51A8 /* offset to select register */
#define CYCLONE_MPMC_OFFSET 0x51D0 /* offset to count register */
#define CYCLONE_TIMER_FREQ 99780000 /* 100 MHz, but not really */
#define CYCLONE_TIMER_MASK CLOCKSOURCE_MASK(32) /* 32 bit mask */
int use_cyclone = 0;
static void __iomem *cyclone_ptr;
static cycle_t read_cyclone(struct clocksource *cs)
{
return (cycle_t)readl(cyclone_ptr);
}
static struct clocksource clocksource_cyclone = {
.name = "cyclone",
.rating = 250,
.read = read_cyclone,
.mask = CYCLONE_TIMER_MASK,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static int __init init_cyclone_clocksource(void)
{
unsigned long base; /* saved value from CBAR */
unsigned long offset;
u32 __iomem* volatile cyclone_timer; /* Cyclone MPMC0 register */
u32 __iomem* reg;
int i;
/* make sure we're on a summit box: */
if (!use_cyclone)
return -ENODEV;
printk(KERN_INFO "Summit chipset: Starting Cyclone Counter.\n");
/* find base address: */
offset = CYCLONE_CBAR_ADDR;
reg = ioremap_nocache(offset, sizeof(reg));
if (!reg) {
printk(KERN_ERR "Summit chipset: Could not find valid CBAR register.\n");
return -ENODEV;
}
/* even on 64bit systems, this is only 32bits: */
base = readl(reg);
iounmap(reg);
if (!base) {
printk(KERN_ERR "Summit chipset: Could not find valid CBAR value.\n");
return -ENODEV;
}
/* setup PMCC: */
offset = base + CYCLONE_PMCC_OFFSET;
reg = ioremap_nocache(offset, sizeof(reg));
if (!reg) {
printk(KERN_ERR "Summit chipset: Could not find valid PMCC register.\n");
return -ENODEV;
}
writel(0x00000001,reg);
iounmap(reg);
/* setup MPCS: */
offset = base + CYCLONE_MPCS_OFFSET;
reg = ioremap_nocache(offset, sizeof(reg));
if (!reg) {
printk(KERN_ERR "Summit chipset: Could not find valid MPCS register.\n");
return -ENODEV;
}
writel(0x00000001,reg);
iounmap(reg);
/* map in cyclone_timer: */
offset = base + CYCLONE_MPMC_OFFSET;
cyclone_timer = ioremap_nocache(offset, sizeof(u64));
if (!cyclone_timer) {
printk(KERN_ERR "Summit chipset: Could not find valid MPMC register.\n");
return -ENODEV;
}
/* quick test to make sure it's ticking: */
for (i = 0; i < 3; i++){
u32 old = readl(cyclone_timer);
int stall = 100;
while (stall--)
barrier();
if (readl(cyclone_timer) == old) {
printk(KERN_ERR "Summit chipset: Counter not counting! DISABLED\n");
iounmap(cyclone_timer);
cyclone_timer = NULL;
return -ENODEV;
}
}
cyclone_ptr = cyclone_timer;
return clocksource_register_hz(&clocksource_cyclone,
CYCLONE_TIMER_FREQ);
}
arch_initcall(init_cyclone_clocksource);

drivers/clocksource/dw_apb_timer.c

@@ -0,0 +1,401 @@
/*
* (C) Copyright 2009 Intel Corporation
* Author: Jacob Pan (jacob.jun.pan@intel.com)
*
* Shared with ARM platforms, Jamie Iles, Picochip 2011
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Support for the Synopsys DesignWare APB Timers.
*/
#include <linux/dw_apb_timer.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/slab.h>
#define APBT_MIN_PERIOD 4
#define APBT_MIN_DELTA_USEC 200
#define APBTMR_N_LOAD_COUNT 0x00
#define APBTMR_N_CURRENT_VALUE 0x04
#define APBTMR_N_CONTROL 0x08
#define APBTMR_N_EOI 0x0c
#define APBTMR_N_INT_STATUS 0x10
#define APBTMRS_INT_STATUS 0xa0
#define APBTMRS_EOI 0xa4
#define APBTMRS_RAW_INT_STATUS 0xa8
#define APBTMRS_COMP_VERSION 0xac
#define APBTMR_CONTROL_ENABLE (1 << 0)
/* 1: periodic, 0:free running. */
#define APBTMR_CONTROL_MODE_PERIODIC (1 << 1)
#define APBTMR_CONTROL_INT (1 << 2)
static inline struct dw_apb_clock_event_device *
ced_to_dw_apb_ced(struct clock_event_device *evt)
{
return container_of(evt, struct dw_apb_clock_event_device, ced);
}
static inline struct dw_apb_clocksource *
clocksource_to_dw_apb_clocksource(struct clocksource *cs)
{
return container_of(cs, struct dw_apb_clocksource, cs);
}
static unsigned long apbt_readl(struct dw_apb_timer *timer, unsigned long offs)
{
return readl(timer->base + offs);
}
static void apbt_writel(struct dw_apb_timer *timer, unsigned long val,
unsigned long offs)
{
writel(val, timer->base + offs);
}
static void apbt_disable_int(struct dw_apb_timer *timer)
{
unsigned long ctrl = apbt_readl(timer, APBTMR_N_CONTROL);
ctrl |= APBTMR_CONTROL_INT;
apbt_writel(timer, ctrl, APBTMR_N_CONTROL);
}
/**
* dw_apb_clockevent_pause() - stop the clock_event_device from running
*
* @dw_ced: The APB clock to stop generating events.
*/
void dw_apb_clockevent_pause(struct dw_apb_clock_event_device *dw_ced)
{
disable_irq(dw_ced->timer.irq);
apbt_disable_int(&dw_ced->timer);
}
static void apbt_eoi(struct dw_apb_timer *timer)
{
apbt_readl(timer, APBTMR_N_EOI);
}
static irqreturn_t dw_apb_clockevent_irq(int irq, void *data)
{
struct clock_event_device *evt = data;
struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt);
if (!evt->event_handler) {
pr_info("Spurious APBT timer interrupt %d", irq);
return IRQ_NONE;
}
if (dw_ced->eoi)
dw_ced->eoi(&dw_ced->timer);
evt->event_handler(evt);
return IRQ_HANDLED;
}
static void apbt_enable_int(struct dw_apb_timer *timer)
{
unsigned long ctrl = apbt_readl(timer, APBTMR_N_CONTROL);
/* clear pending intr */
apbt_readl(timer, APBTMR_N_EOI);
ctrl &= ~APBTMR_CONTROL_INT;
apbt_writel(timer, ctrl, APBTMR_N_CONTROL);
}
static void apbt_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
unsigned long ctrl;
unsigned long period;
struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt);
pr_debug("%s CPU %d mode=%d\n", __func__, first_cpu(*evt->cpumask),
mode);
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
period = DIV_ROUND_UP(dw_ced->timer.freq, HZ);
ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL);
ctrl |= APBTMR_CONTROL_MODE_PERIODIC;
apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
/*
* DW APB databook p. 46: the timer has to be disabled before
* loading the counter, or it may cause a sync problem.
*/
ctrl &= ~APBTMR_CONTROL_ENABLE;
apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
udelay(1);
pr_debug("Setting clock period %lu for HZ %d\n", period, HZ);
apbt_writel(&dw_ced->timer, period, APBTMR_N_LOAD_COUNT);
ctrl |= APBTMR_CONTROL_ENABLE;
apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
break;
case CLOCK_EVT_MODE_ONESHOT:
ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL);
/*
* Set free-running mode. This lets the timer reload the maximum
* timeout, which gives us time (3 min on a 25 MHz clock) to rearm
* the next event, thereby emulating one-shot mode.
*/
ctrl &= ~APBTMR_CONTROL_ENABLE;
ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC;
apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
/* write again to set free running mode */
apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
/*
* DW APB p. 46, load counter with all 1s before starting free
* running mode.
*/
apbt_writel(&dw_ced->timer, ~0, APBTMR_N_LOAD_COUNT);
ctrl &= ~APBTMR_CONTROL_INT;
ctrl |= APBTMR_CONTROL_ENABLE;
apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
break;
case CLOCK_EVT_MODE_UNUSED:
case CLOCK_EVT_MODE_SHUTDOWN:
ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL);
ctrl &= ~APBTMR_CONTROL_ENABLE;
apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
break;
case CLOCK_EVT_MODE_RESUME:
apbt_enable_int(&dw_ced->timer);
break;
}
}
static int apbt_next_event(unsigned long delta,
struct clock_event_device *evt)
{
unsigned long ctrl;
struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt);
/* Disable timer */
ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL);
ctrl &= ~APBTMR_CONTROL_ENABLE;
apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
/* write new count */
apbt_writel(&dw_ced->timer, delta, APBTMR_N_LOAD_COUNT);
ctrl |= APBTMR_CONTROL_ENABLE;
apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
return 0;
}
/**
* dw_apb_clockevent_init() - use an APB timer as a clock_event_device
*
* @cpu: The CPU the events will be targeted at.
* @name: The name used for the timer and the IRQ for it.
* @rating: The rating to give the timer.
* @base: I/O base for the timer registers.
* @irq: The interrupt number to use for the timer.
* @freq: The frequency that the timer counts at.
*
* This creates a clock_event_device for use with the generic clock layer
* but does not start and register it. That should be done with
* dw_apb_clockevent_register() as the next step. If this is the first time
* it has been called for a timer then the IRQ will be requested; if not, it
* will just be enabled, which allows CPU hotplug to avoid repeatedly
* requesting and releasing the IRQ.
*/
struct dw_apb_clock_event_device *
dw_apb_clockevent_init(int cpu, const char *name, unsigned rating,
void __iomem *base, int irq, unsigned long freq)
{
struct dw_apb_clock_event_device *dw_ced =
kzalloc(sizeof(*dw_ced), GFP_KERNEL);
int err;
if (!dw_ced)
return NULL;
dw_ced->timer.base = base;
dw_ced->timer.irq = irq;
dw_ced->timer.freq = freq;
clockevents_calc_mult_shift(&dw_ced->ced, freq, APBT_MIN_PERIOD);
dw_ced->ced.max_delta_ns = clockevent_delta2ns(0x7fffffff,
&dw_ced->ced);
dw_ced->ced.min_delta_ns = clockevent_delta2ns(5000, &dw_ced->ced);
dw_ced->ced.cpumask = cpumask_of(cpu);
dw_ced->ced.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
dw_ced->ced.set_mode = apbt_set_mode;
dw_ced->ced.set_next_event = apbt_next_event;
dw_ced->ced.irq = dw_ced->timer.irq;
dw_ced->ced.rating = rating;
dw_ced->ced.name = name;
dw_ced->irqaction.name = dw_ced->ced.name;
dw_ced->irqaction.handler = dw_apb_clockevent_irq;
dw_ced->irqaction.dev_id = &dw_ced->ced;
dw_ced->irqaction.irq = irq;
dw_ced->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL |
IRQF_NOBALANCING |
IRQF_DISABLED;
dw_ced->eoi = apbt_eoi;
err = setup_irq(irq, &dw_ced->irqaction);
if (err) {
pr_err("failed to request timer irq\n");
kfree(dw_ced);
dw_ced = NULL;
}
return dw_ced;
}
/**
* dw_apb_clockevent_resume() - resume a clock that has been paused.
*
* @dw_ced: The APB clock to resume.
*/
void dw_apb_clockevent_resume(struct dw_apb_clock_event_device *dw_ced)
{
enable_irq(dw_ced->timer.irq);
}
/**
* dw_apb_clockevent_stop() - stop the clock_event_device and release the IRQ.
*
* @dw_ced: The APB clock to stop generating the events.
*/
void dw_apb_clockevent_stop(struct dw_apb_clock_event_device *dw_ced)
{
free_irq(dw_ced->timer.irq, &dw_ced->ced);
}
/**
* dw_apb_clockevent_register() - register the clock with the generic layer
*
* @dw_ced: The APB clock to register as a clock_event_device.
*/
void dw_apb_clockevent_register(struct dw_apb_clock_event_device *dw_ced)
{
apbt_writel(&dw_ced->timer, 0, APBTMR_N_CONTROL);
clockevents_register_device(&dw_ced->ced);
apbt_enable_int(&dw_ced->timer);
}
/**
* dw_apb_clocksource_start() - start the clocksource counting.
*
* @dw_cs: The clocksource to start.
*
* This is used to start the clocksource before registration and can be used
* to enable calibration of timers.
*/
void dw_apb_clocksource_start(struct dw_apb_clocksource *dw_cs)
{
/*
* Start counting down from 0xffff_ffff. This is done by toggling
* the enable bit and then loading the initial load count with ~0.
*/
unsigned long ctrl = apbt_readl(&dw_cs->timer, APBTMR_N_CONTROL);
ctrl &= ~APBTMR_CONTROL_ENABLE;
apbt_writel(&dw_cs->timer, ctrl, APBTMR_N_CONTROL);
apbt_writel(&dw_cs->timer, ~0, APBTMR_N_LOAD_COUNT);
/* enable, mask interrupt */
ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC;
ctrl |= (APBTMR_CONTROL_ENABLE | APBTMR_CONTROL_INT);
apbt_writel(&dw_cs->timer, ctrl, APBTMR_N_CONTROL);
/* read it once to get cached counter value initialized */
dw_apb_clocksource_read(dw_cs);
}
static cycle_t __apbt_read_clocksource(struct clocksource *cs)
{
unsigned long current_count;
struct dw_apb_clocksource *dw_cs =
clocksource_to_dw_apb_clocksource(cs);
current_count = apbt_readl(&dw_cs->timer, APBTMR_N_CURRENT_VALUE);
return (cycle_t)~current_count;
}
static void apbt_restart_clocksource(struct clocksource *cs)
{
struct dw_apb_clocksource *dw_cs =
clocksource_to_dw_apb_clocksource(cs);
dw_apb_clocksource_start(dw_cs);
}
/**
* dw_apb_clocksource_init() - use an APB timer as a clocksource.
*
* @rating: The rating to give the clocksource.
* @name: The name for the clocksource.
* @base: The I/O base for the timer registers.
* @freq: The frequency that the timer counts at.
*
* This creates a clocksource using an APB timer but does not yet register it
* with the clocksource system. This should be done with
* dw_apb_clocksource_register() as the next step.
*/
struct dw_apb_clocksource *
dw_apb_clocksource_init(unsigned rating, const char *name, void __iomem *base,
unsigned long freq)
{
struct dw_apb_clocksource *dw_cs = kzalloc(sizeof(*dw_cs), GFP_KERNEL);
if (!dw_cs)
return NULL;
dw_cs->timer.base = base;
dw_cs->timer.freq = freq;
dw_cs->cs.name = name;
dw_cs->cs.rating = rating;
dw_cs->cs.read = __apbt_read_clocksource;
dw_cs->cs.mask = CLOCKSOURCE_MASK(32);
dw_cs->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS;
dw_cs->cs.resume = apbt_restart_clocksource;
return dw_cs;
}
/**
* dw_apb_clocksource_register() - register the APB clocksource.
*
* @dw_cs: The clocksource to register.
*/
void dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs)
{
clocksource_register_hz(&dw_cs->cs, dw_cs->timer.freq);
}
/**
* dw_apb_clocksource_read() - read the current value of a clocksource.
*
* @dw_cs: The clocksource to read.
*/
cycle_t dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs)
{
return (cycle_t)~apbt_readl(&dw_cs->timer, APBTMR_N_CURRENT_VALUE);
}
/**
* dw_apb_clocksource_unregister() - unregister and free a clocksource.
*
* @dw_cs: The clocksource to unregister/free.
*/
void dw_apb_clocksource_unregister(struct dw_apb_clocksource *dw_cs)
{
clocksource_unregister(&dw_cs->cs);
kfree(dw_cs);
}
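Taken together, the exported functions above give platform code a short setup recipe: init and register a clock event device, then init, start and register a clocksource. A hypothetical caller might look roughly like this; it is a sketch only, with placeholder base addresses, IRQ number, 25 MHz rate and ratings, and no error handling:

static void __init example_dw_timer_init(void __iomem *ce_base,
					 void __iomem *cs_base, int irq)
{
	struct dw_apb_clock_event_device *ced;
	struct dw_apb_clocksource *cs;

	/* timer 0 as the clock event device for CPU 0 */
	ced = dw_apb_clockevent_init(0, "timer0", 300, ce_base, irq, 25000000);
	if (ced)
		dw_apb_clockevent_register(ced);

	/* timer 1 as a free-running clocksource */
	cs = dw_apb_clocksource_init(300, "timer1", cs_base, 25000000);
	if (cs) {
		dw_apb_clocksource_start(cs);
		dw_apb_clocksource_register(cs);
	}
}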

drivers/clocksource/i8253.c

@@ -0,0 +1,186 @@
/*
* i8253 PIT clocksource
*/
#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/timex.h>
#include <linux/module.h>
#include <linux/i8253.h>
#include <linux/smp.h>
/*
* Protects access to I/O ports
*
* 0040-0043 : timer0, i8253 / i8254
* 0061-0061 : NMI Control Register which contains two speaker control bits.
*/
DEFINE_RAW_SPINLOCK(i8253_lock);
EXPORT_SYMBOL(i8253_lock);
#ifdef CONFIG_CLKSRC_I8253
/*
* Since the PIT overflows every tick, it's not very useful
* to just read by itself. So use jiffies to emulate a free
* running counter:
*/
static cycle_t i8253_read(struct clocksource *cs)
{
static int old_count;
static u32 old_jifs;
unsigned long flags;
int count;
u32 jifs;
raw_spin_lock_irqsave(&i8253_lock, flags);
/*
* Although our caller may have the read side of xtime_lock,
* this is now a seqlock, and we are cheating in this routine
* by having side effects on state that we cannot undo if
* there is a collision on the seqlock and our caller has to
* retry. (Namely, old_jifs and old_count.) So we must treat
* jiffies as volatile despite the lock. We read jiffies
* before latching the timer count to guarantee that although
* the jiffies value might be older than the count (that is,
* the counter may underflow between the last point where
* jiffies was incremented and the point where we latch the
* count), it cannot be newer.
*/
jifs = jiffies;
outb_p(0x00, PIT_MODE); /* latch the count ASAP */
count = inb_p(PIT_CH0); /* read the latched count */
count |= inb_p(PIT_CH0) << 8;
/* VIA686a test code... reset the latch if count > max + 1 */
if (count > PIT_LATCH) {
outb_p(0x34, PIT_MODE);
outb_p(PIT_LATCH & 0xff, PIT_CH0);
outb_p(PIT_LATCH >> 8, PIT_CH0);
count = PIT_LATCH - 1;
}
/*
* It's possible for count to appear to go the wrong way for a
* couple of reasons:
*
* 1. The timer counter underflows, but we haven't handled the
* resulting interrupt and incremented jiffies yet.
* 2. Hardware problem with the timer, not giving us continuous time,
* the counter does small "jumps" upwards on some Pentium systems,
* (see c't 95/10 page 335 for Neptun bug.)
*
* Previous attempts to handle these cases intelligently were
* buggy, so we just do the simple thing now.
*/
if (count > old_count && jifs == old_jifs)
count = old_count;
old_count = count;
old_jifs = jifs;
raw_spin_unlock_irqrestore(&i8253_lock, flags);
count = (PIT_LATCH - 1) - count;
return (cycle_t)(jifs * PIT_LATCH) + count;
}
static struct clocksource i8253_cs = {
.name = "pit",
.rating = 110,
.read = i8253_read,
.mask = CLOCKSOURCE_MASK(32),
};
int __init clocksource_i8253_init(void)
{
return clocksource_register_hz(&i8253_cs, PIT_TICK_RATE);
}
#endif
#ifdef CONFIG_CLKEVT_I8253
/*
* Initialize the PIT timer.
*
* This is also called after resume to bring the PIT into operation again.
*/
static void init_pit_timer(enum clock_event_mode mode,
struct clock_event_device *evt)
{
raw_spin_lock(&i8253_lock);
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
/* binary, mode 2, LSB/MSB, ch 0 */
outb_p(0x34, PIT_MODE);
outb_p(PIT_LATCH & 0xff , PIT_CH0); /* LSB */
outb_p(PIT_LATCH >> 8 , PIT_CH0); /* MSB */
break;
case CLOCK_EVT_MODE_SHUTDOWN:
case CLOCK_EVT_MODE_UNUSED:
if (evt->mode == CLOCK_EVT_MODE_PERIODIC ||
evt->mode == CLOCK_EVT_MODE_ONESHOT) {
outb_p(0x30, PIT_MODE);
outb_p(0, PIT_CH0);
outb_p(0, PIT_CH0);
}
break;
case CLOCK_EVT_MODE_ONESHOT:
/* One shot setup */
outb_p(0x38, PIT_MODE);
break;
case CLOCK_EVT_MODE_RESUME:
/* Nothing to do here */
break;
}
raw_spin_unlock(&i8253_lock);
}
/*
* Program the next event in oneshot mode
*
* Delta is given in PIT ticks
*/
static int pit_next_event(unsigned long delta, struct clock_event_device *evt)
{
raw_spin_lock(&i8253_lock);
outb_p(delta & 0xff , PIT_CH0); /* LSB */
outb_p(delta >> 8 , PIT_CH0); /* MSB */
raw_spin_unlock(&i8253_lock);
return 0;
}
/*
* On UP the PIT can serve all of the possible timer functions. On SMP systems
* it can be solely used for the global tick.
*/
struct clock_event_device i8253_clockevent = {
.name = "pit",
.features = CLOCK_EVT_FEAT_PERIODIC,
.set_mode = init_pit_timer,
.set_next_event = pit_next_event,
};
/*
* Initialize the conversion factor and the min/max deltas of the clock event
* structure and register the clock event source with the framework.
*/
void __init clockevent_i8253_init(bool oneshot)
{
if (oneshot)
i8253_clockevent.features |= CLOCK_EVT_FEAT_ONESHOT;
/*
* Start pit with the boot cpu mask. x86 might make it global
* when it is used as broadcast device later.
*/
i8253_clockevent.cpumask = cpumask_of(smp_processor_id());
clockevents_config_and_register(&i8253_clockevent, PIT_TICK_RATE,
0xF, 0x7FFF);
}
#endif
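The i8253_read() trick above can be modeled in isolation: the 16-bit PIT reloads every tick, so a free-running value is synthesized as jiffies * PIT_LATCH plus the distance the down-counter has travelled inside the current tick. A standalone model in plain userspace C; the sampled jiffies and latched count are made-up values, and the latch assumes PIT_TICK_RATE = 1193182 with HZ = 100:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint32_t latch = (1193182 + 100 / 2) / 100;	/* PIT_LATCH */
	uint32_t jifs = 1000;		/* jiffies sampled before latching */
	uint32_t count = 5000;		/* latched 16-bit down-count */

	/* whole ticks so far, plus the distance into the current tick */
	uint64_t cycles = (uint64_t)jifs * latch + ((latch - 1) - count);

	printf("emulated free-running count: %llu\n",
	       (unsigned long long)cycles);
	return 0;
}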

drivers/clocksource/mmio.c

@@ -0,0 +1,73 @@
/*
* Generic MMIO clocksource support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/clocksource.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
struct clocksource_mmio {
void __iomem *reg;
struct clocksource clksrc;
};
static inline struct clocksource_mmio *to_mmio_clksrc(struct clocksource *c)
{
return container_of(c, struct clocksource_mmio, clksrc);
}
cycle_t clocksource_mmio_readl_up(struct clocksource *c)
{
return readl_relaxed(to_mmio_clksrc(c)->reg);
}
cycle_t clocksource_mmio_readl_down(struct clocksource *c)
{
return ~readl_relaxed(to_mmio_clksrc(c)->reg);
}
cycle_t clocksource_mmio_readw_up(struct clocksource *c)
{
return readw_relaxed(to_mmio_clksrc(c)->reg);
}
cycle_t clocksource_mmio_readw_down(struct clocksource *c)
{
return ~(unsigned)readw_relaxed(to_mmio_clksrc(c)->reg);
}
/**
* clocksource_mmio_init - Initialize a simple mmio based clocksource
* @base: Virtual address of the clock readout register
* @name: Name of the clocksource
* @hz: Frequency of the clocksource in Hz
* @rating: Rating of the clocksource
* @bits: Number of valid bits
* @read: One of clocksource_mmio_read*() above
*/
int __init clocksource_mmio_init(void __iomem *base, const char *name,
unsigned long hz, int rating, unsigned bits,
cycle_t (*read)(struct clocksource *))
{
struct clocksource_mmio *cs;
if (bits > 32 || bits < 16)
return -EINVAL;
cs = kzalloc(sizeof(struct clocksource_mmio), GFP_KERNEL);
if (!cs)
return -ENOMEM;
cs->reg = base;
cs->clksrc.name = name;
cs->clksrc.rating = rating;
cs->clksrc.read = read;
cs->clksrc.mask = CLOCKSOURCE_MASK(bits);
cs->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
return clocksource_register_hz(&cs->clksrc, hz);
}
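A typical caller of clocksource_mmio_init() is a platform's timer setup code. A hypothetical example; the mapped register, "example-timer" name, 24 MHz rate and rating of 200 are all placeholders:

static void __iomem *example_timer_base;	/* ioremapped elsewhere */

static int __init example_clocksource_init(void)
{
	/* 32-bit up-counter; a down-counting timer would pass
	 * clocksource_mmio_readl_down instead */
	return clocksource_mmio_init(example_timer_base, "example-timer",
				     24000000, 200, 32,
				     clocksource_mmio_readl_up);
}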

drivers/clocksource/scx200_hrt.c

@@ -0,0 +1,93 @@
/*
* Copyright (C) 2006 Jim Cromie
*
* This is a clocksource driver for the Geode SCx200's 1 or 27 MHz
* high-resolution timer. The Geode SC-1100 (at least) has a buggy
* time stamp counter (TSC), which loses time unless 'idle=poll' is
* given as a boot-arg. In its absence, the Generic Timekeeping code
* will detect and de-rate the bad TSC, allowing this timer to take
* over timekeeping duties.
*
* Based on work by John Stultz, and Ted Phelps (in a 2.6.12-rc6 patch)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*/
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/scx200.h>
#define NAME "scx200_hrt"
static int mhz27;
module_param(mhz27, int, 0); /* load time only */
MODULE_PARM_DESC(mhz27, "count at 27.0 MHz (default is 1.0 MHz)");
static int ppm;
module_param(ppm, int, 0); /* load time only */
MODULE_PARM_DESC(ppm, "+-adjust to actual XO freq (ppm)");
/* HiRes Timer configuration register address */
#define SCx200_TMCNFG_OFFSET (SCx200_TIMER_OFFSET + 5)
/* and config settings */
#define HR_TMEN (1 << 0) /* timer interrupt enable */
#define HR_TMCLKSEL (1 << 1) /* 1|0 counts at 27|1 MHz */
#define HR_TM27MPD (1 << 2) /* 1 turns off input clock (power-down) */
/* The base timer frequency, * 27 if selected */
#define HRT_FREQ 1000000
static cycle_t read_hrt(struct clocksource *cs)
{
/* Read the timer value */
return (cycle_t) inl(scx200_cb_base + SCx200_TIMER_OFFSET);
}
static struct clocksource cs_hrt = {
.name = "scx200_hrt",
.rating = 250,
.read = read_hrt,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
/* mult, shift are set based on mhz27 flag */
};
static int __init init_hrt_clocksource(void)
{
u32 freq;
/* Make sure scx200 has initialized the configuration block */
if (!scx200_cb_present())
return -ENODEV;
/* Reserve the timer's ISA io-region for ourselves */
if (!request_region(scx200_cb_base + SCx200_TIMER_OFFSET,
SCx200_TIMER_SIZE,
"NatSemi SCx200 High-Resolution Timer")) {
pr_warn("unable to lock timer region\n");
return -ENODEV;
}
/* write timer config */
outb(HR_TMEN | (mhz27 ? HR_TMCLKSEL : 0),
scx200_cb_base + SCx200_TMCNFG_OFFSET);
freq = (HRT_FREQ + ppm);
if (mhz27)
freq *= 27;
pr_info("enabling scx200 high-res timer (%s MHz +%d ppm)\n", mhz27 ? "27":"1", ppm);
return clocksource_register_hz(&cs_hrt, freq);
}
module_init(init_hrt_clocksource);
MODULE_AUTHOR("Jim Cromie <jim.cromie@gmail.com>");
MODULE_DESCRIPTION("clocksource on SCx200 HiRes Timer");
MODULE_LICENSE("GPL");
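Note how the two module parameters combine in init_hrt_clocksource(): ppm is added to the 1 MHz base rate before the optional *27 scaling. A standalone check in plain userspace C, with made-up parameter values mhz27=1 and ppm=5:

#include <stdio.h>

int main(void)
{
	int mhz27 = 1, ppm = 5;			/* hypothetical module params */
	unsigned int freq = 1000000 + ppm;	/* HRT_FREQ + ppm */

	if (mhz27)
		freq *= 27;
	printf("registered rate: %u Hz\n", freq);	/* 27000135 Hz */
	return 0;
}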

drivers/clocksource/sh_cmt.c

@@ -0,0 +1,744 @@
/*
* SuperH Timer Support - CMT
*
* Copyright (C) 2008 Magnus Damm
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pm_domain.h>
struct sh_cmt_priv {
void __iomem *mapbase;
struct clk *clk;
unsigned long width; /* 16 or 32 bit version of hardware block */
unsigned long overflow_bit;
unsigned long clear_bits;
struct irqaction irqaction;
struct platform_device *pdev;
unsigned long flags;
unsigned long match_value;
unsigned long next_match_value;
unsigned long max_match_value;
unsigned long rate;
spinlock_t lock;
struct clock_event_device ced;
struct clocksource cs;
unsigned long total_cycles;
};
static DEFINE_SPINLOCK(sh_cmt_lock);
#define CMSTR -1 /* shared register */
#define CMCSR 0 /* channel register */
#define CMCNT 1 /* channel register */
#define CMCOR 2 /* channel register */
static inline unsigned long sh_cmt_read(struct sh_cmt_priv *p, int reg_nr)
{
struct sh_timer_config *cfg = p->pdev->dev.platform_data;
void __iomem *base = p->mapbase;
unsigned long offs;
if (reg_nr == CMSTR) {
offs = 0;
base -= cfg->channel_offset;
} else
offs = reg_nr;
if (p->width == 16)
offs <<= 1;
else {
offs <<= 2;
if ((reg_nr == CMCNT) || (reg_nr == CMCOR))
return ioread32(base + offs);
}
return ioread16(base + offs);
}
static inline void sh_cmt_write(struct sh_cmt_priv *p, int reg_nr,
unsigned long value)
{
struct sh_timer_config *cfg = p->pdev->dev.platform_data;
void __iomem *base = p->mapbase;
unsigned long offs;
if (reg_nr == CMSTR) {
offs = 0;
base -= cfg->channel_offset;
} else
offs = reg_nr;
if (p->width == 16)
offs <<= 1;
else {
offs <<= 2;
if ((reg_nr == CMCNT) || (reg_nr == CMCOR)) {
iowrite32(value, base + offs);
return;
}
}
iowrite16(value, base + offs);
}
static unsigned long sh_cmt_get_counter(struct sh_cmt_priv *p,
int *has_wrapped)
{
unsigned long v1, v2, v3;
int o1, o2;
o1 = sh_cmt_read(p, CMCSR) & p->overflow_bit;
/* Make sure the timer value is stable. Stolen from acpi_pm.c */
do {
o2 = o1;
v1 = sh_cmt_read(p, CMCNT);
v2 = sh_cmt_read(p, CMCNT);
v3 = sh_cmt_read(p, CMCNT);
o1 = sh_cmt_read(p, CMCSR) & p->overflow_bit;
} while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
|| (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));
*has_wrapped = o1;
return v2;
}
static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
{
struct sh_timer_config *cfg = p->pdev->dev.platform_data;
unsigned long flags, value;
/* start stop register shared by multiple timer channels */
spin_lock_irqsave(&sh_cmt_lock, flags);
value = sh_cmt_read(p, CMSTR);
if (start)
value |= 1 << cfg->timer_bit;
else
value &= ~(1 << cfg->timer_bit);
sh_cmt_write(p, CMSTR, value);
spin_unlock_irqrestore(&sh_cmt_lock, flags);
}
static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
{
int k, ret;
/* enable clock */
ret = clk_enable(p->clk);
if (ret) {
dev_err(&p->pdev->dev, "cannot enable clock\n");
goto err0;
}
/* make sure channel is disabled */
sh_cmt_start_stop_ch(p, 0);
/* configure channel, periodic mode and maximum timeout */
if (p->width == 16) {
*rate = clk_get_rate(p->clk) / 512;
sh_cmt_write(p, CMCSR, 0x43);
} else {
*rate = clk_get_rate(p->clk) / 8;
sh_cmt_write(p, CMCSR, 0x01a4);
}
sh_cmt_write(p, CMCOR, 0xffffffff);
sh_cmt_write(p, CMCNT, 0);
/*
* According to the sh73a0 user's manual, as CMCNT can be operated
* only by the RCLK (Pseudo 32 KHz), there's one restriction on
* modifying CMCNT register; two RCLK cycles are necessary before
* this register is either read or any modification of the value
* it holds is reflected in the LSI's actual operation.
*
* While at it, we're supposed to clear out the CMCNT as of this
* moment, so make sure it's processed properly here. This will
* take RCLKx2 at maximum.
*/
for (k = 0; k < 100; k++) {
if (!sh_cmt_read(p, CMCNT))
break;
udelay(1);
}
if (sh_cmt_read(p, CMCNT)) {
dev_err(&p->pdev->dev, "cannot clear CMCNT\n");
ret = -ETIMEDOUT;
goto err1;
}
/* enable channel */
sh_cmt_start_stop_ch(p, 1);
return 0;
err1:
/* stop clock */
clk_disable(p->clk);
err0:
return ret;
}
static void sh_cmt_disable(struct sh_cmt_priv *p)
{
/* disable channel */
sh_cmt_start_stop_ch(p, 0);
/* disable interrupts in CMT block */
sh_cmt_write(p, CMCSR, 0);
/* stop clock */
clk_disable(p->clk);
}
/* private flags */
#define FLAG_CLOCKEVENT (1 << 0)
#define FLAG_CLOCKSOURCE (1 << 1)
#define FLAG_REPROGRAM (1 << 2)
#define FLAG_SKIPEVENT (1 << 3)
#define FLAG_IRQCONTEXT (1 << 4)
static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,
int absolute)
{
unsigned long new_match;
unsigned long value = p->next_match_value;
unsigned long delay = 0;
unsigned long now = 0;
int has_wrapped;
now = sh_cmt_get_counter(p, &has_wrapped);
p->flags |= FLAG_REPROGRAM; /* force reprogram */
if (has_wrapped) {
/* we're competing with the interrupt handler.
* -> let the interrupt handler reprogram the timer.
* -> interrupt number two handles the event.
*/
p->flags |= FLAG_SKIPEVENT;
return;
}
if (absolute)
now = 0;
do {
/* reprogram the timer hardware,
* but don't save the new match value yet.
*/
new_match = now + value + delay;
if (new_match > p->max_match_value)
new_match = p->max_match_value;
sh_cmt_write(p, CMCOR, new_match);
now = sh_cmt_get_counter(p, &has_wrapped);
if (has_wrapped && (new_match > p->match_value)) {
/* we are changing to a greater match value,
* so this wrap must be caused by the counter
* matching the old value.
* -> first interrupt reprograms the timer.
* -> interrupt number two handles the event.
*/
p->flags |= FLAG_SKIPEVENT;
break;
}
if (has_wrapped) {
/* we are changing to a smaller match value,
* so the wrap must be caused by the counter
* matching the new value.
* -> save programmed match value.
* -> let isr handle the event.
*/
p->match_value = new_match;
break;
}
/* be safe: verify hardware settings */
if (now < new_match) {
/* timer value is below match value, all good.
* this makes sure we won't miss any match events.
* -> save programmed match value.
* -> let isr handle the event.
*/
p->match_value = new_match;
break;
}
/* the counter has reached a value greater
* than our new match value, and since the
* has_wrapped flag isn't set we must have
* programmed an event that was too close.
* -> increase delay and retry.
*/
if (delay)
delay <<= 1;
else
delay = 1;
if (!delay)
dev_warn(&p->pdev->dev, "too long delay\n");
} while (delay);
}
static void __sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
{
if (delta > p->max_match_value)
dev_warn(&p->pdev->dev, "delta out of range\n");
p->next_match_value = delta;
sh_cmt_clock_event_program_verify(p, 0);
}
static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
{
unsigned long flags;
spin_lock_irqsave(&p->lock, flags);
__sh_cmt_set_next(p, delta);
spin_unlock_irqrestore(&p->lock, flags);
}
static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
{
struct sh_cmt_priv *p = dev_id;
/* clear flags */
sh_cmt_write(p, CMCSR, sh_cmt_read(p, CMCSR) & p->clear_bits);
/* update the clock source counter to begin with, if enabled;
* the wrap flag should be cleared by the timer-specific
* ISR before we end up here.
*/
if (p->flags & FLAG_CLOCKSOURCE)
p->total_cycles += p->match_value + 1;
if (!(p->flags & FLAG_REPROGRAM))
p->next_match_value = p->max_match_value;
p->flags |= FLAG_IRQCONTEXT;
if (p->flags & FLAG_CLOCKEVENT) {
if (!(p->flags & FLAG_SKIPEVENT)) {
if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT) {
p->next_match_value = p->max_match_value;
p->flags |= FLAG_REPROGRAM;
}
p->ced.event_handler(&p->ced);
}
}
p->flags &= ~FLAG_SKIPEVENT;
if (p->flags & FLAG_REPROGRAM) {
p->flags &= ~FLAG_REPROGRAM;
sh_cmt_clock_event_program_verify(p, 1);
if (p->flags & FLAG_CLOCKEVENT)
if ((p->ced.mode == CLOCK_EVT_MODE_SHUTDOWN)
|| (p->match_value == p->next_match_value))
p->flags &= ~FLAG_REPROGRAM;
}
p->flags &= ~FLAG_IRQCONTEXT;
return IRQ_HANDLED;
}
static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag)
{
int ret = 0;
unsigned long flags;
spin_lock_irqsave(&p->lock, flags);
if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
ret = sh_cmt_enable(p, &p->rate);
if (ret)
goto out;
p->flags |= flag;
/* setup timeout if no clockevent */
if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT)))
__sh_cmt_set_next(p, p->max_match_value);
out:
spin_unlock_irqrestore(&p->lock, flags);
return ret;
}
static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag)
{
unsigned long flags;
unsigned long f;
spin_lock_irqsave(&p->lock, flags);
f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
p->flags &= ~flag;
if (f && !(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
sh_cmt_disable(p);
/* adjust the timeout to maximum if only clocksource left */
if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE))
__sh_cmt_set_next(p, p->max_match_value);
spin_unlock_irqrestore(&p->lock, flags);
}
static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs)
{
return container_of(cs, struct sh_cmt_priv, cs);
}
static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
{
struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
unsigned long flags, raw;
unsigned long value;
int has_wrapped;
spin_lock_irqsave(&p->lock, flags);
value = p->total_cycles;
raw = sh_cmt_get_counter(p, &has_wrapped);
if (unlikely(has_wrapped))
raw += p->match_value + 1;
spin_unlock_irqrestore(&p->lock, flags);
return value + raw;
}
static int sh_cmt_clocksource_enable(struct clocksource *cs)
{
int ret;
struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
p->total_cycles = 0;
ret = sh_cmt_start(p, FLAG_CLOCKSOURCE);
if (!ret)
__clocksource_updatefreq_hz(cs, p->rate);
return ret;
}
static void sh_cmt_clocksource_disable(struct clocksource *cs)
{
sh_cmt_stop(cs_to_sh_cmt(cs), FLAG_CLOCKSOURCE);
}
static void sh_cmt_clocksource_resume(struct clocksource *cs)
{
sh_cmt_start(cs_to_sh_cmt(cs), FLAG_CLOCKSOURCE);
}
static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
char *name, unsigned long rating)
{
struct clocksource *cs = &p->cs;
cs->name = name;
cs->rating = rating;
cs->read = sh_cmt_clocksource_read;
cs->enable = sh_cmt_clocksource_enable;
cs->disable = sh_cmt_clocksource_disable;
cs->suspend = sh_cmt_clocksource_disable;
cs->resume = sh_cmt_clocksource_resume;
cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
dev_info(&p->pdev->dev, "used as clock source\n");
/* Register with dummy 1 Hz value, gets updated in ->enable() */
clocksource_register_hz(cs, 1);
return 0;
}
static struct sh_cmt_priv *ced_to_sh_cmt(struct clock_event_device *ced)
{
return container_of(ced, struct sh_cmt_priv, ced);
}
static void sh_cmt_clock_event_start(struct sh_cmt_priv *p, int periodic)
{
struct clock_event_device *ced = &p->ced;
sh_cmt_start(p, FLAG_CLOCKEVENT);
/* TODO: calculate good shift from rate and counter bit width */
ced->shift = 32;
ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift);
ced->max_delta_ns = clockevent_delta2ns(p->max_match_value, ced);
ced->min_delta_ns = clockevent_delta2ns(0x1f, ced);
if (periodic)
sh_cmt_set_next(p, ((p->rate + HZ/2) / HZ) - 1);
else
sh_cmt_set_next(p, p->max_match_value);
}
static void sh_cmt_clock_event_mode(enum clock_event_mode mode,
struct clock_event_device *ced)
{
struct sh_cmt_priv *p = ced_to_sh_cmt(ced);
/* deal with old setting first */
switch (ced->mode) {
case CLOCK_EVT_MODE_PERIODIC:
case CLOCK_EVT_MODE_ONESHOT:
sh_cmt_stop(p, FLAG_CLOCKEVENT);
break;
default:
break;
}
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
dev_info(&p->pdev->dev, "used for periodic clock events\n");
sh_cmt_clock_event_start(p, 1);
break;
case CLOCK_EVT_MODE_ONESHOT:
dev_info(&p->pdev->dev, "used for oneshot clock events\n");
sh_cmt_clock_event_start(p, 0);
break;
case CLOCK_EVT_MODE_SHUTDOWN:
case CLOCK_EVT_MODE_UNUSED:
sh_cmt_stop(p, FLAG_CLOCKEVENT);
break;
default:
break;
}
}
static int sh_cmt_clock_event_next(unsigned long delta,
struct clock_event_device *ced)
{
struct sh_cmt_priv *p = ced_to_sh_cmt(ced);
BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);
if (likely(p->flags & FLAG_IRQCONTEXT))
p->next_match_value = delta - 1;
else
sh_cmt_set_next(p, delta - 1);
return 0;
}
static void sh_cmt_register_clockevent(struct sh_cmt_priv *p,
char *name, unsigned long rating)
{
struct clock_event_device *ced = &p->ced;
memset(ced, 0, sizeof(*ced));
ced->name = name;
ced->features = CLOCK_EVT_FEAT_PERIODIC;
ced->features |= CLOCK_EVT_FEAT_ONESHOT;
ced->rating = rating;
ced->cpumask = cpumask_of(0);
ced->set_next_event = sh_cmt_clock_event_next;
ced->set_mode = sh_cmt_clock_event_mode;
dev_info(&p->pdev->dev, "used for clock events\n");
clockevents_register_device(ced);
}
static int sh_cmt_register(struct sh_cmt_priv *p, char *name,
unsigned long clockevent_rating,
unsigned long clocksource_rating)
{
if (p->width == (sizeof(p->max_match_value) * 8))
p->max_match_value = ~0;
else
p->max_match_value = (1 << p->width) - 1;
p->match_value = p->max_match_value;
spin_lock_init(&p->lock);
if (clockevent_rating)
sh_cmt_register_clockevent(p, name, clockevent_rating);
if (clocksource_rating)
sh_cmt_register_clocksource(p, name, clocksource_rating);
return 0;
}
static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
{
struct sh_timer_config *cfg = pdev->dev.platform_data;
struct resource *res;
int irq, ret;
ret = -ENXIO;
memset(p, 0, sizeof(*p));
p->pdev = pdev;
if (!cfg) {
dev_err(&p->pdev->dev, "missing platform data\n");
goto err0;
}
platform_set_drvdata(pdev, p);
res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&p->pdev->dev, "failed to get I/O memory\n");
goto err0;
}
irq = platform_get_irq(p->pdev, 0);
if (irq < 0) {
dev_err(&p->pdev->dev, "failed to get irq\n");
goto err0;
}
/* map memory, let mapbase point to our channel */
p->mapbase = ioremap_nocache(res->start, resource_size(res));
if (p->mapbase == NULL) {
dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
goto err0;
}
/* request irq using setup_irq() (too early for request_irq()) */
p->irqaction.name = dev_name(&p->pdev->dev);
p->irqaction.handler = sh_cmt_interrupt;
p->irqaction.dev_id = p;
p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
IRQF_IRQPOLL | IRQF_NOBALANCING;
/* get hold of clock */
p->clk = clk_get(&p->pdev->dev, "cmt_fck");
if (IS_ERR(p->clk)) {
dev_err(&p->pdev->dev, "cannot get clock\n");
ret = PTR_ERR(p->clk);
goto err1;
}
if (resource_size(res) == 6) {
p->width = 16;
p->overflow_bit = 0x80;
p->clear_bits = ~0x80;
} else {
p->width = 32;
p->overflow_bit = 0x8000;
p->clear_bits = ~0xc000;
}
ret = sh_cmt_register(p, (char *)dev_name(&p->pdev->dev),
cfg->clockevent_rating,
cfg->clocksource_rating);
if (ret) {
dev_err(&p->pdev->dev, "registration failed\n");
goto err1;
}
ret = setup_irq(irq, &p->irqaction);
if (ret) {
dev_err(&p->pdev->dev, "failed to request irq %d\n", irq);
goto err1;
}
return 0;
err1:
iounmap(p->mapbase);
err0:
return ret;
}
static int __devinit sh_cmt_probe(struct platform_device *pdev)
{
struct sh_cmt_priv *p = platform_get_drvdata(pdev);
int ret;
if (!is_early_platform_device(pdev))
pm_genpd_dev_always_on(&pdev->dev, true);
if (p) {
dev_info(&pdev->dev, "kept as earlytimer\n");
return 0;
}
p = kmalloc(sizeof(*p), GFP_KERNEL);
if (p == NULL) {
dev_err(&pdev->dev, "failed to allocate driver data\n");
return -ENOMEM;
}
ret = sh_cmt_setup(p, pdev);
if (ret) {
kfree(p);
platform_set_drvdata(pdev, NULL);
}
return ret;
}
static int __devexit sh_cmt_remove(struct platform_device *pdev)
{
return -EBUSY; /* cannot unregister clockevent and clocksource */
}
static struct platform_driver sh_cmt_device_driver = {
.probe = sh_cmt_probe,
.remove = __devexit_p(sh_cmt_remove),
.driver = {
.name = "sh_cmt",
}
};
static int __init sh_cmt_init(void)
{
return platform_driver_register(&sh_cmt_device_driver);
}
static void __exit sh_cmt_exit(void)
{
platform_driver_unregister(&sh_cmt_device_driver);
}
early_platform_init("earlytimer", &sh_cmt_device_driver);
module_init(sh_cmt_init);
module_exit(sh_cmt_exit);
MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH CMT Timer Driver");
MODULE_LICENSE("GPL v2");
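For reference, the platform data this driver consumes through pdev->dev.platform_data carries at least the fields read above: channel_offset, timer_bit and the two ratings. A hypothetical board definition, with purely illustrative values:

static struct sh_timer_config example_cmt_platform_data = {
	.channel_offset		= 0x60,	/* channel regs sit this far above CMSTR */
	.timer_bit		= 5,	/* start/stop bit in the shared CMSTR */
	.clockevent_rating	= 125,
	.clocksource_rating	= 125,
};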

drivers/clocksource/sh_mtu2.c

@@ -0,0 +1,361 @@
/*
* SuperH Timer Support - MTU2
*
* Copyright (C) 2009 Magnus Damm
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clockchips.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pm_domain.h>
struct sh_mtu2_priv {
void __iomem *mapbase;
struct clk *clk;
struct irqaction irqaction;
struct platform_device *pdev;
unsigned long rate;
unsigned long periodic;
struct clock_event_device ced;
};
static DEFINE_SPINLOCK(sh_mtu2_lock);
#define TSTR -1 /* shared register */
#define TCR 0 /* channel register */
#define TMDR 1 /* channel register */
#define TIOR 2 /* channel register */
#define TIER 3 /* channel register */
#define TSR 4 /* channel register */
#define TCNT 5 /* channel register */
#define TGR 6 /* channel register */
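/*
 * Byte offsets of the per-channel registers. TCNT and TGR are 16 bit
 * wide, which is why they occupy the 2 byte slots at offsets 6 and 8;
 * the remaining registers are 8 bit (see sh_mtu2_read/sh_mtu2_write).
 */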
static unsigned long mtu2_reg_offs[] = {
[TCR] = 0,
[TMDR] = 1,
[TIOR] = 2,
[TIER] = 4,
[TSR] = 5,
[TCNT] = 6,
[TGR] = 8,
};
static inline unsigned long sh_mtu2_read(struct sh_mtu2_priv *p, int reg_nr)
{
struct sh_timer_config *cfg = p->pdev->dev.platform_data;
void __iomem *base = p->mapbase;
unsigned long offs;
if (reg_nr == TSTR)
return ioread8(base + cfg->channel_offset);
offs = mtu2_reg_offs[reg_nr];
if ((reg_nr == TCNT) || (reg_nr == TGR))
return ioread16(base + offs);
else
return ioread8(base + offs);
}
static inline void sh_mtu2_write(struct sh_mtu2_priv *p, int reg_nr,
unsigned long value)
{
struct sh_timer_config *cfg = p->pdev->dev.platform_data;
void __iomem *base = p->mapbase;
unsigned long offs;
if (reg_nr == TSTR) {
iowrite8(value, base + cfg->channel_offset);
return;
}
offs = mtu2_reg_offs[reg_nr];
if ((reg_nr == TCNT) || (reg_nr == TGR))
iowrite16(value, base + offs);
else
iowrite8(value, base + offs);
}
static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start)
{
struct sh_timer_config *cfg = p->pdev->dev.platform_data;
unsigned long flags, value;
/* start stop register shared by multiple timer channels */
spin_lock_irqsave(&sh_mtu2_lock, flags);
value = sh_mtu2_read(p, TSTR);
if (start)
value |= 1 << cfg->timer_bit;
else
value &= ~(1 << cfg->timer_bit);
sh_mtu2_write(p, TSTR, value);
spin_unlock_irqrestore(&sh_mtu2_lock, flags);
}
static int sh_mtu2_enable(struct sh_mtu2_priv *p)
{
int ret;
/* enable clock */
ret = clk_enable(p->clk);
if (ret) {
dev_err(&p->pdev->dev, "cannot enable clock\n");
return ret;
}
/* make sure channel is disabled */
sh_mtu2_start_stop_ch(p, 0);
p->rate = clk_get_rate(p->clk) / 64;
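/* ticks per jiffy, rounded to the nearest whole tick */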
p->periodic = (p->rate + HZ/2) / HZ;
/* "Periodic Counter Operation" */
sh_mtu2_write(p, TCR, 0x23); /* TGRA clear, divide clock by 64 */
sh_mtu2_write(p, TIOR, 0);
sh_mtu2_write(p, TGR, p->periodic);
sh_mtu2_write(p, TCNT, 0);
sh_mtu2_write(p, TMDR, 0);
sh_mtu2_write(p, TIER, 0x01);
/* enable channel */
sh_mtu2_start_stop_ch(p, 1);
return 0;
}
static void sh_mtu2_disable(struct sh_mtu2_priv *p)
{
/* disable channel */
sh_mtu2_start_stop_ch(p, 0);
/* stop clock */
clk_disable(p->clk);
}
static irqreturn_t sh_mtu2_interrupt(int irq, void *dev_id)
{
struct sh_mtu2_priv *p = dev_id;
/* acknowledge interrupt */
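/* MTU2 status flags clear by read-then-write-0: writing 0xfe leaves
 * bits 7:1 untouched and clears only TGFA (bit 0), the TGRA match flag
 */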
sh_mtu2_read(p, TSR);
sh_mtu2_write(p, TSR, 0xfe);
/* notify clockevent layer */
p->ced.event_handler(&p->ced);
return IRQ_HANDLED;
}
static struct sh_mtu2_priv *ced_to_sh_mtu2(struct clock_event_device *ced)
{
return container_of(ced, struct sh_mtu2_priv, ced);
}
static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,
struct clock_event_device *ced)
{
struct sh_mtu2_priv *p = ced_to_sh_mtu2(ced);
int disabled = 0;
/* deal with old setting first */
switch (ced->mode) {
case CLOCK_EVT_MODE_PERIODIC:
sh_mtu2_disable(p);
disabled = 1;
break;
default:
break;
}
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
dev_info(&p->pdev->dev, "used for periodic clock events\n");
sh_mtu2_enable(p);
break;
case CLOCK_EVT_MODE_UNUSED:
if (!disabled)
sh_mtu2_disable(p);
break;
case CLOCK_EVT_MODE_SHUTDOWN:
default:
break;
}
}
static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p,
char *name, unsigned long rating)
{
struct clock_event_device *ced = &p->ced;
int ret;
memset(ced, 0, sizeof(*ced));
ced->name = name;
ced->features = CLOCK_EVT_FEAT_PERIODIC;
ced->rating = rating;
ced->cpumask = cpumask_of(0);
ced->set_mode = sh_mtu2_clock_event_mode;
dev_info(&p->pdev->dev, "used for clock events\n");
clockevents_register_device(ced);
ret = setup_irq(p->irqaction.irq, &p->irqaction);
if (ret) {
dev_err(&p->pdev->dev, "failed to request irq %d\n",
p->irqaction.irq);
return;
}
}
static int sh_mtu2_register(struct sh_mtu2_priv *p, char *name,
unsigned long clockevent_rating)
{
if (clockevent_rating)
sh_mtu2_register_clockevent(p, name, clockevent_rating);
return 0;
}
static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
{
struct sh_timer_config *cfg = pdev->dev.platform_data;
struct resource *res;
int irq, ret;
ret = -ENXIO;
memset(p, 0, sizeof(*p));
p->pdev = pdev;
if (!cfg) {
dev_err(&p->pdev->dev, "missing platform data\n");
goto err0;
}
platform_set_drvdata(pdev, p);
res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&p->pdev->dev, "failed to get I/O memory\n");
goto err0;
}
irq = platform_get_irq(p->pdev, 0);
if (irq < 0) {
dev_err(&p->pdev->dev, "failed to get irq\n");
goto err0;
}
/* map memory, let mapbase point to our channel */
p->mapbase = ioremap_nocache(res->start, resource_size(res));
if (p->mapbase == NULL) {
dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
goto err0;
}
/* setup data for setup_irq() (too early for request_irq()) */
p->irqaction.name = dev_name(&p->pdev->dev);
p->irqaction.handler = sh_mtu2_interrupt;
p->irqaction.dev_id = p;
p->irqaction.irq = irq;
p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
IRQF_IRQPOLL | IRQF_NOBALANCING;
/* get hold of clock */
p->clk = clk_get(&p->pdev->dev, "mtu2_fck");
if (IS_ERR(p->clk)) {
dev_err(&p->pdev->dev, "cannot get clock\n");
ret = PTR_ERR(p->clk);
goto err1;
}
return sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev),
cfg->clockevent_rating);
err1:
iounmap(p->mapbase);
err0:
return ret;
}
static int __devinit sh_mtu2_probe(struct platform_device *pdev)
{
struct sh_mtu2_priv *p = platform_get_drvdata(pdev);
int ret;
if (!is_early_platform_device(pdev))
pm_genpd_dev_always_on(&pdev->dev, true);
if (p) {
dev_info(&pdev->dev, "kept as earlytimer\n");
return 0;
}
p = kmalloc(sizeof(*p), GFP_KERNEL);
if (p == NULL) {
dev_err(&pdev->dev, "failed to allocate driver data\n");
return -ENOMEM;
}
ret = sh_mtu2_setup(p, pdev);
if (ret) {
kfree(p);
platform_set_drvdata(pdev, NULL);
}
return ret;
}
static int __devexit sh_mtu2_remove(struct platform_device *pdev)
{
return -EBUSY; /* cannot unregister clockevent */
}
static struct platform_driver sh_mtu2_device_driver = {
.probe = sh_mtu2_probe,
.remove = __devexit_p(sh_mtu2_remove),
.driver = {
.name = "sh_mtu2",
}
};
static int __init sh_mtu2_init(void)
{
return platform_driver_register(&sh_mtu2_device_driver);
}
static void __exit sh_mtu2_exit(void)
{
platform_driver_unregister(&sh_mtu2_device_driver);
}
early_platform_init("earlytimer", &sh_mtu2_device_driver);
module_init(sh_mtu2_init);
module_exit(sh_mtu2_exit);
MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH MTU2 Timer Driver");
MODULE_LICENSE("GPL v2");

View File

@ -0,0 +1,465 @@
/*
* SuperH Timer Support - TMU
*
* Copyright (C) 2009 Magnus Damm
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pm_domain.h>
struct sh_tmu_priv {
void __iomem *mapbase;
struct clk *clk;
struct irqaction irqaction;
struct platform_device *pdev;
unsigned long rate;
unsigned long periodic;
struct clock_event_device ced;
struct clocksource cs;
};
static DEFINE_SPINLOCK(sh_tmu_lock);
#define TSTR -1 /* shared register */
#define TCOR 0 /* channel register */
#define TCNT 1 /* channel register */
#define TCR 2 /* channel register */
static inline unsigned long sh_tmu_read(struct sh_tmu_priv *p, int reg_nr)
{
struct sh_timer_config *cfg = p->pdev->dev.platform_data;
void __iomem *base = p->mapbase;
unsigned long offs;
if (reg_nr == TSTR)
return ioread8(base - cfg->channel_offset);
offs = reg_nr << 2;
if (reg_nr == TCR)
return ioread16(base + offs);
else
return ioread32(base + offs);
}
static inline void sh_tmu_write(struct sh_tmu_priv *p, int reg_nr,
unsigned long value)
{
struct sh_timer_config *cfg = p->pdev->dev.platform_data;
void __iomem *base = p->mapbase;
unsigned long offs;
if (reg_nr == TSTR) {
iowrite8(value, base - cfg->channel_offset);
return;
}
offs = reg_nr << 2;
if (reg_nr == TCR)
iowrite16(value, base + offs);
else
iowrite32(value, base + offs);
}
static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
{
struct sh_timer_config *cfg = p->pdev->dev.platform_data;
unsigned long flags, value;
/* start stop register shared by multiple timer channels */
spin_lock_irqsave(&sh_tmu_lock, flags);
value = sh_tmu_read(p, TSTR);
if (start)
value |= 1 << cfg->timer_bit;
else
value &= ~(1 << cfg->timer_bit);
sh_tmu_write(p, TSTR, value);
spin_unlock_irqrestore(&sh_tmu_lock, flags);
}
static int sh_tmu_enable(struct sh_tmu_priv *p)
{
int ret;
/* enable clock */
ret = clk_enable(p->clk);
if (ret) {
dev_err(&p->pdev->dev, "cannot enable clock\n");
return ret;
}
/* make sure channel is disabled */
sh_tmu_start_stop_ch(p, 0);
/* maximum timeout */
sh_tmu_write(p, TCOR, 0xffffffff);
sh_tmu_write(p, TCNT, 0xffffffff);
/* configure channel to parent clock / 4, irq off */
p->rate = clk_get_rate(p->clk) / 4;
sh_tmu_write(p, TCR, 0x0000);
/* enable channel */
sh_tmu_start_stop_ch(p, 1);
return 0;
}
static void sh_tmu_disable(struct sh_tmu_priv *p)
{
/* disable channel */
sh_tmu_start_stop_ch(p, 0);
/* disable interrupts in TMU block */
sh_tmu_write(p, TCR, 0x0000);
/* stop clock */
clk_disable(p->clk);
}
static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta,
int periodic)
{
/* stop timer */
sh_tmu_start_stop_ch(p, 0);
/* acknowledge interrupt */
sh_tmu_read(p, TCR);
/* enable interrupt */
sh_tmu_write(p, TCR, 0x0020);
/* reload delta value in case of periodic timer: TCNT counts down and
 * reloads from TCOR on underflow, so periodic mode must program both,
 * while oneshot parks TCOR at the maximum value instead */
if (periodic)
sh_tmu_write(p, TCOR, delta);
else
sh_tmu_write(p, TCOR, 0xffffffff);
sh_tmu_write(p, TCNT, delta);
/* start timer */
sh_tmu_start_stop_ch(p, 1);
}
static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
{
struct sh_tmu_priv *p = dev_id;
/* disable or acknowledge interrupt */
if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT)
sh_tmu_write(p, TCR, 0x0000);
else
sh_tmu_write(p, TCR, 0x0020);
/* notify clockevent layer */
p->ced.event_handler(&p->ced);
return IRQ_HANDLED;
}
static struct sh_tmu_priv *cs_to_sh_tmu(struct clocksource *cs)
{
return container_of(cs, struct sh_tmu_priv, cs);
}
static cycle_t sh_tmu_clocksource_read(struct clocksource *cs)
{
struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
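/* TCNT counts down from 0xffffffff; the XOR flips it into the
 * up-counting value the clocksource core expects */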
return sh_tmu_read(p, TCNT) ^ 0xffffffff;
}
static int sh_tmu_clocksource_enable(struct clocksource *cs)
{
struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
int ret;
ret = sh_tmu_enable(p);
if (!ret)
__clocksource_updatefreq_hz(cs, p->rate);
return ret;
}
static void sh_tmu_clocksource_disable(struct clocksource *cs)
{
sh_tmu_disable(cs_to_sh_tmu(cs));
}
static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
char *name, unsigned long rating)
{
struct clocksource *cs = &p->cs;
cs->name = name;
cs->rating = rating;
cs->read = sh_tmu_clocksource_read;
cs->enable = sh_tmu_clocksource_enable;
cs->disable = sh_tmu_clocksource_disable;
cs->mask = CLOCKSOURCE_MASK(32);
cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
dev_info(&p->pdev->dev, "used as clock source\n");
/* Register with dummy 1 Hz value, gets updated in ->enable() */
clocksource_register_hz(cs, 1);
return 0;
}
static struct sh_tmu_priv *ced_to_sh_tmu(struct clock_event_device *ced)
{
return container_of(ced, struct sh_tmu_priv, ced);
}
static void sh_tmu_clock_event_start(struct sh_tmu_priv *p, int periodic)
{
struct clock_event_device *ced = &p->ced;
sh_tmu_enable(p);
/* TODO: calculate good shift from rate and counter bit width */
ced->shift = 32;
ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift);
ced->max_delta_ns = clockevent_delta2ns(0xffffffff, ced);
ced->min_delta_ns = 5000;
if (periodic) {
p->periodic = (p->rate + HZ/2) / HZ;
sh_tmu_set_next(p, p->periodic, 1);
}
}
static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
struct clock_event_device *ced)
{
struct sh_tmu_priv *p = ced_to_sh_tmu(ced);
int disabled = 0;
/* deal with old setting first */
switch (ced->mode) {
case CLOCK_EVT_MODE_PERIODIC:
case CLOCK_EVT_MODE_ONESHOT:
sh_tmu_disable(p);
disabled = 1;
break;
default:
break;
}
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
dev_info(&p->pdev->dev, "used for periodic clock events\n");
sh_tmu_clock_event_start(p, 1);
break;
case CLOCK_EVT_MODE_ONESHOT:
dev_info(&p->pdev->dev, "used for oneshot clock events\n");
sh_tmu_clock_event_start(p, 0);
break;
case CLOCK_EVT_MODE_UNUSED:
if (!disabled)
sh_tmu_disable(p);
break;
case CLOCK_EVT_MODE_SHUTDOWN:
default:
break;
}
}
static int sh_tmu_clock_event_next(unsigned long delta,
struct clock_event_device *ced)
{
struct sh_tmu_priv *p = ced_to_sh_tmu(ced);
BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);
/* program new delta value */
sh_tmu_set_next(p, delta, 0);
return 0;
}
static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
char *name, unsigned long rating)
{
struct clock_event_device *ced = &p->ced;
int ret;
memset(ced, 0, sizeof(*ced));
ced->name = name;
ced->features = CLOCK_EVT_FEAT_PERIODIC;
ced->features |= CLOCK_EVT_FEAT_ONESHOT;
ced->rating = rating;
ced->cpumask = cpumask_of(0);
ced->set_next_event = sh_tmu_clock_event_next;
ced->set_mode = sh_tmu_clock_event_mode;
dev_info(&p->pdev->dev, "used for clock events\n");
clockevents_register_device(ced);
ret = setup_irq(p->irqaction.irq, &p->irqaction);
if (ret) {
dev_err(&p->pdev->dev, "failed to request irq %d\n",
p->irqaction.irq);
return;
}
}
static int sh_tmu_register(struct sh_tmu_priv *p, char *name,
unsigned long clockevent_rating,
unsigned long clocksource_rating)
{
if (clockevent_rating)
sh_tmu_register_clockevent(p, name, clockevent_rating);
else if (clocksource_rating)
sh_tmu_register_clocksource(p, name, clocksource_rating);
return 0;
}
static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
{
struct sh_timer_config *cfg = pdev->dev.platform_data;
struct resource *res;
int irq, ret;
ret = -ENXIO;
memset(p, 0, sizeof(*p));
p->pdev = pdev;
if (!cfg) {
dev_err(&p->pdev->dev, "missing platform data\n");
goto err0;
}
platform_set_drvdata(pdev, p);
res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&p->pdev->dev, "failed to get I/O memory\n");
goto err0;
}
irq = platform_get_irq(p->pdev, 0);
if (irq < 0) {
dev_err(&p->pdev->dev, "failed to get irq\n");
goto err0;
}
/* map memory, let mapbase point to our channel */
p->mapbase = ioremap_nocache(res->start, resource_size(res));
if (p->mapbase == NULL) {
dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
goto err0;
}
/* setup data for setup_irq() (too early for request_irq()) */
p->irqaction.name = dev_name(&p->pdev->dev);
p->irqaction.handler = sh_tmu_interrupt;
p->irqaction.dev_id = p;
p->irqaction.irq = irq;
p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
IRQF_IRQPOLL | IRQF_NOBALANCING;
/* get hold of clock */
p->clk = clk_get(&p->pdev->dev, "tmu_fck");
if (IS_ERR(p->clk)) {
dev_err(&p->pdev->dev, "cannot get clock\n");
ret = PTR_ERR(p->clk);
goto err1;
}
return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
cfg->clockevent_rating,
cfg->clocksource_rating);
err1:
iounmap(p->mapbase);
err0:
return ret;
}
static int __devinit sh_tmu_probe(struct platform_device *pdev)
{
struct sh_tmu_priv *p = platform_get_drvdata(pdev);
int ret;
if (!is_early_platform_device(pdev))
pm_genpd_dev_always_on(&pdev->dev, true);
if (p) {
dev_info(&pdev->dev, "kept as earlytimer\n");
return 0;
}
p = kmalloc(sizeof(*p), GFP_KERNEL);
if (p == NULL) {
dev_err(&pdev->dev, "failed to allocate driver data\n");
return -ENOMEM;
}
ret = sh_tmu_setup(p, pdev);
if (ret) {
kfree(p);
platform_set_drvdata(pdev, NULL);
}
return ret;
}
static int __devexit sh_tmu_remove(struct platform_device *pdev)
{
return -EBUSY; /* cannot unregister clockevent and clocksource */
}
static struct platform_driver sh_tmu_device_driver = {
.probe = sh_tmu_probe,
.remove = __devexit_p(sh_tmu_remove),
.driver = {
.name = "sh_tmu",
}
};
static int __init sh_tmu_init(void)
{
return platform_driver_register(&sh_tmu_device_driver);
}
static void __exit sh_tmu_exit(void)
{
platform_driver_unregister(&sh_tmu_device_driver);
}
early_platform_init("earlytimer", &sh_tmu_device_driver);
module_init(sh_tmu_init);
module_exit(sh_tmu_exit);
MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH TMU Timer Driver");
MODULE_LICENSE("GPL v2");

View File

@ -0,0 +1,334 @@
#include <linux/init.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/atmel_tc.h>
/*
* We're configured to use a specific TC block, one that's not hooked
* up to external hardware, to provide a time solution:
*
* - Two channels combine to create a free-running 32 bit counter
* with a base rate of 5+ MHz, packaged as a clocksource (with
* resolution better than 200 nsec).
* - Some chips support a 32 bit counter. A single channel is then used
*   for the 32 bit free-running counter; the second channel is unused.
*
* - The third channel may be used to provide a 16-bit clockevent
* source, used in either periodic or oneshot mode. This runs
* at 32 KiHz, and can handle delays of up to two seconds.
*
* A boot clocksource and clockevent source are also currently needed,
* unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so
* this code can be used when init_timers() is called, well before most
* devices are set up. (Some low end AT91 parts, which can run uClinux,
* have only the timers in one TC block... they currently don't support
* the tclib code, because of that initialization issue.)
*
* REVISIT behavior during system suspend states... we should disable
* all clocks and save the power. Easily done for clockevent devices,
* but clocksources won't necessarily get the needed notifications.
* For deeper system sleep states, this will be mandatory...
*/
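/*
 * A worked example (assuming, say, a 66 MHz master clock): the usual
 * mck/8 input gives 8.25 MHz, i.e. ~121 ns per tick, comfortably
 * inside the "5+ MHz / better than 200 nsec" envelope noted above.
 */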
static void __iomem *tcaddr;
static cycle_t tc_get_cycles(struct clocksource *cs)
{
unsigned long flags;
u32 lower, upper;
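/*
 * The 32 bit value is split across two chained 16 bit channels that
 * cannot be read atomically; re-read the upper half and retry if it
 * changed, so a carry between the two reads cannot tear the result.
 */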
raw_local_irq_save(flags);
do {
upper = __raw_readl(tcaddr + ATMEL_TC_REG(1, CV));
lower = __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
} while (upper != __raw_readl(tcaddr + ATMEL_TC_REG(1, CV)));
raw_local_irq_restore(flags);
return (upper << 16) | lower;
}
static cycle_t tc_get_cycles32(struct clocksource *cs)
{
return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
}
static struct clocksource clksrc = {
.name = "tcb_clksrc",
.rating = 200,
.read = tc_get_cycles,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
#ifdef CONFIG_GENERIC_CLOCKEVENTS
struct tc_clkevt_device {
struct clock_event_device clkevt;
struct clk *clk;
void __iomem *regs;
};
static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt)
{
return container_of(clkevt, struct tc_clkevt_device, clkevt);
}
/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
* because using one of the divided clocks would usually mean the
* tick rate can never be less than several dozen Hz (vs 0.5 Hz).
*
* A divided clock could be good for high resolution timers, since
* 30.5 usec resolution can seem "low".
*/
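/* At 32768 Hz one tick is 1/32768 s ~= 30.5 us, and a 16 bit counter
 * spans 65536/32768 = 2 seconds -- the "two seconds" limit noted in
 * the header comment above.
 */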
static u32 timer_clock;
static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
{
struct tc_clkevt_device *tcd = to_tc_clkevt(d);
void __iomem *regs = tcd->regs;
if (tcd->clkevt.mode == CLOCK_EVT_MODE_PERIODIC
|| tcd->clkevt.mode == CLOCK_EVT_MODE_ONESHOT) {
__raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
__raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
clk_disable(tcd->clk);
}
switch (m) {
/* By not making the gentime core emulate periodic mode on top
* of oneshot, we get lower overhead and improved accuracy.
*/
case CLOCK_EVT_MODE_PERIODIC:
clk_enable(tcd->clk);
/* slow clock, count up to RC, then irq and restart */
__raw_writel(timer_clock
| ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
regs + ATMEL_TC_REG(2, CMR));
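/* RC = 32 KiHz ticks per jiffy, rounded to nearest; e.g. at HZ=100
 * this is 328, giving a ~99.9 Hz periodic tick */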
__raw_writel((32768 + HZ/2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
/* Enable clock and interrupts on RC compare */
__raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
/* go go gadget! */
__raw_writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
regs + ATMEL_TC_REG(2, CCR));
break;
case CLOCK_EVT_MODE_ONESHOT:
clk_enable(tcd->clk);
/* slow clock, count up to RC, then irq and stop */
__raw_writel(timer_clock | ATMEL_TC_CPCSTOP
| ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
regs + ATMEL_TC_REG(2, CMR));
__raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
/* set_next_event() configures and starts the timer */
break;
default:
break;
}
}
static int tc_next_event(unsigned long delta, struct clock_event_device *d)
{
__raw_writel(delta, tcaddr + ATMEL_TC_REG(2, RC));
/* go go gadget! */
__raw_writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
tcaddr + ATMEL_TC_REG(2, CCR));
return 0;
}
static struct tc_clkevt_device clkevt = {
.clkevt = {
.name = "tc_clkevt",
.features = CLOCK_EVT_FEAT_PERIODIC
| CLOCK_EVT_FEAT_ONESHOT,
.shift = 32,
/* Should be lower than at91rm9200's system timer */
.rating = 125,
.set_next_event = tc_next_event,
.set_mode = tc_mode,
},
};
static irqreturn_t ch2_irq(int irq, void *handle)
{
struct tc_clkevt_device *dev = handle;
unsigned int sr;
sr = __raw_readl(dev->regs + ATMEL_TC_REG(2, SR));
if (sr & ATMEL_TC_CPCS) {
dev->clkevt.event_handler(&dev->clkevt);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static struct irqaction tc_irqaction = {
.name = "tc_clkevt",
.flags = IRQF_TIMER | IRQF_DISABLED,
.handler = ch2_irq,
};
static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
{
struct clk *t2_clk = tc->clk[2];
int irq = tc->irq[2];
clkevt.regs = tc->regs;
clkevt.clk = t2_clk;
tc_irqaction.dev_id = &clkevt;
timer_clock = clk32k_divisor_idx;
clkevt.clkevt.mult = div_sc(32768, NSEC_PER_SEC, clkevt.clkevt.shift);
clkevt.clkevt.max_delta_ns
= clockevent_delta2ns(0xffff, &clkevt.clkevt);
clkevt.clkevt.min_delta_ns = clockevent_delta2ns(1, &clkevt.clkevt) + 1;
clkevt.clkevt.cpumask = cpumask_of(0);
clockevents_register_device(&clkevt.clkevt);
setup_irq(irq, &tc_irqaction);
}
#else /* !CONFIG_GENERIC_CLOCKEVENTS */
static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
{
/* NOTHING */
}
#endif
static void __init tcb_setup_dual_chan(struct atmel_tc *tc, int mck_divisor_idx)
{
/* channel 0: waveform mode, input mclk/8, clock TIOA0 on overflow */
__raw_writel(mck_divisor_idx /* likely divide-by-8 */
| ATMEL_TC_WAVE
| ATMEL_TC_WAVESEL_UP /* free-run */
| ATMEL_TC_ACPA_SET /* TIOA0 rises at 0 */
| ATMEL_TC_ACPC_CLEAR, /* (duty cycle 50%) */
tcaddr + ATMEL_TC_REG(0, CMR));
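/* TIOA0 is set when TCNT0 passes RA (0) and cleared at RC (0x8000),
 * so it completes one full cycle per 0x10000 counts; channel 1,
 * clocked by TIOA0 via XC1, therefore counts the upper 16 bits */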
__raw_writel(0x0000, tcaddr + ATMEL_TC_REG(0, RA));
__raw_writel(0x8000, tcaddr + ATMEL_TC_REG(0, RC));
__raw_writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR)); /* no irqs */
__raw_writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));
/* channel 1: waveform mode, input TIOA0 */
__raw_writel(ATMEL_TC_XC1 /* input: TIOA0 */
| ATMEL_TC_WAVE
| ATMEL_TC_WAVESEL_UP, /* free-run */
tcaddr + ATMEL_TC_REG(1, CMR));
__raw_writel(0xff, tcaddr + ATMEL_TC_REG(1, IDR)); /* no irqs */
__raw_writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(1, CCR));
/* chain channel 0 to channel 1 */
__raw_writel(ATMEL_TC_TC1XC1S_TIOA0, tcaddr + ATMEL_TC_BMR);
/* then reset all the timers */
__raw_writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
}
static void __init tcb_setup_single_chan(struct atmel_tc *tc, int mck_divisor_idx)
{
/* channel 0: waveform mode, input mclk/8 */
__raw_writel(mck_divisor_idx /* likely divide-by-8 */
| ATMEL_TC_WAVE
| ATMEL_TC_WAVESEL_UP, /* free-run */
tcaddr + ATMEL_TC_REG(0, CMR));
__raw_writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR)); /* no irqs */
__raw_writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));
/* then reset all the timers */
__raw_writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
}
static int __init tcb_clksrc_init(void)
{
static char bootinfo[] __initdata
= KERN_DEBUG "%s: tc%d at %d.%03d MHz\n";
struct platform_device *pdev;
struct atmel_tc *tc;
struct clk *t0_clk;
u32 rate, divided_rate = 0;
int best_divisor_idx = -1;
int clk32k_divisor_idx = -1;
int i;
tc = atmel_tc_alloc(CONFIG_ATMEL_TCB_CLKSRC_BLOCK, clksrc.name);
if (!tc) {
pr_debug("can't alloc TC for clocksource\n");
return -ENODEV;
}
tcaddr = tc->regs;
pdev = tc->pdev;
t0_clk = tc->clk[0];
clk_enable(t0_clk);
/* How fast will we be counting? Pick something over 5 MHz. */
rate = (u32) clk_get_rate(t0_clk);
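/* atmel_tc_divisors[] is ascending, so candidate rates descend; the
 * loop keeps taking slower rates until one would fall below 5 MHz,
 * i.e. it settles on (roughly) the slowest rate still >= 5 MHz --
 * slower clocks wrap less often for the same counter width */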
for (i = 0; i < 5; i++) {
unsigned divisor = atmel_tc_divisors[i];
unsigned tmp;
/* remember 32 KiHz clock for later */
if (!divisor) {
clk32k_divisor_idx = i;
continue;
}
tmp = rate / divisor;
pr_debug("TC: %u / %-3u [%d] --> %u\n", rate, divisor, i, tmp);
if (best_divisor_idx > 0) {
if (tmp < 5 * 1000 * 1000)
continue;
}
divided_rate = tmp;
best_divisor_idx = i;
}
printk(bootinfo, clksrc.name, CONFIG_ATMEL_TCB_CLKSRC_BLOCK,
divided_rate / 1000000,
((divided_rate + 500000) % 1000000) / 1000);
if (tc->tcb_config && tc->tcb_config->counter_width == 32) {
/* use the appropriate function to read the 32 bit counter */
clksrc.read = tc_get_cycles32;
/* set up only channel 0 */
tcb_setup_single_chan(tc, best_divisor_idx);
} else {
/* tclib will give us three clocks no matter what the
* underlying platform supports.
*/
clk_enable(tc->clk[1]);
/* set up both channels 0 & 1 */
tcb_setup_dual_chan(tc, best_divisor_idx);
}
/* and away we go! */
clocksource_register_hz(&clksrc, divided_rate);
/* channel 2: periodic and oneshot timer support */
setup_clkevents(tc, clk32k_divisor_idx);
return 0;
}
arch_initcall(tcb_clksrc_init);