/*
 * linux/arch/arm/include/asm/pmu.h
 *
 * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#ifndef __ARM_PMU_H__
#define __ARM_PMU_H__

#include <linux/interrupt.h>
#include <linux/perf_event.h>

/*
 * struct arm_pmu_platdata - ARM PMU platform data
 *
 * @handle_irq: an optional handler which will be called from the
 *	interrupt and passed the address of the low level handler,
 *	and can be used to implement any platform specific handling
 *	before or after calling it.
 * @runtime_resume: an optional handler which will be called by the
 *	runtime PM framework following a call to pm_runtime_get().
 *	Note that if pm_runtime_get() is called more than once in
 *	succession this handler will only be called once.
 * @runtime_suspend: an optional handler which will be called by the
 *	runtime PM framework following a call to pm_runtime_put().
 *	Note that if pm_runtime_get() is called more than once in
 *	succession this handler will only be called following the
 *	final call to pm_runtime_put() that actually disables the
 *	hardware.
 */
struct arm_pmu_platdata {
	irqreturn_t (*handle_irq)(int irq, void *dev,
				  irq_handler_t pmu_handler);
	int (*runtime_resume)(struct device *dev);
	int (*runtime_suspend)(struct device *dev);
};
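
/*
 * Illustrative sketch (not part of the original header): a board file
 * could wrap the low level handler and hook the runtime PM callbacks
 * roughly as below. All "myplat_" names are hypothetical.
 *
 *	static irqreturn_t myplat_handle_irq(int irq, void *dev,
 *					     irq_handler_t pmu_handler)
 *	{
 *		myplat_pmu_preamble();		// platform specific set-up
 *		return pmu_handler(irq, dev);	// then the low level handler
 *	}
 *
 *	static struct arm_pmu_platdata myplat_pmu_platdata = {
 *		.handle_irq	 = myplat_handle_irq,
 *		.runtime_resume	 = myplat_pmu_resume,
 *		.runtime_suspend = myplat_pmu_suspend,
 *	};
 */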

#ifdef CONFIG_HW_PERF_EVENTS

/*
 * The ARMv7 CPU PMU supports up to 32 event counters.
 */
#define ARMPMU_MAX_HWEVENTS	32

#define HW_OP_UNSUPPORTED	0xFFFF
#define C(_x)			PERF_COUNT_HW_CACHE_##_x
#define CACHE_OP_UNSUPPORTED	0xFFFF

#define PERF_MAP_ALL_UNSUPPORTED					\
	[0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED

#define PERF_CACHE_MAP_ALL_UNSUPPORTED					\
[0 ... C(MAX) - 1] = {							\
	[0 ... C(OP_MAX) - 1] = {					\
		[0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED,	\
	},								\
}
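
/*
 * Illustrative sketch (not part of the original header): a CPU PMU
 * driver would typically start its maps from the ALL_UNSUPPORTED
 * initialisers and then override only the events its hardware supports.
 * The "mycpu_" names and event codes (0x04, 0x03) are hypothetical.
 *
 *	static const unsigned mycpu_perf_map[PERF_COUNT_HW_MAX] = {
 *		PERF_MAP_ALL_UNSUPPORTED,
 *		[PERF_COUNT_HW_CPU_CYCLES]	= 0x04,
 *	};
 *
 *	static const unsigned mycpu_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 *					[PERF_COUNT_HW_CACHE_OP_MAX]
 *					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 *		PERF_CACHE_MAP_ALL_UNSUPPORTED,
 *		[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= 0x03,
 *	};
 */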

/* The events for a given PMU register set. */
struct pmu_hw_events {
	/*
	 * The events that are active on the PMU for the given index.
	 */
	struct perf_event	**events;

	/*
	 * A 1 bit for an index indicates that the counter is being used for
	 * an event. A 0 means that the counter can be used.
	 */
	unsigned long		*used_mask;

	/*
	 * Hardware lock to serialize accesses to PMU registers. Needed for the
	 * read/modify/write sequences.
	 */
	raw_spinlock_t		pmu_lock;
};
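
/*
 * Illustrative sketch (not part of the original header): a get_event_idx
 * implementation would usually claim a free counter by atomically
 * setting its bit in used_mask, e.g.:
 *
 *	int idx;
 *
 *	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 *		if (!test_and_set_bit(idx, hw_events->used_mask))
 *			return idx;	// counter idx is now ours
 *	}
 *	return -EAGAIN;			// all counters in use
 */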

struct arm_pmu {
	struct pmu	pmu;
	cpumask_t	active_irqs;
	char		*name;
	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
	void		(*enable)(struct perf_event *event);
	void		(*disable)(struct perf_event *event);
	int		(*get_event_idx)(struct pmu_hw_events *hw_events,
					 struct perf_event *event);
	void		(*clear_event_idx)(struct pmu_hw_events *hw_events,
					   struct perf_event *event);
	int		(*set_event_filter)(struct hw_perf_event *evt,
					    struct perf_event_attr *attr);
	u32		(*read_counter)(struct perf_event *event);
	void		(*write_counter)(struct perf_event *event, u32 val);
	void		(*start)(struct arm_pmu *);
	void		(*stop)(struct arm_pmu *);
	void		(*reset)(void *);
	int		(*request_irq)(struct arm_pmu *, irq_handler_t handler);
	void		(*free_irq)(struct arm_pmu *);
	int		(*map_event)(struct perf_event *event);
	int		num_events;
	atomic_t	active_events;
	struct mutex	reserve_mutex;
	u64		max_period;
	struct platform_device	*plat_device;
	struct pmu_hw_events	*(*get_hw_events)(void);
};
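
/*
 * Illustrative sketch (not part of the original header): a CPU PMU
 * driver fills in the callbacks for its hardware before registering.
 * The "mycpu_" helpers and the counter count are hypothetical.
 *
 *	static int mycpu_pmu_init(struct arm_pmu *cpu_pmu)
 *	{
 *		cpu_pmu->name		= "mycpu_pmu";
 *		cpu_pmu->handle_irq	= mycpu_pmu_handle_irq;
 *		cpu_pmu->enable		= mycpu_pmu_enable_event;
 *		cpu_pmu->disable	= mycpu_pmu_disable_event;
 *		cpu_pmu->read_counter	= mycpu_pmu_read_counter;
 *		cpu_pmu->write_counter	= mycpu_pmu_write_counter;
 *		cpu_pmu->get_event_idx	= mycpu_pmu_get_event_idx;
 *		cpu_pmu->start		= mycpu_pmu_start;
 *		cpu_pmu->stop		= mycpu_pmu_stop;
 *		cpu_pmu->map_event	= mycpu_pmu_map_event;
 *		cpu_pmu->num_events	= 4;
 *		cpu_pmu->max_period	= (1LLU << 32) - 1;
 *		return 0;
 *	}
 */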

#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))

extern const struct dev_pm_ops armpmu_dev_pm_ops;

int armpmu_register(struct arm_pmu *armpmu, int type);
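
/*
 * Illustrative sketch (not part of the original header): registration
 * from a driver's probe path might look like this, assuming "cpu_pmu"
 * has already been initialised:
 *
 *	err = armpmu_register(cpu_pmu, PERF_TYPE_RAW);
 */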

u64 armpmu_event_update(struct perf_event *event);

int armpmu_event_set_period(struct perf_event *event);
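
/*
 * Illustrative sketch (not part of the original header): an overflow
 * handler typically folds the hardware count into the perf count and
 * then re-arms the counter; "data", "regs" and "cpu_pmu" are assumed
 * to come from the surrounding handler:
 *
 *	armpmu_event_update(event);
 *	perf_sample_data_init(&data, 0, event->hw.last_period);
 *	if (armpmu_event_set_period(event) &&
 *	    perf_event_overflow(event, &data, regs))
 *		cpu_pmu->disable(event);
 */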

int armpmu_map_event(struct perf_event *event,
		     const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		     const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
						[PERF_COUNT_HW_CACHE_OP_MAX]
						[PERF_COUNT_HW_CACHE_RESULT_MAX],
		     u32 raw_event_mask);
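
/*
 * Illustrative sketch (not part of the original header): a driver's
 * map_event callback typically just delegates to armpmu_map_event()
 * with its own tables; the maps are the hypothetical ones sketched
 * above and the raw event mask (0xFF) is likewise an assumption.
 *
 *	static int mycpu_pmu_map_event(struct perf_event *event)
 *	{
 *		return armpmu_map_event(event, &mycpu_perf_map,
 *					&mycpu_perf_cache_map, 0xFF);
 *	}
 */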

#endif /* CONFIG_HW_PERF_EVENTS */

#endif /* __ARM_PMU_H__ */