/*
 * include/asm-xtensa/io.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_IO_H
#define _XTENSA_IO_H

#ifdef __KERNEL__
#include <asm/byteorder.h>
#include <asm/page.h>
#include <asm/vectors.h>
#include <linux/bug.h>
#include <linux/kernel.h>

#include <linux/types.h>

/* Virtual address of byte offset x within the uncached (bypass) KIO mapping. */
#define IOADDR(x)	(XCHAL_KIO_BYPASS_VADDR + (x))
#define IO_SPACE_LIMIT ~0

#ifdef CONFIG_MMU

#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
extern unsigned long xtensa_kio_paddr;

static inline unsigned long xtensa_get_kio_paddr(void)
{
	return xtensa_kio_paddr;
}
#endif

/*
 * Return the virtual address for the specified bus memory.
 * Note that we currently don't support any address outside the KIO segment.
 */
static inline void __iomem *ioremap_nocache(unsigned long offset,
		unsigned long size)
{
	if (offset >= XCHAL_KIO_PADDR
	    && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
		return (void __iomem *)(offset - XCHAL_KIO_PADDR +
					XCHAL_KIO_BYPASS_VADDR);
	else
		BUG();
}

static inline void __iomem *ioremap_cache(unsigned long offset,
		unsigned long size)
{
	if (offset >= XCHAL_KIO_PADDR
	    && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
		return (void __iomem *)(offset - XCHAL_KIO_PADDR +
					XCHAL_KIO_CACHED_VADDR);
	else
		BUG();
}

#define ioremap_wc ioremap_nocache

static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
{
	return ioremap_nocache(offset, size);
}

/* The KIO range is mapped statically, so unmapping is a no-op. */
static inline void iounmap(volatile void __iomem *addr)
{
}
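
/*
 * Usage sketch (illustrative only): a driver maps a register block that
 * lies inside the KIO segment, accesses a register, and unmaps it again.
 * The 0x1000 offset and the register layout are hypothetical examples,
 * not anything defined by this header.
 *
 *	void __iomem *regs = ioremap_nocache(XCHAL_KIO_PADDR + 0x1000, 0x100);
 *	u32 status = readl(regs);
 *	writel(status | 0x1, regs + 0x4);
 *	iounmap(regs);
 *
 * Physical addresses outside the KIO segment trigger BUG(), as noted above.
 */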

#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

#endif /* CONFIG_MMU */

/*
 * Generic I/O: the "relaxed" MMIO accessors are plain aliases of the
 * regular ones on this architecture.
 */
#define readb_relaxed readb
#define readw_relaxed readw
#define readl_relaxed readl

#endif /* __KERNEL__ */

#include <asm-generic/io.h>

#endif /* _XTENSA_IO_H */