2019-06-04 16:11:37 +08:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0-only */
|
2006-02-01 19:05:16 +08:00
|
|
|
/*
|
|
|
|
* Copyright 2006 PathScale, Inc. All Rights Reserved.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifndef _LINUX_IO_H
|
|
|
|
#define _LINUX_IO_H
|
|
|
|
|
2006-10-08 22:08:45 +08:00
|
|
|
#include <linux/types.h>
|
2015-06-02 17:01:38 +08:00
|
|
|
#include <linux/init.h>
|
2015-08-17 22:00:35 +08:00
|
|
|
#include <linux/bug.h>
|
|
|
|
#include <linux/err.h>
|
2006-02-01 19:05:16 +08:00
|
|
|
#include <asm/io.h>
|
2006-10-01 14:29:12 +08:00
|
|
|
#include <asm/page.h>
|
2006-02-01 19:05:16 +08:00
|
|
|
|
2007-02-03 10:07:15 +08:00
|
|
|
struct device;
|
2015-08-17 22:00:35 +08:00
|
|
|
struct resource;
|
2007-02-03 10:07:15 +08:00
|
|
|
|
2014-02-08 15:51:58 +08:00
|
|
|
__visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count);
|
2016-01-21 06:58:35 +08:00
|
|
|
void __ioread32_copy(void *to, const void __iomem *from, size_t count);
|
2006-06-21 11:03:02 +08:00
|
|
|
void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
|
2006-02-01 19:05:16 +08:00
|
|
|
|
2007-05-15 16:41:02 +08:00
|
|
|
#ifdef CONFIG_MMU
|
2006-10-01 14:29:12 +08:00
|
|
|
int ioremap_page_range(unsigned long addr, unsigned long end,
|
2010-06-18 11:22:40 +08:00
|
|
|
phys_addr_t phys_addr, pgprot_t prot);
|
2007-05-15 16:41:02 +08:00
|
|
|
#else
|
|
|
|
/* !CONFIG_MMU stub: there are no page tables to populate, so report success. */
static inline int ioremap_page_range(unsigned long addr, unsigned long end,
				     phys_addr_t phys_addr, pgprot_t prot)
{
	return 0;
}
|
|
|
|
#endif
|
2006-10-01 14:29:12 +08:00
|
|
|
|
2015-04-15 06:47:20 +08:00
|
|
|
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
|
|
|
|
void __init ioremap_huge_init(void);
|
2019-07-17 07:27:33 +08:00
|
|
|
int arch_ioremap_p4d_supported(void);
|
2015-04-15 06:47:20 +08:00
|
|
|
int arch_ioremap_pud_supported(void);
|
|
|
|
int arch_ioremap_pmd_supported(void);
|
|
|
|
#else
|
|
|
|
/* !CONFIG_HAVE_ARCH_HUGE_VMAP stub: huge-page ioremap unavailable, nothing to set up. */
static inline void ioremap_huge_init(void) { }
|
|
|
|
#endif
|
|
|
|
|
devres: device resource management
Implement device resource management, in short, devres. A device
driver can allocate arbirary size of devres data which is associated
with a release function. On driver detach, release function is
invoked on the devres data, then, devres data is freed.
devreses are typed by associated release functions. Some devreses are
better represented by single instance of the type while others need
multiple instances sharing the same release function. Both usages are
supported.
devreses can be grouped using devres group such that a device driver
can easily release acquired resources halfway through initialization
or selectively release resources (e.g. resources for port 1 out of 4
ports).
This patch adds devres core including documentation and the following
managed interfaces.
* alloc/free : devm_kzalloc(), devm_kzfree()
* IO region : devm_request_region(), devm_release_region()
* IRQ : devm_request_irq(), devm_free_irq()
* DMA : dmam_alloc_coherent(), dmam_free_coherent(),
dmam_declare_coherent_memory(), dmam_pool_create(),
dmam_pool_destroy()
* PCI : pcim_enable_device(), pcim_pin_device(), pci_is_managed()
* iomap : devm_ioport_map(), devm_ioport_unmap(), devm_ioremap(),
devm_ioremap_nocache(), devm_iounmap(), pcim_iomap_table(),
pcim_iomap(), pcim_iounmap()
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
2007-01-20 15:00:26 +08:00
|
|
|
/*
|
|
|
|
* Managed iomap interface
|
|
|
|
*/
|
2014-04-08 06:39:19 +08:00
|
|
|
#ifdef CONFIG_HAS_IOPORT_MAP
|
devres: device resource management
Implement device resource management, in short, devres. A device
driver can allocate arbirary size of devres data which is associated
with a release function. On driver detach, release function is
invoked on the devres data, then, devres data is freed.
devreses are typed by associated release functions. Some devreses are
better represented by single instance of the type while others need
multiple instances sharing the same release function. Both usages are
supported.
devreses can be grouped using devres group such that a device driver
can easily release acquired resources halfway through initialization
or selectively release resources (e.g. resources for port 1 out of 4
ports).
This patch adds devres core including documentation and the following
managed interfaces.
* alloc/free : devm_kzalloc(), devm_kzfree()
* IO region : devm_request_region(), devm_release_region()
* IRQ : devm_request_irq(), devm_free_irq()
* DMA : dmam_alloc_coherent(), dmam_free_coherent(),
dmam_declare_coherent_memory(), dmam_pool_create(),
dmam_pool_destroy()
* PCI : pcim_enable_device(), pcim_pin_device(), pci_is_managed()
* iomap : devm_ioport_map(), devm_ioport_unmap(), devm_ioremap(),
devm_ioremap_nocache(), devm_iounmap(), pcim_iomap_table(),
pcim_iomap(), pcim_iounmap()
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
2007-01-20 15:00:26 +08:00
|
|
|
void __iomem * devm_ioport_map(struct device *dev, unsigned long port,
|
|
|
|
unsigned int nr);
|
|
|
|
void devm_ioport_unmap(struct device *dev, void __iomem *addr);
|
2007-04-17 15:32:26 +08:00
|
|
|
#else
|
|
|
|
/* !CONFIG_HAS_IOPORT_MAP stub: port mapping is unsupported, always fail with NULL. */
static inline void __iomem *devm_ioport_map(struct device *dev,
					    unsigned long port,
					    unsigned int nr)
{
	return NULL;
}
|
|
|
|
|
|
|
|
/* !CONFIG_HAS_IOPORT_MAP stub: nothing was ever mapped, so nothing to unmap. */
static inline void devm_ioport_unmap(struct device *dev, void __iomem *addr)
{
}
|
|
|
|
#endif
|
devres: device resource management
Implement device resource management, in short, devres. A device
driver can allocate arbirary size of devres data which is associated
with a release function. On driver detach, release function is
invoked on the devres data, then, devres data is freed.
devreses are typed by associated release functions. Some devreses are
better represented by single instance of the type while others need
multiple instances sharing the same release function. Both usages are
supported.
devreses can be grouped using devres group such that a device driver
can easily release acquired resources halfway through initialization
or selectively release resources (e.g. resources for port 1 out of 4
ports).
This patch adds devres core including documentation and the following
managed interfaces.
* alloc/free : devm_kzalloc(), devm_kzfree()
* IO region : devm_request_region(), devm_release_region()
* IRQ : devm_request_irq(), devm_free_irq()
* DMA : dmam_alloc_coherent(), dmam_free_coherent(),
dmam_declare_coherent_memory(), dmam_pool_create(),
dmam_pool_destroy()
* PCI : pcim_enable_device(), pcim_pin_device(), pci_is_managed()
* iomap : devm_ioport_map(), devm_ioport_unmap(), devm_ioremap(),
devm_ioremap_nocache(), devm_iounmap(), pcim_iomap_table(),
pcim_iomap(), pcim_iounmap()
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
2007-01-20 15:00:26 +08:00
|
|
|
|
2014-07-18 17:36:39 +08:00
|
|
|
#define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err)
|
|
|
|
|
2008-04-29 23:25:48 +08:00
|
|
|
void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
|
2014-10-07 23:25:43 +08:00
|
|
|
resource_size_t size);
|
2019-10-17 05:06:28 +08:00
|
|
|
void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset,
|
|
|
|
resource_size_t size);
|
2015-02-06 21:45:27 +08:00
|
|
|
void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
|
|
|
|
resource_size_t size);
|
2021-02-11 20:35:46 +08:00
|
|
|
void __iomem *devm_ioremap_np(struct device *dev, resource_size_t offset,
|
|
|
|
resource_size_t size);
|
devres: device resource management
Implement device resource management, in short, devres. A device
driver can allocate arbirary size of devres data which is associated
with a release function. On driver detach, release function is
invoked on the devres data, then, devres data is freed.
devreses are typed by associated release functions. Some devreses are
better represented by single instance of the type while others need
multiple instances sharing the same release function. Both usages are
supported.
devreses can be grouped using devres group such that a device driver
can easily release acquired resources halfway through initialization
or selectively release resources (e.g. resources for port 1 out of 4
ports).
This patch adds devres core including documentation and the following
managed interfaces.
* alloc/free : devm_kzalloc(), devm_kzfree()
* IO region : devm_request_region(), devm_release_region()
* IRQ : devm_request_irq(), devm_free_irq()
* DMA : dmam_alloc_coherent(), dmam_free_coherent(),
dmam_declare_coherent_memory(), dmam_pool_create(),
dmam_pool_destroy()
* PCI : pcim_enable_device(), pcim_pin_device(), pci_is_managed()
* iomap : devm_ioport_map(), devm_ioport_unmap(), devm_ioremap(),
devm_ioremap_nocache(), devm_iounmap(), pcim_iomap_table(),
pcim_iomap(), pcim_iounmap()
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
2007-01-20 15:00:26 +08:00
|
|
|
void devm_iounmap(struct device *dev, void __iomem *addr);
|
2007-07-16 14:41:38 +08:00
|
|
|
int check_signature(const volatile void __iomem *io_addr,
|
|
|
|
const unsigned char *signature, int length);
|
2008-05-03 04:34:04 +08:00
|
|
|
void devm_ioremap_release(struct device *dev, void *res);
|
2006-10-11 16:22:02 +08:00
|
|
|
|
2015-08-11 11:07:07 +08:00
|
|
|
void *devm_memremap(struct device *dev, resource_size_t offset,
|
|
|
|
size_t size, unsigned long flags);
|
|
|
|
void devm_memunmap(struct device *dev, void *addr);
|
|
|
|
|
2017-04-20 00:48:51 +08:00
|
|
|
#ifdef CONFIG_PCI
|
|
|
|
/*
|
|
|
|
* The PCI specifications (Rev 3.0, 3.2.5 "Transaction Ordering and
|
|
|
|
* Posting") mandate non-posted configuration transactions. There is
|
|
|
|
* no ioremap API in the kernel that can guarantee non-posted write
|
|
|
|
* semantics across arches so provide a default implementation for
|
2020-01-06 16:43:50 +08:00
|
|
|
* mapping PCI config space that defaults to ioremap(); arches
|
2017-04-20 00:48:51 +08:00
|
|
|
* should override it if they have memory mapping implementations that
|
|
|
|
* guarantee non-posted writes semantics to make the memory mapping
|
|
|
|
* compliant with the PCI specification.
|
|
|
|
*/
|
|
|
|
#ifndef pci_remap_cfgspace
|
|
|
|
#define pci_remap_cfgspace pci_remap_cfgspace
|
|
|
|
/**
 * pci_remap_cfgspace - generic fallback for mapping PCI config space
 * @offset: physical address of the config space region
 * @size: size of the region to map
 *
 * Default implementation simply uses ioremap(); arches that can
 * guarantee the non-posted write semantics required by the PCI spec
 * (see the comment block above) should override this macro/function.
 */
static inline void __iomem *pci_remap_cfgspace(phys_addr_t offset,
					       size_t size)
{
	return ioremap(offset, size);
}
|
|
|
|
#endif
|
|
|
|
#endif
|
|
|
|
|
2012-07-11 13:18:44 +08:00
|
|
|
/*
|
|
|
|
* Some systems do not have legacy ISA devices.
|
|
|
|
* /dev/port is not a valid interface on these systems.
|
|
|
|
* So for those archs, <asm/io.h> should define the following symbol.
|
|
|
|
*/
|
|
|
|
#ifndef arch_has_dev_port
|
|
|
|
#define arch_has_dev_port() (1)
|
|
|
|
#endif
|
|
|
|
|
2013-05-14 07:58:40 +08:00
|
|
|
/*
|
|
|
|
* Some systems (x86 without PAT) have a somewhat reliable way to mark a
|
|
|
|
* physical address range such that uncached mappings will actually
|
|
|
|
* end up write-combining. This facility should be used in conjunction
|
|
|
|
* with pgprot_writecombine, ioremap-wc, or set_memory_wc, since it has
|
|
|
|
* no effect if the per-page mechanisms are functional.
|
|
|
|
* (On x86 without PAT, these functions manipulate MTRRs.)
|
|
|
|
*
|
|
|
|
* arch_phys_del_wc(0) or arch_phys_del_wc(any error code) is guaranteed
|
|
|
|
* to have no effect.
|
|
|
|
*/
|
|
|
|
#ifndef arch_phys_wc_add
|
|
|
|
/* Default: no MTRR-like facility on this arch; succeed as a no-op. */
static inline int __must_check arch_phys_wc_add(unsigned long base,
						unsigned long size)
{
	return 0;  /* It worked (i.e. did nothing). */
}
|
|
|
|
|
|
|
|
/* Matching no-op teardown for the default arch_phys_wc_add(). */
static inline void arch_phys_wc_del(int handle)
{
}
|
|
|
|
|
|
|
|
#define arch_phys_wc_add arch_phys_wc_add
|
2015-05-26 16:28:13 +08:00
|
|
|
#ifndef arch_phys_wc_index
|
|
|
|
/* Default: no cookie-to-index translation for WC handles; -1 means "no index". */
static inline int arch_phys_wc_index(int handle)
{
	return -1;
}
|
|
|
|
#define arch_phys_wc_index arch_phys_wc_index
|
|
|
|
#endif
|
2013-05-14 07:58:40 +08:00
|
|
|
#endif
|
|
|
|
|
2015-08-11 11:07:06 +08:00
|
|
|
enum {
	/* See memremap() kernel-doc for usage description... */
	MEMREMAP_WB = 1 << 0,	/* write-back cacheable mapping */
	MEMREMAP_WT = 1 << 1,	/* write-through mapping */
	MEMREMAP_WC = 1 << 2,	/* write-combining mapping */
	MEMREMAP_ENC = 1 << 3,	/* map memory as encrypted */
	MEMREMAP_DEC = 1 << 4,	/* map memory as decrypted */
};
|
|
|
|
|
|
|
|
void *memremap(resource_size_t offset, size_t size, unsigned long flags);
|
|
|
|
void memunmap(void *addr);
|
|
|
|
|
2016-10-24 13:27:59 +08:00
|
|
|
/*
|
|
|
|
* On x86 PAT systems we have memory tracking that keeps track of
|
|
|
|
* the allowed mappings on memory ranges. This tracking works for
|
|
|
|
* all the in-kernel mapping APIs (ioremap*), but where the user
|
|
|
|
* wishes to map a range from a physical device into user memory
|
|
|
|
* the tracking won't be updated. This API is to be used by
|
|
|
|
* drivers which remap physical device pages into userspace,
|
|
|
|
* and wants to make sure they are mapped WC and not UC.
|
|
|
|
*/
|
|
|
|
#ifndef arch_io_reserve_memtype_wc
|
|
|
|
/* Default: no PAT-style memtype tracking to reserve against; succeed as a no-op. */
static inline int arch_io_reserve_memtype_wc(resource_size_t base,
					     resource_size_t size)
{
	return 0;
}
|
|
|
|
|
|
|
|
/* Matching no-op release for the default arch_io_reserve_memtype_wc(). */
static inline void arch_io_free_memtype_wc(resource_size_t base,
					   resource_size_t size)
{
}
|
|
|
|
#endif
|
|
|
|
|
2006-02-01 19:05:16 +08:00
|
|
|
#endif /* _LINUX_IO_H */
|