ARM development updates for 5.20-rc1


Merge tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:
 "Not much this time around, the 5.20-rc1 development updates for arm
  are:

   - add KASAN support for vmalloc space on arm

   - some sparse fixes from Ben Dooks

   - rework amba device handling (so device addition isn't deferred)"

* tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm:
  ARM: 9220/1: amba: Remove deferred device addition
  ARM: 9219/1: fix undeclared soft_restart
  ARM: 9218/1: dma-mapping: fix pointer/integer warning
  ARM: 9217/1: add definition of arch_irq_work_raise()
  ARM: 9203/1: kconfig: fix MODULE_PLTS for KASAN with KASAN_VMALLOC
  ARM: 9202/1: kasan: support CONFIG_KASAN_VMALLOC
Commit 995177a4c7 by Linus Torvalds, 2022-08-04 15:31:09 -07:00
6 changed files with 157 additions and 171 deletions

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig

@@ -75,6 +75,7 @@ config ARM
 	select HAVE_ARCH_KFENCE if MMU && !XIP_KERNEL
 	select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
 	select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
+	select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN
 	select HAVE_ARCH_MMAP_RND_BITS if MMU
 	select HAVE_ARCH_PFN_VALID
 	select HAVE_ARCH_SECCOMP
@@ -1419,6 +1420,7 @@ config HW_PERF_EVENTS
 config ARM_MODULE_PLTS
 	bool "Use PLTs to allow module memory to spill over into vmalloc area"
 	depends on MODULES
+	select KASAN_VMALLOC if KASAN
 	default y
 	help
 	  Allocate PLTs when loading modules so that jumps and calls whose
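
Taken together, the two Kconfig hunks wire the new feature up: HAVE_ARCH_KASAN_VMALLOC makes the generic KASAN_VMALLOC option available on ARM wherever KASAN itself is, and ARM_MODULE_PLTS now force-enables it under KASAN, because PLT-using modules may spill into the vmalloc area, which then needs shadow coverage. A .config fragment exercising the new paths might look like this (a minimal sketch, assuming an MMU-enabled, non-XIP kernel):

    CONFIG_KASAN=y
    CONFIG_KASAN_GENERIC=y
    CONFIG_KASAN_VMALLOC=y
    CONFIG_MODULES=y
    CONFIG_ARM_MODULE_PLTS=y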

diff --git a/arch/arm/include/asm/irq_work.h b/arch/arm/include/asm/irq_work.h

@@ -9,4 +9,6 @@ static inline bool arch_irq_work_has_interrupt(void)
 	return is_smp();
 }
 
+extern void arch_irq_work_raise(void);
+
 #endif /* _ASM_ARM_IRQ_WORK_H */

diff --git a/arch/arm/kernel/reboot.c b/arch/arm/kernel/reboot.c

@@ -10,6 +10,7 @@
 #include <asm/cacheflush.h>
 #include <asm/idmap.h>
 #include <asm/virt.h>
+#include <asm/system_misc.h>
 
 #include "reboot.h"

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c

@@ -709,7 +709,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	*handle = DMA_MAPPING_ERROR;
 	allowblock = gfpflags_allow_blocking(gfp);
-	cma = allowblock ? dev_get_cma_area(dev) : false;
+	cma = allowblock ? dev_get_cma_area(dev) : NULL;
 
 	if (cma)
 		buf->allocator = &cma_allocator;
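
This one-liner is also a sparse fix: cma is a struct cma pointer, and using the boolean false in the ternary's else arm makes sparse complain about a plain integer used as a NULL pointer. Behaviour is unchanged; NULL just states the intent. A minimal reproduction, as a hypothetical standalone file run through sparse:

    #include <stddef.h>

    struct cma;

    static struct cma *pick_area(int allow, struct cma *area)
    {
            /* sparse: "Using plain integer as NULL pointer" */
            return allow ? area : 0;
            /* fixed form: return allow ? area : NULL; */
    }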

diff --git a/arch/arm/mm/kasan_init.c b/arch/arm/mm/kasan_init.c

@@ -236,7 +236,11 @@ void __init kasan_init(void)
 	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
-	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START),
+	if (!IS_ENABLED(CONFIG_KASAN_VMALLOC))
+		kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START),
+					    kasan_mem_to_shadow((void *)VMALLOC_END));
+
+	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_END),
 				    kasan_mem_to_shadow((void *)-1UL) + 1);
 
 	for_each_mem_range(i, &pa_start, &pa_end) {
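
The intent of the split, as far as I read it: without CONFIG_KASAN_VMALLOC, the whole vmalloc range is still mapped up front to the zero early-shadow page, so vmalloc memory stays untracked; with it enabled, that range is skipped here and shadow pages are instead allocated on demand by the generic KASAN vmalloc hooks, while the region above VMALLOC_END keeps the early zero shadow. Roughly:

    VMALLOC_START..VMALLOC_END : shadow populated lazily, per vmalloc
                                 area (only with CONFIG_KASAN_VMALLOC=y)
    VMALLOC_END..end of space  : early zero shadow, populated here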

diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c

@@ -130,11 +130,100 @@ static struct attribute *amba_dev_attrs[] = {
 };
 ATTRIBUTE_GROUPS(amba_dev);
 
+static int amba_read_periphid(struct amba_device *dev)
+{
+	struct reset_control *rstc;
+	u32 size, pid, cid;
+	void __iomem *tmp;
+	int i, ret;
+
+	ret = dev_pm_domain_attach(&dev->dev, true);
+	if (ret) {
+		dev_dbg(&dev->dev, "can't get PM domain: %d\n", ret);
+		goto err_out;
+	}
+
+	ret = amba_get_enable_pclk(dev);
+	if (ret) {
+		dev_dbg(&dev->dev, "can't get pclk: %d\n", ret);
+		goto err_pm;
+	}
+
+	/*
+	 * Find reset control(s) of the amba bus and de-assert them.
+	 */
+	rstc = of_reset_control_array_get_optional_shared(dev->dev.of_node);
+	if (IS_ERR(rstc)) {
+		ret = PTR_ERR(rstc);
+		if (ret != -EPROBE_DEFER)
+			dev_err(&dev->dev, "can't get reset: %d\n", ret);
+		goto err_clk;
+	}
+	reset_control_deassert(rstc);
+	reset_control_put(rstc);
+
+	size = resource_size(&dev->res);
+	tmp = ioremap(dev->res.start, size);
+	if (!tmp) {
+		ret = -ENOMEM;
+		goto err_clk;
+	}
+
+	/*
+	 * Read pid and cid based on size of resource
+	 * they are located at end of region
+	 */
+	for (pid = 0, i = 0; i < 4; i++)
+		pid |= (readl(tmp + size - 0x20 + 4 * i) & 255) << (i * 8);
+	for (cid = 0, i = 0; i < 4; i++)
+		cid |= (readl(tmp + size - 0x10 + 4 * i) & 255) << (i * 8);
+
+	if (cid == CORESIGHT_CID) {
+		/* set the base to the start of the last 4k block */
+		void __iomem *csbase = tmp + size - 4096;
+
+		dev->uci.devarch = readl(csbase + UCI_REG_DEVARCH_OFFSET);
+		dev->uci.devtype = readl(csbase + UCI_REG_DEVTYPE_OFFSET) & 0xff;
+	}
+
+	if (cid == AMBA_CID || cid == CORESIGHT_CID) {
+		dev->periphid = pid;
+		dev->cid = cid;
+	}
+
+	if (!dev->periphid)
+		ret = -ENODEV;
+
+	iounmap(tmp);
+
+err_clk:
+	amba_put_disable_pclk(dev);
+err_pm:
+	dev_pm_domain_detach(&dev->dev, true);
+err_out:
+	return ret;
+}
+
 static int amba_match(struct device *dev, struct device_driver *drv)
 {
 	struct amba_device *pcdev = to_amba_device(dev);
 	struct amba_driver *pcdrv = to_amba_driver(drv);
 
+	if (!pcdev->periphid) {
+		int ret = amba_read_periphid(pcdev);
+
+		/*
+		 * Returning any error other than -EPROBE_DEFER from bus match
+		 * can cause driver registration failure. So, if there's a
+		 * permanent failure in reading pid and cid, simply map it to
+		 * -EPROBE_DEFER.
+		 */
+		if (ret)
+			return -EPROBE_DEFER;
+
+		dev_set_uevent_suppress(dev, false);
+		kobject_uevent(&dev->kobj, KOBJ_ADD);
+	}
+
 	/* When driver_override is set, only bind to the matching driver */
 	if (pcdev->driver_override)
 		return !strcmp(pcdev->driver_override, drv->name);
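
The rewritten amba_match() leans on a driver-core convention: a bus ->match() callback may return -EPROBE_DEFER to put the device on the deferred-probe list and have the match retried later, while other negative returns can fail driver registration outright (hence the mapping in the comment above). The shape of the pattern, reduced to a sketch with hypothetical mybus_* names:

    static int mybus_match(struct device *dev, struct device_driver *drv)
    {
            struct mybus_device *mdev = to_mybus_device(dev);

            /* ID registers unreadable (clock/power not up yet)?
             * Ask the core to retry this match later. */
            if (!mdev->id_valid)
                    return -EPROBE_DEFER;

            return mybus_id_matches(mdev, drv);
    }
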
@@ -368,6 +457,42 @@ static int __init amba_init(void)
 
 postcore_initcall(amba_init);
 
+static int amba_proxy_probe(struct amba_device *adev,
+			    const struct amba_id *id)
+{
+	WARN(1, "Stub driver should never match any device.\n");
+	return -ENODEV;
+}
+
+static const struct amba_id amba_stub_drv_ids[] = {
+	{ 0, 0 },
+};
+
+static struct amba_driver amba_proxy_drv = {
+	.drv = {
+		.name = "amba-proxy",
+	},
+	.probe = amba_proxy_probe,
+	.id_table = amba_stub_drv_ids,
+};
+
+static int __init amba_stub_drv_init(void)
+{
+	if (!IS_ENABLED(CONFIG_MODULES))
+		return 0;
+
+	/*
+	 * The amba_match() function will get called only if there is at least
+	 * one amba driver registered. If all amba drivers are modules and are
+	 * only loaded based on uevents, then we'll hit a chicken-and-egg
+	 * situation where amba_match() is waiting on drivers and drivers are
+	 * waiting on amba_match(). So, register a stub driver to make sure
+	 * amba_match() is called even if no amba driver has been registered.
+	 */
+	return amba_driver_register(&amba_proxy_drv);
+}
+late_initcall_sync(amba_stub_drv_init);
+
 /**
  * amba_driver_register - register an AMBA device driver
  * @drv: amba device driver structure
@@ -410,156 +535,6 @@ static void amba_device_release(struct device *dev)
 	kfree(d);
 }
 
-static int amba_read_periphid(struct amba_device *dev)
-{
-	struct reset_control *rstc;
-	u32 size, pid, cid;
-	void __iomem *tmp;
-	int i, ret;
-
-	ret = dev_pm_domain_attach(&dev->dev, true);
-	if (ret)
-		goto err_out;
-
-	ret = amba_get_enable_pclk(dev);
-	if (ret)
-		goto err_pm;
-
-	/*
-	 * Find reset control(s) of the amba bus and de-assert them.
-	 */
-	rstc = of_reset_control_array_get_optional_shared(dev->dev.of_node);
-	if (IS_ERR(rstc)) {
-		ret = PTR_ERR(rstc);
-		if (ret != -EPROBE_DEFER)
-			dev_err(&dev->dev, "can't get reset: %d\n", ret);
-		goto err_clk;
-	}
-	reset_control_deassert(rstc);
-	reset_control_put(rstc);
-
-	size = resource_size(&dev->res);
-	tmp = ioremap(dev->res.start, size);
-	if (!tmp) {
-		ret = -ENOMEM;
-		goto err_clk;
-	}
-
-	/*
-	 * Read pid and cid based on size of resource
-	 * they are located at end of region
-	 */
-	for (pid = 0, i = 0; i < 4; i++)
-		pid |= (readl(tmp + size - 0x20 + 4 * i) & 255) << (i * 8);
-	for (cid = 0, i = 0; i < 4; i++)
-		cid |= (readl(tmp + size - 0x10 + 4 * i) & 255) << (i * 8);
-
-	if (cid == CORESIGHT_CID) {
-		/* set the base to the start of the last 4k block */
-		void __iomem *csbase = tmp + size - 4096;
-
-		dev->uci.devarch = readl(csbase + UCI_REG_DEVARCH_OFFSET);
-		dev->uci.devtype = readl(csbase + UCI_REG_DEVTYPE_OFFSET) & 0xff;
-	}
-
-	if (cid == AMBA_CID || cid == CORESIGHT_CID) {
-		dev->periphid = pid;
-		dev->cid = cid;
-	}
-
-	if (!dev->periphid)
-		ret = -ENODEV;
-
-	iounmap(tmp);
-
-err_clk:
-	amba_put_disable_pclk(dev);
-err_pm:
-	dev_pm_domain_detach(&dev->dev, true);
-err_out:
-	return ret;
-}
-
-static int amba_device_try_add(struct amba_device *dev, struct resource *parent)
-{
-	int ret;
-
-	ret = request_resource(parent, &dev->res);
-	if (ret)
-		goto err_out;
-
-	/* Hard-coded primecell ID instead of plug-n-play */
-	if (dev->periphid != 0)
-		goto skip_probe;
-
-	ret = amba_read_periphid(dev);
-	if (ret)
-		goto err_release;
-
-skip_probe:
-	ret = device_add(&dev->dev);
-err_release:
-	if (ret)
-		release_resource(&dev->res);
-err_out:
-	return ret;
-}
-
-/*
- * Registration of AMBA device require reading its pid and cid registers.
- * To do this, the device must be turned on (if it is a part of power domain)
- * and have clocks enabled. However in some cases those resources might not be
- * yet available. Returning EPROBE_DEFER is not a solution in such case,
- * because callers don't handle this special error code. Instead such devices
- * are added to the special list and their registration is retried from
- * periodic worker, until all resources are available and registration succeeds.
- */
-struct deferred_device {
-	struct amba_device *dev;
-	struct resource *parent;
-	struct list_head node;
-};
-
-static LIST_HEAD(deferred_devices);
-static DEFINE_MUTEX(deferred_devices_lock);
-
-static void amba_deferred_retry_func(struct work_struct *dummy);
-static DECLARE_DELAYED_WORK(deferred_retry_work, amba_deferred_retry_func);
-
-#define DEFERRED_DEVICE_TIMEOUT	(msecs_to_jiffies(5 * 1000))
-
-static int amba_deferred_retry(void)
-{
-	struct deferred_device *ddev, *tmp;
-
-	mutex_lock(&deferred_devices_lock);
-
-	list_for_each_entry_safe(ddev, tmp, &deferred_devices, node) {
-		int ret = amba_device_try_add(ddev->dev, ddev->parent);
-
-		if (ret == -EPROBE_DEFER)
-			continue;
-
-		list_del_init(&ddev->node);
-		amba_device_put(ddev->dev);
-		kfree(ddev);
-	}
-
-	mutex_unlock(&deferred_devices_lock);
-
-	return 0;
-}
-late_initcall(amba_deferred_retry);
-
-static void amba_deferred_retry_func(struct work_struct *dummy)
-{
-	amba_deferred_retry();
-
-	if (!list_empty(&deferred_devices))
-		schedule_delayed_work(&deferred_retry_work,
-				      DEFERRED_DEVICE_TIMEOUT);
-}
-
 /**
  * amba_device_add - add a previously allocated AMBA device structure
  * @dev: AMBA device allocated by amba_device_alloc
@@ -571,28 +546,30 @@ static void amba_deferred_retry_func(struct work_struct *dummy)
  */
 int amba_device_add(struct amba_device *dev, struct resource *parent)
 {
-	int ret = amba_device_try_add(dev, parent);
+	int ret;
 
-	if (ret == -EPROBE_DEFER) {
-		struct deferred_device *ddev;
+	ret = request_resource(parent, &dev->res);
+	if (ret)
+		return ret;
 
-		ddev = kmalloc(sizeof(*ddev), GFP_KERNEL);
-		if (!ddev)
-			return -ENOMEM;
-
-		ddev->dev = dev;
-		ddev->parent = parent;
-		ret = 0;
-
-		mutex_lock(&deferred_devices_lock);
-
-		if (list_empty(&deferred_devices))
-			schedule_delayed_work(&deferred_retry_work,
-					      DEFERRED_DEVICE_TIMEOUT);
-		list_add_tail(&ddev->node, &deferred_devices);
-
-		mutex_unlock(&deferred_devices_lock);
+	/* If primecell ID isn't hard-coded, figure it out */
+	if (!dev->periphid) {
+		/*
+		 * AMBA device uevents require reading its pid and cid
+		 * registers. To do this, the device must be on, clocked and
+		 * out of reset. However in some cases those resources might
+		 * not yet be available. If that's the case, we suppress the
+		 * generation of uevents until we can read the pid and cid
+		 * registers. See also amba_match().
+		 */
+		if (amba_read_periphid(dev))
+			dev_set_uevent_suppress(&dev->dev, true);
 	}
 
+	ret = device_add(&dev->dev);
+	if (ret)
+		release_resource(&dev->res);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(amba_device_add);
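
With the deferred-device list gone, amba_device_add() is now unconditional: claim the resource, suppress uevents if the pid/cid registers cannot be read yet, and call device_add(); discovery of the peripheral ID is then retried from amba_match() instead of a periodic worker. From a caller's side, registration looks like this (a sketch; the device name and base address are illustrative):

    #include <linux/amba/bus.h>
    #include <linux/ioport.h>
    #include <linux/sizes.h>

    static int register_my_primecell(resource_size_t base)
    {
            struct amba_device *adev;
            int ret;

            adev = amba_device_alloc("myuart.0", base, SZ_4K);
            if (!adev)
                    return -ENOMEM;

            /* Leaving adev->periphid at 0 means amba_device_add() will
             * try to read the pid/cid registers and, failing that,
             * suppress uevents until amba_match() succeeds. */
            ret = amba_device_add(adev, &iomem_resource);
            if (ret)
                    amba_device_put(adev);

            return ret;
    }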