locking, drivers/dca: Annotate dca_lock as raw

The dca_lock can be taken in atomic context:

[   25.607536] Call Trace:
[   25.607557]  [<ffffffff820078a1>] try_stack_unwind+0x151/0x1a0
[   25.607566]  [<ffffffff820062c2>] dump_trace+0x92/0x370
[   25.607573]  [<ffffffff8200731c>] show_trace_log_lvl+0x5c/0x80
[   25.607578]  [<ffffffff82007355>] show_trace+0x15/0x20
[   25.607587]  [<ffffffff823f4588>] dump_stack+0x77/0x8f
[   25.607595]  [<ffffffff82043f2a>] __might_sleep+0x11a/0x130
[   25.607602]  [<ffffffff823f7b93>] rt_spin_lock+0x83/0x90
[   25.607611]  [<ffffffffa0209138>] dca_common_get_tag+0x28/0x80 [dca]
[   25.607622]  [<ffffffffa02091c8>] dca3_get_tag+0x18/0x20 [dca]
[   25.607634]  [<ffffffffa0244e71>] igb_update_dca+0xb1/0x1d0 [igb]
[   25.607649]  [<ffffffffa0244ff5>] igb_setup_dca+0x65/0x80 [igb]
[   25.607663]  [<ffffffffa02535a6>] igb_probe+0x946/0xe4d [igb]
[   25.607678]  [<ffffffff82247517>] local_pci_probe+0x17/0x20
[   25.607686]  [<ffffffff82248661>] pci_device_probe+0x121/0x130
[   25.607699]  [<ffffffff822e4832>] driver_probe_device+0xd2/0x2e0
[   25.607707]  [<ffffffff822e4adb>] __driver_attach+0x9b/0xa0
[   25.607714]  [<ffffffff822e3d1b>] bus_for_each_dev+0x6b/0xa0
[   25.607720]  [<ffffffff822e4591>] driver_attach+0x21/0x30
[   25.607727]  [<ffffffff822e3425>] bus_add_driver+0x1e5/0x350
[   25.607734]  [<ffffffff822e4e41>] driver_register+0x81/0x160
[   25.607742]  [<ffffffff8224890f>] __pci_register_driver+0x6f/0xf0
[   25.607752]  [<ffffffffa011505b>] igb_init_module+0x5b/0x5d [igb]
[   25.607769]  [<ffffffff820001dd>] do_one_initcall+0x3d/0x1a0
[   25.607778]  [<ffffffff820961f6>] sys_init_module+0xe6/0x270
[   25.607786]  [<ffffffff82003232>] system_call_fastpath+0x16/0x1b
[   25.607794]  [<00007f84d6783f4a>] 0x7f84d6783f4a

and thus must not become a sleeping, preemptible lock on -rt.
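
For illustration only (made-up names, not code from this patch): on
PREEMPT_RT a spinlock_t is substituted by a sleeping lock, so a lock
that is acquired from atomic context has to be a raw_spinlock_t, which
keeps the usual spin-with-interrupts-disabled semantics both in
mainline and on -rt. A minimal sketch of that rule:

  #include <linux/spinlock.h>
  #include <linux/types.h>

  /*
   * Sketch only, illustrative names: a lock taken by get_tag-style
   * helpers from atomic context must be raw so it never sleeps.
   */
  static DEFINE_RAW_SPINLOCK(example_lock);

  static u8 example_get_tag(int cpu)
  {
  	unsigned long flags;
  	u8 tag;

  	raw_spin_lock_irqsave(&example_lock, flags);
  	tag = (u8)cpu;		/* stand-in for the real tag lookup */
  	raw_spin_unlock_irqrestore(&example_lock, flags);

  	return tag;
  }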

In mainline this change documents the low-level nature of
the lock; otherwise there is no functional difference. Lockdep
and sparse checking will work as usual.

Signed-off-by: Mike Galbraith <efault@gmx.de>
[ Fixed the domain allocation, which was calling kzalloc from the irq-disabled section; the pattern is sketched below ]
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
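
The allocation fix mentioned above follows a common pattern:
kzalloc(GFP_KERNEL) may sleep and is therefore not allowed while a raw
spinlock is held, so the allocation is done with the lock dropped and
only installed after rechecking under the lock. A minimal sketch with
illustrative (non-driver) names:

  #include <linux/errno.h>
  #include <linux/slab.h>
  #include <linux/spinlock.h>

  /*
   * Sketch only, illustrative names: allocate outside the raw lock,
   * retake the lock, recheck, and free the allocation if we lost the race.
   */
  struct my_obj { int id; };

  static DEFINE_RAW_SPINLOCK(obj_lock);
  static struct my_obj *cur_obj;

  static int install_obj(void)
  {
  	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
  	unsigned long flags;

  	if (!obj)
  		return -ENOMEM;

  	raw_spin_lock_irqsave(&obj_lock, flags);
  	if (!cur_obj) {		/* recheck: did someone beat us to it? */
  		cur_obj = obj;
  		obj = NULL;
  	}
  	raw_spin_unlock_irqrestore(&obj_lock, flags);

  	kfree(obj);		/* no-op if our object was installed */
  	return 0;
  }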

--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -35,7 +35,7 @@ MODULE_VERSION(DCA_VERSION);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Intel Corporation");
 
-static DEFINE_SPINLOCK(dca_lock);
+static DEFINE_RAW_SPINLOCK(dca_lock);
 
 static LIST_HEAD(dca_domains);
 
@@ -101,10 +101,10 @@ static void unregister_dca_providers(void)
 
 	INIT_LIST_HEAD(&unregistered_providers);
 
-	spin_lock_irqsave(&dca_lock, flags);
+	raw_spin_lock_irqsave(&dca_lock, flags);
 
 	if (list_empty(&dca_domains)) {
-		spin_unlock_irqrestore(&dca_lock, flags);
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
 		return;
 	}
 
@@ -116,7 +116,7 @@ static void unregister_dca_providers(void)
 
 	dca_free_domain(domain);
 
-	spin_unlock_irqrestore(&dca_lock, flags);
+	raw_spin_unlock_irqrestore(&dca_lock, flags);
 
 	list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
 		dca_sysfs_remove_provider(dca);
@@ -144,13 +144,8 @@ static struct dca_domain *dca_get_domain(struct device *dev)
 	domain = dca_find_domain(rc);
 
 	if (!domain) {
-		if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains)) {
+		if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains))
 			dca_providers_blocked = 1;
-		} else {
-			domain = dca_allocate_domain(rc);
-			if (domain)
-				list_add(&domain->node, &dca_domains);
-		}
 	}
 
 	return domain;
@@ -198,19 +193,19 @@ int dca_add_requester(struct device *dev)
 	if (!dev)
 		return -EFAULT;
 
-	spin_lock_irqsave(&dca_lock, flags);
+	raw_spin_lock_irqsave(&dca_lock, flags);
 
 	/* check if the requester has not been added already */
 	dca = dca_find_provider_by_dev(dev);
 	if (dca) {
-		spin_unlock_irqrestore(&dca_lock, flags);
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
 		return -EEXIST;
 	}
 
 	pci_rc = dca_pci_rc_from_dev(dev);
 	domain = dca_find_domain(pci_rc);
 	if (!domain) {
-		spin_unlock_irqrestore(&dca_lock, flags);
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
 		return -ENODEV;
 	}
 
@@ -220,17 +215,17 @@ int dca_add_requester(struct device *dev)
 			break;
 	}
 
-	spin_unlock_irqrestore(&dca_lock, flags);
+	raw_spin_unlock_irqrestore(&dca_lock, flags);
 
 	if (slot < 0)
 		return slot;
 
 	err = dca_sysfs_add_req(dca, dev, slot);
 	if (err) {
-		spin_lock_irqsave(&dca_lock, flags);
+		raw_spin_lock_irqsave(&dca_lock, flags);
 		if (dca == dca_find_provider_by_dev(dev))
 			dca->ops->remove_requester(dca, dev);
-		spin_unlock_irqrestore(&dca_lock, flags);
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
 		return err;
 	}
 
@@ -251,14 +246,14 @@ int dca_remove_requester(struct device *dev)
 	if (!dev)
 		return -EFAULT;
 
-	spin_lock_irqsave(&dca_lock, flags);
+	raw_spin_lock_irqsave(&dca_lock, flags);
 	dca = dca_find_provider_by_dev(dev);
 	if (!dca) {
-		spin_unlock_irqrestore(&dca_lock, flags);
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
 		return -ENODEV;
 	}
 	slot = dca->ops->remove_requester(dca, dev);
-	spin_unlock_irqrestore(&dca_lock, flags);
+	raw_spin_unlock_irqrestore(&dca_lock, flags);
 
 	if (slot < 0)
 		return slot;
@@ -280,16 +275,16 @@ u8 dca_common_get_tag(struct device *dev, int cpu)
 	u8 tag;
 	unsigned long flags;
 
-	spin_lock_irqsave(&dca_lock, flags);
+	raw_spin_lock_irqsave(&dca_lock, flags);
 
 	dca = dca_find_provider_by_dev(dev);
 	if (!dca) {
-		spin_unlock_irqrestore(&dca_lock, flags);
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
 		return -ENODEV;
 	}
 	tag = dca->ops->get_tag(dca, dev, cpu);
 
-	spin_unlock_irqrestore(&dca_lock, flags);
+	raw_spin_unlock_irqrestore(&dca_lock, flags);
 	return tag;
 }
 
@@ -360,36 +355,51 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev)
 {
 	int err;
 	unsigned long flags;
-	struct dca_domain *domain;
+	struct dca_domain *domain, *newdomain = NULL;
 
-	spin_lock_irqsave(&dca_lock, flags);
+	raw_spin_lock_irqsave(&dca_lock, flags);
 	if (dca_providers_blocked) {
-		spin_unlock_irqrestore(&dca_lock, flags);
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
 		return -ENODEV;
 	}
-	spin_unlock_irqrestore(&dca_lock, flags);
+	raw_spin_unlock_irqrestore(&dca_lock, flags);
 
 	err = dca_sysfs_add_provider(dca, dev);
 	if (err)
 		return err;
 
-	spin_lock_irqsave(&dca_lock, flags);
+	raw_spin_lock_irqsave(&dca_lock, flags);
 	domain = dca_get_domain(dev);
 	if (!domain) {
+		struct pci_bus *rc;
+
 		if (dca_providers_blocked) {
-			spin_unlock_irqrestore(&dca_lock, flags);
+			raw_spin_unlock_irqrestore(&dca_lock, flags);
 			dca_sysfs_remove_provider(dca);
 			unregister_dca_providers();
-		} else {
-			spin_unlock_irqrestore(&dca_lock, flags);
+			return -ENODEV;
 		}
-		return -ENODEV;
+
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
+		rc = dca_pci_rc_from_dev(dev);
+		newdomain = dca_allocate_domain(rc);
+		if (!newdomain)
+			return -ENODEV;
+		raw_spin_lock_irqsave(&dca_lock, flags);
+		/* Recheck, we might have raced after dropping the lock */
+		domain = dca_get_domain(dev);
+		if (!domain) {
+			domain = newdomain;
+			newdomain = NULL;
+			list_add(&domain->node, &dca_domains);
+		}
 	}
 	list_add(&dca->node, &domain->dca_providers);
-	spin_unlock_irqrestore(&dca_lock, flags);
+	raw_spin_unlock_irqrestore(&dca_lock, flags);
 
 	blocking_notifier_call_chain(&dca_provider_chain,
 				     DCA_PROVIDER_ADD, NULL);
+	kfree(newdomain);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(register_dca_provider);
@@ -407,7 +417,7 @@ void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
 	blocking_notifier_call_chain(&dca_provider_chain,
 				     DCA_PROVIDER_REMOVE, NULL);
 
-	spin_lock_irqsave(&dca_lock, flags);
+	raw_spin_lock_irqsave(&dca_lock, flags);
 
 	list_del(&dca->node);
 
@@ -416,7 +426,7 @@ void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
 	if (list_empty(&domain->dca_providers))
 		dca_free_domain(domain);
 
-	spin_unlock_irqrestore(&dca_lock, flags);
+	raw_spin_unlock_irqrestore(&dca_lock, flags);
 
 	dca_sysfs_remove_provider(dca);
 }