Merge branch 'akpm' (final batch from Andrew)

Merge third patch-bomb from Andrew Morton:
 "This wraps me up for -rc1.

  - Lots of misc stuff and things which were deferred/missed from
    patchbombings 1 & 2.
  - ocfs2 things
  - lib/scatterlist
  - hfsplus
  - fatfs
  - documentation
  - signals
  - procfs
  - lockdep
  - coredump
  - seqfile core
  - kexec
  - Tejun's large IDR tree reworkings
  - ipmi
  - partitions
  - nbd
  - random() things
  - kfifo
  - tools/testing/selftests updates
  - Sasha's large and pointless hlist cleanup"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (163 commits)
  hlist: drop the node parameter from iterators
  kcmp: make it depend on CHECKPOINT_RESTORE
  selftests: add a simple doc
  tools/testing/selftests/Makefile: rearrange targets
  selftests/efivarfs: add create-read test
  selftests/efivarfs: add empty file creation test
  selftests: add tests for efivarfs
  kfifo: fix kfifo_alloc() and kfifo_init()
  kfifo: move kfifo.c from kernel/ to lib/
  arch Kconfig: centralise CONFIG_ARCH_NO_VIRT_TO_BUS
  w1: add support for DS2413 Dual Channel Addressable Switch
  memstick: move the dereference below the NULL test
  drivers/pps/clients/pps-gpio.c: use devm_kzalloc
  Documentation/DMA-API-HOWTO.txt: fix typo
  include/linux/eventfd.h: fix incorrect filename is a comment
  mtd: mtd_stresstest: use prandom_bytes()
  mtd: mtd_subpagetest: convert to use prandom library
  mtd: mtd_speedtest: use prandom_bytes
  mtd: mtd_pagetest: convert to use prandom library
  mtd: mtd_oobtest: convert to use prandom library
  ...
commit 2a7d2b96d5
Changed paths (as rendered, truncated):
  Documentation/
  MAINTAINERS
  arch/ (Kconfig; alpha, arm, avr32, blackfin, cris, frv, h8300, ia64,
         m32r, m68k, microblaze, mips, mn10300, openrisc, parisc, powerpc,
         s390, score, sh, sparc, tile, unicore32, x86, xtensa)
  block/
  crypto/
  drivers/ (atm, block, char, clk, dca, dma, firewire, gpio, gpu/drm,
            i2c, infiniband (core, hw), ...)
Documentation/DMA-API-HOWTO.txt
@@ -488,9 +488,10 @@ will invoke the generic mapping error check interface. Doing so will ensure
 that the mapping code will work correctly on all dma implementations without
 any dependency on the specifics of the underlying implementation. Using the
 returned address without checking for errors could result in failures ranging
-from panics to silent data corruption. Couple of example of incorrect ways to
-check for errors that make assumptions about the underlying dma implementation
-are as follows and these are applicable to dma_map_page() as well.
+from panics to silent data corruption. A couple of examples of incorrect ways
+to check for errors that make assumptions about the underlying dma
+implementation are as follows and these are applicable to dma_map_page() as
+well.
 
 Incorrect example 1:
 	dma_addr_t dma_handle;
 
@@ -751,7 +752,7 @@ Example 1:
 	dma_unmap_single(dma_handle1);
 map_error_handling1:
 
-Example 2: (if buffers are allocated a loop, unmap all mapped buffers when
+Example 2: (if buffers are allocated in a loop, unmap all mapped buffers when
 	mapping error is detected in the middle)
 
 	dma_addr_t dma_addr;
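For contrast with the incorrect examples this hunk is correcting, the portable
idiom is the dma_mapping_error() check itself; a minimal sketch (the
error-recovery details are illustrative, not part of the patch):

	dma_addr_t dma_handle;

	dma_handle = dma_map_single(dev, addr, size, direction);
	if (dma_mapping_error(dev, dma_handle)) {
		/*
		 * Mapping failed: back off, defer the I/O, or reset the
		 * driver; never hand the returned address to hardware.
		 */
		goto map_error_handling;
	}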
Documentation/IPMI.txt
@@ -348,34 +348,40 @@ You can change this at module load time (for a module) with:
 
   modprobe ipmi_si.o type=<type1>,<type2>....
        ports=<port1>,<port2>... addrs=<addr1>,<addr2>...
-       irqs=<irq1>,<irq2>... trydefaults=[0|1]
+       irqs=<irq1>,<irq2>...
        regspacings=<sp1>,<sp2>,... regsizes=<size1>,<size2>,...
        regshifts=<shift1>,<shift2>,...
        slave_addrs=<addr1>,<addr2>,...
        force_kipmid=<enable1>,<enable2>,...
        kipmid_max_busy_us=<ustime1>,<ustime2>,...
        unload_when_empty=[0|1]
+       trydefaults=[0|1] trydmi=[0|1] tryacpi=[0|1]
+       tryplatform=[0|1] trypci=[0|1]
 
-Each of these except si_trydefaults is a list, the first item for the
+Each of these except try... items is a list, the first item for the
 first interface, second item for the second interface, etc.
 
 The si_type may be either "kcs", "smic", or "bt".  If you leave it blank, it
 defaults to "kcs".
 
-If you specify si_addrs as non-zero for an interface, the driver will
+If you specify addrs as non-zero for an interface, the driver will
 use the memory address given as the address of the device.  This
 overrides si_ports.
 
-If you specify si_ports as non-zero for an interface, the driver will
+If you specify ports as non-zero for an interface, the driver will
 use the I/O port given as the device address.
 
-If you specify si_irqs as non-zero for an interface, the driver will
+If you specify irqs as non-zero for an interface, the driver will
 attempt to use the given interrupt for the device.
 
-si_trydefaults sets whether the standard IPMI interface at 0xca2 and
+trydefaults sets whether the standard IPMI interface at 0xca2 and
 any interfaces specified by ACPE are tried.  By default, the driver
 tries it, set this value to zero to turn this off.
 
+The other try... items disable discovery by their corresponding
+names.  These are all enabled by default, set them to zero to disable
+them.  The tryplatform disables openfirmware.
+
 The next three parameters have to do with register layout.  The
 registers used by the interfaces may not appear at successive
 locations and they may not be in 8-bit registers.  These parameters
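Putting the documented parameters together, a plausible invocation for a
single KCS interface at the standard port, with default-location probing
disabled, might look like (the values are illustrative, not from the patch):

	modprobe ipmi_si.o type=kcs ports=0xca2 regspacings=1 trydefaults=0

Each comma-separated list position addresses one interface, so a second
interface simply appends a second item to each list.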
Documentation/blockdev/nbd.txt
@@ -4,43 +4,13 @@
 can use a remote server as one of its block devices. So every time
 the client computer wants to read, e.g., /dev/nb0, it sends a
 request over TCP to the server, which will reply with the data read.
-This can be used for stations with low disk space (or even diskless -
-if you boot from floppy) to borrow disk space from another computer.
-Unlike NFS, it is possible to put any filesystem on it, etc. It should
-even be possible to use NBD as a root filesystem (I've never tried),
-but it requires a user-level program to be in the initrd to start.
-It also allows you to run block-device in user land (making server
-and client physically the same computer, communicating using loopback).
-
-Current state: It currently works. Network block device is stable.
-I originally thought that it was impossible to swap over TCP. It
-turned out not to be true - swapping over TCP now works and seems
-to be deadlock-free, but it requires heavy patches into Linux's
-network layer.
-
+This can be used for stations with low disk space (or even diskless)
+to borrow disk space from another computer.
+Unlike NFS, it is possible to put any filesystem on it, etc.
+
 For more information, or to download the nbd-client and nbd-server
 tools, go to http://nbd.sf.net/.
 
-Howto: To setup nbd, you can simply do the following:
-
-First, serve a device or file from a remote server:
-
-	nbd-server <port-number> <device-or-file-to-serve-to-client>
-
-	e.g.,
-		root@server1 # nbd-server 1234 /dev/sdb1
-
-	(serves sdb1 partition on TCP port 1234)
-
-Then, on the local (client) system:
-
-	nbd-client <server-name-or-IP> <server-port-number> /dev/nb[0-n]
-
-	e.g.,
-		root@client1 # nbd-client server1 1234 /dev/nb0
-
-	(creates the nb0 device on client1)
-
 The nbd kernel module need only be installed on the client
 system, as the nbd-server is completely in userspace. In fact,
 the nbd-server has been successfully ported to other operating
Documentation/cgroups/blkio-controller.txt
@@ -75,7 +75,7 @@ Throttling/Upper Limit policy
 	mount -t cgroup -o blkio none /sys/fs/cgroup/blkio
 
 - Specify a bandwidth rate on particular device for root group. The format
-  for policy is "<major>:<minor>  <byes_per_second>".
+  for policy is "<major>:<minor>  <bytes_per_second>".
 
 	echo "8:16  1048576" > /sys/fs/cgroup/blkio/blkio.throttle.read_bps_device
 
MAINTAINERS
@@ -97,12 +97,13 @@ Descriptions of section entries:
 	   X:	net/ipv6/
 	   matches all files in and below net excluding net/ipv6/
 	K: Keyword perl extended regex pattern to match content in a
-	   patch or file.  For instance:
+	   patch or file, or an affected filename.  For instance:
 	   K: of_get_profile
-	      matches patches or files that contain "of_get_profile"
+	      matches patch or file content, or filenames, that contain
+	      "of_get_profile"
 	   K: \b(printk|pr_(info|err))\b
-	      matches patches or files that contain one or more of the words
-	      printk, pr_info or pr_err
+	      matches patch or file content, or filenames, that contain one or
+	      more of the words printk, pr_info or pr_err
 	   One regex pattern per line.  Multiple K: lines acceptable.
 
 Note: For the hard of thinking, this list is meant to remain in alphabetical
@@ -5437,6 +5438,7 @@ F:	net/netrom/
 NETWORK BLOCK DEVICE (NBD)
 M:	Paul Clements <Paul.Clements@steeleye.com>
 S:	Maintained
+L:	nbd-general@lists.sourceforge.net
 F:	Documentation/blockdev/nbd.txt
 F:	drivers/block/nbd.c
 F:	include/linux/nbd.h
@@ -7539,6 +7541,7 @@ STAGING - NVIDIA COMPLIANT EMBEDDED CONTROLLER INTERFACE (nvec)
 M:	Julian Andres Klode <jak@jak-linux.org>
 M:	Marc Dietrich <marvin24@gmx.de>
 L:	ac100@lists.launchpad.net (moderated for non-subscribers)
+L:	linux-tegra@vger.kernel.org
 S:	Maintained
 F:	drivers/staging/nvec/
 
@@ -7831,9 +7834,7 @@ L:	linux-tegra@vger.kernel.org
 Q:	http://patchwork.ozlabs.org/project/linux-tegra/list/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-tegra.git
 S:	Supported
-F:	arch/arm/mach-tegra
-F:	arch/arm/boot/dts/tegra*
-F:	arch/arm/configs/tegra_defconfig
+K:	(?i)[^a-z]tegra
 
 TEHUTI ETHERNET DRIVER
 M:	Andy Gospodarek <andy@greyhouse.net>
arch/Kconfig
@@ -303,6 +303,13 @@ config ARCH_WANT_OLD_COMPAT_IPC
 	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
 	bool
 
+config HAVE_VIRT_TO_BUS
+	bool
+	help
+	  An architecture should select this if it implements the
+	  deprecated interface virt_to_bus().  All new architectures
+	  should probably not select this.
+
 config HAVE_ARCH_SECCOMP_FILTER
 	bool
 	help
arch/alpha/Kconfig
@@ -9,6 +9,7 @@ config ALPHA
 	select HAVE_PERF_EVENTS
 	select HAVE_DMA_ATTRS
 	select HAVE_GENERIC_HARDIRQS
+	select HAVE_VIRT_TO_BUS
 	select GENERIC_IRQ_PROBE
 	select AUTO_IRQ_AFFINITY if SMP
 	select GENERIC_IRQ_SHOW
arch/arm/Kconfig
@@ -49,6 +49,7 @@ config ARM
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_UID16
+	select HAVE_VIRT_TO_BUS
 	select KTIME_SCALAR
 	select PERF_USE_VMALLOC
 	select RTC_LIB
arch/arm/kernel/kprobes.c
@@ -395,7 +395,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
 
@@ -415,7 +415,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 	 * real return address, and all the rest will point to
 	 * kretprobe_trampoline
 	 */
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -442,7 +442,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 	kretprobe_assert(ri, orig_ret_address, trampoline_address);
 	kretprobe_hash_unlock(current, &flags);
 
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
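The same mechanical transformation repeats across every architecture below:
with the hlist cleanup, the iterator derives the next node from the entry
itself, so the separate struct hlist_node cursor argument disappears. A
minimal before/after sketch of the calling convention (obj, mylist and
process() are illustrative names, not from the patch):

	struct obj {
		int key;
		struct hlist_node link;
	};

	static HLIST_HEAD(mylist);

	static void walk(void)
	{
		struct obj *o;

		/* Old form needed a cursor:
		 *   struct hlist_node *pos;
		 *   hlist_for_each_entry(o, pos, &mylist, link)
		 * New form iterates over the entries directly:
		 */
		hlist_for_each_entry(o, &mylist, link)
			process(o);	/* o is the containing struct obj */
	}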
arch/avr32/Kconfig
@@ -7,6 +7,7 @@ config AVR32
 	select HAVE_OPROFILE
 	select HAVE_KPROBES
 	select HAVE_GENERIC_HARDIRQS
+	select HAVE_VIRT_TO_BUS
 	select GENERIC_IRQ_PROBE
 	select GENERIC_ATOMIC64
 	select HARDIRQS_SW_RESEND
arch/blackfin/Kconfig
@@ -33,6 +33,7 @@ config BLACKFIN
 	select ARCH_HAVE_CUSTOM_GPIO_H
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select HAVE_UID16
+	select HAVE_VIRT_TO_BUS
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select HAVE_GENERIC_HARDIRQS
 	select GENERIC_ATOMIC64
arch/cris/Kconfig
@@ -43,6 +43,7 @@ config CRIS
 	select GENERIC_ATOMIC64
 	select HAVE_GENERIC_HARDIRQS
 	select HAVE_UID16
+	select HAVE_VIRT_TO_BUS
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select GENERIC_IRQ_SHOW
 	select GENERIC_IOMAP
arch/frv/Kconfig
@@ -6,6 +6,7 @@ config FRV
 	select HAVE_PERF_EVENTS
 	select HAVE_UID16
 	select HAVE_GENERIC_HARDIRQS
+	select HAVE_VIRT_TO_BUS
 	select GENERIC_IRQ_SHOW
 	select HAVE_DEBUG_BUGVERBOSE
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
arch/frv/mm/elf-fdpic.c
@@ -60,7 +60,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 					unsigned long pgoff, unsigned long flags)
 {
 	struct vm_area_struct *vma;
-	unsigned long limit;
+	struct vm_unmapped_area_info info;
 
 	if (len > TASK_SIZE)
 		return -ENOMEM;
@@ -79,39 +79,24 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 	}
 
 	/* search between the bottom of user VM and the stack grow area */
-	addr = PAGE_SIZE;
-	limit = (current->mm->start_stack - 0x00200000);
-	if (addr + len <= limit) {
-		limit -= len;
-
-		if (addr <= limit) {
-			vma = find_vma(current->mm, PAGE_SIZE);
-			for (; vma; vma = vma->vm_next) {
-				if (addr > limit)
-					break;
-				if (addr + len <= vma->vm_start)
-					goto success;
-				addr = vma->vm_end;
-			}
-		}
-	}
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = (current->mm->start_stack - 0x00200000);
+	info.align_mask = 0;
+	info.align_offset = 0;
+	addr = vm_unmapped_area(&info);
+	if (!(addr & ~PAGE_MASK))
+		goto success;
+	VM_BUG_ON(addr != -ENOMEM);
 
 	/* search from just above the WorkRAM area to the top of memory */
-	addr = PAGE_ALIGN(0x80000000);
-	limit = TASK_SIZE - len;
-	if (addr <= limit) {
-		vma = find_vma(current->mm, addr);
-		for (; vma; vma = vma->vm_next) {
-			if (addr > limit)
-				break;
-			if (addr + len <= vma->vm_start)
-				goto success;
-			addr = vma->vm_end;
-		}
-
-		if (!vma && addr <= limit)
-			goto success;
-	}
+	info.low_limit = PAGE_ALIGN(0x80000000);
+	info.high_limit = TASK_SIZE;
+	addr = vm_unmapped_area(&info);
+	if (!(addr & ~PAGE_MASK))
+		goto success;
+	VM_BUG_ON(addr != -ENOMEM);
 
 #if 0
 	printk("[area] l=%lx (ENOMEM) f='%s'\n",
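The frv conversion above is one instance of a batch-wide pattern: open-coded
VMA walks are replaced by vm_unmapped_area(), which takes a struct
vm_unmapped_area_info describing the search window and alignment and returns
either a fitting address or a negative errno. A minimal sketch of the calling
convention (the window limits here are illustrative):

	static unsigned long find_area(unsigned long len)
	{
		struct vm_unmapped_area_info info;

		info.flags = 0;			/* bottom-up search */
		info.length = len;
		info.low_limit = PAGE_SIZE;	/* illustrative window */
		info.high_limit = TASK_SIZE;
		info.align_mask = 0;		/* no alignment constraint */
		info.align_offset = 0;
		return vm_unmapped_area(&info);	/* addr, or -ENOMEM */
	}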
arch/h8300/Kconfig
@@ -5,6 +5,7 @@ config H8300
 	select HAVE_GENERIC_HARDIRQS
 	select GENERIC_ATOMIC64
 	select HAVE_UID16
+	select HAVE_VIRT_TO_BUS
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select GENERIC_IRQ_SHOW
 	select GENERIC_CPU_DEVICES
arch/ia64/Kconfig
@@ -26,6 +26,7 @@ config IA64
 	select HAVE_MEMBLOCK
 	select HAVE_MEMBLOCK_NODE_MAP
 	select HAVE_VIRT_CPU_ACCOUNTING
+	select HAVE_VIRT_TO_BUS
 	select ARCH_DISCARD_MEMBLOCK
 	select GENERIC_IRQ_PROBE
 	select GENERIC_PENDING_IRQ if SMP
arch/ia64/kernel/kprobes.c
@@ -423,7 +423,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address =
 		((struct fnptr *)kretprobe_trampoline)->ip;
@@ -444,7 +444,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	 * real return address, and all the rest will point to
 	 * kretprobe_trampoline
 	 */
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -461,7 +461,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 
 	regs->cr_iip = orig_ret_address;
 
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -487,7 +487,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	kretprobe_hash_unlock(current, &flags);
 	preempt_enable_no_resched();
 
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
arch/m32r/Kconfig
@@ -10,6 +10,7 @@ config M32R
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select HAVE_DEBUG_BUGVERBOSE
 	select HAVE_GENERIC_HARDIRQS
+	select HAVE_VIRT_TO_BUS
 	select GENERIC_IRQ_PROBE
 	select GENERIC_IRQ_SHOW
 	select GENERIC_ATOMIC64
arch/m68k/Kconfig
@@ -8,6 +8,7 @@ config M68K
 	select GENERIC_IRQ_SHOW
 	select GENERIC_ATOMIC64
 	select HAVE_UID16
+	select HAVE_VIRT_TO_BUS
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
 	select GENERIC_CPU_DEVICES
 	select GENERIC_STRNCPY_FROM_USER if MMU
arch/microblaze/Kconfig
@@ -19,6 +19,7 @@ config MICROBLAZE
 	select HAVE_DEBUG_KMEMLEAK
 	select IRQ_DOMAIN
 	select HAVE_GENERIC_HARDIRQS
+	select HAVE_VIRT_TO_BUS
 	select GENERIC_IRQ_PROBE
 	select GENERIC_IRQ_SHOW
 	select GENERIC_PCI_IOMAP
arch/mips/Kconfig
@@ -38,6 +38,7 @@ config MIPS
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_CMOS_UPDATE
 	select HAVE_MOD_ARCH_SPECIFIC
+	select HAVE_VIRT_TO_BUS
 	select MODULES_USE_ELF_REL if MODULES
 	select MODULES_USE_ELF_RELA if MODULES && 64BIT
 	select CLONE_BACKWARDS
arch/mips/kernel/kprobes.c
@@ -598,7 +598,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address = (unsigned long)kretprobe_trampoline;
 
@@ -618,7 +618,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 	 * real return address, and all the rest will point to
 	 * kretprobe_trampoline
 	 */
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -645,7 +645,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 	kretprobe_hash_unlock(current, &flags);
 	preempt_enable_no_resched();
 
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
arch/mn10300/Kconfig
@@ -8,6 +8,7 @@ config MN10300
 	select HAVE_ARCH_KGDB
 	select GENERIC_ATOMIC64
 	select HAVE_NMI_WATCHDOG if MN10300_WD_TIMER
+	select HAVE_VIRT_TO_BUS
 	select GENERIC_CLOCKEVENTS
 	select MODULES_USE_ELF_RELA
 	select OLD_SIGSUSPEND3
arch/openrisc/Kconfig
@@ -12,6 +12,7 @@ config OPENRISC
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_GENERIC_HARDIRQS
+	select HAVE_VIRT_TO_BUS
 	select GENERIC_IRQ_CHIP
 	select GENERIC_IRQ_PROBE
 	select GENERIC_IRQ_SHOW
arch/parisc/Kconfig
@@ -19,6 +19,7 @@ config PARISC
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_STRNCPY_FROM_USER
 	select HAVE_MOD_ARCH_SPECIFIC
+	select HAVE_VIRT_TO_BUS
 	select MODULES_USE_ELF_RELA
 	select CLONE_BACKWARDS
 	select TTY # Needed for pdc_cons.c
arch/parisc/kernel/sys_parisc.c
@@ -35,22 +35,17 @@
 
 static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
 {
-	struct vm_area_struct *vma;
+	struct vm_unmapped_area_info info;
 
-	addr = PAGE_ALIGN(addr);
-
-	for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
-		/* At this point:  (!vma || addr < vma->vm_end). */
-		if (TASK_SIZE - len < addr)
-			return -ENOMEM;
-		if (!vma || addr + len <= vma->vm_start)
-			return addr;
-		addr = vma->vm_end;
-	}
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = PAGE_ALIGN(addr);
+	info.high_limit = TASK_SIZE;
+	info.align_mask = 0;
+	info.align_offset = 0;
+	return vm_unmapped_area(&info);
 }
 
 #define DCACHE_ALIGN(addr) (((addr) + (SHMLBA - 1)) &~ (SHMLBA - 1))
 
 /*
  * We need to know the offset to use.  Old scheme was to look for
  * existing mapping and use the same offset.  New scheme is to use the
@@ -63,30 +58,21 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
  */
 static int get_offset(struct address_space *mapping)
 {
-	int offset = (unsigned long) mapping << (PAGE_SHIFT - 8);
-	return offset & 0x3FF000;
+	return (unsigned long) mapping >> 8;
 }
 
 static unsigned long get_shared_area(struct address_space *mapping,
 		unsigned long addr, unsigned long len, unsigned long pgoff)
 {
-	struct vm_area_struct *vma;
-	int offset = mapping ? get_offset(mapping) : 0;
+	struct vm_unmapped_area_info info;
 
-	offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000;
-
-	addr = DCACHE_ALIGN(addr - offset) + offset;
-
-	for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
-		/* At this point:  (!vma || addr < vma->vm_end). */
-		if (TASK_SIZE - len < addr)
-			return -ENOMEM;
-		if (!vma || addr + len <= vma->vm_start)
-			return addr;
-		addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
-		if (addr < vma->vm_end) /* handle wraparound */
-			return -ENOMEM;
-	}
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = PAGE_ALIGN(addr);
+	info.high_limit = TASK_SIZE;
+	info.align_mask = PAGE_MASK & (SHMLBA - 1);
+	info.align_offset = (get_offset(mapping) + pgoff) << PAGE_SHIFT;
+	return vm_unmapped_area(&info);
 }
 
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
arch/powerpc/Kconfig
@@ -87,9 +87,6 @@ config GENERIC_GPIO
 	help
 	  Generic GPIO API support
 
-config ARCH_NO_VIRT_TO_BUS
-	def_bool PPC64
-
 config PPC
 	bool
 	default y
@@ -101,6 +98,7 @@ config PPC
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select SYSCTL_EXCEPTION_TRACE
 	select ARCH_WANT_OPTIONAL_GPIOLIB
+	select HAVE_VIRT_TO_BUS if !PPC64
 	select HAVE_IDE
 	select HAVE_IOREMAP_PROT
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
arch/powerpc/kernel/kprobes.c
@@ -310,7 +310,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
 
@@ -330,7 +330,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 	 * real return address, and all the rest will point to
 	 * kretprobe_trampoline
 	 */
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -357,7 +357,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 	kretprobe_hash_unlock(current, &flags);
 	preempt_enable_no_resched();
 
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
arch/powerpc/kvm/book3s_mmu_hpte.c
@@ -124,7 +124,6 @@ static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	struct hpte_cache *pte;
-	struct hlist_node *node;
 	int i;
 
 	rcu_read_lock();
@@ -132,7 +131,7 @@ static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
 	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
 		struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];
 
-		hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
+		hlist_for_each_entry_rcu(pte, list, list_vpte_long)
 			invalidate_pte(vcpu, pte);
 	}
 
@@ -143,7 +142,6 @@ static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
 {
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	struct hlist_head *list;
-	struct hlist_node *node;
 	struct hpte_cache *pte;
 
 	/* Find the list of entries in the map */
@@ -152,7 +150,7 @@ static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
 	rcu_read_lock();
 
 	/* Check the list for matching entries and invalidate */
-	hlist_for_each_entry_rcu(pte, node, list, list_pte)
+	hlist_for_each_entry_rcu(pte, list, list_pte)
 		if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
 			invalidate_pte(vcpu, pte);
 
@@ -163,7 +161,6 @@ static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
 {
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	struct hlist_head *list;
-	struct hlist_node *node;
 	struct hpte_cache *pte;
 
 	/* Find the list of entries in the map */
@@ -173,7 +170,7 @@ static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
 	rcu_read_lock();
 
 	/* Check the list for matching entries and invalidate */
-	hlist_for_each_entry_rcu(pte, node, list, list_pte_long)
+	hlist_for_each_entry_rcu(pte, list, list_pte_long)
 		if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea)
 			invalidate_pte(vcpu, pte);
 
@@ -207,7 +204,6 @@ static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
 {
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	struct hlist_head *list;
-	struct hlist_node *node;
 	struct hpte_cache *pte;
 	u64 vp_mask = 0xfffffffffULL;
 
@@ -216,7 +212,7 @@ static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
 	rcu_read_lock();
 
 	/* Check the list for matching entries and invalidate */
-	hlist_for_each_entry_rcu(pte, node, list, list_vpte)
+	hlist_for_each_entry_rcu(pte, list, list_vpte)
 		if ((pte->pte.vpage & vp_mask) == guest_vp)
 			invalidate_pte(vcpu, pte);
 
@@ -228,7 +224,6 @@ static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
 {
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	struct hlist_head *list;
-	struct hlist_node *node;
 	struct hpte_cache *pte;
 	u64 vp_mask = 0xffffff000ULL;
 
@@ -238,7 +233,7 @@ static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
 	rcu_read_lock();
 
 	/* Check the list for matching entries and invalidate */
-	hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
+	hlist_for_each_entry_rcu(pte, list, list_vpte_long)
 		if ((pte->pte.vpage & vp_mask) == guest_vp)
 			invalidate_pte(vcpu, pte);
 
@@ -266,7 +261,6 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
 void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
 {
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
-	struct hlist_node *node;
 	struct hpte_cache *pte;
 	int i;
 
@@ -277,7 +271,7 @@ void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
 	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
 		struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];
 
-		hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
+		hlist_for_each_entry_rcu(pte, list, list_vpte_long)
 			if ((pte->pte.raddr >= pa_start) &&
 			    (pte->pte.raddr < pa_end))
 				invalidate_pte(vcpu, pte);
arch/s390/Kconfig
@@ -134,6 +134,7 @@ config S390
 	select HAVE_SYSCALL_WRAPPERS
 	select HAVE_UID16 if 32BIT
 	select HAVE_VIRT_CPU_ACCOUNTING
+	select HAVE_VIRT_TO_BUS
 	select INIT_ALL_POSSIBLE
 	select KTIME_SCALAR if 32BIT
 	select MODULES_USE_ELF_RELA
arch/s390/kernel/kprobes.c
@@ -354,7 +354,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 {
 	struct kretprobe_instance *ri;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address;
 	unsigned long trampoline_address;
 	kprobe_opcode_t *correct_ret_addr;
@@ -379,7 +379,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 	orig_ret_address = 0;
 	correct_ret_addr = NULL;
 	trampoline_address = (unsigned long) &kretprobe_trampoline;
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -398,7 +398,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 	kretprobe_assert(ri, orig_ret_address, trampoline_address);
 
 	correct_ret_addr = ri->ret_addr;
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -427,7 +427,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 	kretprobe_hash_unlock(current, &flags);
 	preempt_enable_no_resched();
 
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
arch/s390/pci/pci_msi.c
@@ -25,10 +25,9 @@ static DEFINE_SPINLOCK(msi_map_lock);
 
 struct msi_desc *__irq_get_msi_desc(unsigned int irq)
 {
-	struct hlist_node *entry;
 	struct msi_map *map;
 
-	hlist_for_each_entry_rcu(map, entry,
+	hlist_for_each_entry_rcu(map,
 			&msi_hash[msi_hashfn(irq)], msi_chain)
 		if (map->irq == irq)
 			return map->msi;
arch/score/Kconfig
@@ -12,6 +12,7 @@ config SCORE
 	select GENERIC_CPU_DEVICES
 	select GENERIC_CLOCKEVENTS
 	select HAVE_MOD_ARCH_SPECIFIC
+	select HAVE_VIRT_TO_BUS
 	select MODULES_USE_ELF_REL
 	select CLONE_BACKWARDS
 
arch/sh/Kconfig
@@ -148,9 +148,6 @@ config ARCH_HAS_ILOG2_U32
 config ARCH_HAS_ILOG2_U64
 	def_bool n
 
-config ARCH_NO_VIRT_TO_BUS
-	def_bool y
-
 config ARCH_HAS_DEFAULT_IDLE
 	def_bool y
 
arch/sh/kernel/kprobes.c
@@ -310,7 +310,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
 
@@ -330,7 +330,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	 * real return address, and all the rest will point to
 	 * kretprobe_trampoline
 	 */
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -360,7 +360,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 
 	preempt_enable_no_resched();
 
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
arch/sparc/Kconfig
@@ -146,9 +146,6 @@ config GENERIC_GPIO
 	help
 	  Generic GPIO API support
 
-config ARCH_NO_VIRT_TO_BUS
-	def_bool y
-
 config ARCH_SUPPORTS_DEBUG_PAGEALLOC
 	def_bool y if SPARC64
 
arch/sparc/kernel/kprobes.c
@@ -511,7 +511,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
 
@@ -531,7 +531,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	 * real return address, and all the rest will point to
 	 * kretprobe_trampoline
 	 */
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -559,7 +559,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	kretprobe_hash_unlock(current, &flags);
 	preempt_enable_no_resched();
 
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
arch/sparc/kernel/ldc.c
@@ -953,9 +953,8 @@ static HLIST_HEAD(ldc_channel_list);
 static int __ldc_channel_exists(unsigned long id)
 {
 	struct ldc_channel *lp;
-	struct hlist_node *n;
 
-	hlist_for_each_entry(lp, n, &ldc_channel_list, list) {
+	hlist_for_each_entry(lp, &ldc_channel_list, list) {
 		if (lp->id == id)
 			return 1;
 	}
arch/tile/Kconfig
@@ -17,6 +17,7 @@ config TILE
 	select GENERIC_IRQ_SHOW
 	select HAVE_DEBUG_BUGVERBOSE
 	select HAVE_SYSCALL_WRAPPERS if TILEGX
+	select HAVE_VIRT_TO_BUS
 	select SYS_HYPERVISOR
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select GENERIC_CLOCKEVENTS
arch/unicore32/Kconfig
@@ -9,6 +9,7 @@ config UNICORE32
 	select GENERIC_ATOMIC64
 	select HAVE_KERNEL_LZO
 	select HAVE_KERNEL_LZMA
+	select HAVE_VIRT_TO_BUS
 	select ARCH_HAVE_CUSTOM_GPIO_H
 	select GENERIC_FIND_FIRST_BIT
 	select GENERIC_IRQ_PROBE
arch/x86/Kconfig
@@ -112,6 +112,7 @@ config X86
 	select GENERIC_STRNLEN_USER
 	select HAVE_CONTEXT_TRACKING if X86_64
 	select HAVE_IRQ_TIME_ACCOUNTING
+	select HAVE_VIRT_TO_BUS
 	select MODULES_USE_ELF_REL if X86_32
 	select MODULES_USE_ELF_RELA if X86_64
 	select CLONE_BACKWARDS if X86_32
arch/x86/kernel/kprobes/core.c
@@ -652,7 +652,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
 	kprobe_opcode_t *correct_ret_addr = NULL;
@@ -682,7 +682,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 	 * will be the real return address, and all the rest will
 	 * point to kretprobe_trampoline.
 	 */
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -701,7 +701,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 	kretprobe_assert(ri, orig_ret_address, trampoline_address);
 
 	correct_ret_addr = ri->ret_addr;
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -728,7 +728,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 
 	kretprobe_hash_unlock(current, &flags);
 
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
arch/x86/kvm/mmu.c
@@ -1644,13 +1644,13 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 				    struct list_head *invalid_list);
 
-#define for_each_gfn_sp(kvm, sp, gfn, pos)				\
-	hlist_for_each_entry(sp, pos,					\
+#define for_each_gfn_sp(kvm, sp, gfn)					\
+	hlist_for_each_entry(sp,					\
 	  &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link) \
 		if ((sp)->gfn != (gfn)) {} else
 
-#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos)		\
-	hlist_for_each_entry(sp, pos,					\
+#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn)			\
+	hlist_for_each_entry(sp,					\
 	  &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link) \
 		if ((sp)->gfn != (gfn) || (sp)->role.direct ||		\
 			(sp)->role.invalid) {} else
@@ -1706,11 +1706,10 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	struct kvm_mmu_page *s;
-	struct hlist_node *node;
 	LIST_HEAD(invalid_list);
 	bool flush = false;
 
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
+	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
 		if (!s->unsync)
 			continue;
 
@@ -1848,7 +1847,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	union kvm_mmu_page_role role;
 	unsigned quadrant;
 	struct kvm_mmu_page *sp;
-	struct hlist_node *node;
 	bool need_sync = false;
 
 	role = vcpu->arch.mmu.base_role;
@@ -1863,7 +1861,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
 		role.quadrant = quadrant;
 	}
-	for_each_gfn_sp(vcpu->kvm, sp, gfn, node) {
+	for_each_gfn_sp(vcpu->kvm, sp, gfn) {
 		if (!need_sync && sp->unsync)
 			need_sync = true;
 
@@ -2151,14 +2149,13 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_mmu_page *sp;
-	struct hlist_node *node;
 	LIST_HEAD(invalid_list);
 	int r;
 
 	pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
 	r = 0;
 	spin_lock(&kvm->mmu_lock);
-	for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
+	for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
 		pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
 			 sp->role.word);
 		r = 1;
@@ -2288,9 +2285,8 @@ static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	struct kvm_mmu_page *s;
-	struct hlist_node *node;
 
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
+	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
 		if (s->unsync)
 			continue;
 		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
@@ -2302,10 +2298,9 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 				  bool can_unsync)
 {
 	struct kvm_mmu_page *s;
-	struct hlist_node *node;
 	bool need_unsync = false;
 
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
+	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
 		if (!can_unsync)
 			return 1;
 
@@ -3933,7 +3928,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	union kvm_mmu_page_role mask = { .word = 0 };
 	struct kvm_mmu_page *sp;
-	struct hlist_node *node;
 	LIST_HEAD(invalid_list);
 	u64 entry, gentry, *spte;
 	int npte;
@@ -3964,7 +3958,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
 
 	mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
+	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
 		if (detect_write_misaligned(sp, gpa, bytes) ||
 		      detect_write_flooding(sp)) {
 			zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
arch/xtensa/Kconfig
@@ -9,6 +9,7 @@ config XTENSA
 	select HAVE_IDE
 	select GENERIC_ATOMIC64
 	select HAVE_GENERIC_HARDIRQS
+	select HAVE_VIRT_TO_BUS
 	select GENERIC_IRQ_SHOW
 	select GENERIC_CPU_DEVICES
 	select MODULES_USE_ELF_RELA
block/blk-cgroup.c
@@ -357,7 +357,6 @@ static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
 {
 	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
 	struct blkcg_gq *blkg;
-	struct hlist_node *n;
 	int i;
 
 	mutex_lock(&blkcg_pol_mutex);
@@ -368,7 +367,7 @@ static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
 	 * stat updates.  This is a debug feature which shouldn't exist
 	 * anyway.  If you get hit by a race, retry.
 	 */
-	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
 		for (i = 0; i < BLKCG_MAX_POLS; i++) {
 			struct blkcg_policy *pol = blkcg_policy[i];
 
@@ -415,11 +414,10 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
 		       bool show_total)
 {
 	struct blkcg_gq *blkg;
-	struct hlist_node *n;
 	u64 total = 0;
 
 	spin_lock_irq(&blkcg->lock);
-	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
+	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node)
 		if (blkcg_policy_enabled(blkg->q, pol))
 			total += prfill(sf, blkg->pd[pol->plid], data);
 	spin_unlock_irq(&blkcg->lock);
block/blk-ioc.c
@@ -164,7 +164,6 @@ EXPORT_SYMBOL(put_io_context);
  */
 void put_io_context_active(struct io_context *ioc)
 {
-	struct hlist_node *n;
 	unsigned long flags;
 	struct io_cq *icq;
 
@@ -180,7 +179,7 @@ void put_io_context_active(struct io_context *ioc)
 	 */
 retry:
 	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
-	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node) {
+	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
 		if (icq->flags & ICQ_EXITED)
 			continue;
 		if (spin_trylock(icq->q->queue_lock)) {
block/bsg.c
@@ -800,11 +800,10 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
 static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
 {
 	struct bsg_device *bd;
-	struct hlist_node *entry;
 
 	mutex_lock(&bsg_mutex);
 
-	hlist_for_each_entry(bd, entry, bsg_dev_idx_hash(minor), dev_list) {
+	hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
 		if (bd->queue == q) {
 			atomic_inc(&bd->ref_count);
 			goto found;
@@ -997,7 +996,7 @@ int bsg_register_queue(struct request_queue *q, struct device *parent,
 {
 	struct bsg_class_device *bcd;
 	dev_t dev;
-	int ret, minor;
+	int ret;
 	struct device *class_dev = NULL;
 	const char *devname;
 
@@ -1017,23 +1016,16 @@ int bsg_register_queue(struct request_queue *q, struct device *parent,
 
 	mutex_lock(&bsg_mutex);
 
-	ret = idr_pre_get(&bsg_minor_idr, GFP_KERNEL);
-	if (!ret) {
-		ret = -ENOMEM;
+	ret = idr_alloc(&bsg_minor_idr, bcd, 0, BSG_MAX_DEVS, GFP_KERNEL);
+	if (ret < 0) {
+		if (ret == -ENOSPC) {
+			printk(KERN_ERR "bsg: too many bsg devices\n");
+			ret = -EINVAL;
+		}
 		goto unlock;
 	}
 
-	ret = idr_get_new(&bsg_minor_idr, bcd, &minor);
-	if (ret < 0)
-		goto unlock;
-
-	if (minor >= BSG_MAX_DEVS) {
-		printk(KERN_ERR "bsg: too many bsg devices\n");
-		ret = -EINVAL;
-		goto remove_idr;
-	}
-
-	bcd->minor = minor;
+	bcd->minor = ret;
 	bcd->queue = q;
 	bcd->parent = get_device(parent);
 	bcd->release = release;
@@ -1059,8 +1051,7 @@ unregister_class_dev:
 	device_unregister(class_dev);
 put_dev:
 	put_device(parent);
-remove_idr:
-	idr_remove(&bsg_minor_idr, minor);
+	idr_remove(&bsg_minor_idr, bcd->minor);
 unlock:
 	mutex_unlock(&bsg_mutex);
 	return ret;
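The bsg conversion above is typical of the IDR rework throughout this batch:
the idr_pre_get()/idr_get_new() retry dance collapses into a single
idr_alloc() call that returns the allocated ID, -ENOMEM on allocation
failure, or -ENOSPC when the requested [start, end) range is exhausted. A
minimal sketch of the new convention (my_idr and struct my_obj are
illustrative names, not from the patch):

	static DEFINE_IDR(my_idr);

	struct my_obj {
		int id;
	};

	static int register_obj(struct my_obj *obj)
	{
		int id;

		/* Allocate an ID in [0, 128); GFP_KERNEL may sleep. */
		id = idr_alloc(&my_idr, obj, 0, 128, GFP_KERNEL);
		if (id < 0)
			return id;	/* -ENOMEM or -ENOSPC */
		obj->id = id;
		return 0;
	}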
block/cfq-iosched.c
@@ -1435,7 +1435,6 @@ static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
 {
 	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 	struct blkcg_gq *blkg;
-	struct hlist_node *n;
 
 	if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
 		return -EINVAL;
@@ -1443,7 +1442,7 @@ static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
 	spin_lock_irq(&blkcg->lock);
 	blkcg->cfq_weight = (unsigned int)val;
 
-	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
 		struct cfq_group *cfqg = blkg_to_cfqg(blkg);
 
 		if (cfqg && !cfqg->dev_weight)
block/elevator.c
@@ -288,10 +288,10 @@ static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
 {
 	struct elevator_queue *e = q->elevator;
 	struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
-	struct hlist_node *entry, *next;
+	struct hlist_node *next;
 	struct request *rq;
 
-	hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
+	hlist_for_each_entry_safe(rq, next, hash_list, hash) {
 		BUG_ON(!ELV_ON_HASH(rq));
 
 		if (unlikely(!rq_mergeable(rq))) {
block/genhd.c
@@ -26,7 +26,7 @@ static DEFINE_MUTEX(block_class_lock);
 struct kobject *block_depr;
 
 /* for extended dynamic devt allocation, currently only one major is used */
-#define MAX_EXT_DEVT		(1 << MINORBITS)
+#define NR_EXT_DEVT		(1 << MINORBITS)
 
 /* For extended devt allocation.  ext_devt_mutex prevents look up
  * results from going away underneath its user.
@@ -411,7 +411,7 @@ static int blk_mangle_minor(int minor)
 int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
 {
 	struct gendisk *disk = part_to_disk(part);
-	int idx, rc;
+	int idx;
 
 	/* in consecutive minor range? */
 	if (part->partno < disk->minors) {
@@ -420,19 +420,11 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
 	}
 
 	/* allocate ext devt */
-	do {
-		if (!idr_pre_get(&ext_devt_idr, GFP_KERNEL))
-			return -ENOMEM;
-		rc = idr_get_new(&ext_devt_idr, part, &idx);
-	} while (rc == -EAGAIN);
-
-	if (rc)
-		return rc;
-
-	if (idx > MAX_EXT_DEVT) {
-		idr_remove(&ext_devt_idr, idx);
-		return -EBUSY;
-	}
+	mutex_lock(&ext_devt_mutex);
+	idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_KERNEL);
+	mutex_unlock(&ext_devt_mutex);
+	if (idx < 0)
+		return idx == -ENOSPC ? -EBUSY : idx;
 
 	*devt = MKDEV(BLOCK_EXT_MAJOR, blk_mangle_minor(idx));
 	return 0;
@@ -655,7 +647,6 @@ void del_gendisk(struct gendisk *disk)
 	disk_part_iter_exit(&piter);
 
 	invalidate_partition(disk, 0);
-	blk_free_devt(disk_to_dev(disk)->devt);
 	set_capacity(disk, 0);
 	disk->flags &= ~GENHD_FL_UP;
 
@@ -674,6 +665,7 @@ void del_gendisk(struct gendisk *disk)
 	sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
 	pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
 	device_del(disk_to_dev(disk));
+	blk_free_devt(disk_to_dev(disk)->devt);
 }
 EXPORT_SYMBOL(del_gendisk);
 
block/partition-generic.c
@@ -249,11 +249,11 @@ void delete_partition(struct gendisk *disk, int partno)
 	if (!part)
 		return;
 
-	blk_free_devt(part_devt(part));
 	rcu_assign_pointer(ptbl->part[partno], NULL);
 	rcu_assign_pointer(ptbl->last_lookup, NULL);
 	kobject_put(part->holder_dir);
 	device_del(part_to_dev(part));
+	blk_free_devt(part_devt(part));
 
 	hd_struct_put(part);
 }
@@ -418,7 +418,7 @@ int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
 	int p, highest, res;
 rescan:
 	if (state && !IS_ERR(state)) {
-		kfree(state);
+		free_partitions(state);
 		state = NULL;
 	}
 
@@ -525,7 +525,7 @@ rescan:
 		md_autodetect_dev(part_to_dev(part)->devt);
 #endif
 	}
-	kfree(state);
+	free_partitions(state);
 
 	return 0;
 }
block/partitions/check.c
@@ -14,6 +14,7 @@
  */
 
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <linux/ctype.h>
 #include <linux/genhd.h>
 
@@ -106,18 +107,45 @@ static int (*check_part[])(struct parsed_partitions *) = {
 	NULL
 };
 
+static struct parsed_partitions *allocate_partitions(struct gendisk *hd)
+{
+	struct parsed_partitions *state;
+	int nr;
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return NULL;
+
+	nr = disk_max_parts(hd);
+	state->parts = vzalloc(nr * sizeof(state->parts[0]));
+	if (!state->parts) {
+		kfree(state);
+		return NULL;
+	}
+
+	state->limit = nr;
+
+	return state;
+}
+
+void free_partitions(struct parsed_partitions *state)
+{
+	vfree(state->parts);
+	kfree(state);
+}
+
 struct parsed_partitions *
 check_partition(struct gendisk *hd, struct block_device *bdev)
 {
 	struct parsed_partitions *state;
 	int i, res, err;
 
-	state = kzalloc(sizeof(struct parsed_partitions), GFP_KERNEL);
+	state = allocate_partitions(hd);
 	if (!state)
 		return NULL;
 	state->pp_buf = (char *)__get_free_page(GFP_KERNEL);
 	if (!state->pp_buf) {
-		kfree(state);
+		free_partitions(state);
 		return NULL;
 	}
 	state->pp_buf[0] = '\0';
@@ -128,10 +156,9 @@ check_partition(struct gendisk *hd, struct block_device *bdev)
 	if (isdigit(state->name[strlen(state->name)-1]))
 		sprintf(state->name, "p");
 
-	state->limit = disk_max_parts(hd);
 	i = res = err = 0;
 	while (!res && check_part[i]) {
-		memset(&state->parts, 0, sizeof(state->parts));
+		memset(state->parts, 0, state->limit * sizeof(state->parts[0]));
 		res = check_part[i++](state);
 		if (res < 0) {
 			/* We have hit an I/O error which we don't report now.
@@ -161,6 +188,6 @@ check_partition(struct gendisk *hd, struct block_device *bdev)
 		printk(KERN_INFO "%s", state->pp_buf);
 
 	free_page((unsigned long)state->pp_buf);
-	kfree(state);
+	free_partitions(state);
 	return ERR_PTR(res);
 }
block/partitions/check.h
@@ -15,13 +15,15 @@ struct parsed_partitions {
 		int flags;
 		bool has_info;
 		struct partition_meta_info info;
-	} parts[DISK_MAX_PARTS];
+	} *parts;
 	int next;
 	int limit;
 	bool access_beyond_eod;
 	char *pp_buf;
 };
 
+void free_partitions(struct parsed_partitions *state);
+
 struct parsed_partitions *
 check_partition(struct gendisk *, struct block_device *);
 
block/partitions/efi.c
@@ -310,15 +310,23 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
 		goto fail;
 	}
 
-	/* Check the GUID Partition Table header size */
+	/* Check the GUID Partition Table header size is too big */
 	if (le32_to_cpu((*gpt)->header_size) >
 			bdev_logical_block_size(state->bdev)) {
-		pr_debug("GUID Partition Table Header size is wrong: %u > %u\n",
+		pr_debug("GUID Partition Table Header size is too large: %u > %u\n",
 			le32_to_cpu((*gpt)->header_size),
 			bdev_logical_block_size(state->bdev));
 		goto fail;
 	}
 
+	/* Check the GUID Partition Table header size is too small */
+	if (le32_to_cpu((*gpt)->header_size) < sizeof(gpt_header)) {
+		pr_debug("GUID Partition Table Header size is too small: %u < %zu\n",
+			le32_to_cpu((*gpt)->header_size),
+			sizeof(gpt_header));
+		goto fail;
+	}
+
 	/* Check the GUID Partition Table CRC */
 	origcrc = le32_to_cpu((*gpt)->header_crc32);
 	(*gpt)->header_crc32 = 0;
block/partitions/mac.c
@@ -63,6 +63,10 @@ int mac_partition(struct parsed_partitions *state)
 		put_dev_sector(sect);
 		return 0;
 	}
+
+	if (blocks_in_map >= state->limit)
+		blocks_in_map = state->limit - 1;
+
 	strlcat(state->pp_buf, " [mac]", PAGE_SIZE);
 	for (slot = 1; slot <= blocks_in_map; ++slot) {
 		int pos = slot * secsize;
block/partitions/msdos.c
@@ -455,17 +455,22 @@ int msdos_partition(struct parsed_partitions *state)
 	data = read_part_sector(state, 0, &sect);
 	if (!data)
 		return -1;
-	if (!msdos_magic_present(data + 510)) {
-		put_dev_sector(sect);
-		return 0;
-	}
 
 	/*
 	 * Note order! (some AIX disks, e.g. unbootable kind,
 	 * have no MSDOS 55aa)
 	 */
 	if (aix_magic_present(state, data)) {
 		put_dev_sector(sect);
 		strlcat(state->pp_buf, " [AIX]", PAGE_SIZE);
 		return 0;
 	}
 
+	if (!msdos_magic_present(data + 510)) {
+		put_dev_sector(sect);
+		return 0;
+	}
+
 	/*
 	 * Now that the 55aa signature is present, this is probably
 	 * either the boot sector of a FAT filesystem or a DOS-type
crypto/algapi.c
@@ -447,7 +447,7 @@ EXPORT_SYMBOL_GPL(crypto_register_template);
 void crypto_unregister_template(struct crypto_template *tmpl)
 {
 	struct crypto_instance *inst;
-	struct hlist_node *p, *n;
+	struct hlist_node *n;
 	struct hlist_head *list;
 	LIST_HEAD(users);
 
@@ -457,7 +457,7 @@ void crypto_unregister_template(struct crypto_template *tmpl)
 	list_del_init(&tmpl->list);
 
 	list = &tmpl->instances;
-	hlist_for_each_entry(inst, p, list, list) {
+	hlist_for_each_entry(inst, list, list) {
 		int err = crypto_remove_alg(&inst->alg, &users);
 		BUG_ON(err);
 	}
@@ -466,7 +466,7 @@ void crypto_unregister_template(struct crypto_template *tmpl)
 
 	up_write(&crypto_alg_sem);
 
-	hlist_for_each_entry_safe(inst, p, n, list, list) {
+	hlist_for_each_entry_safe(inst, n, list, list) {
 		BUG_ON(atomic_read(&inst->alg.cra_refcnt) != 1);
 		tmpl->free(inst);
 	}
drivers/atm/atmtcp.c
@@ -157,7 +157,6 @@ static int atmtcp_v_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
 {
 	struct atm_cirange ci;
 	struct atm_vcc *vcc;
-	struct hlist_node *node;
 	struct sock *s;
 	int i;
 
@@ -171,7 +170,7 @@ static int atmtcp_v_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
 	for(i = 0; i < VCC_HTABLE_SIZE; ++i) {
 		struct hlist_head *head = &vcc_hash[i];
 
-		sk_for_each(s, node, head) {
+		sk_for_each(s, head) {
 			vcc = atm_sk(s);
 			if (vcc->dev != dev)
 				continue;
@@ -264,12 +263,11 @@ static struct atm_vcc *find_vcc(struct atm_dev *dev, short vpi, int vci)
 {
 	struct hlist_head *head;
 	struct atm_vcc *vcc;
-	struct hlist_node *node;
 	struct sock *s;
 
 	head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
 
-	sk_for_each(s, node, head) {
+	sk_for_each(s, head) {
 		vcc = atm_sk(s);
 		if (vcc->dev == dev &&
 		    vcc->vci == vci && vcc->vpi == vpi &&
|
@ -2093,7 +2093,6 @@ static unsigned char eni_phy_get(struct atm_dev *dev,unsigned long addr)
|
|||
|
||||
static int eni_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
|
||||
{
|
||||
struct hlist_node *node;
|
||||
struct sock *s;
|
||||
static const char *signal[] = { "LOST","unknown","okay" };
|
||||
struct eni_dev *eni_dev = ENI_DEV(dev);
|
||||
|
@ -2171,7 +2170,7 @@ static int eni_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
|
|||
for(i = 0; i < VCC_HTABLE_SIZE; ++i) {
|
||||
struct hlist_head *head = &vcc_hash[i];
|
||||
|
||||
sk_for_each(s, node, head) {
|
||||
sk_for_each(s, head) {
|
||||
struct eni_vcc *eni_vcc;
|
||||
int length;
|
||||
|
||||
|
|
|
@ -329,7 +329,6 @@ __find_vcc(struct he_dev *he_dev, unsigned cid)
|
|||
{
|
||||
struct hlist_head *head;
|
||||
struct atm_vcc *vcc;
|
||||
struct hlist_node *node;
|
||||
struct sock *s;
|
||||
short vpi;
|
||||
int vci;
|
||||
|
@ -338,7 +337,7 @@ __find_vcc(struct he_dev *he_dev, unsigned cid)
|
|||
vci = cid & ((1 << he_dev->vcibits) - 1);
|
||||
head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
|
||||
|
||||
sk_for_each(s, node, head) {
|
||||
sk_for_each(s, head) {
|
||||
vcc = atm_sk(s);
|
||||
if (vcc->dev == he_dev->atm_dev &&
|
||||
vcc->vci == vci && vcc->vpi == vpi &&
|
||||
|
|
|
@ -251,7 +251,6 @@ static void nicstar_remove_one(struct pci_dev *pcidev)
|
|||
if (card->scd2vc[j] != NULL)
|
||||
free_scq(card, card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc);
|
||||
}
|
||||
idr_remove_all(&card->idr);
|
||||
idr_destroy(&card->idr);
|
||||
pci_free_consistent(card->pcidev, NS_RSQSIZE + NS_RSQ_ALIGNMENT,
|
||||
card->rsq.org, card->rsq.dma);
|
||||
|
@ -950,11 +949,10 @@ static void free_scq(ns_dev *card, scq_info *scq, struct atm_vcc *vcc)
|
|||
static void push_rxbufs(ns_dev * card, struct sk_buff *skb)
|
||||
{
|
||||
struct sk_buff *handle1, *handle2;
|
||||
u32 id1 = 0, id2 = 0;
|
||||
int id1, id2;
|
||||
u32 addr1, addr2;
|
||||
u32 stat;
|
||||
unsigned long flags;
|
||||
int err;
|
||||
|
||||
/* *BARF* */
|
||||
handle2 = NULL;
|
||||
|
@ -1027,23 +1025,12 @@ static void push_rxbufs(ns_dev * card, struct sk_buff *skb)
|
|||
card->lbfqc += 2;
|
||||
}
|
||||
|
||||
do {
|
||||
if (!idr_pre_get(&card->idr, GFP_ATOMIC)) {
|
||||
printk(KERN_ERR
|
||||
"nicstar%d: no free memory for idr\n",
|
||||
card->index);
|
||||
goto out;
|
||||
}
|
||||
id1 = idr_alloc(&card->idr, handle1, 0, 0, GFP_ATOMIC);
|
||||
if (id1 < 0)
|
||||
goto out;
|
||||
|
||||
if (!id1)
|
||||
err = idr_get_new_above(&card->idr, handle1, 0, &id1);
|
||||
|
||||
if (!id2 && err == 0)
|
||||
err = idr_get_new_above(&card->idr, handle2, 0, &id2);
|
||||
|
||||
} while (err == -EAGAIN);
|
||||
|
||||
if (err)
|
||||
id2 = idr_alloc(&card->idr, handle2, 0, 0, GFP_ATOMIC);
|
||||
if (id2 < 0)
|
||||
goto out;
|
||||
|
||||
spin_lock_irqsave(&card->res_lock, flags);
|
||||
|
|
|
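The nicstar conversion above shows the basic shape of the idr API change running through the rest of this series: the idr_pre_get()/idr_get_new_above() pair, which had to loop on -EAGAIN, collapses into one idr_alloc() call that returns the id directly. A minimal sketch of the new calling convention (my_idr and ptr are placeholders):

    struct idr my_idr;
    int id;

    idr_init(&my_idr);

    /* lowest free id >= 0; end == 0 means "no upper bound" */
    id = idr_alloc(&my_idr, ptr, 0, 0, GFP_KERNEL);
    if (id < 0)
    	return id;	/* -ENOMEM, or -ENOSPC if the range is exhausted */

Success and failure come back through a single return value, so both the out-parameter and the retry loop disappear.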
@@ -896,12 +896,11 @@ static struct atm_vcc *find_vcc(struct atm_dev *dev, short vpi, int vci)
 {
 	struct hlist_head *head;
 	struct atm_vcc *vcc = NULL;
-	struct hlist_node *node;
 	struct sock *s;
 
 	read_lock(&vcc_sklist_lock);
 	head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
-	sk_for_each(s, node, head) {
+	sk_for_each(s, head) {
 		vcc = atm_sk(s);
 		if (vcc->dev == dev && vcc->vci == vci &&
 		    vcc->vpi == vpi && vcc->qos.rxtp.traffic_class != ATM_NONE &&

@@ -2660,25 +2660,24 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
 	mdev->read_requests = RB_ROOT;
 	mdev->write_requests = RB_ROOT;
 
-	if (!idr_pre_get(&minors, GFP_KERNEL))
-		goto out_no_minor_idr;
-	if (idr_get_new_above(&minors, mdev, minor, &minor_got))
-		goto out_no_minor_idr;
-	if (minor_got != minor) {
-		err = ERR_MINOR_EXISTS;
-		drbd_msg_put_info("requested minor exists already");
-		goto out_idr_remove_minor;
+	minor_got = idr_alloc(&minors, mdev, minor, minor + 1, GFP_KERNEL);
+	if (minor_got < 0) {
+		if (minor_got == -ENOSPC) {
+			err = ERR_MINOR_EXISTS;
+			drbd_msg_put_info("requested minor exists already");
+		}
+		goto out_no_minor_idr;
 	}
 
-	if (!idr_pre_get(&tconn->volumes, GFP_KERNEL))
-		goto out_idr_remove_minor;
-	if (idr_get_new_above(&tconn->volumes, mdev, vnr, &vnr_got))
-		goto out_idr_remove_minor;
-	if (vnr_got != vnr) {
-		err = ERR_INVALID_REQUEST;
-		drbd_msg_put_info("requested volume exists already");
-		goto out_idr_remove_vol;
+	vnr_got = idr_alloc(&tconn->volumes, mdev, vnr, vnr + 1, GFP_KERNEL);
+	if (vnr_got < 0) {
+		if (vnr_got == -ENOSPC) {
+			err = ERR_INVALID_REQUEST;
+			drbd_msg_put_info("requested volume exists already");
+		}
+		goto out_idr_remove_minor;
 	}
+
 	add_disk(disk);
 	kref_init(&mdev->kref); /* one ref for both idrs and the the add_disk */

@@ -2689,8 +2688,6 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
 
 	return NO_ERROR;
 
-out_idr_remove_vol:
-	idr_remove(&tconn->volumes, vnr_got);
 out_idr_remove_minor:
 	idr_remove(&minors, minor_got);
 	synchronize_rcu();

@@ -1624,30 +1624,17 @@ static int loop_add(struct loop_device **l, int i)
 	if (!lo)
 		goto out;
 
-	if (!idr_pre_get(&loop_index_idr, GFP_KERNEL))
-		goto out_free_dev;
-
+	/* allocate id, if @id >= 0, we're requesting that specific id */
 	if (i >= 0) {
-		int m;
-
-		/* create specific i in the index */
-		err = idr_get_new_above(&loop_index_idr, lo, i, &m);
-		if (err >= 0 && i != m) {
-			idr_remove(&loop_index_idr, m);
+		err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL);
+		if (err == -ENOSPC)
 			err = -EEXIST;
-		}
-	} else if (i == -1) {
-		int m;
-
-		/* get next free nr */
-		err = idr_get_new(&loop_index_idr, lo, &m);
-		if (err >= 0)
-			i = m;
 	} else {
-		err = -EINVAL;
+		err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL);
 	}
 	if (err < 0)
 		goto out_free_dev;
+	i = err;
 
 	lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
 	if (!lo->lo_queue)

@@ -1911,7 +1898,6 @@ static void __exit loop_exit(void)
 	range = max_loop ? max_loop << part_shift : 1UL << MINORBITS;
 
 	idr_for_each(&loop_index_idr, &loop_exit_cb, NULL);
-	idr_remove_all(&loop_index_idr);
 	idr_destroy(&loop_index_idr);
 
 	blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
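Both the drbd and loop conversions above exploit idr_alloc()'s end-exclusive range to claim one specific id: with [n, n + 1) the call can only ever return n, and it fails with -ENOSPC when n is already taken, which each caller then maps onto its own error code (-EEXIST for loop, ERR_MINOR_EXISTS for drbd). The idiom, reduced to its skeleton (names illustrative):

    /* try to claim exactly nr; the range [nr, nr + 1) holds a single id */
    err = idr_alloc(&some_idr, ptr, nr, nr + 1, GFP_KERNEL);
    if (err == -ENOSPC)
    	err = -EEXIST;	/* nr is in use; report it in the caller's terms */
    if (err < 0)
    	return err;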
@@ -98,6 +98,7 @@ static const char *nbdcmd_to_ascii(int cmd)
 	case NBD_CMD_READ: return "read";
 	case NBD_CMD_WRITE: return "write";
 	case NBD_CMD_DISC: return "disconnect";
+	case NBD_CMD_FLUSH: return "flush";
 	case NBD_CMD_TRIM: return "trim/discard";
 	}
 	return "invalid";

@@ -244,8 +245,15 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
 
 	request.magic = htonl(NBD_REQUEST_MAGIC);
 	request.type = htonl(nbd_cmd(req));
-	request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
-	request.len = htonl(size);
+
+	if (nbd_cmd(req) == NBD_CMD_FLUSH) {
+		/* Other values are reserved for FLUSH requests. */
+		request.from = 0;
+		request.len = 0;
+	} else {
+		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
+		request.len = htonl(size);
+	}
 	memcpy(request.handle, &req, sizeof(req));
 
 	dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n",

@@ -482,6 +490,11 @@ static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
 		}
 	}
 
+	if (req->cmd_flags & REQ_FLUSH) {
+		BUG_ON(unlikely(blk_rq_sectors(req)));
+		nbd_cmd(req) = NBD_CMD_FLUSH;
+	}
+
 	req->errors = 0;
 
 	mutex_lock(&nbd->tx_lock);

@@ -551,6 +564,7 @@ static int nbd_thread(void *data)
  */
 
 static void do_nbd_request(struct request_queue *q)
+		__releases(q->queue_lock) __acquires(q->queue_lock)
 {
 	struct request *req;
 

@@ -595,12 +609,20 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 		struct request sreq;
 
 		dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
+		if (!nbd->sock)
+			return -EINVAL;
 
+		mutex_unlock(&nbd->tx_lock);
+		fsync_bdev(bdev);
+		mutex_lock(&nbd->tx_lock);
 		blk_rq_init(NULL, &sreq);
 		sreq.cmd_type = REQ_TYPE_SPECIAL;
 		nbd_cmd(&sreq) = NBD_CMD_DISC;
+
+		/* Check again after getting mutex back. */
 		if (!nbd->sock)
 			return -EINVAL;
+
 		nbd_send_req(nbd, &sreq);
 		return 0;
 	}

@@ -614,6 +636,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 		nbd_clear_que(nbd);
 		BUG_ON(!list_empty(&nbd->queue_head));
 		BUG_ON(!list_empty(&nbd->waiting_queue));
+		kill_bdev(bdev);
 		if (file)
 			fput(file);
 		return 0;

@@ -681,9 +704,15 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 
 		mutex_unlock(&nbd->tx_lock);
 
+		if (nbd->flags & NBD_FLAG_READ_ONLY)
+			set_device_ro(bdev, true);
 		if (nbd->flags & NBD_FLAG_SEND_TRIM)
 			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
 				nbd->disk->queue);
+		if (nbd->flags & NBD_FLAG_SEND_FLUSH)
+			blk_queue_flush(nbd->disk->queue, REQ_FLUSH);
+		else
+			blk_queue_flush(nbd->disk->queue, 0);
 
 		thread = kthread_create(nbd_thread, nbd, nbd->disk->disk_name);
 		if (IS_ERR(thread)) {

@@ -702,9 +731,12 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 		nbd->file = NULL;
 		nbd_clear_que(nbd);
 		dev_warn(disk_to_dev(nbd->disk), "queue cleared\n");
+		kill_bdev(bdev);
+		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
+		set_device_ro(bdev, false);
 		if (file)
 			fput(file);
+		nbd->flags = 0;
 		nbd->bytesize = 0;
 		bdev->bd_inode->i_size = 0;
 		set_capacity(nbd->disk, 0);
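Taken together, the nbd hunks wire REQ_FLUSH through the driver in three coordinated places; condensed below (this is a summary of the hunks above, not additional driver code):

    /* 1. advertise flush only when the server set NBD_FLAG_SEND_FLUSH */
    if (nbd->flags & NBD_FLAG_SEND_FLUSH)
    	blk_queue_flush(nbd->disk->queue, REQ_FLUSH);

    /* 2. translate: an empty REQ_FLUSH request becomes NBD_CMD_FLUSH */
    if (req->cmd_flags & REQ_FLUSH)
    	nbd_cmd(req) = NBD_CMD_FLUSH;

    /* 3. encode: a FLUSH carries neither offset nor length on the wire */
    request.from = 0;
    request.len = 0;

Without step 1 the block layer never issues REQ_FLUSH to the queue, so old servers keep working unchanged.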
@@ -1208,6 +1208,16 @@ static int smi_num; /* Used to sequence the SMIs */
 #define DEFAULT_REGSPACING 1
 #define DEFAULT_REGSIZE 1
 
+#ifdef CONFIG_ACPI
+static bool si_tryacpi = 1;
+#endif
+#ifdef CONFIG_DMI
+static bool si_trydmi = 1;
+#endif
+static bool si_tryplatform = 1;
+#ifdef CONFIG_PCI
+static bool si_trypci = 1;
+#endif
 static bool si_trydefaults = 1;
 static char *si_type[SI_MAX_PARMS];
 #define MAX_SI_TYPE_STR 30

@@ -1238,6 +1248,25 @@ MODULE_PARM_DESC(hotmod, "Add and remove interfaces. See"
 		 " Documentation/IPMI.txt in the kernel sources for the"
 		 " gory details.");
 
+#ifdef CONFIG_ACPI
+module_param_named(tryacpi, si_tryacpi, bool, 0);
+MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the"
+		 " default scan of the interfaces identified via ACPI");
+#endif
+#ifdef CONFIG_DMI
+module_param_named(trydmi, si_trydmi, bool, 0);
+MODULE_PARM_DESC(trydmi, "Setting this to zero will disable the"
+		 " default scan of the interfaces identified via DMI");
+#endif
+module_param_named(tryplatform, si_tryplatform, bool, 0);
+MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the"
+		 " default scan of the interfaces identified via platform"
+		 " interfaces like openfirmware");
+#ifdef CONFIG_PCI
+module_param_named(trypci, si_trypci, bool, 0);
+MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the"
+		 " default scan of the interfaces identified via pci");
+#endif
 module_param_named(trydefaults, si_trydefaults, bool, 0);
 MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
 		 " default scan of the KCS and SMIC interface at the standard"

@@ -3371,13 +3400,15 @@ static int init_ipmi_si(void)
 		return 0;
 	initialized = 1;
 
-	rv = platform_driver_register(&ipmi_driver);
-	if (rv) {
-		printk(KERN_ERR PFX "Unable to register driver: %d\n", rv);
-		return rv;
+	if (si_tryplatform) {
+		rv = platform_driver_register(&ipmi_driver);
+		if (rv) {
+			printk(KERN_ERR PFX "Unable to register "
+			       "driver: %d\n", rv);
+			return rv;
+		}
 	}
 
 	/* Parse out the si_type string into its components. */
 	str = si_type_str;
 	if (*str != '\0') {

@@ -3400,24 +3431,31 @@ static int init_ipmi_si(void)
 		return 0;
 
 #ifdef CONFIG_PCI
-	rv = pci_register_driver(&ipmi_pci_driver);
-	if (rv)
-		printk(KERN_ERR PFX "Unable to register PCI driver: %d\n", rv);
-	else
-		pci_registered = 1;
+	if (si_trypci) {
+		rv = pci_register_driver(&ipmi_pci_driver);
+		if (rv)
+			printk(KERN_ERR PFX "Unable to register "
+			       "PCI driver: %d\n", rv);
+		else
+			pci_registered = 1;
+	}
 #endif
 
 #ifdef CONFIG_ACPI
-	pnp_register_driver(&ipmi_pnp_driver);
-	pnp_registered = 1;
+	if (si_tryacpi) {
+		pnp_register_driver(&ipmi_pnp_driver);
+		pnp_registered = 1;
+	}
 #endif
 
 #ifdef CONFIG_DMI
-	dmi_find_bmc();
+	if (si_trydmi)
+		dmi_find_bmc();
 #endif
 
 #ifdef CONFIG_ACPI
-	spmi_find_bmc();
+	if (si_tryacpi)
+		spmi_find_bmc();
 #endif
 
 	/* We prefer devices with interrupts, but in the case of a machine
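With these parameters each discovery source can be disabled individually at module load time, in the same style as the existing options, e.g.:

    modprobe ipmi_si tryacpi=0 trydmi=0 trydefaults=0

which leaves only explicitly specified interfaces plus platform/PCI scanning active. Note that the hunk above registers the descriptions for tryplatform and trypci under the name tryacpi, so modinfo output will attach those texts to the wrong parameter; the parameters themselves work as named.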
@@ -183,19 +183,12 @@ static const struct file_operations misc_fops = {
 
 int misc_register(struct miscdevice * misc)
 {
-	struct miscdevice *c;
 	dev_t dev;
 	int err = 0;
 
 	INIT_LIST_HEAD(&misc->list);
 
 	mutex_lock(&misc_mtx);
-	list_for_each_entry(c, &misc_list, list) {
-		if (c->minor == misc->minor) {
-			mutex_unlock(&misc_mtx);
-			return -EBUSY;
-		}
-	}
 
 	if (misc->minor == MISC_DYNAMIC_MINOR) {
 		int i = find_first_zero_bit(misc_minors, DYNAMIC_MINORS);

@@ -205,6 +198,15 @@ int misc_register(struct miscdevice * misc)
 		}
 		misc->minor = DYNAMIC_MINORS - i - 1;
 		set_bit(i, misc_minors);
+	} else {
+		struct miscdevice *c;
+
+		list_for_each_entry(c, &misc_list, list) {
+			if (c->minor == misc->minor) {
+				mutex_unlock(&misc_mtx);
+				return -EBUSY;
+			}
+		}
 	}
 
 	dev = MKDEV(MISC_MAJOR, misc->minor);
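The misc_register() rework does not change the driver-visible contract; it only makes the -EBUSY scan apply to fixed minors alone, since dynamic minors come from the misc_minors bitmap and cannot collide. A minimal sketch of the two registration styles (the device names and mydev_fops are hypothetical):

    static struct miscdevice fixed_dev = {
    	.minor = 130,			/* fixed: checked against misc_list, -EBUSY if taken */
    	.name  = "mydev0",
    	.fops  = &mydev_fops,
    };

    static struct miscdevice dyn_dev = {
    	.minor = MISC_DYNAMIC_MINOR,	/* dynamic: allocated from the misc_minors bitmap */
    	.name  = "mydev1",
    	.fops  = &mydev_fops,
    };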
@@ -52,31 +52,29 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk *c,
 				     int level)
 {
 	struct clk *child;
-	struct hlist_node *tmp;
 
 	if (!c)
 		return;
 
 	clk_summary_show_one(s, c, level);
 
-	hlist_for_each_entry(child, tmp, &c->children, child_node)
+	hlist_for_each_entry(child, &c->children, child_node)
 		clk_summary_show_subtree(s, child, level + 1);
 }
 
 static int clk_summary_show(struct seq_file *s, void *data)
 {
 	struct clk *c;
-	struct hlist_node *tmp;
 
 	seq_printf(s, " clock enable_cnt prepare_cnt rate\n");
 	seq_printf(s, "---------------------------------------------------------------------\n");
 
 	mutex_lock(&prepare_lock);
 
-	hlist_for_each_entry(c, tmp, &clk_root_list, child_node)
+	hlist_for_each_entry(c, &clk_root_list, child_node)
 		clk_summary_show_subtree(s, c, 0);
 
-	hlist_for_each_entry(c, tmp, &clk_orphan_list, child_node)
+	hlist_for_each_entry(c, &clk_orphan_list, child_node)
 		clk_summary_show_subtree(s, c, 0);
 
 	mutex_unlock(&prepare_lock);

@@ -111,14 +109,13 @@ static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
 static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
 {
 	struct clk *child;
-	struct hlist_node *tmp;
 
 	if (!c)
 		return;
 
 	clk_dump_one(s, c, level);
 
-	hlist_for_each_entry(child, tmp, &c->children, child_node) {
+	hlist_for_each_entry(child, &c->children, child_node) {
 		seq_printf(s, ",");
 		clk_dump_subtree(s, child, level + 1);
 	}

@@ -129,21 +126,20 @@ static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
 static int clk_dump(struct seq_file *s, void *data)
 {
 	struct clk *c;
-	struct hlist_node *tmp;
 	bool first_node = true;
 
 	seq_printf(s, "{");
 
 	mutex_lock(&prepare_lock);
 
-	hlist_for_each_entry(c, tmp, &clk_root_list, child_node) {
+	hlist_for_each_entry(c, &clk_root_list, child_node) {
 		if (!first_node)
 			seq_printf(s, ",");
 		first_node = false;
 		clk_dump_subtree(s, c, 0);
 	}
 
-	hlist_for_each_entry(c, tmp, &clk_orphan_list, child_node) {
+	hlist_for_each_entry(c, &clk_orphan_list, child_node) {
 		seq_printf(s, ",");
 		clk_dump_subtree(s, c, 0);
 	}

@@ -222,7 +218,6 @@ out:
 static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
 {
 	struct clk *child;
-	struct hlist_node *tmp;
 	int ret = -EINVAL;;
 
 	if (!clk || !pdentry)

@@ -233,7 +228,7 @@ static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
 	if (ret)
 		goto out;
 
-	hlist_for_each_entry(child, tmp, &clk->children, child_node)
+	hlist_for_each_entry(child, &clk->children, child_node)
 		clk_debug_create_subtree(child, clk->dentry);
 
 	ret = 0;

@@ -299,7 +294,6 @@ out:
 static int __init clk_debug_init(void)
 {
 	struct clk *clk;
-	struct hlist_node *tmp;
 	struct dentry *d;
 
 	rootdir = debugfs_create_dir("clk", NULL);

@@ -324,10 +318,10 @@ static int __init clk_debug_init(void)
 
 	mutex_lock(&prepare_lock);
 
-	hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
+	hlist_for_each_entry(clk, &clk_root_list, child_node)
 		clk_debug_create_subtree(clk, rootdir);
 
-	hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
+	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
 		clk_debug_create_subtree(clk, orphandir);
 
 	inited = 1;

@@ -345,13 +339,12 @@ static inline int clk_debug_register(struct clk *clk) { return 0; }
 static void clk_disable_unused_subtree(struct clk *clk)
 {
 	struct clk *child;
-	struct hlist_node *tmp;
 	unsigned long flags;
 
 	if (!clk)
 		goto out;
 
-	hlist_for_each_entry(child, tmp, &clk->children, child_node)
+	hlist_for_each_entry(child, &clk->children, child_node)
 		clk_disable_unused_subtree(child);
 
 	spin_lock_irqsave(&enable_lock, flags);

@@ -384,14 +377,13 @@ out:
 static int clk_disable_unused(void)
 {
 	struct clk *clk;
-	struct hlist_node *tmp;
 
 	mutex_lock(&prepare_lock);
 
-	hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
+	hlist_for_each_entry(clk, &clk_root_list, child_node)
 		clk_disable_unused_subtree(clk);
 
-	hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
+	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
 		clk_disable_unused_subtree(clk);
 
 	mutex_unlock(&prepare_lock);

@@ -484,12 +476,11 @@ static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
 {
 	struct clk *child;
 	struct clk *ret;
-	struct hlist_node *tmp;
 
 	if (!strcmp(clk->name, name))
 		return clk;
 
-	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
+	hlist_for_each_entry(child, &clk->children, child_node) {
 		ret = __clk_lookup_subtree(name, child);
 		if (ret)
 			return ret;

@@ -502,20 +493,19 @@ struct clk *__clk_lookup(const char *name)
 {
 	struct clk *root_clk;
 	struct clk *ret;
-	struct hlist_node *tmp;
 
 	if (!name)
 		return NULL;
 
 	/* search the 'proper' clk tree first */
-	hlist_for_each_entry(root_clk, tmp, &clk_root_list, child_node) {
+	hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
 		ret = __clk_lookup_subtree(name, root_clk);
 		if (ret)
 			return ret;
 	}
 
 	/* if not found, then search the orphan tree */
-	hlist_for_each_entry(root_clk, tmp, &clk_orphan_list, child_node) {
+	hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
 		ret = __clk_lookup_subtree(name, root_clk);
 		if (ret)
 			return ret;

@@ -812,7 +802,6 @@ static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
 {
 	unsigned long old_rate;
 	unsigned long parent_rate = 0;
-	struct hlist_node *tmp;
 	struct clk *child;
 
 	old_rate = clk->rate;

@@ -832,7 +821,7 @@ static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
 	if (clk->notifier_count && msg)
 		__clk_notify(clk, msg, old_rate, clk->rate);
 
-	hlist_for_each_entry(child, tmp, &clk->children, child_node)
+	hlist_for_each_entry(child, &clk->children, child_node)
 		__clk_recalc_rates(child, msg);
 }
 

@@ -878,7 +867,6 @@ EXPORT_SYMBOL_GPL(clk_get_rate);
  */
 static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
 {
-	struct hlist_node *tmp;
 	struct clk *child;
 	unsigned long new_rate;
 	int ret = NOTIFY_DONE;

@@ -895,7 +883,7 @@ static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
 	if (ret == NOTIFY_BAD)
 		goto out;
 
-	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
+	hlist_for_each_entry(child, &clk->children, child_node) {
 		ret = __clk_speculate_rates(child, new_rate);
 		if (ret == NOTIFY_BAD)
 			break;

@@ -908,11 +896,10 @@ out:
 static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
 {
 	struct clk *child;
-	struct hlist_node *tmp;
 
 	clk->new_rate = new_rate;
 
-	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
+	hlist_for_each_entry(child, &clk->children, child_node) {
 		if (child->ops->recalc_rate)
 			child->new_rate = child->ops->recalc_rate(child->hw, new_rate);
 		else

@@ -983,7 +970,6 @@ out:
  */
 static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
 {
-	struct hlist_node *tmp;
 	struct clk *child, *fail_clk = NULL;
 	int ret = NOTIFY_DONE;
 

@@ -996,7 +982,7 @@ static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long even
 		fail_clk = clk;
 	}
 
-	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
+	hlist_for_each_entry(child, &clk->children, child_node) {
 		clk = clk_propagate_rate_change(child, event);
 		if (clk)
 			fail_clk = clk;

@@ -1014,7 +1000,6 @@ static void clk_change_rate(struct clk *clk)
 	struct clk *child;
 	unsigned long old_rate;
 	unsigned long best_parent_rate = 0;
-	struct hlist_node *tmp;
 
 	old_rate = clk->rate;
 

@@ -1032,7 +1017,7 @@ static void clk_change_rate(struct clk *clk)
 	if (clk->notifier_count && old_rate != clk->rate)
 		__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
 
-	hlist_for_each_entry(child, tmp, &clk->children, child_node)
+	hlist_for_each_entry(child, &clk->children, child_node)
 		clk_change_rate(child);
 }
 

@@ -1348,7 +1333,7 @@ int __clk_init(struct device *dev, struct clk *clk)
 {
 	int i, ret = 0;
 	struct clk *orphan;
-	struct hlist_node *tmp, *tmp2;
+	struct hlist_node *tmp2;
 
 	if (!clk)
 		return -EINVAL;

@@ -1448,7 +1433,7 @@ int __clk_init(struct device *dev, struct clk *clk)
 	 * walk the list of orphan clocks and reparent any that are children of
 	 * this clock
 	 */
-	hlist_for_each_entry_safe(orphan, tmp, tmp2, &clk_orphan_list, child_node) {
+	hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
 		if (orphan->ops->get_parent) {
 			i = orphan->ops->get_parent(orphan->hw);
 			if (!strcmp(clk->name, orphan->parent_names[i]))
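The one clk.c walk that still carries an auxiliary node is the orphan pass in __clk_init(), because reparenting can unlink the current entry mid-iteration; the safe iterator therefore keeps a single lookahead cursor, dropping only the old position parameter. Sketch of the new shape (maybe_reparent() is a stand-in for the body in the hunk above):

    struct clk *orphan;
    struct hlist_node *tmp2;

    hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node)
    	maybe_reparent(orphan);	/* may move orphan to another list */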
@@ -53,22 +53,19 @@ void dca_sysfs_remove_req(struct dca_provider *dca, int slot)
 int dca_sysfs_add_provider(struct dca_provider *dca, struct device *dev)
 {
 	struct device *cd;
-	int err = 0;
+	int ret;
 
-idr_try_again:
-	if (!idr_pre_get(&dca_idr, GFP_KERNEL))
-		return -ENOMEM;
+	idr_preload(GFP_KERNEL);
 	spin_lock(&dca_idr_lock);
-	err = idr_get_new(&dca_idr, dca, &dca->id);
+
+	ret = idr_alloc(&dca_idr, dca, 0, 0, GFP_NOWAIT);
+	if (ret >= 0)
+		dca->id = ret;
+
 	spin_unlock(&dca_idr_lock);
-	switch (err) {
-	case 0:
-		break;
-	case -EAGAIN:
-		goto idr_try_again;
-	default:
-		return err;
-	}
+	idr_preload_end();
+	if (ret < 0)
+		return ret;
 
 	cd = device_create(dca_class, dev, MKDEV(0, 0), NULL, "dca%d", dca->id);
 	if (IS_ERR(cd)) {
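The dca conversion shows the second idr idiom used throughout this series, for call sites that must allocate under a spinlock: memory is preloaded outside the lock and the allocation itself runs with GFP_NOWAIT, drawing on the per-cpu reserve. The skeleton, stripped of driver specifics (my_idr, ptr and lock are placeholders):

    idr_preload(GFP_KERNEL);	/* may sleep; fills a per-cpu reserve and disables preemption */
    spin_lock(&lock);

    id = idr_alloc(&my_idr, ptr, 0, 0, GFP_NOWAIT);	/* never sleeps */

    spin_unlock(&lock);
    idr_preload_end();		/* re-enables preemption */

    return id < 0 ? id : 0;

Preloading guarantees only a limited reserve, so the GFP_NOWAIT allocation can still fail under memory pressure; that is why every converted caller keeps its error path.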
@@ -686,18 +686,14 @@ static int get_dma_id(struct dma_device *device)
 {
 	int rc;
 
-	idr_retry:
-	if (!idr_pre_get(&dma_idr, GFP_KERNEL))
-		return -ENOMEM;
 	mutex_lock(&dma_list_mutex);
-	rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
-	mutex_unlock(&dma_list_mutex);
-	if (rc == -EAGAIN)
-		goto idr_retry;
-	else if (rc != 0)
-		return rc;
 
-	return 0;
+	rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
+	if (rc >= 0)
+		device->dev_id = rc;
+
+	mutex_unlock(&dma_list_mutex);
+	return rc < 0 ? rc : 0;
 }
 
 /**

@@ -487,27 +487,28 @@ static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
 static int add_client_resource(struct client *client,
 			       struct client_resource *resource, gfp_t gfp_mask)
 {
+	bool preload = gfp_mask & __GFP_WAIT;
 	unsigned long flags;
 	int ret;
 
- retry:
-	if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
-		return -ENOMEM;
-
+	if (preload)
+		idr_preload(gfp_mask);
 	spin_lock_irqsave(&client->lock, flags);
+
 	if (client->in_shutdown)
 		ret = -ECANCELED;
 	else
-		ret = idr_get_new(&client->resource_idr, resource,
-				  &resource->handle);
+		ret = idr_alloc(&client->resource_idr, resource, 0, 0,
+				GFP_NOWAIT);
 	if (ret >= 0) {
+		resource->handle = ret;
 		client_get(client);
 		schedule_if_iso_resource(resource);
 	}
-	spin_unlock_irqrestore(&client->lock, flags);
 
-	if (ret == -EAGAIN)
-		goto retry;
+	spin_unlock_irqrestore(&client->lock, flags);
+	if (preload)
+		idr_preload_end();
 
 	return ret < 0 ? ret : 0;
 }

@@ -1779,7 +1780,6 @@ static int fw_device_op_release(struct inode *inode, struct file *file)
 	wait_event(client->tx_flush_wait, !has_outbound_transactions(client));
 
 	idr_for_each(&client->resource_idr, shutdown_resource, client);
-	idr_remove_all(&client->resource_idr);
 	idr_destroy(&client->resource_idr);
 
 	list_for_each_entry_safe(event, next_event, &client->event_list, link)
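firewire's add_client_resource() may be entered with either a sleeping or an atomic gfp_mask, so it gates the preload on whether the caller is allowed to sleep at all; send_mad() in the InfiniBand hunks further down uses the identical construction. Condensed (lock and my_idr are placeholders):

    bool preload = gfp_mask & __GFP_WAIT;	/* may this caller sleep? */

    if (preload)
    	idr_preload(gfp_mask);
    spin_lock_irqsave(&lock, flags);

    ret = idr_alloc(&my_idr, ptr, 0, 0, GFP_NOWAIT);

    spin_unlock_irqrestore(&lock, flags);
    if (preload)
    	idr_preload_end();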
@@ -1017,12 +1017,11 @@ static void fw_device_init(struct work_struct *work)
 
 	fw_device_get(device);
 	down_write(&fw_device_rwsem);
-	ret = idr_pre_get(&fw_device_idr, GFP_KERNEL) ?
-	      idr_get_new(&fw_device_idr, device, &minor) :
-	      -ENOMEM;
+	minor = idr_alloc(&fw_device_idr, device, 0, 1 << MINORBITS,
+			GFP_KERNEL);
 	up_write(&fw_device_rwsem);
 
-	if (ret < 0)
+	if (minor < 0)
 		goto error;
 
 	device->device.bus = &fw_bus_type;
@@ -411,15 +411,10 @@ static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev,
 		goto err_out;
 	}
 
-	do {
-		ret = -ENOMEM;
-		if (idr_pre_get(&dirent_idr, GFP_KERNEL))
-			ret = idr_get_new_above(&dirent_idr, value_sd,
-						1, &id);
-	} while (ret == -EAGAIN);
-
-	if (ret)
+	ret = idr_alloc(&dirent_idr, value_sd, 1, 0, GFP_KERNEL);
+	if (ret < 0)
 		goto free_sd;
+	id = ret;
 
 	desc->flags &= GPIO_FLAGS_MASK;
 	desc->flags |= (unsigned long)id << ID_SHIFT;
@@ -74,24 +74,13 @@ void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
  */
 static int drm_ctxbitmap_next(struct drm_device * dev)
 {
-	int new_id;
 	int ret;
 
-again:
-	if (idr_pre_get(&dev->ctx_idr, GFP_KERNEL) == 0) {
-		DRM_ERROR("Out of memory expanding drawable idr\n");
-		return -ENOMEM;
-	}
 	mutex_lock(&dev->struct_mutex);
-	ret = idr_get_new_above(&dev->ctx_idr, NULL,
-				DRM_RESERVED_CONTEXTS, &new_id);
+	ret = idr_alloc(&dev->ctx_idr, NULL, DRM_RESERVED_CONTEXTS, 0,
+			GFP_KERNEL);
 	mutex_unlock(&dev->struct_mutex);
-	if (ret == -EAGAIN)
-		goto again;
-	else if (ret)
-		return ret;
 
-	return new_id;
+	return ret;
 }
 
 /**

@@ -118,7 +107,7 @@ int drm_ctxbitmap_init(struct drm_device * dev)
 void drm_ctxbitmap_cleanup(struct drm_device * dev)
 {
 	mutex_lock(&dev->struct_mutex);
-	idr_remove_all(&dev->ctx_idr);
+	idr_destroy(&dev->ctx_idr);
 	mutex_unlock(&dev->struct_mutex);
 }
@@ -266,32 +266,21 @@ char *drm_get_connector_status_name(enum drm_connector_status status)
 static int drm_mode_object_get(struct drm_device *dev,
 			       struct drm_mode_object *obj, uint32_t obj_type)
 {
-	int new_id = 0;
 	int ret;
 
-again:
-	if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) {
-		DRM_ERROR("Ran out memory getting a mode number\n");
-		return -ENOMEM;
-	}
-
 	mutex_lock(&dev->mode_config.idr_mutex);
-	ret = idr_get_new_above(&dev->mode_config.crtc_idr, obj, 1, &new_id);
-
-	if (!ret) {
+	ret = idr_alloc(&dev->mode_config.crtc_idr, obj, 1, 0, GFP_KERNEL);
+	if (ret >= 0) {
 		/*
 		 * Set up the object linking under the protection of the idr
 		 * lock so that other users can't see inconsistent state.
 		 */
-		obj->id = new_id;
+		obj->id = ret;
 		obj->type = obj_type;
 	}
 	mutex_unlock(&dev->mode_config.idr_mutex);
 
-	if (ret == -EAGAIN)
-		goto again;
-
-	return ret;
+	return ret < 0 ? ret : 0;
 }
 
 /**

@@ -1272,7 +1261,6 @@ void drm_mode_config_cleanup(struct drm_device *dev)
 		crtc->funcs->destroy(crtc);
 	}
 
-	idr_remove_all(&dev->mode_config.crtc_idr);
 	idr_destroy(&dev->mode_config.crtc_idr);
 }
 EXPORT_SYMBOL(drm_mode_config_cleanup);
@@ -297,7 +297,6 @@ static void __exit drm_core_exit(void)
 
 	unregister_chrdev(DRM_MAJOR, "drm");
 
-	idr_remove_all(&drm_minors_idr);
 	idr_destroy(&drm_minors_idr);
 }
 
@@ -270,21 +270,19 @@ drm_gem_handle_create(struct drm_file *file_priv,
 	int ret;
 
 	/*
-	 * Get the user-visible handle using idr.
+	 * Get the user-visible handle using idr. Preload and perform
+	 * allocation under our spinlock.
 	 */
-again:
-	/* ensure there is space available to allocate a handle */
-	if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
-		return -ENOMEM;
-
-	/* do the allocation under our spinlock */
+	idr_preload(GFP_KERNEL);
 	spin_lock(&file_priv->table_lock);
-	ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
+
+	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
+
 	spin_unlock(&file_priv->table_lock);
-	if (ret == -EAGAIN)
-		goto again;
-	else if (ret)
+	idr_preload_end();
+	if (ret < 0)
 		return ret;
+	*handlep = ret;
 
 	drm_gem_object_handle_reference(obj);
 

@@ -451,29 +449,25 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
 	if (obj == NULL)
 		return -ENOENT;
 
-again:
-	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
-		ret = -ENOMEM;
-		goto err;
-	}
-
+	idr_preload(GFP_KERNEL);
 	spin_lock(&dev->object_name_lock);
 	if (!obj->name) {
-		ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
-					&obj->name);
+		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
+		obj->name = ret;
 		args->name = (uint64_t) obj->name;
 		spin_unlock(&dev->object_name_lock);
+		idr_preload_end();
 
-		if (ret == -EAGAIN)
-			goto again;
-		else if (ret)
+		if (ret < 0)
 			goto err;
+		ret = 0;
 
 		/* Allocate a reference for the name table. */
 		drm_gem_object_reference(obj);
 	} else {
 		args->name = (uint64_t) obj->name;
 		spin_unlock(&dev->object_name_lock);
+		idr_preload_end();
 		ret = 0;
 	}

@@ -561,8 +555,6 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
 {
 	idr_for_each(&file_private->object_idr,
 		     &drm_gem_object_release_handle, file_private);
-
-	idr_remove_all(&file_private->object_idr);
 	idr_destroy(&file_private->object_idr);
 }
 
@@ -60,14 +60,13 @@ void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
 {
 	struct drm_hash_item *entry;
 	struct hlist_head *h_list;
-	struct hlist_node *list;
 	unsigned int hashed_key;
 	int count = 0;
 
 	hashed_key = hash_long(key, ht->order);
 	DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
 	h_list = &ht->table[hashed_key];
-	hlist_for_each_entry(entry, list, h_list, head)
+	hlist_for_each_entry(entry, h_list, head)
 		DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
 }
 

@@ -76,14 +75,13 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
 {
 	struct drm_hash_item *entry;
 	struct hlist_head *h_list;
-	struct hlist_node *list;
 	unsigned int hashed_key;
 
 	hashed_key = hash_long(key, ht->order);
 	h_list = &ht->table[hashed_key];
-	hlist_for_each_entry(entry, list, h_list, head) {
+	hlist_for_each_entry(entry, h_list, head) {
 		if (entry->key == key)
-			return list;
+			return &entry->head;
 		if (entry->key > key)
 			break;
 	}

@@ -95,14 +93,13 @@ static struct hlist_node *drm_ht_find_key_rcu(struct drm_open_hash *ht,
 {
 	struct drm_hash_item *entry;
 	struct hlist_head *h_list;
-	struct hlist_node *list;
 	unsigned int hashed_key;
 
 	hashed_key = hash_long(key, ht->order);
 	h_list = &ht->table[hashed_key];
-	hlist_for_each_entry_rcu(entry, list, h_list, head) {
+	hlist_for_each_entry_rcu(entry, h_list, head) {
 		if (entry->key == key)
-			return list;
+			return &entry->head;
 		if (entry->key > key)
 			break;
 	}

@@ -113,19 +110,19 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
 {
 	struct drm_hash_item *entry;
 	struct hlist_head *h_list;
-	struct hlist_node *list, *parent;
+	struct hlist_node *parent;
 	unsigned int hashed_key;
 	unsigned long key = item->key;
 
 	hashed_key = hash_long(key, ht->order);
 	h_list = &ht->table[hashed_key];
 	parent = NULL;
-	hlist_for_each_entry(entry, list, h_list, head) {
+	hlist_for_each_entry(entry, h_list, head) {
 		if (entry->key == key)
 			return -EINVAL;
 		if (entry->key > key)
 			break;
-		parent = list;
+		parent = &entry->head;
 	}
 	if (parent) {
 		hlist_add_after_rcu(parent, &item->head);
@@ -109,7 +109,6 @@ EXPORT_SYMBOL(drm_ut_debug_printk);
 
 static int drm_minor_get_id(struct drm_device *dev, int type)
 {
-	int new_id;
 	int ret;
 	int base = 0, limit = 63;
 

@@ -121,25 +120,11 @@ static int drm_minor_get_id(struct drm_device *dev, int type)
 		limit = base + 255;
 	}
 
-again:
-	if (idr_pre_get(&drm_minors_idr, GFP_KERNEL) == 0) {
-		DRM_ERROR("Out of memory expanding drawable idr\n");
-		return -ENOMEM;
-	}
 	mutex_lock(&dev->struct_mutex);
-	ret = idr_get_new_above(&drm_minors_idr, NULL,
-				base, &new_id);
+	ret = idr_alloc(&drm_minors_idr, NULL, base, limit, GFP_KERNEL);
 	mutex_unlock(&dev->struct_mutex);
-	if (ret == -EAGAIN)
-		goto again;
-	else if (ret)
-		return ret;
 
-	if (new_id >= limit) {
-		idr_remove(&drm_minors_idr, new_id);
-		return -EINVAL;
-	}
-	return new_id;
+	return ret == -ENOSPC ? -EINVAL : ret;
 }
 
 struct drm_master *drm_master_create(struct drm_minor *minor)
@@ -137,21 +137,15 @@ static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
 
 	DRM_DEBUG_KMS("%s\n", __func__);
 
-again:
-	/* ensure there is space available to allocate a handle */
-	if (idr_pre_get(id_idr, GFP_KERNEL) == 0) {
-		DRM_ERROR("failed to get idr.\n");
-		return -ENOMEM;
-	}
-
 	/* do the allocation under our mutexlock */
 	mutex_lock(lock);
-	ret = idr_get_new_above(id_idr, obj, 1, (int *)idp);
+	ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
 	mutex_unlock(lock);
-	if (ret == -EAGAIN)
-		goto again;
+	if (ret < 0)
+		return ret;
 
-	return ret;
+	*idp = ret;
+	return 0;
 }
 
 static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)

@@ -1786,8 +1780,6 @@ err_iommu:
 	drm_iommu_detach_device(drm_dev, ippdrv->dev);
 
 err_idr:
-	idr_remove_all(&ctx->ipp_idr);
-	idr_remove_all(&ctx->prop_idr);
 	idr_destroy(&ctx->ipp_idr);
 	idr_destroy(&ctx->prop_idr);
 	return ret;

@@ -1965,8 +1957,6 @@ static int ipp_remove(struct platform_device *pdev)
 	exynos_drm_subdrv_unregister(&ctx->subdrv);
 
 	/* remove,destroy ipp idr */
-	idr_remove_all(&ctx->ipp_idr);
-	idr_remove_all(&ctx->prop_idr);
 	idr_destroy(&ctx->ipp_idr);
 	idr_destroy(&ctx->prop_idr);
 
@@ -139,7 +139,7 @@ create_hw_context(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_hw_context *ctx;
-	int ret, id;
+	int ret;
 
 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 	if (ctx == NULL)

@@ -164,22 +164,11 @@ create_hw_context(struct drm_device *dev,
 
 	ctx->file_priv = file_priv;
 
-again:
-	if (idr_pre_get(&file_priv->context_idr, GFP_KERNEL) == 0) {
-		ret = -ENOMEM;
-		DRM_DEBUG_DRIVER("idr allocation failed\n");
-		goto err_out;
-	}
-
-	ret = idr_get_new_above(&file_priv->context_idr, ctx,
-				DEFAULT_CONTEXT_ID + 1, &id);
-	if (ret == 0)
-		ctx->id = id;
-
-	if (ret == -EAGAIN)
-		goto again;
-	else if (ret)
+	ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID + 1, 0,
+			GFP_KERNEL);
+	if (ret < 0)
 		goto err_out;
+	ctx->id = ret;
 
 	return ctx;
 
@@ -58,7 +58,6 @@ static int sis_driver_unload(struct drm_device *dev)
 {
 	drm_sis_private_t *dev_priv = dev->dev_private;
 
-	idr_remove_all(&dev_priv->object_idr);
 	idr_destroy(&dev_priv->object_idr);
 
 	kfree(dev_priv);

@@ -128,17 +128,10 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
 	if (retval)
 		goto fail_alloc;
 
-again:
-	if (idr_pre_get(&dev_priv->object_idr, GFP_KERNEL) == 0) {
-		retval = -ENOMEM;
-		goto fail_idr;
-	}
-
-	retval = idr_get_new_above(&dev_priv->object_idr, item, 1, &user_key);
-	if (retval == -EAGAIN)
-		goto again;
-	if (retval)
+	retval = idr_alloc(&dev_priv->object_idr, item, 1, 0, GFP_KERNEL);
+	if (retval < 0)
 		goto fail_idr;
+	user_key = retval;
 
 	list_add(&item->owner_list, &file_priv->obj_list);
 	mutex_unlock(&dev->struct_mutex);
|
|||
{
|
||||
drm_via_private_t *dev_priv = dev->dev_private;
|
||||
|
||||
idr_remove_all(&dev_priv->object_idr);
|
||||
idr_destroy(&dev_priv->object_idr);
|
||||
|
||||
kfree(dev_priv);
|
||||
|
|
|
@ -148,17 +148,10 @@ int via_mem_alloc(struct drm_device *dev, void *data,
|
|||
if (retval)
|
||||
goto fail_alloc;
|
||||
|
||||
again:
|
||||
if (idr_pre_get(&dev_priv->object_idr, GFP_KERNEL) == 0) {
|
||||
retval = -ENOMEM;
|
||||
goto fail_idr;
|
||||
}
|
||||
|
||||
retval = idr_get_new_above(&dev_priv->object_idr, item, 1, &user_key);
|
||||
if (retval == -EAGAIN)
|
||||
goto again;
|
||||
if (retval)
|
||||
retval = idr_alloc(&dev_priv->object_idr, item, 1, 0, GFP_KERNEL);
|
||||
if (retval < 0)
|
||||
goto fail_idr;
|
||||
user_key = retval;
|
||||
|
||||
list_add(&item->owner_list, &file_priv->obj_list);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
|
|
@@ -177,17 +177,16 @@ int vmw_resource_alloc_id(struct vmw_resource *res)
 
 	BUG_ON(res->id != -1);
 
-	do {
-		if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
-			return -ENOMEM;
+	idr_preload(GFP_KERNEL);
+	write_lock(&dev_priv->resource_lock);
 
-		write_lock(&dev_priv->resource_lock);
-		ret = idr_get_new_above(idr, res, 1, &res->id);
-		write_unlock(&dev_priv->resource_lock);
+	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
+	if (ret >= 0)
+		res->id = ret;
 
-	} while (ret == -EAGAIN);
-
-	return ret;
+	write_unlock(&dev_priv->resource_lock);
+	idr_preload_end();
+	return ret < 0 ? ret : 0;
 }
 
 /**
@@ -935,25 +935,17 @@ out_list:
 */
 int i2c_add_adapter(struct i2c_adapter *adapter)
 {
-	int id, res = 0;
-
-retry:
-	if (idr_pre_get(&i2c_adapter_idr, GFP_KERNEL) == 0)
-		return -ENOMEM;
+	int id;
 
 	mutex_lock(&core_lock);
-	/* "above" here means "above or equal to", sigh */
-	res = idr_get_new_above(&i2c_adapter_idr, adapter,
-				__i2c_first_dynamic_bus_num, &id);
+	id = idr_alloc(&i2c_adapter_idr, adapter,
+		       __i2c_first_dynamic_bus_num, 0, GFP_KERNEL);
 	mutex_unlock(&core_lock);
 
-	if (res < 0) {
-		if (res == -EAGAIN)
-			goto retry;
-		return res;
-	}
+	if (id < 0)
+		return id;
 
 	adapter->nr = id;
-
 	return i2c_register_adapter(adapter);
 }
 EXPORT_SYMBOL(i2c_add_adapter);

@@ -984,33 +976,17 @@ EXPORT_SYMBOL(i2c_add_adapter);
 int i2c_add_numbered_adapter(struct i2c_adapter *adap)
 {
 	int id;
-	int status;
 
 	if (adap->nr == -1) /* -1 means dynamically assign bus id */
 		return i2c_add_adapter(adap);
 	if (adap->nr & ~MAX_IDR_MASK)
 		return -EINVAL;
 
-retry:
-	if (idr_pre_get(&i2c_adapter_idr, GFP_KERNEL) == 0)
-		return -ENOMEM;
-
 	mutex_lock(&core_lock);
-	/* "above" here means "above or equal to", sigh;
-	 * we need the "equal to" result to force the result
-	 */
-	status = idr_get_new_above(&i2c_adapter_idr, adap, adap->nr, &id);
-	if (status == 0 && id != adap->nr) {
-		status = -EBUSY;
-		idr_remove(&i2c_adapter_idr, id);
-	}
+	id = idr_alloc(&i2c_adapter_idr, adap, adap->nr, adap->nr + 1,
+		       GFP_KERNEL);
 	mutex_unlock(&core_lock);
-	if (status == -EAGAIN)
-		goto retry;
 
-	if (status == 0)
-		status = i2c_register_adapter(adap);
-	return status;
+	if (id < 0)
+		return id == -ENOSPC ? -EBUSY : id;
+	return i2c_register_adapter(adap);
 }
 EXPORT_SYMBOL_GPL(i2c_add_numbered_adapter);
@@ -382,20 +382,21 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
 static int cm_alloc_id(struct cm_id_private *cm_id_priv)
 {
 	unsigned long flags;
-	int ret, id;
+	int id;
 	static int next_id;
 
-	do {
-		spin_lock_irqsave(&cm.lock, flags);
-		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv,
-					next_id, &id);
-		if (!ret)
-			next_id = ((unsigned) id + 1) & MAX_IDR_MASK;
-		spin_unlock_irqrestore(&cm.lock, flags);
-	} while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) );
+	idr_preload(GFP_KERNEL);
+	spin_lock_irqsave(&cm.lock, flags);
+
+	id = idr_alloc(&cm.local_id_table, cm_id_priv, next_id, 0, GFP_NOWAIT);
+	if (id >= 0)
+		next_id = max(id + 1, 0);
+
+	spin_unlock_irqrestore(&cm.lock, flags);
+	idr_preload_end();
 
 	cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
-	return ret;
+	return id < 0 ? id : 0;
 }
 
 static void cm_free_id(__be32 local_id)

@@ -3844,7 +3845,6 @@ static int __init ib_cm_init(void)
 	cm.remote_sidr_table = RB_ROOT;
 	idr_init(&cm.local_id_table);
 	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
-	idr_pre_get(&cm.local_id_table, GFP_KERNEL);
 	INIT_LIST_HEAD(&cm.timewait_list);
 
 	ret = class_register(&cm_class);
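cm_alloc_id() keeps its old cyclic behaviour on top of idr_alloc(): each allocation starts searching at next_id, and on success the start point advances past the id just handed out, using max() to wrap back to 0 when id + 1 overflows. In outline (my_idr and ptr are placeholders):

    static int next_id;

    id = idr_alloc(&my_idr, ptr, next_id, 0, GFP_NOWAIT);
    if (id >= 0)
    	next_id = max(id + 1, 0);	/* wrap to 0 on signed overflow */

This replaces the old masking with MAX_IDR_MASK and is essentially the pattern that was later factored out into idr_alloc_cyclic().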
@@ -2143,33 +2143,23 @@ static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
 			  unsigned short snum)
 {
 	struct rdma_bind_list *bind_list;
-	int port, ret;
+	int ret;
 
 	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
 	if (!bind_list)
 		return -ENOMEM;
 
-	do {
-		ret = idr_get_new_above(ps, bind_list, snum, &port);
-	} while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));
-
-	if (ret)
-		goto err1;
-
-	if (port != snum) {
-		ret = -EADDRNOTAVAIL;
-		goto err2;
-	}
+	ret = idr_alloc(ps, bind_list, snum, snum + 1, GFP_KERNEL);
+	if (ret < 0)
+		goto err;
 
 	bind_list->ps = ps;
-	bind_list->port = (unsigned short) port;
+	bind_list->port = (unsigned short)ret;
 	cma_bind_port(bind_list, id_priv);
 	return 0;
-err2:
-	idr_remove(ps, port);
-err1:
+err:
 	kfree(bind_list);
-	return ret;
+	return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
 }
 
 static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)

@@ -2214,10 +2204,9 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
 {
 	struct rdma_id_private *cur_id;
 	struct sockaddr *addr, *cur_addr;
-	struct hlist_node *node;
 
 	addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
-	hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
+	hlist_for_each_entry(cur_id, &bind_list->owners, node) {
 		if (id_priv == cur_id)
 			continue;
 
@@ -118,14 +118,13 @@ static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
 {
 	struct hlist_head *bucket;
 	struct ib_pool_fmr *fmr;
-	struct hlist_node *pos;
 
 	if (!pool->cache_bucket)
 		return NULL;
 
 	bucket = pool->cache_bucket + ib_fmr_hash(*page_list);
 
-	hlist_for_each_entry(fmr, pos, bucket, cache_node)
+	hlist_for_each_entry(fmr, bucket, cache_node)
 		if (io_virtual_address == fmr->io_virtual_address &&
 		    page_list_len == fmr->page_list_len &&
 		    !memcmp(page_list, fmr->page_list,
@@ -611,19 +611,21 @@ static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
 
 static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
 {
+	bool preload = gfp_mask & __GFP_WAIT;
 	unsigned long flags;
 	int ret, id;
 
-retry:
-	if (!idr_pre_get(&query_idr, gfp_mask))
-		return -ENOMEM;
+	if (preload)
+		idr_preload(gfp_mask);
 	spin_lock_irqsave(&idr_lock, flags);
-	ret = idr_get_new(&query_idr, query, &id);
+
+	id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);
+
 	spin_unlock_irqrestore(&idr_lock, flags);
-	if (ret == -EAGAIN)
-		goto retry;
-	if (ret)
-		return ret;
+	if (preload)
+		idr_preload_end();
+	if (id < 0)
+		return id;
 
 	query->mad_buf->timeout_ms = timeout_ms;
 	query->mad_buf->context[0] = query;
@@ -176,7 +176,6 @@ static void ib_ucm_cleanup_events(struct ib_ucm_context *ctx)
 static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
 {
 	struct ib_ucm_context *ctx;
-	int result;
 
 	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
 	if (!ctx)

@@ -187,17 +186,10 @@ static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
 	ctx->file = file;
 	INIT_LIST_HEAD(&ctx->events);
 
-	do {
-		result = idr_pre_get(&ctx_id_table, GFP_KERNEL);
-		if (!result)
-			goto error;
-
-		mutex_lock(&ctx_id_mutex);
-		result = idr_get_new(&ctx_id_table, ctx, &ctx->id);
-		mutex_unlock(&ctx_id_mutex);
-	} while (result == -EAGAIN);
-
-	if (result)
+	mutex_lock(&ctx_id_mutex);
+	ctx->id = idr_alloc(&ctx_id_table, ctx, 0, 0, GFP_KERNEL);
+	mutex_unlock(&ctx_id_mutex);
+	if (ctx->id < 0)
 		goto error;
 
 	list_add_tail(&ctx->file_list, &file->ctxs);
@@ -145,7 +145,6 @@ static void ucma_put_ctx(struct ucma_context *ctx)
 static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
 {
 	struct ucma_context *ctx;
-	int ret;
 
 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)

@@ -156,17 +155,10 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
 	INIT_LIST_HEAD(&ctx->mc_list);
 	ctx->file = file;
 
-	do {
-		ret = idr_pre_get(&ctx_idr, GFP_KERNEL);
-		if (!ret)
-			goto error;
-
-		mutex_lock(&mut);
-		ret = idr_get_new(&ctx_idr, ctx, &ctx->id);
-		mutex_unlock(&mut);
-	} while (ret == -EAGAIN);
-
-	if (ret)
+	mutex_lock(&mut);
+	ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL);
+	mutex_unlock(&mut);
+	if (ctx->id < 0)
 		goto error;
 
 	list_add_tail(&ctx->list, &file->ctx_list);

@@ -180,23 +172,15 @@ error:
 static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
 {
 	struct ucma_multicast *mc;
-	int ret;
 
 	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
 	if (!mc)
 		return NULL;
 
-	do {
-		ret = idr_pre_get(&multicast_idr, GFP_KERNEL);
-		if (!ret)
-			goto error;
-
-		mutex_lock(&mut);
-		ret = idr_get_new(&multicast_idr, mc, &mc->id);
-		mutex_unlock(&mut);
-	} while (ret == -EAGAIN);
-
-	if (ret)
+	mutex_lock(&mut);
+	mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL);
+	mutex_unlock(&mut);
+	if (mc->id < 0)
 		goto error;
 
 	mc->ctx = ctx;
@@ -125,18 +125,17 @@ static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
 {
 	int ret;
 
-retry:
-	if (!idr_pre_get(idr, GFP_KERNEL))
-		return -ENOMEM;
-
+	idr_preload(GFP_KERNEL);
 	spin_lock(&ib_uverbs_idr_lock);
-	ret = idr_get_new(idr, uobj, &uobj->id);
+
+	ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
+	if (ret >= 0)
+		uobj->id = ret;
+
 	spin_unlock(&ib_uverbs_idr_lock);
+	idr_preload_end();
 
-	if (ret == -EAGAIN)
-		goto retry;
-
-	return ret;
+	return ret < 0 ? ret : 0;
 }
 
 void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
@@ -382,14 +382,17 @@ static int c2_alloc_qpn(struct c2_dev *c2dev, struct c2_qp *qp)
 {
 	int ret;
 
-	do {
-		spin_lock_irq(&c2dev->qp_table.lock);
-		ret = idr_get_new_above(&c2dev->qp_table.idr, qp,
-					c2dev->qp_table.last++, &qp->qpn);
-		spin_unlock_irq(&c2dev->qp_table.lock);
-	} while ((ret == -EAGAIN) &&
-		 idr_pre_get(&c2dev->qp_table.idr, GFP_KERNEL));
-	return ret;
+	idr_preload(GFP_KERNEL);
+	spin_lock_irq(&c2dev->qp_table.lock);
+
+	ret = idr_alloc(&c2dev->qp_table.idr, qp, c2dev->qp_table.last++, 0,
+			GFP_NOWAIT);
+	if (ret >= 0)
+		qp->qpn = ret;
+
+	spin_unlock_irq(&c2dev->qp_table.lock);
+	idr_preload_end();
+	return ret < 0 ? ret : 0;
 }
 
 static void c2_free_qpn(struct c2_dev *c2dev, int qpn)
@@ -153,19 +153,17 @@ static inline int insert_handle(struct iwch_dev *rhp, struct idr *idr,
 				void *handle, u32 id)
 {
 	int ret;
-	int newid;
-
-	do {
-		if (!idr_pre_get(idr, GFP_KERNEL)) {
-			return -ENOMEM;
-		}
-		spin_lock_irq(&rhp->lock);
-		ret = idr_get_new_above(idr, handle, id, &newid);
-		BUG_ON(newid != id);
-		spin_unlock_irq(&rhp->lock);
-	} while (ret == -EAGAIN);
 
-	return ret;
+	idr_preload(GFP_KERNEL);
+	spin_lock_irq(&rhp->lock);
+
+	ret = idr_alloc(idr, handle, id, id + 1, GFP_NOWAIT);
+
+	spin_unlock_irq(&rhp->lock);
+	idr_preload_end();
+
+	BUG_ON(ret == -ENOSPC);
+	return ret < 0 ? ret : 0;
 }
 
 static inline void remove_handle(struct iwch_dev *rhp, struct idr *idr, u32 id)
@@ -260,20 +260,21 @@ static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr,
 				 void *handle, u32 id, int lock)
 {
 	int ret;
-	int newid;
 
-	do {
-		if (!idr_pre_get(idr, lock ? GFP_KERNEL : GFP_ATOMIC))
-			return -ENOMEM;
-		if (lock)
-			spin_lock_irq(&rhp->lock);
-		ret = idr_get_new_above(idr, handle, id, &newid);
-		BUG_ON(!ret && newid != id);
-		if (lock)
-			spin_unlock_irq(&rhp->lock);
-	} while (ret == -EAGAIN);
+	if (lock) {
+		idr_preload(GFP_KERNEL);
+		spin_lock_irq(&rhp->lock);
+	}
+
+	ret = idr_alloc(idr, handle, id, id + 1, GFP_ATOMIC);
+
+	if (lock) {
+		spin_unlock_irq(&rhp->lock);
+		idr_preload_end();
+	}
 
-	return ret;
+	BUG_ON(ret == -ENOSPC);
+	return ret < 0 ? ret : 0;
 }
 
 static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
@@ -128,7 +128,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
 	void *vpage;
 	u32 counter;
 	u64 rpage, cqx_fec, h_ret;
-	int ipz_rc, ret, i;
+	int ipz_rc, i;
 	unsigned long flags;
 
 	if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
@@ -163,32 +163,19 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
 	adapter_handle = shca->ipz_hca_handle;
 	param.eq_handle = shca->eq.ipz_eq_handle;
 
-	do {
-		if (!idr_pre_get(&ehca_cq_idr, GFP_KERNEL)) {
-			cq = ERR_PTR(-ENOMEM);
-			ehca_err(device, "Can't reserve idr nr. device=%p",
-				 device);
-			goto create_cq_exit1;
-		}
-
-		write_lock_irqsave(&ehca_cq_idr_lock, flags);
-		ret = idr_get_new(&ehca_cq_idr, my_cq, &my_cq->token);
-		write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
-	} while (ret == -EAGAIN);
+	idr_preload(GFP_KERNEL);
+	write_lock_irqsave(&ehca_cq_idr_lock, flags);
+	my_cq->token = idr_alloc(&ehca_cq_idr, my_cq, 0, 0x2000000, GFP_NOWAIT);
+	write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+	idr_preload_end();
 
-	if (ret) {
+	if (my_cq->token < 0) {
 		cq = ERR_PTR(-ENOMEM);
 		ehca_err(device, "Can't allocate new idr entry. device=%p",
 			 device);
 		goto create_cq_exit1;
 	}
 
-	if (my_cq->token > 0x1FFFFFF) {
-		cq = ERR_PTR(-ENOMEM);
-		ehca_err(device, "Invalid number of cq. device=%p", device);
-		goto create_cq_exit2;
-	}
-
 	/*
 	 * CQs maximum depth is 4GB-64, but we need additional 20 as buffer
 	 * for receiving errors CQEs.
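
Editor's note: because idr_alloc() takes an exclusive upper bound, passing end = 0x2000000 guarantees the returned token fits in 25 bits, so ehca's old post-hoc "token > 0x1FFFFFF" check can simply be deleted; range exhaustion now surfaces as -ENOSPC from the allocator itself. A sketch of the bounded form, where EHCA_TOKEN_LIMIT is an assumed name, not one from this diff:

	#include <linux/idr.h>

	#define EHCA_TOKEN_LIMIT 0x2000000	/* ids stay below 2^25 */

	int alloc_bounded_token(struct idr *idr, void *obj)
	{
		/* end is exclusive: valid results are 0 .. 0x1FFFFFF */
		int token = idr_alloc(idr, obj, 0, EHCA_TOKEN_LIMIT, GFP_KERNEL);

		if (token == -ENOSPC)	/* replaces the old > 0x1FFFFFF check */
			return -ENOMEM;
		return token;
	}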
@@ -636,30 +636,26 @@ static struct ehca_qp *internal_create_qp(
 	my_qp->send_cq =
 		container_of(init_attr->send_cq, struct ehca_cq, ib_cq);
 
-	do {
-		if (!idr_pre_get(&ehca_qp_idr, GFP_KERNEL)) {
-			ret = -ENOMEM;
-			ehca_err(pd->device, "Can't reserve idr resources.");
-			goto create_qp_exit0;
-		}
+	idr_preload(GFP_KERNEL);
+	write_lock_irqsave(&ehca_qp_idr_lock, flags);
 
-		write_lock_irqsave(&ehca_qp_idr_lock, flags);
-		ret = idr_get_new(&ehca_qp_idr, my_qp, &my_qp->token);
-		write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
-	} while (ret == -EAGAIN);
+	ret = idr_alloc(&ehca_qp_idr, my_qp, 0, 0x2000000, GFP_NOWAIT);
+	if (ret >= 0)
+		my_qp->token = ret;
 
-	if (ret) {
-		ret = -ENOMEM;
-		ehca_err(pd->device, "Can't allocate new idr entry.");
-		goto create_qp_exit0;
-	}
+	write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+	idr_preload_end();
+	if (ret < 0) {
+		if (ret == -ENOSPC) {
+			ret = -EINVAL;
+			ehca_err(pd->device, "Invalid number of qp");
+		} else {
+			ret = -ENOMEM;
+			ehca_err(pd->device, "Can't allocate new idr entry.");
+		}
 
-	if (my_qp->token > 0x1FFFFFF) {
-		ret = -EINVAL;
-		ehca_err(pd->device, "Invalid number of qp");
-		goto create_qp_exit1;
+		goto create_qp_exit0;
 	}
 
 	if (has_srq)
 		parms.srq_token = my_qp->token;
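
Editor's note: the QP path keeps ehca's two distinct error codes by inspecting what idr_alloc() returned: -ENOSPC (bounded range exhausted) maps to -EINVAL, anything else to -ENOMEM. A minimal sketch of that mapping, with an illustrative function name:

	#include <linux/idr.h>

	static int map_idr_err(int ret)
	{
		if (ret >= 0)
			return 0;
		/* -ENOSPC: the bounded id range is exhausted */
		return ret == -ENOSPC ? -EINVAL : -ENOMEM;
	}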
@@ -194,11 +194,6 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
 	struct ipath_devdata *dd;
 	int ret;
 
-	if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
-		dd = ERR_PTR(-ENOMEM);
-		goto bail;
-	}
-
 	dd = vzalloc(sizeof(*dd));
 	if (!dd) {
 		dd = ERR_PTR(-ENOMEM);
@@ -206,9 +201,10 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
 	}
 	dd->ipath_unit = -1;
 
+	idr_preload(GFP_KERNEL);
 	spin_lock_irqsave(&ipath_devs_lock, flags);
 
-	ret = idr_get_new(&unit_table, dd, &dd->ipath_unit);
+	ret = idr_alloc(&unit_table, dd, 0, 0, GFP_NOWAIT);
 	if (ret < 0) {
 		printk(KERN_ERR IPATH_DRV_NAME
 			": Could not allocate unit ID: error %d\n", -ret);
@@ -216,6 +212,7 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
 		dd = ERR_PTR(ret);
 		goto bail_unlock;
 	}
+	dd->ipath_unit = ret;
 
 	dd->pcidev = pdev;
 	pci_set_drvdata(pdev, dd);
@@ -224,7 +221,7 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
 
 bail_unlock:
 	spin_unlock_irqrestore(&ipath_devs_lock, flags);
-
+	idr_preload_end();
 bail:
 	return dd;
 }
@@ -2503,11 +2500,6 @@ static int __init infinipath_init(void)
 	 * the PCI subsystem.
 	 */
 	idr_init(&unit_table);
-	if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
-		printk(KERN_ERR IPATH_DRV_NAME ": idr_pre_get() failed\n");
-		ret = -ENOMEM;
-		goto bail;
-	}
 
 	ret = pci_register_driver(&ipath_driver);
 	if (ret < 0) {
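
Editor's note: the ipath hunks show two consequences of the new API. First, the error path must still run idr_preload_end(), which is why it lands after the bail_unlock label. Second, module init no longer needs a warm-up idr_pre_get() call, since each allocation now preloads for itself. A sketch of the whole flow, with hypothetical names (unit_ids, unit_lock, register_unit):

	#include <linux/idr.h>
	#include <linux/spinlock.h>

	static DEFINE_IDR(unit_ids);
	static DEFINE_SPINLOCK(unit_lock);

	int register_unit(void *dev)
	{
		int ret;

		idr_preload(GFP_KERNEL);
		spin_lock(&unit_lock);
		ret = idr_alloc(&unit_ids, dev, 0, 0, GFP_NOWAIT);
		spin_unlock(&unit_lock);
		idr_preload_end();	/* must run on success and failure alike */

		return ret;	/* unit number, or -errno */
	}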
Some files were not shown because too many files have changed in this diff.