Merge branch 'master' of /home/trondmy/repositories/git/linux-2.6/
commit dd504ea16f

@@ -551,10 +551,12 @@
 <function>spin_lock_irqsave()</function>, which is a superset
 of all other spinlock primitives.
 </para>

 <table>
+<title>Table of Locking Requirements</title>
 <tgroup cols="11">
 <tbody>

 <row>
 <entry></entry>
 <entry>IRQ Handler A</entry>

@@ -576,97 +578,128 @@

 <row>
 <entry>IRQ Handler B</entry>
-<entry>spin_lock_irqsave</entry>
+<entry>SLIS</entry>
+<entry>None</entry>
 </row>

 <row>
 <entry>Softirq A</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SL</entry>
 </row>

 <row>
 <entry>Softirq B</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SL</entry>
+<entry>SL</entry>
 </row>

 <row>
 <entry>Tasklet A</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SL</entry>
+<entry>SL</entry>
+<entry>None</entry>
 </row>

 <row>
 <entry>Tasklet B</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SL</entry>
+<entry>SL</entry>
+<entry>SL</entry>
+<entry>None</entry>
 </row>

 <row>
 <entry>Timer A</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SL</entry>
+<entry>SL</entry>
+<entry>SL</entry>
+<entry>SL</entry>
+<entry>None</entry>
 </row>

 <row>
 <entry>Timer B</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SL</entry>
+<entry>SL</entry>
+<entry>SL</entry>
+<entry>SL</entry>
+<entry>SL</entry>
+<entry>None</entry>
 </row>

 <row>
 <entry>User Context A</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>None</entry>
 </row>

 <row>
 <entry>User Context B</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>down_interruptible</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>DI</entry>
+<entry>None</entry>
 </row>

 </tbody>
 </tgroup>
 </table>

+<table>
+<title>Legend for Locking Requirements Table</title>
+<tgroup cols="2">
+<tbody>
+
+<row>
+<entry>SLIS</entry>
+<entry>spin_lock_irqsave</entry>
+</row>
+<row>
+<entry>SLI</entry>
+<entry>spin_lock_irq</entry>
+</row>
+<row>
+<entry>SL</entry>
+<entry>spin_lock</entry>
+</row>
+<row>
+<entry>SLBH</entry>
+<entry>spin_lock_bh</entry>
+</row>
+<row>
+<entry>DI</entry>
+<entry>down_interruptible</entry>
+</row>
+
+</tbody>
+</tgroup>
+</table>

 </sect1>
 </chapter>

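Note (not part of the patch): the table's abbreviations reduce to one rule of
thumb: take the strongest primitive required by any pair of contexts that
share the data. A minimal sketch, with invented foo_* names:

    #include <linux/spinlock.h>
    #include <linux/interrupt.h>

    /* Shared between an IRQ handler and user context: the user side
     * needs spin_lock_irq (SLI); the handler uses the irqsave form
     * (SLIS), the superset that is safe from any context. */
    static DEFINE_SPINLOCK(irq_side_lock);
    /* Shared only between user context and a tasklet: spin_lock_bh
     * (SLBH) is enough, since only local softirqs must be excluded. */
    static DEFINE_SPINLOCK(bh_side_lock);

    static irqreturn_t foo_irq(int irq, void *dev)
    {
            unsigned long flags;

            spin_lock_irqsave(&irq_side_lock, flags);   /* SLIS */
            /* ... update data shared with user context ... */
            spin_unlock_irqrestore(&irq_side_lock, flags);
            return IRQ_HANDLED;
    }

    static void foo_write_path(void)    /* user context */
    {
            spin_lock_irq(&irq_side_lock);              /* SLI */
            /* ... */
            spin_unlock_irq(&irq_side_lock);

            spin_lock_bh(&bh_side_lock);                /* SLBH */
            /* ... */
            spin_unlock_bh(&bh_side_lock);
    }
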
@@ -111,7 +111,9 @@ setting up a platform_device using the GPIO, is mark its direction:

 The return value is zero for success, else a negative errno. It should
 be checked, since the get/set calls don't have error returns and since
-misconfiguration is possible. (These calls could sleep.)
+misconfiguration is possible. You should normally issue these calls from
+a task context. However, for spinlock-safe GPIOs it's OK to use them
+before tasking is enabled, as part of early board setup.

 For output GPIOs, the value provided becomes the initial output value.
 This helps avoid signal glitching during system startup.

@@ -197,7 +199,9 @@ However, many platforms don't currently support this mechanism.

 Passing invalid GPIO numbers to gpio_request() will fail, as will requesting
 GPIOs that have already been claimed with that call. The return value of
-gpio_request() must be checked. (These calls could sleep.)
+gpio_request() must be checked. You should normally issue these calls from
+a task context. However, for spinlock-safe GPIOs it's OK to request GPIOs
+before tasking is enabled, as part of early board setup.

 These calls serve two basic purposes. One is marking the signals which
 are actually in use as GPIOs, for better diagnostics; systems may have

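Note (not part of the patch): both hunks make the same point, that
gpio_request() and the gpio_direction_*() calls are the only places errors
surface, so they must be checked. A sketch of early board setup;
BOARD_LED_GPIO and board_led_setup() are invented names:

    #include <asm/gpio.h>

    #define BOARD_LED_GPIO 42    /* hypothetical pin number */

    static int __init board_led_setup(void)
    {
            int err;

            err = gpio_request(BOARD_LED_GPIO, "board-led");
            if (err)
                    return err;

            /* The later gpio_set_value()/gpio_get_value() calls have no
             * error return, so a misconfigured pin must be caught here. */
            err = gpio_direction_output(BOARD_LED_GPIO, 1); /* initial: on */
            if (err)
                    gpio_free(BOARD_LED_GPIO);
            return err;
    }
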
@@ -242,6 +242,9 @@ void decode_numa_list(int *numa, char *t)

         memset(numa, 0, MAX_NODES * sizeof(int));

+        if (!t)
+                return;
+
         while (*t == 'N') {
                 t++;
                 node = strtoul(t, &t, 10);

@@ -386,7 +389,9 @@ void report(struct slabinfo *s)
 {
         if (strcmp(s->name, "*") == 0)
                 return;
-        printf("\nSlabcache: %-20s Aliases: %2d Order : %2d\n", s->name, s->aliases, s->order);
+
+        printf("\nSlabcache: %-20s Aliases: %2d Order : %2d Objects: %d\n",
+                s->name, s->aliases, s->order, s->objects);
         if (s->hwcache_align)
                 printf("** Hardware cacheline aligned\n");
         if (s->cache_dma)

@@ -791,11 +796,11 @@ void totals(void)

         store_size(b1, total_size);store_size(b2, total_waste);
         store_size(b3, total_waste * 100 / total_used);
-        printf("Memory used: %6s # Loss : %6s MRatio: %6s%%\n", b1, b2, b3);
+        printf("Memory used: %6s # Loss : %6s MRatio:%6s%%\n", b1, b2, b3);

         store_size(b1, total_objects);store_size(b2, total_partobj);
         store_size(b3, total_partobj * 100 / total_objects);
-        printf("# Objects : %6s # PartObj: %6s ORatio: %6s%%\n", b1, b2, b3);
+        printf("# Objects : %6s # PartObj: %6s ORatio:%6s%%\n", b1, b2, b3);

         printf("\n");
         printf("Per Cache Average Min Max Total\n");

@@ -818,7 +823,7 @@ void totals(void)
         store_size(b1, avg_ppart);store_size(b2, min_ppart);
         store_size(b3, max_ppart);
         store_size(b4, total_partial * 100 / total_slabs);
-        printf("%%PartSlab %10s%% %10s%% %10s%% %10s%%\n",
+        printf("%%PartSlab%10s%% %10s%% %10s%% %10s%%\n",
                 b1, b2, b3, b4);

         store_size(b1, avg_partobj);store_size(b2, min_partobj);

@@ -830,7 +835,7 @@ void totals(void)
         store_size(b1, avg_ppartobj);store_size(b2, min_ppartobj);
         store_size(b3, max_ppartobj);
         store_size(b4, total_partobj * 100 / total_objects);
-        printf("%% PartObj %10s%% %10s%% %10s%% %10s%%\n",
+        printf("%% PartObj%10s%% %10s%% %10s%% %10s%%\n",
                 b1, b2, b3, b4);

         store_size(b1, avg_size);store_size(b2, min_size);

@@ -1100,6 +1105,8 @@ void output_slabs(void)
                         ops(slab);
                 else if (show_slab)
                         slabcache(slab);
+                else if (show_report)
+                        report(slab);
         }
 }

@@ -2689,13 +2689,13 @@ L: i2c@lm-sensors.org
 S: Maintained

 PARALLEL PORT SUPPORT
-L: linux-parport@lists.infradead.org
+L: linux-parport@lists.infradead.org (subscribers-only)
 S: Orphan

 PARIDE DRIVERS FOR PARALLEL PORT IDE DEVICES
 P: Tim Waugh
 M: tim@cyberelk.net
-L: linux-parport@lists.infradead.org
+L: linux-parport@lists.infradead.org (subscribers-only)
 W: http://www.torque.net/linux-pp.html
 S: Maintained

@@ -560,14 +560,6 @@ endchoice

 source "mm/Kconfig"

-config LARGE_ALLOCS
-        bool "Allow allocating large blocks (> 1MB) of memory"
-        help
-          Allow the slab memory allocator to keep chains for very large
-          memory sizes - upto 32MB. You may need this if your system has
-          a lot of RAM, and you need to able to allocate very large
-          contiguous chunks. If unsure, say N.
-
 config BFIN_DMA_5XX
         bool "Enable DMA Support"
         depends on (BF533 || BF532 || BF531 || BF537 || BF536 || BF534 || BF561)

@@ -102,14 +102,6 @@ config HIGHPTE
           with a lot of RAM, this can be wasteful of precious low memory.
           Setting this option will put user-space page tables in high memory.

-config LARGE_ALLOCS
-        bool "Allow allocating large blocks (> 1MB) of memory"
-        help
-          Allow the slab memory allocator to keep chains for very large memory
-          sizes - up to 32MB. You may need this if your system has a lot of
-          RAM, and you need to able to allocate very large contiguous chunks.
-          If unsure, say N.
-
 source "mm/Kconfig"

 choice

@@ -78,7 +78,7 @@ static void __cpuinit print_fixed(unsigned base, unsigned step, const mtrr_type*
 }

 /* Grab all of the MTRR state for this CPU into *state */
-void __init get_mtrr_state(void)
+void get_mtrr_state(void)
 {
         unsigned int i;
         struct mtrr_var_range *vrs;

@@ -639,7 +639,7 @@ static struct sysdev_driver mtrr_sysdev_driver = {
  * initialized (i.e. before smp_init()).
  *
  */
-void __init mtrr_bp_init(void)
+void mtrr_bp_init(void)
 {
         init_ifs();

@@ -421,7 +421,7 @@ void flush_tlb_mm (struct mm_struct * mm)
         }
         if (!cpus_empty(cpu_mask))
                 flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
+        check_pgt_cache();

         preempt_enable();
 }

@@ -470,14 +470,6 @@ config AVNET
         default y
         depends on (AVNET5282)

-config LARGE_ALLOCS
-        bool "Allow allocating large blocks (> 1MB) of memory"
-        help
-          Allow the slab memory allocator to keep chains for very large
-          memory sizes - upto 32MB. You may need this if your system has
-          a lot of RAM, and you need to able to allocate very large
-          contiguous chunks. If unsure, say N.
-
 config 4KSTACKS
         bool "Use 4Kb for kernel stacks instead of 8Kb"
         default y

@@ -71,9 +71,7 @@ spufs_init_once(void *p, struct kmem_cache * cachep, unsigned long flags)
 {
         struct spufs_inode_info *ei = p;

-        if (flags & SLAB_CTOR_CONSTRUCTOR) {
-                inode_init_once(&ei->vfs_inode);
-        }
+        inode_init_once(&ei->vfs_inode);
 }

 static struct inode *

@@ -240,14 +240,6 @@ menu "Processor type and features"
 config RESET_GUARD
         bool "Reset Guard"

-config LARGE_ALLOCS
-        bool "Allow allocating large blocks (> 1MB) of memory"
-        help
-          Allow the slab memory allocator to keep chains for very large
-          memory sizes - upto 32MB. You may need this if your system has
-          a lot of RAM, and you need to able to allocate very large
-          contiguous chunks. If unsure, say N.
-
 source "mm/Kconfig"

 endmenu

@@ -40,19 +40,19 @@ static nodemask_t nodes_found_map = NODE_MASK_NONE;
 #define NID_INVAL -1

 /* maps to convert between proximity domain and logical node ID */
-int __cpuinitdata pxm_to_node_map[MAX_PXM_DOMAINS]
+static int pxm_to_node_map[MAX_PXM_DOMAINS]
                 = { [0 ... MAX_PXM_DOMAINS - 1] = NID_INVAL };
-int __cpuinitdata node_to_pxm_map[MAX_NUMNODES]
+static int node_to_pxm_map[MAX_NUMNODES]
                 = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };

-int __cpuinit pxm_to_node(int pxm)
+int pxm_to_node(int pxm)
 {
         if (pxm < 0)
                 return NID_INVAL;
         return pxm_to_node_map[pxm];
 }

-int __cpuinit node_to_pxm(int node)
+int node_to_pxm(int node)
 {
         if (node < 0)
                 return PXM_INVAL;

@@ -940,9 +940,6 @@ static void ltree_entry_ctor(void *obj, struct kmem_cache *cache,
 {
         struct ltree_entry *le = obj;

-        if (flags & SLAB_CTOR_CONSTRUCTOR)
-                return;
-
         le->users = 0;
         init_rwsem(&le->mutex);
 }

@@ -59,7 +59,7 @@ comment "RTC interfaces"
         depends on RTC_CLASS

 config RTC_INTF_SYSFS
-        boolean "sysfs"
+        boolean "/sys/class/rtc/rtcN (sysfs)"
         depends on RTC_CLASS && SYSFS
         default RTC_CLASS
         help

@@ -70,7 +70,7 @@ config RTC_INTF_SYSFS
           will be called rtc-sysfs.

 config RTC_INTF_PROC
-        boolean "proc"
+        boolean "/proc/driver/rtc (procfs for rtc0)"
         depends on RTC_CLASS && PROC_FS
         default RTC_CLASS
         help

@@ -82,7 +82,7 @@ config RTC_INTF_PROC
           will be called rtc-proc.

 config RTC_INTF_DEV
-        boolean "dev"
+        boolean "/dev/rtcN (character devices)"
         depends on RTC_CLASS
         default RTC_CLASS
         help

@@ -371,7 +371,7 @@ static int __devinit omap_rtc_probe(struct platform_device *pdev)
                 goto fail;
         }
         platform_set_drvdata(pdev, rtc);
-        dev_set_devdata(&rtc->dev, mem);
+        dev_set_drvdata(&rtc->dev, mem);

         /* clear pending irqs, and set 1/second periodic,
          * which we'll use instead of update irqs

@@ -453,7 +453,7 @@ static int __devexit omap_rtc_remove(struct platform_device *pdev)
         free_irq(omap_rtc_timer, rtc);
         free_irq(omap_rtc_alarm, rtc);

-        release_resource(dev_get_devdata(&rtc->dev));
+        release_resource(dev_get_drvdata(&rtc->dev));
         rtc_device_unregister(rtc);
         return 0;
 }

@@ -894,7 +894,7 @@ static void autoconfig_16550a(struct uart_8250_port *up)
         quot = serial_dl_read(up);
         quot <<= 3;

-        status1 = serial_in(up, 0x04); /* EXCR1 */
+        status1 = serial_in(up, 0x04); /* EXCR2 */
         status1 &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */
         status1 |= 0x10; /* 1.625 divisor for baud_base --> 921600 */
         serial_outp(up, 0x04, status1);

@@ -2617,7 +2617,22 @@ void serial8250_suspend_port(int line)
  */
 void serial8250_resume_port(int line)
 {
-        uart_resume_port(&serial8250_reg, &serial8250_ports[line].port);
+        struct uart_8250_port *up = &serial8250_ports[line];
+
+        if (up->capabilities & UART_NATSEMI) {
+                unsigned char tmp;
+
+                /* Ensure it's still in high speed mode */
+                serial_outp(up, UART_LCR, 0xE0);
+
+                tmp = serial_in(up, 0x04); /* EXCR2 */
+                tmp &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */
+                tmp |= 0x10; /* 1.625 divisor for baud_base --> 921600 */
+                serial_outp(up, 0x04, tmp);
+
+                serial_outp(up, UART_LCR, 0);
+        }
+        uart_resume_port(&serial8250_reg, &up->port);
 }

 /*

@@ -2694,7 +2709,7 @@ static int serial8250_resume(struct platform_device *dev)
                 struct uart_8250_port *up = &serial8250_ports[i];

                 if (up->port.type != PORT_UNKNOWN && up->port.dev == &dev->dev)
-                        uart_resume_port(&serial8250_reg, &up->port);
+                        serial8250_resume_port(i);
         }

         return 0;

@@ -69,33 +69,40 @@

 static const struct pci_device_id icom_pci_table[] = {
         {
-              .vendor = PCI_VENDOR_ID_IBM,
-              .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_1,
-              .subvendor = PCI_ANY_ID,
-              .subdevice = PCI_ANY_ID,
-              .driver_data = ADAPTER_V1,
-         },
+                .vendor = PCI_VENDOR_ID_IBM,
+                .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_1,
+                .subvendor = PCI_ANY_ID,
+                .subdevice = PCI_ANY_ID,
+                .driver_data = ADAPTER_V1,
+        },
         {
-              .vendor = PCI_VENDOR_ID_IBM,
-              .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2,
-              .subvendor = PCI_VENDOR_ID_IBM,
-              .subdevice = PCI_DEVICE_ID_IBM_ICOM_V2_TWO_PORTS_RVX,
-              .driver_data = ADAPTER_V2,
-         },
+                .vendor = PCI_VENDOR_ID_IBM,
+                .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2,
+                .subvendor = PCI_VENDOR_ID_IBM,
+                .subdevice = PCI_DEVICE_ID_IBM_ICOM_V2_TWO_PORTS_RVX,
+                .driver_data = ADAPTER_V2,
+        },
         {
-              .vendor = PCI_VENDOR_ID_IBM,
-              .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2,
-              .subvendor = PCI_VENDOR_ID_IBM,
-              .subdevice = PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM,
-              .driver_data = ADAPTER_V2,
-         },
+                .vendor = PCI_VENDOR_ID_IBM,
+                .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2,
+                .subvendor = PCI_VENDOR_ID_IBM,
+                .subdevice = PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM,
+                .driver_data = ADAPTER_V2,
+        },
         {
-              .vendor = PCI_VENDOR_ID_IBM,
-              .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2,
-              .subvendor = PCI_VENDOR_ID_IBM,
-              .subdevice = PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL,
-              .driver_data = ADAPTER_V2,
-         },
+                .vendor = PCI_VENDOR_ID_IBM,
+                .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2,
+                .subvendor = PCI_VENDOR_ID_IBM,
+                .subdevice = PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL,
+                .driver_data = ADAPTER_V2,
+        },
+        {
+                .vendor = PCI_VENDOR_ID_IBM,
+                .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2,
+                .subvendor = PCI_VENDOR_ID_IBM,
+                .subdevice = PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM_PCIE,
+                .driver_data = ADAPTER_V2,
+        },
         {}
 };

@@ -368,9 +368,14 @@ static const char *vgacon_startup(void)
 #endif
         }

+        /* SCREEN_INFO initialized? */
+        if ((ORIG_VIDEO_MODE == 0) &&
+            (ORIG_VIDEO_LINES == 0) &&
+            (ORIG_VIDEO_COLS == 0))
+                goto no_vga;
+
         /* VGA16 modes are not handled by VGACON */
-        if ((ORIG_VIDEO_MODE == 0x00) || /* SCREEN_INFO not initialized */
-            (ORIG_VIDEO_MODE == 0x0D) || /* 320x200/4 */
+        if ((ORIG_VIDEO_MODE == 0x0D) || /* 320x200/4 */
             (ORIG_VIDEO_MODE == 0x0E) || /* 640x200/4 */
             (ORIG_VIDEO_MODE == 0x10) || /* 640x350/4 */
             (ORIG_VIDEO_MODE == 0x12) || /* 640x480/4 */

@@ -232,8 +232,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
         struct adfs_inode_info *ei = (struct adfs_inode_info *) foo;

-        if (flags & SLAB_CTOR_CONSTRUCTOR)
-                inode_init_once(&ei->vfs_inode);
+        inode_init_once(&ei->vfs_inode);
 }

 static int init_inodecache(void)

@@ -87,11 +87,9 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
         struct affs_inode_info *ei = (struct affs_inode_info *) foo;

-        if (flags & SLAB_CTOR_CONSTRUCTOR) {
-                init_MUTEX(&ei->i_link_lock);
-                init_MUTEX(&ei->i_ext_lock);
-                inode_init_once(&ei->vfs_inode);
-        }
+        init_MUTEX(&ei->i_link_lock);
+        init_MUTEX(&ei->i_ext_lock);
+        inode_init_once(&ei->vfs_inode);
 }

 static int init_inodecache(void)

@@ -451,17 +451,15 @@ static void afs_i_init_once(void *_vnode, struct kmem_cache *cachep,
 {
         struct afs_vnode *vnode = _vnode;

-        if (flags & SLAB_CTOR_CONSTRUCTOR) {
-                memset(vnode, 0, sizeof(*vnode));
-                inode_init_once(&vnode->vfs_inode);
-                init_waitqueue_head(&vnode->update_waitq);
-                mutex_init(&vnode->permits_lock);
-                mutex_init(&vnode->validate_lock);
-                spin_lock_init(&vnode->writeback_lock);
-                spin_lock_init(&vnode->lock);
-                INIT_LIST_HEAD(&vnode->writebacks);
-                INIT_WORK(&vnode->cb_broken_work, afs_broken_callback_work);
-        }
+        memset(vnode, 0, sizeof(*vnode));
+        inode_init_once(&vnode->vfs_inode);
+        init_waitqueue_head(&vnode->update_waitq);
+        mutex_init(&vnode->permits_lock);
+        mutex_init(&vnode->validate_lock);
+        spin_lock_init(&vnode->writeback_lock);
+        spin_lock_init(&vnode->lock);
+        INIT_LIST_HEAD(&vnode->writebacks);
+        INIT_WORK(&vnode->cb_broken_work, afs_broken_callback_work);
 }

 /*

@@ -292,10 +292,8 @@ befs_destroy_inode(struct inode *inode)
 static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
         struct befs_inode_info *bi = (struct befs_inode_info *) foo;

-        if (flags & SLAB_CTOR_CONSTRUCTOR) {
-                inode_init_once(&bi->vfs_inode);
-        }
+        inode_init_once(&bi->vfs_inode);
 }

 static void

@@ -248,8 +248,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
         struct bfs_inode_info *bi = foo;

-        if (flags & SLAB_CTOR_CONSTRUCTOR)
-                inode_init_once(&bi->vfs_inode);
+        inode_init_once(&bi->vfs_inode);
 }

 static int init_inodecache(void)

@@ -458,17 +458,15 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
         struct bdev_inode *ei = (struct bdev_inode *) foo;
         struct block_device *bdev = &ei->bdev;

-        if (flags & SLAB_CTOR_CONSTRUCTOR) {
-                memset(bdev, 0, sizeof(*bdev));
-                mutex_init(&bdev->bd_mutex);
-                sema_init(&bdev->bd_mount_sem, 1);
-                INIT_LIST_HEAD(&bdev->bd_inodes);
-                INIT_LIST_HEAD(&bdev->bd_list);
+        memset(bdev, 0, sizeof(*bdev));
+        mutex_init(&bdev->bd_mutex);
+        sema_init(&bdev->bd_mount_sem, 1);
+        INIT_LIST_HEAD(&bdev->bd_inodes);
+        INIT_LIST_HEAD(&bdev->bd_list);
 #ifdef CONFIG_SYSFS
-                INIT_LIST_HEAD(&bdev->bd_holder_list);
+        INIT_LIST_HEAD(&bdev->bd_holder_list);
 #endif
-                inode_init_once(&ei->vfs_inode);
-        }
+        inode_init_once(&ei->vfs_inode);
 }

 static inline void __bd_forget(struct inode *inode)

fs/buffer.c

@@ -981,7 +981,8 @@ grow_dev_page(struct block_device *bdev, sector_t block,
         struct page *page;
         struct buffer_head *bh;

-        page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
+        page = find_or_create_page(inode->i_mapping, index,
+                mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
         if (!page)
                 return NULL;

@@ -2898,8 +2899,9 @@ static void recalc_bh_state(void)

 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
 {
-        struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
+        struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
         if (ret) {
+                INIT_LIST_HEAD(&ret->b_assoc_buffers);
                 get_cpu_var(bh_accounting).nr++;
                 recalc_bh_state();
                 put_cpu_var(bh_accounting);

@@ -2918,17 +2920,6 @@ void free_buffer_head(struct buffer_head *bh)
 }
 EXPORT_SYMBOL(free_buffer_head);

-static void
-init_buffer_head(void *data, struct kmem_cache *cachep, unsigned long flags)
-{
-        if (flags & SLAB_CTOR_CONSTRUCTOR) {
-                struct buffer_head * bh = (struct buffer_head *)data;
-
-                memset(bh, 0, sizeof(*bh));
-                INIT_LIST_HEAD(&bh->b_assoc_buffers);
-        }
-}
-
 static void buffer_exit_cpu(int cpu)
 {
         int i;

@@ -2955,12 +2946,8 @@ void __init buffer_init(void)
 {
         int nrpages;

-        bh_cachep = kmem_cache_create("buffer_head",
-                        sizeof(struct buffer_head), 0,
-                        (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
-                        SLAB_MEM_SPREAD),
-                        init_buffer_head,
-                        NULL);
+        bh_cachep = KMEM_CACHE(buffer_head,
+                        SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);

         /*
          * Limit the bh occupancy to 10% of ZONE_NORMAL

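Note (not part of the patch): KMEM_CACHE() derives the cache name, object size
and alignment from the struct itself. From memory of the 2.6.22 <linux/slab.h>
its shape is roughly the following; treat it as a sketch, not the exact macro:

    #define KMEM_CACHE(__struct, __flags)                             \
            kmem_cache_create(#__struct, sizeof(struct __struct),     \
                    __alignof__(struct __struct), (__flags), NULL, NULL)

With the constructor gone, alloc_buffer_head() switches to kmem_cache_zalloc()
and re-initializes the list head itself, which is why INIT_LIST_HEAD moved
into the allocation path above.
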
@@ -701,10 +701,8 @@ cifs_init_once(void *inode, struct kmem_cache * cachep, unsigned long flags)
 {
         struct cifsInodeInfo *cifsi = inode;

-        if (flags & SLAB_CTOR_CONSTRUCTOR) {
-                inode_init_once(&cifsi->vfs_inode);
-                INIT_LIST_HEAD(&cifsi->lockList);
-        }
+        inode_init_once(&cifsi->vfs_inode);
+        INIT_LIST_HEAD(&cifsi->lockList);
 }

 static int

@@ -62,8 +62,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
         struct coda_inode_info *ei = (struct coda_inode_info *) foo;

-        if (flags & SLAB_CTOR_CONSTRUCTOR)
-                inode_init_once(&ei->vfs_inode);
+        inode_init_once(&ei->vfs_inode);
 }

 int coda_init_inodecache(void)

fs/compat.c

@@ -2230,21 +2230,16 @@ asmlinkage long compat_sys_signalfd(int ufd,
 asmlinkage long compat_sys_timerfd(int ufd, int clockid, int flags,
                                    const struct compat_itimerspec __user *utmr)
 {
-        long res;
         struct itimerspec t;
         struct itimerspec __user *ut;

-        res = -EFAULT;
         if (get_compat_itimerspec(&t, utmr))
-                goto err_exit;
+                return -EFAULT;
         ut = compat_alloc_user_space(sizeof(*ut));
-        if (copy_to_user(ut, &t, sizeof(t)) )
-                goto err_exit;
+        if (copy_to_user(ut, &t, sizeof(t)))
+                return -EFAULT;

-        res = sys_timerfd(ufd, clockid, flags, ut);
-err_exit:
-        return res;
+        return sys_timerfd(ufd, clockid, flags, ut);
 }

 #endif /* CONFIG_TIMERFD */

@@ -1421,7 +1421,7 @@ int vfs_quota_off(struct super_block *sb, int type)
                 /* If quota was reenabled in the meantime, we have
                  * nothing to do */
                 if (!sb_has_quota_enabled(sb, cnt)) {
-                        mutex_lock(&toputinode[cnt]->i_mutex);
+                        mutex_lock_nested(&toputinode[cnt]->i_mutex, I_MUTEX_QUOTA);
                         toputinode[cnt]->i_flags &= ~(S_IMMUTABLE |
                                         S_NOATIME | S_NOQUOTA);
                         truncate_inode_pages(&toputinode[cnt]->i_data, 0);

@@ -583,8 +583,7 @@ inode_info_init_once(void *vptr, struct kmem_cache *cachep, unsigned long flags)
 {
         struct ecryptfs_inode_info *ei = (struct ecryptfs_inode_info *)vptr;

-        if (flags & SLAB_CTOR_CONSTRUCTOR)
-                inode_init_once(&ei->vfs_inode);
+        inode_init_once(&ei->vfs_inode);
 }

 static struct ecryptfs_cache_info {

@@ -364,18 +364,14 @@ static int fill_zeros_to_end_of_page(struct page *page, unsigned int to)
 {
         struct inode *inode = page->mapping->host;
         int end_byte_in_page;
-        char *page_virt;

         if ((i_size_read(inode) / PAGE_CACHE_SIZE) != page->index)
                 goto out;
         end_byte_in_page = i_size_read(inode) % PAGE_CACHE_SIZE;
         if (to > end_byte_in_page)
                 end_byte_in_page = to;
-        page_virt = kmap_atomic(page, KM_USER0);
-        memset((page_virt + end_byte_in_page), 0,
-               (PAGE_CACHE_SIZE - end_byte_in_page));
-        kunmap_atomic(page_virt, KM_USER0);
-        flush_dcache_page(page);
+        zero_user_page(page, end_byte_in_page,
+                       PAGE_CACHE_SIZE - end_byte_in_page, KM_USER0);
 out:
         return 0;
 }

@@ -740,7 +736,6 @@ int write_zeros(struct file *file, pgoff_t index, int start, int num_zeros)
 {
         int rc = 0;
         struct page *tmp_page;
-        char *tmp_page_virt;

         tmp_page = ecryptfs_get1page(file, index);
         if (IS_ERR(tmp_page)) {

@@ -757,10 +752,7 @@ int write_zeros(struct file *file, pgoff_t index, int start, int num_zeros)
                 page_cache_release(tmp_page);
                 goto out;
         }
-        tmp_page_virt = kmap_atomic(tmp_page, KM_USER0);
-        memset(((char *)tmp_page_virt + start), 0, num_zeros);
-        kunmap_atomic(tmp_page_virt, KM_USER0);
-        flush_dcache_page(tmp_page);
+        zero_user_page(tmp_page, start, num_zeros, KM_USER0);
         rc = ecryptfs_commit_write(file, tmp_page, start, start + num_zeros);
         if (rc < 0) {
                 ecryptfs_printk(KERN_ERR, "Error attempting to write zero's "

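Note (not part of the patch): zero_user_page() bundles exactly the sequence
the removed lines performed. Its shape, approximated from the highmem helpers
of this era (treat the body as a sketch, not the verbatim implementation):

    static inline void zero_user_page(struct page *page, unsigned int offset,
                                      unsigned int size, enum km_type km)
    {
            void *addr = kmap_atomic(page, km);

            memset(addr + offset, 0, size);
            kunmap_atomic(addr, km);
            flush_dcache_page(page);
    }
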
@@ -72,8 +72,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
         struct efs_inode_info *ei = (struct efs_inode_info *) foo;

-        if (flags & SLAB_CTOR_CONSTRUCTOR)
-                inode_init_once(&ei->vfs_inode);
+        inode_init_once(&ei->vfs_inode);
 }

 static int init_inodecache(void)

@@ -60,7 +60,7 @@
 #endif

 int core_uses_pid;
-char core_pattern[128] = "core";
+char core_pattern[CORENAME_MAX_SIZE] = "core";
 int suid_dumpable = 0;

 EXPORT_SYMBOL(suid_dumpable);

@@ -1264,8 +1264,6 @@ int set_binfmt(struct linux_binfmt *new)

 EXPORT_SYMBOL(set_binfmt);

-#define CORENAME_MAX_SIZE 64
-
 /* format_corename will inspect the pattern parameter, and output a
  * name into corename, which must have space for at least
  * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.

@@ -160,13 +160,11 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
         struct ext2_inode_info *ei = (struct ext2_inode_info *) foo;

-        if (flags & SLAB_CTOR_CONSTRUCTOR) {
-                rwlock_init(&ei->i_meta_lock);
+        rwlock_init(&ei->i_meta_lock);
 #ifdef CONFIG_EXT2_FS_XATTR
-                init_rwsem(&ei->xattr_sem);
+        init_rwsem(&ei->xattr_sem);
 #endif
-                inode_init_once(&ei->vfs_inode);
-        }
+        inode_init_once(&ei->vfs_inode);
 }

 static int init_inodecache(void)

@@ -466,14 +466,12 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
         struct ext3_inode_info *ei = (struct ext3_inode_info *) foo;

-        if (flags & SLAB_CTOR_CONSTRUCTOR) {
-                INIT_LIST_HEAD(&ei->i_orphan);
+        INIT_LIST_HEAD(&ei->i_orphan);
 #ifdef CONFIG_EXT3_FS_XATTR
-                init_rwsem(&ei->xattr_sem);
+        init_rwsem(&ei->xattr_sem);
 #endif
-                mutex_init(&ei->truncate_mutex);
-                inode_init_once(&ei->vfs_inode);
-        }
+        mutex_init(&ei->truncate_mutex);
+        inode_init_once(&ei->vfs_inode);
 }

 static int init_inodecache(void)

@@ -517,14 +517,12 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
         struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;

-        if (flags & SLAB_CTOR_CONSTRUCTOR) {
-                INIT_LIST_HEAD(&ei->i_orphan);
+        INIT_LIST_HEAD(&ei->i_orphan);
 #ifdef CONFIG_EXT4DEV_FS_XATTR
-                init_rwsem(&ei->xattr_sem);
+        init_rwsem(&ei->xattr_sem);
 #endif
-                mutex_init(&ei->truncate_mutex);
-                inode_init_once(&ei->vfs_inode);
-        }
+        mutex_init(&ei->truncate_mutex);
+        inode_init_once(&ei->vfs_inode);
 }

 static int init_inodecache(void)

@@ -40,8 +40,7 @@ static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
         struct fat_cache *cache = (struct fat_cache *)foo;

-        if (flags & SLAB_CTOR_CONSTRUCTOR)
-                INIT_LIST_HEAD(&cache->cache_list);
+        INIT_LIST_HEAD(&cache->cache_list);
 }

 int __init fat_cache_init(void)

@@ -500,14 +500,12 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
         struct msdos_inode_info *ei = (struct msdos_inode_info *)foo;

-        if (flags & SLAB_CTOR_CONSTRUCTOR) {
-                spin_lock_init(&ei->cache_lru_lock);
-                ei->nr_caches = 0;
-                ei->cache_valid_id = FAT_CACHE_VALID + 1;
-                INIT_LIST_HEAD(&ei->cache_lru);
-                INIT_HLIST_NODE(&ei->i_fat_hash);
-                inode_init_once(&ei->vfs_inode);
-        }
+        spin_lock_init(&ei->cache_lru_lock);
+        ei->nr_caches = 0;
+        ei->cache_valid_id = FAT_CACHE_VALID + 1;
+        INIT_LIST_HEAD(&ei->cache_lru);
+        INIT_HLIST_NODE(&ei->i_fat_hash);
+        inode_init_once(&ei->vfs_inode);
 }

 static int __init fat_init_inodecache(void)

@@ -687,8 +687,7 @@ static void fuse_inode_init_once(void *foo, struct kmem_cache *cachep,
 {
         struct inode * inode = foo;

-        if (flags & SLAB_CTOR_CONSTRUCTOR)
-                inode_init_once(inode);
+        inode_init_once(inode);
 }

 static int __init fuse_fs_init(void)

@@ -27,29 +27,27 @@
 static void gfs2_init_inode_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
         struct gfs2_inode *ip = foo;
-        if (flags & SLAB_CTOR_CONSTRUCTOR) {
-                inode_init_once(&ip->i_inode);
-                spin_lock_init(&ip->i_spin);
-                init_rwsem(&ip->i_rw_mutex);
-                memset(ip->i_cache, 0, sizeof(ip->i_cache));
-        }
+
+        inode_init_once(&ip->i_inode);
+        spin_lock_init(&ip->i_spin);
+        init_rwsem(&ip->i_rw_mutex);
+        memset(ip->i_cache, 0, sizeof(ip->i_cache));
 }

 static void gfs2_init_glock_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
         struct gfs2_glock *gl = foo;
-        if (flags & SLAB_CTOR_CONSTRUCTOR) {
-                INIT_HLIST_NODE(&gl->gl_list);
-                spin_lock_init(&gl->gl_spin);
-                INIT_LIST_HEAD(&gl->gl_holders);
-                INIT_LIST_HEAD(&gl->gl_waiters1);
-                INIT_LIST_HEAD(&gl->gl_waiters3);
-                gl->gl_lvb = NULL;
-                atomic_set(&gl->gl_lvb_count, 0);
-                INIT_LIST_HEAD(&gl->gl_reclaim);
-                INIT_LIST_HEAD(&gl->gl_ail_list);
-                atomic_set(&gl->gl_ail_count, 0);
-        }
+
+        INIT_HLIST_NODE(&gl->gl_list);
+        spin_lock_init(&gl->gl_spin);
+        INIT_LIST_HEAD(&gl->gl_holders);
+        INIT_LIST_HEAD(&gl->gl_waiters1);
+        INIT_LIST_HEAD(&gl->gl_waiters3);
+        gl->gl_lvb = NULL;
+        atomic_set(&gl->gl_lvb_count, 0);
+        INIT_LIST_HEAD(&gl->gl_reclaim);
+        INIT_LIST_HEAD(&gl->gl_ail_list);
+        atomic_set(&gl->gl_ail_count, 0);
 }

 /**

@@ -434,8 +434,7 @@ static void hfs_init_once(void *p, struct kmem_cache *cachep, unsigned long flag
 {
         struct hfs_inode_info *i = p;

-        if (flags & SLAB_CTOR_CONSTRUCTOR)
-                inode_init_once(&i->vfs_inode);
+        inode_init_once(&i->vfs_inode);
 }

 static int __init init_hfs_fs(void)

@@ -470,8 +470,7 @@ static void hfsplus_init_once(void *p, struct kmem_cache *cachep, unsigned long
 {
         struct hfsplus_inode_info *i = p;

-        if (flags & SLAB_CTOR_CONSTRUCTOR)
-                inode_init_once(&i->vfs_inode);
+        inode_init_once(&i->vfs_inode);
 }

 static int __init init_hfsplus_fs(void)

@@ -176,11 +176,9 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
         struct hpfs_inode_info *ei = (struct hpfs_inode_info *) foo;

-        if (flags & SLAB_CTOR_CONSTRUCTOR) {
-                mutex_init(&ei->i_mutex);
-                mutex_init(&ei->i_parent_mutex);
-                inode_init_once(&ei->vfs_inode);
-        }
+        mutex_init(&ei->i_mutex);
+        mutex_init(&ei->i_parent_mutex);
+        inode_init_once(&ei->vfs_inode);
 }

 static int init_inodecache(void)

@@ -556,8 +556,7 @@ static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
         struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

-        if (flags & SLAB_CTOR_CONSTRUCTOR)
-                inode_init_once(&ei->vfs_inode);
+        inode_init_once(&ei->vfs_inode);
 }

 const struct file_operations hugetlbfs_file_operations = {

@@ -213,8 +213,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
         struct inode * inode = (struct inode *) foo;

-        if (flags & SLAB_CTOR_CONSTRUCTOR)
-                inode_init_once(inode);
+        inode_init_once(inode);
 }

 /*

@@ -77,8 +77,7 @@ static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags
 {
         struct iso_inode_info *ei = foo;

-        if (flags & SLAB_CTOR_CONSTRUCTOR)
-                inode_init_once(&ei->vfs_inode);
+        inode_init_once(&ei->vfs_inode);
 }

 static int init_inodecache(void)

@@ -47,10 +47,8 @@ static void jffs2_i_init_once(void * foo, struct kmem_cache * cachep, unsigned l
 {
         struct jffs2_inode_info *ei = (struct jffs2_inode_info *) foo;

-        if (flags & SLAB_CTOR_CONSTRUCTOR) {
-                init_MUTEX(&ei->sem);
-                inode_init_once(&ei->vfs_inode);
-        }
+        init_MUTEX(&ei->sem);
+        inode_init_once(&ei->vfs_inode);
 }

 static int jffs2_sync_fs(struct super_block *sb, int wait)

@@ -184,16 +184,14 @@ static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
         struct metapage *mp = (struct metapage *)foo;

-        if (flags & SLAB_CTOR_CONSTRUCTOR) {
-                mp->lid = 0;
-                mp->lsn = 0;
-                mp->flag = 0;
-                mp->data = NULL;
-                mp->clsn = 0;
-                mp->log = NULL;
-                set_bit(META_free, &mp->flag);
-                init_waitqueue_head(&mp->wait);
-        }
+        mp->lid = 0;
+        mp->lsn = 0;
+        mp->flag = 0;
+        mp->data = NULL;
+        mp->clsn = 0;
+        mp->log = NULL;
+        set_bit(META_free, &mp->flag);
+        init_waitqueue_head(&mp->wait);
 }

 static inline struct metapage *alloc_metapage(gfp_t gfp_mask)

@@ -752,20 +752,18 @@ static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags
 {
         struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo;

-        if (flags & SLAB_CTOR_CONSTRUCTOR) {
-                memset(jfs_ip, 0, sizeof(struct jfs_inode_info));
-                INIT_LIST_HEAD(&jfs_ip->anon_inode_list);
-                init_rwsem(&jfs_ip->rdwrlock);
-                mutex_init(&jfs_ip->commit_mutex);
-                init_rwsem(&jfs_ip->xattr_sem);
-                spin_lock_init(&jfs_ip->ag_lock);
-                jfs_ip->active_ag = -1;
+        memset(jfs_ip, 0, sizeof(struct jfs_inode_info));
+        INIT_LIST_HEAD(&jfs_ip->anon_inode_list);
+        init_rwsem(&jfs_ip->rdwrlock);
+        mutex_init(&jfs_ip->commit_mutex);
+        init_rwsem(&jfs_ip->xattr_sem);
+        spin_lock_init(&jfs_ip->ag_lock);
+        jfs_ip->active_ag = -1;
 #ifdef CONFIG_JFS_POSIX_ACL
-                jfs_ip->i_acl = JFS_ACL_NOT_CACHED;
-                jfs_ip->i_default_acl = JFS_ACL_NOT_CACHED;
+        jfs_ip->i_acl = JFS_ACL_NOT_CACHED;
+        jfs_ip->i_default_acl = JFS_ACL_NOT_CACHED;
 #endif
-                inode_init_once(&jfs_ip->vfs_inode);
-        }
+        inode_init_once(&jfs_ip->vfs_inode);
 }

 static int __init init_jfs_fs(void)

@@ -203,9 +203,6 @@ static void init_once(void *foo, struct kmem_cache *cache, unsigned long flags)
 {
         struct file_lock *lock = (struct file_lock *) foo;

-        if (!(flags & SLAB_CTOR_CONSTRUCTOR))
-                return;
-
         locks_init_lock(lock);
 }

@@ -73,8 +73,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
         struct minix_inode_info *ei = (struct minix_inode_info *) foo;

-        if (flags & SLAB_CTOR_CONSTRUCTOR)
-                inode_init_once(&ei->vfs_inode);
+        inode_init_once(&ei->vfs_inode);
 }

 static int init_inodecache(void)

@@ -60,10 +60,8 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
         struct ncp_inode_info *ei = (struct ncp_inode_info *) foo;

-        if (flags & SLAB_CTOR_CONSTRUCTOR) {
-                mutex_init(&ei->open_mutex);
-                inode_init_once(&ei->vfs_inode);
-        }
+        mutex_init(&ei->open_mutex);
+        inode_init_once(&ei->vfs_inode);
 }

 static int init_inodecache(void)

@@ -1164,21 +1164,19 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
         struct nfs_inode *nfsi = (struct nfs_inode *) foo;

-        if (flags & SLAB_CTOR_CONSTRUCTOR) {
-                inode_init_once(&nfsi->vfs_inode);
-                spin_lock_init(&nfsi->req_lock);
-                INIT_LIST_HEAD(&nfsi->dirty);
-                INIT_LIST_HEAD(&nfsi->commit);
-                INIT_LIST_HEAD(&nfsi->open_files);
-                INIT_LIST_HEAD(&nfsi->access_cache_entry_lru);
-                INIT_LIST_HEAD(&nfsi->access_cache_inode_lru);
-                INIT_RADIX_TREE(&nfsi->nfs_page_tree, GFP_ATOMIC);
-                atomic_set(&nfsi->data_updates, 0);
-                nfsi->ndirty = 0;
-                nfsi->ncommit = 0;
-                nfsi->npages = 0;
-                nfs4_init_once(nfsi);
-        }
+        inode_init_once(&nfsi->vfs_inode);
+        spin_lock_init(&nfsi->req_lock);
+        INIT_LIST_HEAD(&nfsi->dirty);
+        INIT_LIST_HEAD(&nfsi->commit);
+        INIT_LIST_HEAD(&nfsi->open_files);
+        INIT_LIST_HEAD(&nfsi->access_cache_entry_lru);
+        INIT_LIST_HEAD(&nfsi->access_cache_inode_lru);
+        INIT_RADIX_TREE(&nfsi->nfs_page_tree, GFP_ATOMIC);
+        atomic_set(&nfsi->data_updates, 0);
+        nfsi->ndirty = 0;
+        nfsi->ncommit = 0;
+        nfsi->npages = 0;
+        nfs4_init_once(nfsi);
 }

 static int __init nfs_init_inodecache(void)

@@ -3085,8 +3085,7 @@ static void ntfs_big_inode_init_once(void *foo, struct kmem_cache *cachep,
 {
         ntfs_inode *ni = (ntfs_inode *)foo;

-        if (flags & SLAB_CTOR_CONSTRUCTOR)
-                inode_init_once(VFS_I(ni));
+        inode_init_once(VFS_I(ni));
 }

 /*

@@ -262,12 +262,10 @@ static void dlmfs_init_once(void *foo,
         struct dlmfs_inode_private *ip =
                 (struct dlmfs_inode_private *) foo;

-        if (flags & SLAB_CTOR_CONSTRUCTOR) {
-                ip->ip_dlm = NULL;
-                ip->ip_parent = NULL;
+        ip->ip_dlm = NULL;
+        ip->ip_parent = NULL;

-                inode_init_once(&ip->ip_vfs_inode);
-        }
+        inode_init_once(&ip->ip_vfs_inode);
 }

 static struct inode *dlmfs_alloc_inode(struct super_block *sb)

@@ -937,31 +937,29 @@ static void ocfs2_inode_init_once(void *data,
 {
         struct ocfs2_inode_info *oi = data;

-        if (flags & SLAB_CTOR_CONSTRUCTOR) {
-                oi->ip_flags = 0;
-                oi->ip_open_count = 0;
-                spin_lock_init(&oi->ip_lock);
-                ocfs2_extent_map_init(&oi->vfs_inode);
-                INIT_LIST_HEAD(&oi->ip_io_markers);
-                oi->ip_created_trans = 0;
-                oi->ip_last_trans = 0;
-                oi->ip_dir_start_lookup = 0;
+        oi->ip_flags = 0;
+        oi->ip_open_count = 0;
+        spin_lock_init(&oi->ip_lock);
+        ocfs2_extent_map_init(&oi->vfs_inode);
+        INIT_LIST_HEAD(&oi->ip_io_markers);
+        oi->ip_created_trans = 0;
+        oi->ip_last_trans = 0;
+        oi->ip_dir_start_lookup = 0;

-                init_rwsem(&oi->ip_alloc_sem);
-                mutex_init(&oi->ip_io_mutex);
+        init_rwsem(&oi->ip_alloc_sem);
+        mutex_init(&oi->ip_io_mutex);

-                oi->ip_blkno = 0ULL;
-                oi->ip_clusters = 0;
+        oi->ip_blkno = 0ULL;
+        oi->ip_clusters = 0;

-                ocfs2_lock_res_init_once(&oi->ip_rw_lockres);
-                ocfs2_lock_res_init_once(&oi->ip_meta_lockres);
-                ocfs2_lock_res_init_once(&oi->ip_data_lockres);
-                ocfs2_lock_res_init_once(&oi->ip_open_lockres);
+        ocfs2_lock_res_init_once(&oi->ip_rw_lockres);
+        ocfs2_lock_res_init_once(&oi->ip_meta_lockres);
+        ocfs2_lock_res_init_once(&oi->ip_data_lockres);
+        ocfs2_lock_res_init_once(&oi->ip_open_lockres);

-                ocfs2_metadata_cache_init(&oi->vfs_inode);
+        ocfs2_metadata_cache_init(&oi->vfs_inode);

-                inode_init_once(&oi->vfs_inode);
-        }
+        inode_init_once(&oi->vfs_inode);
 }

 static int ocfs2_initialize_mem_caches(void)

@@ -419,8 +419,7 @@ static void op_inode_init_once(void *data, struct kmem_cache * cachep, unsigned
 {
         struct op_inode_info *oi = (struct op_inode_info *) data;

-        if (flags & SLAB_CTOR_CONSTRUCTOR)
-                inode_init_once(&oi->vfs_inode);
+        inode_init_once(&oi->vfs_inode);
 }

 static int __init init_openprom_fs(void)

@@ -109,8 +109,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
         struct proc_inode *ei = (struct proc_inode *) foo;

-        if (flags & SLAB_CTOR_CONSTRUCTOR)
-                inode_init_once(&ei->vfs_inode);
+        inode_init_once(&ei->vfs_inode);
 }

 int __init proc_init_inodecache(void)

@@ -536,8 +536,7 @@ static void init_once(void *foo, struct kmem_cache * cachep,
 {
         struct qnx4_inode_info *ei = (struct qnx4_inode_info *) foo;

-        if (flags & SLAB_CTOR_CONSTRUCTOR)
-                inode_init_once(&ei->vfs_inode);
+        inode_init_once(&ei->vfs_inode);
 }

 static int init_inodecache(void)

fs/quota.c

@@ -157,7 +157,6 @@ static int check_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t
 static void quota_sync_sb(struct super_block *sb, int type)
 {
         int cnt;
-        struct inode *discard[MAXQUOTAS];

         sb->s_qcop->quota_sync(sb, type);
         /* This is not very clever (and fast) but currently I don't know about

@@ -167,29 +166,21 @@ static void quota_sync_sb(struct super_block *sb, int type)
                 sb->s_op->sync_fs(sb, 1);
         sync_blockdev(sb->s_bdev);

-        /* Now when everything is written we can discard the pagecache so
-         * that userspace sees the changes. We need i_mutex and so we could
-         * not do it inside dqonoff_mutex. Moreover we need to be carefull
-         * about races with quotaoff() (that is the reason why we have own
-         * reference to inode). */
+        /*
+         * Now when everything is written we can discard the pagecache so
+         * that userspace sees the changes.
+         */
         mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-                discard[cnt] = NULL;
                 if (type != -1 && cnt != type)
                         continue;
                 if (!sb_has_quota_enabled(sb, cnt))
                         continue;
-                discard[cnt] = igrab(sb_dqopt(sb)->files[cnt]);
+                mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex, I_MUTEX_QUOTA);
+                truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0);
+                mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex);
         }
         mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
-        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-                if (discard[cnt]) {
-                        mutex_lock(&discard[cnt]->i_mutex);
-                        truncate_inode_pages(&discard[cnt]->i_data, 0);
-                        mutex_unlock(&discard[cnt]->i_mutex);
-                        iput(discard[cnt]);
-                }
-        }
 }

 void sync_dquots(struct super_block *sb, int type)

@@ -511,14 +511,12 @@ static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags
 {
         struct reiserfs_inode_info *ei = (struct reiserfs_inode_info *)foo;

-        if (flags & SLAB_CTOR_CONSTRUCTOR) {
-                INIT_LIST_HEAD(&ei->i_prealloc_list);
-                inode_init_once(&ei->vfs_inode);
+        INIT_LIST_HEAD(&ei->i_prealloc_list);
+        inode_init_once(&ei->vfs_inode);
 #ifdef CONFIG_REISERFS_FS_POSIX_ACL
-                ei->i_acl_access = NULL;
-                ei->i_acl_default = NULL;
+        ei->i_acl_access = NULL;
+        ei->i_acl_default = NULL;
 #endif
-        }
 }

 static int init_inodecache(void)

@@ -566,12 +566,11 @@ static void romfs_destroy_inode(struct inode *inode)
         kmem_cache_free(romfs_inode_cachep, ROMFS_I(inode));
 }

-static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
+static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
-        struct romfs_inode_info *ei = (struct romfs_inode_info *) foo;
+        struct romfs_inode_info *ei = foo;

-        if (flags & SLAB_CTOR_CONSTRUCTOR)
-                inode_init_once(&ei->vfs_inode);
+        inode_init_once(&ei->vfs_inode);
 }

 static int init_inodecache(void)

@@ -70,8 +70,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
         struct smb_inode_info *ei = (struct smb_inode_info *) foo;

-        if (flags & SLAB_CTOR_CONSTRUCTOR)
-                inode_init_once(&ei->vfs_inode);
+        inode_init_once(&ei->vfs_inode);
 }

 static int init_inodecache(void)

@@ -322,8 +322,7 @@ static void init_once(void *p, struct kmem_cache *cachep, unsigned long flags)
 {
         struct sysv_inode_info *si = (struct sysv_inode_info *)p;

-        if (flags & SLAB_CTOR_CONSTRUCTOR)
-                inode_init_once(&si->vfs_inode);
+        inode_init_once(&si->vfs_inode);
 }

 const struct super_operations sysv_sops = {

@@ -134,10 +134,8 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
         struct udf_inode_info *ei = (struct udf_inode_info *) foo;

-        if (flags & SLAB_CTOR_CONSTRUCTOR) {
-                ei->i_ext.i_data = NULL;
-                inode_init_once(&ei->vfs_inode);
-        }
+        ei->i_ext.i_data = NULL;
+        inode_init_once(&ei->vfs_inode);
 }

 static int init_inodecache(void)

@@ -1237,8 +1237,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
         struct ufs_inode_info *ei = (struct ufs_inode_info *) foo;

-        if (flags & SLAB_CTOR_CONSTRUCTOR)
-                inode_init_once(&ei->vfs_inode);
+        inode_init_once(&ei->vfs_inode);
 }

 static int init_inodecache(void)

@@ -360,8 +360,7 @@ xfs_fs_inode_init_once(
         kmem_zone_t *zonep,
         unsigned long flags)
 {
-        if (flags & SLAB_CTOR_CONSTRUCTOR)
-                inode_init_once(vn_to_inode((bhv_vnode_t *)vnode));
+        inode_init_once(vn_to_inode((bhv_vnode_t *)vnode));
 }

 STATIC int

@@ -11,11 +11,8 @@
 #define MAX_PXM_DOMAINS (256) /* Old pxm spec is defined 8 bit */
 #endif

-extern int __cpuinitdata pxm_to_node_map[MAX_PXM_DOMAINS];
-extern int __cpuinitdata node_to_pxm_map[MAX_NUMNODES];
-
-extern int __cpuinit pxm_to_node(int);
-extern int __cpuinit node_to_pxm(int);
+extern int pxm_to_node(int);
+extern int node_to_pxm(int);
 extern int __cpuinit acpi_map_pxm_to_node(int);
 extern void __cpuinit acpi_unmap_pxm_to_node(int);

@@ -17,6 +17,8 @@ struct pt_regs;

 #ifdef __KERNEL__

+#define CORENAME_MAX_SIZE 128
+
 /*
  * This structure is used to hold the arguments that are used when loading binaries.
  */

@@ -19,17 +19,27 @@
 CACHE(32768)
 CACHE(65536)
 CACHE(131072)
-#if (NR_CPUS > 512) || (MAX_NUMNODES > 256) || !defined(CONFIG_MMU)
+#if KMALLOC_MAX_SIZE >= 262144
 CACHE(262144)
 #endif
-#ifndef CONFIG_MMU
+#if KMALLOC_MAX_SIZE >= 524288
 CACHE(524288)
+#endif
+#if KMALLOC_MAX_SIZE >= 1048576
 CACHE(1048576)
-#ifdef CONFIG_LARGE_ALLOCS
+#endif
+#if KMALLOC_MAX_SIZE >= 2097152
 CACHE(2097152)
+#endif
+#if KMALLOC_MAX_SIZE >= 4194304
 CACHE(4194304)
+#endif
+#if KMALLOC_MAX_SIZE >= 8388608
 CACHE(8388608)
+#endif
+#if KMALLOC_MAX_SIZE >= 16777216
 CACHE(16777216)
+#endif
+#if KMALLOC_MAX_SIZE >= 33554432
 CACHE(33554432)
-#endif /* CONFIG_LARGE_ALLOCS */
-#endif /* CONFIG_MMU */
+#endif

@@ -471,6 +471,7 @@
 #define PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2 0x0219
 #define PCI_DEVICE_ID_IBM_ICOM_V2_TWO_PORTS_RVX 0x021A
 #define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM 0x0251
+#define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM_PCIE 0x0361
 #define PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL 0x252

 #define PCI_VENDOR_ID_COMPEX2 0x101a /* pci.ids says "AT&T GIS (NCR)" */

@@ -74,17 +74,14 @@ void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned lon
 void page_add_file_rmap(struct page *);
 void page_remove_rmap(struct page *, struct vm_area_struct *);

-/**
- * page_dup_rmap - duplicate pte mapping to a page
- * @page: the page to add the mapping to
- *
- * For copy_page_range only: minimal extract from page_add_rmap,
- * avoiding unnecessary tests (already checked) so it's quicker.
- */
-static inline void page_dup_rmap(struct page *page)
+#ifdef CONFIG_DEBUG_VM
+void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address);
+#else
+static inline void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
 {
         atomic_inc(&page->_mapcount);
 }
+#endif

 /*
  * Called from mm/vmscan.c to handle paging out

@@ -32,9 +32,6 @@ typedef struct kmem_cache kmem_cache_t __deprecated;
 #define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */
 #define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */

-/* Flags passed to a constructor functions */
-#define SLAB_CTOR_CONSTRUCTOR 0x001UL /* If not set, then deconstructor */
-
 /*
  * struct kmem_cache related prototypes
  */

@@ -76,6 +73,21 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
 }
 #endif

+/*
+ * The largest kmalloc size supported by the slab allocators is
+ * 32 megabyte (2^25) or the maximum allocatable page order if that is
+ * less than 32 MB.
+ *
+ * WARNING: Its not easy to increase this value since the allocators have
+ * to do various tricks to work around compiler limitations in order to
+ * ensure proper constant folding.
+ */
+#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT) <= 25 ? \
+                (MAX_ORDER + PAGE_SHIFT) : 25)
+
+#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_HIGH)
+#define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_HIGH - PAGE_SHIFT)
+
 /*
  * Common kmalloc functions provided by all allocators
  */

@@ -233,9 +245,6 @@ extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);

 #endif /* DEBUG_SLAB */

-extern const struct seq_operations slabinfo_op;
-ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *);
-
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SLAB_H */

@@ -109,4 +109,7 @@ found:

 #endif /* CONFIG_NUMA */

+extern const struct seq_operations slabinfo_op;
+ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *);
+
 #endif /* _LINUX_SLAB_DEF_H */

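Note (not part of the patch): a worked example of the new constants for a
common configuration (4 KiB pages, PAGE_SHIFT = 12, MAX_ORDER = 11; these
values are an assumption, not something stated in the patch):

    MAX_ORDER + PAGE_SHIFT = 23 <= 25, so
    KMALLOC_SHIFT_HIGH = 23
    KMALLOC_MAX_SIZE   = 1UL << 23 = 8 MiB
    KMALLOC_MAX_ORDER  = 23 - 12   = 11

Only when MAX_ORDER + PAGE_SHIFT exceeds 25 does the 32 MiB (2^25) cap apply;
these are the same KMALLOC_MAX_SIZE thresholds the kmalloc_sizes.h guards
above test against.
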
@ -40,7 +40,6 @@ struct kmem_cache {
|
|||
int objects; /* Number of objects in slab */
|
||||
int refcount; /* Refcount for slab cache destroy */
|
||||
void (*ctor)(void *, struct kmem_cache *, unsigned long);
|
||||
void (*dtor)(void *, struct kmem_cache *, unsigned long);
|
||||
int inuse; /* Offset to metadata */
|
||||
int align; /* Alignment */
|
||||
const char *name; /* Name (only for display!) */
|
||||
|
@@ -59,17 +58,6 @@ struct kmem_cache {
 */
#define KMALLOC_SHIFT_LOW 3

#ifdef CONFIG_LARGE_ALLOCS
#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT) =< 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#else
#if !defined(CONFIG_MMU) || NR_CPUS > 512 || MAX_NUMNODES > 256
#define KMALLOC_SHIFT_HIGH 20
#else
#define KMALLOC_SHIFT_HIGH 18
#endif
#endif

/*
 * We keep the general caches in an array of slab caches that are used for
 * 2^x bytes of allocations.
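Worth noting in passing: the deleted CONFIG_LARGE_ALLOCS branch contains "=<", which is not a C operator, so that branch could never have compiled as shipped. What it presumably intended (a corrected sketch, "<=" assumed for "=<") is:

/* Corrected form of the deleted branch ("<=" assumed). */
#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)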
@@ -80,7 +68,7 @@ extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
 * Sorry that the following has to be that ugly but some versions of GCC
 * have trouble with constant propagation and loops.
 */
static inline int kmalloc_index(int size)
static inline int kmalloc_index(size_t size)
{
	/*
	 * We should return 0 if size == 0 but we use the smallest object
@@ -88,7 +76,7 @@ static inline int kmalloc_index(int size)
	 */
	WARN_ON_ONCE(size == 0);

	if (size > (1 << KMALLOC_SHIFT_HIGH))
	if (size > KMALLOC_MAX_SIZE)
		return -1;

	if (size > 64 && size <= 96)
@@ -111,17 +99,13 @@ static inline int kmalloc_index(int size)
	if (size <= 64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
#if KMALLOC_SHIFT_HIGH > 18
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
#endif
#if KMALLOC_SHIFT_HIGH > 20
	if (size <= 2 * 1024 * 1024) return 21;
	if (size <= 4 * 1024 * 1024) return 22;
	if (size <= 8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;
#endif
	return -1;

/*
@@ -146,7 +130,12 @@ static inline struct kmem_cache *kmalloc_slab(size_t size)
	if (index == 0)
		return NULL;

	if (index < 0) {
	/*
	 * This function only gets expanded if __builtin_constant_p(size), so
	 * testing it here shouldn't be needed. But some versions of gcc need
	 * help.
	 */
	if (__builtin_constant_p(size) && index < 0) {
		/*
		 * Generate a link failure. Would be great if we could
		 * do something to stop the compile here.
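The "generate a link failure" idea mentioned in that comment is a standard trick: reference a symbol that is never defined, so an out-of-range compile-time constant only surfaces at link time. A minimal userspace sketch (the helper name here is made up, not the kernel's):

/* Declared but intentionally never defined anywhere. */
extern void __size_is_too_large(void);

static inline void check_size(unsigned long size)
{
	/* Folded away for in-range constants; an out-of-range constant
	 * leaves a call to an undefined symbol -> link-time error. */
	if (__builtin_constant_p(size) && size > (1UL << 25))
		__size_is_too_large();
}

int main(void)
{
	check_size(4096);	/* links fine; try (1UL << 26) to see it fail */
	return 0;
}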
@@ -6,6 +6,7 @@
 * Alan Cox. <alan@redhat.com>
 */

#include <linux/errno.h>

extern void cpu_idle(void);
@@ -99,11 +100,9 @@ static inline void smp_send_reschedule(int cpu) { }
#define num_booting_cpus()			1
#define smp_prepare_boot_cpu()			do {} while (0)
static inline int smp_call_function_single(int cpuid, void (*func) (void *info),
				void *info, int retry, int wait)
					   void *info, int retry, int wait)
{
	/* Disable interrupts here? */
	func(info);
	return 0;
	return -EBUSY;
}

#endif /* !SMP */
@@ -122,7 +122,7 @@ extern struct workqueue_struct *__create_workqueue(const char *name,
						    int singlethread,
						    int freezeable);
#define create_workqueue(name) __create_workqueue((name), 0, 0)
#define create_freezeable_workqueue(name) __create_workqueue((name), 0, 1)
#define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1)
#define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0)

extern void destroy_workqueue(struct workqueue_struct *wq);
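Read as plain macro expansion, the one-character change flips the singlethread argument, making freezeable workqueues singlethreaded as well. A userspace stand-in (the printing stub is ours; only the macro shape comes from the header):

#include <stdio.h>

static void __create_workqueue(const char *name, int singlethread,
			       int freezeable)
{
	printf("%s: singlethread=%d freezeable=%d\n",
	       name, singlethread, freezeable);
}

#define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1)

int main(void)
{
	create_freezeable_workqueue("demo");	/* now also singlethreaded */
	return 0;
}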
@@ -567,7 +567,6 @@ config SLAB
	   a slab allocator.

config SLUB
	depends on EXPERIMENTAL && !ARCH_USES_SLAB_PAGE_STRUCT
	bool "SLUB (Unqueued Allocator)"
	help
	   SLUB is a slab allocator that minimizes cache line usage
@@ -577,14 +576,11 @@ config SLUB
	   and has enhanced diagnostics.

config SLOB
	#
	#   SLOB does not support SMP because SLAB_DESTROY_BY_RCU is unsupported
	#
	depends on EMBEDDED && !SMP && !SPARSEMEM
	depends on EMBEDDED && !SPARSEMEM
	bool "SLOB (Simple Allocator)"
	help
	   SLOB replaces the SLAB allocator with a drastically simpler
	   allocator. SLOB is more space efficient that SLAB but does not
	   allocator. SLOB is more space efficient than SLAB but does not
	   scale well (single lock for all operations) and is also highly
	   susceptible to fragmentation. SLUB can accomplish a higher object
	   density. It is usually better to use SLUB instead of SLOB.
@@ -215,8 +215,7 @@ static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

	if (flags & SLAB_CTOR_CONSTRUCTOR)
		inode_init_once(&p->vfs_inode);
	inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
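This and the similar ctor hunks below all follow from one rule change: a cache constructor now runs exactly once, when the object's backing page is allocated, and never on object reuse, so the SLAB_CTOR_CONSTRUCTOR guard is dead code. A toy sketch of the resulting ctor shape (names and the plain-struct stand-in are ours):

#include <stdio.h>

struct obj { int ready; };

/* After this series the flags argument is always 0, so the ctor just
 * initializes unconditionally. */
static void init_once(void *foo, unsigned long flags)
{
	(void)flags;
	((struct obj *)foo)->ready = 1;
}

int main(void)
{
	struct obj o = { 0 };
	init_once(&o, 0);	/* called once per object lifetime */
	printf("ready=%d\n", o.ready);
	return 0;
}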
@@ -1427,10 +1427,8 @@ static void sighand_ctor(void *data, struct kmem_cache *cachep,
{
	struct sighand_struct *sighand = data;

	if (flags & SLAB_CTOR_CONSTRUCTOR) {
		spin_lock_init(&sighand->siglock);
		INIT_LIST_HEAD(&sighand->signalfd_list);
	}
	spin_lock_init(&sighand->siglock);
	INIT_LIST_HEAD(&sighand->signalfd_list);
}

void __init proc_caches_init(void)
@@ -416,7 +416,8 @@ static ssize_t disk_store(struct kset *kset, const char *buf, size_t n)

	mutex_lock(&pm_mutex);
	for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) {
		if (!strncmp(buf, hibernation_modes[i], len)) {
		if (len == strlen(hibernation_modes[i])
		    && !strncmp(buf, hibernation_modes[i], len)) {
			mode = i;
			break;
		}
@@ -290,13 +290,13 @@ static ssize_t state_store(struct kset *kset, const char *buf, size_t n)
	len = p ? p - buf : n;

	/* First, check if we are requested to hibernate */
	if (!strncmp(buf, "disk", len)) {
	if (len == 4 && !strncmp(buf, "disk", len)) {
		error = hibernate();
		return error ? error : n;
	}

	for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++) {
		if (*s && !strncmp(buf, *s, len))
		if (*s && len == strlen(*s) && !strncmp(buf, *s, len))
			break;
	}
	if (state < PM_SUSPEND_MAX && *s)
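Both hunks above fix the same class of bug: with a short input, strncmp() alone accepts any string the input is merely a prefix of. A runnable illustration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *mode = "disk";
	const char *buf = "d";		/* e.g. "echo d > /sys/power/state" */
	size_t len = strlen(buf);

	printf("prefix check only: %s\n",
	       !strncmp(buf, mode, len) ? "match (bug)" : "no match");
	printf("with length check: %s\n",
	       (len == strlen(mode) && !strncmp(buf, mode, len))
	       ? "match" : "no match (fixed)");
	return 0;
}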
@@ -227,7 +227,7 @@ static ctl_table kern_table[] = {
		.ctl_name	= KERN_CORE_PATTERN,
		.procname	= "core_pattern",
		.data		= core_pattern,
		.maxlen		= 128,
		.maxlen		= CORENAME_MAX_SIZE,
		.mode		= 0644,
		.proc_handler	= &proc_dostring,
		.strategy	= &sysctl_string,
@@ -481,7 +481,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
	page = vm_normal_page(vma, addr, pte);
	if (page) {
		get_page(page);
		page_dup_rmap(page);
		page_dup_rmap(page, vma, addr);
		rss[!!PageAnon(page)]++;
	}
mm/rmap.c | 66
@@ -162,12 +162,10 @@ void anon_vma_unlink(struct vm_area_struct *vma)
static void anon_vma_ctor(void *data, struct kmem_cache *cachep,
			  unsigned long flags)
{
	if (flags & SLAB_CTOR_CONSTRUCTOR) {
		struct anon_vma *anon_vma = data;
	struct anon_vma *anon_vma = data;

		spin_lock_init(&anon_vma->lock);
		INIT_LIST_HEAD(&anon_vma->head);
	}
	spin_lock_init(&anon_vma->lock);
	INIT_LIST_HEAD(&anon_vma->head);
}

void __init anon_vma_init(void)
@@ -531,20 +529,52 @@ static void __page_set_anon_rmap(struct page *page,
	__inc_zone_page_state(page, NR_ANON_PAGES);
}

/**
 * page_set_anon_rmap - sanity check anonymous rmap addition
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 */
static void __page_check_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked, except if called from page_dup_rmap,
	 * in which case the page is already known to be setup.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to page_add_new_anon_rmap.
	 */
	struct anon_vma *anon_vma = vma->anon_vma;
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	BUG_ON(page->mapping != (struct address_space *)anon_vma);
	BUG_ON(page->index != linear_page_index(vma, address));
#endif
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * The caller needs to hold the pte lock.
 * The caller needs to hold the pte lock and the page must be locked.
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	if (atomic_inc_and_test(&page->_mapcount))
		__page_set_anon_rmap(page, vma, address);
	/* else checking page index and mapping is racy */
	else
		__page_check_anon_rmap(page, vma, address);
}

/*
@@ -555,10 +585,12 @@ void page_add_anon_rmap(struct page *page,
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
	__page_set_anon_rmap(page, vma, address);
}
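The atomic_set() here leans on the _mapcount convention of starting at -1; a tiny sketch of the arithmetic (a plain int standing in for atomic_t):

#include <stdio.h>

int main(void)
{
	int mapcount = -1;		/* unmapped page */

	/* page_add_anon_rmap(): atomic_inc_and_test() is true only for
	 * the transition -1 -> 0, i.e. the first mapping. */
	if (++mapcount == 0)
		printf("first mapping: set up anon rmap\n");

	/* page_add_new_anon_rmap(): a brand-new page can skip the
	 * inc-and-test and be set straight to 0 (= one mapping). */
	mapcount = 0;
	printf("mapcount=%d\n", mapcount);
	return 0;
}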
@@ -575,6 +607,26 @@ void page_add_file_rmap(struct page *page)
	__inc_zone_page_state(page, NR_FILE_MAPPED);
}

#ifdef CONFIG_DEBUG_VM
/**
 * page_dup_rmap - duplicate pte mapping to a page
 * @page: the page to add the mapping to
 *
 * For copy_page_range only: minimal extract from page_add_file_rmap /
 * page_add_anon_rmap, avoiding unnecessary tests (already checked) so it's
 * quicker.
 *
 * The caller needs to hold the pte lock.
 */
void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
{
	BUG_ON(page_mapcount(page) == 0);
	if (PageAnon(page))
		__page_check_anon_rmap(page, vma, address);
	atomic_inc(&page->_mapcount);
}
#endif

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
@@ -2358,13 +2358,11 @@ static void init_once(void *foo, struct kmem_cache *cachep,
{
	struct shmem_inode_info *p = (struct shmem_inode_info *) foo;

	if (flags & SLAB_CTOR_CONSTRUCTOR) {
		inode_init_once(&p->vfs_inode);
	inode_init_once(&p->vfs_inode);
#ifdef CONFIG_TMPFS_POSIX_ACL
		p->i_acl = NULL;
		p->i_default_acl = NULL;
	p->i_acl = NULL;
	p->i_default_acl = NULL;
#endif
	}
}

static int init_inodecache(void)
mm/slab.c | 57
@@ -409,9 +409,6 @@ struct kmem_cache {
	/* constructor func */
	void (*ctor) (void *, struct kmem_cache *, unsigned long);

	/* de-constructor func */
	void (*dtor) (void *, struct kmem_cache *, unsigned long);

	/* 5) cache creation/removal */
	const char *name;
	struct list_head next;
@@ -571,21 +568,6 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)

#endif

/*
 * Maximum size of an obj (in 2^order pages) and absolute limit for the gfp
 * order.
 */
#if defined(CONFIG_LARGE_ALLOCS)
#define MAX_OBJ_ORDER	13	/* up to 32Mb */
#define MAX_GFP_ORDER	13	/* up to 32Mb */
#elif defined(CONFIG_MMU)
#define MAX_OBJ_ORDER	5	/* 32 pages */
#define MAX_GFP_ORDER	5	/* 32 pages */
#else
#define MAX_OBJ_ORDER	8	/* up to 1Mb */
#define MAX_GFP_ORDER	8	/* up to 1Mb */
#endif

/*
 * Do not go above this order unless 0 objects fit into the slab.
 */
@@ -792,6 +774,7 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
	 */
	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
#endif
	WARN_ON_ONCE(size == 0);
	while (size > csizep->cs_size)
		csizep++;
@@ -1911,20 +1894,11 @@ static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
				slab_error(cachep, "end of a freed object "
					   "was overwritten");
		}
		if (cachep->dtor && !(cachep->flags & SLAB_POISON))
			(cachep->dtor) (objp + obj_offset(cachep), cachep, 0);
	}
}
#else
static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
{
	if (cachep->dtor) {
		int i;
		for (i = 0; i < cachep->num; i++) {
			void *objp = index_to_obj(cachep, slabp, i);
			(cachep->dtor) (objp, cachep, 0);
		}
	}
}
#endif
@@ -2013,7 +1987,7 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
	size_t left_over = 0;
	int gfporder;

	for (gfporder = 0; gfporder <= MAX_GFP_ORDER; gfporder++) {
	for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
		unsigned int num;
		size_t remainder;
@@ -2124,7 +2098,7 @@ static int setup_cpu_cache(struct kmem_cache *cachep)
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 * @dtor: A destructor for the objects.
 * @dtor: A destructor for the objects (not implemented anymore).
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within a int, but can be interrupted.
@@ -2159,7 +2133,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
	 * Sanity checks... these are all serious usage bugs.
	 */
	if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
	    (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) {
	    size > KMALLOC_MAX_SIZE || dtor) {
		printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__,
		       name);
		BUG();
@@ -2213,9 +2187,6 @@ kmem_cache_create (const char *name, size_t size, size_t align,
	if (flags & SLAB_DESTROY_BY_RCU)
		BUG_ON(flags & SLAB_POISON);
#endif
	if (flags & SLAB_DESTROY_BY_RCU)
		BUG_ON(dtor);

	/*
	 * Always checks flags, a caller might be expecting debug support which
	 * isn't available.
@@ -2370,7 +2341,6 @@ kmem_cache_create (const char *name, size_t size, size_t align,
		BUG_ON(!cachep->slabp_cache);
	}
	cachep->ctor = ctor;
	cachep->dtor = dtor;
	cachep->name = name;

	if (setup_cpu_cache(cachep)) {
@@ -2625,7 +2595,7 @@ static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
}

static void cache_init_objs(struct kmem_cache *cachep,
			    struct slab *slabp, unsigned long ctor_flags)
			    struct slab *slabp)
{
	int i;
@@ -2649,7 +2619,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
		 */
		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
			cachep->ctor(objp + obj_offset(cachep), cachep,
				     ctor_flags);
				     0);

		if (cachep->flags & SLAB_RED_ZONE) {
			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
@@ -2665,7 +2635,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
					cachep->buffer_size / PAGE_SIZE, 0);
#else
		if (cachep->ctor)
			cachep->ctor(objp, cachep, ctor_flags);
			cachep->ctor(objp, cachep, 0);
#endif
		slab_bufctl(slabp)[i] = i + 1;
	}
@@ -2754,7 +2724,6 @@ static int cache_grow(struct kmem_cache *cachep,
	struct slab *slabp;
	size_t offset;
	gfp_t local_flags;
	unsigned long ctor_flags;
	struct kmem_list3 *l3;

	/*
@@ -2763,7 +2732,6 @@ static int cache_grow(struct kmem_cache *cachep,
	 */
	BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));

	ctor_flags = SLAB_CTOR_CONSTRUCTOR;
	local_flags = (flags & GFP_LEVEL_MASK);
	/* Take the l3 list lock to change the colour_next on this node */
	check_irq_off();
@@ -2808,7 +2776,7 @@ static int cache_grow(struct kmem_cache *cachep,
	slabp->nodeid = nodeid;
	slab_map_pages(cachep, slabp, objp);

	cache_init_objs(cachep, slabp, ctor_flags);
	cache_init_objs(cachep, slabp);

	if (local_flags & __GFP_WAIT)
		local_irq_disable();
@@ -2835,7 +2803,6 @@ failed:
 * Perform extra freeing checks:
 * - detect bad pointers.
 * - POISON/RED_ZONE checking
 * - destructor calls, for caches with POISON+dtor
 */
static void kfree_debugcheck(const void *objp)
{
@@ -2894,12 +2861,6 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
	BUG_ON(objnr >= cachep->num);
	BUG_ON(objp != index_to_obj(cachep, slabp, objnr));

	if (cachep->flags & SLAB_POISON && cachep->dtor) {
		/* we want to cache poison the object,
		 * call the destruction callback
		 */
		cachep->dtor(objp + obj_offset(cachep), cachep, 0);
	}
#ifdef CONFIG_DEBUG_SLAB_LEAK
	slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
#endif
@@ -3099,7 +3060,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
#endif
	objp += obj_offset(cachep);
	if (cachep->ctor && cachep->flags & SLAB_POISON)
		cachep->ctor(objp, cachep, SLAB_CTOR_CONSTRUCTOR);
		cachep->ctor(objp, cachep, 0);
#if ARCH_SLAB_MINALIGN
	if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) {
		printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
mm/slob.c | 53
@@ -35,6 +35,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>

struct slob_block {
	int units;
@@ -53,6 +54,16 @@ struct bigblock {
};
typedef struct bigblock bigblock_t;

/*
 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
 * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
 * the block using call_rcu.
 */
struct slob_rcu {
	struct rcu_head head;
	int size;
};

static slob_t arena = { .next = &arena, .units = 1 };
static slob_t *slobfree = &arena;
static bigblock_t *bigblocks;
@@ -266,9 +277,9 @@ size_t ksize(const void *block)

struct kmem_cache {
	unsigned int size, align;
	unsigned long flags;
	const char *name;
	void (*ctor)(void *, struct kmem_cache *, unsigned long);
	void (*dtor)(void *, struct kmem_cache *, unsigned long);
};

struct kmem_cache *kmem_cache_create(const char *name, size_t size,
@@ -283,8 +294,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
	if (c) {
		c->name = name;
		c->size = size;
		if (flags & SLAB_DESTROY_BY_RCU) {
			/* leave room for rcu footer at the end of object */
			c->size += sizeof(struct slob_rcu);
		}
		c->flags = flags;
		c->ctor = ctor;
		c->dtor = dtor;
		/* ignore alignment unless it's forced */
		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
		if (c->align < align)
@@ -312,7 +327,7 @@ void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags)
		b = (void *)__get_free_pages(flags, get_order(c->size));

	if (c->ctor)
		c->ctor(b, c, SLAB_CTOR_CONSTRUCTOR);
		c->ctor(b, c, 0);

	return b;
}
@@ -328,15 +343,33 @@ void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t flags)
}
EXPORT_SYMBOL(kmem_cache_zalloc);

static void __kmem_cache_free(void *b, int size)
{
	if (size < PAGE_SIZE)
		slob_free(b, size);
	else
		free_pages((unsigned long)b, get_order(size));
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

	__kmem_cache_free(b, slob_rcu->size);
}

void kmem_cache_free(struct kmem_cache *c, void *b)
{
	if (c->dtor)
		c->dtor(b, c, 0);

	if (c->size < PAGE_SIZE)
		slob_free(b, c->size);
	else
		free_pages((unsigned long)b, get_order(c->size));
	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
		struct slob_rcu *slob_rcu;
		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
		INIT_RCU_HEAD(&slob_rcu->head);
		slob_rcu->size = c->size;
		call_rcu(&slob_rcu->head, kmem_rcu_free);
	} else {
		__kmem_cache_free(b, c->size);
	}
}
EXPORT_SYMBOL(kmem_cache_free);
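The footer arithmetic is symmetric and easy to check in isolation; a userspace sketch of the round trip (struct fields simplified, malloc standing in for the slab):

#include <stdio.h>
#include <stdlib.h>

struct slob_rcu { void *head; int size; };	/* simplified stand-in */

int main(void)
{
	int size = 64 + (int)sizeof(struct slob_rcu);	/* object + footer */
	char *b = malloc(size);

	/* kmem_cache_free(): the footer lives at the object's tail. */
	struct slob_rcu *rcu = (struct slob_rcu *)(b + size - sizeof(*rcu));
	rcu->size = size;

	/* kmem_rcu_free(): recover the object start from the footer. */
	char *start = (char *)rcu - (rcu->size - sizeof(*rcu));
	printf("round trip ok: %d\n", start == b);

	free(b);
	return 0;
}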
mm/slub.c | 236
@@ -78,10 +78,18 @@
 *
 * Overloading of page flags that are otherwise used for LRU management.
 *
 * PageActive		The slab is used as a cpu cache. Allocations
 *			may be performed from the slab. The slab is not
 *			on any slab list and cannot be moved onto one.
 *			The cpu slab may be equipped with an additioanl
 * PageActive		The slab is frozen and exempt from list processing.
 *			This means that the slab is dedicated to a purpose
 *			such as satisfying allocations for a specific
 *			processor. Objects may be freed in the slab while
 *			it is frozen but slab_free will then skip the usual
 *			list operations. It is up to the processor holding
 *			the slab to integrate the slab into the slab lists
 *			when the slab is no longer needed.
 *
 *			One use of this flag is to mark slabs that are
 *			used for allocations. Then such a slab becomes a cpu
 *			slab. The cpu slab may be equipped with an additional
 *			lockless_freelist that allows lockless access to
 *			free objects in addition to the regular freelist
 *			that requires the slab lock.
@@ -91,27 +99,42 @@
 * the fast path and disables lockless freelists.
 */

#define FROZEN (1 << PG_active)

#ifdef CONFIG_SLUB_DEBUG
#define SLABDEBUG (1 << PG_error)
#else
#define SLABDEBUG 0
#endif

static inline int SlabFrozen(struct page *page)
{
	return page->flags & FROZEN;
}

static inline void SetSlabFrozen(struct page *page)
{
	page->flags |= FROZEN;
}

static inline void ClearSlabFrozen(struct page *page)
{
	page->flags &= ~FROZEN;
}

static inline int SlabDebug(struct page *page)
{
#ifdef CONFIG_SLUB_DEBUG
	return PageError(page);
#else
	return 0;
#endif
	return page->flags & SLABDEBUG;
}

static inline void SetSlabDebug(struct page *page)
{
#ifdef CONFIG_SLUB_DEBUG
	SetPageError(page);
#endif
	page->flags |= SLABDEBUG;
}

static inline void ClearSlabDebug(struct page *page)
{
#ifdef CONFIG_SLUB_DEBUG
	ClearPageError(page);
#endif
	page->flags &= ~SLABDEBUG;
}
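The open-coded helpers boil down to plain bit tests on page->flags; a userspace sketch (the bit positions are stand-ins for PG_active / PG_error):

#include <stdio.h>

#define PG_active	5		/* stand-in bit positions */
#define PG_error	8
#define FROZEN		(1UL << PG_active)
#define SLABDEBUG	(1UL << PG_error)

struct page { unsigned long flags; };

static int SlabFrozen(struct page *p)       { return p->flags & FROZEN; }
static void SetSlabFrozen(struct page *p)   { p->flags |= FROZEN; }
static void ClearSlabFrozen(struct page *p) { p->flags &= ~FROZEN; }

int main(void)
{
	struct page pg = { 0 };

	SetSlabFrozen(&pg);
	printf("frozen=%d\n", !!SlabFrozen(&pg));
	ClearSlabFrozen(&pg);
	printf("frozen=%d debug=%d\n", !!SlabFrozen(&pg),
	       !!(pg.flags & SLABDEBUG));
	return 0;
}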
/*
@@ -719,6 +742,22 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
	return search == NULL;
}

static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc)
{
	if (s->flags & SLAB_TRACE) {
		printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
			s->name,
			alloc ? "alloc" : "free",
			object, page->inuse,
			page->freelist);

		if (!alloc)
			print_section("Object", (void *)object, s->objsize);

		dump_stack();
	}
}

/*
 * Tracking of fully allocated slabs for debugging purposes.
 */
@@ -743,8 +782,18 @@ static void remove_full(struct kmem_cache *s, struct page *page)
	spin_unlock(&n->list_lock);
}

static int alloc_object_checks(struct kmem_cache *s, struct page *page,
							void *object)
static void setup_object_debug(struct kmem_cache *s, struct page *page,
							void *object)
{
	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
		return;

	init_object(s, object, 0);
	init_tracking(s, object);
}

static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
						void *object, void *addr)
{
	if (!check_slab(s, page))
		goto bad;
@@ -759,13 +808,16 @@ static int alloc_object_checks(struct kmem_cache *s, struct page *page,
		goto bad;
	}

	if (!object)
		return 1;

	if (!check_object(s, page, object, 0))
	if (object && !check_object(s, page, object, 0))
		goto bad;

	/* Success perform special debug activities for allocs */
	if (s->flags & SLAB_STORE_USER)
		set_track(s, object, TRACK_ALLOC, addr);
	trace(s, page, object, 1);
	init_object(s, object, 1);
	return 1;

bad:
	if (PageSlab(page)) {
		/*
@@ -783,8 +835,8 @@ bad:
	return 0;
}

static int free_object_checks(struct kmem_cache *s, struct page *page,
							void *object)
static int free_debug_processing(struct kmem_cache *s, struct page *page,
						void *object, void *addr)
{
	if (!check_slab(s, page))
		goto fail;
@@ -818,29 +870,22 @@ static int free_object_checks(struct kmem_cache *s, struct page *page,
			"to slab %s", object, page->slab->name);
		goto fail;
	}

	/* Special debug activities for freeing objects */
	if (!SlabFrozen(page) && !page->freelist)
		remove_full(s, page);
	if (s->flags & SLAB_STORE_USER)
		set_track(s, object, TRACK_FREE, addr);
	trace(s, page, object, 0);
	init_object(s, object, 0);
	return 1;

fail:
	printk(KERN_ERR "@@@ SLUB: %s slab 0x%p object at 0x%p not freed.\n",
		s->name, page, object);
	return 0;
}

static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc)
{
	if (s->flags & SLAB_TRACE) {
		printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
			s->name,
			alloc ? "alloc" : "free",
			object, page->inuse,
			page->freelist);

		if (!alloc)
			print_section("Object", (void *)object, s->objsize);

		dump_stack();
	}
}

static int __init setup_slub_debug(char *str)
{
	if (!str || *str != '=')
@@ -891,13 +936,13 @@ static void kmem_cache_open_debug_check(struct kmem_cache *s)
	 * On 32 bit platforms the limit is 256k. On 64bit platforms
	 * the limit is 512k.
	 *
	 * Debugging or ctor/dtors may create a need to move the free
	 * Debugging or ctor may create a need to move the free
	 * pointer. Fail if this happens.
	 */
	if (s->size >= 65535 * sizeof(void *)) {
		BUG_ON(s->flags & (SLAB_RED_ZONE | SLAB_POISON |
			SLAB_STORE_USER | SLAB_DESTROY_BY_RCU));
		BUG_ON(s->ctor || s->dtor);
		BUG_ON(s->ctor);
	}
	else
		/*
@@ -909,26 +954,20 @@ static void kmem_cache_open_debug_check(struct kmem_cache *s)
	s->flags |= slub_debug;
}
#else
static inline void setup_object_debug(struct kmem_cache *s,
			struct page *page, void *object) {}

static inline int alloc_object_checks(struct kmem_cache *s,
			struct page *page, void *object) { return 0; }
static inline int alloc_debug_processing(struct kmem_cache *s,
	struct page *page, void *object, void *addr) { return 0; }

static inline int free_object_checks(struct kmem_cache *s,
			struct page *page, void *object) { return 0; }
static inline int free_debug_processing(struct kmem_cache *s,
	struct page *page, void *object, void *addr) { return 0; }

static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
static inline void remove_full(struct kmem_cache *s, struct page *page) {}
static inline void trace(struct kmem_cache *s, struct page *page,
			void *object, int alloc) {}
static inline void init_object(struct kmem_cache *s,
			void *object, int active) {}
static inline void init_tracking(struct kmem_cache *s, void *object) {}
static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
			{ return 1; }
static inline int check_object(struct kmem_cache *s, struct page *page,
			void *object, int active) { return 1; }
static inline void set_track(struct kmem_cache *s, void *object,
			enum track_item alloc, void *addr) {}
static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
static inline void kmem_cache_open_debug_check(struct kmem_cache *s) {}
#define slub_debug 0
#endif
@@ -965,13 +1004,9 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
static void setup_object(struct kmem_cache *s, struct page *page,
				void *object)
{
	if (SlabDebug(page)) {
		init_object(s, object, 0);
		init_tracking(s, object);
	}

	setup_object_debug(s, page, object);
	if (unlikely(s->ctor))
		s->ctor(object, s, SLAB_CTOR_CONSTRUCTOR);
		s->ctor(object, s, 0);
}

static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
@@ -1030,15 +1065,12 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
{
	int pages = 1 << s->order;

	if (unlikely(SlabDebug(page) || s->dtor)) {
	if (unlikely(SlabDebug(page))) {
		void *p;

		slab_pad_check(s, page);
		for_each_object(p, s, page_address(page)) {
			if (s->dtor)
				s->dtor(p, s, 0);
		for_each_object(p, s, page_address(page))
			check_object(s, page, p, 0);
		}
	}

	mod_zone_page_state(page_zone(page),
@@ -1138,11 +1170,12 @@ static void remove_partial(struct kmem_cache *s,
 *
 * Must hold list_lock.
 */
static int lock_and_del_slab(struct kmem_cache_node *n, struct page *page)
static inline int lock_and_freeze_slab(struct kmem_cache_node *n, struct page *page)
{
	if (slab_trylock(page)) {
		list_del(&page->lru);
		n->nr_partial--;
		SetSlabFrozen(page);
		return 1;
	}
	return 0;
@@ -1166,7 +1199,7 @@ static struct page *get_partial_node(struct kmem_cache_node *n)

	spin_lock(&n->list_lock);
	list_for_each_entry(page, &n->partial, lru)
		if (lock_and_del_slab(n, page))
		if (lock_and_freeze_slab(n, page))
			goto out;
	page = NULL;
out:
@@ -1245,10 +1278,11 @@ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
 *
 * On exit the slab lock will have been dropped.
 */
static void putback_slab(struct kmem_cache *s, struct page *page)
static void unfreeze_slab(struct kmem_cache *s, struct page *page)
{
	struct kmem_cache_node *n = get_node(s, page_to_nid(page));

	ClearSlabFrozen(page);
	if (page->inuse) {

		if (page->freelist)
@@ -1299,9 +1333,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page, int cpu)
		page->inuse--;
	}
	s->cpu_slab[cpu] = NULL;
	ClearPageActive(page);

	putback_slab(s, page);
	unfreeze_slab(s, page);
}

static void flush_slab(struct kmem_cache *s, struct page *page, int cpu)
@@ -1392,9 +1424,7 @@ another_slab:
new_slab:
	page = get_partial(s, gfpflags, node);
	if (page) {
have_slab:
		s->cpu_slab[cpu] = page;
		SetPageActive(page);
		goto load_freelist;
	}
@@ -1424,17 +1454,15 @@ have_slab:
			flush_slab(s, s->cpu_slab[cpu], cpu);
		}
		slab_lock(page);
		goto have_slab;
		SetSlabFrozen(page);
		s->cpu_slab[cpu] = page;
		goto load_freelist;
	}
	return NULL;
debug:
	object = page->freelist;
	if (!alloc_object_checks(s, page, object))
	if (!alloc_debug_processing(s, page, object, addr))
		goto another_slab;
	if (s->flags & SLAB_STORE_USER)
		set_track(s, object, TRACK_ALLOC, addr);
	trace(s, page, object, 1);
	init_object(s, object, 1);

	page->inuse++;
	page->freelist = object[page->offset];
@@ -1511,11 +1539,7 @@ checks_ok:
	page->freelist = object;
	page->inuse--;

	if (unlikely(PageActive(page)))
		/*
		 * Cpu slabs are never on partial lists and are
		 * never freed.
		 */
	if (unlikely(SlabFrozen(page)))
		goto out_unlock;

	if (unlikely(!page->inuse))
@@ -1545,14 +1569,8 @@ slab_empty:
	return;

debug:
	if (!free_object_checks(s, page, x))
	if (!free_debug_processing(s, page, x, addr))
		goto out_unlock;
	if (!PageActive(page) && !page->freelist)
		remove_full(s, page);
	if (s->flags & SLAB_STORE_USER)
		set_track(s, x, TRACK_FREE, addr);
	trace(s, page, object, 0);
	init_object(s, object, 0);
	goto checks_ok;
}
@@ -1789,7 +1807,7 @@ static struct kmem_cache_node * __init early_kmem_cache_node_alloc(gfp_t gfpflag
	page->freelist = get_freepointer(kmalloc_caches, n);
	page->inuse++;
	kmalloc_caches->node[node] = n;
	init_object(kmalloc_caches, n, 1);
	setup_object_debug(kmalloc_caches, page, n);
	init_kmem_cache_node(n);
	atomic_long_inc(&n->nr_slabs);
	add_partial(n, page);
@@ -1871,7 +1889,7 @@ static int calculate_sizes(struct kmem_cache *s)
	 * then we should never poison the object itself.
	 */
	if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
			!s->ctor && !s->dtor)
			!s->ctor)
		s->flags |= __OBJECT_POISON;
	else
		s->flags &= ~__OBJECT_POISON;
@@ -1901,7 +1919,7 @@ static int calculate_sizes(struct kmem_cache *s)

#ifdef CONFIG_SLUB_DEBUG
	if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
			s->ctor || s->dtor)) {
			s->ctor)) {
		/*
		 * Relocate free pointer after the object if it is not
		 * permitted to overwrite the first word of the object on
@@ -1970,13 +1988,11 @@ static int calculate_sizes(struct kmem_cache *s)
static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
		const char *name, size_t size,
		size_t align, unsigned long flags,
		void (*ctor)(void *, struct kmem_cache *, unsigned long),
		void (*dtor)(void *, struct kmem_cache *, unsigned long))
		void (*ctor)(void *, struct kmem_cache *, unsigned long))
{
	memset(s, 0, kmem_size);
	s->name = name;
	s->ctor = ctor;
	s->dtor = dtor;
	s->objsize = size;
	s->flags = flags;
	s->align = align;
@@ -2161,7 +2177,7 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,

	down_write(&slub_lock);
	if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
			flags, NULL, NULL))
			flags, NULL))
		goto panic;

	list_add(&s->list, &slab_caches);
@@ -2463,7 +2479,7 @@ static int slab_unmergeable(struct kmem_cache *s)
	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
		return 1;

	if (s->ctor || s->dtor)
	if (s->ctor)
		return 1;

	return 0;
@@ -2471,15 +2487,14 @@ static int slab_unmergeable(struct kmem_cache *s)

static struct kmem_cache *find_mergeable(size_t size,
		size_t align, unsigned long flags,
		void (*ctor)(void *, struct kmem_cache *, unsigned long),
		void (*dtor)(void *, struct kmem_cache *, unsigned long))
		void (*ctor)(void *, struct kmem_cache *, unsigned long))
{
	struct list_head *h;

	if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
		return NULL;

	if (ctor || dtor)
	if (ctor)
		return NULL;

	size = ALIGN(size, sizeof(void *));
@@ -2521,8 +2536,9 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
{
	struct kmem_cache *s;

	BUG_ON(dtor);
	down_write(&slub_lock);
	s = find_mergeable(size, align, flags, ctor, dtor);
	s = find_mergeable(size, align, flags, ctor);
	if (s) {
		s->refcount++;
		/*
@@ -2536,7 +2552,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
	} else {
		s = kmalloc(kmem_size, GFP_KERNEL);
		if (s && kmem_cache_open(s, GFP_KERNEL, name,
				size, align, flags, ctor, dtor)) {
				size, align, flags, ctor)) {
			if (sysfs_slab_add(s)) {
				kfree(s);
				goto err;
@@ -3177,17 +3193,6 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
}
SLAB_ATTR_RO(ctor);

static ssize_t dtor_show(struct kmem_cache *s, char *buf)
{
	if (s->dtor) {
		int n = sprint_symbol(buf, (unsigned long)s->dtor);

		return n + sprintf(buf + n, "\n");
	}
	return 0;
}
SLAB_ATTR_RO(dtor);

static ssize_t aliases_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->refcount - 1);
@@ -3419,7 +3424,6 @@ static struct attribute * slab_attrs[] = {
	&partial_attr.attr,
	&cpu_slabs_attr.attr,
	&ctor_attr.attr,
	&dtor_attr.attr,
	&aliases_attr.attr,
	&align_attr.attr,
	&sanity_checks_attr.attr,
@@ -311,7 +311,7 @@ struct vm_struct *remove_vm_area(void *addr)
	return v;
}

void __vunmap(void *addr, int deallocate_pages)
static void __vunmap(void *addr, int deallocate_pages)
{
	struct vm_struct *area;
@@ -87,7 +87,7 @@ extern int ipxrtr_add_route(__be32 network, struct ipx_interface *intrfc,
			    unsigned char *node);
extern void ipxrtr_del_routes(struct ipx_interface *intrfc);
extern int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx,
			       struct iovec *iov, int len, int noblock);
			       struct iovec *iov, size_t len, int noblock);
extern int ipxrtr_route_skb(struct sk_buff *skb);
extern struct ipx_route *ipxrtr_lookup(__be32 net);
extern int ipxrtr_ioctl(unsigned int cmd, void __user *arg);
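The int-to-size_t widening matters once lengths can exceed (or wrap) an int; a quick userspace illustration (values chosen for 64-bit hosts, where size_t is wider than int):

#include <stdio.h>

int main(void)
{
	size_t len = ((size_t)1 << 32) | 42;	/* > 32 bits, 64-bit hosts */
	int as_int = (int)len;			/* silently truncates to 42 */

	printf("size_t: %zu, as int: %d\n", len, as_int);
	return 0;
}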
@@ -261,8 +261,7 @@ static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
{
	struct socket_alloc *ei = (struct socket_alloc *)foo;

	if (flags & SLAB_CTOR_CONSTRUCTOR)
		inode_init_once(&ei->vfs_inode);
	inode_init_once(&ei->vfs_inode);
}

static int init_inodecache(void)
@@ -828,19 +828,17 @@ init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
	struct rpc_inode *rpci = (struct rpc_inode *) foo;

	if (flags & SLAB_CTOR_CONSTRUCTOR) {
		inode_init_once(&rpci->vfs_inode);
		rpci->private = NULL;
		rpci->nreaders = 0;
		rpci->nwriters = 0;
		INIT_LIST_HEAD(&rpci->in_upcall);
		INIT_LIST_HEAD(&rpci->pipe);
		rpci->pipelen = 0;
		init_waitqueue_head(&rpci->waitq);
		INIT_DELAYED_WORK(&rpci->queue_timeout,
				  rpc_timeout_upcall_queue);
		rpci->ops = NULL;
	}
	inode_init_once(&rpci->vfs_inode);
	rpci->private = NULL;
	rpci->nreaders = 0;
	rpci->nwriters = 0;
	INIT_LIST_HEAD(&rpci->in_upcall);
	INIT_LIST_HEAD(&rpci->pipe);
	rpci->pipelen = 0;
	init_waitqueue_head(&rpci->waitq);
	INIT_DELAYED_WORK(&rpci->queue_timeout,
			  rpc_timeout_upcall_queue);
	rpci->ops = NULL;
}

int register_rpc_pipefs(void)