[PATCH] slab: remove SLAB_KERNEL
SLAB_KERNEL is an alias of GFP_KERNEL.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit e94b176609
parent 54e6ecb239
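For context on why the conversion is safe: SLAB_KERNEL was never a distinct slab flag, only a compatibility alias for the gfp constant, so every hunk below is a mechanical rename. A minimal sketch follows (the alias is assumed to have lived in include/linux/slab.h and is not quoted verbatim; example_alloc is a hypothetical call site, not code from this patch):

```c
/*
 * Legacy alias retired by this patch (assumed to have lived in
 * include/linux/slab.h); it expanded directly to the page-allocator
 * flag, which is why the substitution in every hunk below is
 * behaviour-preserving.
 */
#define SLAB_KERNEL	GFP_KERNEL

/* Hypothetical call site, before and after the conversion: */
static void *example_alloc(struct kmem_cache *cachep)
{
	/* old: return kmem_cache_alloc(cachep, SLAB_KERNEL); */
	return kmem_cache_alloc(cachep, GFP_KERNEL);	/* may sleep */
}
```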
@@ -132,7 +132,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
 goto up_fail;
 }
 
-vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
+vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 if (!vma) {
 ret = -ENOMEM;
 goto up_fail;
@@ -91,7 +91,7 @@ ia64_elf32_init (struct pt_regs *regs)
 * it with privilege level 3 because the IVE uses non-privileged accesses to these
 * tables. IA-32 segmentation is used to protect against IA-32 accesses to them.
 */
-vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 if (vma) {
 memset(vma, 0, sizeof(*vma));
 vma->vm_mm = current->mm;
@@ -117,7 +117,7 @@ ia64_elf32_init (struct pt_regs *regs)
 * code is locked in specific gate page, which is pointed by pretcode
 * when setup_frame_ia32
 */
-vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 if (vma) {
 memset(vma, 0, sizeof(*vma));
 vma->vm_mm = current->mm;
@@ -142,7 +142,7 @@ ia64_elf32_init (struct pt_regs *regs)
 * Install LDT as anonymous memory. This gives us all-zero segment descriptors
 * until a task modifies them via modify_ldt().
 */
-vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 if (vma) {
 memset(vma, 0, sizeof(*vma));
 vma->vm_mm = current->mm;
@@ -214,7 +214,7 @@ ia32_setup_arg_pages (struct linux_binprm *bprm, int executable_stack)
 bprm->loader += stack_base;
 bprm->exec += stack_base;
 
-mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 if (!mpnt)
 return -ENOMEM;
 
@@ -2302,7 +2302,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned lon
 DPRINT(("smpl_buf @%p\n", smpl_buf));
 
 /* allocate vma */
-vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 if (!vma) {
 DPRINT(("Cannot allocate vma\n"));
 goto error_kmem;
@@ -156,7 +156,7 @@ ia64_init_addr_space (void)
 * the problem. When the process attempts to write to the register backing store
 * for the first time, it will get a SEGFAULT in this case.
 */
-vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 if (vma) {
 memset(vma, 0, sizeof(*vma));
 vma->vm_mm = current->mm;
@@ -175,7 +175,7 @@ ia64_init_addr_space (void)
 
 /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
 if (!(current->personality & MMAP_PAGE_ZERO)) {
-vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 if (vma) {
 memset(vma, 0, sizeof(*vma));
 vma->vm_mm = current->mm;
@@ -264,7 +264,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 
 
 /* Allocate a VMA structure and fill it up */
-vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
+vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 if (vma == NULL) {
 rc = -ENOMEM;
 goto fail_mmapsem;
@@ -48,7 +48,7 @@ spufs_alloc_inode(struct super_block *sb)
 {
 struct spufs_inode_info *ei;
 
-ei = kmem_cache_alloc(spufs_inode_cache, SLAB_KERNEL);
+ei = kmem_cache_alloc(spufs_inode_cache, GFP_KERNEL);
 if (!ei)
 return NULL;
 
@@ -97,7 +97,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 goto up_fail;
 }
 
-vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
+vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 if (!vma) {
 ret = -ENOMEM;
 goto up_fail;
@@ -351,7 +351,7 @@ int ia32_setup_arg_pages(struct linux_binprm *bprm, unsigned long stack_top,
 bprm->loader += stack_base;
 bprm->exec += stack_base;
 
-mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 if (!mpnt)
 return -ENOMEM;
 
@@ -49,7 +49,7 @@ int syscall32_setup_pages(struct linux_binprm *bprm, int exstack)
 struct mm_struct *mm = current->mm;
 int ret;
 
-vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 if (!vma)
 return -ENOMEM;
 
@@ -820,7 +820,7 @@ he_init_group(struct he_dev *he_dev, int group)
 void *cpuaddr;
 
 #ifdef USE_RBPS_POOL
-cpuaddr = pci_pool_alloc(he_dev->rbps_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle);
+cpuaddr = pci_pool_alloc(he_dev->rbps_pool, GFP_KERNEL|SLAB_DMA, &dma_handle);
 if (cpuaddr == NULL)
 return -ENOMEM;
 #else
@@ -884,7 +884,7 @@ he_init_group(struct he_dev *he_dev, int group)
 void *cpuaddr;
 
 #ifdef USE_RBPL_POOL
-cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle);
+cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|SLAB_DMA, &dma_handle);
 if (cpuaddr == NULL)
 return -ENOMEM;
 #else
@@ -126,7 +126,7 @@ dma_pool_create (const char *name, struct device *dev,
 } else if (allocation < size)
 return NULL;
 
-if (!(retval = kmalloc (sizeof *retval, SLAB_KERNEL)))
+if (!(retval = kmalloc (sizeof *retval, GFP_KERNEL)))
 return retval;
 
 strlcpy (retval->name, name, sizeof retval->name);
@@ -636,10 +636,10 @@ static int ioat_self_test(struct ioat_device *device)
 dma_cookie_t cookie;
 int err = 0;
 
-src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, SLAB_KERNEL);
+src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
 if (!src)
 return -ENOMEM;
-dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, SLAB_KERNEL);
+dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
 if (!dest) {
 kfree(src);
 return -ENOMEM;
@@ -123,7 +123,7 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
 int i;
 int hostnum = 0;
 
-h = kzalloc(sizeof(*h) + extra, SLAB_KERNEL);
+h = kzalloc(sizeof(*h) + extra, GFP_KERNEL);
 if (!h)
 return NULL;
 
@@ -1225,7 +1225,7 @@ static int ohci_iso_recv_init(struct hpsb_iso *iso)
 int ctx;
 int ret = -ENOMEM;
 
-recv = kmalloc(sizeof(*recv), SLAB_KERNEL);
+recv = kmalloc(sizeof(*recv), GFP_KERNEL);
 if (!recv)
 return -ENOMEM;
 
@@ -1918,7 +1918,7 @@ static int ohci_iso_xmit_init(struct hpsb_iso *iso)
 int ctx;
 int ret = -ENOMEM;
 
-xmit = kmalloc(sizeof(*xmit), SLAB_KERNEL);
+xmit = kmalloc(sizeof(*xmit), GFP_KERNEL);
 if (!xmit)
 return -ENOMEM;
 
@@ -3021,7 +3021,7 @@ alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
 return -ENOMEM;
 }
 
-d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
+d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
 OHCI_DMA_ALLOC("pool dma_rcv prg[%d]", i);
 
 if (d->prg_cpu[i] != NULL) {
@@ -3117,7 +3117,7 @@ alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
 OHCI_DMA_ALLOC("dma_rcv prg pool");
 
 for (i = 0; i < d->num_desc; i++) {
-d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
+d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
 OHCI_DMA_ALLOC("pool dma_trm prg[%d]", i);
 
 if (d->prg_cpu[i] != NULL) {
@@ -1428,7 +1428,7 @@ static int __devinit add_card(struct pci_dev *dev,
 struct i2c_algo_bit_data i2c_adapter_data;
 
 error = -ENOMEM;
-i2c_ad = kmalloc(sizeof(*i2c_ad), SLAB_KERNEL);
+i2c_ad = kmalloc(sizeof(*i2c_ad), GFP_KERNEL);
 if (!i2c_ad) FAIL("failed to allocate I2C adapter memory");
 
 memcpy(i2c_ad, &bit_ops, sizeof(struct i2c_adapter));
@@ -112,7 +112,7 @@ static struct pending_request *__alloc_pending_request(gfp_t flags)
 
 static inline struct pending_request *alloc_pending_request(void)
 {
-return __alloc_pending_request(SLAB_KERNEL);
+return __alloc_pending_request(GFP_KERNEL);
 }
 
 static void free_pending_request(struct pending_request *req)
@@ -1737,7 +1737,7 @@ static int arm_register(struct file_info *fi, struct pending_request *req)
 return (-EINVAL);
 }
 /* addr-list-entry for fileinfo */
-addr = kmalloc(sizeof(*addr), SLAB_KERNEL);
+addr = kmalloc(sizeof(*addr), GFP_KERNEL);
 if (!addr) {
 req->req.length = 0;
 return (-ENOMEM);
@@ -2103,7 +2103,7 @@ static int write_phypacket(struct file_info *fi, struct pending_request *req)
 static int get_config_rom(struct file_info *fi, struct pending_request *req)
 {
 int ret = sizeof(struct raw1394_request);
-quadlet_t *data = kmalloc(req->req.length, SLAB_KERNEL);
+quadlet_t *data = kmalloc(req->req.length, GFP_KERNEL);
 int status;
 
 if (!data)
@@ -2133,7 +2133,7 @@ static int get_config_rom(struct file_info *fi, struct pending_request *req)
 static int update_config_rom(struct file_info *fi, struct pending_request *req)
 {
 int ret = sizeof(struct raw1394_request);
-quadlet_t *data = kmalloc(req->req.length, SLAB_KERNEL);
+quadlet_t *data = kmalloc(req->req.length, GFP_KERNEL);
 if (!data)
 return -ENOMEM;
 if (copy_from_user(data, int2ptr(req->req.sendb), req->req.length)) {
@@ -2779,7 +2779,7 @@ static int raw1394_open(struct inode *inode, struct file *file)
 {
 struct file_info *fi;
 
-fi = kzalloc(sizeof(*fi), SLAB_KERNEL);
+fi = kzalloc(sizeof(*fi), GFP_KERNEL);
 if (!fi)
 return -ENOMEM;
 
@@ -57,7 +57,7 @@ struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
 struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
 ib_device);
 
-av = kmem_cache_alloc(av_cache, SLAB_KERNEL);
+av = kmem_cache_alloc(av_cache, GFP_KERNEL);
 if (!av) {
 ehca_err(pd->device, "Out of memory pd=%p ah_attr=%p",
 pd, ah_attr);
@@ -134,7 +134,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
 if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
 return ERR_PTR(-EINVAL);
 
-my_cq = kmem_cache_alloc(cq_cache, SLAB_KERNEL);
+my_cq = kmem_cache_alloc(cq_cache, GFP_KERNEL);
 if (!my_cq) {
 ehca_err(device, "Out of memory for ehca_cq struct device=%p",
 device);
@@ -108,7 +108,7 @@ static struct kmem_cache *ctblk_cache = NULL;
 
 void *ehca_alloc_fw_ctrlblock(void)
 {
-void *ret = kmem_cache_zalloc(ctblk_cache, SLAB_KERNEL);
+void *ret = kmem_cache_zalloc(ctblk_cache, GFP_KERNEL);
 if (!ret)
 ehca_gen_err("Out of memory for ctblk");
 return ret;
@@ -53,7 +53,7 @@ static struct ehca_mr *ehca_mr_new(void)
 {
 struct ehca_mr *me;
 
-me = kmem_cache_alloc(mr_cache, SLAB_KERNEL);
+me = kmem_cache_alloc(mr_cache, GFP_KERNEL);
 if (me) {
 memset(me, 0, sizeof(struct ehca_mr));
 spin_lock_init(&me->mrlock);
@@ -72,7 +72,7 @@ static struct ehca_mw *ehca_mw_new(void)
 {
 struct ehca_mw *me;
 
-me = kmem_cache_alloc(mw_cache, SLAB_KERNEL);
+me = kmem_cache_alloc(mw_cache, GFP_KERNEL);
 if (me) {
 memset(me, 0, sizeof(struct ehca_mw));
 spin_lock_init(&me->mwlock);
@@ -50,7 +50,7 @@ struct ib_pd *ehca_alloc_pd(struct ib_device *device,
 {
 struct ehca_pd *pd;
 
-pd = kmem_cache_alloc(pd_cache, SLAB_KERNEL);
+pd = kmem_cache_alloc(pd_cache, GFP_KERNEL);
 if (!pd) {
 ehca_err(device, "device=%p context=%p out of memory",
 device, context);
@@ -450,7 +450,7 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
 if (pd->uobject && udata)
 context = pd->uobject->context;
 
-my_qp = kmem_cache_alloc(qp_cache, SLAB_KERNEL);
+my_qp = kmem_cache_alloc(qp_cache, GFP_KERNEL);
 if (!my_qp) {
 ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
 return ERR_PTR(-ENOMEM);
@@ -189,7 +189,7 @@ static int ads7846_read12_ser(struct device *dev, unsigned command)
 {
 struct spi_device *spi = to_spi_device(dev);
 struct ads7846 *ts = dev_get_drvdata(dev);
-struct ser_req *req = kzalloc(sizeof *req, SLAB_KERNEL);
+struct ser_req *req = kzalloc(sizeof *req, GFP_KERNEL);
 int status;
 int sample;
 int i;
@@ -2218,21 +2218,21 @@ static int gigaset_probe(struct usb_interface *interface,
 * - three for the different uses of the default control pipe
 * - three for each isochronous pipe
 */
-if (!(ucs->urb_int_in = usb_alloc_urb(0, SLAB_KERNEL)) ||
-!(ucs->urb_cmd_in = usb_alloc_urb(0, SLAB_KERNEL)) ||
-!(ucs->urb_cmd_out = usb_alloc_urb(0, SLAB_KERNEL)) ||
-!(ucs->urb_ctrl = usb_alloc_urb(0, SLAB_KERNEL)))
+if (!(ucs->urb_int_in = usb_alloc_urb(0, GFP_KERNEL)) ||
+!(ucs->urb_cmd_in = usb_alloc_urb(0, GFP_KERNEL)) ||
+!(ucs->urb_cmd_out = usb_alloc_urb(0, GFP_KERNEL)) ||
+!(ucs->urb_ctrl = usb_alloc_urb(0, GFP_KERNEL)))
 goto allocerr;
 
 for (j = 0; j < 2; ++j) {
 ubc = cs->bcs[j].hw.bas;
 for (i = 0; i < BAS_OUTURBS; ++i)
 if (!(ubc->isoouturbs[i].urb =
-usb_alloc_urb(BAS_NUMFRAMES, SLAB_KERNEL)))
+usb_alloc_urb(BAS_NUMFRAMES, GFP_KERNEL)))
 goto allocerr;
 for (i = 0; i < BAS_INURBS; ++i)
 if (!(ubc->isoinurbs[i] =
-usb_alloc_urb(BAS_NUMFRAMES, SLAB_KERNEL)))
+usb_alloc_urb(BAS_NUMFRAMES, GFP_KERNEL)))
 goto allocerr;
 }
 
@@ -2246,7 +2246,7 @@ static int gigaset_probe(struct usb_interface *interface,
 (endpoint->bEndpointAddress) & 0x0f),
 ucs->int_in_buf, 3, read_int_callback, cs,
 endpoint->bInterval);
-if ((rc = usb_submit_urb(ucs->urb_int_in, SLAB_KERNEL)) != 0) {
+if ((rc = usb_submit_urb(ucs->urb_int_in, GFP_KERNEL)) != 0) {
 dev_err(cs->dev, "could not submit interrupt URB: %s\n",
 get_usb_rcmsg(rc));
 goto error;
@@ -763,7 +763,7 @@ static int gigaset_probe(struct usb_interface *interface,
 goto error;
 }
 
-ucs->bulk_out_urb = usb_alloc_urb(0, SLAB_KERNEL);
+ucs->bulk_out_urb = usb_alloc_urb(0, GFP_KERNEL);
 if (!ucs->bulk_out_urb) {
 dev_err(cs->dev, "Couldn't allocate bulk_out_urb\n");
 retval = -ENOMEM;
@@ -774,7 +774,7 @@ static int gigaset_probe(struct usb_interface *interface,
 
 atomic_set(&ucs->busy, 0);
 
-ucs->read_urb = usb_alloc_urb(0, SLAB_KERNEL);
+ucs->read_urb = usb_alloc_urb(0, GFP_KERNEL);
 if (!ucs->read_urb) {
 dev_err(cs->dev, "No free urbs available\n");
 retval = -ENOMEM;
@@ -797,7 +797,7 @@ static int gigaset_probe(struct usb_interface *interface,
 gigaset_read_int_callback,
 cs->inbuf + 0, endpoint->bInterval);
 
-retval = usb_submit_urb(ucs->read_urb, SLAB_KERNEL);
+retval = usb_submit_urb(ucs->read_urb, GFP_KERNEL);
 if (retval) {
 dev_err(cs->dev, "Could not submit URB (error %d)\n", -retval);
 goto error;
@@ -287,7 +287,7 @@ static int cinergyt2_alloc_stream_urbs (struct cinergyt2 *cinergyt2)
 int i;
 
 cinergyt2->streambuf = usb_buffer_alloc(cinergyt2->udev, STREAM_URB_COUNT*STREAM_BUF_SIZE,
-SLAB_KERNEL, &cinergyt2->streambuf_dmahandle);
+GFP_KERNEL, &cinergyt2->streambuf_dmahandle);
 if (!cinergyt2->streambuf) {
 dprintk(1, "failed to alloc consistent stream memory area, bailing out!\n");
 return -ENOMEM;
@@ -451,7 +451,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
 return -ENODEV;
 }
 
-flash = kzalloc(sizeof *flash, SLAB_KERNEL);
+flash = kzalloc(sizeof *flash, GFP_KERNEL);
 if (!flash)
 return -ENOMEM;
 
@@ -6940,7 +6940,7 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
 return -ENOMEM;
 
 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
-ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr);
+ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
 
 if (!ipr_cmd) {
 ipr_free_cmd_blks(ioa_cfg);
@@ -360,7 +360,7 @@ spi_alloc_master(struct device *dev, unsigned size)
 if (!dev)
 return NULL;
 
-master = kzalloc(size + sizeof *master, SLAB_KERNEL);
+master = kzalloc(size + sizeof *master, GFP_KERNEL);
 if (!master)
 return NULL;
 
@@ -607,7 +607,7 @@ static int __init spi_init(void)
 {
 int status;
 
-buf = kmalloc(SPI_BUFSIZ, SLAB_KERNEL);
+buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
 if (!buf) {
 status = -ENOMEM;
 goto err0;
@@ -196,7 +196,7 @@ int spi_bitbang_setup(struct spi_device *spi)
 return -EINVAL;
 
 if (!cs) {
-cs = kzalloc(sizeof *cs, SLAB_KERNEL);
+cs = kzalloc(sizeof *cs, GFP_KERNEL);
 if (!cs)
 return -ENOMEM;
 spi->controller_state = cs;
@@ -2371,7 +2371,7 @@ check_highspeed (struct usb_hub *hub, struct usb_device *udev, int port1)
 struct usb_qualifier_descriptor *qual;
 int status;
 
-qual = kmalloc (sizeof *qual, SLAB_KERNEL);
+qual = kmalloc (sizeof *qual, GFP_KERNEL);
 if (qual == NULL)
 return;
 
@@ -2922,7 +2922,7 @@ static int config_descriptors_changed(struct usb_device *udev)
 if (len < le16_to_cpu(udev->config[index].desc.wTotalLength))
 len = le16_to_cpu(udev->config[index].desc.wTotalLength);
 }
-buf = kmalloc (len, SLAB_KERNEL);
+buf = kmalloc (len, GFP_KERNEL);
 if (buf == NULL) {
 dev_err(&udev->dev, "no mem to re-read configs after reset\n");
 /* assume the worst */
@@ -1236,7 +1236,7 @@ autoconf_fail:
 
 
 /* ok, we made sense of the hardware ... */
-dev = kzalloc(sizeof(*dev), SLAB_KERNEL);
+dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 if (!dev) {
 return -ENOMEM;
 }
@@ -1864,7 +1864,7 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 }
 
 /* alloc, and start init */
-dev = kmalloc (sizeof *dev, SLAB_KERNEL);
+dev = kmalloc (sizeof *dev, GFP_KERNEL);
 if (dev == NULL){
 pr_debug("enomem %s\n", pci_name(pdev));
 retval = -ENOMEM;
@@ -412,7 +412,7 @@ ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
 /* FIXME readahead for O_NONBLOCK and poll(); careful with ZLPs */
 
 value = -ENOMEM;
-kbuf = kmalloc (len, SLAB_KERNEL);
+kbuf = kmalloc (len, GFP_KERNEL);
 if (unlikely (!kbuf))
 goto free1;
 
@@ -456,7 +456,7 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
 /* FIXME writebehind for O_NONBLOCK and poll(), qlen = 1 */
 
 value = -ENOMEM;
-kbuf = kmalloc (len, SLAB_KERNEL);
+kbuf = kmalloc (len, GFP_KERNEL);
 if (!kbuf)
 goto free1;
 if (copy_from_user (kbuf, buf, len)) {
@@ -1898,7 +1898,7 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
 buf += 4;
 length -= 4;
 
-kbuf = kmalloc (length, SLAB_KERNEL);
+kbuf = kmalloc (length, GFP_KERNEL);
 if (!kbuf)
 return -ENOMEM;
 if (copy_from_user (kbuf, buf, length)) {
@@ -2861,7 +2861,7 @@ static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id)
 }
 
 /* alloc, and start init */
-dev = kzalloc (sizeof *dev, SLAB_KERNEL);
+dev = kzalloc (sizeof *dev, GFP_KERNEL);
 if (dev == NULL){
 retval = -ENOMEM;
 goto done;
@@ -2581,7 +2581,7 @@ omap_udc_setup(struct platform_device *odev, struct otg_transceiver *xceiv)
 /* UDC_PULLUP_EN gates the chip clock */
 // OTG_SYSCON_1_REG |= DEV_IDLE_EN;
 
-udc = kzalloc(sizeof(*udc), SLAB_KERNEL);
+udc = kzalloc(sizeof(*udc), GFP_KERNEL);
 if (!udc)
 return -ENOMEM;
 
@@ -1190,7 +1190,7 @@ autoconf_fail:
 
 
 /* ok, we made sense of the hardware ... */
-dev = kzalloc(sizeof(*dev), SLAB_KERNEL);
+dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 if (!dev)
 return -ENOMEM;
 spin_lock_init (&dev->lock);
@@ -188,7 +188,7 @@ static DEFINE_TIMER(bulk_eot_timer, NULL, 0, 0);
 #define CHECK_ALIGN(x) if (((__u32)(x)) & 0x00000003) \
 {panic("Alignment check (DWORD) failed at %s:%s:%d\n", __FILE__, __FUNCTION__, __LINE__);}
 
-#define SLAB_FLAG (in_interrupt() ? GFP_ATOMIC : SLAB_KERNEL)
+#define SLAB_FLAG (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL)
 #define KMALLOC_FLAG (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL)
 
 /* Most helpful debugging aid */
@@ -134,7 +134,7 @@ static int isp1301_attach(struct i2c_adapter *adap, int addr, int kind)
 {
 struct i2c_client *c;
 
-c = (struct i2c_client *)kzalloc(sizeof(*c), SLAB_KERNEL);
+c = (struct i2c_client *)kzalloc(sizeof(*c), GFP_KERNEL);
 
 if (!c)
 return -ENOMEM;
@@ -152,7 +152,7 @@ static int usb_acecad_probe(struct usb_interface *intf, const struct usb_device_
 if (!acecad || !input_dev)
 goto fail1;
 
-acecad->data = usb_buffer_alloc(dev, 8, SLAB_KERNEL, &acecad->data_dma);
+acecad->data = usb_buffer_alloc(dev, 8, GFP_KERNEL, &acecad->data_dma);
 if (!acecad->data)
 goto fail1;
 
@@ -680,7 +680,7 @@ static int usbtouch_probe(struct usb_interface *intf,
 type->process_pkt = usbtouch_process_pkt;
 
 usbtouch->data = usb_buffer_alloc(udev, type->rept_size,
-SLAB_KERNEL, &usbtouch->data_dma);
+GFP_KERNEL, &usbtouch->data_dma);
 if (!usbtouch->data)
 goto out_free;
 
@@ -213,7 +213,7 @@ static struct urb *simple_alloc_urb (
 
 if (bytes < 0)
 return NULL;
-urb = usb_alloc_urb (0, SLAB_KERNEL);
+urb = usb_alloc_urb (0, GFP_KERNEL);
 if (!urb)
 return urb;
 usb_fill_bulk_urb (urb, udev, pipe, NULL, bytes, simple_callback, NULL);
@@ -223,7 +223,7 @@ static struct urb *simple_alloc_urb (
 urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
 if (usb_pipein (pipe))
 urb->transfer_flags |= URB_SHORT_NOT_OK;
-urb->transfer_buffer = usb_buffer_alloc (udev, bytes, SLAB_KERNEL,
+urb->transfer_buffer = usb_buffer_alloc (udev, bytes, GFP_KERNEL,
 &urb->transfer_dma);
 if (!urb->transfer_buffer) {
 usb_free_urb (urb);
@@ -315,7 +315,7 @@ static int simple_io (
 init_completion (&completion);
 if (usb_pipeout (urb->pipe))
 simple_fill_buf (urb);
-if ((retval = usb_submit_urb (urb, SLAB_KERNEL)) != 0)
+if ((retval = usb_submit_urb (urb, GFP_KERNEL)) != 0)
 break;
 
 /* NOTE: no timeouts; can't be broken out of by interrupt */
@@ -374,7 +374,7 @@ alloc_sglist (int nents, int max, int vary)
 unsigned i;
 unsigned size = max;
 
-sg = kmalloc (nents * sizeof *sg, SLAB_KERNEL);
+sg = kmalloc (nents * sizeof *sg, GFP_KERNEL);
 if (!sg)
 return NULL;
 
@@ -382,7 +382,7 @@ alloc_sglist (int nents, int max, int vary)
 char *buf;
 unsigned j;
 
-buf = kzalloc (size, SLAB_KERNEL);
+buf = kzalloc (size, GFP_KERNEL);
 if (!buf) {
 free_sglist (sg, i);
 return NULL;
@@ -428,7 +428,7 @@ static int perform_sglist (
 (udev->speed == USB_SPEED_HIGH)
 ? (INTERRUPT_RATE << 3)
 : INTERRUPT_RATE,
-sg, nents, 0, SLAB_KERNEL);
+sg, nents, 0, GFP_KERNEL);
 
 if (retval)
 break;
@@ -855,7 +855,7 @@ test_ctrl_queue (struct usbtest_dev *dev, struct usbtest_param *param)
 * as with bulk/intr sglists, sglen is the queue depth; it also
 * controls which subtests run (more tests than sglen) or rerun.
 */
-urb = kcalloc(param->sglen, sizeof(struct urb *), SLAB_KERNEL);
+urb = kcalloc(param->sglen, sizeof(struct urb *), GFP_KERNEL);
 if (!urb)
 return -ENOMEM;
 for (i = 0; i < param->sglen; i++) {
@@ -981,7 +981,7 @@ test_ctrl_queue (struct usbtest_dev *dev, struct usbtest_param *param)
 if (!u)
 goto cleanup;
 
-reqp = usb_buffer_alloc (udev, sizeof *reqp, SLAB_KERNEL,
+reqp = usb_buffer_alloc (udev, sizeof *reqp, GFP_KERNEL,
 &u->setup_dma);
 if (!reqp)
 goto cleanup;
@@ -1067,7 +1067,7 @@ static int unlink1 (struct usbtest_dev *dev, int pipe, int size, int async)
 * FIXME want additional tests for when endpoint is STALLing
 * due to errors, or is just NAKing requests.
 */
-if ((retval = usb_submit_urb (urb, SLAB_KERNEL)) != 0) {
+if ((retval = usb_submit_urb (urb, GFP_KERNEL)) != 0) {
 dev_dbg (&dev->intf->dev, "submit fail %d\n", retval);
 return retval;
 }
@@ -1251,7 +1251,7 @@ static int ctrl_out (struct usbtest_dev *dev,
 if (length < 1 || length > 0xffff || vary >= length)
 return -EINVAL;
 
-buf = kmalloc(length, SLAB_KERNEL);
+buf = kmalloc(length, GFP_KERNEL);
 if (!buf)
 return -ENOMEM;
 
@@ -1403,7 +1403,7 @@ static struct urb *iso_alloc_urb (
 maxp *= 1 + (0x3 & (le16_to_cpu(desc->wMaxPacketSize) >> 11));
 packets = (bytes + maxp - 1) / maxp;
 
-urb = usb_alloc_urb (packets, SLAB_KERNEL);
+urb = usb_alloc_urb (packets, GFP_KERNEL);
 if (!urb)
 return urb;
 urb->dev = udev;
@@ -1411,7 +1411,7 @@ static struct urb *iso_alloc_urb (
 
 urb->number_of_packets = packets;
 urb->transfer_buffer_length = bytes;
-urb->transfer_buffer = usb_buffer_alloc (udev, bytes, SLAB_KERNEL,
+urb->transfer_buffer = usb_buffer_alloc (udev, bytes, GFP_KERNEL,
 &urb->transfer_dma);
 if (!urb->transfer_buffer) {
 usb_free_urb (urb);
@@ -1900,7 +1900,7 @@ usbtest_probe (struct usb_interface *intf, const struct usb_device_id *id)
 }
 #endif
 
-dev = kzalloc(sizeof(*dev), SLAB_KERNEL);
+dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 if (!dev)
 return -ENOMEM;
 info = (struct usbtest_info *) id->driver_info;
@@ -1910,7 +1910,7 @@ usbtest_probe (struct usb_interface *intf, const struct usb_device_id *id)
 dev->intf = intf;
 
 /* cacheline-aligned scratch for i/o */
-if ((dev->buf = kmalloc (TBUF_SIZE, SLAB_KERNEL)) == NULL) {
+if ((dev->buf = kmalloc (TBUF_SIZE, GFP_KERNEL)) == NULL) {
 kfree (dev);
 return -ENOMEM;
 }
@@ -469,7 +469,7 @@ static void rndis_unbind(struct usbnet *dev, struct usb_interface *intf)
 struct rndis_halt *halt;
 
 /* try to clear any rndis state/activity (no i/o from stack!) */
-halt = kcalloc(1, sizeof *halt, SLAB_KERNEL);
+halt = kcalloc(1, sizeof *halt, GFP_KERNEL);
 if (halt) {
 halt->msg_type = RNDIS_MSG_HALT;
 halt->msg_len = ccpu2(sizeof *halt);
@@ -179,9 +179,9 @@ static int init_status (struct usbnet *dev, struct usb_interface *intf)
 period = max ((int) dev->status->desc.bInterval,
 (dev->udev->speed == USB_SPEED_HIGH) ? 7 : 3);
 
-buf = kmalloc (maxp, SLAB_KERNEL);
+buf = kmalloc (maxp, GFP_KERNEL);
 if (buf) {
-dev->interrupt = usb_alloc_urb (0, SLAB_KERNEL);
+dev->interrupt = usb_alloc_urb (0, GFP_KERNEL);
 if (!dev->interrupt) {
 kfree (buf);
 return -ENOMEM;
@@ -217,7 +217,7 @@ static kmem_cache_t *adfs_inode_cachep;
 static struct inode *adfs_alloc_inode(struct super_block *sb)
 {
 struct adfs_inode_info *ei;
-ei = (struct adfs_inode_info *)kmem_cache_alloc(adfs_inode_cachep, SLAB_KERNEL);
+ei = (struct adfs_inode_info *)kmem_cache_alloc(adfs_inode_cachep, GFP_KERNEL);
 if (!ei)
 return NULL;
 return &ei->vfs_inode;
@@ -71,7 +71,7 @@ static kmem_cache_t * affs_inode_cachep;
 static struct inode *affs_alloc_inode(struct super_block *sb)
 {
 struct affs_inode_info *ei;
-ei = (struct affs_inode_info *)kmem_cache_alloc(affs_inode_cachep, SLAB_KERNEL);
+ei = (struct affs_inode_info *)kmem_cache_alloc(affs_inode_cachep, GFP_KERNEL);
 if (!ei)
 return NULL;
 ei->vfs_inode.i_version = 1;
@@ -412,7 +412,7 @@ static struct inode *afs_alloc_inode(struct super_block *sb)
 struct afs_vnode *vnode;
 
 vnode = (struct afs_vnode *)
-kmem_cache_alloc(afs_inode_cachep, SLAB_KERNEL);
+kmem_cache_alloc(afs_inode_cachep, GFP_KERNEL);
 if (!vnode)
 return NULL;
 
@@ -277,7 +277,7 @@ befs_alloc_inode(struct super_block *sb)
 {
 struct befs_inode_info *bi;
 bi = (struct befs_inode_info *)kmem_cache_alloc(befs_inode_cachep,
-SLAB_KERNEL);
+GFP_KERNEL);
 if (!bi)
 return NULL;
 return &bi->vfs_inode;
@@ -233,7 +233,7 @@ static kmem_cache_t * bfs_inode_cachep;
 static struct inode *bfs_alloc_inode(struct super_block *sb)
 {
 struct bfs_inode_info *bi;
-bi = kmem_cache_alloc(bfs_inode_cachep, SLAB_KERNEL);
+bi = kmem_cache_alloc(bfs_inode_cachep, GFP_KERNEL);
 if (!bi)
 return NULL;
 return &bi->vfs_inode;
@@ -239,7 +239,7 @@ static kmem_cache_t * bdev_cachep __read_mostly;
 
 static struct inode *bdev_alloc_inode(struct super_block *sb)
 {
-struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, SLAB_KERNEL);
+struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
 if (!ei)
 return NULL;
 return &ei->vfs_inode;
@@ -245,7 +245,7 @@ static struct inode *
 cifs_alloc_inode(struct super_block *sb)
 {
 struct cifsInodeInfo *cifs_inode;
-cifs_inode = kmem_cache_alloc(cifs_inode_cachep, SLAB_KERNEL);
+cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
 if (!cifs_inode)
 return NULL;
 cifs_inode->cifsAttrs = 0x20; /* default */
@@ -153,7 +153,7 @@ cifs_buf_get(void)
 albeit slightly larger than necessary and maxbuffersize
 defaults to this and can not be bigger */
 ret_buf =
-(struct smb_hdr *) mempool_alloc(cifs_req_poolp, SLAB_KERNEL | GFP_NOFS);
+(struct smb_hdr *) mempool_alloc(cifs_req_poolp, GFP_KERNEL | GFP_NOFS);
 
 /* clear the first few header bytes */
 /* for most paths, more is cleared in header_assemble */
@@ -192,7 +192,7 @@ cifs_small_buf_get(void)
 albeit slightly larger than necessary and maxbuffersize
 defaults to this and can not be bigger */
 ret_buf =
-(struct smb_hdr *) mempool_alloc(cifs_sm_req_poolp, SLAB_KERNEL | GFP_NOFS);
+(struct smb_hdr *) mempool_alloc(cifs_sm_req_poolp, GFP_KERNEL | GFP_NOFS);
 if (ret_buf) {
 /* No need to clear memory here, cleared in header assemble */
 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
@@ -51,7 +51,7 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct cifsSesInfo *ses)
 }
 
 temp = (struct mid_q_entry *) mempool_alloc(cifs_mid_poolp,
-SLAB_KERNEL | GFP_NOFS);
+GFP_KERNEL | GFP_NOFS);
 if (temp == NULL)
 return temp;
 else {
@@ -118,7 +118,7 @@ AllocOplockQEntry(struct inode * pinode, __u16 fid, struct cifsTconInfo * tcon)
 return NULL;
 }
 temp = (struct oplock_q_entry *) kmem_cache_alloc(cifs_oplock_cachep,
-SLAB_KERNEL);
+GFP_KERNEL);
 if (temp == NULL)
 return temp;
 else {
@@ -43,7 +43,7 @@ static kmem_cache_t * coda_inode_cachep;
 static struct inode *coda_alloc_inode(struct super_block *sb)
 {
 struct coda_inode_info *ei;
-ei = (struct coda_inode_info *)kmem_cache_alloc(coda_inode_cachep, SLAB_KERNEL);
+ei = (struct coda_inode_info *)kmem_cache_alloc(coda_inode_cachep, GFP_KERNEL);
 if (!ei)
 return NULL;
 memset(&ei->c_fid, 0, sizeof(struct CodaFid));
@@ -77,7 +77,7 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
 inode = filp->f_dentry->d_inode;
 if (!S_ISDIR(inode->i_mode))
 return -ENOTDIR;
-dn = kmem_cache_alloc(dn_cache, SLAB_KERNEL);
+dn = kmem_cache_alloc(dn_cache, GFP_KERNEL);
 if (dn == NULL)
 return -ENOMEM;
 spin_lock(&inode->i_lock);
@@ -628,7 +628,7 @@ int ecryptfs_decrypt_page(struct file *file, struct page *page)
 num_extents_per_page = PAGE_CACHE_SIZE / crypt_stat->extent_size;
 base_extent = (page->index * num_extents_per_page);
 lower_page_virt = kmem_cache_alloc(ecryptfs_lower_page_cache,
-SLAB_KERNEL);
+GFP_KERNEL);
 if (!lower_page_virt) {
 rc = -ENOMEM;
 ecryptfs_printk(KERN_ERR, "Error getting page for encrypted "
@@ -250,7 +250,7 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
 int lower_flags;
 
 /* Released in ecryptfs_release or end of function if failure */
-file_info = kmem_cache_alloc(ecryptfs_file_info_cache, SLAB_KERNEL);
+file_info = kmem_cache_alloc(ecryptfs_file_info_cache, GFP_KERNEL);
 ecryptfs_set_file_private(file, file_info);
 if (!file_info) {
 ecryptfs_printk(KERN_ERR,
@@ -369,7 +369,7 @@ static struct dentry *ecryptfs_lookup(struct inode *dir, struct dentry *dentry,
 BUG_ON(!atomic_read(&lower_dentry->d_count));
 ecryptfs_set_dentry_private(dentry,
 kmem_cache_alloc(ecryptfs_dentry_info_cache,
-SLAB_KERNEL));
+GFP_KERNEL));
 if (!ecryptfs_dentry_to_private(dentry)) {
 rc = -ENOMEM;
 ecryptfs_printk(KERN_ERR, "Out of memory whilst attempting "
@@ -795,7 +795,7 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
 /* Released at out_free: label */
 ecryptfs_set_file_private(&fake_ecryptfs_file,
 kmem_cache_alloc(ecryptfs_file_info_cache,
-SLAB_KERNEL));
+GFP_KERNEL));
 if (unlikely(!ecryptfs_file_to_private(&fake_ecryptfs_file))) {
 rc = -ENOMEM;
 goto out;
@@ -207,7 +207,7 @@ parse_tag_3_packet(struct ecryptfs_crypt_stat *crypt_stat,
 /* Released: wipe_auth_tok_list called in ecryptfs_parse_packet_set or
 * at end of function upon failure */
 auth_tok_list_item =
-kmem_cache_alloc(ecryptfs_auth_tok_list_item_cache, SLAB_KERNEL);
+kmem_cache_alloc(ecryptfs_auth_tok_list_item_cache, GFP_KERNEL);
 if (!auth_tok_list_item) {
 ecryptfs_printk(KERN_ERR, "Unable to allocate memory\n");
 rc = -ENOMEM;
@@ -378,7 +378,7 @@ ecryptfs_fill_super(struct super_block *sb, void *raw_data, int silent)
 /* Released in ecryptfs_put_super() */
 ecryptfs_set_superblock_private(sb,
 kmem_cache_alloc(ecryptfs_sb_info_cache,
-SLAB_KERNEL));
+GFP_KERNEL));
 if (!ecryptfs_superblock_to_private(sb)) {
 ecryptfs_printk(KERN_WARNING, "Out of memory\n");
 rc = -ENOMEM;
@@ -402,7 +402,7 @@ ecryptfs_fill_super(struct super_block *sb, void *raw_data, int silent)
 /* through deactivate_super(sb) from get_sb_nodev() */
 ecryptfs_set_dentry_private(sb->s_root,
 kmem_cache_alloc(ecryptfs_dentry_info_cache,
-SLAB_KERNEL));
+GFP_KERNEL));
 if (!ecryptfs_dentry_to_private(sb->s_root)) {
 ecryptfs_printk(KERN_ERR,
 "dentry_info_cache alloc failed\n");
@@ -50,7 +50,7 @@ static struct inode *ecryptfs_alloc_inode(struct super_block *sb)
 struct inode *inode = NULL;
 
 ecryptfs_inode = kmem_cache_alloc(ecryptfs_inode_info_cache,
-SLAB_KERNEL);
+GFP_KERNEL);
 if (unlikely(!ecryptfs_inode))
 goto out;
 ecryptfs_init_crypt_stat(&ecryptfs_inode->crypt_stat);
@@ -57,7 +57,7 @@ static kmem_cache_t * efs_inode_cachep;
 static struct inode *efs_alloc_inode(struct super_block *sb)
 {
 struct efs_inode_info *ei;
-ei = (struct efs_inode_info *)kmem_cache_alloc(efs_inode_cachep, SLAB_KERNEL);
+ei = (struct efs_inode_info *)kmem_cache_alloc(efs_inode_cachep, GFP_KERNEL);
 if (!ei)
 return NULL;
 return &ei->vfs_inode;
@@ -961,7 +961,7 @@ static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
 struct epitem *epi = ep_item_from_epqueue(pt);
 struct eppoll_entry *pwq;
 
-if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, SLAB_KERNEL))) {
+if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) {
 init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
 pwq->whead = whead;
 pwq->base = epi;
@@ -1004,7 +1004,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 struct ep_pqueue epq;
 
 error = -ENOMEM;
-if (!(epi = kmem_cache_alloc(epi_cache, SLAB_KERNEL)))
+if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
 goto eexit_1;
 
 /* Item initialization follow here ... */
@@ -404,7 +404,7 @@ int setup_arg_pages(struct linux_binprm *bprm,
 bprm->loader += stack_base;
 bprm->exec += stack_base;
 
-mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 if (!mpnt)
 return -ENOMEM;
 
@@ -140,7 +140,7 @@ static kmem_cache_t * ext2_inode_cachep;
 static struct inode *ext2_alloc_inode(struct super_block *sb)
 {
 struct ext2_inode_info *ei;
-ei = (struct ext2_inode_info *)kmem_cache_alloc(ext2_inode_cachep, SLAB_KERNEL);
+ei = (struct ext2_inode_info *)kmem_cache_alloc(ext2_inode_cachep, GFP_KERNEL);
 if (!ei)
 return NULL;
 #ifdef CONFIG_EXT2_FS_POSIX_ACL
@@ -63,7 +63,7 @@ void fat_cache_destroy(void)
 
 static inline struct fat_cache *fat_cache_alloc(struct inode *inode)
 {
-return kmem_cache_alloc(fat_cache_cachep, SLAB_KERNEL);
+return kmem_cache_alloc(fat_cache_cachep, GFP_KERNEL);
 }
 
 static inline void fat_cache_free(struct fat_cache *cache)
@@ -482,7 +482,7 @@ static kmem_cache_t *fat_inode_cachep;
 static struct inode *fat_alloc_inode(struct super_block *sb)
 {
 struct msdos_inode_info *ei;
-ei = kmem_cache_alloc(fat_inode_cachep, SLAB_KERNEL);
+ei = kmem_cache_alloc(fat_inode_cachep, GFP_KERNEL);
 if (!ei)
 return NULL;
 return &ei->vfs_inode;
@ -567,7 +567,7 @@ int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fap
|
||||||
int result = 0;
|
int result = 0;
|
||||||
|
|
||||||
if (on) {
|
if (on) {
|
||||||
new = kmem_cache_alloc(fasync_cache, SLAB_KERNEL);
|
new = kmem_cache_alloc(fasync_cache, GFP_KERNEL);
|
||||||
if (!new)
|
if (!new)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
}
|
}
|
||||||
|
|
|
@ -103,7 +103,7 @@ vxfs_blkiget(struct super_block *sbp, u_long extent, ino_t ino)
|
||||||
struct vxfs_inode_info *vip;
|
struct vxfs_inode_info *vip;
|
||||||
struct vxfs_dinode *dip;
|
struct vxfs_dinode *dip;
|
||||||
|
|
||||||
if (!(vip = kmem_cache_alloc(vxfs_inode_cachep, SLAB_KERNEL)))
|
if (!(vip = kmem_cache_alloc(vxfs_inode_cachep, GFP_KERNEL)))
|
||||||
goto fail;
|
goto fail;
|
||||||
dip = (struct vxfs_dinode *)(bp->b_data + offset);
|
dip = (struct vxfs_dinode *)(bp->b_data + offset);
|
||||||
memcpy(vip, dip, sizeof(*vip));
|
memcpy(vip, dip, sizeof(*vip));
|
||||||
|
@ -145,7 +145,7 @@ __vxfs_iget(ino_t ino, struct inode *ilistp)
|
||||||
struct vxfs_dinode *dip;
|
struct vxfs_dinode *dip;
|
||||||
caddr_t kaddr = (char *)page_address(pp);
|
caddr_t kaddr = (char *)page_address(pp);
|
||||||
|
|
||||||
if (!(vip = kmem_cache_alloc(vxfs_inode_cachep, SLAB_KERNEL)))
|
if (!(vip = kmem_cache_alloc(vxfs_inode_cachep, GFP_KERNEL)))
|
||||||
goto fail;
|
goto fail;
|
||||||
dip = (struct vxfs_dinode *)(kaddr + offset);
|
dip = (struct vxfs_dinode *)(kaddr + offset);
|
||||||
memcpy(vip, dip, sizeof(*vip));
|
memcpy(vip, dip, sizeof(*vip));
|
||||||
|
|
|
@@ -41,7 +41,7 @@ static void fuse_request_init(struct fuse_req *req)
 
 struct fuse_req *fuse_request_alloc(void)
 {
-	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, SLAB_KERNEL);
+	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL);
 	if (req)
 		fuse_request_init(req);
 	return req;

@@ -46,7 +46,7 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
 	struct inode *inode;
 	struct fuse_inode *fi;
 
-	inode = kmem_cache_alloc(fuse_inode_cachep, SLAB_KERNEL);
+	inode = kmem_cache_alloc(fuse_inode_cachep, GFP_KERNEL);
 	if (!inode)
 		return NULL;
 

@@ -145,7 +145,7 @@ static struct inode *hfs_alloc_inode(struct super_block *sb)
 {
 	struct hfs_inode_info *i;
 
-	i = kmem_cache_alloc(hfs_inode_cachep, SLAB_KERNEL);
+	i = kmem_cache_alloc(hfs_inode_cachep, GFP_KERNEL);
 	return i ? &i->vfs_inode : NULL;
 }
 

@@ -440,7 +440,7 @@ static struct inode *hfsplus_alloc_inode(struct super_block *sb)
 {
 	struct hfsplus_inode_info *i;
 
-	i = kmem_cache_alloc(hfsplus_inode_cachep, SLAB_KERNEL);
+	i = kmem_cache_alloc(hfsplus_inode_cachep, GFP_KERNEL);
 	return i ? &i->vfs_inode : NULL;
 }
 

@@ -522,7 +522,7 @@ static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
 
 	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
 		return NULL;
-	p = kmem_cache_alloc(hugetlbfs_inode_cachep, SLAB_KERNEL);
+	p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
 	if (unlikely(!p)) {
 		hugetlbfs_inc_free_inodes(sbinfo);
 		return NULL;

@@ -109,7 +109,7 @@ static struct inode *alloc_inode(struct super_block *sb)
 	if (sb->s_op->alloc_inode)
 		inode = sb->s_op->alloc_inode(sb);
 	else
-		inode = (struct inode *) kmem_cache_alloc(inode_cachep, SLAB_KERNEL);
+		inode = (struct inode *) kmem_cache_alloc(inode_cachep, GFP_KERNEL);
 
 	if (inode) {
 		struct address_space * const mapping = &inode->i_data;
@@ -62,7 +62,7 @@ static kmem_cache_t *isofs_inode_cachep;
 static struct inode *isofs_alloc_inode(struct super_block *sb)
 {
 	struct iso_inode_info *ei;
-	ei = kmem_cache_alloc(isofs_inode_cachep, SLAB_KERNEL);
+	ei = kmem_cache_alloc(isofs_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;

@@ -33,7 +33,7 @@ static kmem_cache_t *jffs2_inode_cachep;
 static struct inode *jffs2_alloc_inode(struct super_block *sb)
 {
 	struct jffs2_inode_info *ei;
-	ei = (struct jffs2_inode_info *)kmem_cache_alloc(jffs2_inode_cachep, SLAB_KERNEL);
+	ei = (struct jffs2_inode_info *)kmem_cache_alloc(jffs2_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;

@@ -147,7 +147,7 @@ static kmem_cache_t *filelock_cache __read_mostly;
 /* Allocate an empty lock structure. */
 static struct file_lock *locks_alloc_lock(void)
 {
-	return kmem_cache_alloc(filelock_cache, SLAB_KERNEL);
+	return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
 }
 
 static void locks_release_private(struct file_lock *fl)

@@ -56,7 +56,7 @@ static kmem_cache_t * minix_inode_cachep;
 static struct inode *minix_alloc_inode(struct super_block *sb)
 {
 	struct minix_inode_info *ei;
-	ei = (struct minix_inode_info *)kmem_cache_alloc(minix_inode_cachep, SLAB_KERNEL);
+	ei = (struct minix_inode_info *)kmem_cache_alloc(minix_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;

@@ -45,7 +45,7 @@ static kmem_cache_t * ncp_inode_cachep;
 static struct inode *ncp_alloc_inode(struct super_block *sb)
 {
 	struct ncp_inode_info *ei;
-	ei = (struct ncp_inode_info *)kmem_cache_alloc(ncp_inode_cachep, SLAB_KERNEL);
+	ei = (struct ncp_inode_info *)kmem_cache_alloc(ncp_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;

@@ -143,7 +143,7 @@ static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
 {
 	struct nfs_direct_req *dreq;
 
-	dreq = kmem_cache_alloc(nfs_direct_cachep, SLAB_KERNEL);
+	dreq = kmem_cache_alloc(nfs_direct_cachep, GFP_KERNEL);
 	if (!dreq)
 		return NULL;
 
@@ -1080,7 +1080,7 @@ void nfs4_clear_inode(struct inode *inode)
 struct inode *nfs_alloc_inode(struct super_block *sb)
 {
 	struct nfs_inode *nfsi;
-	nfsi = (struct nfs_inode *)kmem_cache_alloc(nfs_inode_cachep, SLAB_KERNEL);
+	nfsi = (struct nfs_inode *)kmem_cache_alloc(nfs_inode_cachep, GFP_KERNEL);
 	if (!nfsi)
 		return NULL;
 	nfsi->flags = 0UL;

@@ -26,7 +26,7 @@ static inline struct nfs_page *
 nfs_page_alloc(void)
 {
 	struct nfs_page *p;
-	p = kmem_cache_alloc(nfs_page_cachep, SLAB_KERNEL);
+	p = kmem_cache_alloc(nfs_page_cachep, GFP_KERNEL);
 	if (p) {
 		memset(p, 0, sizeof(*p));
 		INIT_LIST_HEAD(&p->wb_list);

@@ -336,7 +336,7 @@ static struct inode *openprom_alloc_inode(struct super_block *sb)
 {
 	struct op_inode_info *oi;
 
-	oi = kmem_cache_alloc(op_inode_cachep, SLAB_KERNEL);
+	oi = kmem_cache_alloc(op_inode_cachep, GFP_KERNEL);
 	if (!oi)
 		return NULL;
 

@@ -88,7 +88,7 @@ static struct inode *proc_alloc_inode(struct super_block *sb)
 	struct proc_inode *ei;
 	struct inode *inode;
 
-	ei = (struct proc_inode *)kmem_cache_alloc(proc_inode_cachep, SLAB_KERNEL);
+	ei = (struct proc_inode *)kmem_cache_alloc(proc_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	ei->pid = NULL;

@@ -520,7 +520,7 @@ static kmem_cache_t *qnx4_inode_cachep;
 static struct inode *qnx4_alloc_inode(struct super_block *sb)
 {
 	struct qnx4_inode_info *ei;
-	ei = kmem_cache_alloc(qnx4_inode_cachep, SLAB_KERNEL);
+	ei = kmem_cache_alloc(qnx4_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;

@@ -496,7 +496,7 @@ static struct inode *reiserfs_alloc_inode(struct super_block *sb)
 {
 	struct reiserfs_inode_info *ei;
 	ei = (struct reiserfs_inode_info *)
-	    kmem_cache_alloc(reiserfs_inode_cachep, SLAB_KERNEL);
+	    kmem_cache_alloc(reiserfs_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;
@@ -555,7 +555,7 @@ static kmem_cache_t * romfs_inode_cachep;
 static struct inode *romfs_alloc_inode(struct super_block *sb)
 {
 	struct romfs_inode_info *ei;
-	ei = (struct romfs_inode_info *)kmem_cache_alloc(romfs_inode_cachep, SLAB_KERNEL);
+	ei = (struct romfs_inode_info *)kmem_cache_alloc(romfs_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;

@@ -55,7 +55,7 @@ static kmem_cache_t *smb_inode_cachep;
 static struct inode *smb_alloc_inode(struct super_block *sb)
 {
 	struct smb_inode_info *ei;
-	ei = (struct smb_inode_info *)kmem_cache_alloc(smb_inode_cachep, SLAB_KERNEL);
+	ei = (struct smb_inode_info *)kmem_cache_alloc(smb_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;

@@ -61,7 +61,7 @@ static struct smb_request *smb_do_alloc_request(struct smb_sb_info *server,
 	struct smb_request *req;
 	unsigned char *buf = NULL;
 
-	req = kmem_cache_alloc(req_cachep, SLAB_KERNEL);
+	req = kmem_cache_alloc(req_cachep, GFP_KERNEL);
 	VERBOSE("allocating request: %p\n", req);
 	if (!req)
 		goto out;

@@ -307,7 +307,7 @@ static struct inode *sysv_alloc_inode(struct super_block *sb)
 {
 	struct sysv_inode_info *si;
 
-	si = kmem_cache_alloc(sysv_inode_cachep, SLAB_KERNEL);
+	si = kmem_cache_alloc(sysv_inode_cachep, GFP_KERNEL);
 	if (!si)
 		return NULL;
 	return &si->vfs_inode;

@@ -112,7 +112,7 @@ static kmem_cache_t * udf_inode_cachep;
 static struct inode *udf_alloc_inode(struct super_block *sb)
 {
 	struct udf_inode_info *ei;
-	ei = (struct udf_inode_info *)kmem_cache_alloc(udf_inode_cachep, SLAB_KERNEL);
+	ei = (struct udf_inode_info *)kmem_cache_alloc(udf_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 

@@ -1209,7 +1209,7 @@ static kmem_cache_t * ufs_inode_cachep;
 static struct inode *ufs_alloc_inode(struct super_block *sb)
 {
 	struct ufs_inode_info *ei;
-	ei = (struct ufs_inode_info *)kmem_cache_alloc(ufs_inode_cachep, SLAB_KERNEL);
+	ei = (struct ufs_inode_info *)kmem_cache_alloc(ufs_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	ei->vfs_inode.i_version = 1;
@@ -1483,7 +1483,7 @@ extern void __init vfs_caches_init(unsigned long);
 
 extern struct kmem_cache *names_cachep;
 
-#define __getname() kmem_cache_alloc(names_cachep, SLAB_KERNEL)
+#define __getname() kmem_cache_alloc(names_cachep, GFP_KERNEL)
 #define __putname(name) kmem_cache_free(names_cachep, (void *)(name))
 #ifndef CONFIG_AUDITSYSCALL
 #define putname(name) __putname(name)

@@ -34,7 +34,7 @@ extern kmem_cache_t *anon_vma_cachep;
 
 static inline struct anon_vma *anon_vma_alloc(void)
 {
-	return kmem_cache_alloc(anon_vma_cachep, SLAB_KERNEL);
+	return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
 }
 
 static inline void anon_vma_free(struct anon_vma *anon_vma)
@@ -19,7 +19,6 @@ typedef struct kmem_cache kmem_cache_t;
 #include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */
 
 /* flags for kmem_cache_alloc() */
-#define SLAB_KERNEL GFP_KERNEL
 #define SLAB_DMA GFP_DMA
 
 /* flags to pass to kmem_cache_create().
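This slab.h hunk is the one the rest of the patch exists for: with the alias removed from the header, any leftover SLAB_KERNEL user breaks the build instead of silently mapping to GFP_KERNEL. A rough before/after sketch of a caller outside this patch; my_cachep and struct my_obj are illustrative names only, and the cache is assumed to have been created elsewhere with kmem_cache_create().

	#include <linux/slab.h>

	struct my_obj { int val; };		/* illustrative object type */
	static struct kmem_cache *my_cachep;	/* assume kmem_cache_create() ran at init */

	static struct my_obj *my_obj_alloc(void)
	{
		/* Before this patch: kmem_cache_alloc(my_cachep, SLAB_KERNEL);
		 * SLAB_KERNEL no longer exists, so the gfp flag is spelled out. */
		return kmem_cache_alloc(my_cachep, GFP_KERNEL);
	}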
@@ -35,7 +35,7 @@ static inline void taskstats_tgid_alloc(struct task_struct *tsk)
 		return;
 
 	/* No problem if kmem_cache_zalloc() fails */
-	stats = kmem_cache_zalloc(taskstats_cache, SLAB_KERNEL);
+	stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);
 
 	spin_lock_irq(&tsk->sighand->siglock);
 	if (!sig->stats) {

@@ -224,7 +224,7 @@ static struct inode *mqueue_alloc_inode(struct super_block *sb)
 {
 	struct mqueue_inode_info *ei;
 
-	ei = kmem_cache_alloc(mqueue_inode_cachep, SLAB_KERNEL);
+	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;
@@ -41,7 +41,7 @@ void delayacct_init(void)
 
 void __delayacct_tsk_init(struct task_struct *tsk)
 {
-	tsk->delays = kmem_cache_zalloc(delayacct_cache, SLAB_KERNEL);
+	tsk->delays = kmem_cache_zalloc(delayacct_cache, GFP_KERNEL);
 	if (tsk->delays)
 		spin_lock_init(&tsk->delays->lock);
 }
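The taskstats and delayacct hunks apply the same rename to kmem_cache_zalloc(), which takes the identical gfp flag argument and additionally zeroes the object. A minimal sketch, reusing the hypothetical my_cachep cache from the earlier example:

	#include <linux/slab.h>

	/* Zeroed allocation from a slab cache; flag usage mirrors kmem_cache_alloc(). */
	static void *my_obj_zalloc(struct kmem_cache *cachep)
	{
		return kmem_cache_zalloc(cachep, GFP_KERNEL);	/* was SLAB_KERNEL before this patch */
	}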
@@ -237,7 +237,7 @@ static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 				goto fail_nomem;
 			charge = len;
 		}
-		tmp = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 		if (!tmp)
 			goto fail_nomem;
 		*tmp = *mpnt;

@@ -319,7 +319,7 @@ static inline void mm_free_pgd(struct mm_struct * mm)
 
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
 
-#define allocate_mm() (kmem_cache_alloc(mm_cachep, SLAB_KERNEL))
+#define allocate_mm() (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
 #define free_mm(mm) (kmem_cache_free(mm_cachep, (mm)))
 
 #include <linux/init_task.h>

@@ -621,7 +621,7 @@ static struct files_struct *alloc_files(void)
 	struct files_struct *newf;
 	struct fdtable *fdt;
 
-	newf = kmem_cache_alloc(files_cachep, SLAB_KERNEL);
+	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
 	if (!newf)
 		goto out;
 
Some files were not shown because too many files have changed in this diff.