/*
 * ioctl.c - NILFS ioctl operations.
 *
 * Copyright (C) 2007, 2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 */

#include <linux/fs.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/capability.h>	/* capable() */
#include <linux/uaccess.h>	/* copy_from_user(), copy_to_user() */
#include <linux/vmalloc.h>
#include <linux/compat.h>	/* compat_ptr() */
#include <linux/mount.h>	/* mnt_want_write_file(), mnt_drop_write() */
#include <linux/buffer_head.h>
#include <linux/nilfs2_fs.h>
#include "nilfs.h"
#include "segment.h"
#include "bmap.h"
#include "cpfile.h"
#include "sufile.h"
#include "dat.h"

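/*
 * nilfs_ioctl_wrap_copy - wrapping function of get/set metadata info
 *
 * Copies the argument array described by @argv between user space and a
 * single order-0 kernel page, calling @dofunc on each batch of at most
 * PAGE_SIZE / argv->v_size members.  The _IOC_WRITE/_IOC_READ bits of
 * @dir select whether data is copied in from or out to user space.  On
 * return, argv->v_nmembs holds the number of members actually processed.
 */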
static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs,
				 struct nilfs_argv *argv, int dir,
				 ssize_t (*dofunc)(struct the_nilfs *,
						   __u64 *, int,
						   void *, size_t, size_t))
{
	void *buf;
	void __user *base = (void __user *)(unsigned long)argv->v_base;
	size_t maxmembs, total, n;
	ssize_t nr;
	int ret, i;
	__u64 pos, ppos;

	if (argv->v_nmembs == 0)
		return 0;

	if (argv->v_size > PAGE_SIZE)
		return -EINVAL;

	buf = (void *)__get_free_pages(GFP_NOFS, 0);
	if (unlikely(!buf))
		return -ENOMEM;
	maxmembs = PAGE_SIZE / argv->v_size;

	ret = 0;
	total = 0;
	pos = argv->v_index;
	for (i = 0; i < argv->v_nmembs; i += n) {
		n = (argv->v_nmembs - i < maxmembs) ?
			argv->v_nmembs - i : maxmembs;
		if ((dir & _IOC_WRITE) &&
		    copy_from_user(buf, base + argv->v_size * i,
				   argv->v_size * n)) {
			ret = -EFAULT;
			break;
		}
		ppos = pos;
		nr = dofunc(nilfs, &pos, argv->v_flags, buf, argv->v_size,
			    n);
		if (nr < 0) {
			ret = nr;
			break;
		}
		if ((dir & _IOC_READ) &&
		    copy_to_user(base + argv->v_size * i, buf,
				 argv->v_size * nr)) {
			ret = -EFAULT;
			break;
		}
		total += nr;
		if ((size_t)nr < n)
			break;
		if (pos == ppos)
			pos += n;
	}
	argv->v_nmembs = total;

	free_pages((unsigned long)buf, 0);
	return ret;
}

static int nilfs_ioctl_getflags(struct inode *inode, void __user *argp)
{
	unsigned int flags = NILFS_I(inode)->i_flags & FS_FL_USER_VISIBLE;

	return put_user(flags, (int __user *)argp);
}

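/*
 * nilfs_ioctl_setflags - ioctl to support chattr
 *
 * Updates the user-modifiable inode flags inside a NILFS transaction.
 * Changing FS_APPEND_FL or FS_IMMUTABLE_FL additionally requires the
 * CAP_LINUX_IMMUTABLE capability.
 */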
static int nilfs_ioctl_setflags(struct inode *inode, struct file *filp,
				void __user *argp)
{
	struct nilfs_transaction_info ti;
	unsigned int flags, oldflags;
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(flags, (int __user *)argp))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	flags = nilfs_mask_flags(inode->i_mode, flags);

	mutex_lock(&inode->i_mutex);

	oldflags = NILFS_I(inode)->i_flags;

	/*
	 * The IMMUTABLE and APPEND_ONLY flags can only be changed by a
	 * process with the relevant capability.
	 */
	ret = -EPERM;
	if (((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		goto out;

	ret = nilfs_transaction_begin(inode->i_sb, &ti, 0);
	if (ret)
		goto out;

	NILFS_I(inode)->i_flags = (oldflags & ~FS_FL_USER_MODIFIABLE) |
		(flags & FS_FL_USER_MODIFIABLE);

	nilfs_set_inode_flags(inode);
	inode->i_ctime = CURRENT_TIME;
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_mark_inode_dirty(inode);
	ret = nilfs_transaction_commit(inode->i_sb);
out:
	mutex_unlock(&inode->i_mutex);
	mnt_drop_write(filp->f_path.mnt);
	return ret;
}

static int nilfs_ioctl_getversion(struct inode *inode, void __user *argp)
{
	return put_user(inode->i_generation, (int __user *)argp);
}

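/*
 * nilfs_ioctl_change_cpmode - NILFS_IOCTL_CHANGE_CPMODE
 *
 * Switches the checkpoint given by cpmode.cm_cno between the plain
 * checkpoint and snapshot states (cpmode.cm_mode).  The change is made
 * inside a transaction while s_umount is held for read.  Requires
 * CAP_SYS_ADMIN.
 */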
static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
				     unsigned int cmd, void __user *argp)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	struct nilfs_transaction_info ti;
	struct nilfs_cpmode cpmode;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = -EFAULT;
	if (copy_from_user(&cpmode, argp, sizeof(cpmode)))
		goto out;

	down_read(&inode->i_sb->s_umount);

	nilfs_transaction_begin(inode->i_sb, &ti, 0);
	ret = nilfs_cpfile_change_cpmode(
		nilfs->ns_cpfile, cpmode.cm_cno, cpmode.cm_mode);
	if (unlikely(ret < 0))
		nilfs_transaction_abort(inode->i_sb);
	else
		nilfs_transaction_commit(inode->i_sb); /* never fails */

	up_read(&inode->i_sb->s_umount);
out:
	mnt_drop_write(filp->f_path.mnt);
	return ret;
}

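/*
 * nilfs_ioctl_delete_checkpoint - NILFS_IOCTL_DELETE_CHECKPOINT
 *
 * Removes the checkpoint whose number is passed in from user space,
 * performing the cpfile update inside a transaction.  Requires
 * CAP_SYS_ADMIN.
 */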
static int
nilfs_ioctl_delete_checkpoint(struct inode *inode, struct file *filp,
			      unsigned int cmd, void __user *argp)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	struct nilfs_transaction_info ti;
	__u64 cno;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = -EFAULT;
	if (copy_from_user(&cno, argp, sizeof(cno)))
		goto out;

	nilfs_transaction_begin(inode->i_sb, &ti, 0);
	ret = nilfs_cpfile_delete_checkpoint(nilfs->ns_cpfile, cno);
	if (unlikely(ret < 0))
		nilfs_transaction_abort(inode->i_sb);
	else
		nilfs_transaction_commit(inode->i_sb); /* never fails */
out:
	mnt_drop_write(filp->f_path.mnt);
	return ret;
}

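/*
 * nilfs_ioctl_do_get_cpinfo - callback for NILFS_IOCTL_GET_CPINFO
 *
 * Reads up to @nmembs checkpoint entries from the cpfile into @buf,
 * starting at *@posp, under ns_segctor_sem.  Used through
 * nilfs_ioctl_get_info() and nilfs_ioctl_wrap_copy().
 */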
static ssize_t
nilfs_ioctl_do_get_cpinfo(struct the_nilfs *nilfs, __u64 *posp, int flags,
			  void *buf, size_t size, size_t nmembs)
{
	int ret;

	down_read(&nilfs->ns_segctor_sem);
	ret = nilfs_cpfile_get_cpinfo(nilfs->ns_cpfile, posp, flags, buf,
				      size, nmembs);
	up_read(&nilfs->ns_segctor_sem);
	return ret;
}

static int nilfs_ioctl_get_cpstat(struct inode *inode, struct file *filp,
				  unsigned int cmd, void __user *argp)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	struct nilfs_cpstat cpstat;
	int ret;

	down_read(&nilfs->ns_segctor_sem);
	ret = nilfs_cpfile_get_stat(nilfs->ns_cpfile, &cpstat);
	up_read(&nilfs->ns_segctor_sem);
	if (ret < 0)
		return ret;

	if (copy_to_user(argp, &cpstat, sizeof(cpstat)))
		ret = -EFAULT;
	return ret;
}

static ssize_t
nilfs_ioctl_do_get_suinfo(struct the_nilfs *nilfs, __u64 *posp, int flags,
			  void *buf, size_t size, size_t nmembs)
{
	int ret;

	down_read(&nilfs->ns_segctor_sem);
	ret = nilfs_sufile_get_suinfo(nilfs->ns_sufile, *posp, buf, size,
				      nmembs);
	up_read(&nilfs->ns_segctor_sem);
	return ret;
}

static int nilfs_ioctl_get_sustat(struct inode *inode, struct file *filp,
				  unsigned int cmd, void __user *argp)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	struct nilfs_sustat sustat;
	int ret;

	down_read(&nilfs->ns_segctor_sem);
	ret = nilfs_sufile_get_stat(nilfs->ns_sufile, &sustat);
	up_read(&nilfs->ns_segctor_sem);
	if (ret < 0)
		return ret;

	if (copy_to_user(argp, &sustat, sizeof(sustat)))
		ret = -EFAULT;
	return ret;
}

static ssize_t
nilfs_ioctl_do_get_vinfo(struct the_nilfs *nilfs, __u64 *posp, int flags,
			 void *buf, size_t size, size_t nmembs)
{
	int ret;

	down_read(&nilfs->ns_segctor_sem);
	ret = nilfs_dat_get_vinfo(nilfs->ns_dat, buf, size, nmembs);
	up_read(&nilfs->ns_segctor_sem);
	return ret;
}

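/*
 * nilfs_ioctl_do_get_bdescs - callback for NILFS_IOCTL_GET_BDESCS
 *
 * Looks up the current block address of each DAT block descriptor in
 * @buf through the DAT inode's bmap; descriptors whose block is not
 * found get bd_blocknr set to zero.
 */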
static ssize_t
nilfs_ioctl_do_get_bdescs(struct the_nilfs *nilfs, __u64 *posp, int flags,
			  void *buf, size_t size, size_t nmembs)
{
	struct nilfs_bmap *bmap = NILFS_I(nilfs->ns_dat)->i_bmap;
	struct nilfs_bdesc *bdescs = buf;
	int ret, i;

	down_read(&nilfs->ns_segctor_sem);
	for (i = 0; i < nmembs; i++) {
		ret = nilfs_bmap_lookup_at_level(bmap,
						 bdescs[i].bd_offset,
						 bdescs[i].bd_level + 1,
						 &bdescs[i].bd_blocknr);
		if (ret < 0) {
			if (ret != -ENOENT) {
				up_read(&nilfs->ns_segctor_sem);
				return ret;
			}
			bdescs[i].bd_blocknr = 0;
		}
	}
	up_read(&nilfs->ns_segctor_sem);
	return nmembs;
}

static int nilfs_ioctl_get_bdescs(struct inode *inode, struct file *filp,
				  unsigned int cmd, void __user *argp)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	struct nilfs_argv argv;
	int ret;

	if (copy_from_user(&argv, argp, sizeof(argv)))
		return -EFAULT;

	if (argv.v_size != sizeof(struct nilfs_bdesc))
		return -EINVAL;

	ret = nilfs_ioctl_wrap_copy(nilfs, &argv, _IOC_DIR(cmd),
				    nilfs_ioctl_do_get_bdescs);
	if (ret < 0)
		return ret;

	if (copy_to_user(argp, &argv, sizeof(argv)))
		ret = -EFAULT;
	return ret;
}

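/*
 * nilfs_ioctl_move_inode_block - prepare a data or node block for moving by GC
 *
 * Submits a read of the data block (vd_flags == 0) or node block
 * described by @vdesc into the GC cache and queues the resulting buffer
 * head on @buffers.
 */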
static int nilfs_ioctl_move_inode_block(struct inode *inode,
					struct nilfs_vdesc *vdesc,
					struct list_head *buffers)
{
	struct buffer_head *bh;
	int ret;

	if (vdesc->vd_flags == 0)
		ret = nilfs_gccache_submit_read_data(
			inode, vdesc->vd_offset, vdesc->vd_blocknr,
			vdesc->vd_vblocknr, &bh);
	else
		ret = nilfs_gccache_submit_read_node(
			inode, vdesc->vd_blocknr, vdesc->vd_vblocknr, &bh);

	if (unlikely(ret < 0)) {
		if (ret == -ENOENT)
			printk(KERN_CRIT
			       "%s: invalid virtual block address (%s): "
			       "ino=%llu, cno=%llu, offset=%llu, "
			       "blocknr=%llu, vblocknr=%llu\n",
			       __func__, vdesc->vd_flags ? "node" : "data",
			       (unsigned long long)vdesc->vd_ino,
			       (unsigned long long)vdesc->vd_cno,
			       (unsigned long long)vdesc->vd_offset,
			       (unsigned long long)vdesc->vd_blocknr,
			       (unsigned long long)vdesc->vd_vblocknr);
		return ret;
	}
	if (unlikely(!list_empty(&bh->b_assoc_buffers))) {
		printk(KERN_CRIT "%s: conflicting %s buffer: ino=%llu, "
		       "cno=%llu, offset=%llu, blocknr=%llu, vblocknr=%llu\n",
		       __func__, vdesc->vd_flags ? "node" : "data",
		       (unsigned long long)vdesc->vd_ino,
		       (unsigned long long)vdesc->vd_cno,
		       (unsigned long long)vdesc->vd_offset,
		       (unsigned long long)vdesc->vd_blocknr,
		       (unsigned long long)vdesc->vd_vblocknr);
		brelse(bh);
		return -EEXIST;
	}
	list_add_tail(&bh->b_assoc_buffers, buffers);
	return 0;
}

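/*
 * nilfs_ioctl_move_blocks - move valid blocks into the GC cache
 *
 * Walks the vdesc array, registering each referenced inode on the GC
 * inode list and reading every listed block into the GC cache, then
 * waits for the reads and marks the buffers dirty.  Returns the number
 * of processed vdescs or a negative error code.
 */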
static int nilfs_ioctl_move_blocks(struct super_block *sb,
				   struct nilfs_argv *argv, void *buf)
{
	size_t nmembs = argv->v_nmembs;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct inode *inode;
	struct nilfs_vdesc *vdesc;
	struct buffer_head *bh, *n;
	LIST_HEAD(buffers);
	ino_t ino;
	__u64 cno;
	int i, ret;

	for (i = 0, vdesc = buf; i < nmembs; ) {
		ino = vdesc->vd_ino;
		cno = vdesc->vd_cno;
		inode = nilfs_iget_for_gc(sb, ino, cno);
		if (IS_ERR(inode)) {
			ret = PTR_ERR(inode);
			goto failed;
		}
		if (list_empty(&NILFS_I(inode)->i_dirty)) {
			/*
			 * Add the inode to GC inode list. Garbage Collection
			 * is serialized and no two processes manipulate the
			 * list simultaneously.
			 */
			igrab(inode);
			list_add(&NILFS_I(inode)->i_dirty,
				 &nilfs->ns_gc_inodes);
		}

		do {
			ret = nilfs_ioctl_move_inode_block(inode, vdesc,
							   &buffers);
			if (unlikely(ret < 0)) {
				iput(inode);
				goto failed;
			}
			vdesc++;
		} while (++i < nmembs &&
			 vdesc->vd_ino == ino && vdesc->vd_cno == cno);

		iput(inode); /* The inode still remains in GC inode list */
	}

	list_for_each_entry_safe(bh, n, &buffers, b_assoc_buffers) {
		ret = nilfs_gccache_wait_and_mark_dirty(bh);
		if (unlikely(ret < 0)) {
			WARN_ON(ret == -EEXIST);
			goto failed;
		}
		list_del_init(&bh->b_assoc_buffers);
		brelse(bh);
	}
	return nmembs;

failed:
	list_for_each_entry_safe(bh, n, &buffers, b_assoc_buffers) {
		list_del_init(&bh->b_assoc_buffers);
		brelse(bh);
	}
	return ret;
}

static int nilfs_ioctl_delete_checkpoints(struct the_nilfs *nilfs,
					  struct nilfs_argv *argv, void *buf)
{
	size_t nmembs = argv->v_nmembs;
	struct inode *cpfile = nilfs->ns_cpfile;
	struct nilfs_period *periods = buf;
	int ret, i;

	for (i = 0; i < nmembs; i++) {
		ret = nilfs_cpfile_delete_checkpoints(
			cpfile, periods[i].p_start, periods[i].p_end);
		if (ret < 0)
			return ret;
	}
	return nmembs;
}

static int nilfs_ioctl_free_vblocknrs(struct the_nilfs *nilfs,
				      struct nilfs_argv *argv, void *buf)
{
	size_t nmembs = argv->v_nmembs;
	int ret;

	ret = nilfs_dat_freev(nilfs->ns_dat, buf, nmembs);

	return (ret < 0) ? ret : nmembs;
}

static int nilfs_ioctl_mark_blocks_dirty(struct the_nilfs *nilfs,
					 struct nilfs_argv *argv, void *buf)
{
	size_t nmembs = argv->v_nmembs;
	struct nilfs_bmap *bmap = NILFS_I(nilfs->ns_dat)->i_bmap;
	struct nilfs_bdesc *bdescs = buf;
	int ret, i;

	for (i = 0; i < nmembs; i++) {
		/* XXX: use macro or inline func to check liveness */
		ret = nilfs_bmap_lookup_at_level(bmap,
						 bdescs[i].bd_offset,
						 bdescs[i].bd_level + 1,
						 &bdescs[i].bd_blocknr);
		if (ret < 0) {
			if (ret != -ENOENT)
				return ret;
			bdescs[i].bd_blocknr = 0;
		}
		if (bdescs[i].bd_blocknr != bdescs[i].bd_oblocknr)
			/* skip dead block */
			continue;
		if (bdescs[i].bd_level == 0) {
			ret = nilfs_mdt_mark_block_dirty(nilfs->ns_dat,
							 bdescs[i].bd_offset);
			if (ret < 0) {
				WARN_ON(ret == -ENOENT);
				return ret;
			}
		} else {
			ret = nilfs_bmap_mark(bmap, bdescs[i].bd_offset,
					      bdescs[i].bd_level);
			if (ret < 0) {
				WARN_ON(ret == -ENOENT);
				return ret;
			}
		}
	}
	return nmembs;
}

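/*
 * nilfs_ioctl_prepare_clean_segments - preparation for segment cleaning
 *
 * Called from nilfs_clean_segments(); deletes the checkpoints listed in
 * argv[1], frees the virtual block numbers in argv[2] from the DAT, and
 * marks the blocks described by argv[3] dirty so that they get moved to
 * new segments.
 */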
int nilfs_ioctl_prepare_clean_segments(struct the_nilfs *nilfs,
				       struct nilfs_argv *argv, void **kbufs)
{
	const char *msg;
	int ret;

	ret = nilfs_ioctl_delete_checkpoints(nilfs, &argv[1], kbufs[1]);
	if (ret < 0) {
		/*
		 * can safely abort because checkpoints can be removed
		 * independently.
		 */
		msg = "cannot delete checkpoints";
		goto failed;
	}
	ret = nilfs_ioctl_free_vblocknrs(nilfs, &argv[2], kbufs[2]);
	if (ret < 0) {
		/*
		 * can safely abort because DAT file is updated atomically
		 * using a copy-on-write technique.
		 */
		msg = "cannot delete virtual blocks from DAT file";
		goto failed;
	}
	ret = nilfs_ioctl_mark_blocks_dirty(nilfs, &argv[3], kbufs[3]);
	if (ret < 0) {
		/*
		 * can safely abort because the operation is nondestructive.
		 */
		msg = "cannot mark copying blocks dirty";
		goto failed;
	}
	return 0;

failed:
	printk(KERN_ERR "NILFS: GC failed during preparation: %s: err=%d\n",
	       msg, ret);
	return ret;
}

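/*
 * nilfs_ioctl_clean_segments - NILFS_IOCTL_CLEAN_SEGMENTS
 *
 * Main entry point of garbage collection, issued by the userland cleaner.
 * Copies the five argv arrays into kernel buffers, reads the blocks to be
 * moved into the GC cache, and then runs nilfs_clean_segments() to write
 * them out and reclaim the segments listed in argv[4].  Requires
 * CAP_SYS_ADMIN.
 */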
static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
				      unsigned int cmd, void __user *argp)
{
	struct nilfs_argv argv[5];
	static const size_t argsz[5] = {
		sizeof(struct nilfs_vdesc),
		sizeof(struct nilfs_period),
		sizeof(__u64),
		sizeof(struct nilfs_bdesc),
		sizeof(__u64),
	};
	void __user *base;
	void *kbufs[5];
	struct the_nilfs *nilfs;
	size_t len, nsegs;
	int n, ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = -EFAULT;
	if (copy_from_user(argv, argp, sizeof(argv)))
		goto out;

	ret = -EINVAL;
	nsegs = argv[4].v_nmembs;
	if (argv[4].v_size != argsz[4])
		goto out;

	/*
	 * argv[4] points to segment numbers this ioctl cleans.  We
	 * use kmalloc() for its buffer because the memory used for the
	 * segment numbers is small enough.
	 */
	kbufs[4] = memdup_user((void __user *)(unsigned long)argv[4].v_base,
			       nsegs * sizeof(__u64));
	if (IS_ERR(kbufs[4])) {
		ret = PTR_ERR(kbufs[4]);
		goto out;
	}
	nilfs = inode->i_sb->s_fs_info;

	for (n = 0; n < 4; n++) {
		ret = -EINVAL;
		if (argv[n].v_size != argsz[n])
			goto out_free;

		if (argv[n].v_nmembs > nsegs * nilfs->ns_blocks_per_segment)
			goto out_free;

		if (argv[n].v_nmembs >= UINT_MAX / argv[n].v_size)
			goto out_free;

		len = argv[n].v_size * argv[n].v_nmembs;
		base = (void __user *)(unsigned long)argv[n].v_base;
		if (len == 0) {
			kbufs[n] = NULL;
			continue;
		}

		kbufs[n] = vmalloc(len);
		if (!kbufs[n]) {
			ret = -ENOMEM;
			goto out_free;
		}
		if (copy_from_user(kbufs[n], base, len)) {
			ret = -EFAULT;
			vfree(kbufs[n]);
			goto out_free;
		}
	}

	/*
	 * nilfs_ioctl_move_blocks() will call nilfs_iget_for_gc(),
	 * which operates on an inode list without blocking.
	 * To protect the list from concurrent operations,
	 * nilfs_ioctl_move_blocks() must be an atomic operation.
	 */
	if (test_and_set_bit(THE_NILFS_GC_RUNNING, &nilfs->ns_flags)) {
		ret = -EBUSY;
		goto out_free;
	}

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	ret = nilfs_ioctl_move_blocks(inode->i_sb, &argv[0], kbufs[0]);
	if (ret < 0)
		printk(KERN_ERR "NILFS: GC failed during preparation: "
		       "cannot read source blocks: err=%d\n", ret);
	else
		ret = nilfs_clean_segments(inode->i_sb, argv, kbufs);

	nilfs_remove_all_gcinodes(nilfs);
	clear_nilfs_gc_running(nilfs);

out_free:
	while (--n >= 0)
		vfree(kbufs[n]);
	kfree(kbufs[4]);
out:
	mnt_drop_write(filp->f_path.mnt);
	return ret;
}

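/*
 * nilfs_ioctl_sync - NILFS_IOCTL_SYNC
 *
 * Forces construction of a new log segment and, if @argp is non-NULL,
 * returns the number of the latest checkpoint to user space.
 */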
static int nilfs_ioctl_sync(struct inode *inode, struct file *filp,
			    unsigned int cmd, void __user *argp)
{
	__u64 cno;
	int ret;
	struct the_nilfs *nilfs;

	ret = nilfs_construct_segment(inode->i_sb);
	if (ret < 0)
		return ret;

	if (argp != NULL) {
		nilfs = inode->i_sb->s_fs_info;
		down_read(&nilfs->ns_segctor_sem);
		cno = nilfs->ns_cno - 1;
		up_read(&nilfs->ns_segctor_sem);
		if (copy_to_user(argp, &cno, sizeof(cno)))
			return -EFAULT;
	}
	return 0;
}

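/*
 * nilfs_ioctl_resize - NILFS_IOCTL_RESIZE
 *
 * Resizes the file system to the new size passed in from user space.
 * Requires CAP_SYS_ADMIN.
 */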
static int nilfs_ioctl_resize(struct inode *inode, struct file *filp,
			      void __user *argp)
{
	__u64 newsize;
	int ret = -EPERM;

	if (!capable(CAP_SYS_ADMIN))
		goto out;

	ret = mnt_want_write_file(filp);
	if (ret)
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&newsize, argp, sizeof(newsize)))
		goto out_drop_write;

	ret = nilfs_resize_fs(inode->i_sb, newsize);

out_drop_write:
	mnt_drop_write(filp->f_path.mnt);
out:
	return ret;
}

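/*
 * nilfs_ioctl_set_alloc_range - NILFS_IOCTL_SET_ALLOC_RANGE
 *
 * Converts the byte range passed in from user space into segment numbers
 * and limits new segment allocation to that range via the sufile.
 * Requires CAP_SYS_ADMIN.
 */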
static int nilfs_ioctl_set_alloc_range(struct inode *inode, void __user *argp)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 range[2];
	__u64 minseg, maxseg;
	unsigned long segbytes;
	int ret = -EPERM;

	if (!capable(CAP_SYS_ADMIN))
		goto out;

	ret = -EFAULT;
	if (copy_from_user(range, argp, sizeof(__u64[2])))
		goto out;

	ret = -ERANGE;
	if (range[1] > i_size_read(inode->i_sb->s_bdev->bd_inode))
		goto out;

	segbytes = nilfs->ns_blocks_per_segment * nilfs->ns_blocksize;

	minseg = range[0] + segbytes - 1;
	do_div(minseg, segbytes);
	maxseg = NILFS_SB2_OFFSET_BYTES(range[1]);
	do_div(maxseg, segbytes);
	maxseg--;

	ret = nilfs_sufile_set_alloc_range(nilfs->ns_sufile, minseg, maxseg);
out:
	return ret;
}

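/*
 * nilfs_ioctl_get_info - common handler for the GET_CPINFO, GET_SUINFO
 * and GET_VINFO ioctls
 *
 * Validates argv.v_size against @membsz and delegates the actual copy
 * work to nilfs_ioctl_wrap_copy() with the given @dofunc callback.
 */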
static int nilfs_ioctl_get_info(struct inode *inode, struct file *filp,
				unsigned int cmd, void __user *argp,
				size_t membsz,
				ssize_t (*dofunc)(struct the_nilfs *,
						  __u64 *, int,
						  void *, size_t, size_t))
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	struct nilfs_argv argv;
	int ret;

	if (copy_from_user(&argv, argp, sizeof(argv)))
		return -EFAULT;

	if (argv.v_size < membsz)
		return -EINVAL;

	ret = nilfs_ioctl_wrap_copy(nilfs, &argv, _IOC_DIR(cmd), dofunc);
	if (ret < 0)
		return ret;

	if (copy_to_user(argp, &argv, sizeof(argv)))
		ret = -EFAULT;
	return ret;
}

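/*
 * nilfs_ioctl - dispatcher for the NILFS-specific and generic file ioctls
 */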
long nilfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = filp->f_dentry->d_inode;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case FS_IOC_GETFLAGS:
		return nilfs_ioctl_getflags(inode, argp);
	case FS_IOC_SETFLAGS:
		return nilfs_ioctl_setflags(inode, filp, argp);
	case FS_IOC_GETVERSION:
		return nilfs_ioctl_getversion(inode, argp);
	case NILFS_IOCTL_CHANGE_CPMODE:
		return nilfs_ioctl_change_cpmode(inode, filp, cmd, argp);
	case NILFS_IOCTL_DELETE_CHECKPOINT:
		return nilfs_ioctl_delete_checkpoint(inode, filp, cmd, argp);
	case NILFS_IOCTL_GET_CPINFO:
		return nilfs_ioctl_get_info(inode, filp, cmd, argp,
					    sizeof(struct nilfs_cpinfo),
					    nilfs_ioctl_do_get_cpinfo);
	case NILFS_IOCTL_GET_CPSTAT:
		return nilfs_ioctl_get_cpstat(inode, filp, cmd, argp);
	case NILFS_IOCTL_GET_SUINFO:
		return nilfs_ioctl_get_info(inode, filp, cmd, argp,
					    sizeof(struct nilfs_suinfo),
					    nilfs_ioctl_do_get_suinfo);
	case NILFS_IOCTL_GET_SUSTAT:
		return nilfs_ioctl_get_sustat(inode, filp, cmd, argp);
	case NILFS_IOCTL_GET_VINFO:
		return nilfs_ioctl_get_info(inode, filp, cmd, argp,
					    sizeof(struct nilfs_vinfo),
					    nilfs_ioctl_do_get_vinfo);
	case NILFS_IOCTL_GET_BDESCS:
		return nilfs_ioctl_get_bdescs(inode, filp, cmd, argp);
	case NILFS_IOCTL_CLEAN_SEGMENTS:
		return nilfs_ioctl_clean_segments(inode, filp, cmd, argp);
	case NILFS_IOCTL_SYNC:
		return nilfs_ioctl_sync(inode, filp, cmd, argp);
	case NILFS_IOCTL_RESIZE:
		return nilfs_ioctl_resize(inode, filp, argp);
	case NILFS_IOCTL_SET_ALLOC_RANGE:
		return nilfs_ioctl_set_alloc_range(inode, argp);
	default:
		return -ENOTTY;
	}
}

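/*
 * 32-bit compatibility: the FS_IOC32_* commands are remapped to their
 * native counterparts; NILFS-specific commands are forwarded unchanged
 * apart from converting the argument pointer with compat_ptr().
 */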
#ifdef CONFIG_COMPAT
long nilfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case FS_IOC32_GETFLAGS:
		cmd = FS_IOC_GETFLAGS;
		break;
	case FS_IOC32_SETFLAGS:
		cmd = FS_IOC_SETFLAGS;
		break;
	case FS_IOC32_GETVERSION:
		cmd = FS_IOC_GETVERSION;
		break;
	case NILFS_IOCTL_CHANGE_CPMODE:
	case NILFS_IOCTL_DELETE_CHECKPOINT:
	case NILFS_IOCTL_GET_CPINFO:
	case NILFS_IOCTL_GET_CPSTAT:
	case NILFS_IOCTL_GET_SUINFO:
	case NILFS_IOCTL_GET_SUSTAT:
	case NILFS_IOCTL_GET_VINFO:
	case NILFS_IOCTL_GET_BDESCS:
	case NILFS_IOCTL_CLEAN_SEGMENTS:
	case NILFS_IOCTL_SYNC:
	case NILFS_IOCTL_RESIZE:
	case NILFS_IOCTL_SET_ALLOC_RANGE:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return nilfs_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#endif