/*
 * Quota code necessary even when VFS quota support is not compiled
 * into the kernel. The interesting stuff is over in dquot.c, here
 * we have symbols for initial quotactl(2) handling, the sysctl(2)
 * variables, etc - things needed even when quota support disabled.
 */

#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <asm/current.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/capability.h>
#include <linux/quotaops.h>
#include <linux/types.h>
#include <linux/writeback.h>

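/*
 * Check whether the caller may issue this quotactl command on this
 * superblock. State queries need no privilege, users may look up their
 * own dquots, everything else requires CAP_SYS_ADMIN; the security
 * module gets the final word via security_quotactl().
 */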
static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
				     qid_t id)
{
	switch (cmd) {
	/* these commands do not require any special privileges */
	case Q_GETFMT:
	case Q_SYNC:
	case Q_GETINFO:
	case Q_XGETQSTAT:
	case Q_XGETQSTATV:
	case Q_XQUOTASYNC:
		break;
	/* allow querying information for dquots we "own" */
	case Q_GETQUOTA:
	case Q_XGETQUOTA:
		if ((type == USRQUOTA && uid_eq(current_euid(), make_kuid(current_user_ns(), id))) ||
		    (type == GRPQUOTA && in_egroup_p(make_kgid(current_user_ns(), id))))
			break;
		/*FALLTHROUGH*/
	default:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}

	return security_quotactl(cmd, type, id, sb);
}

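/*
 * iterate_supers() callback: sync quotas of the given type on one
 * superblock, provided the filesystem supports ->quota_sync and has
 * that quota type enabled.
 */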
static void quota_sync_one(struct super_block *sb, void *arg)
{
	int type = *(int *)arg;

	if (sb->s_qcop && sb->s_qcop->quota_sync &&
	    (sb->s_quota_types & (1 << type)))
		sb->s_qcop->quota_sync(sb, type);
}

static int quota_sync_all(int type)
{
	int ret;

	if (type >= MAXQUOTAS)
		return -EINVAL;
	ret = security_quotactl(Q_SYNC, type, 0, NULL);
	if (!ret)
		iterate_supers(quota_sync_one, &type);
	return ret;
}

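/*
 * Translate a quota type into the matching XFS-style enforcement flag
 * passed to ->quota_enable / ->quota_disable.
 */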
unsigned int qtype_enforce_flag(int type)
{
	switch (type) {
	case USRQUOTA:
		return FS_QUOTA_UDQ_ENFD;
	case GRPQUOTA:
		return FS_QUOTA_GDQ_ENFD;
	case PRJQUOTA:
		return FS_QUOTA_PDQ_ENFD;
	}
	return 0;
}

static int quota_quotaon(struct super_block *sb, int type, qid_t id,
			 struct path *path)
{
	if (!sb->s_qcop->quota_on && !sb->s_qcop->quota_enable)
		return -ENOSYS;
	if (sb->s_qcop->quota_enable)
		return sb->s_qcop->quota_enable(sb, qtype_enforce_flag(type));
	if (IS_ERR(path))
		return PTR_ERR(path);
	return sb->s_qcop->quota_on(sb, type, id, path);
}

static int quota_quotaoff(struct super_block *sb, int type)
{
	if (!sb->s_qcop->quota_off && !sb->s_qcop->quota_disable)
		return -ENOSYS;
	if (sb->s_qcop->quota_disable)
		return sb->s_qcop->quota_disable(sb, qtype_enforce_flag(type));
	return sb->s_qcop->quota_off(sb, type);
}

static int quota_getfmt(struct super_block *sb, int type, void __user *addr)
{
	__u32 fmt;

	mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
	if (!sb_has_quota_active(sb, type)) {
		mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
		return -ESRCH;
	}
	fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id;
	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
	if (copy_to_user(addr, &fmt, sizeof(fmt)))
		return -EFAULT;
	return 0;
}

static int quota_getinfo(struct super_block *sb, int type, void __user *addr)
{
	struct qc_state state;
	struct qc_type_state *tstate;
	struct if_dqinfo uinfo;
	int ret;

	/* This checks whether qc_state has enough entries... */
	BUILD_BUG_ON(MAXQUOTAS > XQM_MAXQUOTAS);
	if (!sb->s_qcop->get_state)
		return -ENOSYS;
	ret = sb->s_qcop->get_state(sb, &state);
	if (ret)
		return ret;
	tstate = state.s_state + type;
	if (!(tstate->flags & QCI_ACCT_ENABLED))
		return -ESRCH;
	memset(&uinfo, 0, sizeof(uinfo));
	uinfo.dqi_bgrace = tstate->spc_timelimit;
	uinfo.dqi_igrace = tstate->ino_timelimit;
	if (tstate->flags & QCI_SYSFILE)
		uinfo.dqi_flags |= DQF_SYS_FILE;
	if (tstate->flags & QCI_ROOT_SQUASH)
		uinfo.dqi_flags |= DQF_ROOT_SQUASH;
	uinfo.dqi_valid = IIF_ALL;
	if (copy_to_user(addr, &uinfo, sizeof(uinfo)))
		return -EFAULT;
	return 0;
}

static int quota_setinfo(struct super_block *sb, int type, void __user *addr)
{
	struct if_dqinfo info;
	struct qc_info qinfo;

	if (copy_from_user(&info, addr, sizeof(info)))
		return -EFAULT;
	if (!sb->s_qcop->set_info)
		return -ENOSYS;
	if (info.dqi_valid & ~(IIF_FLAGS | IIF_BGRACE | IIF_IGRACE))
		return -EINVAL;
	memset(&qinfo, 0, sizeof(qinfo));
	if (info.dqi_valid & IIF_FLAGS) {
		if (info.dqi_flags & ~DQF_SETINFO_MASK)
			return -EINVAL;
		if (info.dqi_flags & DQF_ROOT_SQUASH)
			qinfo.i_flags |= QCI_ROOT_SQUASH;
		qinfo.i_fieldmask |= QC_FLAGS;
	}
	if (info.dqi_valid & IIF_BGRACE) {
		qinfo.i_spc_timelimit = info.dqi_bgrace;
		qinfo.i_fieldmask |= QC_SPC_TIMER;
	}
	if (info.dqi_valid & IIF_IGRACE) {
		qinfo.i_ino_timelimit = info.dqi_igrace;
		qinfo.i_fieldmask |= QC_INO_TIMER;
	}
	return sb->s_qcop->set_info(sb, type, &qinfo);
}

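/*
 * The generic quota interface measures space in bytes while the legacy
 * if_dqblk interface uses 1KB quota blocks (QIF_DQBLKSIZE); qbtos() and
 * stoqb() convert between the two, rounding space up to whole blocks.
 */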
static inline qsize_t qbtos(qsize_t blocks)
{
	return blocks << QIF_DQBLKSIZE_BITS;
}

static inline qsize_t stoqb(qsize_t space)
{
	return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
}

static void copy_to_if_dqblk(struct if_dqblk *dst, struct qc_dqblk *src)
{
	memset(dst, 0, sizeof(*dst));
	dst->dqb_bhardlimit = stoqb(src->d_spc_hardlimit);
	dst->dqb_bsoftlimit = stoqb(src->d_spc_softlimit);
	dst->dqb_curspace = src->d_space;
	dst->dqb_ihardlimit = src->d_ino_hardlimit;
	dst->dqb_isoftlimit = src->d_ino_softlimit;
	dst->dqb_curinodes = src->d_ino_count;
	dst->dqb_btime = src->d_spc_timer;
	dst->dqb_itime = src->d_ino_timer;
	dst->dqb_valid = QIF_ALL;
}

static int quota_getquota(struct super_block *sb, int type, qid_t id,
			  void __user *addr)
{
	struct kqid qid;
	struct qc_dqblk fdq;
	struct if_dqblk idq;
	int ret;

	if (!sb->s_qcop->get_dqblk)
		return -ENOSYS;
	qid = make_kqid(current_user_ns(), type, id);
	if (!qid_valid(qid))
		return -EINVAL;
	ret = sb->s_qcop->get_dqblk(sb, qid, &fdq);
	if (ret)
		return ret;
	copy_to_if_dqblk(&idq, &fdq);
	if (copy_to_user(addr, &idq, sizeof(idq)))
		return -EFAULT;
	return 0;
}

/*
 * Return quota for next active quota >= this id, if any exists,
 * otherwise return -ESRCH via ->get_nextdqblk.
 */
static int quota_getnextquota(struct super_block *sb, int type, qid_t id,
			      void __user *addr)
{
	struct kqid qid;
	struct qc_dqblk fdq;
	struct if_nextdqblk idq;
	int ret;

	if (!sb->s_qcop->get_nextdqblk)
		return -ENOSYS;
	qid = make_kqid(current_user_ns(), type, id);
	if (!qid_valid(qid))
		return -EINVAL;
	ret = sb->s_qcop->get_nextdqblk(sb, &qid, &fdq);
	if (ret)
		return ret;
	/* struct if_nextdqblk is a superset of struct if_dqblk */
	copy_to_if_dqblk((struct if_dqblk *)&idq, &fdq);
	idq.dqb_id = from_kqid(current_user_ns(), qid);
	if (copy_to_user(addr, &idq, sizeof(idq)))
		return -EFAULT;
	return 0;
}

static void copy_from_if_dqblk(struct qc_dqblk *dst, struct if_dqblk *src)
{
	dst->d_spc_hardlimit = qbtos(src->dqb_bhardlimit);
	dst->d_spc_softlimit = qbtos(src->dqb_bsoftlimit);
	dst->d_space = src->dqb_curspace;
	dst->d_ino_hardlimit = src->dqb_ihardlimit;
	dst->d_ino_softlimit = src->dqb_isoftlimit;
	dst->d_ino_count = src->dqb_curinodes;
	dst->d_spc_timer = src->dqb_btime;
	dst->d_ino_timer = src->dqb_itime;

	dst->d_fieldmask = 0;
	if (src->dqb_valid & QIF_BLIMITS)
		dst->d_fieldmask |= QC_SPC_SOFT | QC_SPC_HARD;
	if (src->dqb_valid & QIF_SPACE)
		dst->d_fieldmask |= QC_SPACE;
	if (src->dqb_valid & QIF_ILIMITS)
		dst->d_fieldmask |= QC_INO_SOFT | QC_INO_HARD;
	if (src->dqb_valid & QIF_INODES)
		dst->d_fieldmask |= QC_INO_COUNT;
	if (src->dqb_valid & QIF_BTIME)
		dst->d_fieldmask |= QC_SPC_TIMER;
	if (src->dqb_valid & QIF_ITIME)
		dst->d_fieldmask |= QC_INO_TIMER;
}

static int quota_setquota(struct super_block *sb, int type, qid_t id,
			  void __user *addr)
{
	struct qc_dqblk fdq;
	struct if_dqblk idq;
	struct kqid qid;

	if (copy_from_user(&idq, addr, sizeof(idq)))
		return -EFAULT;
	if (!sb->s_qcop->set_dqblk)
		return -ENOSYS;
	qid = make_kqid(current_user_ns(), type, id);
	if (!qid_valid(qid))
		return -EINVAL;
	copy_from_if_dqblk(&fdq, &idq);
	return sb->s_qcop->set_dqblk(sb, qid, &fdq);
}

static int quota_enable(struct super_block *sb, void __user *addr)
{
	__u32 flags;

	if (copy_from_user(&flags, addr, sizeof(flags)))
		return -EFAULT;
	if (!sb->s_qcop->quota_enable)
		return -ENOSYS;
	return sb->s_qcop->quota_enable(sb, flags);
}

static int quota_disable(struct super_block *sb, void __user *addr)
{
	__u32 flags;

	if (copy_from_user(&flags, addr, sizeof(flags)))
		return -EFAULT;
	if (!sb->s_qcop->quota_disable)
		return -ENOSYS;
	return sb->s_qcop->quota_disable(sb, flags);
}

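/*
 * Collapse the per-type qc_state flags into the FS_QUOTA_*_ACCT/_ENFD
 * flag word reported by Q_XGETQSTAT and Q_XGETQSTATV.
 */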
static int quota_state_to_flags(struct qc_state *state)
{
	int flags = 0;

	if (state->s_state[USRQUOTA].flags & QCI_ACCT_ENABLED)
		flags |= FS_QUOTA_UDQ_ACCT;
	if (state->s_state[USRQUOTA].flags & QCI_LIMITS_ENFORCED)
		flags |= FS_QUOTA_UDQ_ENFD;
	if (state->s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED)
		flags |= FS_QUOTA_GDQ_ACCT;
	if (state->s_state[GRPQUOTA].flags & QCI_LIMITS_ENFORCED)
		flags |= FS_QUOTA_GDQ_ENFD;
	if (state->s_state[PRJQUOTA].flags & QCI_ACCT_ENABLED)
		flags |= FS_QUOTA_PDQ_ACCT;
	if (state->s_state[PRJQUOTA].flags & QCI_LIMITS_ENFORCED)
		flags |= FS_QUOTA_PDQ_ENFD;
	return flags;
}

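/*
 * Build the Q_XGETQSTAT reply from the generic ->get_state() data. Only
 * one set of grace times fits in fs_quota_stat, so the limits of the
 * first enabled quota type are reported.
 */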
static int quota_getstate(struct super_block *sb, struct fs_quota_stat *fqs)
{
	int type;
	struct qc_state state;
	int ret;

	ret = sb->s_qcop->get_state(sb, &state);
	if (ret < 0)
		return ret;

	memset(fqs, 0, sizeof(*fqs));
	fqs->qs_version = FS_QSTAT_VERSION;
	fqs->qs_flags = quota_state_to_flags(&state);
	/* No quota enabled? */
	if (!fqs->qs_flags)
		return -ENOSYS;
	fqs->qs_incoredqs = state.s_incoredqs;
	/*
	 * GETXSTATE quotactl has space for just one set of time limits so
	 * report them for the first enabled quota type
	 */
	for (type = 0; type < XQM_MAXQUOTAS; type++)
		if (state.s_state[type].flags & QCI_ACCT_ENABLED)
			break;
	BUG_ON(type == XQM_MAXQUOTAS);
	fqs->qs_btimelimit = state.s_state[type].spc_timelimit;
	fqs->qs_itimelimit = state.s_state[type].ino_timelimit;
	fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
	fqs->qs_bwarnlimit = state.s_state[type].spc_warnlimit;
	fqs->qs_iwarnlimit = state.s_state[type].ino_warnlimit;
	if (state.s_state[USRQUOTA].flags & QCI_ACCT_ENABLED) {
		fqs->qs_uquota.qfs_ino = state.s_state[USRQUOTA].ino;
		fqs->qs_uquota.qfs_nblks = state.s_state[USRQUOTA].blocks;
		fqs->qs_uquota.qfs_nextents = state.s_state[USRQUOTA].nextents;
	}
	if (state.s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED) {
		fqs->qs_gquota.qfs_ino = state.s_state[GRPQUOTA].ino;
		fqs->qs_gquota.qfs_nblks = state.s_state[GRPQUOTA].blocks;
		fqs->qs_gquota.qfs_nextents = state.s_state[GRPQUOTA].nextents;
	}
	if (state.s_state[PRJQUOTA].flags & QCI_ACCT_ENABLED) {
		/*
		 * Q_XGETQSTAT doesn't have room for both group and project
		 * quotas. So, allow the project quota values to be copied out
		 * only if there is no group quota information available.
		 */
		if (!(state.s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED)) {
			fqs->qs_gquota.qfs_ino = state.s_state[PRJQUOTA].ino;
			fqs->qs_gquota.qfs_nblks =
					state.s_state[PRJQUOTA].blocks;
			fqs->qs_gquota.qfs_nextents =
					state.s_state[PRJQUOTA].nextents;
		}
	}
	return 0;
}

static int quota_getxstate(struct super_block *sb, void __user *addr)
{
	struct fs_quota_stat fqs;
	int ret;

	if (!sb->s_qcop->get_state)
		return -ENOSYS;
	ret = quota_getstate(sb, &fqs);
	if (!ret && copy_to_user(addr, &fqs, sizeof(fqs)))
		return -EFAULT;
	return ret;
}

static int quota_getstatev(struct super_block *sb, struct fs_quota_statv *fqs)
{
	int type;
	struct qc_state state;
	int ret;

	ret = sb->s_qcop->get_state(sb, &state);
	if (ret < 0)
		return ret;

	memset(fqs, 0, sizeof(*fqs));
	fqs->qs_version = FS_QSTAT_VERSION;
	fqs->qs_flags = quota_state_to_flags(&state);
	/* No quota enabled? */
	if (!fqs->qs_flags)
		return -ENOSYS;
	fqs->qs_incoredqs = state.s_incoredqs;
	/*
	 * GETXSTATV quotactl has space for just one set of time limits so
	 * report them for the first enabled quota type
	 */
	for (type = 0; type < XQM_MAXQUOTAS; type++)
		if (state.s_state[type].flags & QCI_ACCT_ENABLED)
			break;
	BUG_ON(type == XQM_MAXQUOTAS);
	fqs->qs_btimelimit = state.s_state[type].spc_timelimit;
	fqs->qs_itimelimit = state.s_state[type].ino_timelimit;
	fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
	fqs->qs_bwarnlimit = state.s_state[type].spc_warnlimit;
	fqs->qs_iwarnlimit = state.s_state[type].ino_warnlimit;
	if (state.s_state[USRQUOTA].flags & QCI_ACCT_ENABLED) {
		fqs->qs_uquota.qfs_ino = state.s_state[USRQUOTA].ino;
		fqs->qs_uquota.qfs_nblks = state.s_state[USRQUOTA].blocks;
		fqs->qs_uquota.qfs_nextents = state.s_state[USRQUOTA].nextents;
	}
	if (state.s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED) {
		fqs->qs_gquota.qfs_ino = state.s_state[GRPQUOTA].ino;
		fqs->qs_gquota.qfs_nblks = state.s_state[GRPQUOTA].blocks;
		fqs->qs_gquota.qfs_nextents = state.s_state[GRPQUOTA].nextents;
	}
	if (state.s_state[PRJQUOTA].flags & QCI_ACCT_ENABLED) {
		fqs->qs_pquota.qfs_ino = state.s_state[PRJQUOTA].ino;
		fqs->qs_pquota.qfs_nblks = state.s_state[PRJQUOTA].blocks;
		fqs->qs_pquota.qfs_nextents = state.s_state[PRJQUOTA].nextents;
	}
	return 0;
}

static int quota_getxstatev(struct super_block *sb, void __user *addr)
{
	struct fs_quota_statv fqs;
	int ret;

	if (!sb->s_qcop->get_state)
		return -ENOSYS;

	memset(&fqs, 0, sizeof(fqs));
	if (copy_from_user(&fqs, addr, 1)) /* Just read qs_version */
		return -EFAULT;

	/* If this kernel doesn't support user specified version, fail */
	switch (fqs.qs_version) {
	case FS_QSTATV_VERSION1:
		break;
	default:
		return -EINVAL;
	}
	ret = quota_getstatev(sb, &fqs);
	if (!ret && copy_to_user(addr, &fqs, sizeof(fqs)))
		return -EFAULT;
	return ret;
}

/*
 * XFS defines BBTOB and BTOBB macros inside fs/xfs/ and we cannot move them
 * out of there as xfsprogs rely on definitions being in that header file. So
 * just define same functions here for quota purposes.
 */
#define XFS_BB_SHIFT 9

static inline u64 quota_bbtob(u64 blocks)
{
	return blocks << XFS_BB_SHIFT;
}

static inline u64 quota_btobb(u64 bytes)
{
	return (bytes + (1 << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT;
}

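/*
 * Convert a userspace fs_disk_quota (512-byte basic blocks) into the
 * generic qc_dqblk (bytes), translating the FS_DQ_* field mask into the
 * corresponding QC_* bits.
 */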
static void copy_from_xfs_dqblk(struct qc_dqblk *dst, struct fs_disk_quota *src)
{
	dst->d_spc_hardlimit = quota_bbtob(src->d_blk_hardlimit);
	dst->d_spc_softlimit = quota_bbtob(src->d_blk_softlimit);
	dst->d_ino_hardlimit = src->d_ino_hardlimit;
	dst->d_ino_softlimit = src->d_ino_softlimit;
	dst->d_space = quota_bbtob(src->d_bcount);
	dst->d_ino_count = src->d_icount;
	dst->d_ino_timer = src->d_itimer;
	dst->d_spc_timer = src->d_btimer;
	dst->d_ino_warns = src->d_iwarns;
	dst->d_spc_warns = src->d_bwarns;
	dst->d_rt_spc_hardlimit = quota_bbtob(src->d_rtb_hardlimit);
	dst->d_rt_spc_softlimit = quota_bbtob(src->d_rtb_softlimit);
	dst->d_rt_space = quota_bbtob(src->d_rtbcount);
	dst->d_rt_spc_timer = src->d_rtbtimer;
	dst->d_rt_spc_warns = src->d_rtbwarns;
	dst->d_fieldmask = 0;
	if (src->d_fieldmask & FS_DQ_ISOFT)
		dst->d_fieldmask |= QC_INO_SOFT;
	if (src->d_fieldmask & FS_DQ_IHARD)
		dst->d_fieldmask |= QC_INO_HARD;
	if (src->d_fieldmask & FS_DQ_BSOFT)
		dst->d_fieldmask |= QC_SPC_SOFT;
	if (src->d_fieldmask & FS_DQ_BHARD)
		dst->d_fieldmask |= QC_SPC_HARD;
	if (src->d_fieldmask & FS_DQ_RTBSOFT)
		dst->d_fieldmask |= QC_RT_SPC_SOFT;
	if (src->d_fieldmask & FS_DQ_RTBHARD)
		dst->d_fieldmask |= QC_RT_SPC_HARD;
	if (src->d_fieldmask & FS_DQ_BTIMER)
		dst->d_fieldmask |= QC_SPC_TIMER;
	if (src->d_fieldmask & FS_DQ_ITIMER)
		dst->d_fieldmask |= QC_INO_TIMER;
	if (src->d_fieldmask & FS_DQ_RTBTIMER)
		dst->d_fieldmask |= QC_RT_SPC_TIMER;
	if (src->d_fieldmask & FS_DQ_BWARNS)
		dst->d_fieldmask |= QC_SPC_WARNS;
	if (src->d_fieldmask & FS_DQ_IWARNS)
		dst->d_fieldmask |= QC_INO_WARNS;
	if (src->d_fieldmask & FS_DQ_RTBWARNS)
		dst->d_fieldmask |= QC_RT_SPC_WARNS;
	if (src->d_fieldmask & FS_DQ_BCOUNT)
		dst->d_fieldmask |= QC_SPACE;
	if (src->d_fieldmask & FS_DQ_ICOUNT)
		dst->d_fieldmask |= QC_INO_COUNT;
	if (src->d_fieldmask & FS_DQ_RTBCOUNT)
		dst->d_fieldmask |= QC_RT_SPACE;
}

static void copy_qcinfo_from_xfs_dqblk(struct qc_info *dst,
				       struct fs_disk_quota *src)
{
	memset(dst, 0, sizeof(*dst));
	dst->i_spc_timelimit = src->d_btimer;
	dst->i_ino_timelimit = src->d_itimer;
	dst->i_rt_spc_timelimit = src->d_rtbtimer;
	dst->i_ino_warnlimit = src->d_iwarns;
	dst->i_spc_warnlimit = src->d_bwarns;
	dst->i_rt_spc_warnlimit = src->d_rtbwarns;
	if (src->d_fieldmask & FS_DQ_BWARNS)
		dst->i_fieldmask |= QC_SPC_WARNS;
	if (src->d_fieldmask & FS_DQ_IWARNS)
		dst->i_fieldmask |= QC_INO_WARNS;
	if (src->d_fieldmask & FS_DQ_RTBWARNS)
		dst->i_fieldmask |= QC_RT_SPC_WARNS;
	if (src->d_fieldmask & FS_DQ_BTIMER)
		dst->i_fieldmask |= QC_SPC_TIMER;
	if (src->d_fieldmask & FS_DQ_ITIMER)
		dst->i_fieldmask |= QC_INO_TIMER;
	if (src->d_fieldmask & FS_DQ_RTBTIMER)
		dst->i_fieldmask |= QC_RT_SPC_TIMER;
}

static int quota_setxquota(struct super_block *sb, int type, qid_t id,
			   void __user *addr)
{
	struct fs_disk_quota fdq;
	struct qc_dqblk qdq;
	struct kqid qid;

	if (copy_from_user(&fdq, addr, sizeof(fdq)))
		return -EFAULT;
	if (!sb->s_qcop->set_dqblk)
		return -ENOSYS;
	qid = make_kqid(current_user_ns(), type, id);
	if (!qid_valid(qid))
		return -EINVAL;
	/* Are we actually setting timer / warning limits for all users? */
	if (from_kqid(&init_user_ns, qid) == 0 &&
	    fdq.d_fieldmask & (FS_DQ_WARNS_MASK | FS_DQ_TIMER_MASK)) {
		struct qc_info qinfo;
		int ret;

		if (!sb->s_qcop->set_info)
			return -EINVAL;
		copy_qcinfo_from_xfs_dqblk(&qinfo, &fdq);
		ret = sb->s_qcop->set_info(sb, type, &qinfo);
		if (ret)
			return ret;
		/* These are already done */
		fdq.d_fieldmask &= ~(FS_DQ_WARNS_MASK | FS_DQ_TIMER_MASK);
	}
	copy_from_xfs_dqblk(&qdq, &fdq);
	return sb->s_qcop->set_dqblk(sb, qid, &qdq);
}

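/*
 * Inverse of copy_from_xfs_dqblk(): fill a userspace fs_disk_quota from
 * the generic qc_dqblk, converting bytes back to 512-byte basic blocks.
 */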
static void copy_to_xfs_dqblk(struct fs_disk_quota *dst, struct qc_dqblk *src,
			      int type, qid_t id)
{
	memset(dst, 0, sizeof(*dst));
	dst->d_version = FS_DQUOT_VERSION;
	dst->d_id = id;
	if (type == USRQUOTA)
		dst->d_flags = FS_USER_QUOTA;
	else if (type == PRJQUOTA)
		dst->d_flags = FS_PROJ_QUOTA;
	else
		dst->d_flags = FS_GROUP_QUOTA;
	dst->d_blk_hardlimit = quota_btobb(src->d_spc_hardlimit);
	dst->d_blk_softlimit = quota_btobb(src->d_spc_softlimit);
	dst->d_ino_hardlimit = src->d_ino_hardlimit;
	dst->d_ino_softlimit = src->d_ino_softlimit;
	dst->d_bcount = quota_btobb(src->d_space);
	dst->d_icount = src->d_ino_count;
	dst->d_itimer = src->d_ino_timer;
	dst->d_btimer = src->d_spc_timer;
	dst->d_iwarns = src->d_ino_warns;
	dst->d_bwarns = src->d_spc_warns;
	dst->d_rtb_hardlimit = quota_btobb(src->d_rt_spc_hardlimit);
	dst->d_rtb_softlimit = quota_btobb(src->d_rt_spc_softlimit);
	dst->d_rtbcount = quota_btobb(src->d_rt_space);
	dst->d_rtbtimer = src->d_rt_spc_timer;
	dst->d_rtbwarns = src->d_rt_spc_warns;
}

static int quota_getxquota(struct super_block *sb, int type, qid_t id,
			   void __user *addr)
{
	struct fs_disk_quota fdq;
	struct qc_dqblk qdq;
	struct kqid qid;
	int ret;

	if (!sb->s_qcop->get_dqblk)
		return -ENOSYS;
	qid = make_kqid(current_user_ns(), type, id);
	if (!qid_valid(qid))
		return -EINVAL;
	ret = sb->s_qcop->get_dqblk(sb, qid, &qdq);
	if (ret)
		return ret;
	copy_to_xfs_dqblk(&fdq, &qdq, type, id);
	if (copy_to_user(addr, &fdq, sizeof(fdq)))
		return -EFAULT;
	return ret;
}

/*
 * Return quota for next active quota >= this id, if any exists,
 * otherwise return -ESRCH via ->get_nextdqblk.
 */
static int quota_getnextxquota(struct super_block *sb, int type, qid_t id,
			       void __user *addr)
{
	struct fs_disk_quota fdq;
	struct qc_dqblk qdq;
	struct kqid qid;
	qid_t id_out;
	int ret;

	if (!sb->s_qcop->get_nextdqblk)
		return -ENOSYS;
	qid = make_kqid(current_user_ns(), type, id);
	if (!qid_valid(qid))
		return -EINVAL;
	ret = sb->s_qcop->get_nextdqblk(sb, &qid, &qdq);
	if (ret)
		return ret;
	id_out = from_kqid(current_user_ns(), qid);
	copy_to_xfs_dqblk(&fdq, &qdq, type, id_out);
	if (copy_to_user(addr, &fdq, sizeof(fdq)))
		return -EFAULT;
	return ret;
}

static int quota_rmxquota(struct super_block *sb, void __user *addr)
{
	__u32 flags;

	if (copy_from_user(&flags, addr, sizeof(flags)))
		return -EFAULT;
	if (!sb->s_qcop->rm_xquota)
		return -ENOSYS;
	return sb->s_qcop->rm_xquota(sb, flags);
}

/* Copy parameters and call proper function */
static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
		       void __user *addr, struct path *path)
{
	int ret;

	if (type >= (XQM_COMMAND(cmd) ? XQM_MAXQUOTAS : MAXQUOTAS))
		return -EINVAL;
	/*
	 * Quota not supported on this fs? Check this before s_quota_types
	 * since they needn't be set if quota is not supported at all.
	 */
	if (!sb->s_qcop)
		return -ENOSYS;
	if (!(sb->s_quota_types & (1 << type)))
		return -EINVAL;

	ret = check_quotactl_permission(sb, type, cmd, id);
	if (ret < 0)
		return ret;

	switch (cmd) {
	case Q_QUOTAON:
		return quota_quotaon(sb, type, id, path);
	case Q_QUOTAOFF:
		return quota_quotaoff(sb, type);
	case Q_GETFMT:
		return quota_getfmt(sb, type, addr);
	case Q_GETINFO:
		return quota_getinfo(sb, type, addr);
	case Q_SETINFO:
		return quota_setinfo(sb, type, addr);
	case Q_GETQUOTA:
		return quota_getquota(sb, type, id, addr);
	case Q_GETNEXTQUOTA:
		return quota_getnextquota(sb, type, id, addr);
	case Q_SETQUOTA:
		return quota_setquota(sb, type, id, addr);
	case Q_SYNC:
		if (!sb->s_qcop->quota_sync)
			return -ENOSYS;
		return sb->s_qcop->quota_sync(sb, type);
	case Q_XQUOTAON:
		return quota_enable(sb, addr);
	case Q_XQUOTAOFF:
		return quota_disable(sb, addr);
	case Q_XQUOTARM:
		return quota_rmxquota(sb, addr);
	case Q_XGETQSTAT:
		return quota_getxstate(sb, addr);
	case Q_XGETQSTATV:
		return quota_getxstatev(sb, addr);
	case Q_XSETQLIM:
		return quota_setxquota(sb, type, id, addr);
	case Q_XGETQUOTA:
		return quota_getxquota(sb, type, id, addr);
	case Q_XGETNEXTQUOTA:
		return quota_getnextxquota(sb, type, id, addr);
	case Q_XQUOTASYNC:
		if (sb->s_flags & MS_RDONLY)
			return -EROFS;
		/* XFS quotas are fully coherent now, making this call a noop */
		return 0;
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_BLOCK

/* Return 1 if 'cmd' will block on frozen filesystem */
static int quotactl_cmd_write(int cmd)
{
	switch (cmd) {
	case Q_GETFMT:
	case Q_GETINFO:
	case Q_GETNEXTQUOTA:
	case Q_SYNC:
	case Q_XGETQSTAT:
	case Q_XGETQSTATV:
	case Q_XGETQUOTA:
	case Q_XGETNEXTQUOTA:
	case Q_XQUOTASYNC:
		return 0;
	}
	return 1;
}

#endif /* CONFIG_BLOCK */

/*
 * look up a superblock on which quota ops will be performed
 * - use the name of a block device to find the superblock thereon
 */
static struct super_block *quotactl_block(const char __user *special, int cmd)
{
#ifdef CONFIG_BLOCK
	struct block_device *bdev;
	struct super_block *sb;
	struct filename *tmp = getname(special);

	if (IS_ERR(tmp))
		return ERR_CAST(tmp);
	bdev = lookup_bdev(tmp->name);
	putname(tmp);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);
	if (quotactl_cmd_write(cmd))
		sb = get_super_thawed(bdev);
	else
		sb = get_super(bdev);
	bdput(bdev);
	if (!sb)
		return ERR_PTR(-ENODEV);

	return sb;
#else
	return ERR_PTR(-ENODEV);
#endif
}

/*
 * This is the system call interface. This communicates with
 * the user-level programs. Currently this only supports diskquota
 * calls. Maybe we need to add the process quotas etc. in the future,
 * but we probably should use rlimits for that.
 */
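/*
 * Illustrative userspace invocation (a sketch, not part of this file's
 * interface; the device path and uid below are placeholders):
 *
 *	struct if_dqblk dq;
 *	quotactl(QCMD(Q_GETQUOTA, USRQUOTA), "/dev/sda1", uid, (caddr_t)&dq);
 *
 * QCMD() packs the subcommand and quota type into one word, which is
 * unpacked below via SUBCMDSHIFT/SUBCMDMASK before dispatching to
 * do_quotactl().
 */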
SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special,
		qid_t, id, void __user *, addr)
{
	uint cmds, type;
	struct super_block *sb = NULL;
	struct path path, *pathp = NULL;
	int ret;

	cmds = cmd >> SUBCMDSHIFT;
	type = cmd & SUBCMDMASK;

	/*
	 * As a special case Q_SYNC can be called without a specific device.
	 * It will iterate all superblocks that have quota enabled and call
	 * the sync action on each of them.
	 */
	if (!special) {
		if (cmds == Q_SYNC)
			return quota_sync_all(type);
		return -ENODEV;
	}

	/*
	 * Path for quotaon has to be resolved before grabbing superblock
	 * because that gets s_umount sem which is also possibly needed by path
	 * resolution (think about autofs) and thus deadlocks could arise.
	 */
	if (cmds == Q_QUOTAON) {
		ret = user_path_at(AT_FDCWD, addr, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
		if (ret)
			pathp = ERR_PTR(ret);
		else
			pathp = &path;
	}

	sb = quotactl_block(special, cmds);
	if (IS_ERR(sb)) {
		ret = PTR_ERR(sb);
		goto out;
	}

	ret = do_quotactl(sb, type, cmds, id, addr, pathp);

	drop_super(sb);
out:
	if (pathp && !IS_ERR(pathp))
		path_put(pathp);
	return ret;
}