commit 1ebb275afc

Merge git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-2.6-nmw

* git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-2.6-nmw: (31 commits)
  GFS2: Fix glock refcount issues
  writeback: remove unused nonblocking and congestion checks (gfs2)
  GFS2: drop rindex glock to refresh rindex list
  GFS2: Tag all metadata with jid
  GFS2: Locking order fix in gfs2_check_blk_state
  GFS2: Remove dirent_first() function
  GFS2: Display nobarrier option in /proc/mounts
  GFS2: add barrier/nobarrier mount options
  GFS2: remove division from new statfs code
  GFS2: Improve statfs and quota usability
  GFS2: Use dquot_send_warning()
  VFS: Export dquot_send_warning
  GFS2: Add set_xquota support
  GFS2: Add get_xquota support
  GFS2: Clean up gfs2_adjust_quota() and do_glock()
  GFS2: Remove constant argument from qd_get()
  GFS2: Remove constant argument from qdsb_get()
  GFS2: Add proper error reporting to quota sync via sysfs
  GFS2: Add get_xstate quota function
  GFS2: Remove obsolete code in quota.c
  ...

@@ -8,6 +8,8 @@ config GFS2_FS
select FS_POSIX_ACL
select CRC32
select SLOW_WORK
select QUOTA
select QUOTACTL
help
A cluster filesystem.

363 fs/gfs2/acl.c

@@ -12,6 +12,7 @@
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/gfs2_ondisk.h>

@@ -26,108 +27,44 @@
#include "trans.h"
#include "util.h"

#define ACL_ACCESS 1
#define ACL_DEFAULT 0

static const char *gfs2_acl_name(int type)
{
switch (type) {
case ACL_TYPE_ACCESS:
return GFS2_POSIX_ACL_ACCESS;
case ACL_TYPE_DEFAULT:
return GFS2_POSIX_ACL_DEFAULT;
}
return NULL;
}

int gfs2_acl_validate_set(struct gfs2_inode *ip, int access,
struct gfs2_ea_request *er, int *remove, mode_t *mode)
static struct posix_acl *gfs2_acl_get(struct gfs2_inode *ip, int type)
{
struct posix_acl *acl;
int error;

error = gfs2_acl_validate_remove(ip, access);
if (error)
return error;

if (!er->er_data)
return -EINVAL;

acl = posix_acl_from_xattr(er->er_data, er->er_data_len);
if (IS_ERR(acl))
return PTR_ERR(acl);
if (!acl) {
*remove = 1;
return 0;
}

error = posix_acl_valid(acl);
if (error)
goto out;

if (access) {
error = posix_acl_equiv_mode(acl, mode);
if (!error)
*remove = 1;
else if (error > 0)
error = 0;
}

out:
posix_acl_release(acl);
return error;
}

int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access)
{
if (!GFS2_SB(&ip->i_inode)->sd_args.ar_posix_acl)
return -EOPNOTSUPP;
if (!is_owner_or_cap(&ip->i_inode))
return -EPERM;
if (S_ISLNK(ip->i_inode.i_mode))
return -EOPNOTSUPP;
if (!access && !S_ISDIR(ip->i_inode.i_mode))
return -EACCES;

return 0;
}

static int acl_get(struct gfs2_inode *ip, const char *name,
struct posix_acl **acl, struct gfs2_ea_location *el,
char **datap, unsigned int *lenp)
{
const char *name;
char *data;
unsigned int len;
int error;

el->el_bh = NULL;
int len;

if (!ip->i_eattr)
return 0;
return NULL;

error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, name, el);
if (error)
return error;
if (!el->el_ea)
return 0;
if (!GFS2_EA_DATA_LEN(el->el_ea))
goto out;
acl = get_cached_acl(&ip->i_inode, type);
if (acl != ACL_NOT_CACHED)
return acl;

len = GFS2_EA_DATA_LEN(el->el_ea);
data = kmalloc(len, GFP_NOFS);
error = -ENOMEM;
if (!data)
goto out;
name = gfs2_acl_name(type);
if (name == NULL)
return ERR_PTR(-EINVAL);

error = gfs2_ea_get_copy(ip, el, data, len);
if (error < 0)
goto out_kfree;
error = 0;
len = gfs2_xattr_acl_get(ip, name, &data);
if (len < 0)
return ERR_PTR(len);
if (len == 0)
return NULL;

if (acl) {
*acl = posix_acl_from_xattr(data, len);
if (IS_ERR(*acl))
error = PTR_ERR(*acl);
}

out_kfree:
if (error || !datap) {
kfree(data);
} else {
*datap = data;
*lenp = len;
}
out:
return error;
acl = posix_acl_from_xattr(data, len);
kfree(data);
return acl;
}

/**

@@ -140,14 +77,12 @@ out:

int gfs2_check_acl(struct inode *inode, int mask)
{
struct gfs2_ea_location el;
struct posix_acl *acl = NULL;
struct posix_acl *acl;
int error;

error = acl_get(GFS2_I(inode), GFS2_POSIX_ACL_ACCESS, &acl, &el, NULL, NULL);
brelse(el.el_bh);
if (error)
return error;
acl = gfs2_acl_get(GFS2_I(inode), ACL_TYPE_ACCESS);
if (IS_ERR(acl))
return PTR_ERR(acl);

if (acl) {
error = posix_acl_permission(inode, acl, mask);

@@ -158,57 +93,75 @@ int gfs2_check_acl(struct inode *inode, int mask)
return -EAGAIN;
}

static int munge_mode(struct gfs2_inode *ip, mode_t mode)
static int gfs2_set_mode(struct inode *inode, mode_t mode)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct buffer_head *dibh;
int error;
int error = 0;

error = gfs2_trans_begin(sdp, RES_DINODE, 0);
if (error)
return error;
if (mode != inode->i_mode) {
struct iattr iattr;

error = gfs2_meta_inode_buffer(ip, &dibh);
if (!error) {
gfs2_assert_withdraw(sdp,
(ip->i_inode.i_mode & S_IFMT) == (mode & S_IFMT));
ip->i_inode.i_mode = mode;
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
iattr.ia_valid = ATTR_MODE;
iattr.ia_mode = mode;

error = gfs2_setattr_simple(GFS2_I(inode), &iattr);
}

gfs2_trans_end(sdp);

return 0;
return error;
}

int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip)
static int gfs2_acl_set(struct inode *inode, int type, struct posix_acl *acl)
{
struct gfs2_ea_location el;
struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
struct posix_acl *acl = NULL, *clone;
mode_t mode = ip->i_inode.i_mode;
char *data = NULL;
unsigned int len;
int error;
int len;
char *data;
const char *name = gfs2_acl_name(type);

BUG_ON(name == NULL);
len = posix_acl_to_xattr(acl, NULL, 0);
if (len == 0)
return 0;
data = kmalloc(len, GFP_NOFS);
if (data == NULL)
return -ENOMEM;
error = posix_acl_to_xattr(acl, data, len);
if (error < 0)
goto out;
error = gfs2_xattr_set(inode, GFS2_EATYPE_SYS, name, data, len, 0);
if (!error)
set_cached_acl(inode, type, acl);
out:
kfree(data);
return error;
}

int gfs2_acl_create(struct gfs2_inode *dip, struct inode *inode)
{
struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
struct posix_acl *acl, *clone;
mode_t mode = inode->i_mode;
int error = 0;

if (!sdp->sd_args.ar_posix_acl)
return 0;
if (S_ISLNK(ip->i_inode.i_mode))
if (S_ISLNK(inode->i_mode))
return 0;

error = acl_get(dip, GFS2_POSIX_ACL_DEFAULT, &acl, &el, &data, &len);
brelse(el.el_bh);
if (error)
return error;
acl = gfs2_acl_get(dip, ACL_TYPE_DEFAULT);
if (IS_ERR(acl))
return PTR_ERR(acl);
if (!acl) {
mode &= ~current_umask();
if (mode != ip->i_inode.i_mode)
error = munge_mode(ip, mode);
if (mode != inode->i_mode)
error = gfs2_set_mode(inode, mode);
return error;
}

if (S_ISDIR(inode->i_mode)) {
error = gfs2_acl_set(inode, ACL_TYPE_DEFAULT, acl);
if (error)
goto out;
}

clone = posix_acl_clone(acl, GFP_NOFS);
error = -ENOMEM;
if (!clone)

@@ -216,43 +169,32 @@ int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip)
posix_acl_release(acl);
acl = clone;

if (S_ISDIR(ip->i_inode.i_mode)) {
error = gfs2_xattr_set(&ip->i_inode, GFS2_EATYPE_SYS,
GFS2_POSIX_ACL_DEFAULT, data, len, 0);
if (error)
goto out;
}

error = posix_acl_create_masq(acl, &mode);
if (error < 0)
goto out;
if (error == 0)
goto munge;

posix_acl_to_xattr(acl, data, len);
error = gfs2_xattr_set(&ip->i_inode, GFS2_EATYPE_SYS,
GFS2_POSIX_ACL_ACCESS, data, len, 0);
error = gfs2_acl_set(inode, ACL_TYPE_ACCESS, acl);
if (error)
goto out;
munge:
error = munge_mode(ip, mode);
error = gfs2_set_mode(inode, mode);
out:
posix_acl_release(acl);
kfree(data);
return error;
}

int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr)
{
struct posix_acl *acl = NULL, *clone;
struct gfs2_ea_location el;
struct posix_acl *acl, *clone;
char *data;
unsigned int len;
int error;

error = acl_get(ip, GFS2_POSIX_ACL_ACCESS, &acl, &el, &data, &len);
if (error)
goto out_brelse;
acl = gfs2_acl_get(ip, ACL_TYPE_ACCESS);
if (IS_ERR(acl))
return PTR_ERR(acl);
if (!acl)
return gfs2_setattr_simple(ip, attr);

@@ -265,15 +207,134 @@ int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr)

error = posix_acl_chmod_masq(acl, attr->ia_mode);
if (!error) {
len = posix_acl_to_xattr(acl, NULL, 0);
data = kmalloc(len, GFP_NOFS);
error = -ENOMEM;
if (data == NULL)
goto out;
posix_acl_to_xattr(acl, data, len);
error = gfs2_ea_acl_chmod(ip, &el, attr, data);
error = gfs2_xattr_acl_chmod(ip, attr, data);
kfree(data);
set_cached_acl(&ip->i_inode, ACL_TYPE_ACCESS, acl);
}

out:
posix_acl_release(acl);
kfree(data);
out_brelse:
brelse(el.el_bh);
return error;
}

static int gfs2_acl_type(const char *name)
{
if (strcmp(name, GFS2_POSIX_ACL_ACCESS) == 0)
return ACL_TYPE_ACCESS;
if (strcmp(name, GFS2_POSIX_ACL_DEFAULT) == 0)
return ACL_TYPE_DEFAULT;
return -EINVAL;
}

static int gfs2_xattr_system_get(struct inode *inode, const char *name,
void *buffer, size_t size)
{
struct posix_acl *acl;
int type;
int error;

type = gfs2_acl_type(name);
if (type < 0)
return type;

acl = gfs2_acl_get(GFS2_I(inode), type);
if (IS_ERR(acl))
return PTR_ERR(acl);
if (acl == NULL)
return -ENODATA;

error = posix_acl_to_xattr(acl, buffer, size);
posix_acl_release(acl);

return error;
}

static int gfs2_xattr_system_set(struct inode *inode, const char *name,
const void *value, size_t size, int flags)
{
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct posix_acl *acl = NULL;
int error = 0, type;

if (!sdp->sd_args.ar_posix_acl)
return -EOPNOTSUPP;

type = gfs2_acl_type(name);
if (type < 0)
return type;
if (flags & XATTR_CREATE)
return -EINVAL;
if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
return value ? -EACCES : 0;
if ((current_fsuid() != inode->i_uid) && !capable(CAP_FOWNER))
return -EPERM;
if (S_ISLNK(inode->i_mode))
return -EOPNOTSUPP;

if (!value)
goto set_acl;

acl = posix_acl_from_xattr(value, size);
if (!acl) {
/*
* acl_set_file(3) may request that we set default ACLs with
* zero length -- defend (gracefully) against that here.
*/
goto out;
}
if (IS_ERR(acl)) {
error = PTR_ERR(acl);
goto out;
}

error = posix_acl_valid(acl);
if (error)
goto out_release;

error = -EINVAL;
if (acl->a_count > GFS2_ACL_MAX_ENTRIES)
goto out_release;

if (type == ACL_TYPE_ACCESS) {
mode_t mode = inode->i_mode;
error = posix_acl_equiv_mode(acl, &mode);

if (error <= 0) {
posix_acl_release(acl);
acl = NULL;

if (error < 0)
return error;
}

error = gfs2_set_mode(inode, mode);
if (error)
goto out_release;
}

set_acl:
error = gfs2_xattr_set(inode, GFS2_EATYPE_SYS, name, value, size, 0);
if (!error) {
if (acl)
set_cached_acl(inode, type, acl);
else
forget_cached_acl(inode, type);
}
out_release:
posix_acl_release(acl);
out:
return error;
}

struct xattr_handler gfs2_xattr_system_handler = {
.prefix = XATTR_SYSTEM_PREFIX,
.get = gfs2_xattr_system_get,
.set = gfs2_xattr_system_set,
};

@@ -13,26 +13,12 @@
#include "incore.h"

#define GFS2_POSIX_ACL_ACCESS "posix_acl_access"
#define GFS2_POSIX_ACL_ACCESS_LEN 16
#define GFS2_POSIX_ACL_DEFAULT "posix_acl_default"
#define GFS2_POSIX_ACL_DEFAULT_LEN 17
#define GFS2_ACL_MAX_ENTRIES 25

#define GFS2_ACL_IS_ACCESS(name, len) \
((len) == GFS2_POSIX_ACL_ACCESS_LEN && \
!memcmp(GFS2_POSIX_ACL_ACCESS, (name), (len)))

#define GFS2_ACL_IS_DEFAULT(name, len) \
((len) == GFS2_POSIX_ACL_DEFAULT_LEN && \
!memcmp(GFS2_POSIX_ACL_DEFAULT, (name), (len)))

struct gfs2_ea_request;

int gfs2_acl_validate_set(struct gfs2_inode *ip, int access,
struct gfs2_ea_request *er,
int *remove, mode_t *mode);
int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access);
int gfs2_check_acl(struct inode *inode, int mask);
int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip);
int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr);
extern int gfs2_check_acl(struct inode *inode, int mask);
extern int gfs2_acl_create(struct gfs2_inode *dip, struct inode *inode);
extern int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr);
extern struct xattr_handler gfs2_xattr_system_handler;

#endif /* __ACL_DOT_H__ */

@@ -269,7 +269,6 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping,
pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
unsigned offset = i_size & (PAGE_CACHE_SIZE-1);
unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize);
struct backing_dev_info *bdi = mapping->backing_dev_info;
int i;
int ret;

@@ -313,11 +312,6 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping,

if (ret || (--(wbc->nr_to_write) <= 0))
ret = 1;
if (wbc->nonblocking && bdi_write_congested(bdi)) {
wbc->encountered_congestion = 1;
ret = 1;
}

}
gfs2_trans_end(sdp);
return ret;

@@ -338,7 +332,6 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping,
static int gfs2_write_cache_jdata(struct address_space *mapping,
struct writeback_control *wbc)
{
struct backing_dev_info *bdi = mapping->backing_dev_info;
int ret = 0;
int done = 0;
struct pagevec pvec;

@@ -348,11 +341,6 @@ static int gfs2_write_cache_jdata(struct address_space *mapping,
int scanned = 0;
int range_whole = 0;

if (wbc->nonblocking && bdi_write_congested(bdi)) {
wbc->encountered_congestion = 1;
return 0;
}

pagevec_init(&pvec, 0);
if (wbc->range_cyclic) {
index = mapping->writeback_index; /* Start from prev offset */

@@ -819,8 +807,10 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
mark_inode_dirty(inode);
}

if (inode == sdp->sd_rindex)
if (inode == sdp->sd_rindex) {
adjust_fs_space(inode);
ip->i_gh.gh_flags |= GL_NOCACHE;
}

brelse(dibh);
gfs2_trans_end(sdp);

@@ -889,8 +879,10 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
mark_inode_dirty(inode);
}

if (inode == sdp->sd_rindex)
if (inode == sdp->sd_rindex) {
adjust_fs_space(inode);
ip->i_gh.gh_flags |= GL_NOCACHE;
}

brelse(dibh);
gfs2_trans_end(sdp);

@@ -525,38 +525,6 @@ consist_inode:
return ERR_PTR(-EIO);
}

/**
* dirent_first - Return the first dirent
* @dip: the directory
* @bh: The buffer
* @dent: Pointer to list of dirents
*
* return first dirent whether bh points to leaf or stuffed dinode
*
* Returns: IS_LEAF, IS_DINODE, or -errno
*/

static int dirent_first(struct gfs2_inode *dip, struct buffer_head *bh,
struct gfs2_dirent **dent)
{
struct gfs2_meta_header *h = (struct gfs2_meta_header *)bh->b_data;

if (be32_to_cpu(h->mh_type) == GFS2_METATYPE_LF) {
if (gfs2_meta_check(GFS2_SB(&dip->i_inode), bh))
return -EIO;
*dent = (struct gfs2_dirent *)(bh->b_data +
sizeof(struct gfs2_leaf));
return IS_LEAF;
} else {
if (gfs2_metatype_check(GFS2_SB(&dip->i_inode), bh, GFS2_METATYPE_DI))
return -EIO;
*dent = (struct gfs2_dirent *)(bh->b_data +
sizeof(struct gfs2_dinode));
return IS_DINODE;
}
}

static int dirent_check_reclen(struct gfs2_inode *dip,
const struct gfs2_dirent *d, const void *end_p)
{

@@ -1006,7 +974,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
divider = (start + half_len) << (32 - dip->i_depth);

/* Copy the entries */
dirent_first(dip, obh, &dent);
dent = (struct gfs2_dirent *)(obh->b_data + sizeof(struct gfs2_leaf));

do {
next = dent;

@@ -241,15 +241,14 @@ int gfs2_glock_put(struct gfs2_glock *gl)
int rv = 0;

write_lock(gl_lock_addr(gl->gl_hash));
if (atomic_dec_and_test(&gl->gl_ref)) {
if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
hlist_del(&gl->gl_list);
write_unlock(gl_lock_addr(gl->gl_hash));
spin_lock(&lru_lock);
if (!list_empty(&gl->gl_lru)) {
list_del_init(&gl->gl_lru);
atomic_dec(&lru_count);
}
spin_unlock(&lru_lock);
write_unlock(gl_lock_addr(gl->gl_hash));
GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
glock_free(gl);
rv = 1;

@@ -513,7 +512,6 @@ retry:
GLOCK_BUG_ON(gl, 1);
}
spin_unlock(&gl->gl_spin);
gfs2_glock_put(gl);
return;
}

@@ -524,8 +522,6 @@ retry:
if (glops->go_xmote_bh) {
spin_unlock(&gl->gl_spin);
rv = glops->go_xmote_bh(gl, gh);
if (rv == -EAGAIN)
return;
spin_lock(&gl->gl_spin);
if (rv) {
do_error(gl, rv);

@@ -540,7 +536,6 @@ out:
clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
spin_unlock(&gl->gl_spin);
gfs2_glock_put(gl);
}

static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,

@@ -600,7 +595,6 @@ __acquires(&gl->gl_spin)

if (!(ret & LM_OUT_ASYNC)) {
finish_xmote(gl, ret);
gfs2_glock_hold(gl);
if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
gfs2_glock_put(gl);
} else {

@@ -672,12 +666,17 @@ out:
return;

out_sched:
clear_bit(GLF_LOCK, &gl->gl_flags);
smp_mb__after_clear_bit();
gfs2_glock_hold(gl);
if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
gfs2_glock_put_nolock(gl);
return;

out_unlock:
clear_bit(GLF_LOCK, &gl->gl_flags);
goto out;
smp_mb__after_clear_bit();
return;
}

static void delete_work_func(struct work_struct *work)

@@ -707,9 +706,12 @@ static void glock_work_func(struct work_struct *work)
{
unsigned long delay = 0;
struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
int drop_ref = 0;

if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags))
if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
finish_xmote(gl, gl->gl_reply);
drop_ref = 1;
}
down_read(&gfs2_umount_flush_sem);
spin_lock(&gl->gl_spin);
if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&

@@ -727,6 +729,8 @@ static void glock_work_func(struct work_struct *work)
if (!delay ||
queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
gfs2_glock_put(gl);
if (drop_ref)
gfs2_glock_put(gl);
}

/**

@@ -1361,10 +1365,6 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
list_del_init(&gl->gl_lru);
atomic_dec(&lru_count);

/* Check if glock is about to be freed */
if (atomic_read(&gl->gl_ref) == 0)
continue;

/* Test for being demotable */
if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
gfs2_glock_hold(gl);

@@ -1375,10 +1375,11 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
handle_callback(gl, LM_ST_UNLOCKED, 0);
nr--;
}
clear_bit(GLF_LOCK, &gl->gl_flags);
smp_mb__after_clear_bit();
if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
gfs2_glock_put_nolock(gl);
spin_unlock(&gl->gl_spin);
clear_bit(GLF_LOCK, &gl->gl_flags);
spin_lock(&lru_lock);
continue;
}

@@ -180,15 +180,6 @@ static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl)
return gl->gl_state == LM_ST_SHARED;
}

static inline int gfs2_glock_is_blocking(struct gfs2_glock *gl)
{
int ret;
spin_lock(&gl->gl_spin);
ret = test_bit(GLF_DEMOTE, &gl->gl_flags);
spin_unlock(&gl->gl_spin);
return ret;
}

int gfs2_glock_get(struct gfs2_sbd *sdp,
u64 number, const struct gfs2_glock_operations *glops,
int create, struct gfs2_glock **glp);

@@ -13,6 +13,7 @@
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>

#include "gfs2.h"
#include "incore.h"

@@ -184,8 +185,10 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
if (flags & DIO_METADATA) {
struct address_space *mapping = gl->gl_aspace->i_mapping;
truncate_inode_pages(mapping, 0);
if (ip)
if (ip) {
set_bit(GIF_INVALID, &ip->i_flags);
forget_all_cached_acls(&ip->i_inode);
}
}

if (ip == GFS2_I(gl->gl_sbd->sd_rindex))

@@ -429,7 +429,11 @@ struct gfs2_args {
unsigned int ar_meta:1; /* mount metafs */
unsigned int ar_discard:1; /* discard requests */
unsigned int ar_errors:2; /* errors=withdraw | panic */
unsigned int ar_nobarrier:1; /* do not send barriers */
int ar_commit; /* Commit interval */
int ar_statfs_quantum; /* The fast statfs interval */
int ar_quota_quantum; /* The quota interval */
int ar_statfs_percent; /* The % change to force sync */
};

struct gfs2_tune {

@@ -558,6 +562,7 @@ struct gfs2_sbd {
spinlock_t sd_statfs_spin;
struct gfs2_statfs_change_host sd_statfs_master;
struct gfs2_statfs_change_host sd_statfs_local;
int sd_statfs_force_sync;

/* Resource group stuff */

@@ -871,7 +871,7 @@ struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
if (error)
goto fail_gunlock2;

error = gfs2_acl_create(dip, GFS2_I(inode));
error = gfs2_acl_create(dip, inode);
if (error)
goto fail_gunlock2;

@@ -947,9 +947,7 @@ void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)

str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
str->di_header.__pad0 = 0;
str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
str->di_header.__pad1 = 0;
str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
str->di_mode = cpu_to_be32(ip->i_inode.i_mode);

@@ -596,7 +596,9 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
memset(lh, 0, sizeof(struct gfs2_log_header));
lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
lh->lh_header.__pad0 = cpu_to_be64(0);
lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++);
lh->lh_flags = cpu_to_be32(flags);
lh->lh_tail = cpu_to_be32(tail);

@@ -132,6 +132,7 @@ static struct buffer_head *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type)
static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
struct gfs2_meta_header *mh;
struct gfs2_trans *tr;

lock_buffer(bd->bd_bh);

@@ -148,6 +149,9 @@ static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
gfs2_meta_check(sdp, bd->bd_bh);
gfs2_pin(sdp, bd->bd_bh);
mh = (struct gfs2_meta_header *)bd->bd_bh->b_data;
mh->__pad0 = cpu_to_be64(0);
mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
sdp->sd_log_num_buf++;
list_add(&le->le_list, &sdp->sd_log_le_buf);
tr->tr_num_buf_new++;

@@ -18,6 +18,7 @@
#include <linux/mount.h>
#include <linux/gfs2_ondisk.h>
#include <linux/slow-work.h>
#include <linux/quotaops.h>

#include "gfs2.h"
#include "incore.h"

@@ -62,13 +63,10 @@ static void gfs2_tune_init(struct gfs2_tune *gt)
gt->gt_quota_warn_period = 10;
gt->gt_quota_scale_num = 1;
gt->gt_quota_scale_den = 1;
gt->gt_quota_quantum = 60;
gt->gt_new_files_jdata = 0;
gt->gt_max_readahead = 1 << 18;
gt->gt_stall_secs = 600;
gt->gt_complain_secs = 10;
gt->gt_statfs_quantum = 30;
gt->gt_statfs_slow = 0;
}

static struct gfs2_sbd *init_sbd(struct super_block *sb)

@@ -1114,7 +1112,7 @@ void gfs2_online_uevent(struct gfs2_sbd *sdp)
* Returns: errno
*/

static int fill_super(struct super_block *sb, void *data, int silent)
static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent)
{
struct gfs2_sbd *sdp;
struct gfs2_holder mount_gh;

@@ -1125,17 +1123,7 @@ static int fill_super(struct super_block *sb, void *data, int silent)
printk(KERN_WARNING "GFS2: can't alloc struct gfs2_sbd\n");
return -ENOMEM;
}

sdp->sd_args.ar_quota = GFS2_QUOTA_DEFAULT;
sdp->sd_args.ar_data = GFS2_DATA_DEFAULT;
sdp->sd_args.ar_commit = 60;
sdp->sd_args.ar_errors = GFS2_ERRORS_DEFAULT;

error = gfs2_mount_args(sdp, &sdp->sd_args, data);
if (error) {
printk(KERN_WARNING "GFS2: can't parse mount arguments\n");
goto fail;
}
sdp->sd_args = *args;

if (sdp->sd_args.ar_spectator) {
sb->s_flags |= MS_RDONLY;

@@ -1143,11 +1131,15 @@ static int fill_super(struct super_block *sb, void *data, int silent)
}
if (sdp->sd_args.ar_posix_acl)
sb->s_flags |= MS_POSIXACL;
if (sdp->sd_args.ar_nobarrier)
set_bit(SDF_NOBARRIERS, &sdp->sd_flags);

sb->s_magic = GFS2_MAGIC;
sb->s_op = &gfs2_super_ops;
sb->s_export_op = &gfs2_export_ops;
sb->s_xattr = gfs2_xattr_handlers;
sb->s_qcop = &gfs2_quotactl_ops;
sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
sb->s_time_gran = 1;
sb->s_maxbytes = MAX_LFS_FILESIZE;

@@ -1160,6 +1152,15 @@ static int fill_super(struct super_block *sb, void *data, int silent)
sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift;

sdp->sd_tune.gt_log_flush_secs = sdp->sd_args.ar_commit;
sdp->sd_tune.gt_quota_quantum = sdp->sd_args.ar_quota_quantum;
if (sdp->sd_args.ar_statfs_quantum) {
sdp->sd_tune.gt_statfs_slow = 0;
sdp->sd_tune.gt_statfs_quantum = sdp->sd_args.ar_statfs_quantum;
}
else {
sdp->sd_tune.gt_statfs_slow = 1;
sdp->sd_tune.gt_statfs_quantum = 30;
}

error = init_names(sdp, silent);
if (error)

@@ -1243,18 +1244,127 @@ fail:
return error;
}

static int gfs2_get_sb(struct file_system_type *fs_type, int flags,
const char *dev_name, void *data, struct vfsmount *mnt)
static int set_gfs2_super(struct super_block *s, void *data)
{
return get_sb_bdev(fs_type, flags, dev_name, data, fill_super, mnt);
s->s_bdev = data;
s->s_dev = s->s_bdev->bd_dev;

/*
* We set the bdi here to the queue backing, file systems can
* overwrite this in ->fill_super()
*/
s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
return 0;
}

static int test_meta_super(struct super_block *s, void *ptr)
static int test_gfs2_super(struct super_block *s, void *ptr)
{
struct block_device *bdev = ptr;
return (bdev == s->s_bdev);
}

/**
* gfs2_get_sb - Get the GFS2 superblock
* @fs_type: The GFS2 filesystem type
* @flags: Mount flags
* @dev_name: The name of the device
* @data: The mount arguments
* @mnt: The vfsmnt for this mount
*
* Q. Why not use get_sb_bdev() ?
* A. We need to select one of two root directories to mount, independent
* of whether this is the initial, or subsequent, mount of this sb
*
* Returns: 0 or -ve on error
*/

static int gfs2_get_sb(struct file_system_type *fs_type, int flags,
const char *dev_name, void *data, struct vfsmount *mnt)
{
struct block_device *bdev;
struct super_block *s;
fmode_t mode = FMODE_READ;
int error;
struct gfs2_args args;
struct gfs2_sbd *sdp;

if (!(flags & MS_RDONLY))
mode |= FMODE_WRITE;

bdev = open_bdev_exclusive(dev_name, mode, fs_type);
if (IS_ERR(bdev))
return PTR_ERR(bdev);

/*
* once the super is inserted into the list by sget, s_umount
* will protect the lockfs code from trying to start a snapshot
* while we are mounting
*/
mutex_lock(&bdev->bd_fsfreeze_mutex);
if (bdev->bd_fsfreeze_count > 0) {
mutex_unlock(&bdev->bd_fsfreeze_mutex);
error = -EBUSY;
goto error_bdev;
}
s = sget(fs_type, test_gfs2_super, set_gfs2_super, bdev);
mutex_unlock(&bdev->bd_fsfreeze_mutex);
error = PTR_ERR(s);
if (IS_ERR(s))
goto error_bdev;

memset(&args, 0, sizeof(args));
args.ar_quota = GFS2_QUOTA_DEFAULT;
args.ar_data = GFS2_DATA_DEFAULT;
args.ar_commit = 60;
args.ar_statfs_quantum = 30;
args.ar_quota_quantum = 60;
args.ar_errors = GFS2_ERRORS_DEFAULT;

error = gfs2_mount_args(&args, data);
if (error) {
printk(KERN_WARNING "GFS2: can't parse mount arguments\n");
if (s->s_root)
goto error_super;
deactivate_locked_super(s);
return error;
}

if (s->s_root) {
error = -EBUSY;
if ((flags ^ s->s_flags) & MS_RDONLY)
goto error_super;
close_bdev_exclusive(bdev, mode);
} else {
char b[BDEVNAME_SIZE];

s->s_flags = flags;
s->s_mode = mode;
strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
sb_set_blocksize(s, block_size(bdev));
error = fill_super(s, &args, flags & MS_SILENT ? 1 : 0);
if (error) {
deactivate_locked_super(s);
return error;
}
s->s_flags |= MS_ACTIVE;
bdev->bd_super = s;
}

sdp = s->s_fs_info;
mnt->mnt_sb = s;
if (args.ar_meta)
mnt->mnt_root = dget(sdp->sd_master_dir);
else
mnt->mnt_root = dget(sdp->sd_root_dir);
return 0;

error_super:
deactivate_locked_super(s);
error_bdev:
close_bdev_exclusive(bdev, mode);
return error;
}

static int set_meta_super(struct super_block *s, void *ptr)
{
return -EINVAL;

@@ -1274,13 +1384,17 @@ static int gfs2_get_sb_meta(struct file_system_type *fs_type, int flags,
dev_name, error);
return error;
}
s = sget(&gfs2_fs_type, test_meta_super, set_meta_super,
s = sget(&gfs2_fs_type, test_gfs2_super, set_meta_super,
path.dentry->d_inode->i_sb->s_bdev);
path_put(&path);
if (IS_ERR(s)) {
printk(KERN_WARNING "GFS2: gfs2 mount does not exist\n");
return PTR_ERR(s);
}
if ((flags ^ s->s_flags) & MS_RDONLY) {
deactivate_locked_super(s);
return -EBUSY;
}
sdp = s->s_fs_info;
mnt->mnt_sb = s;
mnt->mnt_root = dget(sdp->sd_master_dir);

393 fs/gfs2/quota.c

@@ -15,7 +15,7 @@
* fuzziness in the current usage value of IDs that are being used on different
* nodes in the cluster simultaneously. So, it is possible for a user on
* multiple nodes to overrun their quota, but that overrun is controlable.
* Since quota tags are part of transactions, there is no need to a quota check
* Since quota tags are part of transactions, there is no need for a quota check
* program to be run on node crashes or anything like that.
*
* There are couple of knobs that let the administrator manage the quota

@@ -47,6 +47,8 @@
#include <linux/gfs2_ondisk.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>

#include "gfs2.h"
#include "incore.h"

@@ -65,13 +67,6 @@
#define QUOTA_USER 1
#define QUOTA_GROUP 0

struct gfs2_quota_host {
u64 qu_limit;
u64 qu_warn;
s64 qu_value;
u32 qu_ll_next;
};

struct gfs2_quota_change_host {
u64 qc_change;
u32 qc_flags; /* GFS2_QCF_... */

@@ -164,7 +159,7 @@ fail:
return error;
}

static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
static int qd_get(struct gfs2_sbd *sdp, int user, u32 id,
struct gfs2_quota_data **qdp)
{
struct gfs2_quota_data *qd = NULL, *new_qd = NULL;

@@ -202,7 +197,7 @@ static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create,

spin_unlock(&qd_lru_lock);

if (qd || !create) {
if (qd) {
if (new_qd) {
gfs2_glock_put(new_qd->qd_gl);
kmem_cache_free(gfs2_quotad_cachep, new_qd);

@@ -461,12 +456,12 @@ static void qd_unlock(struct gfs2_quota_data *qd)
qd_put(qd);
}

static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id,
struct gfs2_quota_data **qdp)
{
int error;

error = qd_get(sdp, user, id, create, qdp);
error = qd_get(sdp, user, id, qdp);
if (error)
return error;

@@ -508,20 +503,20 @@ int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
return 0;

error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, CREATE, qd);
error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, qd);
if (error)
goto out;
al->al_qd_num++;
qd++;

error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, CREATE, qd);
error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, qd);
if (error)
goto out;
al->al_qd_num++;
qd++;

if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
error = qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd);
error = qdsb_get(sdp, QUOTA_USER, uid, qd);
if (error)
goto out;
al->al_qd_num++;

@@ -529,7 +524,7 @@ int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
}

if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd);
error = qdsb_get(sdp, QUOTA_GROUP, gid, qd);
if (error)
goto out;
al->al_qd_num++;

@@ -617,48 +612,36 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
mutex_unlock(&sdp->sd_quota_mutex);
}

static void gfs2_quota_in(struct gfs2_quota_host *qu, const void *buf)
{
const struct gfs2_quota *str = buf;

qu->qu_limit = be64_to_cpu(str->qu_limit);
qu->qu_warn = be64_to_cpu(str->qu_warn);
qu->qu_value = be64_to_cpu(str->qu_value);
qu->qu_ll_next = be32_to_cpu(str->qu_ll_next);
}

static void gfs2_quota_out(const struct gfs2_quota_host *qu, void *buf)
{
struct gfs2_quota *str = buf;

str->qu_limit = cpu_to_be64(qu->qu_limit);
str->qu_warn = cpu_to_be64(qu->qu_warn);
str->qu_value = cpu_to_be64(qu->qu_value);
str->qu_ll_next = cpu_to_be32(qu->qu_ll_next);
memset(&str->qu_reserved, 0, sizeof(str->qu_reserved));
}

/**
* gfs2_adjust_quota
* gfs2_adjust_quota - adjust record of current block usage
* @ip: The quota inode
* @loc: Offset of the entry in the quota file
* @change: The amount of usage change to record
* @qd: The quota data
* @fdq: The updated limits to record
*
* This function was mostly borrowed from gfs2_block_truncate_page which was
* in turn mostly borrowed from ext3
*
* Returns: 0 or -ve on error
*/

static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
s64 change, struct gfs2_quota_data *qd)
s64 change, struct gfs2_quota_data *qd,
struct fs_disk_quota *fdq)
{
struct inode *inode = &ip->i_inode;
struct address_space *mapping = inode->i_mapping;
unsigned long index = loc >> PAGE_CACHE_SHIFT;
unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
unsigned blocksize, iblock, pos;
struct buffer_head *bh;
struct buffer_head *bh, *dibh;
struct page *page;
void *kaddr;
char *ptr;
struct gfs2_quota_host qp;
struct gfs2_quota *qp;
s64 value;
int err = -EIO;
u64 size;

if (gfs2_is_stuffed(ip))
gfs2_unstuff_dinode(ip, NULL);

@@ -700,18 +683,38 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
gfs2_trans_add_bh(ip->i_gl, bh, 0);

kaddr = kmap_atomic(page, KM_USER0);
ptr = kaddr + offset;
gfs2_quota_in(&qp, ptr);
qp.qu_value += change;
value = qp.qu_value;
gfs2_quota_out(&qp, ptr);
qp = kaddr + offset;
value = (s64)be64_to_cpu(qp->qu_value) + change;
qp->qu_value = cpu_to_be64(value);
qd->qd_qb.qb_value = qp->qu_value;
if (fdq) {
if (fdq->d_fieldmask & FS_DQ_BSOFT) {
qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit);
qd->qd_qb.qb_warn = qp->qu_warn;
}
if (fdq->d_fieldmask & FS_DQ_BHARD) {
qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit);
qd->qd_qb.qb_limit = qp->qu_limit;
}
}
flush_dcache_page(page);
kunmap_atomic(kaddr, KM_USER0);
err = 0;
qd->qd_qb.qb_magic = cpu_to_be32(GFS2_MAGIC);
qd->qd_qb.qb_value = cpu_to_be64(value);
((struct gfs2_quota_lvb*)(qd->qd_gl->gl_lvb))->qb_magic = cpu_to_be32(GFS2_MAGIC);
((struct gfs2_quota_lvb*)(qd->qd_gl->gl_lvb))->qb_value = cpu_to_be64(value);

err = gfs2_meta_inode_buffer(ip, &dibh);
if (err)
goto unlock;

size = loc + sizeof(struct gfs2_quota);
if (size > inode->i_size) {
ip->i_disksize = size;
i_size_write(inode, size);
}
inode->i_mtime = inode->i_atime = CURRENT_TIME;
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
mark_inode_dirty(inode);

unlock:
unlock_page(page);
page_cache_release(page);

@@ -739,9 +742,9 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
return -ENOMEM;

sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
mutex_lock_nested(&ip->i_inode.i_mutex, I_MUTEX_QUOTA);
for (qx = 0; qx < num_qd; qx++) {
error = gfs2_glock_nq_init(qda[qx]->qd_gl,
LM_ST_EXCLUSIVE,
error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
GL_NOCACHE, &ghs[qx]);
if (error)
goto out;

@@ -795,9 +798,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
for (x = 0; x < num_qd; x++) {
qd = qda[x];
offset = qd2offset(qd);
error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync,
(struct gfs2_quota_data *)
qd);
error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
if (error)
goto out_end_trans;

@@ -817,21 +818,44 @@ out_gunlock:
out:
while (qx--)
gfs2_glock_dq_uninit(&ghs[qx]);
mutex_unlock(&ip->i_inode.i_mutex);
kfree(ghs);
gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
return error;
}

static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
{
struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
struct gfs2_quota q;
struct gfs2_quota_lvb *qlvb;
loff_t pos;
int error;

memset(&q, 0, sizeof(struct gfs2_quota));
pos = qd2offset(qd);
error = gfs2_internal_read(ip, NULL, (char *)&q, &pos, sizeof(q));
if (error < 0)
return error;

qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
qlvb->__pad = 0;
qlvb->qb_limit = q.qu_limit;
qlvb->qb_warn = q.qu_warn;
qlvb->qb_value = q.qu_value;
qd->qd_qb = *qlvb;

return 0;
}

static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
struct gfs2_holder *q_gh)
{
struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
struct gfs2_holder i_gh;
struct gfs2_quota_host q;
char buf[sizeof(struct gfs2_quota)];
int error;
struct gfs2_quota_lvb *qlvb;

restart:
error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);

@@ -841,11 +865,9 @@ restart:
qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;

if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
loff_t pos;
gfs2_glock_dq_uninit(q_gh);
error = gfs2_glock_nq_init(qd->qd_gl,
LM_ST_EXCLUSIVE, GL_NOCACHE,
q_gh);
error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
GL_NOCACHE, q_gh);
if (error)
return error;

@@ -853,29 +875,14 @@ restart:
if (error)
goto fail;

memset(buf, 0, sizeof(struct gfs2_quota));
pos = qd2offset(qd);
error = gfs2_internal_read(ip, NULL, buf, &pos,
sizeof(struct gfs2_quota));
if (error < 0)
error = update_qd(sdp, qd);
if (error)
goto fail_gunlock;

gfs2_glock_dq_uninit(&i_gh);

gfs2_quota_in(&q, buf);
qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
qlvb->__pad = 0;
qlvb->qb_limit = cpu_to_be64(q.qu_limit);
qlvb->qb_warn = cpu_to_be64(q.qu_warn);
qlvb->qb_value = cpu_to_be64(q.qu_value);
qd->qd_qb = *qlvb;

if (gfs2_glock_is_blocking(qd->qd_gl)) {
gfs2_glock_dq_uninit(q_gh);
force_refresh = 0;
goto restart;
}
gfs2_glock_dq_uninit(q_gh);
force_refresh = 0;
goto restart;
}

return 0;

@@ -995,7 +1002,7 @@ static int print_message(struct gfs2_quota_data *qd, char *type)
{
struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\r\n",
printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n",
sdp->sd_fsname, type,
(test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
qd->qd_id);

@@ -1032,6 +1039,10 @@ int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)

if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
print_message(qd, "exceeded");
quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
USRQUOTA : GRPQUOTA, qd->qd_id,
sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN);

error = -EDQUOT;
break;
} else if (be64_to_cpu(qd->qd_qb.qb_warn) &&

@@ -1039,6 +1050,9 @@ int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
time_after_eq(jiffies, qd->qd_last_warn +
gfs2_tune_get(sdp,
gt_quota_warn_period) * HZ)) {
quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
USRQUOTA : GRPQUOTA, qd->qd_id,
sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
error = print_message(qd, "warning");
qd->qd_last_warn = jiffies;
}

@@ -1069,8 +1083,9 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
}
}

int gfs2_quota_sync(struct gfs2_sbd *sdp)
int gfs2_quota_sync(struct super_block *sb, int type)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
struct gfs2_quota_data **qda;
unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
unsigned int num_qd;

@@ -1118,7 +1133,7 @@ int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
struct gfs2_holder q_gh;
int error;

error = qd_get(sdp, user, id, CREATE, &qd);
error = qd_get(sdp, user, id, &qd);
if (error)
return error;

@@ -1127,7 +1142,6 @@ int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
gfs2_glock_dq_uninit(&q_gh);

qd_put(qd);

return error;
}

@@ -1298,12 +1312,12 @@ static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
}

static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
int (*fxn)(struct gfs2_sbd *sdp),
int (*fxn)(struct super_block *sb, int type),
unsigned long t, unsigned long *timeo,
unsigned int *new_timeo)
{
if (t >= *timeo) {
int error = fxn(sdp);
int error = fxn(sdp->sd_vfs, 0);
quotad_error(sdp, msg, error);
*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
} else {

@@ -1330,6 +1344,14 @@ static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
}
}

void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
if (!sdp->sd_statfs_force_sync) {
sdp->sd_statfs_force_sync = 1;
wake_up(&sdp->sd_quota_wait);
}
}

/**
* gfs2_quotad - Write cached quota changes into the quota file
* @sdp: Pointer to GFS2 superblock

@@ -1349,8 +1371,15 @@ int gfs2_quotad(void *data)
while (!kthread_should_stop()) {

/* Update the master statfs file */
quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
&statfs_timeo, &tune->gt_statfs_quantum);
if (sdp->sd_statfs_force_sync) {
int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
quotad_error(sdp, "statfs", error);
statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
}
else
quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
&statfs_timeo,
&tune->gt_statfs_quantum);

/* Update quota file */
quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,

@@ -1367,7 +1396,7 @@ int gfs2_quotad(void *data)
spin_lock(&sdp->sd_trunc_lock);
empty = list_empty(&sdp->sd_trunc_list);
spin_unlock(&sdp->sd_trunc_lock);
if (empty)
if (empty && !sdp->sd_statfs_force_sync)
t -= schedule_timeout(t);
else
t = 0;

@@ -1377,3 +1406,181 @@ int gfs2_quotad(void *data)
return 0;
}

static int gfs2_quota_get_xstate(struct super_block *sb,
struct fs_quota_stat *fqs)
{
struct gfs2_sbd *sdp = sb->s_fs_info;

memset(fqs, 0, sizeof(struct fs_quota_stat));
fqs->qs_version = FS_QSTAT_VERSION;
if (sdp->sd_args.ar_quota == GFS2_QUOTA_ON)
fqs->qs_flags = (XFS_QUOTA_UDQ_ENFD | XFS_QUOTA_GDQ_ENFD);
else if (sdp->sd_args.ar_quota == GFS2_QUOTA_ACCOUNT)
fqs->qs_flags = (XFS_QUOTA_UDQ_ACCT | XFS_QUOTA_GDQ_ACCT);
if (sdp->sd_quota_inode) {
fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr;
fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks;
}
fqs->qs_uquota.qfs_nextents = 1; /* unsupported */
fqs->qs_gquota = fqs->qs_uquota; /* its the same inode in both cases */
fqs->qs_incoredqs = atomic_read(&qd_lru_count);
return 0;
}

static int gfs2_xquota_get(struct super_block *sb, int type, qid_t id,
struct fs_disk_quota *fdq)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
struct gfs2_quota_lvb *qlvb;
struct gfs2_quota_data *qd;
struct gfs2_holder q_gh;
int error;

memset(fdq, 0, sizeof(struct fs_disk_quota));

if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
return -ESRCH; /* Crazy XFS error code */

if (type == USRQUOTA)
type = QUOTA_USER;
else if (type == GRPQUOTA)
type = QUOTA_GROUP;
else
return -EINVAL;

error = qd_get(sdp, type, id, &qd);
if (error)
return error;
error = do_glock(qd, FORCE, &q_gh);
if (error)
goto out;

qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
fdq->d_version = FS_DQUOT_VERSION;
fdq->d_flags = (type == QUOTA_USER) ? XFS_USER_QUOTA : XFS_GROUP_QUOTA;
fdq->d_id = id;
fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit);
fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn);
fdq->d_bcount = be64_to_cpu(qlvb->qb_value);

gfs2_glock_dq_uninit(&q_gh);
out:
qd_put(qd);
return error;
}

/* GFS2 only supports a subset of the XFS fields */
#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD)

static int gfs2_xquota_set(struct super_block *sb, int type, qid_t id,
struct fs_disk_quota *fdq)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
struct gfs2_quota_data *qd;
struct gfs2_holder q_gh, i_gh;
unsigned int data_blocks, ind_blocks;
unsigned int blocks = 0;
int alloc_required;
struct gfs2_alloc *al;
loff_t offset;
int error;

if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
return -ESRCH; /* Crazy XFS error code */

switch(type) {
case USRQUOTA:
type = QUOTA_USER;
if (fdq->d_flags != XFS_USER_QUOTA)
return -EINVAL;
break;
case GRPQUOTA:
type = QUOTA_GROUP;
if (fdq->d_flags != XFS_GROUP_QUOTA)
return -EINVAL;
break;
default:
return -EINVAL;
}

if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
return -EINVAL;
if (fdq->d_id != id)
return -EINVAL;

error = qd_get(sdp, type, id, &qd);
if (error)
return error;

mutex_lock(&ip->i_inode.i_mutex);
error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
if (error)
goto out_put;
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
if (error)
goto out_q;

/* Check for existing entry, if none then alloc new blocks */
error = update_qd(sdp, qd);
if (error)
goto out_i;

/* If nothing has changed, this is a no-op */
if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
(fdq->d_blk_softlimit == be64_to_cpu(qd->qd_qb.qb_warn)))
fdq->d_fieldmask ^= FS_DQ_BSOFT;
if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
(fdq->d_blk_hardlimit == be64_to_cpu(qd->qd_qb.qb_limit)))
fdq->d_fieldmask ^= FS_DQ_BHARD;
if (fdq->d_fieldmask == 0)
goto out_i;

offset = qd2offset(qd);
error = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota),
&alloc_required);
if (error)
goto out_i;
if (alloc_required) {
al = gfs2_alloc_get(ip);
if (al == NULL)
goto out_i;
gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
&data_blocks, &ind_blocks);
blocks = al->al_requested = 1 + data_blocks + ind_blocks;
error = gfs2_inplace_reserve(ip);
if (error)
goto out_alloc;
}

error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 1, 0);
if (error)
goto out_release;

/* Apply changes */
error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);

gfs2_trans_end(sdp);
out_release:
if (alloc_required) {
gfs2_inplace_release(ip);
out_alloc:
gfs2_alloc_put(ip);
}
out_i:
gfs2_glock_dq_uninit(&i_gh);
out_q:
gfs2_glock_dq_uninit(&q_gh);
out_put:
mutex_unlock(&ip->i_inode.i_mutex);
qd_put(qd);
return error;
}

const struct quotactl_ops gfs2_quotactl_ops = {
.quota_sync = gfs2_quota_sync,
.get_xstate = gfs2_quota_get_xstate,
.get_xquota = gfs2_xquota_get,
.set_xquota = gfs2_xquota_set,
};

@@ -25,13 +25,15 @@ extern int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid);
extern void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
u32 uid, u32 gid);

extern int gfs2_quota_sync(struct gfs2_sbd *sdp);
extern int gfs2_quota_sync(struct super_block *sb, int type);
extern int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id);

extern int gfs2_quota_init(struct gfs2_sbd *sdp);
extern void gfs2_quota_cleanup(struct gfs2_sbd *sdp);
extern int gfs2_quotad(void *data);

extern void gfs2_wake_up_statfs(struct gfs2_sbd *sdp);

static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

@@ -50,5 +52,6 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
}

extern int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask);
extern const struct quotactl_ops gfs2_quotactl_ops;

#endif /* __QUOTA_DOT_H__ */
@@ -410,7 +410,9 @@ static int clean_journal(struct gfs2_jdesc *jd, struct gfs2_log_header_host *hea
 	memset(lh, 0, sizeof(struct gfs2_log_header));
 	lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
 	lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
+	lh->lh_header.__pad0 = cpu_to_be64(0);
 	lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
+	lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
 	lh->lh_sequence = cpu_to_be64(head->lh_sequence + 1);
 	lh->lh_flags = cpu_to_be32(GFS2_LOG_HEAD_UNMOUNT);
 	lh->lh_blkno = cpu_to_be32(lblock);
@@ -1710,11 +1710,16 @@ int gfs2_check_blk_state(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
 {
 	struct gfs2_rgrpd *rgd;
 	struct gfs2_holder ri_gh, rgd_gh;
+	struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
+	int ri_locked = 0;
 	int error;
 
-	error = gfs2_rindex_hold(sdp, &ri_gh);
-	if (error)
-		goto fail;
+	if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
+		error = gfs2_rindex_hold(sdp, &ri_gh);
+		if (error)
+			goto fail;
+		ri_locked = 1;
+	}
 
 	error = -EINVAL;
 	rgd = gfs2_blk2rgrpd(sdp, no_addr);
@@ -1730,7 +1735,8 @@ int gfs2_check_blk_state(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
 
 	gfs2_glock_dq_uninit(&rgd_gh);
 fail_rindex:
-	gfs2_glock_dq_uninit(&ri_gh);
+	if (ri_locked)
+		gfs2_glock_dq_uninit(&ri_gh);
 fail:
 	return error;
 }

fs/gfs2/super.c (110 lines changed)

@@ -70,6 +70,11 @@ enum {
 	Opt_commit,
 	Opt_err_withdraw,
 	Opt_err_panic,
+	Opt_statfs_quantum,
+	Opt_statfs_percent,
+	Opt_quota_quantum,
+	Opt_barrier,
+	Opt_nobarrier,
 	Opt_error,
 };
 
@@ -101,18 +106,23 @@ static const match_table_t tokens = {
 	{Opt_commit, "commit=%d"},
 	{Opt_err_withdraw, "errors=withdraw"},
 	{Opt_err_panic, "errors=panic"},
+	{Opt_statfs_quantum, "statfs_quantum=%d"},
+	{Opt_statfs_percent, "statfs_percent=%d"},
+	{Opt_quota_quantum, "quota_quantum=%d"},
+	{Opt_barrier, "barrier"},
+	{Opt_nobarrier, "nobarrier"},
 	{Opt_error, NULL}
 };
 
 /**
  * gfs2_mount_args - Parse mount options
- * @sdp:
- * @data:
+ * @args: The structure into which the parsed options will be written
+ * @options: The options to parse
  *
  * Return: errno
  */
 
-int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options)
+int gfs2_mount_args(struct gfs2_args *args, char *options)
 {
 	char *o;
 	int token;
@@ -157,7 +167,7 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options)
 			break;
 		case Opt_debug:
 			if (args->ar_errors == GFS2_ERRORS_PANIC) {
-				fs_info(sdp, "-o debug and -o errors=panic "
+				printk(KERN_WARNING "GFS2: -o debug and -o errors=panic "
 					"are mutually exclusive.\n");
 				return -EINVAL;
 			}
@@ -210,7 +220,29 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options)
 		case Opt_commit:
 			rv = match_int(&tmp[0], &args->ar_commit);
 			if (rv || args->ar_commit <= 0) {
-				fs_info(sdp, "commit mount option requires a positive numeric argument\n");
+				printk(KERN_WARNING "GFS2: commit mount option requires a positive numeric argument\n");
 				return rv ? rv : -EINVAL;
 			}
 			break;
+		case Opt_statfs_quantum:
+			rv = match_int(&tmp[0], &args->ar_statfs_quantum);
+			if (rv || args->ar_statfs_quantum < 0) {
+				printk(KERN_WARNING "GFS2: statfs_quantum mount option requires a non-negative numeric argument\n");
+				return rv ? rv : -EINVAL;
+			}
+			break;
+		case Opt_quota_quantum:
+			rv = match_int(&tmp[0], &args->ar_quota_quantum);
+			if (rv || args->ar_quota_quantum <= 0) {
+				printk(KERN_WARNING "GFS2: quota_quantum mount option requires a positive numeric argument\n");
+				return rv ? rv : -EINVAL;
+			}
+			break;
+		case Opt_statfs_percent:
+			rv = match_int(&tmp[0], &args->ar_statfs_percent);
+			if (rv || args->ar_statfs_percent < 0 ||
+			    args->ar_statfs_percent > 100) {
+				printk(KERN_WARNING "statfs_percent mount option requires a numeric argument between 0 and 100\n");
+				return rv ? rv : -EINVAL;
+			}
+			break;
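Each new option case above range-checks its numeric argument before accepting it: commit and quota_quantum must be strictly positive, statfs_quantum non-negative, and statfs_percent between 0 and 100. A rough standalone sketch of the same range checks; gfs2 itself parses with match_token()/match_int(), so the sscanf()-based parsing here is only for illustration:

#include <stdio.h>
#include <string.h>

/* Returns 0 when the option and its argument are acceptable, -1 otherwise. */
static int check_option(const char *opt)
{
	int v;

	if (sscanf(opt, "statfs_quantum=%d", &v) == 1)
		return v >= 0 ? 0 : -1;		/* non-negative */
	if (sscanf(opt, "quota_quantum=%d", &v) == 1)
		return v > 0 ? 0 : -1;		/* strictly positive */
	if (sscanf(opt, "statfs_percent=%d", &v) == 1)
		return (v >= 0 && v <= 100) ? 0 : -1;
	if (!strcmp(opt, "barrier") || !strcmp(opt, "nobarrier"))
		return 0;			/* flag options take no value */
	return -1;				/* unknown option */
}

int main(void)
{
	printf("%d\n", check_option("statfs_percent=50"));	/* 0 */
	printf("%d\n", check_option("quota_quantum=0"));	/* -1 */
	return 0;
}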
@@ -219,15 +251,21 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options)
 			break;
 		case Opt_err_panic:
 			if (args->ar_debug) {
-				fs_info(sdp, "-o debug and -o errors=panic "
+				printk(KERN_WARNING "GFS2: -o debug and -o errors=panic "
 					"are mutually exclusive.\n");
 				return -EINVAL;
 			}
 			args->ar_errors = GFS2_ERRORS_PANIC;
 			break;
+		case Opt_barrier:
+			args->ar_nobarrier = 0;
+			break;
+		case Opt_nobarrier:
+			args->ar_nobarrier = 1;
+			break;
 		case Opt_error:
 		default:
-			fs_info(sdp, "invalid mount option: %s\n", o);
+			printk(KERN_WARNING "GFS2: invalid mount option: %s\n", o);
 			return -EINVAL;
 		}
 	}
@@ -442,7 +480,10 @@ void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
 {
 	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
 	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
+	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
 	struct buffer_head *l_bh;
+	s64 x, y;
+	int need_sync = 0;
 	int error;
 
 	error = gfs2_meta_inode_buffer(l_ip, &l_bh);
@@ -456,9 +497,17 @@ void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
 	l_sc->sc_free += free;
 	l_sc->sc_dinodes += dinodes;
 	gfs2_statfs_change_out(l_sc, l_bh->b_data + sizeof(struct gfs2_dinode));
+	if (sdp->sd_args.ar_statfs_percent) {
+		x = 100 * l_sc->sc_free;
+		y = m_sc->sc_free * sdp->sd_args.ar_statfs_percent;
+		if (x >= y || x <= -y)
+			need_sync = 1;
+	}
 	spin_unlock(&sdp->sd_statfs_spin);
 
 	brelse(l_bh);
+	if (need_sync)
+		gfs2_wake_up_statfs(sdp);
 }
 
 void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
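With statfs_percent set, the hunk above only wakes the statfs sync thread once the locally buffered change in free blocks exceeds that percentage of the master counter, and the comparison is done by cross-multiplication so the hot path needs no division. A standalone sketch of the arithmetic; the names are illustrative, not the GFS2 structures:

#include <stdint.h>
#include <stdio.h>

/* Returns 1 when |local_free_delta| exceeds pct% of the master free count,
 * using the same cross-multiplication as the patch (no division). */
static int statfs_needs_sync(int64_t local_free_delta, int64_t master_free, int pct)
{
	int64_t x = 100 * local_free_delta;
	int64_t y = master_free * pct;

	return (x >= y || x <= -y);
}

int main(void)
{
	/* With 1,000,000 free blocks in the master file and statfs_percent=1,
	 * a local change of 10,000 blocks (1%) is enough to trigger a sync. */
	printf("%d\n", statfs_needs_sync(-10000, 1000000, 1)); /* 1 */
	printf("%d\n", statfs_needs_sync(-9999, 1000000, 1));  /* 0 */
	return 0;
}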
@@ -484,8 +533,9 @@ void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
 	gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
 }
 
-int gfs2_statfs_sync(struct gfs2_sbd *sdp)
+int gfs2_statfs_sync(struct super_block *sb, int type)
 {
+	struct gfs2_sbd *sdp = sb->s_fs_info;
 	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
 	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
 	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
@@ -521,6 +571,7 @@ int gfs2_statfs_sync(struct gfs2_sbd *sdp)
 		goto out_bh2;
 
 	update_statfs(sdp, m_bh, l_bh);
+	sdp->sd_statfs_force_sync = 0;
 
 	gfs2_trans_end(sdp);
 
@@ -712,8 +763,8 @@ static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
 	int error;
 
 	flush_workqueue(gfs2_delete_workqueue);
-	gfs2_quota_sync(sdp);
-	gfs2_statfs_sync(sdp);
+	gfs2_quota_sync(sdp->sd_vfs, 0);
+	gfs2_statfs_sync(sdp->sd_vfs, 0);
 
 	error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, GL_NOCACHE,
 				   &t_gh);
@@ -1061,8 +1112,13 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
 
 	spin_lock(&gt->gt_spin);
 	args.ar_commit = gt->gt_log_flush_secs;
+	args.ar_quota_quantum = gt->gt_quota_quantum;
+	if (gt->gt_statfs_slow)
+		args.ar_statfs_quantum = 0;
+	else
+		args.ar_statfs_quantum = gt->gt_statfs_quantum;
 	spin_unlock(&gt->gt_spin);
-	error = gfs2_mount_args(sdp, &args, data);
+	error = gfs2_mount_args(&args, data);
 	if (error)
 		return error;
 
@@ -1097,8 +1153,21 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
 		sb->s_flags |= MS_POSIXACL;
 	else
 		sb->s_flags &= ~MS_POSIXACL;
+	if (sdp->sd_args.ar_nobarrier)
+		set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
+	else
+		clear_bit(SDF_NOBARRIERS, &sdp->sd_flags);
 	spin_lock(&gt->gt_spin);
 	gt->gt_log_flush_secs = args.ar_commit;
+	gt->gt_quota_quantum = args.ar_quota_quantum;
+	if (args.ar_statfs_quantum) {
+		gt->gt_statfs_slow = 0;
+		gt->gt_statfs_quantum = args.ar_statfs_quantum;
+	}
+	else {
+		gt->gt_statfs_slow = 1;
+		gt->gt_statfs_quantum = 30;
+	}
 	spin_unlock(&gt->gt_spin);
 
 	gfs2_online_uevent(sdp);
@@ -1179,7 +1248,7 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
 {
 	struct gfs2_sbd *sdp = mnt->mnt_sb->s_fs_info;
 	struct gfs2_args *args = &sdp->sd_args;
-	int lfsecs;
+	int val;
 
 	if (is_ancestor(mnt->mnt_root, sdp->sd_master_dir))
 		seq_printf(s, ",meta");
@@ -1240,9 +1309,17 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
 	}
 	if (args->ar_discard)
 		seq_printf(s, ",discard");
-	lfsecs = sdp->sd_tune.gt_log_flush_secs;
-	if (lfsecs != 60)
-		seq_printf(s, ",commit=%d", lfsecs);
+	val = sdp->sd_tune.gt_log_flush_secs;
+	if (val != 60)
+		seq_printf(s, ",commit=%d", val);
+	val = sdp->sd_tune.gt_statfs_quantum;
+	if (val != 30)
+		seq_printf(s, ",statfs_quantum=%d", val);
+	val = sdp->sd_tune.gt_quota_quantum;
+	if (val != 60)
+		seq_printf(s, ",quota_quantum=%d", val);
+	if (args->ar_statfs_percent)
+		seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
 	if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
 		const char *state;
 
@@ -1259,6 +1336,9 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
 		}
 		seq_printf(s, ",errors=%s", state);
 	}
+	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
+		seq_printf(s, ",nobarrier");
+
 	return 0;
 }
@@ -27,7 +27,7 @@ static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp)
 
 extern void gfs2_jindex_free(struct gfs2_sbd *sdp);
 
-extern int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *data);
+extern int gfs2_mount_args(struct gfs2_args *args, char *data);
 
 extern struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid);
 extern int gfs2_jdesc_check(struct gfs2_jdesc *jd);
@@ -44,7 +44,7 @@ extern void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc,
 				  const void *buf);
 extern void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
 			  struct buffer_head *l_bh);
-extern int gfs2_statfs_sync(struct gfs2_sbd *sdp);
+extern int gfs2_statfs_sync(struct super_block *sb, int type);
 
 extern int gfs2_freeze_fs(struct gfs2_sbd *sdp);
 extern void gfs2_unfreeze_fs(struct gfs2_sbd *sdp);
@@ -158,7 +158,7 @@ static ssize_t statfs_sync_store(struct gfs2_sbd *sdp, const char *buf,
 	if (simple_strtol(buf, NULL, 0) != 1)
 		return -EINVAL;
 
-	gfs2_statfs_sync(sdp);
+	gfs2_statfs_sync(sdp->sd_vfs, 0);
 	return len;
 }
 
@@ -171,13 +171,14 @@ static ssize_t quota_sync_store(struct gfs2_sbd *sdp, const char *buf,
 	if (simple_strtol(buf, NULL, 0) != 1)
 		return -EINVAL;
 
-	gfs2_quota_sync(sdp);
+	gfs2_quota_sync(sdp->sd_vfs, 0);
 	return len;
 }
 
 static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf,
 					size_t len)
 {
+	int error;
 	u32 id;
 
 	if (!capable(CAP_SYS_ADMIN))
@@ -185,13 +186,14 @@ static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf,
 
 	id = simple_strtoul(buf, NULL, 0);
 
-	gfs2_quota_refresh(sdp, 1, id);
-	return len;
+	error = gfs2_quota_refresh(sdp, 1, id);
+	return error ? error : len;
 }
 
 static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf,
 					 size_t len)
 {
+	int error;
 	u32 id;
 
 	if (!capable(CAP_SYS_ADMIN))
@@ -199,8 +201,8 @@ static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf,
 
 	id = simple_strtoul(buf, NULL, 0);
 
-	gfs2_quota_refresh(sdp, 0, id);
-	return len;
+	error = gfs2_quota_refresh(sdp, 0, id);
+	return error ? error : len;
 }
 
 static ssize_t demote_rq_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
@@ -186,8 +186,8 @@ static int ea_find_i(struct gfs2_inode *ip, struct buffer_head *bh,
 	return 0;
 }
 
-int gfs2_ea_find(struct gfs2_inode *ip, int type, const char *name,
-		 struct gfs2_ea_location *el)
+static int gfs2_ea_find(struct gfs2_inode *ip, int type, const char *name,
+			struct gfs2_ea_location *el)
 {
 	struct ea_find ef;
 	int error;
@@ -516,8 +516,8 @@ out:
 	return error;
 }
 
-int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
-		     char *data, size_t size)
+static int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
+			    char *data, size_t size)
 {
 	int ret;
 	size_t len = GFS2_EA_DATA_LEN(el->el_ea);
@@ -534,6 +534,36 @@ int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
 	return len;
 }
 
+int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **ppdata)
+{
+	struct gfs2_ea_location el;
+	int error;
+	int len;
+	char *data;
+
+	error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, name, &el);
+	if (error)
+		return error;
+	if (!el.el_ea)
+		goto out;
+	if (!GFS2_EA_DATA_LEN(el.el_ea))
+		goto out;
+
+	len = GFS2_EA_DATA_LEN(el.el_ea);
+	data = kmalloc(len, GFP_NOFS);
+	error = -ENOMEM;
+	if (data == NULL)
+		goto out;
+
+	error = gfs2_ea_get_copy(ip, &el, data, len);
+	if (error == 0)
+		error = len;
+	*ppdata = data;
+out:
+	brelse(el.el_bh);
+	return error;
+}
+
 /**
  * gfs2_xattr_get - Get a GFS2 extended attribute
  * @inode: The inode
@@ -1259,22 +1289,26 @@ fail:
 	return error;
 }
 
-int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el,
-		      struct iattr *attr, char *data)
+int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data)
 {
+	struct gfs2_ea_location el;
 	struct buffer_head *dibh;
 	int error;
 
-	if (GFS2_EA_IS_STUFFED(el->el_ea)) {
+	error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, GFS2_POSIX_ACL_ACCESS, &el);
+	if (error)
+		return error;
+
+	if (GFS2_EA_IS_STUFFED(el.el_ea)) {
 		error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
 		if (error)
 			return error;
 
-		gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);
-		memcpy(GFS2_EA2DATA(el->el_ea), data,
-		       GFS2_EA_DATA_LEN(el->el_ea));
+		gfs2_trans_add_bh(ip->i_gl, el.el_bh, 1);
+		memcpy(GFS2_EA2DATA(el.el_ea), data,
+		       GFS2_EA_DATA_LEN(el.el_ea));
 	} else
-		error = ea_acl_chmod_unstuffed(ip, el->el_ea, data);
+		error = ea_acl_chmod_unstuffed(ip, el.el_ea, data);
 
 	if (error)
 		return error;
@@ -1507,18 +1541,6 @@ static int gfs2_xattr_user_set(struct inode *inode, const char *name,
 	return gfs2_xattr_set(inode, GFS2_EATYPE_USR, name, value, size, flags);
 }
 
-static int gfs2_xattr_system_get(struct inode *inode, const char *name,
-				 void *buffer, size_t size)
-{
-	return gfs2_xattr_get(inode, GFS2_EATYPE_SYS, name, buffer, size);
-}
-
-static int gfs2_xattr_system_set(struct inode *inode, const char *name,
-				 const void *value, size_t size, int flags)
-{
-	return gfs2_xattr_set(inode, GFS2_EATYPE_SYS, name, value, size, flags);
-}
-
 static int gfs2_xattr_security_get(struct inode *inode, const char *name,
 				   void *buffer, size_t size)
 {
@@ -1543,12 +1565,6 @@ static struct xattr_handler gfs2_xattr_security_handler = {
 	.set    = gfs2_xattr_security_set,
 };
 
-static struct xattr_handler gfs2_xattr_system_handler = {
-	.prefix = XATTR_SYSTEM_PREFIX,
-	.get    = gfs2_xattr_system_get,
-	.set    = gfs2_xattr_system_set,
-};
-
 struct xattr_handler *gfs2_xattr_handlers[] = {
 	&gfs2_xattr_user_handler,
 	&gfs2_xattr_security_handler,
@@ -62,11 +62,7 @@ extern int gfs2_ea_dealloc(struct gfs2_inode *ip);
 
 /* Exported to acl.c */
 
-extern int gfs2_ea_find(struct gfs2_inode *ip, int type, const char *name,
-			struct gfs2_ea_location *el);
-extern int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
-			    char *data, size_t size);
-extern int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el,
-			     struct iattr *attr, char *data);
+extern int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **data);
+extern int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data);
 
 #endif /* __EATTR_DOT_H__ */
@@ -17,7 +17,7 @@ config QUOTA
 
 config QUOTA_NETLINK_INTERFACE
 	bool "Report quota messages through netlink interface"
-	depends on QUOTA && NET
+	depends on QUOTACTL && NET
 	help
 	  If you say Y here, quota warnings (about exceeding softlimit, reaching
 	  hardlimit, etc.) will be reported through netlink interface. If unsure,
@@ -77,10 +77,6 @@
 #include <linux/capability.h>
 #include <linux/quotaops.h>
 #include <linux/writeback.h> /* for inode_lock, oddly enough.. */
-#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
-#include <net/netlink.h>
-#include <net/genetlink.h>
-#endif
 
 #include <asm/uaccess.h>
 
@@ -1071,73 +1067,6 @@ static void print_warning(struct dquot *dquot, const int warntype)
 }
 #endif
 
-#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
-
-/* Netlink family structure for quota */
-static struct genl_family quota_genl_family = {
-	.id = GENL_ID_GENERATE,
-	.hdrsize = 0,
-	.name = "VFS_DQUOT",
-	.version = 1,
-	.maxattr = QUOTA_NL_A_MAX,
-};
-
-/* Send warning to userspace about user which exceeded quota */
-static void send_warning(const struct dquot *dquot, const char warntype)
-{
-	static atomic_t seq;
-	struct sk_buff *skb;
-	void *msg_head;
-	int ret;
-	int msg_size = 4 * nla_total_size(sizeof(u32)) +
-		       2 * nla_total_size(sizeof(u64));
-
-	/* We have to allocate using GFP_NOFS as we are called from a
-	 * filesystem performing write and thus further recursion into
-	 * the fs to free some data could cause deadlocks. */
-	skb = genlmsg_new(msg_size, GFP_NOFS);
-	if (!skb) {
-		printk(KERN_ERR
-		  "VFS: Not enough memory to send quota warning.\n");
-		return;
-	}
-	msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
-			&quota_genl_family, 0, QUOTA_NL_C_WARNING);
-	if (!msg_head) {
-		printk(KERN_ERR
-		  "VFS: Cannot store netlink header in quota warning.\n");
-		goto err_out;
-	}
-	ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, dquot->dq_type);
-	if (ret)
-		goto attr_err_out;
-	ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, dquot->dq_id);
-	if (ret)
-		goto attr_err_out;
-	ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
-	if (ret)
-		goto attr_err_out;
-	ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR,
-		MAJOR(dquot->dq_sb->s_dev));
-	if (ret)
-		goto attr_err_out;
-	ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR,
-		MINOR(dquot->dq_sb->s_dev));
-	if (ret)
-		goto attr_err_out;
-	ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid());
-	if (ret)
-		goto attr_err_out;
-	genlmsg_end(skb, msg_head);
-
-	genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS);
-	return;
-attr_err_out:
-	printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
-err_out:
-	kfree_skb(skb);
-}
-#endif
 /*
  * Write warnings to the console and send warning messages over netlink.
  *
@@ -1145,18 +1074,20 @@ err_out:
  */
 static void flush_warnings(struct dquot *const *dquots, char *warntype)
 {
+	struct dquot *dq;
 	int i;
 
-	for (i = 0; i < MAXQUOTAS; i++)
-		if (dquots[i] && warntype[i] != QUOTA_NL_NOWARN &&
-		    !warning_issued(dquots[i], warntype[i])) {
+	for (i = 0; i < MAXQUOTAS; i++) {
+		dq = dquots[i];
+		if (dq && warntype[i] != QUOTA_NL_NOWARN &&
+		    !warning_issued(dq, warntype[i])) {
#ifdef CONFIG_PRINT_QUOTA_WARNING
-			print_warning(dquots[i], warntype[i]);
-#endif
-#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
-			send_warning(dquots[i], warntype[i]);
+			print_warning(dq, warntype[i]);
 #endif
+			quota_send_warning(dq->dq_type, dq->dq_id,
+					   dq->dq_sb->s_dev, warntype[i]);
 		}
+	}
 }
 
 static int ignore_hardlimit(struct dquot *dquot)
@@ -2607,12 +2538,6 @@ static int __init dquot_init(void)
 
 	register_shrinker(&dqcache_shrinker);
 
-#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
-	if (genl_register_family(&quota_genl_family) != 0)
-		printk(KERN_ERR
-		       "VFS: Failed to create quota netlink interface.\n");
-#endif
-
 	return 0;
 }
 module_init(dquot_init);
@@ -18,6 +18,8 @@
 #include <linux/capability.h>
 #include <linux/quotaops.h>
 #include <linux/types.h>
+#include <net/netlink.h>
+#include <net/genetlink.h>
 
 /* Check validity of generic quotactl commands */
 static int generic_quotactl_valid(struct super_block *sb, int type, int cmd,
@@ -525,3 +527,94 @@ asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special,
 	return ret;
 }
 #endif
+
+
+#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
+
+/* Netlink family structure for quota */
+static struct genl_family quota_genl_family = {
+	.id = GENL_ID_GENERATE,
+	.hdrsize = 0,
+	.name = "VFS_DQUOT",
+	.version = 1,
+	.maxattr = QUOTA_NL_A_MAX,
+};
+
+/**
+ * quota_send_warning - Send warning to userspace about exceeded quota
+ * @type: The quota type: USRQQUOTA, GRPQUOTA,...
+ * @id: The user or group id of the quota that was exceeded
+ * @dev: The device on which the fs is mounted (sb->s_dev)
+ * @warntype: The type of the warning: QUOTA_NL_...
+ *
+ * This can be used by filesystems (including those which don't use
+ * dquot) to send a message to userspace relating to quota limits.
+ *
+ */
+
+void quota_send_warning(short type, unsigned int id, dev_t dev,
+			const char warntype)
+{
+	static atomic_t seq;
+	struct sk_buff *skb;
+	void *msg_head;
+	int ret;
+	int msg_size = 4 * nla_total_size(sizeof(u32)) +
+		       2 * nla_total_size(sizeof(u64));
+
+	/* We have to allocate using GFP_NOFS as we are called from a
+	 * filesystem performing write and thus further recursion into
+	 * the fs to free some data could cause deadlocks. */
+	skb = genlmsg_new(msg_size, GFP_NOFS);
+	if (!skb) {
+		printk(KERN_ERR
+		  "VFS: Not enough memory to send quota warning.\n");
+		return;
+	}
+	msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
+			&quota_genl_family, 0, QUOTA_NL_C_WARNING);
+	if (!msg_head) {
+		printk(KERN_ERR
+		  "VFS: Cannot store netlink header in quota warning.\n");
+		goto err_out;
+	}
+	ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, type);
+	if (ret)
+		goto attr_err_out;
+	ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, id);
+	if (ret)
+		goto attr_err_out;
+	ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
+	if (ret)
+		goto attr_err_out;
+	ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR, MAJOR(dev));
+	if (ret)
+		goto attr_err_out;
+	ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, MINOR(dev));
+	if (ret)
+		goto attr_err_out;
+	ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid());
+	if (ret)
+		goto attr_err_out;
+	genlmsg_end(skb, msg_head);
+
+	genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS);
+	return;
+attr_err_out:
+	printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
+err_out:
+	kfree_skb(skb);
+}
+EXPORT_SYMBOL(quota_send_warning);
+
+static int __init quota_init(void)
+{
+	if (genl_register_family(&quota_genl_family) != 0)
+		printk(KERN_ERR
+		       "VFS: Failed to create quota netlink interface.\n");
+	return 0;
+};
+
+module_init(quota_init);
+#endif
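quota_send_warning() gives a filesystem that does not use the dquot core a way to emit the same netlink quota warnings that dquot users get. A hedged sketch of a caller, assuming a hypothetical allocation path that has just hit a hard block limit (QUOTA_NL_BHARDWARN and USRQUOTA are existing kernel constants; the hook and its name are made up for illustration):

#include <linux/fs.h>
#include <linux/quota.h>

/* Hypothetical hook: a write just failed against the user's hard block
 * limit, so report it over the quota netlink interface. */
static void example_report_block_hardlimit(struct super_block *sb, unsigned int uid)
{
	quota_send_warning(USRQUOTA, uid, sb->s_dev, QUOTA_NL_BHARDWARN);
}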
@@ -36,7 +36,7 @@ posix_acl_from_xattr(const void *value, size_t size)
 	if (count == 0)
 		return NULL;
 
-	acl = posix_acl_alloc(count, GFP_KERNEL);
+	acl = posix_acl_alloc(count, GFP_NOFS);
 	if (!acl)
 		return ERR_PTR(-ENOMEM);
 	acl_e = acl->a_entries;
@@ -81,7 +81,11 @@ struct gfs2_meta_header {
 	__be32 mh_type;
 	__be64 __pad0;		/* Was generation number in gfs1 */
 	__be32 mh_format;
-	__be32 __pad1;		/* Was incarnation number in gfs1 */
+	/* This union is to keep userspace happy */
+	union {
+		__be32 mh_jid;	/* Was incarnation number in gfs1 */
+		__be32 __pad1;
+	};
 };
 
 /*
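The anonymous union keeps the on-disk layout unchanged: mh_jid simply aliases the old __pad1 word, so the header stays the same size and existing tools still see a pad field. A standalone C11 sketch of the same trick, using fixed-width stand-ins for the __be32/__be64 types and a simplified header:

#include <stddef.h>
#include <stdint.h>

struct meta_header_old {
	uint32_t mh_magic;
	uint32_t mh_type;
	uint64_t __pad0;
	uint32_t mh_format;
	uint32_t __pad1;
};

struct meta_header_new {
	uint32_t mh_magic;
	uint32_t mh_type;
	uint64_t __pad0;
	uint32_t mh_format;
	union {			/* same storage, two names */
		uint32_t mh_jid;
		uint32_t __pad1;
	};
};

int main(void)
{
	/* Adding the union member changes neither size nor field offsets. */
	_Static_assert(sizeof(struct meta_header_old) == sizeof(struct meta_header_new),
		       "layout size unchanged");
	_Static_assert(offsetof(struct meta_header_old, __pad1) ==
		       offsetof(struct meta_header_new, mh_jid),
		       "jid reuses the pad slot");
	return 0;
}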
@@ -147,6 +147,20 @@ static inline void forget_cached_acl(struct inode *inode, int type)
 	if (old != ACL_NOT_CACHED)
 		posix_acl_release(old);
 }
+
+static inline void forget_all_cached_acls(struct inode *inode)
+{
+	struct posix_acl *old_access, *old_default;
+	spin_lock(&inode->i_lock);
+	old_access = inode->i_acl;
+	old_default = inode->i_default_acl;
+	inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
+	spin_unlock(&inode->i_lock);
+	if (old_access != ACL_NOT_CACHED)
+		posix_acl_release(old_access);
+	if (old_default != ACL_NOT_CACHED)
+		posix_acl_release(old_default);
+}
 #endif
 
 static inline void cache_no_acl(struct inode *inode)
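forget_all_cached_acls() swaps both cached pointers out under i_lock and drops the references only after the spinlock is released, so posix_acl_release(), which may free memory, never runs with the lock held. A hedged sketch of where a cluster filesystem might call it, for example when another node may have changed the ACLs and the local cache can no longer be trusted (the hook and its name are hypothetical):

#include <linux/fs.h>
#include <linux/posix_acl.h>

/* Hypothetical invalidation callback: this node lost its lock on the inode,
 * so any ACLs cached in the VFS may be stale and must be dropped. */
static void example_inode_invalidate(struct inode *inode)
{
	forget_all_cached_acls(inode);
}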
@@ -376,6 +376,17 @@ static inline unsigned int dquot_generic_flag(unsigned int flags, int type)
 	return flags >> _DQUOT_STATE_FLAGS;
 }
 
+#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
+extern void quota_send_warning(short type, unsigned int id, dev_t dev,
+			       const char warntype);
+#else
+static inline void quota_send_warning(short type, unsigned int id, dev_t dev,
+				      const char warntype)
+{
+	return;
+}
+#endif /* CONFIG_QUOTA_NETLINK_INTERFACE */
+
 struct quota_info {
 	unsigned int flags;			/* Flags for diskquotas on this device */
 	struct mutex dqio_mutex;		/* lock device while I/O in progress */
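The header keeps callers free of conditional compilation: with CONFIG_QUOTA_NETLINK_INTERFACE disabled, quota_send_warning() collapses to an empty static inline, so filesystem code can call it unconditionally. The same pattern in a standalone sketch, where FEATURE_X and do_warn() are made-up names:

#include <stdio.h>

#ifdef FEATURE_X
void do_warn(int id);			/* real implementation compiled elsewhere */
#else
/* Fallback: compiles away entirely when the feature is off. */
static inline void do_warn(int id) { (void)id; }
#endif

int main(void)
{
	do_warn(42);			/* callers never need their own #ifdef */
	printf("done\n");
	return 0;
}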