/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#ifndef __INCORE_DOT_H__
#define __INCORE_DOT_H__

#include <linux/fs.h>
#include <linux/workqueue.h>

#define DIO_WAIT	0x00000010
#define DIO_METADATA	0x00000020
#define DIO_ALL		0x00000100

struct gfs2_log_operations;
struct gfs2_log_element;
struct gfs2_holder;
struct gfs2_glock;
struct gfs2_quota_data;
struct gfs2_trans;
struct gfs2_ail;
struct gfs2_jdesc;
struct gfs2_sbd;

typedef void (*gfs2_glop_bh_t) (struct gfs2_glock *gl, unsigned int ret);

struct gfs2_log_header_host {
	u64 lh_sequence;	/* Sequence number of this transaction */
	u32 lh_flags;		/* GFS2_LOG_HEAD_... */
	u32 lh_tail;		/* Block number of log tail */
	u32 lh_blkno;
	u32 lh_hash;
};

/*
 * Structure of operations that are associated with each
 * type of element in the log.
 */

struct gfs2_log_operations {
	void (*lo_add) (struct gfs2_sbd *sdp, struct gfs2_log_element *le);
	void (*lo_before_commit) (struct gfs2_sbd *sdp);
	void (*lo_after_commit) (struct gfs2_sbd *sdp, struct gfs2_ail *ai);
	void (*lo_before_scan) (struct gfs2_jdesc *jd,
				struct gfs2_log_header_host *head, int pass);
	int (*lo_scan_elements) (struct gfs2_jdesc *jd, unsigned int start,
				 struct gfs2_log_descriptor *ld, __be64 *ptr,
				 int pass);
	void (*lo_after_scan) (struct gfs2_jdesc *jd, int error, int pass);
	const char *lo_name;
};

struct gfs2_log_element {
	struct list_head le_list;
	const struct gfs2_log_operations *le_ops;
};

struct gfs2_bitmap {
	struct buffer_head *bi_bh;
	char *bi_clone;
	u32 bi_offset;
	u32 bi_start;
	u32 bi_len;
};

struct gfs2_rgrp_host {
	u32 rg_free;
	u32 rg_dinodes;
	u64 rg_igeneration;
};

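/*
 * In-core resource group descriptor: rd_rg caches the on-disk counters
 * and rd_bits points at the per-bitmap-block state used by the allocator.
 */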
struct gfs2_rgrpd {
	struct list_head rd_list;	/* Link with superblock */
	struct list_head rd_list_mru;
	struct list_head rd_recent;	/* Recently used rgrps */
	struct gfs2_glock *rd_gl;	/* Glock for this rgrp */
	u64 rd_addr;			/* grp block disk address */
	u64 rd_data0;			/* first data location */
	u32 rd_length;			/* length of rgrp header in fs blocks */
	u32 rd_data;			/* num of data blocks in rgrp */
	u32 rd_bitbytes;		/* number of bytes in data bitmaps */
	struct gfs2_rgrp_host rd_rg;
	u64 rd_rg_vn;
	struct gfs2_bitmap *rd_bits;
	unsigned int rd_bh_count;
	struct mutex rd_mutex;
	u32 rd_free_clone;
	struct gfs2_log_element rd_le;
	u32 rd_last_alloc_data;
	u32 rd_last_alloc_meta;
	struct gfs2_sbd *rd_sbd;
	unsigned char rd_flags;
#define GFS2_RDF_CHECK		0x01	/* Need to check for unlinked inodes */
#define GFS2_RDF_NOALLOC	0x02	/* rg prohibits allocation */
};

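/*
 * Private buffer_head state bits.  The BUFFER_FNS()/TAS_BUFFER_FNS()
 * invocations below generate the usual buffer_pinned()/set_buffer_pinned()
 * style accessors for them.
 */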
enum gfs2_state_bits {
	BH_Pinned = BH_PrivateStart,
	BH_Escaped = BH_PrivateStart + 1,
};

BUFFER_FNS(Pinned, pinned)
TAS_BUFFER_FNS(Pinned, pinned)
BUFFER_FNS(Escaped, escaped)
TAS_BUFFER_FNS(Escaped, escaped)

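/*
 * Journal bookkeeping attached to a single buffer_head, linking it to its
 * glock, to the transaction being built and to the AIL lists after commit.
 */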
struct gfs2_bufdata {
	struct buffer_head *bd_bh;
	struct gfs2_glock *bd_gl;

	union {
		struct list_head list_tr;
		u64 blkno;
	} u;
#define bd_list_tr u.list_tr
#define bd_blkno u.blkno

	struct gfs2_log_element bd_le;

	struct gfs2_ail *bd_ail;
	struct list_head bd_ail_st_list;
	struct list_head bd_ail_gl_list;
};

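/*
 * Per-glock-type operations.  go_min_hold_time is the minimum time a glock
 * is retained after a state change before a requested demote is acted on,
 * so the holder gets some useful work done under contention.
 */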
struct gfs2_glock_operations {
	void (*go_xmote_th) (struct gfs2_glock *gl);
	void (*go_xmote_bh) (struct gfs2_glock *gl);
	void (*go_inval) (struct gfs2_glock *gl, int flags);
	int (*go_demote_ok) (struct gfs2_glock *gl);
	int (*go_lock) (struct gfs2_holder *gh);
	void (*go_unlock) (struct gfs2_holder *gh);
	const int go_type;
	const unsigned long go_min_hold_time;
};

enum {
	/* States */
	HIF_HOLDER	= 6,
	HIF_FIRST	= 7,
	HIF_ABORTED	= 9,
	HIF_WAIT	= 10,
};

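/*
 * A holder represents one requester of a glock: the state it wants
 * (gh_state), the request flags (gh_flags) and the result (gh_error).
 */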
struct gfs2_holder {
	struct list_head gh_list;

	struct gfs2_glock *gh_gl;
	struct pid *gh_owner_pid;
	unsigned int gh_state;
	unsigned gh_flags;

	int gh_error;
	unsigned long gh_iflags;
	unsigned long gh_ip;
};

enum {
	GLF_LOCK		= 1,
	GLF_STICKY		= 2,
	GLF_DEMOTE		= 3,
	GLF_PENDING_DEMOTE	= 4,
	GLF_DIRTY		= 5,
	GLF_DEMOTE_IN_PROGRESS	= 6,
	GLF_LFLUSH		= 7,
	GLF_WAITERS2		= 8,
};

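/*
 * In-core glock: this node's view of one cluster-wide lock.  Granted and
 * waiting holders queue on gl_holders/gl_waiters*, remote demote requests
 * are noted in gl_demote_state and processed via the gl_work work item.
 */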
struct gfs2_glock {
	struct hlist_node gl_list;
	unsigned long gl_flags;		/* GLF_... */
	struct lm_lockname gl_name;
	atomic_t gl_ref;

	spinlock_t gl_spin;

	unsigned int gl_state;
	unsigned int gl_hash;
	unsigned int gl_demote_state;	/* state requested by remote node */
	unsigned long gl_demote_time;	/* time of first demote request */
	struct pid *gl_owner_pid;
	unsigned long gl_ip;
	struct list_head gl_holders;
	struct list_head gl_waiters1;	/* HIF_MUTEX */
	struct list_head gl_waiters3;	/* HIF_PROMOTE */

	const struct gfs2_glock_operations *gl_ops;

	struct gfs2_holder *gl_req_gh;

	void *gl_lock;
	char *gl_lvb;
	atomic_t gl_lvb_count;

	u64 gl_vn;
	unsigned long gl_stamp;
	unsigned long gl_tchange;
	void *gl_object;

	struct list_head gl_reclaim;

	struct gfs2_sbd *gl_sbd;

	struct inode *gl_aspace;
	struct list_head gl_ail_list;
	atomic_t gl_ail_count;
	struct delayed_work gl_work;
};

#define GFS2_MIN_LVB_SIZE 32	/* Min size of LVB that gfs2 supports */

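/*
 * Per-operation allocation context: the quota references being charged and
 * the resource group (al_rgd) picked by gfs2_inplace_reserve().
 */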
struct gfs2_alloc {
	/* Quota stuff */

	struct gfs2_quota_data *al_qd[2*MAXQUOTAS];
	struct gfs2_holder al_qd_ghs[2*MAXQUOTAS];
	unsigned int al_qd_num;

	u32 al_requested;	/* Filled in by caller of gfs2_inplace_reserve() */
	u32 al_alloced;		/* Filled in by gfs2_alloc_*() */

	/* Filled in by gfs2_inplace_reserve() */

	unsigned int al_line;
	char *al_file;
	struct gfs2_holder al_ri_gh;
	struct gfs2_holder al_rgd_gh;
	struct gfs2_rgrpd *al_rgd;
};

enum {
	GIF_INVALID	= 0,
	GIF_QD_LOCKED	= 1,
	GIF_SW_PAGED	= 3,
};

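/*
 * In-core copy of on-disk dinode fields that are not mirrored in the VFS
 * inode (see the i_di note in struct gfs2_inode below).
 */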
struct gfs2_dinode_host {
	u64 di_size;		/* number of bytes in file */
	u64 di_blocks;		/* number of blocks in file */
	u64 di_goal_meta;	/* rgrp to alloc from next */
	u64 di_goal_data;	/* data block goal */
	u64 di_generation;	/* generation number for NFS */
	u32 di_flags;		/* GFS2_DIF_... */
	/* These only apply to directories */
	u16 di_depth;		/* Number of bits in the table */
	u32 di_entries;		/* The number of entries in the directory */
	u64 di_eattr;		/* extended attribute block number */
};

struct gfs2_inode {
	struct inode i_inode;
	u64 i_no_addr;
	u64 i_no_formal_ino;
	unsigned long i_flags;		/* GIF_... */

	struct gfs2_dinode_host i_di;	/* To be replaced by ref to block */

	struct gfs2_glock *i_gl;	/* Move into i_gh? */
	struct gfs2_holder i_iopen_gh;
	struct gfs2_holder i_gh;	/* for prepare/commit_write only */
	struct gfs2_alloc *i_alloc;
	u64 i_last_rg_alloc;

	struct rw_semaphore i_rw_mutex;
	u8 i_height;
};

/*
 * Since i_inode is the first element of struct gfs2_inode,
 * this is effectively a cast.
 */
static inline struct gfs2_inode *GFS2_I(struct inode *inode)
{
	return container_of(inode, struct gfs2_inode, i_inode);
}

static inline struct gfs2_sbd *GFS2_SB(const struct inode *inode)
{
	return inode->i_sb->s_fs_info;
}

struct gfs2_file {
	struct mutex f_fl_mutex;
	struct gfs2_holder f_fl_gh;
};

struct gfs2_revoke_replay {
	struct list_head rr_list;
	u64 rr_blkno;
	unsigned int rr_where;
};

enum {
	QDF_USER	= 0,
	QDF_CHANGE	= 1,
	QDF_LOCKED	= 2,
};

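/*
 * In-core quota state for one user or group ID.  qd_change accumulates
 * unsynced block-count changes; qd_qb mirrors the quota LVB.
 */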
struct gfs2_quota_data {
	struct list_head qd_list;
	unsigned int qd_count;

	u32 qd_id;
	unsigned long qd_flags;		/* QDF_... */

	s64 qd_change;
	s64 qd_change_sync;

	unsigned int qd_slot;
	unsigned int qd_slot_count;

	struct buffer_head *qd_bh;
	struct gfs2_quota_change *qd_bh_qc;
	unsigned int qd_bh_count;

	struct gfs2_glock *qd_gl;
	struct gfs2_quota_lvb qd_qb;

	u64 qd_sync_gen;
	unsigned long qd_last_warn;
	unsigned long qd_last_touched;
};

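/*
 * An in-core transaction: the log space reserved when it was started and
 * counts of the buffers and revokes added to it so far.
 */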
struct gfs2_trans {
	unsigned long tr_ip;

	unsigned int tr_blocks;
	unsigned int tr_revokes;
	unsigned int tr_reserved;

	struct gfs2_holder tr_t_gh;

	int tr_touched;

	unsigned int tr_num_buf;
	unsigned int tr_num_buf_new;
	unsigned int tr_num_databuf_new;
	unsigned int tr_num_buf_rm;
	unsigned int tr_num_databuf_rm;
	struct list_head tr_list_buf;

	unsigned int tr_num_revoke;
	unsigned int tr_num_revoke_rm;
};

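/*
 * One AIL (active items list) entry per log flush: buffers already
 * committed to the journal but not yet written back in place.
 */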
struct gfs2_ail {
	struct list_head ai_list;

	unsigned int ai_first;
	struct list_head ai_ail1_list;
	struct list_head ai_ail2_list;

	u64 ai_sync_gen;
};

struct gfs2_journal_extent {
	struct list_head extent_list;

	unsigned int lblock;	/* First logical block */
	u64 dblock;		/* First disk block */
	u64 blocks;
};

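/*
 * Per-journal descriptor; extent_list maps the journal's logical blocks to
 * disk blocks using struct gfs2_journal_extent above.
 */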
struct gfs2_jdesc {
	struct list_head jd_list;
	struct list_head extent_list;

	struct inode *jd_inode;
	unsigned int jd_jid;
	int jd_dirty;

	unsigned int jd_blocks;
};

struct gfs2_statfs_change_host {
	s64 sc_total;
	s64 sc_free;
	s64 sc_dinodes;
};

#define GFS2_GLOCKD_DEFAULT	1
#define GFS2_GLOCKD_MAX		16

#define GFS2_QUOTA_DEFAULT	GFS2_QUOTA_OFF
#define GFS2_QUOTA_OFF		0
#define GFS2_QUOTA_ACCOUNT	1
#define GFS2_QUOTA_ON		2

#define GFS2_DATA_DEFAULT	GFS2_DATA_ORDERED
#define GFS2_DATA_WRITEBACK	1
#define GFS2_DATA_ORDERED	2

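/* Mount options as parsed at mount time; stored in the superblock as sd_args. */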
struct gfs2_args {
	char ar_lockproto[GFS2_LOCKNAME_LEN];	/* Name of the Lock Protocol */
	char ar_locktable[GFS2_LOCKNAME_LEN];	/* Name of the Lock Table */
	char ar_hostdata[GFS2_LOCKNAME_LEN];	/* Host specific data */
	int ar_spectator;	/* Don't get a journal because we're always RO */
	int ar_ignore_local_fs;	/* Don't optimize even if local_fs is 1 */
	int ar_localflocks;	/* Let the VFS do flock|fcntl locks for us */
	int ar_localcaching;	/* Local-style caching (dangerous on multihost) */
	int ar_debug;		/* Oops on errors instead of trying to be graceful */
	int ar_upgrade;		/* Upgrade ondisk/multihost format */
	unsigned int ar_num_glockd;	/* Number of glockd threads */
	int ar_posix_acl;	/* Enable posix acls */
	int ar_quota;		/* off/account/on */
	int ar_suiddir;		/* suiddir support */
	int ar_data;		/* ordered/writeback */
};

struct gfs2_tune {
	spinlock_t gt_spin;

	unsigned int gt_demote_secs;		/* Cache retention for unheld glock */
	unsigned int gt_incore_log_blocks;
	unsigned int gt_log_flush_secs;

	unsigned int gt_recoverd_secs;
	unsigned int gt_logd_secs;
	unsigned int gt_quotad_secs;

	unsigned int gt_quota_simul_sync;	/* Max quotavals to sync at once */
	unsigned int gt_quota_warn_period;	/* Secs between quota warn msgs */
	unsigned int gt_quota_scale_num;	/* Numerator */
	unsigned int gt_quota_scale_den;	/* Denominator */
	unsigned int gt_quota_cache_secs;
	unsigned int gt_quota_quantum;		/* Secs between syncs to quota file */
	unsigned int gt_atime_quantum;		/* Min secs between atime updates */
	unsigned int gt_new_files_jdata;
	unsigned int gt_new_files_directio;
	unsigned int gt_max_readahead;		/* Max bytes to read-ahead from disk */
	unsigned int gt_stall_secs;		/* Detects trouble! */
	unsigned int gt_complain_secs;
	unsigned int gt_statfs_quantum;
	unsigned int gt_statfs_slow;
};

enum {
	SDF_JOURNAL_CHECKED	= 0,
	SDF_JOURNAL_LIVE	= 1,
	SDF_SHUTDOWN		= 2,
	SDF_NOATIME		= 3,
};

#define GFS2_FSNAME_LEN		256

struct gfs2_inum_host {
	u64 no_formal_ino;
	u64 no_addr;
};

struct gfs2_sb_host {
	u32 sb_magic;
	u32 sb_type;
	u32 sb_format;

	u32 sb_fs_format;
	u32 sb_multihost_format;
	u32 sb_bsize;
	u32 sb_bsize_shift;

	struct gfs2_inum_host sb_master_dir;
	struct gfs2_inum_host sb_root_dir;

	char sb_lockproto[GFS2_LOCKNAME_LEN];
	char sb_locktable[GFS2_LOCKNAME_LEN];
};

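/*
 * The in-core superblock, one per mount; the VFS super_block points at it
 * through s_fs_info (see GFS2_SB() above).
 */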
struct gfs2_sbd {
	struct super_block *sd_vfs;
	struct super_block *sd_vfs_meta;
	struct kobject sd_kobj;
	unsigned long sd_flags;	/* SDF_... */
	struct gfs2_sb_host sd_sb;

	/* Constants computed on mount */

	u32 sd_fsb2bb;
	u32 sd_fsb2bb_shift;
	u32 sd_diptrs;		/* Number of pointers in a dinode */
	u32 sd_inptrs;		/* Number of pointers in an indirect block */
	u32 sd_jbsize;		/* Size of a journaled data block */
	u32 sd_hash_bsize;	/* sizeof(exhash block) */
	u32 sd_hash_bsize_shift;
	u32 sd_hash_ptrs;	/* Number of pointers in a hash block */
	u32 sd_qc_per_block;
	u32 sd_max_dirres;	/* Max blocks needed to add a directory entry */
	u32 sd_max_height;	/* Max height of a file's metadata tree */
	u64 sd_heightsize[GFS2_MAX_META_HEIGHT + 1];
	u32 sd_max_jheight;	/* Max height of journaled file's meta tree */
	u64 sd_jheightsize[GFS2_MAX_META_HEIGHT + 1];

	struct gfs2_args sd_args;	/* Mount arguments */
	struct gfs2_tune sd_tune;	/* Filesystem tuning structure */

	/* Lock Stuff */

	struct lm_lockstruct sd_lockstruct;
	struct list_head sd_reclaim_list;
	spinlock_t sd_reclaim_lock;
	wait_queue_head_t sd_reclaim_wq;
	atomic_t sd_reclaim_count;
	struct gfs2_holder sd_live_gh;
	struct gfs2_glock *sd_rename_gl;
	struct gfs2_glock *sd_trans_gl;

	/* Inode Stuff */

	struct inode *sd_master_dir;
	struct inode *sd_jindex;
	struct inode *sd_inum_inode;
	struct inode *sd_statfs_inode;
	struct inode *sd_ir_inode;
	struct inode *sd_sc_inode;
	struct inode *sd_qc_inode;
	struct inode *sd_rindex;
	struct inode *sd_quota_inode;

	/* Inum stuff */

	struct mutex sd_inum_mutex;

	/* StatFS stuff */

	spinlock_t sd_statfs_spin;
	struct gfs2_statfs_change_host sd_statfs_master;
	struct gfs2_statfs_change_host sd_statfs_local;
	unsigned long sd_statfs_sync_time;

	/* Resource group stuff */

	u64 sd_rindex_vn;
	spinlock_t sd_rindex_spin;
	struct mutex sd_rindex_mutex;
	struct list_head sd_rindex_list;
	struct list_head sd_rindex_mru_list;
	struct list_head sd_rindex_recent_list;
	struct gfs2_rgrpd *sd_rindex_forward;
	unsigned int sd_rgrps;

	/* Journal index stuff */

	struct list_head sd_jindex_list;
	spinlock_t sd_jindex_spin;
	struct mutex sd_jindex_mutex;
	unsigned int sd_journals;
	unsigned long sd_jindex_refresh_time;

	struct gfs2_jdesc *sd_jdesc;
	struct gfs2_holder sd_journal_gh;
	struct gfs2_holder sd_jinode_gh;

	struct gfs2_holder sd_ir_gh;
	struct gfs2_holder sd_sc_gh;
	struct gfs2_holder sd_qc_gh;

	/* Daemon stuff */

	struct task_struct *sd_recoverd_process;
	struct task_struct *sd_logd_process;
	struct task_struct *sd_quotad_process;
	struct task_struct *sd_glockd_process[GFS2_GLOCKD_MAX];
	unsigned int sd_glockd_num;

	/* Quota stuff */

	struct list_head sd_quota_list;
	atomic_t sd_quota_count;
	spinlock_t sd_quota_spin;
	struct mutex sd_quota_mutex;

	unsigned int sd_quota_slots;
	unsigned int sd_quota_chunks;
	unsigned char **sd_quota_bitmap;

	u64 sd_quota_sync_gen;
	unsigned long sd_quota_sync_time;

	/* Log stuff */

	spinlock_t sd_log_lock;

	unsigned int sd_log_blks_reserved;
	unsigned int sd_log_commited_buf;
	unsigned int sd_log_commited_databuf;
	unsigned int sd_log_commited_revoke;

	unsigned int sd_log_num_buf;
	unsigned int sd_log_num_revoke;
	unsigned int sd_log_num_rg;
	unsigned int sd_log_num_databuf;

	struct list_head sd_log_le_buf;
	struct list_head sd_log_le_revoke;
	struct list_head sd_log_le_rg;
	struct list_head sd_log_le_databuf;
	struct list_head sd_log_le_ordered;

	atomic_t sd_log_blks_free;
	struct mutex sd_log_reserve_mutex;

	u64 sd_log_sequence;
	unsigned int sd_log_head;
	unsigned int sd_log_tail;
	int sd_log_idle;

	unsigned long sd_log_flush_time;
	struct rw_semaphore sd_log_flush_lock;
	atomic_t sd_log_in_flight;
	wait_queue_head_t sd_log_flush_wait;

	unsigned int sd_log_flush_head;
	u64 sd_log_flush_wrapped;

	struct list_head sd_ail1_list;
	struct list_head sd_ail2_list;
	u64 sd_ail_sync_gen;

	/* Replay stuff */

	struct list_head sd_revoke_list;
	unsigned int sd_replay_tail;

	unsigned int sd_found_blocks;
	unsigned int sd_found_revokes;
	unsigned int sd_replayed_blocks;

	/* For quiescing the filesystem */

	struct gfs2_holder sd_freeze_gh;
	struct mutex sd_freeze_lock;
	unsigned int sd_freeze_count;

	/* Counters */

	atomic_t sd_glock_count;
	atomic_t sd_glock_held_count;
	atomic_t sd_inode_count;
	atomic_t sd_reclaimed;

	char sd_fsname[GFS2_FSNAME_LEN];
	char sd_table_name[GFS2_FSNAME_LEN];
	char sd_proto_name[GFS2_FSNAME_LEN];

	/* Debugging crud */

	unsigned long sd_last_warning;
	struct vfsmount *sd_gfs2mnt;
	struct dentry *debugfs_dir;		/* debugfs directory */
	struct dentry *debugfs_dentry_glocks;	/* for debugfs */
};

#endif /* __INCORE_DOT_H__ */