/* netfs cookie management
 *
 * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * See Documentation/filesystems/caching/netfs-api.txt for more information on
 * the netfs API.
 */

#define FSCACHE_DEBUG_LEVEL COOKIE
#include <linux/module.h>
#include <linux/slab.h>
#include "internal.h"

struct kmem_cache *fscache_cookie_jar;

static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);

static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
static int fscache_alloc_object(struct fscache_cache *cache,
                                struct fscache_cookie *cookie);
static int fscache_attach_object(struct fscache_cookie *cookie,
                                 struct fscache_object *object);

/*
 * initialise a cookie jar slab element prior to any use
 */
void fscache_cookie_init_once(void *_cookie)
{
        struct fscache_cookie *cookie = _cookie;

        memset(cookie, 0, sizeof(*cookie));
        spin_lock_init(&cookie->lock);
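        /* The stores_lock guards the radix tree tracking pages pending
         * storage; it ranks below both the cookie lock and the object lock
         * and so must be taken after them. */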
        spin_lock_init(&cookie->stores_lock);
        INIT_HLIST_HEAD(&cookie->backing_objects);
}

/*
 * request a cookie to represent an object (index, datafile, xattr, etc)
 * - parent specifies the parent object
 *   - the top level index cookie for each netfs is stored in the fscache_netfs
 *     struct upon registration
 * - def points to the definition
 * - the netfs_data will be passed to the functions pointed to in *def
 * - all attached caches will be searched to see if they contain this object
 * - index objects aren't stored on disk until there's a dependent file that
 *   needs storing
 * - other objects are stored in a selected cache immediately, and all the
 *   indices forming the path to it are instantiated if necessary
 * - we never let on to the netfs about errors
 *   - we may set a negative cookie pointer, but that's okay
 */
struct fscache_cookie *__fscache_acquire_cookie(
        struct fscache_cookie *parent,
        const struct fscache_cookie_def *def,
        void *netfs_data)
{
        struct fscache_cookie *cookie;

        BUG_ON(!def);

        _enter("{%s},{%s},%p",
               parent ? (char *) parent->def->name : "<no-parent>",
               def->name, netfs_data);

        fscache_stat(&fscache_n_acquires);

        /* if there's no parent cookie, then we don't create one here either */
        if (!parent) {
                fscache_stat(&fscache_n_acquires_null);
                _leave(" [no parent]");
                return NULL;
        }

        /* validate the definition */
        BUG_ON(!def->get_key);
        BUG_ON(!def->name[0]);

        BUG_ON(def->type == FSCACHE_COOKIE_TYPE_INDEX &&
               parent->def->type != FSCACHE_COOKIE_TYPE_INDEX);

        /* allocate and initialise a cookie */
        cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
        if (!cookie) {
                fscache_stat(&fscache_n_acquires_oom);
                _leave(" [ENOMEM]");
                return NULL;
        }

        atomic_set(&cookie->usage, 1);
        atomic_set(&cookie->n_children, 0);

        atomic_inc(&parent->usage);
        atomic_inc(&parent->n_children);

        cookie->def = def;
        cookie->parent = parent;
        cookie->netfs_data = netfs_data;
        cookie->flags = 0;

        /* radix tree insertion won't use the preallocation pool unless it's
         * told it may not wait */
        INIT_RADIX_TREE(&cookie->stores, GFP_NOFS & ~__GFP_WAIT);

        switch (cookie->def->type) {
        case FSCACHE_COOKIE_TYPE_INDEX:
                fscache_stat(&fscache_n_cookie_index);
                break;
        case FSCACHE_COOKIE_TYPE_DATAFILE:
                fscache_stat(&fscache_n_cookie_data);
                break;
        default:
                fscache_stat(&fscache_n_cookie_special);
                break;
        }

        /* if the object is an index then we need do nothing more here - we
         * create indices on disk when we need them as an index may exist in
         * multiple caches */
        if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX) {
                if (fscache_acquire_non_index_cookie(cookie) < 0) {
                        atomic_dec(&parent->n_children);
                        __fscache_cookie_put(cookie);
                        fscache_stat(&fscache_n_acquires_nobufs);
                        _leave(" = NULL");
                        return NULL;
                }
        }

        fscache_stat(&fscache_n_acquires_ok);
        _leave(" = %p", cookie);
        return cookie;
}
EXPORT_SYMBOL(__fscache_acquire_cookie);
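
/*
 * Illustrative sketch: a netfs normally reaches this routine through the
 * fscache_acquire_cookie() wrapper in linux/fscache.h, passing its primary
 * index cookie as the parent.  The names my_netfs, my_inode_cookie_def and
 * my_inode below are placeholders, not real identifiers:
 *
 *	struct fscache_cookie *cookie;
 *
 *	cookie = fscache_acquire_cookie(my_netfs.primary_index,
 *					&my_inode_cookie_def,
 *					my_inode);
 *	if (!cookie)
 *		;	// no cache available - the netfs just runs uncached
 *
 * A NULL return is deliberately indistinguishable from an error: as the
 * banner comment above says, errors are never reported back to the netfs.
 */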

/*
 * acquire a non-index cookie
 * - this must make sure the index chain is instantiated and instantiate the
 *   object representation too
 */
static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
{
        struct fscache_object *object;
        struct fscache_cache *cache;
        uint64_t i_size;
        int ret;

        _enter("");

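        /* Assume the cookie is unavailable until a cache has accepted it;
         * this flag setting is replaced below once a cache is selected. */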
        cookie->flags = 1 << FSCACHE_COOKIE_UNAVAILABLE;

        /* now we need to see whether the backing objects for this cookie yet
         * exist, if not there'll be nothing to search */
        down_read(&fscache_addremove_sem);

        if (list_empty(&fscache_cache_list)) {
                up_read(&fscache_addremove_sem);
                _leave(" = 0 [no caches]");
                return 0;
        }

        /* select a cache in which to store the object */
        cache = fscache_select_cache_for_object(cookie->parent);
        if (!cache) {
                up_read(&fscache_addremove_sem);
                fscache_stat(&fscache_n_acquires_no_cache);
                _leave(" = -ENOMEDIUM [no cache]");
                return -ENOMEDIUM;
        }

        _debug("cache %s", cache->tag->name);

        cookie->flags =
                (1 << FSCACHE_COOKIE_LOOKING_UP) |
                (1 << FSCACHE_COOKIE_CREATING) |
                (1 << FSCACHE_COOKIE_NO_DATA_YET);

        /* ask the cache to allocate objects for this cookie and its parent
         * chain */
        ret = fscache_alloc_object(cache, cookie);
        if (ret < 0) {
                up_read(&fscache_addremove_sem);
                _leave(" = %d", ret);
                return ret;
        }

        /* pass on how big the object we're caching is supposed to be */
        cookie->def->get_attr(cookie->netfs_data, &i_size);

        spin_lock(&cookie->lock);
        if (hlist_empty(&cookie->backing_objects)) {
                spin_unlock(&cookie->lock);
                goto unavailable;
        }

        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        fscache_set_store_limit(object, i_size);

        /* initiate the process of looking up all the objects in the chain
         * (done by fscache_initialise_object()) */
        fscache_enqueue_object(object);

        spin_unlock(&cookie->lock);

        /* we may be required to wait for lookup to complete at this point */
        if (!fscache_defer_lookup) {
                _debug("non-deferred lookup %p", &cookie->flags);
                wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
                            fscache_wait_bit, TASK_UNINTERRUPTIBLE);
                _debug("complete");
                if (test_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags))
                        goto unavailable;
        }

        up_read(&fscache_addremove_sem);
        _leave(" = 0 [deferred]");
        return 0;

unavailable:
        up_read(&fscache_addremove_sem);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}

/*
 * recursively allocate cache object records for a cookie/cache combination
 * - caller must be holding the addremove sem
 */
static int fscache_alloc_object(struct fscache_cache *cache,
                                struct fscache_cookie *cookie)
{
        struct fscache_object *object;
        int ret;

        _enter("%p,%p{%s}", cache, cookie, cookie->def->name);

        spin_lock(&cookie->lock);
        hlist_for_each_entry(object, &cookie->backing_objects,
                             cookie_link) {
                if (object->cache == cache)
                        goto object_already_extant;
        }
        spin_unlock(&cookie->lock);

        /* ask the cache to allocate an object (we may end up with duplicate
         * objects at this stage, but we sort that out later) */
        fscache_stat(&fscache_n_cop_alloc_object);
        object = cache->ops->alloc_object(cache, cookie);
        fscache_stat_d(&fscache_n_cop_alloc_object);
        if (IS_ERR(object)) {
                fscache_stat(&fscache_n_object_no_alloc);
                ret = PTR_ERR(object);
                goto error;
        }

        fscache_stat(&fscache_n_object_alloc);

        object->debug_id = atomic_inc_return(&fscache_object_debug_id);

        _debug("ALLOC OBJ%x: %s {%lx}",
               object->debug_id, cookie->def->name, object->events);

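        /* Walk up the parent chain; the recursion terminates when an
         * ancestor cookie already has an object in this cache (see
         * object_already_extant below). */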
        ret = fscache_alloc_object(cache, cookie->parent);
        if (ret < 0)
                goto error_put;

        /* only attach if we managed to allocate all we needed, otherwise
         * discard the object we just allocated and instead use the one
         * attached to the cookie */
        if (fscache_attach_object(cookie, object) < 0) {
                fscache_stat(&fscache_n_cop_put_object);
                cache->ops->put_object(object);
                fscache_stat_d(&fscache_n_cop_put_object);
        }

        _leave(" = 0");
        return 0;

object_already_extant:
        ret = -ENOBUFS;
        if (object->state >= FSCACHE_OBJECT_DYING) {
                spin_unlock(&cookie->lock);
                goto error;
        }
        spin_unlock(&cookie->lock);
        _leave(" = 0 [found]");
        return 0;

error_put:
        fscache_stat(&fscache_n_cop_put_object);
        cache->ops->put_object(object);
        fscache_stat_d(&fscache_n_cop_put_object);
error:
        _leave(" = %d", ret);
        return ret;
}

/*
 * attach a cache object to a cookie
 */
static int fscache_attach_object(struct fscache_cookie *cookie,
                                 struct fscache_object *object)
{
        struct fscache_object *p;
        struct fscache_cache *cache = object->cache;
        int ret;

        _enter("{%s},{OBJ%x}", cookie->def->name, object->debug_id);

        spin_lock(&cookie->lock);

        /* there may be multiple initial creations of this object, but we only
         * want one */
        ret = -EEXIST;
        hlist_for_each_entry(p, &cookie->backing_objects, cookie_link) {
                if (p->cache == object->cache) {
                        if (p->state >= FSCACHE_OBJECT_DYING)
                                ret = -ENOBUFS;
                        goto cant_attach_object;
                }
        }

        /* pin the parent object */
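        /* The parent's lock belongs to the same lock class as the cookie
         * lock already held, so spin_lock_nested() is used to tell lockdep
         * about the deliberate nesting. */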
        spin_lock_nested(&cookie->parent->lock, 1);
        hlist_for_each_entry(p, &cookie->parent->backing_objects,
                             cookie_link) {
                if (p->cache == object->cache) {
                        if (p->state >= FSCACHE_OBJECT_DYING) {
                                ret = -ENOBUFS;
                                spin_unlock(&cookie->parent->lock);
                                goto cant_attach_object;
                        }
                        object->parent = p;
                        spin_lock(&p->lock);
                        p->n_children++;
                        spin_unlock(&p->lock);
                        break;
                }
        }
        spin_unlock(&cookie->parent->lock);

        /* attach to the cache's object list */
        if (list_empty(&object->cache_link)) {
                spin_lock(&cache->object_list_lock);
                list_add(&object->cache_link, &cache->object_list);
                spin_unlock(&cache->object_list_lock);
        }

        /* attach to the cookie */
        object->cookie = cookie;
        atomic_inc(&cookie->usage);
        hlist_add_head(&object->cookie_link, &cookie->backing_objects);

        fscache_objlist_add(object);
        ret = 0;

cant_attach_object:
        spin_unlock(&cookie->lock);
        _leave(" = %d", ret);
        return ret;
}

/*
 * Invalidate an object. Callable with spinlocks held.
 */
void __fscache_invalidate(struct fscache_cookie *cookie)
{
        struct fscache_object *object;

        _enter("{%s}", cookie->def->name);

        fscache_stat(&fscache_n_invalidates);

        /* Only permit invalidation of data files. Invalidating an index will
         * require the caller to release all its attachments to the tree rooted
         * there, and if it's doing that, it may as well just retire the
         * cookie.
         */
        ASSERTCMP(cookie->def->type, ==, FSCACHE_COOKIE_TYPE_DATAFILE);

        /* We will be updating the cookie too. */
        BUG_ON(!cookie->def->get_aux);

        /* If there's an object, we tell the object state machine to handle the
         * invalidation on our behalf, otherwise there's nothing to do.
         */
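        /* The list is checked unlocked as a fast path, then rechecked under
         * the cookie lock before the event is actually raised. */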
        if (!hlist_empty(&cookie->backing_objects)) {
                spin_lock(&cookie->lock);

                if (!hlist_empty(&cookie->backing_objects) &&
                    !test_and_set_bit(FSCACHE_COOKIE_INVALIDATING,
                                      &cookie->flags)) {
                        object = hlist_entry(cookie->backing_objects.first,
                                             struct fscache_object,
                                             cookie_link);
                        if (object->state < FSCACHE_OBJECT_DYING)
                                fscache_raise_event(
                                        object, FSCACHE_OBJECT_EV_INVALIDATE);
                }

                spin_unlock(&cookie->lock);
        }

        _leave("");
}
EXPORT_SYMBOL(__fscache_invalidate);

/*
 * Wait for object invalidation to complete.
 */
void __fscache_wait_on_invalidate(struct fscache_cookie *cookie)
{
        _enter("%p", cookie);

        wait_on_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING,
                    fscache_wait_bit_interruptible,
                    TASK_UNINTERRUPTIBLE);

        _leave("");
}
EXPORT_SYMBOL(__fscache_wait_on_invalidate);

/*
 * update the index entries backing a cookie
 */
void __fscache_update_cookie(struct fscache_cookie *cookie)
{
        struct fscache_object *object;

        fscache_stat(&fscache_n_updates);

        if (!cookie) {
                fscache_stat(&fscache_n_updates_null);
                _leave(" [no cookie]");
                return;
        }

        _enter("{%s}", cookie->def->name);

        BUG_ON(!cookie->def->get_aux);

        spin_lock(&cookie->lock);

        /* update the index entry on disk in each cache backing this cookie */
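        /* The update itself happens asynchronously: raising
         * FSCACHE_OBJECT_EV_UPDATE just prods each object's state machine. */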
        hlist_for_each_entry(object,
                             &cookie->backing_objects, cookie_link) {
                fscache_raise_event(object, FSCACHE_OBJECT_EV_UPDATE);
        }

        spin_unlock(&cookie->lock);
        _leave("");
}
EXPORT_SYMBOL(__fscache_update_cookie);

/*
 * release a cookie back to the cache
 * - the object will be marked as recyclable on disk if retire is true
 * - all dependents of this cookie must have already been unregistered
 *   (indices/files/pages)
 */
void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
{
        struct fscache_cache *cache;
        struct fscache_object *object;
        unsigned long event;

        fscache_stat(&fscache_n_relinquishes);
        if (retire)
                fscache_stat(&fscache_n_relinquishes_retire);

        if (!cookie) {
                fscache_stat(&fscache_n_relinquishes_null);
                _leave(" [no cookie]");
                return;
        }

        _enter("%p{%s,%p},%d",
               cookie, cookie->def->name, cookie->netfs_data, retire);

        if (atomic_read(&cookie->n_children) != 0) {
                printk(KERN_ERR "FS-Cache: Cookie '%s' still has children\n",
                       cookie->def->name);
                BUG();
        }

        /* wait for the cookie to finish being instantiated (or to fail) */
        if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
                fscache_stat(&fscache_n_relinquishes_waitcrt);
                wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
                            fscache_wait_bit, TASK_UNINTERRUPTIBLE);
        }

        event = retire ? FSCACHE_OBJECT_EV_RETIRE : FSCACHE_OBJECT_EV_RELEASE;

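        /* If an object still has reads outstanding, we drop the cookie lock
         * to sleep until they drain, then restart the scan from the top. */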
try_again:
        spin_lock(&cookie->lock);

        /* break links with all the active objects */
        while (!hlist_empty(&cookie->backing_objects)) {
                int n_reads;
                object = hlist_entry(cookie->backing_objects.first,
                                     struct fscache_object,
                                     cookie_link);

                _debug("RELEASE OBJ%x", object->debug_id);

                set_bit(FSCACHE_COOKIE_WAITING_ON_READS, &cookie->flags);
                n_reads = atomic_read(&object->n_reads);
                if (n_reads) {
                        int n_ops = object->n_ops;
                        int n_in_progress = object->n_in_progress;
                        spin_unlock(&cookie->lock);
                        printk(KERN_ERR "FS-Cache:"
                               " Cookie '%s' still has %d outstanding reads (%d,%d)\n",
                               cookie->def->name,
                               n_reads, n_ops, n_in_progress);
                        wait_on_bit(&cookie->flags, FSCACHE_COOKIE_WAITING_ON_READS,
                                    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
                        printk("Wait finished\n");
                        goto try_again;
                }

                /* detach each cache object from the object cookie */
                spin_lock(&object->lock);
                hlist_del_init(&object->cookie_link);

                cache = object->cache;
                object->cookie = NULL;
                fscache_raise_event(object, event);
                spin_unlock(&object->lock);

                if (atomic_dec_and_test(&cookie->usage))
                        /* the cookie refcount shouldn't be reduced to 0 yet */
                        BUG();
        }

        /* detach pointers back to the netfs */
        cookie->netfs_data = NULL;
        cookie->def = NULL;

        spin_unlock(&cookie->lock);

        if (cookie->parent) {
                ASSERTCMP(atomic_read(&cookie->parent->usage), >, 0);
                ASSERTCMP(atomic_read(&cookie->parent->n_children), >, 0);
                atomic_dec(&cookie->parent->n_children);
        }

        /* finally dispose of the cookie */
        ASSERTCMP(atomic_read(&cookie->usage), >, 0);
        fscache_cookie_put(cookie);

        _leave("");
}
EXPORT_SYMBOL(__fscache_relinquish_cookie);
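
/*
 * Illustrative sketch: at teardown a netfs hands each cookie back through
 * the fscache_relinquish_cookie() wrapper in linux/fscache.h, e.g.:
 *
 *	fscache_relinquish_cookie(cookie, 0);	// keep the cached data
 *	fscache_relinquish_cookie(cookie, 1);	// retire: discard it on disk
 *
 * All child cookies must have been relinquished first, or the n_children
 * check above will BUG().
 */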

/*
 * destroy a cookie
 */
void __fscache_cookie_put(struct fscache_cookie *cookie)
{
        struct fscache_cookie *parent;

        _enter("%p", cookie);

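        /* Free this cookie, then walk up the parent chain iteratively,
         * freeing each ancestor whose usage count drops to zero, rather
         * than recursing. */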
        for (;;) {
                _debug("FREE COOKIE %p", cookie);
                parent = cookie->parent;
                BUG_ON(!hlist_empty(&cookie->backing_objects));
                kmem_cache_free(fscache_cookie_jar, cookie);

                if (!parent)
                        break;

                cookie = parent;
                BUG_ON(atomic_read(&cookie->usage) <= 0);
                if (!atomic_dec_and_test(&cookie->usage))
                        break;
        }

        _leave("");
}