block: initialize request_queue's numa node during struct allocation
struct request_queue is allocated with __GFP_ZERO so its "node" field is
zero before initialization.  This causes an oops if node 0 is offline in
the page allocator because its zonelists are not initialized.  From Dave
Young's dmesg:

	SRAT: Node 1 PXM 2 0-d0000000
	SRAT: Node 1 PXM 2 100000000-330000000
	SRAT: Node 0 PXM 1 330000000-630000000
	Initmem setup node 1 0000000000000000-000000000affb000
	...
	Built 1 zonelists in Node order, mobility grouping on.
	...
	BUG: unable to handle kernel paging request at 0000000000001c08
	IP: [<ffffffff8111c355>] __alloc_pages_nodemask+0xb5/0x870

and __alloc_pages_nodemask+0xb5 translates to a NULL pointer dereference on
zonelist->_zonerefs.

The fix is to initialize q->node at the time of allocation so the correct
node is passed to the slab allocator later.

Since blk_init_allocated_queue_node() is no longer needed, merge it with
blk_init_allocated_queue().

[rientjes@google.com: changelog, initializing q->node]
Cc: stable@vger.kernel.org [2.6.37+]
Reported-by: Dave Young <dyoung@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: David Rientjes <rientjes@google.com>
Tested-by: Dave Young <dyoung@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
b4bbb02934
commit
5151412dd4
|
@ -467,6 +467,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
|
||||||
q->backing_dev_info.state = 0;
|
q->backing_dev_info.state = 0;
|
||||||
q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
|
q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
|
||||||
q->backing_dev_info.name = "block";
|
q->backing_dev_info.name = "block";
|
||||||
|
q->node = node_id;
|
||||||
|
|
||||||
err = bdi_init(&q->backing_dev_info);
|
err = bdi_init(&q->backing_dev_info);
|
||||||
if (err) {
|
if (err) {
|
||||||
|
@ -551,7 +552,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
|
||||||
if (!uninit_q)
|
if (!uninit_q)
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
q = blk_init_allocated_queue_node(uninit_q, rfn, lock, node_id);
|
q = blk_init_allocated_queue(uninit_q, rfn, lock);
|
||||||
if (!q)
|
if (!q)
|
||||||
blk_cleanup_queue(uninit_q);
|
blk_cleanup_queue(uninit_q);
|
||||||
|
|
||||||
|
@ -562,19 +563,10 @@ EXPORT_SYMBOL(blk_init_queue_node);
|
||||||
struct request_queue *
|
struct request_queue *
|
||||||
blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
|
blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
|
||||||
spinlock_t *lock)
|
spinlock_t *lock)
|
||||||
{
|
|
||||||
return blk_init_allocated_queue_node(q, rfn, lock, -1);
|
|
||||||
}
|
|
||||||
EXPORT_SYMBOL(blk_init_allocated_queue);
|
|
||||||
|
|
||||||
struct request_queue *
|
|
||||||
blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
|
|
||||||
spinlock_t *lock, int node_id)
|
|
||||||
{
|
{
|
||||||
if (!q)
|
if (!q)
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
q->node = node_id;
|
|
||||||
if (blk_init_free_list(q))
|
if (blk_init_free_list(q))
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
|
@ -604,7 +596,7 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
|
||||||
|
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(blk_init_allocated_queue_node);
|
EXPORT_SYMBOL(blk_init_allocated_queue);
|
||||||
|
|
||||||
int blk_get_queue(struct request_queue *q)
|
int blk_get_queue(struct request_queue *q)
|
||||||
{
|
{
|
||||||
|
|
|
@ -805,9 +805,6 @@ extern void blk_unprep_request(struct request *);
|
||||||
*/
|
*/
|
||||||
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
|
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
|
||||||
spinlock_t *lock, int node_id);
|
spinlock_t *lock, int node_id);
|
||||||
extern struct request_queue *blk_init_allocated_queue_node(struct request_queue *,
|
|
||||||
request_fn_proc *,
|
|
||||||
spinlock_t *, int node_id);
|
|
||||||
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
|
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
|
||||||
extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
|
extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
|
||||||
request_fn_proc *, spinlock_t *);
|
request_fn_proc *, spinlock_t *);
|
||||||
|
|
Loading…
Reference in New Issue