// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename: target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <linux/t10-pi.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
static DEFINE_MUTEX(device_mutex);
static LIST_HEAD(device_list);
static DEFINE_IDR(devices_idr);
2011-07-19 16:55:10 +08:00
static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;
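
/*
 * Fast-path mapped LUN lookup for I/O submission: resolve
 * se_cmd->orig_fe_lun through the node ACL's RCU-protected
 * lun_entry_hlist, account the command, and pin the LUN with a percpu
 * se_lun->lun_ref reference for the life of the command.
 */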
sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_dev_entry *deve;
	sense_reason_t ret = TCM_NO_SENSE;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
	if (deve) {
		atomic_long_inc(&deve->total_cmds);

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->write_bytes);
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->read_bytes);

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    deve->lun_access_ro) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->fabric_name,
				se_cmd->orig_fe_lun);
			rcu_read_unlock();
			return TCM_WRITE_PROTECTED;
		}

		se_lun = rcu_dereference(deve->se_lun);
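
		/*
		 * percpu_ref_tryget_live() fails once ->lun_ref has been
		 * killed during LUN shutdown, so new I/O cannot race with
		 * core_tpg_remove_lun() clearing the RCU-protected
		 * lun_se_dev pointer (see the lun_ref NULL-dereference
		 * fix in the commit history).
		 */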
		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
out_unlock:
	rcu_read_unlock();

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (se_cmd->orig_fe_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08llx from %s\n",
				se_cmd->se_tfo->fabric_name,
				se_cmd->orig_fe_lun,
				nacl->initiatorname);
			return TCM_NON_EXISTENT_LUN;
		}

		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE))
			return TCM_WRITE_PROTECTED;

		se_lun = se_sess->se_tpg->tpg_virt_lun0;
		if (!percpu_ref_tryget_live(&se_lun->lun_ref))
			return TCM_NON_EXISTENT_LUN;

		se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
	/*
	 * RCU reference protected by percpu se_lun->lun_ref taken above that
	 * must drop to zero (including initial reference) before this se_lun
	 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
	 * target_core_fabric_configfs.c:target_fabric_port_release
	 */
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	atomic_long_inc(&se_cmd->se_dev->num_cmds);

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->read_bytes);

	return ret;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);
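
/*
 * TMR variant of the LUN lookup: the same RCU se_dev_entry lookup and
 * lun_ref pin as transport_lookup_cmd_lun(), plus linking the TMR
 * request onto the device's dev_tmr_list under se_tmr_lock.
 */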
int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
	if (deve) {
		se_lun = rcu_dereference(deve->se_lun);

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
out_unlock:
	rcu_read_unlock();

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08llx for %s\n",
			se_cmd->se_tfo->fabric_name,
			se_cmd->orig_fe_lun,
			nacl->initiatorname);
		return -ENODEV;
	}
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);
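
/*
 * Reports whether this initiator's mapping of the LUN is read-only;
 * used, for example, by MODE SENSE emulation when deciding whether to
 * report the WP bit.
 */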
bool target_lun_is_rdonly(struct se_cmd *cmd)
{
	struct se_session *se_sess = cmd->se_sess;
	struct se_dev_entry *deve;
	bool ret;

	rcu_read_lock();
	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
	ret = deve && deve->lun_access_ro;
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
 */
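/*
 * The pr_kref reference is dropped with kref_put(&deve->pr_kref,
 * target_pr_kref_release), whose completion of ->pr_comp lets
 * core_disable_device_list_for_node() finish waiting out any
 * outstanding PR references.
 */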
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_portal_group *tpg = nacl->se_tpg;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		lun = rcu_dereference(deve->se_lun);
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->fabric_name);
			continue;
		}
		if (lun->lun_rtpi != rtpi)
			continue;

		kref_get(&deve->pr_kref);
		rcu_read_unlock();

		return deve;
	}
	rcu_read_unlock();

	return NULL;
}

void core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		struct se_lun *lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	}
	mutex_unlock(&nacl->lun_entry_mutex);
}
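
/*
 * Update the read-only flag of an existing mapped LUN entry under
 * nacl->lun_entry_mutex; updaters take the mutex rather than
 * rcu_read_lock() so they may block while walking lun_entry_hlist.
 */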
void core_update_device_list_access(
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, mapped_lun);
	if (deve)
		deve->lun_access_ro = lun_access_ro;
	mutex_unlock(&nacl->lun_entry_mutex);
}

/*
 * Called with rcu_read_lock or nacl->lun_entry_mutex held.
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
{
	struct se_dev_entry *deve;

	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		if (deve->mapped_lun == mapped_lun)
			return deve;

	return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);

void target_pr_kref_release(struct kref *kref)
{
	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
						 pr_kref);
	complete(&deve->pr_comp);
}
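
/*
 * Queue a REPORTED LUNS DATA HAS CHANGED unit attention (ASC 0x3F) on
 * every other mapped LUN of this node ACL, so the initiator knows to
 * re-issue REPORT LUNS.
 */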
static void
target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
			     bool skip_new)
{
	struct se_dev_entry *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
		if (skip_new && tmp == new)
			continue;
		core_scsi3_ua_allocate(tmp, 0x3F,
				       ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
	}
	rcu_read_unlock();
}
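
/*
 * Create (or, for an existing mapped_lun, replace) the se_dev_entry
 * published on nacl->lun_entry_hlist for RCU readers.
 */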
int core_enable_device_list_for_node(
struct se_lun *lun,
struct se_lun_acl *lun_acl,
u64 mapped_lun,
bool lun_access_ro,
struct se_node_acl *nacl,
struct se_portal_group *tpg)
{
target: Convert se_node_acl->device_list[] to RCU hlist This patch converts se_node_acl->device_list[] table for mappedluns to modern RCU hlist_head usage in order to support an arbitrary number of node_acl lun mappings. It converts transport_lookup_*_lun() fast-path code to use RCU read path primitives when looking up se_dev_entry. It adds a new hlist_head at se_node_acl->lun_entry_hlist for this purpose. For transport_lookup_cmd_lun() code, it works with existing per-cpu se_lun->lun_ref when associating se_cmd with se_lun + se_device. Also, go ahead and update core_create_device_list_for_node() + core_free_device_list_for_node() to use ->lun_entry_hlist. It also converts se_dev_entry->pr_ref_count access to use modern struct kref counting, and updates core_disable_device_list_for_node() to kref_put() and block on se_deve->pr_comp waiting for outstanding PR special-case PR references to drop, then invoke kfree_rcu() to wait for the RCU grace period to complete before releasing memory. So now that se_node_acl->lun_entry_hlist fast path access uses RCU protected pointers, go ahead and convert remaining non-fast path RCU updater code using ->lun_entry_lock to struct mutex to allow callers to block while walking se_node_acl->lun_entry_hlist. Finally drop the left-over core_clear_initiator_node_from_tpg() that originally cleared lun_access during se_node_acl shutdown, as post RCU conversion it now becomes duplicated logic. Reviewed-by: Hannes Reinecke <hare@suse.de> Cc: Christoph Hellwig <hch@lst.de> Cc: Sagi Grimberg <sagig@mellanox.com> Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
2015-03-23 11:42:19 +08:00
struct se_dev_entry *orig, *new;
new = kzalloc(sizeof(*new), GFP_KERNEL);
if (!new) {
pr_err("Unable to allocate se_dev_entry memory\n");
return -ENOMEM;
}
spin_lock_init(&new->ua_lock);
INIT_LIST_HEAD(&new->ua_list);
INIT_LIST_HEAD(&new->lun_link);
new->mapped_lun = mapped_lun;
kref_init(&new->pr_kref);
init_completion(&new->pr_comp);
new->lun_access_ro = lun_access_ro;
new->creation_time = get_jiffies_64();
new->attach_count++;
mutex_lock(&nacl->lun_entry_mutex);
orig = target_nacl_find_deve(nacl, mapped_lun);
if (orig && orig->se_lun) {
struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
lockdep_is_held(&nacl->lun_entry_mutex));
if (orig_lun != lun) {
pr_err("Existing orig->se_lun doesn't match new lun"
" for dynamic -> explicit NodeACL conversion:"
" %s\n", nacl->initiatorname);
mutex_unlock(&nacl->lun_entry_mutex);
kfree(new);
return -EINVAL;
}
if (orig->se_lun_acl != NULL) {
pr_warn_ratelimited("Detected existing explicit"
" se_lun_acl->se_lun_group reference for %s"
" mapped_lun: %llu, failing\n",
nacl->initiatorname, mapped_lun);
mutex_unlock(&nacl->lun_entry_mutex);
kfree(new);
return -EINVAL;
}
rcu_assign_pointer(new->se_lun, lun);
rcu_assign_pointer(new->se_lun_acl, lun_acl);
hlist_del_rcu(&orig->link);
hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
mutex_unlock(&nacl->lun_entry_mutex);
spin_lock(&lun->lun_deve_lock);
list_del(&orig->lun_link);
list_add_tail(&new->lun_link, &lun->lun_deve_list);
spin_unlock(&lun->lun_deve_lock);
kref_put(&orig->pr_kref, target_pr_kref_release);
wait_for_completion(&orig->pr_comp);
target_luns_data_has_changed(nacl, new, true);
kfree_rcu(orig, rcu_head);
return 0;
}
rcu_assign_pointer(new->se_lun, lun);
rcu_assign_pointer(new->se_lun_acl, lun_acl);
hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
mutex_unlock(&nacl->lun_entry_mutex);
spin_lock(&lun->lun_deve_lock);
list_add_tail(&new->lun_link, &lun->lun_deve_list);
spin_unlock(&lun->lun_deve_lock);
target_luns_data_has_changed(nacl, new, true);
return 0;
}
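/*
 * Reader-side sketch (illustrative only, not code from this file): the
 * rcu_assign_pointer() + hlist_add_head_rcu() publication above pairs
 * with lockless lookups that walk nacl->lun_entry_hlist under
 * rcu_read_lock(), along the lines of:
 *
 *	rcu_read_lock();
 *	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
 *		if (deve->mapped_lun == mapped_lun) {
 *			lun = rcu_dereference(deve->se_lun);
 *			break;
 *		}
 *	}
 *	rcu_read_unlock();
 *
 * which is why updates above are serialized by nacl->lun_entry_mutex
 * and a replaced entry is only freed via kfree_rcu() after the RCU
 * grace period.
 */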
void core_disable_device_list_for_node(
struct se_lun *lun,
struct se_dev_entry *orig,
struct se_node_acl *nacl,
struct se_portal_group *tpg)
{
/*
* rcu_dereference_raw protected by se_lun->lun_group symlink
* reference to se_device->dev_group.
*/
struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
lockdep_assert_held(&nacl->lun_entry_mutex);
/*
* If the MappedLUN entry is being disabled, the entry in
* lun->lun_deve_list must be removed now before clearing the
* struct se_dev_entry pointers below as logic in
* core_alua_do_transition_tg_pt() depends on these being present.
*
* deve->se_lun_acl will be NULL for demo-mode created LUNs
* that have not been explicitly converted to MappedLUNs ->
* struct se_lun_acl, but we remove deve->lun_link from
* lun->lun_deve_list. This also means that active UAs and
* NodeACL context specific PR metadata for demo-mode
* MappedLUN *deve will be released below.
*/
spin_lock(&lun->lun_deve_lock);
list_del(&orig->lun_link);
spin_unlock(&lun->lun_deve_lock);
/*
* Disable struct se_dev_entry LUN ACL mapping
*/
core_scsi3_ua_release_all(orig);
hlist_del_rcu(&orig->link);
clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
orig->lun_access_ro = false;
orig->creation_time = 0;
orig->attach_count--;
/*
* Before firing off RCU callback, wait for any in process SPEC_I_PT=1
* or REGISTER_AND_MOVE PR operation to complete.
*/
kref_put(&orig->pr_kref, target_pr_kref_release);
wait_for_completion(&orig->pr_comp);
rcu_assign_pointer(orig->se_lun, NULL);
rcu_assign_pointer(orig->se_lun_acl, NULL);
kfree_rcu(orig, rcu_head);
core_scsi3_free_pr_reg_from_nacl(dev, nacl);
target_luns_data_has_changed(nacl, NULL, false);
}
/* core_clear_lun_from_tpg():
*
*
*/
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
struct se_node_acl *nacl;
struct se_dev_entry *deve;
mutex_lock(&tpg->acl_node_mutex);
list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
mutex_lock(&nacl->lun_entry_mutex);
hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
lockdep_is_held(&nacl->lun_entry_mutex));
if (lun != tmp_lun)
continue;
core_disable_device_list_for_node(lun, deve, nacl, tpg);
}
mutex_unlock(&nacl->lun_entry_mutex);
}
mutex_unlock(&tpg->acl_node_mutex);
}
int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
{
struct se_lun *tmp;
spin_lock(&dev->se_port_lock);
if (dev->export_count == 0x0000ffff) {
pr_warn("Reached dev->dev_port_count =="
" 0x0000ffff\n");
spin_unlock(&dev->se_port_lock);
return -ENOSPC;
}
again:
/*
* Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
* Here is the table from spc4r17 section 7.7.3.8.
*
* Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
*
* Code Description
* 0h Reserved
* 1h Relative port 1, historically known as port A
* 2h Relative port 2, historically known as port B
* 3h to FFFFh Relative port 3 through 65 535
*/
lun->lun_rtpi = dev->dev_rpti_counter++;
if (!lun->lun_rtpi)
goto again;
list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
/*
* Make sure the RELATIVE TARGET PORT IDENTIFIER is still unique
* after a 16-bit counter wraparound.
*/
if (lun->lun_rtpi == tmp->lun_rtpi)
goto again;
}
spin_unlock(&dev->se_port_lock);
return 0;
}
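/*
 * Illustrative allocation sequence (assumed starting state, not taken
 * from this file): with dev_rpti_counter == 0xfffe and RTPIs 1 and 2
 * still on dev->dev_sep_list, successive calls hand out 0xfffe and
 * 0xffff, the counter then wraps to 0 (rejected by the !lun->lun_rtpi
 * check, since 0h is reserved), 1 and 2 are rejected by the uniqueness
 * scan, and 3 is finally returned.
 */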
static void se_release_vpd_for_dev(struct se_device *dev)
{
struct t10_vpd *vpd, *vpd_tmp;
spin_lock(&dev->t10_wwn.t10_vpd_lock);
list_for_each_entry_safe(vpd, vpd_tmp,
&dev->t10_wwn.t10_vpd_list, vpd_list) {
list_del(&vpd->vpd_list);
kfree(vpd);
}
spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}
static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
u32 aligned_max_sectors;
u32 alignment;
/*
* Limit max_sectors to a PAGE_SIZE aligned value for modern
* transport_allocate_data_tasks() operation.
*/
alignment = max(1ul, PAGE_SIZE / block_size);
aligned_max_sectors = rounddown(max_sectors, alignment);
if (max_sectors != aligned_max_sectors)
pr_info("Rounding down aligned max_sectors from %u to %u\n",
max_sectors, aligned_max_sectors);
return aligned_max_sectors;
}
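/*
 * Worked example (illustrative values): with block_size == 512 and
 * PAGE_SIZE == 4096, alignment = max(1, 4096 / 512) = 8, so a
 * max_sectors of 1023 is rounded down to 1016. When block_size exceeds
 * PAGE_SIZE the division yields 0 and the max(1ul, ...) clamp leaves
 * max_sectors untouched.
 */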
int core_dev_add_lun(
struct se_portal_group *tpg,
struct se_device *dev,
struct se_lun *lun)
{
int rc;
rc = core_tpg_add_lun(tpg, lun, false, dev);
if (rc < 0)
return rc;
pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
" CORE HBA: %u\n", tpg->se_tpg_tfo->fabric_name,
tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
tpg->se_tpg_tfo->fabric_name, dev->se_hba->hba_id);
/*
* Update LUN maps for dynamically added initiators when
* generate_node_acl is enabled.
*/
if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
struct se_node_acl *acl;
mutex_lock(&tpg->acl_node_mutex);
list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
if (acl->dynamic_node_acl &&
(!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
core_tpg_add_node_to_devs(acl, tpg, lun);
}
}
mutex_unlock(&tpg->acl_node_mutex);
}
return 0;
}
/* core_dev_del_lun():
*
*
*/
void core_dev_del_lun(
struct se_portal_group *tpg,
struct se_lun *lun)
{
pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
" device object\n", tpg->se_tpg_tfo->fabric_name,
tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
tpg->se_tpg_tfo->fabric_name);
core_tpg_remove_lun(tpg, lun);
}
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
struct se_portal_group *tpg,
struct se_node_acl *nacl,
u64 mapped_lun,
int *ret)
{
struct se_lun_acl *lacl;
if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
pr_err("%s InitiatorName exceeds maximum size.\n",
tpg->se_tpg_tfo->fabric_name);
*ret = -EOVERFLOW;
return NULL;
}
lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
if (!lacl) {
pr_err("Unable to allocate memory for struct se_lun_acl.\n");
*ret = -ENOMEM;
return NULL;
}
lacl->mapped_lun = mapped_lun;
lacl->se_lun_nacl = nacl;
return lacl;
}
int core_dev_add_initiator_node_lun_acl(
struct se_portal_group *tpg,
struct se_lun_acl *lacl,
struct se_lun *lun,
bool lun_access_ro)
{
struct se_node_acl *nacl = lacl->se_lun_nacl;
/*
* rcu_dereference_raw protected by se_lun->lun_group symlink
* reference to se_device->dev_group.
*/
struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
if (!nacl)
return -EINVAL;
if (lun->lun_access_ro)
lun_access_ro = true;
lacl->se_lun = lun;
if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
lun_access_ro, nacl, tpg) < 0)
return -EINVAL;
pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
" InitiatorNode: %s\n", tpg->se_tpg_tfo->fabric_name,
tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
lun_access_ro ? "RO" : "RW",
nacl->initiatorname);
/*
* Check to see if there are any existing persistent reservation APTPL
* pre-registrations that need to be enabled for this LUN ACL.
*/
core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
lacl->mapped_lun);
return 0;
}
int core_dev_del_initiator_node_lun_acl(
struct se_lun *lun,
struct se_lun_acl *lacl)
{
struct se_portal_group *tpg = lun->lun_tpg;
struct se_node_acl *nacl;
struct se_dev_entry *deve;
nacl = lacl->se_lun_nacl;
if (!nacl)
return -EINVAL;
mutex_lock(&nacl->lun_entry_mutex);
deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
if (deve)
core_disable_device_list_for_node(lun, deve, nacl, tpg);
mutex_unlock(&nacl->lun_entry_mutex);
pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
" InitiatorNode: %s Mapped LUN: %llu\n",
tpg->se_tpg_tfo->fabric_name,
tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
nacl->initiatorname, lacl->mapped_lun);
return 0;
}
void core_dev_free_initiator_node_lun_acl(
struct se_portal_group *tpg,
struct se_lun_acl *lacl)
{
pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
" Mapped LUN: %llu\n", tpg->se_tpg_tfo->fabric_name,
tpg->se_tpg_tfo->tpg_get_tag(tpg),
tpg->se_tpg_tfo->fabric_name,
lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);
kfree(lacl);
}
static void scsi_dump_inquiry(struct se_device *dev)
{
struct t10_wwn *wwn = &dev->t10_wwn;
int device_type = dev->transport->get_device_type(dev);
/*
* Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
*/
pr_debug(" Vendor: %-" __stringify(INQUIRY_VENDOR_LEN) "s\n",
wwn->vendor);
pr_debug(" Model: %-" __stringify(INQUIRY_MODEL_LEN) "s\n",
wwn->model);
pr_debug(" Revision: %-" __stringify(INQUIRY_REVISION_LEN) "s\n",
wwn->revision);
pr_debug(" Type: %s ", scsi_device_type(device_type));
}
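/*
 * Example ring buffer output (assuming the default "LIO-ORG" vendor
 * preloaded in target_alloc_device() below and, hypothetically, the
 * fileio backend's inquiry strings):
 *
 *	  Vendor: LIO-ORG
 *	  Model: FILEIO
 *	  Revision: 4.0
 *	  Type: Direct-Access
 */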
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
struct se_device *dev;
struct se_lun *xcopy_lun;
int i;
dev = hba->backend->ops->alloc_device(hba, name);
if (!dev)
return NULL;
dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL);
if (!dev->queues) {
dev->transport->free_device(dev);
return NULL;
}
dev->queue_cnt = nr_cpu_ids;
for (i = 0; i < dev->queue_cnt; i++) {
struct se_device_queue *q;
q = &dev->queues[i];
INIT_LIST_HEAD(&q->state_list);
spin_lock_init(&q->lock);
init_llist_head(&q->sq.cmd_list);
INIT_WORK(&q->sq.work, target_queued_submit_work);
}
dev->se_hba = hba;
dev->transport = hba->backend->ops;
dev->transport_flags = dev->transport->transport_flags_default;
dev->prot_length = sizeof(struct t10_pi_tuple);
dev->hba_index = hba->hba_index;
INIT_LIST_HEAD(&dev->dev_sep_list);
INIT_LIST_HEAD(&dev->dev_tmr_list);
INIT_LIST_HEAD(&dev->delayed_cmd_list);
INIT_LIST_HEAD(&dev->qf_cmd_list);
spin_lock_init(&dev->delayed_cmd_lock);
spin_lock_init(&dev->dev_reservation_lock);
spin_lock_init(&dev->se_port_lock);
spin_lock_init(&dev->se_tmr_lock);
spin_lock_init(&dev->qf_cmd_lock);
sema_init(&dev->caw_sem, 1);
INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
INIT_LIST_HEAD(&dev->t10_pr.registration_list);
INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
spin_lock_init(&dev->t10_pr.registration_lock);
spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
spin_lock_init(&dev->t10_alua.lba_map_lock);
INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work);
dev->t10_wwn.t10_dev = dev;
/*
* Use OpenFabrics IEEE Company ID: 00 14 05
*/
dev->t10_wwn.company_id = 0x001405;
dev->t10_alua.t10_dev = dev;
dev->dev_attrib.da_dev = dev;
dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
dev->dev_attrib.emulate_dpo = 1;
dev->dev_attrib.emulate_fua_write = 1;
dev->dev_attrib.emulate_fua_read = 1;
dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
dev->dev_attrib.emulate_ua_intlck_ctrl = TARGET_UA_INTLCK_CTRL_CLEAR;
dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
dev->dev_attrib.emulate_pr = DA_EMULATE_PR;
dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
dev->dev_attrib.is_nonrot = DA_IS_NONROT;
dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
dev->dev_attrib.max_unmap_block_desc_count =
DA_MAX_UNMAP_BLOCK_DESC_COUNT;
dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
dev->dev_attrib.unmap_granularity_alignment =
DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
dev->dev_attrib.unmap_zeroes_data =
DA_UNMAP_ZEROES_DATA_DEFAULT;
dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
xcopy_lun = &dev->xcopy_lun;
rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
init_completion(&xcopy_lun->lun_shutdown_comp);
INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
xcopy_lun->lun_tpg = &xcopy_pt_tpg;
/* Preload the default INQUIRY const values */
strlcpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor));
strlcpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
sizeof(dev->t10_wwn.model));
strlcpy(dev->t10_wwn.revision, dev->transport->inquiry_rev,
sizeof(dev->t10_wwn.revision));
return dev;
}
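/*
 * Note (illustration, not code from this file): the defaults set up in
 * target_alloc_device() above are exposed to userspace through configfs,
 * typically under /sys/kernel/config/target/core/<hba>/<dev>/attrib/,
 * where they can be tuned per device.
 */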
/*
 * Check if the underlying struct block_device request_queue supports
 * discard (QUEUE_FLAG_DISCARD), which backs UNMAP/WRITE SAME in SCSI
 * and TRIM in ATA; if it does, TPE=1 must be reported to the initiator.
 */
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
struct request_queue *q)
{
int block_size = queue_logical_block_size(q);
if (!blk_queue_discard(q))
return false;
attrib->max_unmap_lba_count =
q->limits.max_discard_sectors >> (ilog2(block_size) - 9);
	/*
	 * Currently hardcoded to 1 in Linux/SCSI code.
	 */
attrib->max_unmap_block_desc_count = 1;
attrib->unmap_granularity = q->limits.discard_granularity / block_size;
attrib->unmap_granularity_alignment = q->limits.discard_alignment /
block_size;
attrib->unmap_zeroes_data = !!(q->limits.max_write_zeroes_sectors);
return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_queue);
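/*
 * Illustrative sketch (hypothetical backend helper, not part of this
 * file): a block-backed backend would typically call
 * target_configure_unmap_from_queue() from its ->configure_device()
 * callback, along these lines:
 */
static void example_setup_unmap(struct se_device *dev,
				struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	/* Propagate the queue's discard limits into dev_attrib; TPE is
	 * reported to the initiator only when this succeeds. */
	if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
		pr_debug("discard/UNMAP support detected from queue limits\n");
}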
/*
* Convert from blocksize advertised to the initiator to the 512 byte
* units unconditionally used by the Linux block layer.
*/
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
{
switch (dev->dev_attrib.block_size) {
case 4096:
return lb << 3;
case 2048:
return lb << 2;
case 1024:
return lb << 1;
default:
return lb;
}
}
EXPORT_SYMBOL(target_to_linux_sector);
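/*
 * Worked example: a device exporting a 4096-byte logical block size
 * maps initiator LBA 10 to 512-byte sector 10 << 3 == 80, while a
 * 512-byte device passes LBAs through unchanged.
 */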
struct devices_idr_iter {
struct config_item *prev_item;
int (*fn)(struct se_device *dev, void *data);
void *data;
};
static int target_devices_idr_iter(int id, void *p, void *data)
__must_hold(&device_mutex)
{
struct devices_idr_iter *iter = data;
struct se_device *dev = p;
int ret;
config_item_put(iter->prev_item);
iter->prev_item = NULL;
/*
 * We add the device early to the idr, so it can be used
 * by backend modules during configuration. We do not want
 * to allow other callers to access partially set-up devices,
 * so we skip them here.
 */
if (!target_dev_configured(dev))
return 0;
iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item);
if (!iter->prev_item)
return 0;
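	/*
	 * Drop device_mutex while calling into fn: the config_item
	 * reference taken above keeps the device alive, and fn may need
	 * to take locks (e.g. configfs mutexes) that must not nest
	 * inside device_mutex.
	 */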
mutex_unlock(&device_mutex);
ret = iter->fn(dev, iter->data);
mutex_lock(&device_mutex);
return ret;
}
/**
 * target_for_each_device - iterate over configured devices
 * @fn: iterator function
 * @data: pointer to data that will be passed to fn
 *
 * @fn must return 0 to continue looping over devices; a non-zero return
 * breaks out of the loop and is returned to the caller.
 */
int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
void *data)
{
struct devices_idr_iter iter = { .fn = fn, .data = data };
int ret;
mutex_lock(&device_mutex);
ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
mutex_unlock(&device_mutex);
config_item_put(iter.prev_item);
return ret;
}
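/*
 * Illustrative sketch (hypothetical callback, not part of this file):
 * counting configured devices via target_for_each_device().
 */
static int example_count_devices(struct se_device *dev, void *data)
{
	unsigned int *count = data;

	(*count)++;
	return 0;	/* 0 == keep iterating */
}
/*
 * Usage (sketch):
 *	unsigned int count = 0;
 *	target_for_each_device(example_count_devices, &count);
 */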
int target_configure_device(struct se_device *dev)
{
struct se_hba *hba = dev->se_hba;
int ret, id;
if (target_dev_configured(dev)) {
pr_err("se_dev->se_dev_ptr already set for storage"
" object\n");
return -EEXIST;
}
/*
 * Add the device to the idr early so modules like tcmu can use
 * it during their configuration.
 */
mutex_lock(&device_mutex);
/*
* Use cyclic to try and avoid collisions with devices
* that were recently removed.
*/
id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL);
mutex_unlock(&device_mutex);
if (id < 0) {
ret = -ENOMEM;
goto out;
}
dev->dev_index = id;
ret = dev->transport->configure_device(dev);
if (ret)
goto out_free_index;
/*
 * XXX: there is not much point in having two different values here.
 */
dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;
/*
* Align max_hw_sectors down to PAGE_SIZE I/O transfers
*/
dev->dev_attrib.hw_max_sectors =
se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
dev->dev_attrib.hw_block_size);
dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;
dev->creation_time = get_jiffies_64();
ret = core_setup_alua(dev);
if (ret)
goto out_destroy_device;
/*
 * Set up the work queue for QUEUE_FULL handling
 */
INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
scsi_dump_inquiry(dev);
spin_lock(&hba->device_lock);
hba->dev_count++;
spin_unlock(&hba->device_lock);
dev->dev_flags |= DF_CONFIGURED;
return 0;
out_destroy_device:
dev->transport->destroy_device(dev);
out_free_index:
mutex_lock(&device_mutex);
idr_remove(&devices_idr, dev->dev_index);
mutex_unlock(&device_mutex);
out:
se_release_vpd_for_dev(dev);
return ret;
}
void target_free_device(struct se_device *dev)
{
struct se_hba *hba = dev->se_hba;
WARN_ON(!list_empty(&dev->dev_sep_list));
if (target_dev_configured(dev)) {
dev->transport->destroy_device(dev);
mutex_lock(&device_mutex);
idr_remove(&devices_idr, dev->dev_index);
mutex_unlock(&device_mutex);
spin_lock(&hba->device_lock);
hba->dev_count--;
spin_unlock(&hba->device_lock);
}
core_alua_free_lu_gp_mem(dev);
core_alua_set_lba_map(dev, NULL, 0, 0);
core_scsi3_free_all_registrations(dev);
se_release_vpd_for_dev(dev);
if (dev->transport->free_prot)
dev->transport->free_prot(dev);
kfree(dev->queues);
dev->transport->free_device(dev);
}
int core_dev_setup_virtual_lun0(void)
{
struct se_hba *hba;
struct se_device *dev;
char buf[] = "rd_pages=8,rd_nullio=1,rd_dummy=1";
int ret;
hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
if (IS_ERR(hba))
return PTR_ERR(hba);
dev = target_alloc_device(hba, "virt_lun0");
if (!dev) {
ret = -ENOMEM;
goto out_free_hba;
}
hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));
ret = target_configure_device(dev);
if (ret)
goto out_free_se_dev;
lun0_hba = hba;
g_lun0_dev = dev;
return 0;
out_free_se_dev:
target_free_device(dev);
out_free_hba:
core_delete_hba(hba);
return ret;
}
void core_dev_release_virtual_lun0(void)
{
struct se_hba *hba = lun0_hba;
if (!hba)
return;
if (g_lun0_dev)
target_free_device(g_lun0_dev);
core_delete_hba(hba);
}
/*
* Common CDB parsing for kernel and user passthrough.
*/
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
unsigned char *cdb = cmd->t_task_cdb;
struct se_device *dev = cmd->se_dev;
unsigned int size;
/*
 * For REPORT LUNS we always need to emulate the response; for
 * everything else, pass the command through.
 */
if (cdb[0] == REPORT_LUNS) {
cmd->execute_cmd = spc_emulate_report_luns;
return TCM_NO_SENSE;
}
/*
* With emulate_pr disabled, all reservation requests should fail,
* regardless of whether or not TRANSPORT_FLAG_PASSTHROUGH_PGR is set.
*/
if (!dev->dev_attrib.emulate_pr &&
((cdb[0] == PERSISTENT_RESERVE_IN) ||
(cdb[0] == PERSISTENT_RESERVE_OUT) ||
(cdb[0] == RELEASE || cdb[0] == RELEASE_10) ||
(cdb[0] == RESERVE || cdb[0] == RESERVE_10))) {
return TCM_UNSUPPORTED_SCSI_OPCODE;
}
/*
* For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to
* emulate the response, since tcmu does not have the information
* required to process these commands.
*/
if (!(dev->transport_flags &
TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
if (cdb[0] == PERSISTENT_RESERVE_IN) {
cmd->execute_cmd = target_scsi3_emulate_pr_in;
size = get_unaligned_be16(&cdb[7]);
return target_cmd_size_check(cmd, size);
}
if (cdb[0] == PERSISTENT_RESERVE_OUT) {
cmd->execute_cmd = target_scsi3_emulate_pr_out;
size = get_unaligned_be32(&cdb[5]);
return target_cmd_size_check(cmd, size);
}
if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) {
cmd->execute_cmd = target_scsi2_reservation_release;
if (cdb[0] == RELEASE_10)
size = get_unaligned_be16(&cdb[7]);
else
size = cmd->data_length;
return target_cmd_size_check(cmd, size);
}
if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) {
cmd->execute_cmd = target_scsi2_reservation_reserve;
if (cdb[0] == RESERVE_10)
size = get_unaligned_be16(&cdb[7]);
else
size = cmd->data_length;
return target_cmd_size_check(cmd, size);
}
}
/* Set DATA_CDB flag for ops that should have it */
switch (cdb[0]) {
case READ_6:
case READ_10:
case READ_12:
case READ_16:
case WRITE_6:
case WRITE_10:
case WRITE_12:
case WRITE_16:
case WRITE_VERIFY:
case WRITE_VERIFY_12:
case WRITE_VERIFY_16:
case COMPARE_AND_WRITE:
case XDWRITEREAD_10:
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
break;
case VARIABLE_LENGTH_CMD:
switch (get_unaligned_be16(&cdb[8])) {
case READ_32:
case WRITE_32:
case WRITE_VERIFY_32:
case XDWRITEREAD_32:
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
break;
}
}
cmd->execute_cmd = exec_cmd;
return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);
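/*
 * Illustrative sketch (hypothetical passthrough backend; both function
 * names below are assumptions, not real symbols): a passthrough backend
 * wires its queueing routine through passthrough_parse_cdb() from its
 * ->parse_cdb() hook.
 */
static sense_reason_t example_queue_cmd(struct se_cmd *cmd)
{
	/* Hand the CDB to the hypothetical backend's queueing machinery. */
	return TCM_NO_SENSE;
}

static sense_reason_t example_parse_cdb(struct se_cmd *cmd)
{
	return passthrough_parse_cdb(cmd, example_queue_cmd);
}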