for-5.17/drivers-2022-01-11
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmHd8EIQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgpnOKEADGpxp+Vntbm8nZI/PFP5fA2gUTZWgSVB4l
axVTYW21pjSrsrAhGg2FIgBgL0tNkgxQnIPRn50YL8jT3pTkCEcR7kLbhEU7W/Ln
7hrsBgFnsCBoCs38LvzXHZD69jtEtNRk1ijPMLo5iCcHkAyUVKa1glfeMwefuI5/
Rl8SoueRXppvCfwNPptaAKiDsYVN8KCJPvvhlMNoKP5n1iTsNYJ/HVsLqfRnP0oc
CR6eHaYceWGLER8tWtBlG2Qp40+cd/A320thkIlEpEKJPWE/ce5AUp0PYxVJbwjU
qvO1tMYSya7gPiaVWRJcUeAgRFiivM/kTdDrGwiY9hpv/BQG7EAW5D9Xecz/M4UG
BgNLfhe0aR9QssjPxITgyiy9sRpwwpnpoVONTu3slgXVTUVlOq0QT6LOTPR1B9A4
ZjbHVCuI3eyrAOqD4IjYSqjHa6GjFLiKTh8Q0ZB/KJGX1eItLVLVdJfcfV4RkBIf
6RZg9+7/mXaDxU74DZ2tfUhHT0sC5RS+5VFxpkhThVk9qRbVdZGGWAHcVOkMjk9B
L4PCpJeuaR+rzXvCDOCOI5sHraa5F/IRhMaTu5sHj/MIuEpq1fqjaB7tWRvfm6HO
4tepUtb++rS3/zFFQlZCLyjVk2o0p2b0viwPLjvsRqsBp1bVoO9mJIiyp6POmM3G
UjxQS0vEDw==
=k0IZ
-----END PGP SIGNATURE-----

Merge tag 'for-5.17/drivers-2022-01-11' of git://git.kernel.dk/linux-block

Pull block driver updates from Jens Axboe:

 - mtip32xx pci cleanups (Bjorn)

 - mtip32xx conversion to generic power management (Vaibhav)

 - rsxx pci powermanagement cleanups (Bjorn)

 - Remove the rsxx driver. This hardware never saw much adoption, and
   it's been end of lifed for a while. (Christoph)

 - MD pull request from Song:
     - REQ_NOWAIT support (Vishal Verma)
     - raid6 benchmark optimization (Dirk Müller)
     - Fix for acct bioset (Xiao Ni)
     - Clean up max_queued_requests (Mariusz Tkaczyk)
     - PREEMPT_RT optimization (Davidlohr Bueso)
     - Use default_groups in kobj_type (Greg Kroah-Hartman)

 - Use attribute groups in pktcdvd and rnbd (Greg)

 - NVMe pull request from Christoph:
     - increment request genctr on completion (Keith Busch, Geliang Tang)
     - add a 'iopolicy' module parameter (Hannes Reinecke)
     - print out valid arguments when reading from /dev/nvme-fabrics
       (Hannes Reinecke)

 - Use struct_group() in drbd (Kees)

 - null_blk fixes (Ming)

 - Get rid of congestion logic in pktcdvd (Neil)

 - Floppy ejection hang fix (Tasos)

 - Floppy max user request size fix (Xiongwei)

 - Loop locking fix (Tetsuo)

* tag 'for-5.17/drivers-2022-01-11' of git://git.kernel.dk/linux-block: (32 commits)
  md: use default_groups in kobj_type
  md: Move alloc/free acct bioset in to personality
  lib/raid6: Use strict priority ranking for pq gen() benchmarking
  lib/raid6: skip benchmark of non-chosen xor_syndrome functions
  md: fix spelling of "its"
  md: raid456 add nowait support
  md: raid10 add nowait support
  md: raid1 add nowait support
  md: add support for REQ_NOWAIT
  md: drop queue limitation for RAID1 and RAID10
  md/raid5: play nice with PREEMPT_RT
  block/rnbd-clt-sysfs: use default_groups in kobj_type
  pktcdvd: convert to use attribute groups
  block: null_blk: only set set->nr_maps as 3 if active poll_queues is > 0
  nvme: add 'iopolicy' module parameter
  nvme: drop unused variable ctrl in nvme_setup_cmd
  nvme: increment request genctr on completion
  nvme-fabrics: print out valid arguments when reading from /dev/nvme-fabrics
  block: remove the rsxx driver
  rsxx: Drop PCI legacy power management
  ...
commit c9193f48e9
@@ -7489,12 +7489,6 @@ F: Documentation/firmware_class/
 F: drivers/base/firmware_loader/
 F: include/linux/firmware.h
 
-FLASH ADAPTER DRIVER (IBM Flash Adapter 900GB Full Height PCI Flash Card)
-M: Joshua Morris <josh.h.morris@us.ibm.com>
-M: Philip Kelleher <pjk1939@linux.ibm.com>
-S: Maintained
-F: drivers/block/rsxx/
-
 FLEXTIMER FTM-QUADDEC DRIVER
 M: Patrick Havelange <patrick.havelange@essensium.com>
 L: linux-iio@vger.kernel.org
@@ -392,17 +392,6 @@ config BLK_DEV_RBD
 
	  If unsure, say N.
 
-config BLK_DEV_RSXX
-	tristate "IBM Flash Adapter 900GB Full Height PCIe Device Driver"
-	depends on PCI
-	select CRC32
-	help
-	  Device driver for IBM's high speed PCIe SSD
-	  storage device: Flash Adapter 900GB Full Height.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called rsxx.
-
 source "drivers/block/rnbd/Kconfig"
 
 endif # BLK_DEV
@@ -34,7 +34,6 @@ obj-$(CONFIG_BLK_DEV_DRBD) += drbd/
 obj-$(CONFIG_BLK_DEV_RBD) += rbd.o
 obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx/
 
-obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/
 obj-$(CONFIG_ZRAM) += zram/
 obj-$(CONFIG_BLK_DEV_RNBD) += rnbd/
 
@@ -729,7 +729,8 @@ int drbd_send_sync_param(struct drbd_peer_device *peer_device)
	cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
 
	/* initialize verify_alg and csums_alg */
-	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
+	BUILD_BUG_ON(sizeof(p->algs) != 2 * SHARED_SECRET_MAX);
+	memset(&p->algs, 0, sizeof(p->algs));
 
	if (get_ldev(peer_device->device)) {
		dc = rcu_dereference(peer_device->device->ldev->disk_conf);
@@ -283,8 +283,10 @@ struct p_rs_param_89 {
 
 struct p_rs_param_95 {
	u32 resync_rate;
-	char verify_alg[SHARED_SECRET_MAX];
-	char csums_alg[SHARED_SECRET_MAX];
+	struct_group(algs,
+		char verify_alg[SHARED_SECRET_MAX];
+		char csums_alg[SHARED_SECRET_MAX];
+	);
	u32 c_plan_ahead;
	u32 c_delay_target;
	u32 c_fill_target;
@@ -3921,7 +3921,8 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
 
	/* initialize verify_alg and csums_alg */
	p = pi->data;
-	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
+	BUILD_BUG_ON(sizeof(p->algs) != 2 * SHARED_SECRET_MAX);
+	memset(&p->algs, 0, sizeof(p->algs));
 
	err = drbd_recv_all(peer_device->connection, p, header_size);
	if (err)
@@ -1015,7 +1015,7 @@ static DECLARE_DELAYED_WORK(fd_timer, fd_timer_workfn);
 static void cancel_activity(void)
 {
	do_floppy = NULL;
-	cancel_delayed_work_sync(&fd_timer);
+	cancel_delayed_work(&fd_timer);
	cancel_work_sync(&floppy_work);
 }
 
@@ -3081,6 +3081,8 @@ static void raw_cmd_free(struct floppy_raw_cmd **ptr)
	}
 }
 
+#define MAX_LEN (1UL << MAX_ORDER << PAGE_SHIFT)
+
 static int raw_cmd_copyin(int cmd, void __user *param,
			  struct floppy_raw_cmd **rcmd)
 {
@@ -3108,7 +3110,7 @@ loop:
	ptr->resultcode = 0;
 
	if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
-		if (ptr->length <= 0)
+		if (ptr->length <= 0 || ptr->length >= MAX_LEN)
			return -EINVAL;
		ptr->kernel_data = (char *)fd_dma_mem_alloc(ptr->length);
		fallback_on_nodma_alloc(&ptr->kernel_data, ptr->length);
@ -1082,13 +1082,10 @@ out_putf:
|
|||
return error;
|
||||
}
|
||||
|
||||
static int __loop_clr_fd(struct loop_device *lo, bool release)
|
||||
static void __loop_clr_fd(struct loop_device *lo)
|
||||
{
|
||||
struct file *filp = NULL;
|
||||
struct file *filp;
|
||||
gfp_t gfp = lo->old_gfp_mask;
|
||||
int err = 0;
|
||||
bool partscan = false;
|
||||
int lo_number;
|
||||
struct loop_worker *pos, *worker;
|
||||
|
||||
/*
|
||||
|
@ -1103,17 +1100,14 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
|
|||
* became visible.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Since this function is called upon "ioctl(LOOP_CLR_FD)" xor "close()
|
||||
* after ioctl(LOOP_CLR_FD)", it is a sign of something going wrong if
|
||||
* lo->lo_state has changed while waiting for lo->lo_mutex.
|
||||
*/
|
||||
mutex_lock(&lo->lo_mutex);
|
||||
if (WARN_ON_ONCE(lo->lo_state != Lo_rundown)) {
|
||||
err = -ENXIO;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
filp = lo->lo_backing_file;
|
||||
if (filp == NULL) {
|
||||
err = -EINVAL;
|
||||
goto out_unlock;
|
||||
}
|
||||
BUG_ON(lo->lo_state != Lo_rundown);
|
||||
mutex_unlock(&lo->lo_mutex);
|
||||
|
||||
if (test_bit(QUEUE_FLAG_WC, &lo->lo_queue->queue_flags))
|
||||
blk_queue_write_cache(lo->lo_queue, false, false);
|
||||
|
@ -1134,6 +1128,7 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
|
|||
del_timer_sync(&lo->timer);
|
||||
|
||||
spin_lock_irq(&lo->lo_lock);
|
||||
filp = lo->lo_backing_file;
|
||||
lo->lo_backing_file = NULL;
|
||||
spin_unlock_irq(&lo->lo_lock);
|
||||
|
||||
|
@ -1149,60 +1144,59 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
|
|||
/* let user-space know about this change */
|
||||
kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
|
||||
mapping_set_gfp_mask(filp->f_mapping, gfp);
|
||||
/* This is safe: open() is still holding a reference. */
|
||||
module_put(THIS_MODULE);
|
||||
blk_mq_unfreeze_queue(lo->lo_queue);
|
||||
|
||||
partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
|
||||
lo_number = lo->lo_number;
|
||||
disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
|
||||
out_unlock:
|
||||
mutex_unlock(&lo->lo_mutex);
|
||||
if (partscan) {
|
||||
/*
|
||||
* open_mutex has been held already in release path, so don't
|
||||
* acquire it if this function is called in such case.
|
||||
*
|
||||
* If the reread partition isn't from release path, lo_refcnt
|
||||
* must be at least one and it can only become zero when the
|
||||
* current holder is released.
|
||||
*/
|
||||
if (!release)
|
||||
mutex_lock(&lo->lo_disk->open_mutex);
|
||||
|
||||
if (lo->lo_flags & LO_FLAGS_PARTSCAN) {
|
||||
int err;
|
||||
|
||||
mutex_lock(&lo->lo_disk->open_mutex);
|
||||
err = bdev_disk_changed(lo->lo_disk, false);
|
||||
if (!release)
|
||||
mutex_unlock(&lo->lo_disk->open_mutex);
|
||||
mutex_unlock(&lo->lo_disk->open_mutex);
|
||||
if (err)
|
||||
pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
|
||||
__func__, lo_number, err);
|
||||
__func__, lo->lo_number, err);
|
||||
/* Device is gone, no point in returning error */
|
||||
err = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* lo->lo_state is set to Lo_unbound here after above partscan has
|
||||
* finished.
|
||||
*
|
||||
* There cannot be anybody else entering __loop_clr_fd() as
|
||||
* lo->lo_backing_file is already cleared and Lo_rundown state
|
||||
* protects us from all the other places trying to change the 'lo'
|
||||
* device.
|
||||
*/
|
||||
mutex_lock(&lo->lo_mutex);
|
||||
lo->lo_flags = 0;
|
||||
if (!part_shift)
|
||||
lo->lo_disk->flags |= GENHD_FL_NO_PART;
|
||||
|
||||
fput(filp);
|
||||
}
|
||||
|
||||
static void loop_rundown_completed(struct loop_device *lo)
|
||||
{
|
||||
mutex_lock(&lo->lo_mutex);
|
||||
lo->lo_state = Lo_unbound;
|
||||
mutex_unlock(&lo->lo_mutex);
|
||||
module_put(THIS_MODULE);
|
||||
}
|
||||
|
||||
/*
|
||||
* Need not hold lo_mutex to fput backing file. Calling fput holding
|
||||
* lo_mutex triggers a circular lock dependency possibility warning as
|
||||
* fput can take open_mutex which is usually taken before lo_mutex.
|
||||
*/
|
||||
if (filp)
|
||||
fput(filp);
|
||||
return err;
|
||||
static void loop_rundown_workfn(struct work_struct *work)
|
||||
{
|
||||
struct loop_device *lo = container_of(work, struct loop_device,
|
||||
rundown_work);
|
||||
struct block_device *bdev = lo->lo_device;
|
||||
struct gendisk *disk = lo->lo_disk;
|
||||
|
||||
__loop_clr_fd(lo);
|
||||
kobject_put(&bdev->bd_device.kobj);
|
||||
module_put(disk->fops->owner);
|
||||
loop_rundown_completed(lo);
|
||||
}
|
||||
|
||||
static void loop_schedule_rundown(struct loop_device *lo)
|
||||
{
|
||||
struct block_device *bdev = lo->lo_device;
|
||||
struct gendisk *disk = lo->lo_disk;
|
||||
|
||||
__module_get(disk->fops->owner);
|
||||
kobject_get(&bdev->bd_device.kobj);
|
||||
INIT_WORK(&lo->rundown_work, loop_rundown_workfn);
|
||||
queue_work(system_long_wq, &lo->rundown_work);
|
||||
}
|
||||
|
||||
static int loop_clr_fd(struct loop_device *lo)
|
||||
|
@ -1234,7 +1228,9 @@ static int loop_clr_fd(struct loop_device *lo)
|
|||
lo->lo_state = Lo_rundown;
|
||||
mutex_unlock(&lo->lo_mutex);
|
||||
|
||||
return __loop_clr_fd(lo, false);
|
||||
__loop_clr_fd(lo);
|
||||
loop_rundown_completed(lo);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
|
@ -1758,7 +1754,7 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
|
|||
* In autoclear mode, stop the loop thread
|
||||
* and remove configuration after last close.
|
||||
*/
|
||||
__loop_clr_fd(lo, true);
|
||||
loop_schedule_rundown(lo);
|
||||
return;
|
||||
} else if (lo->lo_state == Lo_bound) {
|
||||
/*
|
||||
|
|
|
@@ -56,6 +56,7 @@ struct loop_device {
	struct gendisk *lo_disk;
	struct mutex lo_mutex;
	bool idr_visible;
+	struct work_struct rundown_work;
 };
 
 struct loop_cmd {
@ -136,16 +136,15 @@ struct mtip_compat_ide_task_request_s {
|
|||
* return value
|
||||
* true if device removed, else false
|
||||
*/
|
||||
static bool mtip_check_surprise_removal(struct pci_dev *pdev)
|
||||
static bool mtip_check_surprise_removal(struct driver_data *dd)
|
||||
{
|
||||
u16 vendor_id = 0;
|
||||
struct driver_data *dd = pci_get_drvdata(pdev);
|
||||
|
||||
if (dd->sr)
|
||||
return true;
|
||||
|
||||
/* Read the vendorID from the configuration space */
|
||||
pci_read_config_word(pdev, 0x00, &vendor_id);
|
||||
pci_read_config_word(dd->pdev, 0x00, &vendor_id);
|
||||
if (vendor_id == 0xFFFF) {
|
||||
dd->sr = true;
|
||||
if (dd->queue)
|
||||
|
@ -447,7 +446,7 @@ static int mtip_device_reset(struct driver_data *dd)
|
|||
{
|
||||
int rv = 0;
|
||||
|
||||
if (mtip_check_surprise_removal(dd->pdev))
|
||||
if (mtip_check_surprise_removal(dd))
|
||||
return 0;
|
||||
|
||||
if (mtip_hba_reset(dd) < 0)
|
||||
|
@ -727,7 +726,7 @@ static inline void mtip_process_errors(struct driver_data *dd, u32 port_stat)
|
|||
dev_warn(&dd->pdev->dev,
|
||||
"Port stat errors %x unhandled\n",
|
||||
(port_stat & ~PORT_IRQ_HANDLED));
|
||||
if (mtip_check_surprise_removal(dd->pdev))
|
||||
if (mtip_check_surprise_removal(dd))
|
||||
return;
|
||||
}
|
||||
if (likely(port_stat & (PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR))) {
|
||||
|
@ -752,7 +751,7 @@ static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
|
|||
/* Acknowledge the interrupt status on the port.*/
|
||||
port_stat = readl(port->mmio + PORT_IRQ_STAT);
|
||||
if (unlikely(port_stat == 0xFFFFFFFF)) {
|
||||
mtip_check_surprise_removal(dd->pdev);
|
||||
mtip_check_surprise_removal(dd);
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
writel(port_stat, port->mmio + PORT_IRQ_STAT);
|
||||
|
@ -796,7 +795,7 @@ static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
|
|||
}
|
||||
|
||||
if (unlikely(port_stat & PORT_IRQ_ERR)) {
|
||||
if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
|
||||
if (unlikely(mtip_check_surprise_removal(dd))) {
|
||||
/* don't proceed further */
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
@ -915,7 +914,7 @@ static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
|
|||
|
||||
msleep(100);
|
||||
|
||||
if (mtip_check_surprise_removal(port->dd->pdev))
|
||||
if (mtip_check_surprise_removal(port->dd))
|
||||
goto err_fault;
|
||||
|
||||
active = mtip_commands_active(port);
|
||||
|
@ -980,7 +979,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
|
|||
return -EFAULT;
|
||||
}
|
||||
|
||||
if (mtip_check_surprise_removal(dd->pdev))
|
||||
if (mtip_check_surprise_removal(dd))
|
||||
return -EFAULT;
|
||||
|
||||
rq = blk_mq_alloc_request(dd->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_RESERVED);
|
||||
|
@ -1022,7 +1021,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
|
|||
fis->command, int_cmd->status);
|
||||
rv = -EIO;
|
||||
|
||||
if (mtip_check_surprise_removal(dd->pdev) ||
|
||||
if (mtip_check_surprise_removal(dd) ||
|
||||
test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
|
||||
&dd->dd_flag)) {
|
||||
dev_err(&dd->pdev->dev,
|
||||
|
@ -2513,7 +2512,7 @@ static int mtip_ftl_rebuild_poll(struct driver_data *dd)
|
|||
if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
|
||||
&dd->dd_flag)))
|
||||
return -EFAULT;
|
||||
if (mtip_check_surprise_removal(dd->pdev))
|
||||
if (mtip_check_surprise_removal(dd))
|
||||
return -EFAULT;
|
||||
|
||||
if (mtip_get_identify(dd->port, NULL) < 0)
|
||||
|
@ -2891,7 +2890,7 @@ static int mtip_hw_init(struct driver_data *dd)
|
|||
time_before(jiffies, timeout)) {
|
||||
mdelay(100);
|
||||
}
|
||||
if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
|
||||
if (unlikely(mtip_check_surprise_removal(dd))) {
|
||||
timetaken = jiffies - timetaken;
|
||||
dev_warn(&dd->pdev->dev,
|
||||
"Surprise removal detected at %u ms\n",
|
||||
|
@ -4098,7 +4097,7 @@ static void mtip_pci_remove(struct pci_dev *pdev)
|
|||
list_add(&dd->remove_list, &removing_list);
|
||||
spin_unlock_irqrestore(&dev_lock, flags);
|
||||
|
||||
mtip_check_surprise_removal(pdev);
|
||||
mtip_check_surprise_removal(dd);
|
||||
synchronize_irq(dd->pdev->irq);
|
||||
|
||||
/* Spin until workers are done */
|
||||
|
@ -4145,36 +4144,17 @@ static void mtip_pci_remove(struct pci_dev *pdev)
|
|||
* 0 Success
|
||||
* <0 Error
|
||||
*/
|
||||
static int mtip_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
|
||||
static int __maybe_unused mtip_pci_suspend(struct device *dev)
|
||||
{
|
||||
int rv = 0;
|
||||
struct driver_data *dd = pci_get_drvdata(pdev);
|
||||
|
||||
if (!dd) {
|
||||
dev_err(&pdev->dev,
|
||||
"Driver private datastructure is NULL\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
struct driver_data *dd = dev_get_drvdata(dev);
|
||||
|
||||
set_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag);
|
||||
|
||||
/* Disable ports & interrupts then send standby immediate */
|
||||
rv = mtip_block_suspend(dd);
|
||||
if (rv < 0) {
|
||||
dev_err(&pdev->dev,
|
||||
"Failed to suspend controller\n");
|
||||
return rv;
|
||||
}
|
||||
|
||||
/*
|
||||
* Save the pci config space to pdev structure &
|
||||
* disable the device
|
||||
*/
|
||||
pci_save_state(pdev);
|
||||
pci_disable_device(pdev);
|
||||
|
||||
/* Move to Low power state*/
|
||||
pci_set_power_state(pdev, PCI_D3hot);
|
||||
if (rv < 0)
|
||||
dev_err(dev, "Failed to suspend controller\n");
|
||||
|
||||
return rv;
|
||||
}
|
||||
|
@ -4186,32 +4166,10 @@ static int mtip_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
|
|||
* 0 Success
|
||||
* <0 Error
|
||||
*/
|
||||
static int mtip_pci_resume(struct pci_dev *pdev)
|
||||
static int __maybe_unused mtip_pci_resume(struct device *dev)
|
||||
{
|
||||
int rv = 0;
|
||||
struct driver_data *dd;
|
||||
|
||||
dd = pci_get_drvdata(pdev);
|
||||
if (!dd) {
|
||||
dev_err(&pdev->dev,
|
||||
"Driver private datastructure is NULL\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
/* Move the device to active State */
|
||||
pci_set_power_state(pdev, PCI_D0);
|
||||
|
||||
/* Restore PCI configuration space */
|
||||
pci_restore_state(pdev);
|
||||
|
||||
/* Enable the PCI device*/
|
||||
rv = pcim_enable_device(pdev);
|
||||
if (rv < 0) {
|
||||
dev_err(&pdev->dev,
|
||||
"Failed to enable card during resume\n");
|
||||
goto err;
|
||||
}
|
||||
pci_set_master(pdev);
|
||||
struct driver_data *dd = dev_get_drvdata(dev);
|
||||
|
||||
/*
|
||||
* Calls hbaReset, initPort, & startPort function
|
||||
|
@ -4219,9 +4177,8 @@ static int mtip_pci_resume(struct pci_dev *pdev)
|
|||
*/
|
||||
rv = mtip_block_resume(dd);
|
||||
if (rv < 0)
|
||||
dev_err(&pdev->dev, "Unable to resume\n");
|
||||
dev_err(dev, "Unable to resume\n");
|
||||
|
||||
err:
|
||||
clear_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag);
|
||||
|
||||
return rv;
|
||||
|
@@ -4252,14 +4209,15 @@ static const struct pci_device_id mtip_pci_tbl[] = {
	{ 0 }
 };
 
+static SIMPLE_DEV_PM_OPS(mtip_pci_pm_ops, mtip_pci_suspend, mtip_pci_resume);
+
 /* Structure that describes the PCI driver functions. */
 static struct pci_driver mtip_pci_driver = {
	.name = MTIP_DRV_NAME,
	.id_table = mtip_pci_tbl,
	.probe = mtip_pci_probe,
	.remove = mtip_pci_remove,
-	.suspend = mtip_pci_suspend,
-	.resume = mtip_pci_resume,
+	.driver.pm = &mtip_pci_pm_ops,
	.shutdown = mtip_pci_shutdown,
 };
 
@@ -340,9 +340,9 @@ static int nullb_update_nr_hw_queues(struct nullb_device *dev,
		return 0;
 
	/*
-	 * Make sure at least one queue exists for each of submit and poll.
+	 * Make sure at least one submit queue exists.
	 */
-	if (!submit_queues || !poll_queues)
+	if (!submit_queues)
		return -EINVAL;
 
	/*
@@ -1574,7 +1574,9 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
		cmd = blk_mq_rq_to_pdu(req);
		cmd->error = null_process_cmd(cmd, req_op(req), blk_rq_pos(req),
						blk_rq_sectors(req));
-		end_cmd(cmd);
+		if (!blk_mq_add_to_batch(req, iob, (__force int) cmd->error,
+					blk_mq_end_request_batch))
+			end_cmd(cmd);
		nr++;
	}
 
@@ -1890,7 +1892,7 @@ static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
	if (g_shared_tag_bitmap)
		set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
	set->driver_data = nullb;
-	if (g_poll_queues)
+	if (poll_queues)
		set->nr_maps = 3;
	else
		set->nr_maps = 1;
@@ -1917,8 +1919,6 @@ static int null_validate_conf(struct nullb_device *dev)
 
	if (dev->poll_queues > g_poll_queues)
		dev->poll_queues = g_poll_queues;
-	else if (dev->poll_queues == 0)
-		dev->poll_queues = 1;
	dev->prev_poll_queues = dev->poll_queues;
 
	dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ);
@ -113,57 +113,10 @@ static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
|
|||
return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
|
||||
}
|
||||
|
||||
/*
|
||||
* create and register a pktcdvd kernel object.
|
||||
*/
|
||||
static struct pktcdvd_kobj* pkt_kobj_create(struct pktcdvd_device *pd,
|
||||
const char* name,
|
||||
struct kobject* parent,
|
||||
struct kobj_type* ktype)
|
||||
{
|
||||
struct pktcdvd_kobj *p;
|
||||
int error;
|
||||
|
||||
p = kzalloc(sizeof(*p), GFP_KERNEL);
|
||||
if (!p)
|
||||
return NULL;
|
||||
p->pd = pd;
|
||||
error = kobject_init_and_add(&p->kobj, ktype, parent, "%s", name);
|
||||
if (error) {
|
||||
kobject_put(&p->kobj);
|
||||
return NULL;
|
||||
}
|
||||
kobject_uevent(&p->kobj, KOBJ_ADD);
|
||||
return p;
|
||||
}
|
||||
/*
|
||||
* remove a pktcdvd kernel object.
|
||||
*/
|
||||
static void pkt_kobj_remove(struct pktcdvd_kobj *p)
|
||||
{
|
||||
if (p)
|
||||
kobject_put(&p->kobj);
|
||||
}
|
||||
/*
|
||||
* default release function for pktcdvd kernel objects.
|
||||
*/
|
||||
static void pkt_kobj_release(struct kobject *kobj)
|
||||
{
|
||||
kfree(to_pktcdvdkobj(kobj));
|
||||
}
|
||||
|
||||
|
||||
/**********************************************************
|
||||
*
|
||||
* sysfs interface for pktcdvd
|
||||
* by (C) 2006 Thomas Maier <balagi@justmail.de>
|
||||
*
|
||||
**********************************************************/
|
||||
|
||||
#define DEF_ATTR(_obj,_name,_mode) \
|
||||
static struct attribute _obj = { .name = _name, .mode = _mode }
|
||||
|
||||
/**********************************************************
|
||||
|
||||
/sys/class/pktcdvd/pktcdvd[0-7]/
|
||||
stat/reset
|
||||
stat/packets_started
|
||||
|
@ -176,75 +129,94 @@ static void pkt_kobj_release(struct kobject *kobj)
|
|||
write_queue/congestion_on
|
||||
**********************************************************/
|
||||
|
||||
DEF_ATTR(kobj_pkt_attr_st1, "reset", 0200);
|
||||
DEF_ATTR(kobj_pkt_attr_st2, "packets_started", 0444);
|
||||
DEF_ATTR(kobj_pkt_attr_st3, "packets_finished", 0444);
|
||||
DEF_ATTR(kobj_pkt_attr_st4, "kb_written", 0444);
|
||||
DEF_ATTR(kobj_pkt_attr_st5, "kb_read", 0444);
|
||||
DEF_ATTR(kobj_pkt_attr_st6, "kb_read_gather", 0444);
|
||||
|
||||
static struct attribute *kobj_pkt_attrs_stat[] = {
|
||||
&kobj_pkt_attr_st1,
|
||||
&kobj_pkt_attr_st2,
|
||||
&kobj_pkt_attr_st3,
|
||||
&kobj_pkt_attr_st4,
|
||||
&kobj_pkt_attr_st5,
|
||||
&kobj_pkt_attr_st6,
|
||||
NULL
|
||||
};
|
||||
|
||||
DEF_ATTR(kobj_pkt_attr_wq1, "size", 0444);
|
||||
DEF_ATTR(kobj_pkt_attr_wq2, "congestion_off", 0644);
|
||||
DEF_ATTR(kobj_pkt_attr_wq3, "congestion_on", 0644);
|
||||
|
||||
static struct attribute *kobj_pkt_attrs_wqueue[] = {
|
||||
&kobj_pkt_attr_wq1,
|
||||
&kobj_pkt_attr_wq2,
|
||||
&kobj_pkt_attr_wq3,
|
||||
NULL
|
||||
};
|
||||
|
||||
static ssize_t kobj_pkt_show(struct kobject *kobj,
|
||||
struct attribute *attr, char *data)
|
||||
static ssize_t packets_started_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
|
||||
int n = 0;
|
||||
int v;
|
||||
if (strcmp(attr->name, "packets_started") == 0) {
|
||||
n = sprintf(data, "%lu\n", pd->stats.pkt_started);
|
||||
struct pktcdvd_device *pd = dev_get_drvdata(dev);
|
||||
|
||||
} else if (strcmp(attr->name, "packets_finished") == 0) {
|
||||
n = sprintf(data, "%lu\n", pd->stats.pkt_ended);
|
||||
return sysfs_emit(buf, "%lu\n", pd->stats.pkt_started);
|
||||
}
|
||||
static DEVICE_ATTR_RO(packets_started);
|
||||
|
||||
} else if (strcmp(attr->name, "kb_written") == 0) {
|
||||
n = sprintf(data, "%lu\n", pd->stats.secs_w >> 1);
|
||||
static ssize_t packets_finished_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct pktcdvd_device *pd = dev_get_drvdata(dev);
|
||||
|
||||
} else if (strcmp(attr->name, "kb_read") == 0) {
|
||||
n = sprintf(data, "%lu\n", pd->stats.secs_r >> 1);
|
||||
return sysfs_emit(buf, "%lu\n", pd->stats.pkt_ended);
|
||||
}
|
||||
static DEVICE_ATTR_RO(packets_finished);
|
||||
|
||||
} else if (strcmp(attr->name, "kb_read_gather") == 0) {
|
||||
n = sprintf(data, "%lu\n", pd->stats.secs_rg >> 1);
|
||||
static ssize_t kb_written_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct pktcdvd_device *pd = dev_get_drvdata(dev);
|
||||
|
||||
} else if (strcmp(attr->name, "size") == 0) {
|
||||
spin_lock(&pd->lock);
|
||||
v = pd->bio_queue_size;
|
||||
spin_unlock(&pd->lock);
|
||||
n = sprintf(data, "%d\n", v);
|
||||
return sysfs_emit(buf, "%lu\n", pd->stats.secs_w >> 1);
|
||||
}
|
||||
static DEVICE_ATTR_RO(kb_written);
|
||||
|
||||
} else if (strcmp(attr->name, "congestion_off") == 0) {
|
||||
spin_lock(&pd->lock);
|
||||
v = pd->write_congestion_off;
|
||||
spin_unlock(&pd->lock);
|
||||
n = sprintf(data, "%d\n", v);
|
||||
static ssize_t kb_read_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct pktcdvd_device *pd = dev_get_drvdata(dev);
|
||||
|
||||
} else if (strcmp(attr->name, "congestion_on") == 0) {
|
||||
spin_lock(&pd->lock);
|
||||
v = pd->write_congestion_on;
|
||||
spin_unlock(&pd->lock);
|
||||
n = sprintf(data, "%d\n", v);
|
||||
return sysfs_emit(buf, "%lu\n", pd->stats.secs_r >> 1);
|
||||
}
|
||||
static DEVICE_ATTR_RO(kb_read);
|
||||
|
||||
static ssize_t kb_read_gather_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct pktcdvd_device *pd = dev_get_drvdata(dev);
|
||||
|
||||
return sysfs_emit(buf, "%lu\n", pd->stats.secs_rg >> 1);
|
||||
}
|
||||
static DEVICE_ATTR_RO(kb_read_gather);
|
||||
|
||||
static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
|
||||
const char *buf, size_t len)
|
||||
{
|
||||
struct pktcdvd_device *pd = dev_get_drvdata(dev);
|
||||
|
||||
if (len > 0) {
|
||||
pd->stats.pkt_started = 0;
|
||||
pd->stats.pkt_ended = 0;
|
||||
pd->stats.secs_w = 0;
|
||||
pd->stats.secs_rg = 0;
|
||||
pd->stats.secs_r = 0;
|
||||
}
|
||||
return len;
|
||||
}
|
||||
static DEVICE_ATTR_WO(reset);
|
||||
|
||||
static struct attribute *pkt_stat_attrs[] = {
|
||||
&dev_attr_packets_finished.attr,
|
||||
&dev_attr_packets_started.attr,
|
||||
&dev_attr_kb_read.attr,
|
||||
&dev_attr_kb_written.attr,
|
||||
&dev_attr_kb_read_gather.attr,
|
||||
&dev_attr_reset.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static const struct attribute_group pkt_stat_group = {
|
||||
.name = "stat",
|
||||
.attrs = pkt_stat_attrs,
|
||||
};
|
||||
|
||||
static ssize_t size_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct pktcdvd_device *pd = dev_get_drvdata(dev);
|
||||
int n;
|
||||
|
||||
spin_lock(&pd->lock);
|
||||
n = sysfs_emit(buf, "%d\n", pd->bio_queue_size);
|
||||
spin_unlock(&pd->lock);
|
||||
return n;
|
||||
}
|
||||
static DEVICE_ATTR_RO(size);
|
||||
|
||||
static void init_write_congestion_marks(int* lo, int* hi)
|
||||
{
|
||||
|
@ -263,30 +235,56 @@ static void init_write_congestion_marks(int* lo, int* hi)
|
|||
}
|
||||
}
|
||||
|
||||
static ssize_t kobj_pkt_store(struct kobject *kobj,
|
||||
struct attribute *attr,
|
||||
const char *data, size_t len)
|
||||
static ssize_t congestion_off_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
|
||||
struct pktcdvd_device *pd = dev_get_drvdata(dev);
|
||||
int n;
|
||||
|
||||
spin_lock(&pd->lock);
|
||||
n = sysfs_emit(buf, "%d\n", pd->write_congestion_off);
|
||||
spin_unlock(&pd->lock);
|
||||
return n;
|
||||
}
|
||||
|
||||
static ssize_t congestion_off_store(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
const char *buf, size_t len)
|
||||
{
|
||||
struct pktcdvd_device *pd = dev_get_drvdata(dev);
|
||||
int val;
|
||||
|
||||
if (strcmp(attr->name, "reset") == 0 && len > 0) {
|
||||
pd->stats.pkt_started = 0;
|
||||
pd->stats.pkt_ended = 0;
|
||||
pd->stats.secs_w = 0;
|
||||
pd->stats.secs_rg = 0;
|
||||
pd->stats.secs_r = 0;
|
||||
|
||||
} else if (strcmp(attr->name, "congestion_off") == 0
|
||||
&& sscanf(data, "%d", &val) == 1) {
|
||||
if (sscanf(buf, "%d", &val) == 1) {
|
||||
spin_lock(&pd->lock);
|
||||
pd->write_congestion_off = val;
|
||||
init_write_congestion_marks(&pd->write_congestion_off,
|
||||
&pd->write_congestion_on);
|
||||
spin_unlock(&pd->lock);
|
||||
}
|
||||
return len;
|
||||
}
|
||||
static DEVICE_ATTR_RW(congestion_off);
|
||||
|
||||
} else if (strcmp(attr->name, "congestion_on") == 0
|
||||
&& sscanf(data, "%d", &val) == 1) {
|
||||
static ssize_t congestion_on_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct pktcdvd_device *pd = dev_get_drvdata(dev);
|
||||
int n;
|
||||
|
||||
spin_lock(&pd->lock);
|
||||
n = sysfs_emit(buf, "%d\n", pd->write_congestion_on);
|
||||
spin_unlock(&pd->lock);
|
||||
return n;
|
||||
}
|
||||
|
||||
static ssize_t congestion_on_store(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
const char *buf, size_t len)
|
||||
{
|
||||
struct pktcdvd_device *pd = dev_get_drvdata(dev);
|
||||
int val;
|
||||
|
||||
if (sscanf(buf, "%d", &val) == 1) {
|
||||
spin_lock(&pd->lock);
|
||||
pd->write_congestion_on = val;
|
||||
init_write_congestion_marks(&pd->write_congestion_off,
|
||||
|
@ -295,44 +293,39 @@ static ssize_t kobj_pkt_store(struct kobject *kobj,
|
|||
}
|
||||
return len;
|
||||
}
|
||||
static DEVICE_ATTR_RW(congestion_on);
|
||||
|
||||
static const struct sysfs_ops kobj_pkt_ops = {
|
||||
.show = kobj_pkt_show,
|
||||
.store = kobj_pkt_store
|
||||
static struct attribute *pkt_wq_attrs[] = {
|
||||
&dev_attr_congestion_on.attr,
|
||||
&dev_attr_congestion_off.attr,
|
||||
&dev_attr_size.attr,
|
||||
NULL,
|
||||
};
|
||||
static struct kobj_type kobj_pkt_type_stat = {
|
||||
.release = pkt_kobj_release,
|
||||
.sysfs_ops = &kobj_pkt_ops,
|
||||
.default_attrs = kobj_pkt_attrs_stat
|
||||
|
||||
static const struct attribute_group pkt_wq_group = {
|
||||
.name = "write_queue",
|
||||
.attrs = pkt_wq_attrs,
|
||||
};
|
||||
static struct kobj_type kobj_pkt_type_wqueue = {
|
||||
.release = pkt_kobj_release,
|
||||
.sysfs_ops = &kobj_pkt_ops,
|
||||
.default_attrs = kobj_pkt_attrs_wqueue
|
||||
|
||||
static const struct attribute_group *pkt_groups[] = {
|
||||
&pkt_stat_group,
|
||||
&pkt_wq_group,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
|
||||
{
|
||||
if (class_pktcdvd) {
|
||||
pd->dev = device_create(class_pktcdvd, NULL, MKDEV(0, 0), NULL,
|
||||
"%s", pd->name);
|
||||
pd->dev = device_create_with_groups(class_pktcdvd, NULL,
|
||||
MKDEV(0, 0), pd, pkt_groups,
|
||||
"%s", pd->name);
|
||||
if (IS_ERR(pd->dev))
|
||||
pd->dev = NULL;
|
||||
}
|
||||
if (pd->dev) {
|
||||
pd->kobj_stat = pkt_kobj_create(pd, "stat",
|
||||
&pd->dev->kobj,
|
||||
&kobj_pkt_type_stat);
|
||||
pd->kobj_wqueue = pkt_kobj_create(pd, "write_queue",
|
||||
&pd->dev->kobj,
|
||||
&kobj_pkt_type_wqueue);
|
||||
}
|
||||
}
|
||||
|
||||
static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
|
||||
{
|
||||
pkt_kobj_remove(pd->kobj_stat);
|
||||
pkt_kobj_remove(pd->kobj_wqueue);
|
||||
if (class_pktcdvd)
|
||||
device_unregister(pd->dev);
|
||||
}
|
||||
|
@ -1107,7 +1100,6 @@ static int pkt_handle_queue(struct pktcdvd_device *pd)
|
|||
sector_t zone = 0; /* Suppress gcc warning */
|
||||
struct pkt_rb_node *node, *first_node;
|
||||
struct rb_node *n;
|
||||
int wakeup;
|
||||
|
||||
atomic_set(&pd->scan_queue, 0);
|
||||
|
||||
|
@ -1179,12 +1171,14 @@ try_next_bio:
|
|||
spin_unlock(&pkt->lock);
|
||||
}
|
||||
/* check write congestion marks, and if bio_queue_size is
|
||||
below, wake up any waiters */
|
||||
wakeup = (pd->write_congestion_on > 0
|
||||
&& pd->bio_queue_size <= pd->write_congestion_off);
|
||||
* below, wake up any waiters
|
||||
*/
|
||||
if (pd->congested &&
|
||||
pd->bio_queue_size <= pd->write_congestion_off) {
|
||||
pd->congested = false;
|
||||
wake_up_var(&pd->congested);
|
||||
}
|
||||
spin_unlock(&pd->lock);
|
||||
if (wakeup)
|
||||
clear_bdi_congested(pd->disk->bdi, BLK_RW_ASYNC);
|
||||
|
||||
pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
|
||||
pkt_set_state(pkt, PACKET_WAITING_STATE);
|
||||
|
@ -2356,7 +2350,7 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
|
|||
}
|
||||
spin_unlock(&pd->cdrw.active_list_lock);
|
||||
|
||||
/*
|
||||
/*
|
||||
* Test if there is enough room left in the bio work queue
|
||||
* (queue size >= congestion on mark).
|
||||
* If not, wait till the work queue size is below the congestion off mark.
|
||||
|
@ -2364,12 +2358,20 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
|
|||
spin_lock(&pd->lock);
|
||||
if (pd->write_congestion_on > 0
|
||||
&& pd->bio_queue_size >= pd->write_congestion_on) {
|
||||
set_bdi_congested(bio->bi_bdev->bd_disk->bdi, BLK_RW_ASYNC);
|
||||
do {
|
||||
struct wait_bit_queue_entry wqe;
|
||||
|
||||
init_wait_var_entry(&wqe, &pd->congested, 0);
|
||||
for (;;) {
|
||||
prepare_to_wait_event(__var_waitqueue(&pd->congested),
|
||||
&wqe.wq_entry,
|
||||
TASK_UNINTERRUPTIBLE);
|
||||
if (pd->bio_queue_size <= pd->write_congestion_off)
|
||||
break;
|
||||
pd->congested = true;
|
||||
spin_unlock(&pd->lock);
|
||||
congestion_wait(BLK_RW_ASYNC, HZ);
|
||||
schedule();
|
||||
spin_lock(&pd->lock);
|
||||
} while(pd->bio_queue_size > pd->write_congestion_off);
|
||||
}
|
||||
}
|
||||
spin_unlock(&pd->lock);
|
||||
|
||||
|
|
|
@@ -452,6 +452,7 @@ static struct attribute *rnbd_dev_attrs[] = {
	&rnbd_clt_nr_poll_queues.attr,
	NULL,
 };
+ATTRIBUTE_GROUPS(rnbd_dev);
 
 void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev)
 {
@@ -474,7 +475,7 @@ void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev)
 
 static struct kobj_type rnbd_dev_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
-	.default_attrs = rnbd_dev_attrs,
+	.default_groups = rnbd_dev_groups,
 };
 
 static int rnbd_clt_add_dev_kobj(struct rnbd_clt_dev *dev)
@@ -1,3 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_BLK_DEV_RSXX) += rsxx.o
-rsxx-objs := config.o core.o cregs.o dev.o dma.o
@ -1,197 +0,0 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
/*
|
||||
* Filename: config.c
|
||||
*
|
||||
* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
|
||||
* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
|
||||
*
|
||||
* (C) Copyright 2013 IBM Corporation
|
||||
*/
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/crc32.h>
|
||||
#include <linux/swab.h>
|
||||
|
||||
#include "rsxx_priv.h"
|
||||
#include "rsxx_cfg.h"
|
||||
|
||||
static void initialize_config(struct rsxx_card_cfg *cfg)
|
||||
{
|
||||
cfg->hdr.version = RSXX_CFG_VERSION;
|
||||
|
||||
cfg->data.block_size = RSXX_HW_BLK_SIZE;
|
||||
cfg->data.stripe_size = RSXX_HW_BLK_SIZE;
|
||||
cfg->data.vendor_id = RSXX_VENDOR_ID_IBM;
|
||||
cfg->data.cache_order = (-1);
|
||||
cfg->data.intr_coal.mode = RSXX_INTR_COAL_DISABLED;
|
||||
cfg->data.intr_coal.count = 0;
|
||||
cfg->data.intr_coal.latency = 0;
|
||||
}
|
||||
|
||||
static u32 config_data_crc32(struct rsxx_card_cfg *cfg)
|
||||
{
|
||||
/*
|
||||
* Return the compliment of the CRC to ensure compatibility
|
||||
* (i.e. this is how early rsxx drivers did it.)
|
||||
*/
|
||||
|
||||
return ~crc32(~0, &cfg->data, sizeof(cfg->data));
|
||||
}
|
||||
|
||||
|
||||
/*----------------- Config Byte Swap Functions -------------------*/
|
||||
static void config_hdr_be_to_cpu(struct card_cfg_hdr *hdr)
|
||||
{
|
||||
hdr->version = be32_to_cpu((__force __be32) hdr->version);
|
||||
hdr->crc = be32_to_cpu((__force __be32) hdr->crc);
|
||||
}
|
||||
|
||||
static void config_hdr_cpu_to_be(struct card_cfg_hdr *hdr)
|
||||
{
|
||||
hdr->version = (__force u32) cpu_to_be32(hdr->version);
|
||||
hdr->crc = (__force u32) cpu_to_be32(hdr->crc);
|
||||
}
|
||||
|
||||
static void config_data_swab(struct rsxx_card_cfg *cfg)
|
||||
{
|
||||
u32 *data = (u32 *) &cfg->data;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < (sizeof(cfg->data) / 4); i++)
|
||||
data[i] = swab32(data[i]);
|
||||
}
|
||||
|
||||
static void config_data_le_to_cpu(struct rsxx_card_cfg *cfg)
|
||||
{
|
||||
u32 *data = (u32 *) &cfg->data;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < (sizeof(cfg->data) / 4); i++)
|
||||
data[i] = le32_to_cpu((__force __le32) data[i]);
|
||||
}
|
||||
|
||||
static void config_data_cpu_to_le(struct rsxx_card_cfg *cfg)
|
||||
{
|
||||
u32 *data = (u32 *) &cfg->data;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < (sizeof(cfg->data) / 4); i++)
|
||||
data[i] = (__force u32) cpu_to_le32(data[i]);
|
||||
}
|
||||
|
||||
|
||||
/*----------------- Config Operations ------------------*/
|
||||
static int rsxx_save_config(struct rsxx_cardinfo *card)
|
||||
{
|
||||
struct rsxx_card_cfg cfg;
|
||||
int st;
|
||||
|
||||
memcpy(&cfg, &card->config, sizeof(cfg));
|
||||
|
||||
if (unlikely(cfg.hdr.version != RSXX_CFG_VERSION)) {
|
||||
dev_err(CARD_TO_DEV(card),
|
||||
"Cannot save config with invalid version %d\n",
|
||||
cfg.hdr.version);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Convert data to little endian for the CRC calculation. */
|
||||
config_data_cpu_to_le(&cfg);
|
||||
|
||||
cfg.hdr.crc = config_data_crc32(&cfg);
|
||||
|
||||
/*
|
||||
* Swap the data from little endian to big endian so it can be
|
||||
* stored.
|
||||
*/
|
||||
config_data_swab(&cfg);
|
||||
config_hdr_cpu_to_be(&cfg.hdr);
|
||||
|
||||
st = rsxx_creg_write(card, CREG_ADD_CONFIG, sizeof(cfg), &cfg, 1);
|
||||
if (st)
|
||||
return st;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int rsxx_load_config(struct rsxx_cardinfo *card)
|
||||
{
|
||||
int st;
|
||||
u32 crc;
|
||||
|
||||
st = rsxx_creg_read(card, CREG_ADD_CONFIG, sizeof(card->config),
|
||||
&card->config, 1);
|
||||
if (st) {
|
||||
dev_err(CARD_TO_DEV(card),
|
||||
"Failed reading card config.\n");
|
||||
return st;
|
||||
}
|
||||
|
||||
config_hdr_be_to_cpu(&card->config.hdr);
|
||||
|
||||
if (card->config.hdr.version == RSXX_CFG_VERSION) {
|
||||
/*
|
||||
* We calculate the CRC with the data in little endian, because
|
||||
* early drivers did not take big endian CPUs into account.
|
||||
* The data is always stored in big endian, so we need to byte
|
||||
* swap it before calculating the CRC.
|
||||
*/
|
||||
|
||||
config_data_swab(&card->config);
|
||||
|
||||
/* Check the CRC */
|
||||
crc = config_data_crc32(&card->config);
|
||||
if (crc != card->config.hdr.crc) {
|
||||
dev_err(CARD_TO_DEV(card),
|
||||
"Config corruption detected!\n");
|
||||
dev_info(CARD_TO_DEV(card),
|
||||
"CRC (sb x%08x is x%08x)\n",
|
||||
card->config.hdr.crc, crc);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
/* Convert the data to CPU byteorder */
|
||||
config_data_le_to_cpu(&card->config);
|
||||
|
||||
} else if (card->config.hdr.version != 0) {
|
||||
dev_err(CARD_TO_DEV(card),
|
||||
"Invalid config version %d.\n",
|
||||
card->config.hdr.version);
|
||||
/*
|
||||
* Config version changes require special handling from the
|
||||
* user
|
||||
*/
|
||||
return -EINVAL;
|
||||
} else {
|
||||
dev_info(CARD_TO_DEV(card),
|
||||
"Initializing card configuration.\n");
|
||||
initialize_config(&card->config);
|
||||
st = rsxx_save_config(card);
|
||||
if (st)
|
||||
return st;
|
||||
}
|
||||
|
||||
card->config_valid = 1;
|
||||
|
||||
dev_dbg(CARD_TO_DEV(card), "version: x%08x\n",
|
||||
card->config.hdr.version);
|
||||
dev_dbg(CARD_TO_DEV(card), "crc: x%08x\n",
|
||||
card->config.hdr.crc);
|
||||
dev_dbg(CARD_TO_DEV(card), "block_size: x%08x\n",
|
||||
card->config.data.block_size);
|
||||
dev_dbg(CARD_TO_DEV(card), "stripe_size: x%08x\n",
|
||||
card->config.data.stripe_size);
|
||||
dev_dbg(CARD_TO_DEV(card), "vendor_id: x%08x\n",
|
||||
card->config.data.vendor_id);
|
||||
dev_dbg(CARD_TO_DEV(card), "cache_order: x%08x\n",
|
||||
card->config.data.cache_order);
|
||||
dev_dbg(CARD_TO_DEV(card), "mode: x%08x\n",
|
||||
card->config.data.intr_coal.mode);
|
||||
dev_dbg(CARD_TO_DEV(card), "count: x%08x\n",
|
||||
card->config.data.intr_coal.count);
|
||||
dev_dbg(CARD_TO_DEV(card), "latency: x%08x\n",
|
||||
card->config.data.intr_coal.latency);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
File diff suppressed because it is too large
@ -1,789 +0,0 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
/*
|
||||
* Filename: cregs.c
|
||||
*
|
||||
* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
|
||||
* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
|
||||
*
|
||||
* (C) Copyright 2013 IBM Corporation
|
||||
*/
|
||||
|
||||
#include <linux/completion.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include "rsxx_priv.h"
|
||||
|
||||
#define CREG_TIMEOUT_MSEC 10000
|
||||
|
||||
typedef void (*creg_cmd_cb)(struct rsxx_cardinfo *card,
|
||||
struct creg_cmd *cmd,
|
||||
int st);
|
||||
|
||||
struct creg_cmd {
|
||||
struct list_head list;
|
||||
creg_cmd_cb cb;
|
||||
void *cb_private;
|
||||
unsigned int op;
|
||||
unsigned int addr;
|
||||
int cnt8;
|
||||
void *buf;
|
||||
unsigned int stream;
|
||||
unsigned int status;
|
||||
};
|
||||
|
||||
static struct kmem_cache *creg_cmd_pool;
|
||||
|
||||
|
||||
/*------------ Private Functions --------------*/
|
||||
|
||||
#if defined(__LITTLE_ENDIAN)
|
||||
#define LITTLE_ENDIAN 1
|
||||
#elif defined(__BIG_ENDIAN)
|
||||
#define LITTLE_ENDIAN 0
|
||||
#else
|
||||
#error Unknown endianess!!! Aborting...
|
||||
#endif
|
||||
|
||||
static int copy_to_creg_data(struct rsxx_cardinfo *card,
|
||||
int cnt8,
|
||||
void *buf,
|
||||
unsigned int stream)
|
||||
{
|
||||
int i = 0;
|
||||
u32 *data = buf;
|
||||
|
||||
if (unlikely(card->eeh_state))
|
||||
return -EIO;
|
||||
|
||||
for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
|
||||
/*
|
||||
* Firmware implementation makes it necessary to byte swap on
|
||||
* little endian processors.
|
||||
*/
|
||||
if (LITTLE_ENDIAN && stream)
|
||||
iowrite32be(data[i], card->regmap + CREG_DATA(i));
|
||||
else
|
||||
iowrite32(data[i], card->regmap + CREG_DATA(i));
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static int copy_from_creg_data(struct rsxx_cardinfo *card,
|
||||
int cnt8,
|
||||
void *buf,
|
||||
unsigned int stream)
|
||||
{
|
||||
int i = 0;
|
||||
u32 *data = buf;
|
||||
|
||||
if (unlikely(card->eeh_state))
|
||||
return -EIO;
|
||||
|
||||
for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
|
||||
/*
|
||||
* Firmware implementation makes it necessary to byte swap on
|
||||
* little endian processors.
|
||||
*/
|
||||
if (LITTLE_ENDIAN && stream)
|
||||
data[i] = ioread32be(card->regmap + CREG_DATA(i));
|
||||
else
|
||||
data[i] = ioread32(card->regmap + CREG_DATA(i));
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void creg_issue_cmd(struct rsxx_cardinfo *card, struct creg_cmd *cmd)
|
||||
{
|
||||
int st;
|
||||
|
||||
if (unlikely(card->eeh_state))
|
||||
return;
|
||||
|
||||
iowrite32(cmd->addr, card->regmap + CREG_ADD);
|
||||
iowrite32(cmd->cnt8, card->regmap + CREG_CNT);
|
||||
|
||||
if (cmd->op == CREG_OP_WRITE) {
|
||||
if (cmd->buf) {
|
||||
st = copy_to_creg_data(card, cmd->cnt8,
|
||||
cmd->buf, cmd->stream);
|
||||
if (st)
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
if (unlikely(card->eeh_state))
|
||||
return;
|
||||
|
||||
/* Setting the valid bit will kick off the command. */
|
||||
iowrite32(cmd->op, card->regmap + CREG_CMD);
|
||||
}
|
||||
|
||||
static void creg_kick_queue(struct rsxx_cardinfo *card)
|
||||
{
|
||||
if (card->creg_ctrl.active || list_empty(&card->creg_ctrl.queue))
|
||||
return;
|
||||
|
||||
card->creg_ctrl.active = 1;
|
||||
card->creg_ctrl.active_cmd = list_first_entry(&card->creg_ctrl.queue,
|
||||
struct creg_cmd, list);
|
||||
list_del(&card->creg_ctrl.active_cmd->list);
|
||||
card->creg_ctrl.q_depth--;
|
||||
|
||||
/*
|
||||
* We have to set the timer before we push the new command. Otherwise,
|
||||
* we could create a race condition that would occur if the timer
|
||||
* was not canceled, and expired after the new command was pushed,
|
||||
* but before the command was issued to hardware.
|
||||
*/
|
||||
mod_timer(&card->creg_ctrl.cmd_timer,
|
||||
jiffies + msecs_to_jiffies(CREG_TIMEOUT_MSEC));
|
||||
|
||||
creg_issue_cmd(card, card->creg_ctrl.active_cmd);
|
||||
}
|
||||
|
||||
static int creg_queue_cmd(struct rsxx_cardinfo *card,
|
||||
unsigned int op,
|
||||
unsigned int addr,
|
||||
unsigned int cnt8,
|
||||
void *buf,
|
||||
int stream,
|
||||
creg_cmd_cb callback,
|
||||
void *cb_private)
|
||||
{
|
||||
struct creg_cmd *cmd;
|
||||
|
||||
/* Don't queue stuff up if we're halted. */
|
||||
if (unlikely(card->halt))
|
||||
return -EINVAL;
|
||||
|
||||
if (card->creg_ctrl.reset)
|
||||
return -EAGAIN;
|
||||
|
||||
if (cnt8 > MAX_CREG_DATA8)
|
||||
return -EINVAL;
|
||||
|
||||
cmd = kmem_cache_alloc(creg_cmd_pool, GFP_KERNEL);
|
||||
if (!cmd)
|
||||
return -ENOMEM;
|
||||
|
||||
INIT_LIST_HEAD(&cmd->list);
|
||||
|
||||
cmd->op = op;
|
||||
cmd->addr = addr;
|
||||
cmd->cnt8 = cnt8;
|
||||
cmd->buf = buf;
|
||||
cmd->stream = stream;
|
||||
cmd->cb = callback;
|
||||
cmd->cb_private = cb_private;
|
||||
cmd->status = 0;
|
||||
|
||||
spin_lock_bh(&card->creg_ctrl.lock);
|
||||
list_add_tail(&cmd->list, &card->creg_ctrl.queue);
|
||||
card->creg_ctrl.q_depth++;
|
||||
creg_kick_queue(card);
|
||||
spin_unlock_bh(&card->creg_ctrl.lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void creg_cmd_timed_out(struct timer_list *t)
|
||||
{
|
||||
struct rsxx_cardinfo *card = from_timer(card, t, creg_ctrl.cmd_timer);
|
||||
struct creg_cmd *cmd;
|
||||
|
||||
spin_lock(&card->creg_ctrl.lock);
|
||||
cmd = card->creg_ctrl.active_cmd;
|
||||
card->creg_ctrl.active_cmd = NULL;
|
||||
spin_unlock(&card->creg_ctrl.lock);
|
||||
|
||||
if (cmd == NULL) {
|
||||
card->creg_ctrl.creg_stats.creg_timeout++;
|
||||
dev_warn(CARD_TO_DEV(card),
|
||||
"No active command associated with timeout!\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if (cmd->cb)
|
||||
cmd->cb(card, cmd, -ETIMEDOUT);
|
||||
|
||||
kmem_cache_free(creg_cmd_pool, cmd);
|
||||
|
||||
|
||||
spin_lock(&card->creg_ctrl.lock);
|
||||
card->creg_ctrl.active = 0;
|
||||
creg_kick_queue(card);
|
||||
spin_unlock(&card->creg_ctrl.lock);
|
||||
}
|
||||
|
||||
|
||||
static void creg_cmd_done(struct work_struct *work)
|
||||
{
|
||||
struct rsxx_cardinfo *card;
|
||||
struct creg_cmd *cmd;
|
||||
int st = 0;
|
||||
|
||||
card = container_of(work, struct rsxx_cardinfo,
|
||||
creg_ctrl.done_work);
|
||||
|
||||
/*
|
||||
* The timer could not be cancelled for some reason,
|
||||
* race to pop the active command.
|
||||
*/
|
||||
if (del_timer_sync(&card->creg_ctrl.cmd_timer) == 0)
|
||||
card->creg_ctrl.creg_stats.failed_cancel_timer++;
|
||||
|
||||
spin_lock_bh(&card->creg_ctrl.lock);
|
||||
cmd = card->creg_ctrl.active_cmd;
|
||||
card->creg_ctrl.active_cmd = NULL;
|
||||
spin_unlock_bh(&card->creg_ctrl.lock);
|
||||
|
||||
if (cmd == NULL) {
|
||||
dev_err(CARD_TO_DEV(card),
|
||||
"Spurious creg interrupt!\n");
|
||||
return;
|
||||
}
|
||||
|
||||
card->creg_ctrl.creg_stats.stat = ioread32(card->regmap + CREG_STAT);
|
||||
cmd->status = card->creg_ctrl.creg_stats.stat;
|
||||
if ((cmd->status & CREG_STAT_STATUS_MASK) == 0) {
|
||||
dev_err(CARD_TO_DEV(card),
|
||||
"Invalid status on creg command\n");
|
||||
/*
|
||||
* At this point we're probably reading garbage from HW. Don't
|
||||
* do anything else that could mess up the system and let
|
||||
* the sync function return an error.
|
||||
*/
|
||||
st = -EIO;
|
||||
goto creg_done;
|
||||
} else if (cmd->status & CREG_STAT_ERROR) {
|
||||
st = -EIO;
|
||||
}
|
||||
|
||||
if (cmd->op == CREG_OP_READ) {
|
||||
unsigned int cnt8 = ioread32(card->regmap + CREG_CNT);
|
||||
|
||||
/* Paranoid Sanity Checks */
|
||||
if (!cmd->buf) {
|
||||
dev_err(CARD_TO_DEV(card),
|
||||
"Buffer not given for read.\n");
|
||||
st = -EIO;
|
||||
goto creg_done;
|
||||
}
|
||||
if (cnt8 != cmd->cnt8) {
|
||||
dev_err(CARD_TO_DEV(card),
|
||||
"count mismatch\n");
|
||||
st = -EIO;
|
||||
goto creg_done;
|
||||
}
|
||||
|
||||
st = copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream);
|
||||
}
|
||||
|
||||
creg_done:
|
||||
if (cmd->cb)
|
||||
cmd->cb(card, cmd, st);
|
||||
|
||||
kmem_cache_free(creg_cmd_pool, cmd);
|
||||
|
||||
spin_lock_bh(&card->creg_ctrl.lock);
|
||||
card->creg_ctrl.active = 0;
|
||||
creg_kick_queue(card);
|
||||
spin_unlock_bh(&card->creg_ctrl.lock);
|
||||
}
|
||||
|
||||
static void creg_reset(struct rsxx_cardinfo *card)
|
||||
{
|
||||
struct creg_cmd *cmd = NULL;
|
||||
struct creg_cmd *tmp;
|
||||
unsigned long flags;
|
||||
|
||||
/*
|
||||
* mutex_trylock is used here because if reset_lock is taken then a
|
||||
* reset is already happening. So, we can just go ahead and return.
|
||||
*/
|
||||
if (!mutex_trylock(&card->creg_ctrl.reset_lock))
|
||||
return;
|
||||
|
||||
card->creg_ctrl.reset = 1;
|
||||
spin_lock_irqsave(&card->irq_lock, flags);
|
||||
rsxx_disable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
|
||||
spin_unlock_irqrestore(&card->irq_lock, flags);
|
||||
|
||||
dev_warn(CARD_TO_DEV(card),
|
||||
"Resetting creg interface for recovery\n");
|
||||
|
||||
/* Cancel outstanding commands */
|
||||
spin_lock_bh(&card->creg_ctrl.lock);
|
||||
list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
|
||||
list_del(&cmd->list);
|
||||
card->creg_ctrl.q_depth--;
|
||||
if (cmd->cb)
|
||||
cmd->cb(card, cmd, -ECANCELED);
|
||||
kmem_cache_free(creg_cmd_pool, cmd);
|
||||
}
|
||||
|
||||
cmd = card->creg_ctrl.active_cmd;
|
||||
card->creg_ctrl.active_cmd = NULL;
|
||||
if (cmd) {
|
||||
if (timer_pending(&card->creg_ctrl.cmd_timer))
|
||||
del_timer_sync(&card->creg_ctrl.cmd_timer);
|
||||
|
||||
if (cmd->cb)
|
||||
cmd->cb(card, cmd, -ECANCELED);
|
||||
kmem_cache_free(creg_cmd_pool, cmd);
|
||||
|
||||
card->creg_ctrl.active = 0;
|
||||
}
|
||||
spin_unlock_bh(&card->creg_ctrl.lock);
|
||||
|
||||
card->creg_ctrl.reset = 0;
|
||||
spin_lock_irqsave(&card->irq_lock, flags);
|
||||
rsxx_enable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
|
||||
spin_unlock_irqrestore(&card->irq_lock, flags);
|
||||
|
||||
mutex_unlock(&card->creg_ctrl.reset_lock);
|
||||
}
|
||||
|
||||
/* Used for synchronous accesses */
|
||||
struct creg_completion {
|
||||
struct completion *cmd_done;
|
||||
int st;
|
||||
u32 creg_status;
|
||||
};
|
||||
|
||||
static void creg_cmd_done_cb(struct rsxx_cardinfo *card,
|
||||
struct creg_cmd *cmd,
|
||||
int st)
|
||||
{
|
||||
struct creg_completion *cmd_completion;
|
||||
|
||||
cmd_completion = cmd->cb_private;
|
||||
BUG_ON(!cmd_completion);
|
||||
|
||||
cmd_completion->st = st;
|
||||
cmd_completion->creg_status = cmd->status;
|
||||
complete(cmd_completion->cmd_done);
|
||||
}
|
||||
|
||||
static int __issue_creg_rw(struct rsxx_cardinfo *card,
|
||||
unsigned int op,
|
||||
unsigned int addr,
|
||||
unsigned int cnt8,
|
||||
void *buf,
|
||||
int stream,
|
||||
unsigned int *hw_stat)
|
||||
{
|
||||
DECLARE_COMPLETION_ONSTACK(cmd_done);
|
||||
struct creg_completion completion;
|
||||
unsigned long timeout;
|
||||
int st;
|
||||
|
||||
completion.cmd_done = &cmd_done;
|
||||
completion.st = 0;
|
||||
completion.creg_status = 0;
|
||||
|
||||
st = creg_queue_cmd(card, op, addr, cnt8, buf, stream, creg_cmd_done_cb,
|
||||
&completion);
|
||||
if (st)
|
||||
return st;
|
||||
|
||||
/*
|
||||
* This timeout is necessary for unresponsive hardware. The additional
|
||||
* 20 seconds to used to guarantee that each cregs requests has time to
|
||||
* complete.
|
||||
*/
|
||||
timeout = msecs_to_jiffies(CREG_TIMEOUT_MSEC *
|
||||
card->creg_ctrl.q_depth + 20000);
|
||||
|
||||
/*
|
||||
* The creg interface is guaranteed to complete. It has a timeout
|
||||
* mechanism that will kick in if hardware does not respond.
|
||||
*/
|
||||
st = wait_for_completion_timeout(completion.cmd_done, timeout);
|
||||
if (st == 0) {
|
||||
/*
|
||||
* This is really bad, because the kernel timer did not
|
||||
* expire and notify us of a timeout!
|
||||
*/
|
||||
dev_crit(CARD_TO_DEV(card),
|
||||
"cregs timer failed\n");
|
||||
creg_reset(card);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
*hw_stat = completion.creg_status;
|
||||
|
||||
if (completion.st) {
|
||||
/*
|
||||
* This read is needed to verify that there has not been any
|
||||
* extreme errors that might have occurred, i.e. EEH. The
|
||||
* function iowrite32 will not detect EEH errors, so it is
|
||||
* necessary that we recover if such an error is the reason
|
||||
* for the timeout. This is a dummy read.
|
||||
*/
|
||||
ioread32(card->regmap + SCRATCH);
|
||||
|
||||
dev_warn(CARD_TO_DEV(card),
|
||||
"creg command failed(%d x%08x)\n",
|
||||
completion.st, addr);
|
||||
return completion.st;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int issue_creg_rw(struct rsxx_cardinfo *card,
|
||||
u32 addr,
|
||||
unsigned int size8,
|
||||
void *data,
|
||||
int stream,
|
||||
int read)
|
||||
{
|
||||
unsigned int hw_stat;
|
||||
unsigned int xfer;
|
||||
unsigned int op;
|
||||
int st;
|
||||
|
||||
op = read ? CREG_OP_READ : CREG_OP_WRITE;
|
||||
|
||||
do {
|
||||
xfer = min_t(unsigned int, size8, MAX_CREG_DATA8);
|
||||
|
||||
st = __issue_creg_rw(card, op, addr, xfer,
|
||||
data, stream, &hw_stat);
|
||||
if (st)
|
||||
return st;
|
||||
|
||||
data = (char *)data + xfer;
|
||||
addr += xfer;
|
||||
size8 -= xfer;
|
||||
} while (size8);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* ---------------------------- Public API ---------------------------------- */
int rsxx_creg_write(struct rsxx_cardinfo *card,
			u32 addr,
			unsigned int size8,
			void *data,
			int byte_stream)
{
	return issue_creg_rw(card, addr, size8, data, byte_stream, 0);
}

int rsxx_creg_read(struct rsxx_cardinfo *card,
			u32 addr,
			unsigned int size8,
			void *data,
			int byte_stream)
{
	return issue_creg_rw(card, addr, size8, data, byte_stream, 1);
}

int rsxx_get_card_state(struct rsxx_cardinfo *card, unsigned int *state)
{
	return rsxx_creg_read(card, CREG_ADD_CARD_STATE,
				  sizeof(*state), state, 0);
}

int rsxx_get_card_size8(struct rsxx_cardinfo *card, u64 *size8)
{
	unsigned int size;
	int st;

	st = rsxx_creg_read(card, CREG_ADD_CARD_SIZE,
				sizeof(size), &size, 0);
	if (st)
		return st;

	*size8 = (u64)size * RSXX_HW_BLK_SIZE;
	return 0;
}

int rsxx_get_num_targets(struct rsxx_cardinfo *card,
			 unsigned int *n_targets)
{
	return rsxx_creg_read(card, CREG_ADD_NUM_TARGETS,
				  sizeof(*n_targets), n_targets, 0);
}

int rsxx_get_card_capabilities(struct rsxx_cardinfo *card,
			       u32 *capabilities)
{
	return rsxx_creg_read(card, CREG_ADD_CAPABILITIES,
				  sizeof(*capabilities), capabilities, 0);
}

int rsxx_issue_card_cmd(struct rsxx_cardinfo *card, u32 cmd)
{
	return rsxx_creg_write(card, CREG_ADD_CARD_CMD,
				   sizeof(cmd), &cmd, 0);
}


/*----------------- HW Log Functions -------------------*/
|
||||
static void hw_log_msg(struct rsxx_cardinfo *card, const char *str, int len)
|
||||
{
|
||||
static char level;
|
||||
|
||||
/*
|
||||
* New messages start with "<#>", where # is the log level. Messages
|
||||
* that extend past the log buffer will use the previous level
|
||||
*/
|
||||
if ((len > 3) && (str[0] == '<') && (str[2] == '>')) {
|
||||
level = str[1];
|
||||
str += 3; /* Skip past the log level. */
|
||||
len -= 3;
|
||||
}
|
||||
|
||||
switch (level) {
|
||||
case '0':
|
||||
dev_emerg(CARD_TO_DEV(card), "HW: %.*s", len, str);
|
||||
break;
|
||||
case '1':
|
||||
dev_alert(CARD_TO_DEV(card), "HW: %.*s", len, str);
|
||||
break;
|
||||
case '2':
|
||||
dev_crit(CARD_TO_DEV(card), "HW: %.*s", len, str);
|
||||
break;
|
||||
case '3':
|
||||
dev_err(CARD_TO_DEV(card), "HW: %.*s", len, str);
|
||||
break;
|
||||
case '4':
|
||||
dev_warn(CARD_TO_DEV(card), "HW: %.*s", len, str);
|
||||
break;
|
||||
case '5':
|
||||
dev_notice(CARD_TO_DEV(card), "HW: %.*s", len, str);
|
||||
break;
|
||||
case '6':
|
||||
dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
|
||||
break;
|
||||
case '7':
|
||||
dev_dbg(CARD_TO_DEV(card), "HW: %.*s", len, str);
|
||||
break;
|
||||
default:
|
||||
dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * substrncpy() copies at most count bytes from src into dest, stopping
 * after a terminating '\0' has been copied. It returns the number of
 * bytes copied to dest (including that '\0', if one was copied).
 */
static int substrncpy(char *dest, const char *src, int count)
{
	int max_cnt = count;

	while (count) {
		count--;
		*dest = *src;
		if (*dest == '\0')
			break;
		src++;
		dest++;
	}
	return max_cnt - count;
}
|
||||
|
||||
|
||||
static void read_hw_log_done(struct rsxx_cardinfo *card,
|
||||
struct creg_cmd *cmd,
|
||||
int st)
|
||||
{
|
||||
char *buf;
|
||||
char *log_str;
|
||||
int cnt;
|
||||
int len;
|
||||
int off;
|
||||
|
||||
buf = cmd->buf;
|
||||
off = 0;
|
||||
|
||||
/* Failed getting the log message */
|
||||
if (st)
|
||||
return;
|
||||
|
||||
while (off < cmd->cnt8) {
|
||||
log_str = &card->log.buf[card->log.buf_len];
|
||||
cnt = min(cmd->cnt8 - off, LOG_BUF_SIZE8 - card->log.buf_len);
|
||||
len = substrncpy(log_str, &buf[off], cnt);
|
||||
|
||||
off += len;
|
||||
card->log.buf_len += len;
|
||||
|
||||
/*
|
||||
* Flush the log if we've hit the end of a message or if we've
|
||||
* run out of buffer space.
|
||||
*/
|
||||
if ((log_str[len - 1] == '\0') ||
|
||||
(card->log.buf_len == LOG_BUF_SIZE8)) {
|
||||
if (card->log.buf_len != 1) /* Don't log blank lines. */
|
||||
hw_log_msg(card, card->log.buf,
|
||||
card->log.buf_len);
|
||||
card->log.buf_len = 0;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if (cmd->status & CREG_STAT_LOG_PENDING)
|
||||
rsxx_read_hw_log(card);
|
||||
}
|
||||
|
||||
int rsxx_read_hw_log(struct rsxx_cardinfo *card)
|
||||
{
|
||||
int st;
|
||||
|
||||
st = creg_queue_cmd(card, CREG_OP_READ, CREG_ADD_LOG,
|
||||
sizeof(card->log.tmp), card->log.tmp,
|
||||
1, read_hw_log_done, NULL);
|
||||
if (st)
|
||||
dev_err(CARD_TO_DEV(card),
|
||||
"Failed getting log text\n");
|
||||
|
||||
return st;
|
||||
}
|
||||
|
||||
/*-------------- IOCTL REG Access ------------------*/
static int issue_reg_cmd(struct rsxx_cardinfo *card,
			 struct rsxx_reg_access *cmd,
			 int read)
{
	unsigned int op = read ? CREG_OP_READ : CREG_OP_WRITE;

	return __issue_creg_rw(card, op, cmd->addr, cmd->cnt, cmd->data,
			       cmd->stream, &cmd->stat);
}

int rsxx_reg_access(struct rsxx_cardinfo *card,
			struct rsxx_reg_access __user *ucmd,
			int read)
{
	struct rsxx_reg_access cmd;
	int st;

	st = copy_from_user(&cmd, ucmd, sizeof(cmd));
	if (st)
		return -EFAULT;

	if (cmd.cnt > RSXX_MAX_REG_CNT)
		return -EFAULT;

	st = issue_reg_cmd(card, &cmd, read);
	if (st)
		return st;

	st = put_user(cmd.stat, &ucmd->stat);
	if (st)
		return -EFAULT;

	if (read) {
		st = copy_to_user(ucmd->data, cmd.data, cmd.cnt);
		if (st)
			return -EFAULT;
	}

	return 0;
}
|
||||
|
||||
void rsxx_eeh_save_issued_creg(struct rsxx_cardinfo *card)
|
||||
{
|
||||
struct creg_cmd *cmd = NULL;
|
||||
|
||||
cmd = card->creg_ctrl.active_cmd;
|
||||
card->creg_ctrl.active_cmd = NULL;
|
||||
|
||||
if (cmd) {
|
||||
del_timer_sync(&card->creg_ctrl.cmd_timer);
|
||||
|
||||
spin_lock_bh(&card->creg_ctrl.lock);
|
||||
list_add(&cmd->list, &card->creg_ctrl.queue);
|
||||
card->creg_ctrl.q_depth++;
|
||||
card->creg_ctrl.active = 0;
|
||||
spin_unlock_bh(&card->creg_ctrl.lock);
|
||||
}
|
||||
}
|
||||
|
||||
void rsxx_kick_creg_queue(struct rsxx_cardinfo *card)
|
||||
{
|
||||
spin_lock_bh(&card->creg_ctrl.lock);
|
||||
if (!list_empty(&card->creg_ctrl.queue))
|
||||
creg_kick_queue(card);
|
||||
spin_unlock_bh(&card->creg_ctrl.lock);
|
||||
}
|
||||
|
||||
/*------------ Initialization & Setup --------------*/
|
||||
int rsxx_creg_setup(struct rsxx_cardinfo *card)
|
||||
{
|
||||
card->creg_ctrl.active_cmd = NULL;
|
||||
|
||||
card->creg_ctrl.creg_wq =
|
||||
create_singlethread_workqueue(DRIVER_NAME"_creg");
|
||||
if (!card->creg_ctrl.creg_wq)
|
||||
return -ENOMEM;
|
||||
|
||||
INIT_WORK(&card->creg_ctrl.done_work, creg_cmd_done);
|
||||
mutex_init(&card->creg_ctrl.reset_lock);
|
||||
INIT_LIST_HEAD(&card->creg_ctrl.queue);
|
||||
spin_lock_init(&card->creg_ctrl.lock);
|
||||
timer_setup(&card->creg_ctrl.cmd_timer, creg_cmd_timed_out, 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void rsxx_creg_destroy(struct rsxx_cardinfo *card)
|
||||
{
|
||||
struct creg_cmd *cmd;
|
||||
struct creg_cmd *tmp;
|
||||
int cnt = 0;
|
||||
|
||||
/* Cancel outstanding commands */
|
||||
spin_lock_bh(&card->creg_ctrl.lock);
|
||||
list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
|
||||
list_del(&cmd->list);
|
||||
if (cmd->cb)
|
||||
cmd->cb(card, cmd, -ECANCELED);
|
||||
kmem_cache_free(creg_cmd_pool, cmd);
|
||||
cnt++;
|
||||
}
|
||||
|
||||
if (cnt)
|
||||
dev_info(CARD_TO_DEV(card),
|
||||
"Canceled %d queue creg commands\n", cnt);
|
||||
|
||||
cmd = card->creg_ctrl.active_cmd;
|
||||
card->creg_ctrl.active_cmd = NULL;
|
||||
if (cmd) {
|
||||
if (timer_pending(&card->creg_ctrl.cmd_timer))
|
||||
del_timer_sync(&card->creg_ctrl.cmd_timer);
|
||||
|
||||
if (cmd->cb)
|
||||
cmd->cb(card, cmd, -ECANCELED);
|
||||
dev_info(CARD_TO_DEV(card),
|
||||
"Canceled active creg command\n");
|
||||
kmem_cache_free(creg_cmd_pool, cmd);
|
||||
}
|
||||
spin_unlock_bh(&card->creg_ctrl.lock);
|
||||
|
||||
cancel_work_sync(&card->creg_ctrl.done_work);
|
||||
}
|
||||
|
||||
|
||||
int rsxx_creg_init(void)
|
||||
{
|
||||
creg_cmd_pool = KMEM_CACHE(creg_cmd, SLAB_HWCACHE_ALIGN);
|
||||
if (!creg_cmd_pool)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void rsxx_creg_cleanup(void)
|
||||
{
|
||||
kmem_cache_destroy(creg_cmd_pool);
|
||||
}
|
|
@@ -1,306 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Filename: dev.c
 *
 * Authors: Joshua Morris <josh.h.morris@us.ibm.com>
 *	Philip Kelleher <pjk1939@linux.vnet.ibm.com>
 *
 * (C) Copyright 2013 IBM Corporation
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>

#include <linux/hdreg.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/bio.h>

#include <linux/fs.h>

#include "rsxx_priv.h"

static unsigned int blkdev_minors = 64;
module_param(blkdev_minors, uint, 0444);
MODULE_PARM_DESC(blkdev_minors, "Number of minors(partitions)");

/*
 * For now I'm making this tweakable in case any applications hit this limit.
 * If you see a "bio too big" error in the log you will need to raise this
 * value.
 */
static unsigned int blkdev_max_hw_sectors = 1024;
module_param(blkdev_max_hw_sectors, uint, 0444);
MODULE_PARM_DESC(blkdev_max_hw_sectors, "Max hw sectors for a single BIO");

static unsigned int enable_blkdev = 1;
module_param(enable_blkdev, uint, 0444);
MODULE_PARM_DESC(enable_blkdev, "Enable block device interfaces");


struct rsxx_bio_meta {
	struct bio	*bio;
	atomic_t	pending_dmas;
	atomic_t	error;
	unsigned long	start_time;
};
|
||||
|
||||
static struct kmem_cache *bio_meta_pool;
|
||||
|
||||
static void rsxx_submit_bio(struct bio *bio);
|
||||
|
||||
/*----------------- Block Device Operations -----------------*/
|
||||
static int rsxx_blkdev_ioctl(struct block_device *bdev,
|
||||
fmode_t mode,
|
||||
unsigned int cmd,
|
||||
unsigned long arg)
|
||||
{
|
||||
struct rsxx_cardinfo *card = bdev->bd_disk->private_data;
|
||||
|
||||
switch (cmd) {
|
||||
case RSXX_GETREG:
|
||||
return rsxx_reg_access(card, (void __user *)arg, 1);
|
||||
case RSXX_SETREG:
|
||||
return rsxx_reg_access(card, (void __user *)arg, 0);
|
||||
}
|
||||
|
||||
return -ENOTTY;
|
||||
}
|
||||
|
||||
static int rsxx_getgeo(struct block_device *bdev, struct hd_geometry *geo)
|
||||
{
|
||||
struct rsxx_cardinfo *card = bdev->bd_disk->private_data;
|
||||
u64 blocks = card->size8 >> 9;
|
||||
|
||||
/*
|
||||
* get geometry: Fake it. I haven't found any drivers that set
|
||||
* geo->start, so we won't either.
|
||||
*/
|
||||
if (card->size8) {
|
||||
geo->heads = 64;
|
||||
geo->sectors = 16;
|
||||
do_div(blocks, (geo->heads * geo->sectors));
|
||||
geo->cylinders = blocks;
|
||||
} else {
|
||||
geo->heads = 0;
|
||||
geo->sectors = 0;
|
||||
geo->cylinders = 0;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct block_device_operations rsxx_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.submit_bio = rsxx_submit_bio,
|
||||
.getgeo = rsxx_getgeo,
|
||||
.ioctl = rsxx_blkdev_ioctl,
|
||||
};
|
||||
|
||||
static void bio_dma_done_cb(struct rsxx_cardinfo *card,
|
||||
void *cb_data,
|
||||
unsigned int error)
|
||||
{
|
||||
struct rsxx_bio_meta *meta = cb_data;
|
||||
|
||||
if (error)
|
||||
atomic_set(&meta->error, 1);
|
||||
|
||||
if (atomic_dec_and_test(&meta->pending_dmas)) {
|
||||
if (!card->eeh_state && card->gendisk)
|
||||
bio_end_io_acct(meta->bio, meta->start_time);
|
||||
|
||||
if (atomic_read(&meta->error))
|
||||
bio_io_error(meta->bio);
|
||||
else
|
||||
bio_endio(meta->bio);
|
||||
kmem_cache_free(bio_meta_pool, meta);
|
||||
}
|
||||
}
|
||||
|
||||
static void rsxx_submit_bio(struct bio *bio)
|
||||
{
|
||||
struct rsxx_cardinfo *card = bio->bi_bdev->bd_disk->private_data;
|
||||
struct rsxx_bio_meta *bio_meta;
|
||||
blk_status_t st = BLK_STS_IOERR;
|
||||
|
||||
blk_queue_split(&bio);
|
||||
|
||||
might_sleep();
|
||||
|
||||
if (!card)
|
||||
goto req_err;
|
||||
|
||||
if (bio_end_sector(bio) > get_capacity(card->gendisk))
|
||||
goto req_err;
|
||||
|
||||
if (unlikely(card->halt))
|
||||
goto req_err;
|
||||
|
||||
if (unlikely(card->dma_fault))
|
||||
goto req_err;
|
||||
|
||||
if (bio->bi_iter.bi_size == 0) {
|
||||
dev_err(CARD_TO_DEV(card), "size zero BIO!\n");
|
||||
goto req_err;
|
||||
}
|
||||
|
||||
bio_meta = kmem_cache_alloc(bio_meta_pool, GFP_KERNEL);
|
||||
if (!bio_meta) {
|
||||
st = BLK_STS_RESOURCE;
|
||||
goto req_err;
|
||||
}
|
||||
|
||||
bio_meta->bio = bio;
|
||||
atomic_set(&bio_meta->error, 0);
|
||||
atomic_set(&bio_meta->pending_dmas, 0);
|
||||
|
||||
if (!unlikely(card->halt))
|
||||
bio_meta->start_time = bio_start_io_acct(bio);
|
||||
|
||||
dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n",
|
||||
bio_data_dir(bio) ? 'W' : 'R', bio_meta,
|
||||
(u64)bio->bi_iter.bi_sector << 9, bio->bi_iter.bi_size);
|
||||
|
||||
st = rsxx_dma_queue_bio(card, bio, &bio_meta->pending_dmas,
|
||||
bio_dma_done_cb, bio_meta);
|
||||
if (st)
|
||||
goto queue_err;
|
||||
|
||||
return;
|
||||
|
||||
queue_err:
|
||||
kmem_cache_free(bio_meta_pool, bio_meta);
|
||||
req_err:
|
||||
if (st)
|
||||
bio->bi_status = st;
|
||||
bio_endio(bio);
|
||||
}
|
||||
|
||||
/*----------------- Device Setup -------------------*/
|
||||
static bool rsxx_discard_supported(struct rsxx_cardinfo *card)
|
||||
{
|
||||
unsigned char pci_rev;
|
||||
|
||||
pci_read_config_byte(card->dev, PCI_REVISION_ID, &pci_rev);
|
||||
|
||||
return (pci_rev >= RSXX_DISCARD_SUPPORT);
|
||||
}
|
||||
|
||||
int rsxx_attach_dev(struct rsxx_cardinfo *card)
|
||||
{
|
||||
int err = 0;
|
||||
|
||||
mutex_lock(&card->dev_lock);
|
||||
|
||||
/* The block device requires the stripe size from the config. */
|
||||
if (enable_blkdev) {
|
||||
if (card->config_valid)
|
||||
set_capacity(card->gendisk, card->size8 >> 9);
|
||||
else
|
||||
set_capacity(card->gendisk, 0);
|
||||
err = device_add_disk(CARD_TO_DEV(card), card->gendisk, NULL);
|
||||
if (err == 0)
|
||||
card->bdev_attached = 1;
|
||||
}
|
||||
|
||||
mutex_unlock(&card->dev_lock);
|
||||
|
||||
if (err)
|
||||
blk_cleanup_disk(card->gendisk);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
void rsxx_detach_dev(struct rsxx_cardinfo *card)
|
||||
{
|
||||
mutex_lock(&card->dev_lock);
|
||||
|
||||
if (card->bdev_attached) {
|
||||
del_gendisk(card->gendisk);
|
||||
card->bdev_attached = 0;
|
||||
}
|
||||
|
||||
mutex_unlock(&card->dev_lock);
|
||||
}
|
||||
|
||||
int rsxx_setup_dev(struct rsxx_cardinfo *card)
|
||||
{
|
||||
unsigned short blk_size;
|
||||
|
||||
mutex_init(&card->dev_lock);
|
||||
|
||||
if (!enable_blkdev)
|
||||
return 0;
|
||||
|
||||
card->major = register_blkdev(0, DRIVER_NAME);
|
||||
if (card->major < 0) {
|
||||
dev_err(CARD_TO_DEV(card), "Failed to get major number\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
card->gendisk = blk_alloc_disk(blkdev_minors);
|
||||
if (!card->gendisk) {
|
||||
dev_err(CARD_TO_DEV(card), "Failed disk alloc\n");
|
||||
unregister_blkdev(card->major, DRIVER_NAME);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if (card->config_valid) {
|
||||
blk_size = card->config.data.block_size;
|
||||
blk_queue_dma_alignment(card->gendisk->queue, blk_size - 1);
|
||||
blk_queue_logical_block_size(card->gendisk->queue, blk_size);
|
||||
}
|
||||
|
||||
blk_queue_max_hw_sectors(card->gendisk->queue, blkdev_max_hw_sectors);
|
||||
blk_queue_physical_block_size(card->gendisk->queue, RSXX_HW_BLK_SIZE);
|
||||
|
||||
blk_queue_flag_set(QUEUE_FLAG_NONROT, card->gendisk->queue);
|
||||
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, card->gendisk->queue);
|
||||
if (rsxx_discard_supported(card)) {
|
||||
blk_queue_flag_set(QUEUE_FLAG_DISCARD, card->gendisk->queue);
|
||||
blk_queue_max_discard_sectors(card->gendisk->queue,
|
||||
RSXX_HW_BLK_SIZE >> 9);
|
||||
card->gendisk->queue->limits.discard_granularity =
|
||||
RSXX_HW_BLK_SIZE;
|
||||
card->gendisk->queue->limits.discard_alignment =
|
||||
RSXX_HW_BLK_SIZE;
|
||||
}
|
||||
|
||||
snprintf(card->gendisk->disk_name, sizeof(card->gendisk->disk_name),
|
||||
"rsxx%d", card->disk_id);
|
||||
card->gendisk->major = card->major;
|
||||
card->gendisk->minors = blkdev_minors;
|
||||
card->gendisk->fops = &rsxx_fops;
|
||||
card->gendisk->private_data = card;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void rsxx_destroy_dev(struct rsxx_cardinfo *card)
|
||||
{
|
||||
if (!enable_blkdev)
|
||||
return;
|
||||
|
||||
blk_cleanup_disk(card->gendisk);
|
||||
card->gendisk = NULL;
|
||||
unregister_blkdev(card->major, DRIVER_NAME);
|
||||
}
|
||||
|
||||
int rsxx_dev_init(void)
|
||||
{
|
||||
bio_meta_pool = KMEM_CACHE(rsxx_bio_meta, SLAB_HWCACHE_ALIGN);
|
||||
if (!bio_meta_pool)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void rsxx_dev_cleanup(void)
|
||||
{
|
||||
kmem_cache_destroy(bio_meta_pool);
|
||||
}
|
||||
|
||||
|
File diff suppressed because it is too large
|
@@ -1,33 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Filename: rsxx.h
 *
 * Authors: Joshua Morris <josh.h.morris@us.ibm.com>
 *	Philip Kelleher <pjk1939@linux.vnet.ibm.com>
 *
 * (C) Copyright 2013 IBM Corporation
 */

#ifndef __RSXX_H__
#define __RSXX_H__

/*----------------- IOCTL Definitions -------------------*/

#define RSXX_MAX_DATA 8

struct rsxx_reg_access {
	__u32 addr;
	__u32 cnt;
	__u32 stat;
	__u32 stream;
	__u32 data[RSXX_MAX_DATA];
};

#define RSXX_MAX_REG_CNT	(RSXX_MAX_DATA * (sizeof(__u32)))

#define RSXX_IOC_MAGIC 'r'

#define RSXX_GETREG _IOWR(RSXX_IOC_MAGIC, 0x20, struct rsxx_reg_access)
#define RSXX_SETREG _IOWR(RSXX_IOC_MAGIC, 0x21, struct rsxx_reg_access)

#endif /* __RSXX_H_ */
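
For context, a minimal user-space sketch of how this (now removed) register-access ioctl could have been driven. Everything here is illustrative rather than part of the patch: the device node name /dev/rsxx0 (derived from the "rsxx%d" disk_name in dev.c), building against a local copy of the rsxx.h definitions above, and the CREG_ADD_CARD_STATE address taken from rsxx_priv.h further down.

/* sketch only: read the card state register through RSXX_GETREG */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>	/* __u32 used by rsxx.h */
#include "rsxx.h"		/* local copy of the header shown above */

int main(void)
{
	struct rsxx_reg_access reg = {
		.addr	= 0x80001004,	/* CREG_ADD_CARD_STATE */
		.cnt	= sizeof(__u32),/* one 32-bit register */
		.stream	= 0,
	};
	int fd = open("/dev/rsxx0", O_RDWR);	/* first rsxx card, assumed name */

	if (fd < 0 || ioctl(fd, RSXX_GETREG, &reg) < 0) {
		perror("RSXX_GETREG");
		return 1;
	}
	printf("card state: 0x%x (creg stat 0x%x)\n", reg.data[0], reg.stat);
	close(fd);
	return 0;
}

RSXX_SETREG would follow the same pattern, with data[] filled in before the call; the driver rejects cnt values above RSXX_MAX_REG_CNT.
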
@ -1,58 +0,0 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
||||
/*
|
||||
* Filename: rsXX_cfg.h
|
||||
*
|
||||
* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
|
||||
* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
|
||||
*
|
||||
* (C) Copyright 2013 IBM Corporation
|
||||
*/
|
||||
|
||||
#ifndef __RSXX_CFG_H__
|
||||
#define __RSXX_CFG_H__
|
||||
|
||||
/* NOTE: Config values will be saved in network byte order (i.e. Big endian) */
|
||||
#include <linux/types.h>
|
||||
|
||||
/*
|
||||
* The card config version must match the driver's expected version. If it does
|
||||
* not, the DMA interfaces will not be attached and the user will need to
|
||||
* initialize/upgrade the card configuration using the card config utility.
|
||||
*/
|
||||
#define RSXX_CFG_VERSION 4
|
||||
|
||||
struct card_cfg_hdr {
|
||||
__u32 version;
|
||||
__u32 crc;
|
||||
};
|
||||
|
||||
struct card_cfg_data {
|
||||
__u32 block_size;
|
||||
__u32 stripe_size;
|
||||
__u32 vendor_id;
|
||||
__u32 cache_order;
|
||||
struct {
|
||||
__u32 mode; /* Disabled, manual, auto-tune... */
|
||||
__u32 count; /* Number of intr to coalesce */
|
||||
__u32 latency;/* Max wait time (in ns) */
|
||||
} intr_coal;
|
||||
};
|
||||
|
||||
struct rsxx_card_cfg {
|
||||
struct card_cfg_hdr hdr;
|
||||
struct card_cfg_data data;
|
||||
};
|
||||
|
||||
/* Vendor ID Values */
|
||||
#define RSXX_VENDOR_ID_IBM 0
|
||||
#define RSXX_VENDOR_ID_DSI 1
|
||||
#define RSXX_VENDOR_COUNT 2
|
||||
|
||||
/* Interrupt Coalescing Values */
|
||||
#define RSXX_INTR_COAL_DISABLED 0
|
||||
#define RSXX_INTR_COAL_EXPLICIT 1
|
||||
#define RSXX_INTR_COAL_AUTO_TUNE 2
|
||||
|
||||
|
||||
#endif /* __RSXX_CFG_H__ */
|
||||
|
|
@ -1,418 +0,0 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
||||
/*
|
||||
* Filename: rsxx_priv.h
|
||||
*
|
||||
* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
|
||||
* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
|
||||
*
|
||||
* (C) Copyright 2013 IBM Corporation
|
||||
*/
|
||||
|
||||
#ifndef __RSXX_PRIV_H__
|
||||
#define __RSXX_PRIV_H__
|
||||
|
||||
#include <linux/semaphore.h>
|
||||
|
||||
#include <linux/fs.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/sysfs.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/bio.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/timer.h>
|
||||
#include <linux/ioctl.h>
|
||||
#include <linux/delay.h>
|
||||
|
||||
#include "rsxx.h"
|
||||
#include "rsxx_cfg.h"
|
||||
|
||||
struct proc_cmd;
|
||||
|
||||
#define PCI_DEVICE_ID_FS70_FLASH 0x04A9
|
||||
#define PCI_DEVICE_ID_FS80_FLASH 0x04AA
|
||||
|
||||
#define RS70_PCI_REV_SUPPORTED 4
|
||||
|
||||
#define DRIVER_NAME "rsxx"
|
||||
#define DRIVER_VERSION "4.0.3.2516"
|
||||
|
||||
/* Block size is 4096 */
|
||||
#define RSXX_HW_BLK_SHIFT 12
|
||||
#define RSXX_HW_BLK_SIZE (1 << RSXX_HW_BLK_SHIFT)
|
||||
#define RSXX_HW_BLK_MASK (RSXX_HW_BLK_SIZE - 1)
|
||||
|
||||
#define MAX_CREG_DATA8 32
|
||||
#define LOG_BUF_SIZE8 128
|
||||
|
||||
#define RSXX_MAX_OUTSTANDING_CMDS 255
|
||||
#define RSXX_CS_IDX_MASK 0xff
|
||||
|
||||
#define STATUS_BUFFER_SIZE8 4096
|
||||
#define COMMAND_BUFFER_SIZE8 4096
|
||||
|
||||
#define RSXX_MAX_TARGETS 8
|
||||
|
||||
struct dma_tracker_list;
|
||||
|
||||
/* DMA Command/Status Buffer structure */
|
||||
struct rsxx_cs_buffer {
|
||||
dma_addr_t dma_addr;
|
||||
void *buf;
|
||||
u32 idx;
|
||||
};
|
||||
|
||||
struct rsxx_dma_stats {
|
||||
u32 crc_errors;
|
||||
u32 hard_errors;
|
||||
u32 soft_errors;
|
||||
u32 writes_issued;
|
||||
u32 writes_failed;
|
||||
u32 reads_issued;
|
||||
u32 reads_failed;
|
||||
u32 reads_retried;
|
||||
u32 discards_issued;
|
||||
u32 discards_failed;
|
||||
u32 done_rescheduled;
|
||||
u32 issue_rescheduled;
|
||||
u32 dma_sw_err;
|
||||
u32 dma_hw_fault;
|
||||
u32 dma_cancelled;
|
||||
u32 sw_q_depth; /* Number of DMAs on the SW queue. */
|
||||
atomic_t hw_q_depth; /* Number of DMAs queued to HW. */
|
||||
};
|
||||
|
||||
struct rsxx_dma_ctrl {
|
||||
struct rsxx_cardinfo *card;
|
||||
int id;
|
||||
void __iomem *regmap;
|
||||
struct rsxx_cs_buffer status;
|
||||
struct rsxx_cs_buffer cmd;
|
||||
u16 e_cnt;
|
||||
spinlock_t queue_lock;
|
||||
struct list_head queue;
|
||||
struct workqueue_struct *issue_wq;
|
||||
struct work_struct issue_dma_work;
|
||||
struct workqueue_struct *done_wq;
|
||||
struct work_struct dma_done_work;
|
||||
struct timer_list activity_timer;
|
||||
struct dma_tracker_list *trackers;
|
||||
struct rsxx_dma_stats stats;
|
||||
struct mutex work_lock;
|
||||
};
|
||||
|
||||
struct rsxx_cardinfo {
|
||||
struct pci_dev *dev;
|
||||
unsigned int halt;
|
||||
unsigned int eeh_state;
|
||||
|
||||
void __iomem *regmap;
|
||||
spinlock_t irq_lock;
|
||||
unsigned int isr_mask;
|
||||
unsigned int ier_mask;
|
||||
|
||||
struct rsxx_card_cfg config;
|
||||
int config_valid;
|
||||
|
||||
/* Embedded CPU Communication */
|
||||
struct {
|
||||
spinlock_t lock;
|
||||
bool active;
|
||||
struct creg_cmd *active_cmd;
|
||||
struct workqueue_struct *creg_wq;
|
||||
struct work_struct done_work;
|
||||
struct list_head queue;
|
||||
unsigned int q_depth;
|
||||
/* Cache the creg status to prevent ioreads */
|
||||
struct {
|
||||
u32 stat;
|
||||
u32 failed_cancel_timer;
|
||||
u32 creg_timeout;
|
||||
} creg_stats;
|
||||
struct timer_list cmd_timer;
|
||||
struct mutex reset_lock;
|
||||
int reset;
|
||||
} creg_ctrl;
|
||||
|
||||
struct {
|
||||
char tmp[MAX_CREG_DATA8];
|
||||
char buf[LOG_BUF_SIZE8]; /* terminated */
|
||||
int buf_len;
|
||||
} log;
|
||||
|
||||
struct workqueue_struct *event_wq;
|
||||
struct work_struct event_work;
|
||||
unsigned int state;
|
||||
u64 size8;
|
||||
|
||||
/* Lock the device attach/detach function */
|
||||
struct mutex dev_lock;
|
||||
|
||||
/* Block Device Variables */
|
||||
bool bdev_attached;
|
||||
int disk_id;
|
||||
int major;
|
||||
struct gendisk *gendisk;
|
||||
struct {
|
||||
/* Used to convert a byte address to a device address. */
|
||||
u64 lower_mask;
|
||||
u64 upper_shift;
|
||||
u64 upper_mask;
|
||||
u64 target_mask;
|
||||
u64 target_shift;
|
||||
} _stripe;
|
||||
unsigned int dma_fault;
|
||||
|
||||
int scrub_hard;
|
||||
|
||||
int n_targets;
|
||||
struct rsxx_dma_ctrl *ctrl;
|
||||
|
||||
struct dentry *debugfs_dir;
|
||||
};
|
||||
|
||||
enum rsxx_pci_regmap {
|
||||
HWID = 0x00, /* Hardware Identification Register */
|
||||
SCRATCH = 0x04, /* Scratch/Debug Register */
|
||||
RESET = 0x08, /* Reset Register */
|
||||
ISR = 0x10, /* Interrupt Status Register */
|
||||
IER = 0x14, /* Interrupt Enable Register */
|
||||
IPR = 0x18, /* Interrupt Poll Register */
|
||||
CB_ADD_LO = 0x20, /* Command Host Buffer Address [31:0] */
|
||||
CB_ADD_HI = 0x24, /* Command Host Buffer Address [63:32]*/
|
||||
HW_CMD_IDX = 0x28, /* Hardware Processed Command Index */
|
||||
SW_CMD_IDX = 0x2C, /* Software Processed Command Index */
|
||||
SB_ADD_LO = 0x30, /* Status Host Buffer Address [31:0] */
|
||||
SB_ADD_HI = 0x34, /* Status Host Buffer Address [63:32] */
|
||||
HW_STATUS_CNT = 0x38, /* Hardware Status Counter */
|
||||
SW_STATUS_CNT = 0x3C, /* Deprecated */
|
||||
CREG_CMD = 0x40, /* CPU Command Register */
|
||||
CREG_ADD = 0x44, /* CPU Address Register */
|
||||
CREG_CNT = 0x48, /* CPU Count Register */
|
||||
CREG_STAT = 0x4C, /* CPU Status Register */
|
||||
CREG_DATA0 = 0x50, /* CPU Data Registers */
|
||||
CREG_DATA1 = 0x54,
|
||||
CREG_DATA2 = 0x58,
|
||||
CREG_DATA3 = 0x5C,
|
||||
CREG_DATA4 = 0x60,
|
||||
CREG_DATA5 = 0x64,
|
||||
CREG_DATA6 = 0x68,
|
||||
CREG_DATA7 = 0x6c,
|
||||
INTR_COAL = 0x70, /* Interrupt Coalescing Register */
|
||||
HW_ERROR = 0x74, /* Card Error Register */
|
||||
PCI_DEBUG0 = 0x78, /* PCI Debug Registers */
|
||||
PCI_DEBUG1 = 0x7C,
|
||||
PCI_DEBUG2 = 0x80,
|
||||
PCI_DEBUG3 = 0x84,
|
||||
PCI_DEBUG4 = 0x88,
|
||||
PCI_DEBUG5 = 0x8C,
|
||||
PCI_DEBUG6 = 0x90,
|
||||
PCI_DEBUG7 = 0x94,
|
||||
PCI_POWER_THROTTLE = 0x98,
|
||||
PERF_CTRL = 0x9c,
|
||||
PERF_TIMER_LO = 0xa0,
|
||||
PERF_TIMER_HI = 0xa4,
|
||||
PERF_RD512_LO = 0xa8,
|
||||
PERF_RD512_HI = 0xac,
|
||||
PERF_WR512_LO = 0xb0,
|
||||
PERF_WR512_HI = 0xb4,
|
||||
PCI_RECONFIG = 0xb8,
|
||||
};
|
||||
|
||||
enum rsxx_intr {
|
||||
CR_INTR_DMA0 = 0x00000001,
|
||||
CR_INTR_CREG = 0x00000002,
|
||||
CR_INTR_DMA1 = 0x00000004,
|
||||
CR_INTR_EVENT = 0x00000008,
|
||||
CR_INTR_DMA2 = 0x00000010,
|
||||
CR_INTR_DMA3 = 0x00000020,
|
||||
CR_INTR_DMA4 = 0x00000040,
|
||||
CR_INTR_DMA5 = 0x00000080,
|
||||
CR_INTR_DMA6 = 0x00000100,
|
||||
CR_INTR_DMA7 = 0x00000200,
|
||||
CR_INTR_ALL_C = 0x0000003f,
|
||||
CR_INTR_ALL_G = 0x000003ff,
|
||||
CR_INTR_DMA_ALL = 0x000003f5,
|
||||
CR_INTR_ALL = 0xffffffff,
|
||||
};
|
||||
|
||||
static inline int CR_INTR_DMA(int N)
|
||||
{
|
||||
static const unsigned int _CR_INTR_DMA[] = {
|
||||
CR_INTR_DMA0, CR_INTR_DMA1, CR_INTR_DMA2, CR_INTR_DMA3,
|
||||
CR_INTR_DMA4, CR_INTR_DMA5, CR_INTR_DMA6, CR_INTR_DMA7
|
||||
};
|
||||
return _CR_INTR_DMA[N];
|
||||
}
|
||||
enum rsxx_pci_reset {
|
||||
DMA_QUEUE_RESET = 0x00000001,
|
||||
};
|
||||
|
||||
enum rsxx_hw_fifo_flush {
|
||||
RSXX_FLUSH_BUSY = 0x00000002,
|
||||
RSXX_FLUSH_TIMEOUT = 0x00000004,
|
||||
};
|
||||
|
||||
enum rsxx_pci_revision {
|
||||
RSXX_DISCARD_SUPPORT = 2,
|
||||
RSXX_EEH_SUPPORT = 3,
|
||||
};
|
||||
|
||||
enum rsxx_creg_cmd {
|
||||
CREG_CMD_TAG_MASK = 0x0000FF00,
|
||||
CREG_OP_WRITE = 0x000000C0,
|
||||
CREG_OP_READ = 0x000000E0,
|
||||
};
|
||||
|
||||
enum rsxx_creg_addr {
|
||||
CREG_ADD_CARD_CMD = 0x80001000,
|
||||
CREG_ADD_CARD_STATE = 0x80001004,
|
||||
CREG_ADD_CARD_SIZE = 0x8000100c,
|
||||
CREG_ADD_CAPABILITIES = 0x80001050,
|
||||
CREG_ADD_LOG = 0x80002000,
|
||||
CREG_ADD_NUM_TARGETS = 0x80003000,
|
||||
CREG_ADD_CRAM = 0xA0000000,
|
||||
CREG_ADD_CONFIG = 0xB0000000,
|
||||
};
|
||||
|
||||
enum rsxx_creg_card_cmd {
|
||||
CARD_CMD_STARTUP = 1,
|
||||
CARD_CMD_SHUTDOWN = 2,
|
||||
CARD_CMD_LOW_LEVEL_FORMAT = 3,
|
||||
CARD_CMD_FPGA_RECONFIG_BR = 4,
|
||||
CARD_CMD_FPGA_RECONFIG_MAIN = 5,
|
||||
CARD_CMD_BACKUP = 6,
|
||||
CARD_CMD_RESET = 7,
|
||||
CARD_CMD_deprecated = 8,
|
||||
CARD_CMD_UNINITIALIZE = 9,
|
||||
CARD_CMD_DSTROY_EMERGENCY = 10,
|
||||
CARD_CMD_DSTROY_NORMAL = 11,
|
||||
CARD_CMD_DSTROY_EXTENDED = 12,
|
||||
CARD_CMD_DSTROY_ABORT = 13,
|
||||
};
|
||||
|
||||
enum rsxx_card_state {
|
||||
CARD_STATE_SHUTDOWN = 0x00000001,
|
||||
CARD_STATE_STARTING = 0x00000002,
|
||||
CARD_STATE_FORMATTING = 0x00000004,
|
||||
CARD_STATE_UNINITIALIZED = 0x00000008,
|
||||
CARD_STATE_GOOD = 0x00000010,
|
||||
CARD_STATE_SHUTTING_DOWN = 0x00000020,
|
||||
CARD_STATE_FAULT = 0x00000040,
|
||||
CARD_STATE_RD_ONLY_FAULT = 0x00000080,
|
||||
CARD_STATE_DSTROYING = 0x00000100,
|
||||
};
|
||||
|
||||
enum rsxx_led {
|
||||
LED_DEFAULT = 0x0,
|
||||
LED_IDENTIFY = 0x1,
|
||||
LED_SOAK = 0x2,
|
||||
};
|
||||
|
||||
enum rsxx_creg_flash_lock {
|
||||
CREG_FLASH_LOCK = 1,
|
||||
CREG_FLASH_UNLOCK = 2,
|
||||
};
|
||||
|
||||
enum rsxx_card_capabilities {
|
||||
CARD_CAP_SUBPAGE_WRITES = 0x00000080,
|
||||
};
|
||||
|
||||
enum rsxx_creg_stat {
|
||||
CREG_STAT_STATUS_MASK = 0x00000003,
|
||||
CREG_STAT_SUCCESS = 0x1,
|
||||
CREG_STAT_ERROR = 0x2,
|
||||
CREG_STAT_CHAR_PENDING = 0x00000004, /* Character I/O pending bit */
|
||||
CREG_STAT_LOG_PENDING = 0x00000008, /* HW log message pending bit */
|
||||
CREG_STAT_TAG_MASK = 0x0000ff00,
|
||||
};
|
||||
|
||||
enum rsxx_dma_finish {
|
||||
FREE_DMA = 0x0,
|
||||
COMPLETE_DMA = 0x1,
|
||||
};
|
||||
|
||||
static inline unsigned int CREG_DATA(int N)
|
||||
{
|
||||
return CREG_DATA0 + (N << 2);
|
||||
}
|
||||
|
||||
/*----------------- Convenient Log Wrappers -------------------*/
|
||||
#define CARD_TO_DEV(__CARD) (&(__CARD)->dev->dev)
|
||||
|
||||
/***** config.c *****/
|
||||
int rsxx_load_config(struct rsxx_cardinfo *card);
|
||||
|
||||
/***** core.c *****/
|
||||
void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr);
|
||||
void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr);
|
||||
void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card,
|
||||
unsigned int intr);
|
||||
void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card,
|
||||
unsigned int intr);
|
||||
|
||||
/***** dev.c *****/
|
||||
int rsxx_attach_dev(struct rsxx_cardinfo *card);
|
||||
void rsxx_detach_dev(struct rsxx_cardinfo *card);
|
||||
int rsxx_setup_dev(struct rsxx_cardinfo *card);
|
||||
void rsxx_destroy_dev(struct rsxx_cardinfo *card);
|
||||
int rsxx_dev_init(void);
|
||||
void rsxx_dev_cleanup(void);
|
||||
|
||||
/***** dma.c ****/
|
||||
typedef void (*rsxx_dma_cb)(struct rsxx_cardinfo *card,
|
||||
void *cb_data,
|
||||
unsigned int status);
|
||||
int rsxx_dma_setup(struct rsxx_cardinfo *card);
|
||||
void rsxx_dma_destroy(struct rsxx_cardinfo *card);
|
||||
int rsxx_dma_init(void);
|
||||
int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
|
||||
struct list_head *q,
|
||||
unsigned int done);
|
||||
int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl);
|
||||
void rsxx_dma_cleanup(void);
|
||||
void rsxx_dma_queue_reset(struct rsxx_cardinfo *card);
|
||||
int rsxx_dma_configure(struct rsxx_cardinfo *card);
|
||||
blk_status_t rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
|
||||
struct bio *bio,
|
||||
atomic_t *n_dmas,
|
||||
rsxx_dma_cb cb,
|
||||
void *cb_data);
|
||||
int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl);
|
||||
int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card);
|
||||
int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card);
|
||||
|
||||
/***** cregs.c *****/
|
||||
int rsxx_creg_write(struct rsxx_cardinfo *card, u32 addr,
|
||||
unsigned int size8,
|
||||
void *data,
|
||||
int byte_stream);
|
||||
int rsxx_creg_read(struct rsxx_cardinfo *card,
|
||||
u32 addr,
|
||||
unsigned int size8,
|
||||
void *data,
|
||||
int byte_stream);
|
||||
int rsxx_read_hw_log(struct rsxx_cardinfo *card);
|
||||
int rsxx_get_card_state(struct rsxx_cardinfo *card,
|
||||
unsigned int *state);
|
||||
int rsxx_get_card_size8(struct rsxx_cardinfo *card, u64 *size8);
|
||||
int rsxx_get_num_targets(struct rsxx_cardinfo *card,
|
||||
unsigned int *n_targets);
|
||||
int rsxx_get_card_capabilities(struct rsxx_cardinfo *card,
|
||||
u32 *capabilities);
|
||||
int rsxx_issue_card_cmd(struct rsxx_cardinfo *card, u32 cmd);
|
||||
int rsxx_creg_setup(struct rsxx_cardinfo *card);
|
||||
void rsxx_creg_destroy(struct rsxx_cardinfo *card);
|
||||
int rsxx_creg_init(void);
|
||||
void rsxx_creg_cleanup(void);
|
||||
int rsxx_reg_access(struct rsxx_cardinfo *card,
|
||||
struct rsxx_reg_access __user *ucmd,
|
||||
int read);
|
||||
void rsxx_eeh_save_issued_creg(struct rsxx_cardinfo *card);
|
||||
void rsxx_kick_creg_queue(struct rsxx_cardinfo *card);
|
||||
|
||||
|
||||
|
||||
#endif /* __DRIVERS_BLOCK_RSXX_H__ */
|
|
@@ -574,7 +574,7 @@ static int process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
 	int ret = 0;
 
 	if (WARN(mddev->cluster_info->slot_number - 1 == le32_to_cpu(msg->slot),
-		"node %d received it's own msg\n", le32_to_cpu(msg->slot)))
+		"node %d received its own msg\n", le32_to_cpu(msg->slot)))
 		return -1;
 	switch (le32_to_cpu(msg->type)) {
 	case METADATA_UPDATED:

@@ -418,6 +418,12 @@ check_suspended:
 	rcu_read_lock();
 	if (is_suspended(mddev, bio)) {
 		DEFINE_WAIT(__wait);
+		/* Bail out if REQ_NOWAIT is set for the bio */
+		if (bio->bi_opf & REQ_NOWAIT) {
+			rcu_read_unlock();
+			bio_wouldblock_error(bio);
+			return;
+		}
 		for (;;) {
 			prepare_to_wait(&mddev->sb_wait, &__wait,
 					TASK_UNINTERRUPTIBLE);

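The REQ_NOWAIT handling added here is what lets a non-blocking submitter (io_uring, or preadv2()/pwritev2() with RWF_NOWAIT on an O_DIRECT descriptor) get EAGAIN back instead of sleeping while the array is suspended. A rough user-space sketch of triggering it, assuming a reasonably recent glibc/kernel and an array node named /dev/md0 (both assumptions, not part of the patch):

/* sketch only: issue a non-blocking direct read against an md array */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	struct iovec iov;
	void *buf;
	ssize_t ret;
	int fd = open("/dev/md0", O_RDONLY | O_DIRECT);

	if (fd < 0 || posix_memalign(&buf, 4096, 4096))
		return 1;
	iov.iov_base = buf;
	iov.iov_len  = 4096;

	/* RWF_NOWAIT: fail with EAGAIN instead of sleeping in the md layer */
	ret = preadv2(fd, &iov, 1, 0, RWF_NOWAIT);
	if (ret < 0 && errno == EAGAIN)
		printf("array would have blocked (suspended or barrier active)\n");
	else if (ret < 0)
		perror("preadv2");
	else
		printf("read %zd bytes without blocking\n", ret);

	close(fd);
	free(buf);
	return 0;
}

The QUEUE_FLAG_NOWAIT setup in the md_run() hunk further down is what makes the array advertise this capability in the first place; without it the block layer rejects REQ_NOWAIT I/O up front.
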
@ -3603,6 +3609,7 @@ static struct attribute *rdev_default_attrs[] = {
|
|||
&rdev_ppl_size.attr,
|
||||
NULL,
|
||||
};
|
||||
ATTRIBUTE_GROUPS(rdev_default);
|
||||
static ssize_t
|
||||
rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
|
||||
{
|
||||
|
@ -3652,7 +3659,7 @@ static const struct sysfs_ops rdev_sysfs_ops = {
|
|||
static struct kobj_type rdev_ktype = {
|
||||
.release = rdev_free,
|
||||
.sysfs_ops = &rdev_sysfs_ops,
|
||||
.default_attrs = rdev_default_attrs,
|
||||
.default_groups = rdev_default_groups,
|
||||
};
|
||||
|
||||
int md_rdev_init(struct md_rdev *rdev)
|
||||
|
@ -5788,6 +5795,7 @@ int md_run(struct mddev *mddev)
|
|||
int err;
|
||||
struct md_rdev *rdev;
|
||||
struct md_personality *pers;
|
||||
bool nowait = true;
|
||||
|
||||
if (list_empty(&mddev->disks))
|
||||
/* cannot run an array with no devices.. */
|
||||
|
@ -5858,8 +5866,13 @@ int md_run(struct mddev *mddev)
|
|||
}
|
||||
}
|
||||
sysfs_notify_dirent_safe(rdev->sysfs_state);
|
||||
nowait = nowait && blk_queue_nowait(bdev_get_queue(rdev->bdev));
|
||||
}
|
||||
|
||||
/* Set the NOWAIT flags if all underlying devices support it */
|
||||
if (nowait)
|
||||
blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue);
|
||||
|
||||
if (!bioset_initialized(&mddev->bio_set)) {
|
||||
err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
|
||||
if (err)
|
||||
|
@ -5870,13 +5883,6 @@ int md_run(struct mddev *mddev)
|
|||
if (err)
|
||||
goto exit_bio_set;
|
||||
}
|
||||
if (mddev->level != 1 && mddev->level != 10 &&
|
||||
!bioset_initialized(&mddev->io_acct_set)) {
|
||||
err = bioset_init(&mddev->io_acct_set, BIO_POOL_SIZE,
|
||||
offsetof(struct md_io_acct, bio_clone), 0);
|
||||
if (err)
|
||||
goto exit_sync_set;
|
||||
}
|
||||
|
||||
spin_lock(&pers_lock);
|
||||
pers = find_pers(mddev->level, mddev->clevel);
|
||||
|
@ -6053,9 +6059,6 @@ bitmap_abort:
|
|||
module_put(pers->owner);
|
||||
md_bitmap_destroy(mddev);
|
||||
abort:
|
||||
if (mddev->level != 1 && mddev->level != 10)
|
||||
bioset_exit(&mddev->io_acct_set);
|
||||
exit_sync_set:
|
||||
bioset_exit(&mddev->sync_set);
|
||||
exit_bio_set:
|
||||
bioset_exit(&mddev->bio_set);
|
||||
|
@ -7004,6 +7007,15 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
|
|||
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
|
||||
if (!mddev->thread)
|
||||
md_update_sb(mddev, 1);
|
||||
/*
|
||||
* If the new disk does not support REQ_NOWAIT,
|
||||
* disable on the whole MD.
|
||||
*/
|
||||
if (!blk_queue_nowait(bdev_get_queue(rdev->bdev))) {
|
||||
pr_info("%s: Disabling nowait because %s does not support nowait\n",
|
||||
mdname(mddev), bdevname(rdev->bdev, b));
|
||||
blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->queue);
|
||||
}
|
||||
/*
|
||||
* Kick recovery, maybe this spare has to be added to the
|
||||
* array immediately.
|
||||
|
@ -8402,7 +8414,7 @@ int md_setup_cluster(struct mddev *mddev, int nodes)
|
|||
spin_lock(&pers_lock);
|
||||
/* ensure module won't be unloaded */
|
||||
if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
|
||||
pr_warn("can't find md-cluster module or get it's reference.\n");
|
||||
pr_warn("can't find md-cluster module or get its reference.\n");
|
||||
spin_unlock(&pers_lock);
|
||||
return -ENOENT;
|
||||
}
|
||||
|
@ -8589,6 +8601,23 @@ void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(md_submit_discard_bio);
|
||||
|
||||
int acct_bioset_init(struct mddev *mddev)
|
||||
{
|
||||
int err = 0;
|
||||
|
||||
if (!bioset_initialized(&mddev->io_acct_set))
|
||||
err = bioset_init(&mddev->io_acct_set, BIO_POOL_SIZE,
|
||||
offsetof(struct md_io_acct, bio_clone), 0);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acct_bioset_init);
|
||||
|
||||
void acct_bioset_exit(struct mddev *mddev)
|
||||
{
|
||||
bioset_exit(&mddev->io_acct_set);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acct_bioset_exit);
|
||||
|
||||
static void md_end_io_acct(struct bio *bio)
|
||||
{
|
||||
struct md_io_acct *md_io_acct = bio->bi_private;
|
||||
|
|
|
@ -721,6 +721,8 @@ extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
|
|||
extern void md_finish_reshape(struct mddev *mddev);
|
||||
void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
|
||||
struct bio *bio, sector_t start, sector_t size);
|
||||
int acct_bioset_init(struct mddev *mddev);
|
||||
void acct_bioset_exit(struct mddev *mddev);
|
||||
void md_account_bio(struct mddev *mddev, struct bio **bio);
|
||||
|
||||
extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio);
|
||||
|
|
|
@ -356,7 +356,21 @@ static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks
|
|||
return array_sectors;
|
||||
}
|
||||
|
||||
static void raid0_free(struct mddev *mddev, void *priv);
|
||||
static void free_conf(struct mddev *mddev, struct r0conf *conf)
|
||||
{
|
||||
kfree(conf->strip_zone);
|
||||
kfree(conf->devlist);
|
||||
kfree(conf);
|
||||
mddev->private = NULL;
|
||||
}
|
||||
|
||||
static void raid0_free(struct mddev *mddev, void *priv)
|
||||
{
|
||||
struct r0conf *conf = priv;
|
||||
|
||||
free_conf(mddev, conf);
|
||||
acct_bioset_exit(mddev);
|
||||
}
|
||||
|
||||
static int raid0_run(struct mddev *mddev)
|
||||
{
|
||||
|
@ -370,11 +384,16 @@ static int raid0_run(struct mddev *mddev)
|
|||
if (md_check_no_bitmap(mddev))
|
||||
return -EINVAL;
|
||||
|
||||
if (acct_bioset_init(mddev)) {
|
||||
pr_err("md/raid0:%s: alloc acct bioset failed.\n", mdname(mddev));
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* if private is not null, we are here after takeover */
|
||||
if (mddev->private == NULL) {
|
||||
ret = create_strip_zones(mddev, &conf);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
goto exit_acct_set;
|
||||
mddev->private = conf;
|
||||
}
|
||||
conf = mddev->private;
|
||||
|
@ -413,17 +432,16 @@ static int raid0_run(struct mddev *mddev)
|
|||
dump_zones(mddev);
|
||||
|
||||
ret = md_integrity_register(mddev);
|
||||
if (ret)
|
||||
goto free;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void raid0_free(struct mddev *mddev, void *priv)
|
||||
{
|
||||
struct r0conf *conf = priv;
|
||||
|
||||
kfree(conf->strip_zone);
|
||||
kfree(conf->devlist);
|
||||
kfree(conf);
|
||||
free:
|
||||
free_conf(mddev, conf);
|
||||
exit_acct_set:
|
||||
acct_bioset_exit(mddev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
|
||||
|
|
|
@ -22,12 +22,6 @@
|
|||
|
||||
#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
|
||||
|
||||
/* When there are this many requests queue to be written by
|
||||
* the raid thread, we become 'congested' to provide back-pressure
|
||||
* for writeback.
|
||||
*/
|
||||
static int max_queued_requests = 1024;
|
||||
|
||||
/* for managing resync I/O pages */
|
||||
struct resync_pages {
|
||||
void *raid_bio;
|
||||
|
|
|
@ -929,8 +929,10 @@ static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
|
|||
wake_up(&conf->wait_barrier);
|
||||
}
|
||||
|
||||
static void _wait_barrier(struct r1conf *conf, int idx)
|
||||
static bool _wait_barrier(struct r1conf *conf, int idx, bool nowait)
|
||||
{
|
||||
bool ret = true;
|
||||
|
||||
/*
|
||||
* We need to increase conf->nr_pending[idx] very early here,
|
||||
* then raise_barrier() can be blocked when it waits for
|
||||
|
@ -961,7 +963,7 @@ static void _wait_barrier(struct r1conf *conf, int idx)
|
|||
*/
|
||||
if (!READ_ONCE(conf->array_frozen) &&
|
||||
!atomic_read(&conf->barrier[idx]))
|
||||
return;
|
||||
return ret;
|
||||
|
||||
/*
|
||||
* After holding conf->resync_lock, conf->nr_pending[idx]
|
||||
|
@ -979,18 +981,27 @@ static void _wait_barrier(struct r1conf *conf, int idx)
|
|||
*/
|
||||
wake_up(&conf->wait_barrier);
|
||||
/* Wait for the barrier in same barrier unit bucket to drop. */
|
||||
wait_event_lock_irq(conf->wait_barrier,
|
||||
!conf->array_frozen &&
|
||||
!atomic_read(&conf->barrier[idx]),
|
||||
conf->resync_lock);
|
||||
atomic_inc(&conf->nr_pending[idx]);
|
||||
|
||||
/* Return false when nowait flag is set */
|
||||
if (nowait) {
|
||||
ret = false;
|
||||
} else {
|
||||
wait_event_lock_irq(conf->wait_barrier,
|
||||
!conf->array_frozen &&
|
||||
!atomic_read(&conf->barrier[idx]),
|
||||
conf->resync_lock);
|
||||
atomic_inc(&conf->nr_pending[idx]);
|
||||
}
|
||||
|
||||
atomic_dec(&conf->nr_waiting[idx]);
|
||||
spin_unlock_irq(&conf->resync_lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
|
||||
static bool wait_read_barrier(struct r1conf *conf, sector_t sector_nr, bool nowait)
|
||||
{
|
||||
int idx = sector_to_idx(sector_nr);
|
||||
bool ret = true;
|
||||
|
||||
/*
|
||||
* Very similar to _wait_barrier(). The difference is, for read
|
||||
|
@ -1002,7 +1013,7 @@ static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
|
|||
atomic_inc(&conf->nr_pending[idx]);
|
||||
|
||||
if (!READ_ONCE(conf->array_frozen))
|
||||
return;
|
||||
return ret;
|
||||
|
||||
spin_lock_irq(&conf->resync_lock);
|
||||
atomic_inc(&conf->nr_waiting[idx]);
|
||||
|
@ -1013,19 +1024,28 @@ static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
|
|||
*/
|
||||
wake_up(&conf->wait_barrier);
|
||||
/* Wait for array to be unfrozen */
|
||||
wait_event_lock_irq(conf->wait_barrier,
|
||||
!conf->array_frozen,
|
||||
conf->resync_lock);
|
||||
atomic_inc(&conf->nr_pending[idx]);
|
||||
|
||||
/* Return false when nowait flag is set */
|
||||
if (nowait) {
|
||||
/* Return false when nowait flag is set */
|
||||
ret = false;
|
||||
} else {
|
||||
wait_event_lock_irq(conf->wait_barrier,
|
||||
!conf->array_frozen,
|
||||
conf->resync_lock);
|
||||
atomic_inc(&conf->nr_pending[idx]);
|
||||
}
|
||||
|
||||
atomic_dec(&conf->nr_waiting[idx]);
|
||||
spin_unlock_irq(&conf->resync_lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
|
||||
static bool wait_barrier(struct r1conf *conf, sector_t sector_nr, bool nowait)
|
||||
{
|
||||
int idx = sector_to_idx(sector_nr);
|
||||
|
||||
_wait_barrier(conf, idx);
|
||||
return _wait_barrier(conf, idx, nowait);
|
||||
}
|
||||
|
||||
static void _allow_barrier(struct r1conf *conf, int idx)
|
||||
|
@ -1236,7 +1256,11 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
|
|||
* Still need barrier for READ in case that whole
|
||||
* array is frozen.
|
||||
*/
|
||||
wait_read_barrier(conf, bio->bi_iter.bi_sector);
|
||||
if (!wait_read_barrier(conf, bio->bi_iter.bi_sector,
|
||||
bio->bi_opf & REQ_NOWAIT)) {
|
||||
bio_wouldblock_error(bio);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!r1_bio)
|
||||
r1_bio = alloc_r1bio(mddev, bio);
|
||||
|
@ -1336,6 +1360,10 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
|
|||
bio->bi_iter.bi_sector, bio_end_sector(bio))) {
|
||||
|
||||
DEFINE_WAIT(w);
|
||||
if (bio->bi_opf & REQ_NOWAIT) {
|
||||
bio_wouldblock_error(bio);
|
||||
return;
|
||||
}
|
||||
for (;;) {
|
||||
prepare_to_wait(&conf->wait_barrier,
|
||||
&w, TASK_IDLE);
|
||||
|
@ -1353,17 +1381,15 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
|
|||
* thread has put up a bar for new requests.
|
||||
* Continue immediately if no resync is active currently.
|
||||
*/
|
||||
wait_barrier(conf, bio->bi_iter.bi_sector);
|
||||
if (!wait_barrier(conf, bio->bi_iter.bi_sector,
|
||||
bio->bi_opf & REQ_NOWAIT)) {
|
||||
bio_wouldblock_error(bio);
|
||||
return;
|
||||
}
|
||||
|
||||
r1_bio = alloc_r1bio(mddev, bio);
|
||||
r1_bio->sectors = max_write_sectors;
|
||||
|
||||
if (conf->pending_count >= max_queued_requests) {
|
||||
md_wakeup_thread(mddev->thread);
|
||||
raid1_log(mddev, "wait queued");
|
||||
wait_event(conf->wait_barrier,
|
||||
conf->pending_count < max_queued_requests);
|
||||
}
|
||||
/* first select target devices under rcu_lock and
|
||||
* inc refcount on their rdev. Record them by setting
|
||||
* bios[x] to bio
|
||||
|
@ -1458,9 +1484,14 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
|
|||
rdev_dec_pending(conf->mirrors[j].rdev, mddev);
|
||||
r1_bio->state = 0;
|
||||
allow_barrier(conf, bio->bi_iter.bi_sector);
|
||||
|
||||
if (bio->bi_opf & REQ_NOWAIT) {
|
||||
bio_wouldblock_error(bio);
|
||||
return;
|
||||
}
|
||||
raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
|
||||
md_wait_for_blocked_rdev(blocked_rdev, mddev);
|
||||
wait_barrier(conf, bio->bi_iter.bi_sector);
|
||||
wait_barrier(conf, bio->bi_iter.bi_sector, false);
|
||||
goto retry_write;
|
||||
}
|
||||
|
||||
|
@ -1688,7 +1719,7 @@ static void close_sync(struct r1conf *conf)
|
|||
int idx;
|
||||
|
||||
for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) {
|
||||
_wait_barrier(conf, idx);
|
||||
_wait_barrier(conf, idx, false);
|
||||
_allow_barrier(conf, idx);
|
||||
}
|
||||
|
||||
|
@ -3410,5 +3441,3 @@ MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
|
|||
MODULE_ALIAS("md-personality-3"); /* RAID1 */
|
||||
MODULE_ALIAS("md-raid1");
|
||||
MODULE_ALIAS("md-level-1");
|
||||
|
||||
module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);
|
||||
|
|
|
@ -952,8 +952,10 @@ static void lower_barrier(struct r10conf *conf)
|
|||
wake_up(&conf->wait_barrier);
|
||||
}
|
||||
|
||||
static void wait_barrier(struct r10conf *conf)
|
||||
static bool wait_barrier(struct r10conf *conf, bool nowait)
|
||||
{
|
||||
bool ret = true;
|
||||
|
||||
spin_lock_irq(&conf->resync_lock);
|
||||
if (conf->barrier) {
|
||||
struct bio_list *bio_list = current->bio_list;
|
||||
|
@ -967,27 +969,35 @@ static void wait_barrier(struct r10conf *conf)
|
|||
* that queue to get the nr_pending
|
||||
* count down.
|
||||
*/
|
||||
raid10_log(conf->mddev, "wait barrier");
|
||||
wait_event_lock_irq(conf->wait_barrier,
|
||||
!conf->barrier ||
|
||||
(atomic_read(&conf->nr_pending) &&
|
||||
bio_list &&
|
||||
(!bio_list_empty(&bio_list[0]) ||
|
||||
!bio_list_empty(&bio_list[1]))) ||
|
||||
/* move on if recovery thread is
|
||||
* blocked by us
|
||||
*/
|
||||
(conf->mddev->thread->tsk == current &&
|
||||
test_bit(MD_RECOVERY_RUNNING,
|
||||
&conf->mddev->recovery) &&
|
||||
conf->nr_queued > 0),
|
||||
conf->resync_lock);
|
||||
/* Return false when nowait flag is set */
|
||||
if (nowait) {
|
||||
ret = false;
|
||||
} else {
|
||||
raid10_log(conf->mddev, "wait barrier");
|
||||
wait_event_lock_irq(conf->wait_barrier,
|
||||
!conf->barrier ||
|
||||
(atomic_read(&conf->nr_pending) &&
|
||||
bio_list &&
|
||||
(!bio_list_empty(&bio_list[0]) ||
|
||||
!bio_list_empty(&bio_list[1]))) ||
|
||||
/* move on if recovery thread is
|
||||
* blocked by us
|
||||
*/
|
||||
(conf->mddev->thread->tsk == current &&
|
||||
test_bit(MD_RECOVERY_RUNNING,
|
||||
&conf->mddev->recovery) &&
|
||||
conf->nr_queued > 0),
|
||||
conf->resync_lock);
|
||||
}
|
||||
conf->nr_waiting--;
|
||||
if (!conf->nr_waiting)
|
||||
wake_up(&conf->wait_barrier);
|
||||
}
|
||||
atomic_inc(&conf->nr_pending);
|
||||
/* Only increment nr_pending when we wait */
|
||||
if (ret)
|
||||
atomic_inc(&conf->nr_pending);
|
||||
spin_unlock_irq(&conf->resync_lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void allow_barrier(struct r10conf *conf)
|
||||
|
@ -1098,21 +1108,30 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
|
|||
* currently.
|
||||
* 2. If IO spans the reshape position. Need to wait for reshape to pass.
|
||||
*/
|
||||
static void regular_request_wait(struct mddev *mddev, struct r10conf *conf,
|
||||
static bool regular_request_wait(struct mddev *mddev, struct r10conf *conf,
|
||||
struct bio *bio, sector_t sectors)
|
||||
{
|
||||
wait_barrier(conf);
|
||||
/* Bail out if REQ_NOWAIT is set for the bio */
|
||||
if (!wait_barrier(conf, bio->bi_opf & REQ_NOWAIT)) {
|
||||
bio_wouldblock_error(bio);
|
||||
return false;
|
||||
}
|
||||
while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
|
||||
bio->bi_iter.bi_sector < conf->reshape_progress &&
|
||||
bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
|
||||
raid10_log(conf->mddev, "wait reshape");
|
||||
allow_barrier(conf);
|
||||
if (bio->bi_opf & REQ_NOWAIT) {
|
||||
bio_wouldblock_error(bio);
|
||||
return false;
|
||||
}
|
||||
raid10_log(conf->mddev, "wait reshape");
|
||||
wait_event(conf->wait_barrier,
|
||||
conf->reshape_progress <= bio->bi_iter.bi_sector ||
|
||||
conf->reshape_progress >= bio->bi_iter.bi_sector +
|
||||
sectors);
|
||||
wait_barrier(conf);
|
||||
wait_barrier(conf, false);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
static void raid10_read_request(struct mddev *mddev, struct bio *bio,
|
||||
|
@ -1157,7 +1176,8 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
|
|||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
regular_request_wait(mddev, conf, bio, r10_bio->sectors);
|
||||
if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors))
|
||||
return;
|
||||
rdev = read_balance(conf, r10_bio, &max_sectors);
|
||||
if (!rdev) {
|
||||
if (err_rdev) {
|
||||
|
@ -1179,7 +1199,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
|
|||
bio_chain(split, bio);
|
||||
allow_barrier(conf);
|
||||
submit_bio_noacct(bio);
|
||||
wait_barrier(conf);
|
||||
wait_barrier(conf, false);
|
||||
bio = split;
|
||||
r10_bio->master_bio = bio;
|
||||
r10_bio->sectors = max_sectors;
|
||||
|
@ -1338,7 +1358,7 @@ retry_wait:
|
|||
raid10_log(conf->mddev, "%s wait rdev %d blocked",
|
||||
__func__, blocked_rdev->raid_disk);
|
||||
md_wait_for_blocked_rdev(blocked_rdev, mddev);
|
||||
wait_barrier(conf);
|
||||
wait_barrier(conf, false);
|
||||
goto retry_wait;
|
||||
}
|
||||
}
|
||||
|
@ -1356,6 +1376,11 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
|
|||
bio->bi_iter.bi_sector,
|
||||
bio_end_sector(bio)))) {
|
||||
DEFINE_WAIT(w);
|
||||
/* Bail out if REQ_NOWAIT is set for the bio */
|
||||
if (bio->bi_opf & REQ_NOWAIT) {
|
||||
bio_wouldblock_error(bio);
|
||||
return;
|
||||
}
|
||||
for (;;) {
|
||||
prepare_to_wait(&conf->wait_barrier,
|
||||
&w, TASK_IDLE);
|
||||
|
@ -1368,7 +1393,8 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
|
|||
}
|
||||
|
||||
sectors = r10_bio->sectors;
|
||||
regular_request_wait(mddev, conf, bio, sectors);
|
||||
if (!regular_request_wait(mddev, conf, bio, sectors))
|
||||
return;
|
||||
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
|
||||
(mddev->reshape_backwards
|
||||
? (bio->bi_iter.bi_sector < conf->reshape_safe &&
|
||||
|
@ -1380,6 +1406,11 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
|
|||
set_mask_bits(&mddev->sb_flags, 0,
|
||||
BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
|
||||
md_wakeup_thread(mddev->thread);
|
||||
if (bio->bi_opf & REQ_NOWAIT) {
|
||||
allow_barrier(conf);
|
||||
bio_wouldblock_error(bio);
|
||||
return;
|
||||
}
|
||||
raid10_log(conf->mddev, "wait reshape metadata");
|
||||
wait_event(mddev->sb_wait,
|
||||
!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
|
||||
|
@ -1387,12 +1418,6 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
|
|||
conf->reshape_safe = mddev->reshape_position;
|
||||
}
|
||||
|
||||
if (conf->pending_count >= max_queued_requests) {
|
||||
md_wakeup_thread(mddev->thread);
|
||||
raid10_log(mddev, "wait queued");
|
||||
wait_event(conf->wait_barrier,
|
||||
conf->pending_count < max_queued_requests);
|
||||
}
|
||||
/* first select target devices under rcu_lock and
|
||||
* inc refcount on their rdev. Record them by setting
|
||||
* bios[x] to bio
|
||||
|
@ -1482,7 +1507,7 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
|
|||
bio_chain(split, bio);
|
||||
allow_barrier(conf);
|
||||
submit_bio_noacct(bio);
|
||||
wait_barrier(conf);
|
||||
wait_barrier(conf, false);
|
||||
bio = split;
|
||||
r10_bio->master_bio = bio;
|
||||
}
|
||||
|
@ -1607,7 +1632,11 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
|
|||
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
|
||||
return -EAGAIN;
|
||||
|
||||
wait_barrier(conf);
|
||||
if (WARN_ON_ONCE(bio->bi_opf & REQ_NOWAIT)) {
|
||||
bio_wouldblock_error(bio);
|
||||
return 0;
|
||||
}
|
||||
wait_barrier(conf, false);
|
||||
|
||||
/*
|
||||
* Check reshape again to avoid reshape happens after checking
|
||||
|
@ -1649,7 +1678,7 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
|
|||
allow_barrier(conf);
|
||||
/* Resend the fist split part */
|
||||
submit_bio_noacct(split);
|
||||
wait_barrier(conf);
|
||||
wait_barrier(conf, false);
|
||||
}
|
||||
div_u64_rem(bio_end, stripe_size, &remainder);
|
||||
if (remainder) {
|
||||
|
@ -1660,7 +1689,7 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
|
|||
/* Resend the second split part */
|
||||
submit_bio_noacct(bio);
|
||||
bio = split;
|
||||
wait_barrier(conf);
|
||||
wait_barrier(conf, false);
|
||||
}
|
||||
|
||||
bio_start = bio->bi_iter.bi_sector;
|
||||
|
@ -1816,7 +1845,7 @@ retry_discard:
|
|||
end_disk_offset += geo->stride;
|
||||
atomic_inc(&first_r10bio->remaining);
|
||||
raid_end_discard_bio(r10_bio);
|
||||
wait_barrier(conf);
|
||||
wait_barrier(conf, false);
|
||||
goto retry_discard;
|
||||
}
|
||||
|
||||
|
@ -2011,7 +2040,7 @@ static void print_conf(struct r10conf *conf)
|
|||
|
||||
static void close_sync(struct r10conf *conf)
|
||||
{
|
||||
wait_barrier(conf);
|
||||
wait_barrier(conf, false);
|
||||
allow_barrier(conf);
|
||||
|
||||
mempool_exit(&conf->r10buf_pool);
|
||||
|
@ -4819,7 +4848,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
|
|||
if (need_flush ||
|
||||
time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
|
||||
/* Need to update reshape_position in metadata */
|
||||
wait_barrier(conf);
|
||||
wait_barrier(conf, false);
|
||||
mddev->reshape_position = conf->reshape_progress;
|
||||
if (mddev->reshape_backwards)
|
||||
mddev->curr_resync_completed = raid10_size(mddev, 0, 0)
|
||||
|
@ -5242,5 +5271,3 @@ MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
|
|||
MODULE_ALIAS("md-personality-9"); /* RAID10 */
|
||||
MODULE_ALIAS("md-raid10");
|
||||
MODULE_ALIAS("md-level-10");
|
||||
|
||||
module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);
|
||||
|
|
|
@ -2215,10 +2215,9 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
|
|||
struct r5conf *conf = sh->raid_conf;
|
||||
int level = conf->level;
|
||||
struct raid5_percpu *percpu;
|
||||
unsigned long cpu;
|
||||
|
||||
cpu = get_cpu();
|
||||
percpu = per_cpu_ptr(conf->percpu, cpu);
|
||||
local_lock(&conf->percpu->lock);
|
||||
percpu = this_cpu_ptr(conf->percpu);
|
||||
if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
|
||||
ops_run_biofill(sh);
|
||||
overlap_clear++;
|
||||
|
@ -2271,13 +2270,14 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
|
|||
BUG();
|
||||
}
|
||||
|
||||
if (overlap_clear && !sh->batch_head)
|
||||
if (overlap_clear && !sh->batch_head) {
|
||||
for (i = disks; i--; ) {
|
||||
struct r5dev *dev = &sh->dev[i];
|
||||
if (test_and_clear_bit(R5_Overlap, &dev->flags))
|
||||
wake_up(&sh->raid_conf->wait_for_overlap);
|
||||
}
|
||||
put_cpu();
|
||||
}
|
||||
local_unlock(&conf->percpu->lock);
|
||||
}
|
||||
|
||||
static void free_stripe(struct kmem_cache *sc, struct stripe_head *sh)
|
||||
|
@@ -5686,6 +5686,10 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
	struct stripe_head *sh;
	int stripe_sectors;

	/* We need to handle this when io_uring supports discard/trim */
	if (WARN_ON_ONCE(bi->bi_opf & REQ_NOWAIT))
		return;

	if (mddev->reshape_position != MaxSector)
		/* Skip discard while reshape is happening */
		return;
@@ -5819,6 +5823,17 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
	last_sector = bio_end_sector(bi);
	bi->bi_next = NULL;

	/* Bail out if conflicts with reshape and REQ_NOWAIT is set */
	if ((bi->bi_opf & REQ_NOWAIT) &&
	    (conf->reshape_progress != MaxSector) &&
	    (mddev->reshape_backwards
	     ? (logical_sector > conf->reshape_progress && logical_sector <= conf->reshape_safe)
	     : (logical_sector >= conf->reshape_safe && logical_sector < conf->reshape_progress))) {
		bio_wouldblock_error(bi);
		if (rw == WRITE)
			md_write_end(mddev);
		return true;
	}
	md_account_bio(mddev, &bi);
	prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
	for (; logical_sector < last_sector; logical_sector += RAID5_STRIPE_SECTORS(conf)) {
@@ -7052,6 +7067,7 @@ static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu
		return -ENOMEM;
	}

	local_lock_init(&percpu->lock);
	return 0;
}

@@ -7446,12 +7462,19 @@ static int raid5_run(struct mddev *mddev)
	struct md_rdev *rdev;
	struct md_rdev *journal_dev = NULL;
	sector_t reshape_offset = 0;
	int i;
	int i, ret = 0;
	long long min_offset_diff = 0;
	int first = 1;

	if (mddev_init_writes_pending(mddev) < 0)
	if (acct_bioset_init(mddev)) {
		pr_err("md/raid456:%s: alloc acct bioset failed.\n", mdname(mddev));
		return -ENOMEM;
	}

	if (mddev_init_writes_pending(mddev) < 0) {
		ret = -ENOMEM;
		goto exit_acct_set;
	}

	if (mddev->recovery_cp != MaxSector)
		pr_notice("md/raid:%s: not clean -- starting background reconstruction\n",
@@ -7482,7 +7505,8 @@ static int raid5_run(struct mddev *mddev)
	    (mddev->bitmap_info.offset || mddev->bitmap_info.file)) {
		pr_notice("md/raid:%s: array cannot have both journal and bitmap\n",
			  mdname(mddev));
		return -EINVAL;
		ret = -EINVAL;
		goto exit_acct_set;
	}

	if (mddev->reshape_position != MaxSector) {
@@ -7507,13 +7531,15 @@ static int raid5_run(struct mddev *mddev)
		if (journal_dev) {
			pr_warn("md/raid:%s: don't support reshape with journal - aborting.\n",
				mdname(mddev));
			return -EINVAL;
			ret = -EINVAL;
			goto exit_acct_set;
		}

		if (mddev->new_level != mddev->level) {
			pr_warn("md/raid:%s: unsupported reshape required - aborting.\n",
				mdname(mddev));
			return -EINVAL;
			ret = -EINVAL;
			goto exit_acct_set;
		}
		old_disks = mddev->raid_disks - mddev->delta_disks;
		/* reshape_position must be on a new-stripe boundary, and one
@@ -7529,7 +7555,8 @@ static int raid5_run(struct mddev *mddev)
		if (sector_div(here_new, chunk_sectors * new_data_disks)) {
			pr_warn("md/raid:%s: reshape_position not on a stripe boundary\n",
				mdname(mddev));
			return -EINVAL;
			ret = -EINVAL;
			goto exit_acct_set;
		}
		reshape_offset = here_new * chunk_sectors;
		/* here_new is the stripe we will write to */
@@ -7551,7 +7578,8 @@ static int raid5_run(struct mddev *mddev)
			else if (mddev->ro == 0) {
				pr_warn("md/raid:%s: in-place reshape must be started in read-only mode - aborting\n",
					mdname(mddev));
				return -EINVAL;
				ret = -EINVAL;
				goto exit_acct_set;
			}
		} else if (mddev->reshape_backwards
		    ? (here_new * chunk_sectors + min_offset_diff <=
@@ -7561,7 +7589,8 @@ static int raid5_run(struct mddev *mddev)
			/* Reading from the same stripe as writing to - bad */
			pr_warn("md/raid:%s: reshape_position too early for auto-recovery - aborting.\n",
				mdname(mddev));
			return -EINVAL;
			ret = -EINVAL;
			goto exit_acct_set;
		}
		pr_debug("md/raid:%s: reshape will continue\n", mdname(mddev));
		/* OK, we should be able to continue; */
@@ -7585,8 +7614,10 @@ static int raid5_run(struct mddev *mddev)
	else
		conf = mddev->private;

	if (IS_ERR(conf))
		return PTR_ERR(conf);
	if (IS_ERR(conf)) {
		ret = PTR_ERR(conf);
		goto exit_acct_set;
	}

	if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
		if (!journal_dev) {
@@ -7783,7 +7814,10 @@ abort:
	free_conf(conf);
	mddev->private = NULL;
	pr_warn("md/raid:%s: failed to run raid set.\n", mdname(mddev));
	return -EIO;
	ret = -EIO;
exit_acct_set:
	acct_bioset_exit(mddev);
	return ret;
}

static void raid5_free(struct mddev *mddev, void *priv)
@@ -7791,6 +7825,7 @@ static void raid5_free(struct mddev *mddev, void *priv)
	struct r5conf *conf = priv;

	free_conf(conf);
	acct_bioset_exit(mddev);
	mddev->to_remove = &raid5_attrs_group;
}
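The raid5_run() hunks wire in the per-personality accounting bioset: it is allocated first, every early "return -E..." becomes "ret = ...; goto exit_acct_set;", and the new label frees the bioset on any failure path while raid5_free() releases it on teardown. Below is a small, runnable userspace sketch of that single-exit unwind style; the resource names (acct_init, acct_exit, setup) are invented for illustration only.

/* Toy illustration of the "ret = -E...; goto out_cleanup;" unwind style. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx { void *acct; void *conf; };

static int acct_init(struct ctx *c) { c->acct = malloc(64); return c->acct ? 0 : -ENOMEM; }
static void acct_exit(struct ctx *c) { free(c->acct); c->acct = NULL; }

static int setup(struct ctx *c, int fail_step)
{
	int ret;

	ret = acct_init(c);		/* first resource, must be undone on error */
	if (ret)
		return ret;

	if (fail_step == 1) {		/* any later failure funnels into one label */
		ret = -EINVAL;
		goto exit_acct;
	}

	c->conf = malloc(128);
	if (!c->conf) {
		ret = -ENOMEM;
		goto exit_acct;
	}
	return 0;

exit_acct:
	acct_exit(c);
	return ret;
}

int main(void)
{
	struct ctx c = { 0 };

	printf("setup(fail=1) -> %d\n", setup(&c, 1));	/* -EINVAL, acct freed */
	printf("setup(fail=0) -> %d\n", setup(&c, 0));	/* 0 on success */
	return 0;
}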
@@ -4,6 +4,7 @@

#include <linux/raid/xor.h>
#include <linux/dmaengine.h>
#include <linux/local_lock.h>

/*
 *
@@ -640,7 +641,8 @@ struct r5conf {
				 * lists and performing address
				 * conversions
				 */
		int scribble_obj_size;
		int scribble_obj_size;
		local_lock_t lock;
	} __percpu *percpu;
	int scribble_disks;
	int scribble_sectors;
@@ -991,7 +991,6 @@ EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
{
	struct nvme_command *cmd = nvme_req(req)->cmd;
	struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
	blk_status_t ret = BLK_STS_OK;

	if (!(req->rq_flags & RQF_DONTPREP))
@@ -1038,8 +1037,6 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
		return BLK_STS_IOERR;
	}

	if (!(ctrl->quirks & NVME_QUIRK_SKIP_CID_GEN))
		nvme_req(req)->genctr++;
	cmd->common.command_id = nvme_cid(req);
	trace_nvme_setup_cmd(req, cmd);
	return ret;
@@ -2762,9 +2759,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
		return -EINVAL;
	}
	subsys->awupf = le16_to_cpu(id->awupf);
#ifdef CONFIG_NVME_MULTIPATH
	subsys->iopolicy = NVME_IOPOLICY_NUMA;
#endif
	nvme_mpath_default_iopolicy(subsys);

	subsys->dev.class = nvme_subsys_class;
	subsys->dev.release = nvme_release_subsystem;
@@ -1069,6 +1069,26 @@ out_unlock:
	return ret ? ret : count;
}

static void __nvmf_concat_opt_tokens(struct seq_file *seq_file)
{
	const struct match_token *tok;
	int idx;

	/*
	 * Add dummy entries for instance and cntlid to
	 * signal an invalid/non-existing controller
	 */
	seq_puts(seq_file, "instance=-1,cntlid=-1");
	for (idx = 0; idx < ARRAY_SIZE(opt_tokens); idx++) {
		tok = &opt_tokens[idx];
		if (tok->token == NVMF_OPT_ERR)
			continue;
		seq_puts(seq_file, ",");
		seq_puts(seq_file, tok->pattern);
	}
	seq_puts(seq_file, "\n");
}

static int nvmf_dev_show(struct seq_file *seq_file, void *private)
{
	struct nvme_ctrl *ctrl;
@@ -1077,7 +1097,7 @@ static int nvmf_dev_show(struct seq_file *seq_file, void *private)
	mutex_lock(&nvmf_dev_mutex);
	ctrl = seq_file->private;
	if (!ctrl) {
		ret = -EINVAL;
		__nvmf_concat_opt_tokens(seq_file);
		goto out_unlock;
	}

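The fabrics change makes reading /dev/nvme-fabrics useful before a controller exists: instead of returning -EINVAL, the show routine now prints a synthetic "instance=-1,cntlid=-1" entry followed by every recognised connect option pattern from the opt_tokens table. A runnable userspace sketch of the same "walk a token table and emit a comma-separated list" idea follows; the table contents and names are invented stand-ins, not the real nvme-fabrics option list.

/* Toy version of concatenating a match-token table into one line. */
#include <stdio.h>

#define OPT_ERR 0

struct match_token { int token; const char *pattern; };

/* Invented example table; the real opt_tokens list lives in fabrics.c. */
static const struct match_token opt_tokens[] = {
	{ 1, "transport=%s" },
	{ 2, "traddr=%s" },
	{ 3, "nqn=%s" },
	{ OPT_ERR, NULL },
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	printf("instance=-1,cntlid=-1");
	for (size_t i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
		if (opt_tokens[i].token == OPT_ERR)
			continue;	/* skip the terminator entry */
		printf(",%s", opt_tokens[i].pattern);
	}
	putchar('\n');
	return 0;
}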
@@ -13,6 +13,42 @@ module_param(multipath, bool, 0444);
MODULE_PARM_DESC(multipath,
	"turn on native support for multiple controllers per subsystem");

static const char *nvme_iopolicy_names[] = {
	[NVME_IOPOLICY_NUMA] = "numa",
	[NVME_IOPOLICY_RR] = "round-robin",
};

static int iopolicy = NVME_IOPOLICY_NUMA;

static int nvme_set_iopolicy(const char *val, const struct kernel_param *kp)
{
	if (!val)
		return -EINVAL;
	if (!strncmp(val, "numa", 4))
		iopolicy = NVME_IOPOLICY_NUMA;
	else if (!strncmp(val, "round-robin", 11))
		iopolicy = NVME_IOPOLICY_RR;
	else
		return -EINVAL;

	return 0;
}

static int nvme_get_iopolicy(char *buf, const struct kernel_param *kp)
{
	return sprintf(buf, "%s\n", nvme_iopolicy_names[iopolicy]);
}

module_param_call(iopolicy, nvme_set_iopolicy, nvme_get_iopolicy,
	&iopolicy, 0644);
MODULE_PARM_DESC(iopolicy,
	"Default multipath I/O policy; 'numa' (default) or 'round-robin'");

void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
{
	subsys->iopolicy = iopolicy;
}

void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
{
	struct nvme_ns_head *h;
@@ -706,11 +742,6 @@ void nvme_mpath_stop(struct nvme_ctrl *ctrl)
	struct device_attribute subsys_attr_##_name = \
		__ATTR(_name, _mode, _show, _store)

static const char *nvme_iopolicy_names[] = {
	[NVME_IOPOLICY_NUMA] = "numa",
	[NVME_IOPOLICY_RR] = "round-robin",
};

static ssize_t nvme_subsys_iopolicy_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
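This multipath hunk adds the 'iopolicy' module parameter: a setter that validates the string, a getter that prints the current policy name, both wired up with module_param_call(), while the duplicate nvme_iopolicy_names[] table lower in the file is dropped now that it is defined once at the top. A minimal, hedged sketch of the same validated-parameter pattern as a toy out-of-tree module is shown below; the module, parameter and policy names are invented for illustration.

// Hedged sketch: a toy module exposing a validated policy parameter,
// mirroring the module_param_call() pattern used for the nvme iopolicy.
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>

enum { DEMO_POLICY_A, DEMO_POLICY_B };

static const char *demo_policy_names[] = {
	[DEMO_POLICY_A] = "alpha",
	[DEMO_POLICY_B] = "beta",
};

static int demo_policy = DEMO_POLICY_A;

static int demo_set_policy(const char *val, const struct kernel_param *kp)
{
	if (!val)
		return -EINVAL;
	if (sysfs_streq(val, "alpha"))
		demo_policy = DEMO_POLICY_A;
	else if (sysfs_streq(val, "beta"))
		demo_policy = DEMO_POLICY_B;
	else
		return -EINVAL;
	return 0;
}

static int demo_get_policy(char *buf, const struct kernel_param *kp)
{
	return sprintf(buf, "%s\n", demo_policy_names[demo_policy]);
}

module_param_call(policy, demo_set_policy, demo_get_policy, &demo_policy, 0644);
MODULE_PARM_DESC(policy, "Demo policy; 'alpha' (default) or 'beta'");
MODULE_LICENSE("GPL");

The parameter then shows up as /sys/module/<module>/parameters/policy and rejects anything other than the two known strings, which is the same user-visible behaviour the nvme parameter provides.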
@@ -614,6 +614,10 @@ static inline bool nvme_try_complete_req(struct request *req, __le16 status,
		union nvme_result result)
{
	struct nvme_request *rq = nvme_req(req);
	struct nvme_ctrl *ctrl = rq->ctrl;

	if (!(ctrl->quirks & NVME_QUIRK_SKIP_CID_GEN))
		rq->genctr++;

	rq->status = le16_to_cpu(status) >> 1;
	rq->result = result;
@@ -763,6 +767,7 @@ static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys);
bool nvme_mpath_set_disk_name(struct nvme_ns *ns, char *disk_name, int *flags);
void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
@@ -860,6 +865,9 @@ static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
{
}
#endif /* CONFIG_NVME_MULTIPATH */

int nvme_revalidate_zones(struct nvme_ns *ns);
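Together with the core.c hunk earlier, this moves the request generation counter bump from command setup (nvme_setup_cmd) to completion (nvme_try_complete_req), so the counter folded into the command ID only advances once a request has actually finished and a reuse of the same tag can be told apart from a stale completion. The runnable userspace sketch below illustrates the underlying idea of embedding a small generation counter in a command ID; the bit layout and names are invented and do not claim to match the driver's exact encoding.

/* Toy model: command ID = (generation << 12) | tag, used to reject stale completions. */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

struct req { uint16_t tag; uint16_t genctr; };

static uint16_t make_cid(const struct req *r)
{
	return (uint16_t)(((r->genctr & 0xf) << 12) | (r->tag & 0xfff));
}

static bool completion_matches(const struct req *r, uint16_t cid_from_device)
{
	return make_cid(r) == cid_from_device;	/* genctr mismatch => stale */
}

int main(void)
{
	struct req r = { .tag = 7, .genctr = 0 };
	uint16_t first_cid = make_cid(&r);

	/* The first completion arrives and matches; bump the generation afterwards. */
	printf("first completion valid: %d\n", completion_matches(&r, first_cid));
	r.genctr++;

	/* The same CID showing up again now looks stale and is rejected. */
	printf("replayed completion valid: %d\n", completion_matches(&r, first_cid));
	return 0;
}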
@@ -152,14 +152,6 @@ struct packet_stacked_data
};
#define PSD_POOL_SIZE 64

struct pktcdvd_kobj
{
	struct kobject kobj;
	struct pktcdvd_device *pd;
};
#define to_pktcdvdkobj(_k) \
	((struct pktcdvd_kobj*)container_of(_k,struct pktcdvd_kobj,kobj))

struct pktcdvd_device
{
	struct block_device *bdev;	/* dev attached */
@@ -183,6 +175,8 @@ struct pktcdvd_device
	spinlock_t lock;		/* Serialize access to bio_queue */
	struct rb_root bio_queue;	/* Work queue of bios we need to handle */
	int bio_queue_size;		/* Number of nodes in bio_queue */
	bool congested;			/* Someone is waiting for bio_queue_size
					 * to drop. */
	sector_t current_sector;	/* Keep track of where the elevator is */
	atomic_t scan_queue;		/* Set to non-zero when pkt_handle_queue */
					/* needs to be run. */
@@ -195,8 +189,6 @@ struct pktcdvd_device
	int write_congestion_on;

	struct device *dev;			/* sysfs pktcdvd[0-7] dev */
	struct pktcdvd_kobj *kobj_stat;		/* sysfs pktcdvd[0-7]/stat/ */
	struct pktcdvd_kobj *kobj_wqueue;	/* sysfs pktcdvd[0-7]/write_queue/ */

	struct dentry *dfs_d_root;		/* debugfs: devname directory */
	struct dentry *dfs_f_info;		/* debugfs: info file */
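These pktcdvd header hunks reflect two of the driver changes in this pull: the hand-rolled struct pktcdvd_kobj wrappers disappear because the stat/ and write_queue/ sysfs directories are now expressed as attribute groups, and a plain congested flag plus a wait replaces the old bdi congestion callbacks. The fragment below is a hedged, kernel-style sketch of grouping sysfs attributes under a named directory; the attribute names and group are invented, not the driver's actual files.

/* Sketch of sysfs attribute groups replacing per-directory kobjects; names invented. */
#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t packets_started_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", 42);	/* placeholder value for the sketch */
}
static DEVICE_ATTR_RO(packets_started);

static struct attribute *demo_stat_attrs[] = {
	&dev_attr_packets_started.attr,
	NULL,
};

/* A named group makes the attributes appear under <device>/stat/ */
static const struct attribute_group demo_stat_group = {
	.name  = "stat",
	.attrs = demo_stat_attrs,
};

static const struct attribute_group *demo_groups[] = {
	&demo_stat_group,
	NULL,
};
/* demo_groups would be passed to the device registration call instead of
 * creating and registering a private kobject by hand. */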
@@ -81,7 +81,7 @@ struct raid6_calls {
	void (*xor_syndrome)(int, int, int, size_t, void **);
	int  (*valid)(void);	/* Returns 1 if this routine set is usable */
	const char *name;	/* Name of this routine set */
	int prefer;		/* Has special performance attribute */
	int priority;		/* Relative priority ranking if non-zero */
};

/* Selected algorithm */
@@ -145,13 +145,13 @@ static inline const struct raid6_recov_calls *raid6_choose_recov(void)
static inline const struct raid6_calls *raid6_choose_gen(
	void *(*const dptrs)[RAID6_TEST_DISKS], const int disks)
{
	unsigned long perf, bestgenperf, bestxorperf, j0, j1;
	unsigned long perf, bestgenperf, j0, j1;
	int start = (disks>>1)-1, stop = disks-3;	/* work on the second half of the disks */
	const struct raid6_calls *const *algo;
	const struct raid6_calls *best;

	for (bestgenperf = 0, bestxorperf = 0, best = NULL, algo = raid6_algos; *algo; algo++) {
		if (!best || (*algo)->prefer >= best->prefer) {
	for (bestgenperf = 0, best = NULL, algo = raid6_algos; *algo; algo++) {
		if (!best || (*algo)->priority >= best->priority) {
			if ((*algo)->valid && !(*algo)->valid())
				continue;

@@ -180,50 +180,48 @@ static inline const struct raid6_calls *raid6_choose_gen(
			pr_info("raid6: %-8s gen() %5ld MB/s\n", (*algo)->name,
				(perf * HZ * (disks-2)) >>
				(20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2));

			if (!(*algo)->xor_syndrome)
				continue;

			perf = 0;

			preempt_disable();
			j0 = jiffies;
			while ((j1 = jiffies) == j0)
				cpu_relax();
			while (time_before(jiffies,
					   j1 + (1<<RAID6_TIME_JIFFIES_LG2))) {
				(*algo)->xor_syndrome(disks, start, stop,
						      PAGE_SIZE, *dptrs);
				perf++;
			}
			preempt_enable();

			if (best == *algo)
				bestxorperf = perf;

			pr_info("raid6: %-8s xor() %5ld MB/s\n", (*algo)->name,
				(perf * HZ * (disks-2)) >>
				(20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2 + 1));
		}
	}

	if (best) {
		if (IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) {
			pr_info("raid6: using algorithm %s gen() %ld MB/s\n",
				best->name,
				(bestgenperf * HZ * (disks-2)) >>
				(20 - PAGE_SHIFT+RAID6_TIME_JIFFIES_LG2));
			if (best->xor_syndrome)
				pr_info("raid6: .... xor() %ld MB/s, rmw enabled\n",
					(bestxorperf * HZ * (disks-2)) >>
					(20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2 + 1));
		} else
			pr_info("raid6: skip pq benchmark and using algorithm %s\n",
				best->name);
		raid6_call = *best;
	} else
		pr_err("raid6: Yikes! No algorithm found!\n");
	if (!best) {
		pr_err("raid6: Yikes! No algorithm found!\n");
		goto out;
	}

	raid6_call = *best;

	if (!IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) {
		pr_info("raid6: skipped pq benchmark and selected %s\n",
			best->name);
		goto out;
	}

	pr_info("raid6: using algorithm %s gen() %ld MB/s\n",
		best->name,
		(bestgenperf * HZ * (disks - 2)) >>
		(20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2));

	if (best->xor_syndrome) {
		perf = 0;

		preempt_disable();
		j0 = jiffies;
		while ((j1 = jiffies) == j0)
			cpu_relax();
		while (time_before(jiffies,
				   j1 + (1 << RAID6_TIME_JIFFIES_LG2))) {
			best->xor_syndrome(disks, start, stop,
					   PAGE_SIZE, *dptrs);
			perf++;
		}
		preempt_enable();

		pr_info("raid6: .... xor() %ld MB/s, rmw enabled\n",
			(perf * HZ * (disks - 2)) >>
			(20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2 + 1));
	}

out:
	return best;
}
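The algos.c rewrite does two things: gen() candidates are now ranked by an explicit priority field instead of the fuzzy prefer flag, and the xor_syndrome() benchmark runs only once, for the algorithm that actually won, instead of for every candidate. Below is a runnable userspace sketch of that general "pick among valid entries by priority, then measure only the winner" shape; the table, names and selection details are invented stand-ins and deliberately simpler than the kernel's perf-based tie-breaking.

/* Toy selection loop: a valid, highest-priority entry wins; only the winner is "benchmarked". */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct calls {
	const char *name;
	bool (*valid)(void);	/* NULL means always usable */
	int priority;		/* higher wins, like the new .priority field */
};

static bool never(void) { return false; }

static const struct calls algos[] = {
	{ "generic", NULL,  0 },
	{ "sse2ish", NULL,  1 },
	{ "avx2ish", never, 2 },	/* claims priority 2 but is not usable here */
};

int main(void)
{
	const struct calls *best = NULL;

	for (size_t i = 0; i < sizeof(algos) / sizeof(algos[0]); i++) {
		if (best && algos[i].priority < best->priority)
			continue;		/* strictly lower ranking, skip */
		if (algos[i].valid && !algos[i].valid())
			continue;		/* not usable on this machine */
		best = &algos[i];
	}

	if (!best) {
		fprintf(stderr, "no algorithm found\n");
		return 1;
	}

	/* Only the chosen entry would be timed here, mirroring the single xor() benchmark. */
	printf("selected %s (priority %d), benchmarking only this one\n",
	       best->name, best->priority);
	return 0;
}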
@@ -132,7 +132,7 @@ const struct raid6_calls raid6_avx2x1 = {
	raid6_avx21_xor_syndrome,
	raid6_have_avx2,
	"avx2x1",
	1			/* Has cache hints */
	.priority = 2		/* Prefer AVX2 over priority 1 (SSE2 and others) */
};

/*
@@ -262,7 +262,7 @@ const struct raid6_calls raid6_avx2x2 = {
	raid6_avx22_xor_syndrome,
	raid6_have_avx2,
	"avx2x2",
	1			/* Has cache hints */
	.priority = 2		/* Prefer AVX2 over priority 1 (SSE2 and others) */
};

#ifdef CONFIG_X86_64
@@ -465,6 +465,6 @@ const struct raid6_calls raid6_avx2x4 = {
	raid6_avx24_xor_syndrome,
	raid6_have_avx2,
	"avx2x4",
	1			/* Has cache hints */
	.priority = 2		/* Prefer AVX2 over priority 1 (SSE2 and others) */
};
#endif
#endif /* CONFIG_X86_64 */
@@ -162,7 +162,7 @@ const struct raid6_calls raid6_avx512x1 = {
	raid6_avx5121_xor_syndrome,
	raid6_have_avx512,
	"avx512x1",
	1			/* Has cache hints */
	.priority = 2		/* Prefer AVX512 over priority 1 (SSE2 and others) */
};

/*
@@ -319,7 +319,7 @@ const struct raid6_calls raid6_avx512x2 = {
	raid6_avx5122_xor_syndrome,
	raid6_have_avx512,
	"avx512x2",
	1			/* Has cache hints */
	.priority = 2		/* Prefer AVX512 over priority 1 (SSE2 and others) */
};

#ifdef CONFIG_X86_64
@@ -557,7 +557,7 @@ const struct raid6_calls raid6_avx512x4 = {
	raid6_avx5124_xor_syndrome,
	raid6_have_avx512,
	"avx512x4",
	1			/* Has cache hints */
	.priority = 2		/* Prefer AVX512 over priority 1 (SSE2 and others) */
};
#endif
