/*
 * Driver for sTec s1120 PCIe SSDs. sTec was acquired in 2013 by HGST and HGST
 * was acquired by Western Digital in 2012.
 *
 * Copyright 2012 sTec, Inc.
 * Copyright (c) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is part of the Linux kernel, and is made available under
 * the terms of the GNU General Public License version 2.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/hdreg.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/version.h>
#include <linux/err.h>
#include <linux/aer.h>
#include <linux/wait.h>
#include <linux/stringify.h>
#include <scsi/scsi.h>
#include <scsi/sg.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include "skd_s1120.h"

static int skd_dbg_level;
static int skd_isr_comp_limit = 4;

#define SKD_ASSERT(expr) \
        do { \
                if (unlikely(!(expr))) { \
                        pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
                               # expr, __FILE__, __func__, __LINE__); \
                } \
        } while (0)

#define DRV_NAME "skd"
#define PFX DRV_NAME ": "

MODULE_LICENSE("GPL");

MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver");

#define PCI_VENDOR_ID_STEC 0x1B39
#define PCI_DEVICE_ID_S1120 0x0001

#define SKD_FUA_NV (1 << 1)
#define SKD_MINORS_PER_DEVICE 16

#define SKD_MAX_QUEUE_DEPTH 200u

#define SKD_PAUSE_TIMEOUT (5 * 1000)

#define SKD_N_FITMSG_BYTES (512u)
#define SKD_MAX_REQ_PER_MSG 14

#define SKD_N_SPECIAL_FITMSG_BYTES (128u)

/* SG elements are 32 bytes, so we can make this 4096 and still be under the
 * 128KB limit. That allows 4096*4K = 16M xfer size
 */
#define SKD_N_SG_PER_REQ_DEFAULT 256u
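
/*
 * Worked example (assuming 4 KiB segments, as in the note above): the
 * default of 256 descriptors per request maps up to 256 * 4K = 1M of data
 * and needs 256 * 32 = 8K for the descriptor list; 4096 descriptors would
 * use the full 128K list while allowing 16M transfers.
 */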

#define SKD_N_COMPLETION_ENTRY 256u
#define SKD_N_READ_CAP_BYTES (8u)

#define SKD_N_INTERNAL_BYTES (512u)

#define SKD_SKCOMP_SIZE \
        ((sizeof(struct fit_completion_entry_v1) + \
          sizeof(struct fit_comp_error_info)) * SKD_N_COMPLETION_ENTRY)

/* 5 bits of uniquifier, 0xF800 */
#define SKD_ID_TABLE_MASK (3u << 8u)
#define SKD_ID_RW_REQUEST (0u << 8u)
#define SKD_ID_INTERNAL (1u << 8u)
#define SKD_ID_FIT_MSG (3u << 8u)
#define SKD_ID_SLOT_MASK 0x00FFu
#define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu
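
/*
 * A request id packs a table selector (bits 8-9) and a slot number
 * (bits 0-7). For example, blk-mq tag 0x2a issued as a read/write request
 * becomes id 0x002a (SKD_ID_RW_REQUEST | slot 0x2a), an internal request in
 * slot 0 is 0x0100, and a FIT message in slot 5 is 0x0305. Masking with
 * SKD_ID_SLOT_AND_TABLE_MASK recovers both fields.
 */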

#define SKD_N_MAX_SECTORS 2048u

#define SKD_MAX_RETRIES 2u

#define SKD_TIMER_SECONDS(seconds) (seconds)
#define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))

#define INQ_STD_NBYTES 36

enum skd_drvr_state {
        SKD_DRVR_STATE_LOAD,
        SKD_DRVR_STATE_IDLE,
        SKD_DRVR_STATE_BUSY,
        SKD_DRVR_STATE_STARTING,
        SKD_DRVR_STATE_ONLINE,
        SKD_DRVR_STATE_PAUSING,
        SKD_DRVR_STATE_PAUSED,
        SKD_DRVR_STATE_RESTARTING,
        SKD_DRVR_STATE_RESUMING,
        SKD_DRVR_STATE_STOPPING,
        SKD_DRVR_STATE_FAULT,
        SKD_DRVR_STATE_DISAPPEARED,
        SKD_DRVR_STATE_PROTOCOL_MISMATCH,
        SKD_DRVR_STATE_BUSY_ERASE,
        SKD_DRVR_STATE_BUSY_SANITIZE,
        SKD_DRVR_STATE_BUSY_IMMINENT,
        SKD_DRVR_STATE_WAIT_BOOT,
        SKD_DRVR_STATE_SYNCING,
};

#define SKD_WAIT_BOOT_TIMO SKD_TIMER_SECONDS(90u)
#define SKD_STARTING_TIMO SKD_TIMER_SECONDS(8u)
#define SKD_RESTARTING_TIMO SKD_TIMER_MINUTES(4u)
#define SKD_BUSY_TIMO SKD_TIMER_MINUTES(20u)
#define SKD_STARTED_BUSY_TIMO SKD_TIMER_SECONDS(60u)
#define SKD_START_WAIT_SECONDS 90u

enum skd_req_state {
        SKD_REQ_STATE_IDLE,
        SKD_REQ_STATE_SETUP,
        SKD_REQ_STATE_BUSY,
        SKD_REQ_STATE_COMPLETED,
        SKD_REQ_STATE_TIMEOUT,
};

enum skd_check_status_action {
        SKD_CHECK_STATUS_REPORT_GOOD,
        SKD_CHECK_STATUS_REPORT_SMART_ALERT,
        SKD_CHECK_STATUS_REQUEUE_REQUEST,
        SKD_CHECK_STATUS_REPORT_ERROR,
        SKD_CHECK_STATUS_BUSY_IMMINENT,
};

struct skd_msg_buf {
        struct fit_msg_hdr fmh;
        struct skd_scsi_request scsi[SKD_MAX_REQ_PER_MSG];
};
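
/*
 * A skd_msg_buf is the in-memory image of one FIT message: a single
 * fit_msg_hdr followed by up to SKD_MAX_REQ_PER_MSG coalesced SCSI
 * requests. skd_mq_queue_rq() below appends one skd_scsi_request per block
 * request and sends the message once it is full or, when
 * skd_max_req_per_msg > 1, when the current queue run ends.
 */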

struct skd_fitmsg_context {
        u32 id;

        u32 length;

        struct skd_msg_buf *msg_buf;
        dma_addr_t mb_dma_address;
};

struct skd_request_context {
        enum skd_req_state state;

        u16 id;
        u32 fitmsg_id;

        u8 flush_cmd;

        enum dma_data_direction data_dir;
        struct scatterlist *sg;
        u32 n_sg;
        u32 sg_byte_count;

        struct fit_sg_descriptor *sksg_list;
        dma_addr_t sksg_dma_address;

        struct fit_completion_entry_v1 completion;

        struct fit_comp_error_info err_info;

        blk_status_t status;
};

struct skd_special_context {
        struct skd_request_context req;

        void *data_buf;
        dma_addr_t db_dma_address;

        struct skd_msg_buf *msg_buf;
        dma_addr_t mb_dma_address;
};

typedef enum skd_irq_type {
        SKD_IRQ_LEGACY,
        SKD_IRQ_MSI,
        SKD_IRQ_MSIX
} skd_irq_type_t;

#define SKD_MAX_BARS 2

struct skd_device {
        void __iomem *mem_map[SKD_MAX_BARS];
        resource_size_t mem_phys[SKD_MAX_BARS];
        u32 mem_size[SKD_MAX_BARS];

        struct skd_msix_entry *msix_entries;

        struct pci_dev *pdev;
        int pcie_error_reporting_is_enabled;

        spinlock_t lock;
        struct gendisk *disk;
        struct blk_mq_tag_set tag_set;
        struct request_queue *queue;
        struct skd_fitmsg_context *skmsg;
        struct device *class_dev;
        int gendisk_on;
        int sync_done;

        u32 devno;
        u32 major;
        char isr_name[30];

        enum skd_drvr_state state;
        u32 drive_state;

        u32 cur_max_queue_depth;
        u32 queue_low_water_mark;
        u32 dev_max_queue_depth;

        u32 num_fitmsg_context;
        u32 num_req_context;

        struct skd_fitmsg_context *skmsg_table;

        struct skd_special_context internal_skspcl;
        u32 read_cap_blocksize;
        u32 read_cap_last_lba;
        int read_cap_is_valid;
        int inquiry_is_valid;
        u8 inq_serial_num[13]; /* 12 chars plus null term */

        u8 skcomp_cycle;
        u32 skcomp_ix;
        struct kmem_cache *msgbuf_cache;
        struct kmem_cache *sglist_cache;
        struct kmem_cache *databuf_cache;
        struct fit_completion_entry_v1 *skcomp_table;
        struct fit_comp_error_info *skerr_table;
        dma_addr_t cq_dma_address;

        wait_queue_head_t waitq;

        struct timer_list timer;
        u32 timer_countdown;
        u32 timer_substate;

        int sgs_per_request;
        u32 last_mtd;

        u32 proto_ver;

        int dbg_level;
        u32 connect_time_stamp;
        int connect_retries;
#define SKD_MAX_CONNECT_RETRIES 16
        u32 drive_jiffies;

        u32 timo_slot;

        struct work_struct start_queue;
        struct work_struct completion_worker;
};

#define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
#define SKD_READL(DEV, OFF) skd_reg_read32(DEV, OFF)
#define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)

static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
{
        u32 val = readl(skdev->mem_map[1] + offset);

        if (unlikely(skdev->dbg_level >= 2))
                dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
        return val;
}

static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
                                   u32 offset)
{
        writel(val, skdev->mem_map[1] + offset);
        if (unlikely(skdev->dbg_level >= 2))
                dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
}

static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
                                   u32 offset)
{
        writeq(val, skdev->mem_map[1] + offset);
        if (unlikely(skdev->dbg_level >= 2))
                dev_dbg(&skdev->pdev->dev, "offset %x = %016llx\n", offset,
                        val);
}
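
/*
 * All device registers are accessed through BAR 1 (mem_map[1]); the helpers
 * above simply wrap readl/writel/writeq with optional debug tracing.
 * Typical use, as in skd_timer_tick() below:
 *
 *      state = SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
 */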

#define SKD_IRQ_DEFAULT SKD_IRQ_MSIX
static int skd_isr_type = SKD_IRQ_DEFAULT;

module_param(skd_isr_type, int, 0444);
MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
                 " (0==legacy, 1==MSI, 2==MSI-X, default==2)");

#define SKD_MAX_REQ_PER_MSG_DEFAULT 1
static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;

module_param(skd_max_req_per_msg, int, 0444);
MODULE_PARM_DESC(skd_max_req_per_msg,
                 "Maximum SCSI requests packed in a single message."
                 " (1-" __stringify(SKD_MAX_REQ_PER_MSG) ", default==1)");

#define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
#define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;

module_param(skd_max_queue_depth, int, 0444);
MODULE_PARM_DESC(skd_max_queue_depth,
                 "Maximum SCSI requests issued to s1120."
                 " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");

static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
module_param(skd_sgs_per_request, int, 0444);
MODULE_PARM_DESC(skd_sgs_per_request,
                 "Maximum SG elements per block request."
                 " (1-4096, default==256)");

static int skd_max_pass_thru = 1;
module_param(skd_max_pass_thru, int, 0444);
MODULE_PARM_DESC(skd_max_pass_thru,
                 "Maximum SCSI pass-thru at a time. IGNORED");

module_param(skd_dbg_level, int, 0444);
MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");

module_param(skd_isr_comp_limit, int, 0444);
MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");

/* Major device number dynamically assigned. */
static u32 skd_major;

static void skd_destruct(struct skd_device *skdev);
static const struct block_device_operations skd_blockdev_ops;
static void skd_send_fitmsg(struct skd_device *skdev,
                            struct skd_fitmsg_context *skmsg);
static void skd_send_special_fitmsg(struct skd_device *skdev,
                                    struct skd_special_context *skspcl);
static bool skd_preop_sg_list(struct skd_device *skdev,
                              struct skd_request_context *skreq);
static void skd_postop_sg_list(struct skd_device *skdev,
                               struct skd_request_context *skreq);

static void skd_restart_device(struct skd_device *skdev);
static int skd_quiesce_dev(struct skd_device *skdev);
static int skd_unquiesce_dev(struct skd_device *skdev);
static void skd_disable_interrupts(struct skd_device *skdev);
static void skd_isr_fwstate(struct skd_device *skdev);
static void skd_recover_requests(struct skd_device *skdev);
static void skd_soft_reset(struct skd_device *skdev);

const char *skd_drive_state_to_str(int state);
const char *skd_skdev_state_to_str(enum skd_drvr_state state);
static void skd_log_skdev(struct skd_device *skdev, const char *event);
static void skd_log_skreq(struct skd_device *skdev,
                          struct skd_request_context *skreq, const char *event);

/*
 *****************************************************************************
 * READ/WRITE REQUESTS
 *****************************************************************************
 */

static void skd_inc_in_flight(struct request *rq, void *data, bool reserved)
{
        int *count = data;

        (*count)++;
}

static int skd_in_flight(struct skd_device *skdev)
{
        int count = 0;

        blk_mq_tagset_busy_iter(&skdev->tag_set, skd_inc_in_flight, &count);

        return count;
}

static void
skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
                int data_dir, unsigned lba,
                unsigned count)
{
        if (data_dir == READ)
                scsi_req->cdb[0] = READ_10;
        else
                scsi_req->cdb[0] = WRITE_10;

        scsi_req->cdb[1] = 0;
        scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
        scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
        scsi_req->cdb[4] = (lba & 0xff00) >> 8;
        scsi_req->cdb[5] = (lba & 0xff);
        scsi_req->cdb[6] = 0;
        scsi_req->cdb[7] = (count & 0xff00) >> 8;
        scsi_req->cdb[8] = count & 0xff;
        scsi_req->cdb[9] = 0;
}
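
/*
 * For example, a read of 8 sectors at LBA 0x12345678 produces the 10-byte
 * CDB 28 00 12 34 56 78 00 00 08 00: opcode READ_10, the LBA in big-endian
 * bytes 2-5, and the transfer length in big-endian bytes 7-8.
 */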

static void
skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
                            struct skd_request_context *skreq)
{
        skreq->flush_cmd = 1;

        scsi_req->cdb[0] = SYNCHRONIZE_CACHE;
        scsi_req->cdb[1] = 0;
        scsi_req->cdb[2] = 0;
        scsi_req->cdb[3] = 0;
        scsi_req->cdb[4] = 0;
        scsi_req->cdb[5] = 0;
        scsi_req->cdb[6] = 0;
        scsi_req->cdb[7] = 0;
        scsi_req->cdb[8] = 0;
        scsi_req->cdb[9] = 0;
}

/*
 * Return true if and only if all pending requests should be failed.
 */
static bool skd_fail_all(struct request_queue *q)
{
        struct skd_device *skdev = q->queuedata;

        SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);

        skd_log_skdev(skdev, "req_not_online");
        switch (skdev->state) {
        case SKD_DRVR_STATE_PAUSING:
        case SKD_DRVR_STATE_PAUSED:
        case SKD_DRVR_STATE_STARTING:
        case SKD_DRVR_STATE_RESTARTING:
        case SKD_DRVR_STATE_WAIT_BOOT:
                /* In case of starting, we haven't started the queue,
                 * so we can't get here... but requests are
                 * possibly hanging out waiting for us because we
                 * reported the dev/skd0 already. They'll wait
                 * forever if connect doesn't complete.
                 * What to do??? delay dev/skd0 ??
                 */
        case SKD_DRVR_STATE_BUSY:
        case SKD_DRVR_STATE_BUSY_IMMINENT:
        case SKD_DRVR_STATE_BUSY_ERASE:
                return false;

        case SKD_DRVR_STATE_BUSY_SANITIZE:
        case SKD_DRVR_STATE_STOPPING:
        case SKD_DRVR_STATE_SYNCING:
        case SKD_DRVR_STATE_FAULT:
        case SKD_DRVR_STATE_DISAPPEARED:
        default:
                return true;
        }
}
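
/*
 * Build one skd_scsi_request per block layer request and coalesce it into
 * the FIT message currently being assembled. With skd_max_req_per_msg == 1
 * every request gets its own message and no locking is needed; otherwise
 * skdev->lock protects the shared in-progress message, which is sent once
 * it is full or when the queue run ends (mqd->last).
 */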
static blk_status_t skd_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
                                    const struct blk_mq_queue_data *mqd)
{
        struct request *const req = mqd->rq;
        struct request_queue *const q = req->q;
        struct skd_device *skdev = q->queuedata;
        struct skd_fitmsg_context *skmsg;
        struct fit_msg_hdr *fmh;
        const u32 tag = blk_mq_unique_tag(req);
        struct skd_request_context *const skreq = blk_mq_rq_to_pdu(req);
        struct skd_scsi_request *scsi_req;
        unsigned long flags = 0;
        const u32 lba = blk_rq_pos(req);
        const u32 count = blk_rq_sectors(req);
        const int data_dir = rq_data_dir(req);

        if (unlikely(skdev->state != SKD_DRVR_STATE_ONLINE))
                return skd_fail_all(q) ? BLK_STS_IOERR : BLK_STS_RESOURCE;

        blk_mq_start_request(req);

        WARN_ONCE(tag >= skd_max_queue_depth, "%#x > %#x (nr_requests = %lu)\n",
                  tag, skd_max_queue_depth, q->nr_requests);

        SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);

        dev_dbg(&skdev->pdev->dev,
                "new req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba,
                lba, count, count, data_dir);

        skreq->id = tag + SKD_ID_RW_REQUEST;
        skreq->flush_cmd = 0;
        skreq->n_sg = 0;
        skreq->sg_byte_count = 0;

        skreq->fitmsg_id = 0;

        skreq->data_dir = data_dir == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

        if (req->bio && !skd_preop_sg_list(skdev, skreq)) {
                dev_dbg(&skdev->pdev->dev, "error Out\n");
                skreq->status = BLK_STS_RESOURCE;
                blk_mq_complete_request(req);
                return BLK_STS_OK;
        }

        dma_sync_single_for_device(&skdev->pdev->dev, skreq->sksg_dma_address,
                                   skreq->n_sg *
                                   sizeof(struct fit_sg_descriptor),
                                   DMA_TO_DEVICE);

        /* Either a FIT msg is in progress or we have to start one. */
        if (skd_max_req_per_msg == 1) {
                skmsg = NULL;
        } else {
                spin_lock_irqsave(&skdev->lock, flags);
                skmsg = skdev->skmsg;
        }
        if (!skmsg) {
                skmsg = &skdev->skmsg_table[tag];
                skdev->skmsg = skmsg;

                /* Initialize the FIT msg header */
                fmh = &skmsg->msg_buf->fmh;
                memset(fmh, 0, sizeof(*fmh));
                fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
                skmsg->length = sizeof(*fmh);
        } else {
                fmh = &skmsg->msg_buf->fmh;
        }

        skreq->fitmsg_id = skmsg->id;

        scsi_req = &skmsg->msg_buf->scsi[fmh->num_protocol_cmds_coalesced];
        memset(scsi_req, 0, sizeof(*scsi_req));

        scsi_req->hdr.tag = skreq->id;
        scsi_req->hdr.sg_list_dma_address =
                cpu_to_be64(skreq->sksg_dma_address);

        if (req_op(req) == REQ_OP_FLUSH) {
                skd_prep_zerosize_flush_cdb(scsi_req, skreq);
                SKD_ASSERT(skreq->flush_cmd == 1);
        } else {
                skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
        }

        if (req->cmd_flags & REQ_FUA)
                scsi_req->cdb[1] |= SKD_FUA_NV;

        scsi_req->hdr.sg_list_len_bytes = cpu_to_be32(skreq->sg_byte_count);

        /* Complete resource allocations. */
        skreq->state = SKD_REQ_STATE_BUSY;

        skmsg->length += sizeof(struct skd_scsi_request);
        fmh->num_protocol_cmds_coalesced++;

        dev_dbg(&skdev->pdev->dev, "req=0x%x busy=%d\n", skreq->id,
                skd_in_flight(skdev));

        /*
         * If the FIT msg buffer is full send it.
         */
        if (skd_max_req_per_msg == 1) {
                skd_send_fitmsg(skdev, skmsg);
        } else {
                if (mqd->last ||
                    fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
                        skd_send_fitmsg(skdev, skmsg);
                        skdev->skmsg = NULL;
                }
                spin_unlock_irqrestore(&skdev->lock, flags);
        }

        return BLK_STS_OK;
}

static enum blk_eh_timer_return skd_timed_out(struct request *req,
                                              bool reserved)
{
        struct skd_device *skdev = req->q->queuedata;

        dev_err(&skdev->pdev->dev, "request with tag %#x timed out\n",
                blk_mq_unique_tag(req));

        return BLK_EH_RESET_TIMER;
}

static void skd_complete_rq(struct request *req)
{
        struct skd_request_context *skreq = blk_mq_rq_to_pdu(req);

        blk_mq_end_request(req, skreq->status);
}

static bool skd_preop_sg_list(struct skd_device *skdev,
                              struct skd_request_context *skreq)
{
        struct request *req = blk_mq_rq_from_pdu(skreq);
        struct scatterlist *sgl = &skreq->sg[0], *sg;
        int n_sg;
        int i;

        skreq->sg_byte_count = 0;

        WARN_ON_ONCE(skreq->data_dir != DMA_TO_DEVICE &&
                     skreq->data_dir != DMA_FROM_DEVICE);

        n_sg = blk_rq_map_sg(skdev->queue, req, sgl);
        if (n_sg <= 0)
                return false;

        /*
         * Map scatterlist to PCI bus addresses.
         * Note PCI might change the number of entries.
         */
        n_sg = pci_map_sg(skdev->pdev, sgl, n_sg, skreq->data_dir);
        if (n_sg <= 0)
                return false;

        SKD_ASSERT(n_sg <= skdev->sgs_per_request);

        skreq->n_sg = n_sg;

        for_each_sg(sgl, sg, n_sg, i) {
                struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
                u32 cnt = sg_dma_len(sg);
                uint64_t dma_addr = sg_dma_address(sg);

                sgd->control = FIT_SGD_CONTROL_NOT_LAST;
                sgd->byte_count = cnt;
                skreq->sg_byte_count += cnt;
                sgd->host_side_addr = dma_addr;
                sgd->dev_side_addr = 0;
        }

        skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
        skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;

        if (unlikely(skdev->dbg_level > 1)) {
                dev_dbg(&skdev->pdev->dev,
                        "skreq=%x sksg_list=%p sksg_dma=%llx\n",
                        skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
                for (i = 0; i < n_sg; i++) {
                        struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];

                        dev_dbg(&skdev->pdev->dev,
                                " sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
                                i, sgd->byte_count, sgd->control,
                                sgd->host_side_addr, sgd->next_desc_ptr);
                }
        }

        return true;
}

static void skd_postop_sg_list(struct skd_device *skdev,
                               struct skd_request_context *skreq)
{
        /*
         * restore the next ptr for next IO request so we
         * don't have to set it every time.
         */
        skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
                skreq->sksg_dma_address +
                ((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
        pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, skreq->data_dir);
}

/*
 *****************************************************************************
 * TIMER
 *****************************************************************************
 */

static void skd_timer_tick_not_online(struct skd_device *skdev);

static void skd_start_queue(struct work_struct *work)
{
        struct skd_device *skdev = container_of(work, typeof(*skdev),
                                                start_queue);

        /*
         * Although it is safe to call blk_start_queue() from interrupt
         * context, blk_mq_start_hw_queues() must not be called from
         * interrupt context.
         */
        blk_mq_start_hw_queues(skdev->queue);
}

static void skd_timer_tick(struct timer_list *t)
{
        struct skd_device *skdev = from_timer(skdev, t, timer);
        unsigned long reqflags;
        u32 state;

        if (skdev->state == SKD_DRVR_STATE_FAULT)
                /* The driver has declared fault, and we want it to
                 * stay that way until driver is reloaded.
                 */
                return;

        spin_lock_irqsave(&skdev->lock, reqflags);

        state = SKD_READL(skdev, FIT_STATUS);
        state &= FIT_SR_DRIVE_STATE_MASK;
        if (state != skdev->drive_state)
                skd_isr_fwstate(skdev);

        if (skdev->state != SKD_DRVR_STATE_ONLINE)
                skd_timer_tick_not_online(skdev);

        mod_timer(&skdev->timer, (jiffies + HZ));

        spin_unlock_irqrestore(&skdev->lock, reqflags);
}

static void skd_timer_tick_not_online(struct skd_device *skdev)
{
        switch (skdev->state) {
        case SKD_DRVR_STATE_IDLE:
        case SKD_DRVR_STATE_LOAD:
                break;
        case SKD_DRVR_STATE_BUSY_SANITIZE:
                dev_dbg(&skdev->pdev->dev,
                        "drive busy sanitize[%x], driver[%x]\n",
                        skdev->drive_state, skdev->state);
                /* If we've been in sanitize for 3 seconds, we figure we're not
                 * going to get any more completions, so recover requests now
                 */
                if (skdev->timer_countdown > 0) {
                        skdev->timer_countdown--;
                        return;
                }
                skd_recover_requests(skdev);
                break;

        case SKD_DRVR_STATE_BUSY:
        case SKD_DRVR_STATE_BUSY_IMMINENT:
        case SKD_DRVR_STATE_BUSY_ERASE:
                dev_dbg(&skdev->pdev->dev, "busy[%x], countdown=%d\n",
                        skdev->state, skdev->timer_countdown);
                if (skdev->timer_countdown > 0) {
                        skdev->timer_countdown--;
                        return;
                }
                dev_dbg(&skdev->pdev->dev,
                        "busy[%x], timedout=%d, restarting device.",
                        skdev->state, skdev->timer_countdown);
                skd_restart_device(skdev);
                break;

        case SKD_DRVR_STATE_WAIT_BOOT:
        case SKD_DRVR_STATE_STARTING:
                if (skdev->timer_countdown > 0) {
                        skdev->timer_countdown--;
                        return;
                }
                /* For now, we fault the drive. Could attempt resets to
                 * recover at some point. */
                skdev->state = SKD_DRVR_STATE_FAULT;

                dev_err(&skdev->pdev->dev, "DriveFault Connect Timeout (%x)\n",
                        skdev->drive_state);

                /* start the queue so we can respond with error to requests */
                /* wakeup anyone waiting for startup complete */
                schedule_work(&skdev->start_queue);
                skdev->gendisk_on = -1;
                wake_up_interruptible(&skdev->waitq);
                break;

        case SKD_DRVR_STATE_ONLINE:
                /* shouldn't get here. */
                break;

        case SKD_DRVR_STATE_PAUSING:
        case SKD_DRVR_STATE_PAUSED:
                break;

        case SKD_DRVR_STATE_RESTARTING:
                if (skdev->timer_countdown > 0) {
                        skdev->timer_countdown--;
                        return;
                }
                /* For now, we fault the drive. Could attempt resets to
                 * recover at some point. */
                skdev->state = SKD_DRVR_STATE_FAULT;
                dev_err(&skdev->pdev->dev,
                        "DriveFault Reconnect Timeout (%x)\n",
                        skdev->drive_state);

                /*
                 * Recovering does two things:
                 * 1. completes IO with error
                 * 2. reclaims dma resources
                 * When is it safe to recover requests?
                 * - if the drive state is faulted
                 * - if the state is still soft reset after our timeout
                 * - if the drive registers are dead (state = FF)
                 * If it is "unsafe", we still need to recover, so we will
                 * disable pci bus mastering and disable our interrupts.
                 */

                if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
                    (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
                    (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
                        /* It never came out of soft reset. Try to
                         * recover the requests and then let them
                         * fail. This is to mitigate hung processes. */
                        skd_recover_requests(skdev);
                else {
                        dev_err(&skdev->pdev->dev, "Disable BusMaster (%x)\n",
                                skdev->drive_state);
                        pci_disable_device(skdev->pdev);
                        skd_disable_interrupts(skdev);
                        skd_recover_requests(skdev);
                }

                /* start the queue so we can respond with error to requests */
                /* wakeup anyone waiting for startup complete */
                schedule_work(&skdev->start_queue);
                skdev->gendisk_on = -1;
                wake_up_interruptible(&skdev->waitq);
                break;

        case SKD_DRVR_STATE_RESUMING:
        case SKD_DRVR_STATE_STOPPING:
        case SKD_DRVR_STATE_SYNCING:
        case SKD_DRVR_STATE_FAULT:
        case SKD_DRVR_STATE_DISAPPEARED:
        default:
                break;
        }
}

static int skd_start_timer(struct skd_device *skdev)
{
        int rc;
	timer_setup(&skdev->timer, skd_timer_tick, 0);

	rc = mod_timer(&skdev->timer, (jiffies + HZ));
	if (rc)
		dev_err(&skdev->pdev->dev, "failed to start timer %d\n", rc);
	return rc;
}

static void skd_kill_timer(struct skd_device *skdev)
{
	del_timer_sync(&skdev->timer);
}

/*
 *****************************************************************************
 * INTERNAL REQUESTS -- generated by driver itself
 *****************************************************************************
 */

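/*
 * Overview, as implied by skd_send_internal_skspcl() and
 * skd_complete_internal() below (a summary, not an additional interface):
 * skd_refresh_device_data() kicks off TEST_UNIT_READY, and each successful
 * completion chains the next step,
 * TEST_UNIT_READY -> WRITE_BUFFER -> READ_BUFFER -> READ_CAPACITY -> INQUIRY,
 * after which the device is unquiesced and connect_retries is cleared.
 */
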
static int skd_format_internal_skspcl(struct skd_device *skdev)
{
	struct skd_special_context *skspcl = &skdev->internal_skspcl;
	struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
	struct fit_msg_hdr *fmh;
	uint64_t dma_address;
	struct skd_scsi_request *scsi;

	fmh = &skspcl->msg_buf->fmh;
	fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
	fmh->num_protocol_cmds_coalesced = 1;

	scsi = &skspcl->msg_buf->scsi[0];
	memset(scsi, 0, sizeof(*scsi));
	dma_address = skspcl->req.sksg_dma_address;
	scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
	skspcl->req.n_sg = 1;
	sgd->control = FIT_SGD_CONTROL_LAST;
	sgd->byte_count = 0;
	sgd->host_side_addr = skspcl->db_dma_address;
	sgd->dev_side_addr = 0;
	sgd->next_desc_ptr = 0LL;

	return 1;
}

#define WR_BUF_SIZE SKD_N_INTERNAL_BYTES

static void skd_send_internal_skspcl(struct skd_device *skdev,
				     struct skd_special_context *skspcl,
				     u8 opcode)
{
	struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
	struct skd_scsi_request *scsi;
	unsigned char *buf = skspcl->data_buf;
	int i;

	if (skspcl->req.state != SKD_REQ_STATE_IDLE)
		/*
		 * A refresh is already in progress.
		 * Just wait for it to finish.
		 */
		return;

	skspcl->req.state = SKD_REQ_STATE_BUSY;

	scsi = &skspcl->msg_buf->scsi[0];
	scsi->hdr.tag = skspcl->req.id;

	memset(scsi->cdb, 0, sizeof(scsi->cdb));

	switch (opcode) {
	case TEST_UNIT_READY:
		scsi->cdb[0] = TEST_UNIT_READY;
		sgd->byte_count = 0;
		scsi->hdr.sg_list_len_bytes = 0;
		break;

	case READ_CAPACITY:
		scsi->cdb[0] = READ_CAPACITY;
		sgd->byte_count = SKD_N_READ_CAP_BYTES;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		break;

	case INQUIRY:
		scsi->cdb[0] = INQUIRY;
		scsi->cdb[1] = 0x01;	/* evpd */
		scsi->cdb[2] = 0x80;	/* serial number page */
		scsi->cdb[4] = 0x10;
		sgd->byte_count = 16;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		break;

	case SYNCHRONIZE_CACHE:
		scsi->cdb[0] = SYNCHRONIZE_CACHE;
		sgd->byte_count = 0;
		scsi->hdr.sg_list_len_bytes = 0;
		break;

	case WRITE_BUFFER:
		scsi->cdb[0] = WRITE_BUFFER;
		scsi->cdb[1] = 0x02;
		scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
		scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
		sgd->byte_count = WR_BUF_SIZE;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);

		/* fill incrementing byte pattern */
		for (i = 0; i < sgd->byte_count; i++)
			buf[i] = i & 0xFF;
		break;

	case READ_BUFFER:
		scsi->cdb[0] = READ_BUFFER;
		scsi->cdb[1] = 0x02;
		scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
		scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
		sgd->byte_count = WR_BUF_SIZE;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);

		memset(skspcl->data_buf, 0, sgd->byte_count);
		break;

	default:
		SKD_ASSERT("Don't know what to send");
		return;
	}

	skd_send_special_fitmsg(skdev, skspcl);
}

static void skd_refresh_device_data(struct skd_device *skdev)
{
	struct skd_special_context *skspcl = &skdev->internal_skspcl;

	skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
}

static int skd_chk_read_buf(struct skd_device *skdev,
			    struct skd_special_context *skspcl)
{
	unsigned char *buf = skspcl->data_buf;
	int i;

	/* check for incrementing byte pattern */
	for (i = 0; i < WR_BUF_SIZE; i++)
		if (buf[i] != (i & 0xFF))
			return 1;

	return 0;
}

static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
				 u8 code, u8 qual, u8 fruc)
{
	/* If the check condition is of special interest, log a message */
	if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
	    && (code == 0x04) && (qual == 0x06)) {
		dev_err(&skdev->pdev->dev,
			"*** LOST_WRITE_DATA ERROR *** key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
			key, code, qual, fruc);
	}
}

static void skd_complete_internal(struct skd_device *skdev,
				  struct fit_completion_entry_v1 *skcomp,
				  struct fit_comp_error_info *skerr,
				  struct skd_special_context *skspcl)
{
	u8 *buf = skspcl->data_buf;
	u8 status;
	int i;
	struct skd_scsi_request *scsi = &skspcl->msg_buf->scsi[0];

	lockdep_assert_held(&skdev->lock);

	SKD_ASSERT(skspcl == &skdev->internal_skspcl);

	dev_dbg(&skdev->pdev->dev, "complete internal %x\n", scsi->cdb[0]);

	dma_sync_single_for_cpu(&skdev->pdev->dev,
				skspcl->db_dma_address,
				skspcl->req.sksg_list[0].byte_count,
				DMA_BIDIRECTIONAL);

	skspcl->req.completion = *skcomp;
	skspcl->req.state = SKD_REQ_STATE_IDLE;

	status = skspcl->req.completion.status;

	skd_log_check_status(skdev, status, skerr->key, skerr->code,
			     skerr->qual, skerr->fruc);

	switch (scsi->cdb[0]) {
	case TEST_UNIT_READY:
		if (status == SAM_STAT_GOOD)
			skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
		else if ((status == SAM_STAT_CHECK_CONDITION) &&
			 (skerr->key == MEDIUM_ERROR))
			skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
		else {
			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
				dev_dbg(&skdev->pdev->dev,
					"TUR failed, don't send anymore state 0x%x\n",
					skdev->state);
				return;
			}
			dev_dbg(&skdev->pdev->dev,
				"**** TUR failed, retry skerr\n");
			skd_send_internal_skspcl(skdev, skspcl,
						 TEST_UNIT_READY);
		}
		break;

	case WRITE_BUFFER:
		if (status == SAM_STAT_GOOD)
			skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
		else {
			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
				dev_dbg(&skdev->pdev->dev,
					"write buffer failed, don't send anymore state 0x%x\n",
					skdev->state);
				return;
			}
			dev_dbg(&skdev->pdev->dev,
				"**** write buffer failed, retry skerr\n");
			skd_send_internal_skspcl(skdev, skspcl,
						 TEST_UNIT_READY);
		}
		break;

	case READ_BUFFER:
		if (status == SAM_STAT_GOOD) {
			if (skd_chk_read_buf(skdev, skspcl) == 0)
				skd_send_internal_skspcl(skdev, skspcl,
							 READ_CAPACITY);
			else {
				dev_err(&skdev->pdev->dev,
					"*** W/R Buffer mismatch %d ***\n",
					skdev->connect_retries);
				if (skdev->connect_retries <
				    SKD_MAX_CONNECT_RETRIES) {
					skdev->connect_retries++;
					skd_soft_reset(skdev);
				} else {
					dev_err(&skdev->pdev->dev,
						"W/R Buffer Connect Error\n");
					return;
				}
			}

		} else {
			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
				dev_dbg(&skdev->pdev->dev,
					"read buffer failed, don't send anymore state 0x%x\n",
					skdev->state);
				return;
			}
			dev_dbg(&skdev->pdev->dev,
				"**** read buffer failed, retry skerr\n");
			skd_send_internal_skspcl(skdev, skspcl,
						 TEST_UNIT_READY);
		}
		break;

	case READ_CAPACITY:
		skdev->read_cap_is_valid = 0;
		if (status == SAM_STAT_GOOD) {
			skdev->read_cap_last_lba =
				(buf[0] << 24) | (buf[1] << 16) |
				(buf[2] << 8) | buf[3];
			skdev->read_cap_blocksize =
				(buf[4] << 24) | (buf[5] << 16) |
				(buf[6] << 8) | buf[7];

			dev_dbg(&skdev->pdev->dev, "last lba %d, bs %d\n",
				skdev->read_cap_last_lba,
				skdev->read_cap_blocksize);

			set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);

			skdev->read_cap_is_valid = 1;

			skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
		} else if ((status == SAM_STAT_CHECK_CONDITION) &&
			   (skerr->key == MEDIUM_ERROR)) {
			skdev->read_cap_last_lba = ~0;
			set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
			dev_dbg(&skdev->pdev->dev, "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n");
			skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
		} else {
			dev_dbg(&skdev->pdev->dev, "**** READCAP failed, retry TUR\n");
			skd_send_internal_skspcl(skdev, skspcl,
						 TEST_UNIT_READY);
		}
		break;

	case INQUIRY:
		skdev->inquiry_is_valid = 0;
		if (status == SAM_STAT_GOOD) {
			skdev->inquiry_is_valid = 1;

			for (i = 0; i < 12; i++)
				skdev->inq_serial_num[i] = buf[i + 4];
			skdev->inq_serial_num[12] = 0;
		}

		if (skd_unquiesce_dev(skdev) < 0)
			dev_dbg(&skdev->pdev->dev, "**** failed, to ONLINE device\n");
		/* connection is complete */
		skdev->connect_retries = 0;
		break;

	case SYNCHRONIZE_CACHE:
		if (status == SAM_STAT_GOOD)
			skdev->sync_done = 1;
		else
			skdev->sync_done = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	default:
		SKD_ASSERT("we didn't send this");
	}
}

/*
 *****************************************************************************
 * FIT MESSAGES
 *****************************************************************************
 */

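/*
 * A minimal sketch of how a FIT message is issued, as implied by
 * skd_send_fitmsg() below: the message's DMA address is OR'd with a queue
 * selector and a size flag and written to the FIT_Q_COMMAND register, e.g.
 *
 *	qcmd = skmsg->mb_dma_address | FIT_QCMD_QID_NORMAL |
 *	       FIT_QCMD_MSGSIZE_128;
 *	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
 *
 * The message buffer must be synced for the device and ordered with
 * smp_wmb() before the doorbell write.
 */
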
static void skd_send_fitmsg(struct skd_device *skdev,
			    struct skd_fitmsg_context *skmsg)
{
	u64 qcmd;

	dev_dbg(&skdev->pdev->dev, "dma address 0x%llx, busy=%d\n",
		skmsg->mb_dma_address, skd_in_flight(skdev));
	dev_dbg(&skdev->pdev->dev, "msg_buf %p\n", skmsg->msg_buf);

	qcmd = skmsg->mb_dma_address;
	qcmd |= FIT_QCMD_QID_NORMAL;

	if (unlikely(skdev->dbg_level > 1)) {
		u8 *bp = (u8 *)skmsg->msg_buf;
		int i;
		for (i = 0; i < skmsg->length; i += 8) {
			dev_dbg(&skdev->pdev->dev, "msg[%2d] %8ph\n", i,
				&bp[i]);
			if (i == 0)
				i = 64 - 8;
		}
	}

	if (skmsg->length > 256)
		qcmd |= FIT_QCMD_MSGSIZE_512;
	else if (skmsg->length > 128)
		qcmd |= FIT_QCMD_MSGSIZE_256;
	else if (skmsg->length > 64)
		qcmd |= FIT_QCMD_MSGSIZE_128;
	else
		/*
		 * This makes no sense because the FIT msg header is
		 * 64 bytes. If the msg is only 64 bytes long it has
		 * no payload.
		 */
		qcmd |= FIT_QCMD_MSGSIZE_64;

	dma_sync_single_for_device(&skdev->pdev->dev, skmsg->mb_dma_address,
				   skmsg->length, DMA_TO_DEVICE);

	/* Make sure skd_msg_buf is written before the doorbell is triggered. */
	smp_wmb();

	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}

static void skd_send_special_fitmsg(struct skd_device *skdev,
				    struct skd_special_context *skspcl)
{
	u64 qcmd;

	WARN_ON_ONCE(skspcl->req.n_sg != 1);

	if (unlikely(skdev->dbg_level > 1)) {
		u8 *bp = (u8 *)skspcl->msg_buf;
		int i;

		for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
			dev_dbg(&skdev->pdev->dev, " spcl[%2d] %8ph\n", i,
				&bp[i]);
			if (i == 0)
				i = 64 - 8;
		}

		dev_dbg(&skdev->pdev->dev,
			"skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
			skspcl, skspcl->req.id, skspcl->req.sksg_list,
			skspcl->req.sksg_dma_address);
		for (i = 0; i < skspcl->req.n_sg; i++) {
			struct fit_sg_descriptor *sgd =
				&skspcl->req.sksg_list[i];

			dev_dbg(&skdev->pdev->dev,
				" sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
				i, sgd->byte_count, sgd->control,
				sgd->host_side_addr, sgd->next_desc_ptr);
		}
	}

	/*
	 * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
	 * and one 64-byte SSDI command.
	 */
	qcmd = skspcl->mb_dma_address;
	qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;

	dma_sync_single_for_device(&skdev->pdev->dev, skspcl->mb_dma_address,
				   SKD_N_SPECIAL_FITMSG_BYTES, DMA_TO_DEVICE);
	dma_sync_single_for_device(&skdev->pdev->dev,
				   skspcl->req.sksg_dma_address,
				   1 * sizeof(struct fit_sg_descriptor),
				   DMA_TO_DEVICE);
	dma_sync_single_for_device(&skdev->pdev->dev,
				   skspcl->db_dma_address,
				   skspcl->req.sksg_list[0].byte_count,
				   DMA_BIDIRECTIONAL);

	/* Make sure skd_msg_buf is written before the doorbell is triggered. */
	smp_wmb();

	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}

/*
 *****************************************************************************
 * COMPLETION QUEUE
 *****************************************************************************
 */

static void skd_complete_other(struct skd_device *skdev,
			       struct fit_completion_entry_v1 *skcomp,
			       struct fit_comp_error_info *skerr);

struct sns_info {
	u8 type;
	u8 stat;
	u8 key;
	u8 asc;
	u8 ascq;
	u8 mask;
	enum skd_check_status_action action;
};

static struct sns_info skd_chkstat_table[] = {
	/* Good */
	{ 0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c,
	  SKD_CHECK_STATUS_REPORT_GOOD },

	/* Smart alerts */
	{ 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E,	/* warnings */
	  SKD_CHECK_STATUS_REPORT_SMART_ALERT },
	{ 0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E,	/* thresholds */
	  SKD_CHECK_STATUS_REPORT_SMART_ALERT },
	{ 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temperature over trigger */
	  SKD_CHECK_STATUS_REPORT_SMART_ALERT },

	/* Retry (with limits) */
	{ 0x70, 0x02, 0x0B, 0, 0, 0x1C,		/* This one is for DMA ERROR */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },
	{ 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E,	/* warnings */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },
	{ 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E,	/* thresholds */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },
	{ 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F,	/* backup power */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },

	/* Busy (or about to be) */
	{ 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F,	/* fw changed */
	  SKD_CHECK_STATUS_BUSY_IMMINENT },
};

/*
 * Look up status and sense data to decide how to handle the error
 * from the device.
 * mask says which fields must match e.g., mask=0x18 means check
 * type and stat, ignore key, asc, ascq.
 */

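/*
 * A worked example, following the mask checks in skd_check_status() below:
 * mask 0x1E tests type (0x10), stat (0x08), key (0x04) and asc (0x02) but
 * ignores ascq (0x01), so the { 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E }
 * entry matches any ascq value and reports
 * SKD_CHECK_STATUS_REPORT_SMART_ALERT.
 */
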
static enum skd_check_status_action
skd_check_status(struct skd_device *skdev,
		 u8 cmp_status, struct fit_comp_error_info *skerr)
{
	int i;

	dev_err(&skdev->pdev->dev, "key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
		skerr->key, skerr->code, skerr->qual, skerr->fruc);

	dev_dbg(&skdev->pdev->dev,
		"stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
		skerr->type, cmp_status, skerr->key, skerr->code, skerr->qual,
		skerr->fruc);

	/* Does the info match an entry in the good category? */
	for (i = 0; i < ARRAY_SIZE(skd_chkstat_table); i++) {
		struct sns_info *sns = &skd_chkstat_table[i];

		if (sns->mask & 0x10)
			if (skerr->type != sns->type)
				continue;

		if (sns->mask & 0x08)
			if (cmp_status != sns->stat)
				continue;

		if (sns->mask & 0x04)
			if (skerr->key != sns->key)
				continue;

		if (sns->mask & 0x02)
			if (skerr->code != sns->asc)
				continue;

		if (sns->mask & 0x01)
			if (skerr->qual != sns->ascq)
				continue;

		if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
			dev_err(&skdev->pdev->dev,
				"SMART Alert: sense key/asc/ascq %02x/%02x/%02x\n",
				skerr->key, skerr->code, skerr->qual);
		}
		return sns->action;
	}

	/* No other match, so nonzero status means error,
	 * zero status means good
	 */
	if (cmp_status) {
		dev_dbg(&skdev->pdev->dev, "status check: error\n");
		return SKD_CHECK_STATUS_REPORT_ERROR;
	}

	dev_dbg(&skdev->pdev->dev, "status check good default\n");
	return SKD_CHECK_STATUS_REPORT_GOOD;
}

static void skd_resolve_req_exception(struct skd_device *skdev,
				      struct skd_request_context *skreq,
				      struct request *req)
{
	u8 cmp_status = skreq->completion.status;

	switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
	case SKD_CHECK_STATUS_REPORT_GOOD:
	case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
		skreq->status = BLK_STS_OK;
		blk_mq_complete_request(req);
		break;

	case SKD_CHECK_STATUS_BUSY_IMMINENT:
		skd_log_skreq(skdev, skreq, "retry(busy)");
		blk_requeue_request(skdev->queue, req);
		dev_info(&skdev->pdev->dev, "drive BUSY imminent\n");
		skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
		skdev->timer_countdown = SKD_TIMER_MINUTES(20);
		skd_quiesce_dev(skdev);
		break;

	case SKD_CHECK_STATUS_REQUEUE_REQUEST:
		if ((unsigned long) ++req->special < SKD_MAX_RETRIES) {
			skd_log_skreq(skdev, skreq, "retry");
			blk_requeue_request(skdev->queue, req);
			break;
		}
		/* fall through */

	case SKD_CHECK_STATUS_REPORT_ERROR:
	default:
		skreq->status = BLK_STS_IOERR;
		blk_mq_complete_request(req);
		break;
	}
}

static void skd_release_skreq(struct skd_device *skdev,
			      struct skd_request_context *skreq)
{
	/*
	 * Reclaim the skd_request_context
	 */
	skreq->state = SKD_REQ_STATE_IDLE;
}

static int skd_isr_completion_posted(struct skd_device *skdev,
				     int limit, int *enqueued)
{
	struct fit_completion_entry_v1 *skcmp;
	struct fit_comp_error_info *skerr;
	u16 req_id;
	u32 tag;
	u16 hwq = 0;
	struct request *rq;
	struct skd_request_context *skreq;
	u16 cmp_cntxt;
	u8 cmp_status;
	u8 cmp_cycle;
	u32 cmp_bytes;
	int rc = 0;
	int processed = 0;

	lockdep_assert_held(&skdev->lock);

	for (;; ) {
		SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);

		skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
		cmp_cycle = skcmp->cycle;
		cmp_cntxt = skcmp->tag;
		cmp_status = skcmp->status;
		cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);

		skerr = &skdev->skerr_table[skdev->skcomp_ix];

		dev_dbg(&skdev->pdev->dev,
			"cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d busy=%d rbytes=0x%x proto=%d\n",
			skdev->skcomp_cycle, skdev->skcomp_ix, cmp_cycle,
			cmp_cntxt, cmp_status, skd_in_flight(skdev),
			cmp_bytes, skdev->proto_ver);

		if (cmp_cycle != skdev->skcomp_cycle) {
			dev_dbg(&skdev->pdev->dev, "end of completions\n");
			break;
		}
		/*
		 * Update the completion queue head index and possibly
		 * the completion cycle count. 8-bit wrap-around.
		 */
		skdev->skcomp_ix++;
		if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
			skdev->skcomp_ix = 0;
			skdev->skcomp_cycle++;
		}

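		/*
		 * A sketch of the ownership rule implied by the cycle check
		 * above: entries are valid only while their cycle field
		 * matches skcomp_cycle. With SKD_N_COMPLETION_ENTRY == 256,
		 * wrapping the index back to 0 bumps skcomp_cycle, so stale
		 * entries from the previous pass fail the cmp_cycle test and
		 * terminate the loop.
		 */
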
		/*
		 * The command context is a unique 32-bit ID. The low order
		 * bits help locate the request. The request is usually a
		 * r/w request (see skd_start() above) or a special request.
		 */
		req_id = cmp_cntxt;
		tag = req_id & SKD_ID_SLOT_AND_TABLE_MASK;

		/* Is this other than a r/w request? */
		if (tag >= skdev->num_req_context) {
			/*
			 * This is not a completion for a r/w request.
			 */
			WARN_ON_ONCE(blk_mq_tag_to_rq(skdev->tag_set.tags[hwq],
						      tag));
			skd_complete_other(skdev, skcmp, skerr);
			continue;
		}

		rq = blk_mq_tag_to_rq(skdev->tag_set.tags[hwq], tag);
		if (WARN(!rq, "No request for tag %#x -> %#x\n", cmp_cntxt,
			 tag))
			continue;
		skreq = blk_mq_rq_to_pdu(rq);

		/*
		 * Make sure the request ID for the slot matches.
		 */
		if (skreq->id != req_id) {
			dev_err(&skdev->pdev->dev,
				"Completion mismatch comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
				req_id, skreq->id, cmp_cntxt);

			continue;
		}

		SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);

		skreq->completion = *skcmp;
		if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
			skreq->err_info = *skerr;
			skd_log_check_status(skdev, cmp_status, skerr->key,
					     skerr->code, skerr->qual,
					     skerr->fruc);
		}
		/* Release DMA resources for the request. */
		if (skreq->n_sg > 0)
			skd_postop_sg_list(skdev, skreq);

		skd_release_skreq(skdev, skreq);

		/*
		 * Capture the outcome and post it back to the native request.
		 */
		if (likely(cmp_status == SAM_STAT_GOOD)) {
			skreq->status = BLK_STS_OK;
			blk_mq_complete_request(rq);
		} else {
			skd_resolve_req_exception(skdev, skreq, rq);
		}

		/* skd_isr_comp_limit equal zero means no limit */
		if (limit) {
			if (++processed >= limit) {
				rc = 1;
				break;
			}
		}
	}

	if (skdev->state == SKD_DRVR_STATE_PAUSING &&
	    skd_in_flight(skdev) == 0) {
		skdev->state = SKD_DRVR_STATE_PAUSED;
		wake_up_interruptible(&skdev->waitq);
	}

	return rc;
}

static void skd_complete_other(struct skd_device *skdev,
			       struct fit_completion_entry_v1 *skcomp,
			       struct fit_comp_error_info *skerr)
{
	u32 req_id = 0;
	u32 req_table;
	u32 req_slot;
	struct skd_special_context *skspcl;

	lockdep_assert_held(&skdev->lock);

	req_id = skcomp->tag;
	req_table = req_id & SKD_ID_TABLE_MASK;
	req_slot = req_id & SKD_ID_SLOT_MASK;

	dev_dbg(&skdev->pdev->dev, "table=0x%x id=0x%x slot=%d\n", req_table,
		req_id, req_slot);

	/*
	 * Based on the request id, determine how to dispatch this completion.
	 * This switch/case is finding the good cases and forwarding the
	 * completion entry. Errors are reported below the switch.
	 */
	switch (req_table) {
	case SKD_ID_RW_REQUEST:
		/*
		 * The caller, skd_isr_completion_posted() above,
		 * handles r/w requests. The only way we get here
		 * is if the req_slot is out of bounds.
		 */
		break;

	case SKD_ID_INTERNAL:
		if (req_slot == 0) {
			skspcl = &skdev->internal_skspcl;
			if (skspcl->req.id == req_id &&
			    skspcl->req.state == SKD_REQ_STATE_BUSY) {
				skd_complete_internal(skdev,
						      skcomp, skerr, skspcl);
				return;
			}
		}
		break;

	case SKD_ID_FIT_MSG:
		/*
		 * These id's should never appear in a completion record.
		 */
		break;

	default:
		/*
		 * These id's should never appear anywhere.
		 */
		break;
	}

	/*
	 * If we get here it is a bad or stale id.
	 */
}

static void skd_reset_skcomp(struct skd_device *skdev)
{
	memset(skdev->skcomp_table, 0, SKD_SKCOMP_SIZE);

	skdev->skcomp_ix = 0;
	skdev->skcomp_cycle = 1;
}

/*
 *****************************************************************************
 * INTERRUPTS
 *****************************************************************************
 */

static void skd_completion_worker(struct work_struct *work)
{
	struct skd_device *skdev =
		container_of(work, struct skd_device, completion_worker);
	unsigned long flags;
	int flush_enqueued = 0;

	spin_lock_irqsave(&skdev->lock, flags);

	/*
	 * pass in limit=0, which means no limit..
	 * process everything in compq
	 */
	skd_isr_completion_posted(skdev, 0, &flush_enqueued);
	schedule_work(&skdev->start_queue);

	spin_unlock_irqrestore(&skdev->lock, flags);
}

static void skd_isr_msg_from_dev(struct skd_device *skdev);

static irqreturn_t
skd_isr(int irq, void *ptr)
{
	struct skd_device *skdev = ptr;
	u32 intstat;
	u32 ack;
	int rc = 0;
	int deferred = 0;
	int flush_enqueued = 0;

	spin_lock(&skdev->lock);

	for (;; ) {
		intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);

		ack = FIT_INT_DEF_MASK;
		ack &= intstat;

		dev_dbg(&skdev->pdev->dev, "intstat=0x%x ack=0x%x\n", intstat,
			ack);

		/* As long as there is an int pending on device, keep
		 * running loop. When none, get out, but if we've never
		 * done any processing, call completion handler?
		 */
		if (ack == 0) {
			/* No interrupts on device, but run the completion
			 * processor anyway?
			 */
			if (rc == 0)
				if (likely(skdev->state
					   == SKD_DRVR_STATE_ONLINE))
					deferred = 1;
			break;
		}

		rc = IRQ_HANDLED;

		SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);

		if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
			   (skdev->state != SKD_DRVR_STATE_STOPPING))) {
			if (intstat & FIT_ISH_COMPLETION_POSTED) {
				/*
				 * If we have already deferred completion
				 * processing, don't bother running it again
				 */
				if (deferred == 0)
					deferred =
						skd_isr_completion_posted(skdev,
						skd_isr_comp_limit, &flush_enqueued);
			}

			if (intstat & FIT_ISH_FW_STATE_CHANGE) {
				skd_isr_fwstate(skdev);
				if (skdev->state == SKD_DRVR_STATE_FAULT ||
				    skdev->state ==
				    SKD_DRVR_STATE_DISAPPEARED) {
					spin_unlock(&skdev->lock);
					return rc;
				}
			}

			if (intstat & FIT_ISH_MSG_FROM_DEV)
				skd_isr_msg_from_dev(skdev);
		}
	}

	if (unlikely(flush_enqueued))
		schedule_work(&skdev->start_queue);

	if (deferred)
		schedule_work(&skdev->completion_worker);
	else if (!flush_enqueued)
		schedule_work(&skdev->start_queue);

	spin_unlock(&skdev->lock);

	return rc;
}

static void skd_drive_fault(struct skd_device *skdev)
{
	skdev->state = SKD_DRVR_STATE_FAULT;
	dev_err(&skdev->pdev->dev, "Drive FAULT\n");
}

static void skd_drive_disappeared(struct skd_device *skdev)
{
	skdev->state = SKD_DRVR_STATE_DISAPPEARED;
	dev_err(&skdev->pdev->dev, "Drive DISAPPEARED\n");
}

static void skd_isr_fwstate(struct skd_device *skdev)
{
	u32 sense;
	u32 state;
	u32 mtd;
	int prev_driver_state = skdev->state;

	sense = SKD_READL(skdev, FIT_STATUS);
	state = sense & FIT_SR_DRIVE_STATE_MASK;

	dev_err(&skdev->pdev->dev, "s1120 state %s(%d)=>%s(%d)\n",
		skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
		skd_drive_state_to_str(state), state);

	skdev->drive_state = state;

	switch (skdev->drive_state) {
	case FIT_SR_DRIVE_INIT:
		if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
			skd_disable_interrupts(skdev);
			break;
		}
		if (skdev->state == SKD_DRVR_STATE_RESTARTING)
			skd_recover_requests(skdev);
		if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
			skdev->timer_countdown = SKD_STARTING_TIMO;
			skdev->state = SKD_DRVR_STATE_STARTING;
			skd_soft_reset(skdev);
			break;
		}
		mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_SR_DRIVE_ONLINE:
		skdev->cur_max_queue_depth = skd_max_queue_depth;
		if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
			skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;

		skdev->queue_low_water_mark =
			skdev->cur_max_queue_depth * 2 / 3 + 1;
		if (skdev->queue_low_water_mark < 1)
			skdev->queue_low_water_mark = 1;
		dev_info(&skdev->pdev->dev,
			 "Queue depth limit=%d dev=%d lowat=%d\n",
			 skdev->cur_max_queue_depth,
			 skdev->dev_max_queue_depth,
			 skdev->queue_low_water_mark);

		skd_refresh_device_data(skdev);
		break;

	case FIT_SR_DRIVE_BUSY:
		skdev->state = SKD_DRVR_STATE_BUSY;
		skdev->timer_countdown = SKD_BUSY_TIMO;
		skd_quiesce_dev(skdev);
		break;
	case FIT_SR_DRIVE_BUSY_SANITIZE:
		/* set timer for 3 seconds, we'll abort any unfinished
		 * commands after that expires
		 */
		skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
		skdev->timer_countdown = SKD_TIMER_SECONDS(3);
		schedule_work(&skdev->start_queue);
		break;
	case FIT_SR_DRIVE_BUSY_ERASE:
		skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
		skdev->timer_countdown = SKD_BUSY_TIMO;
		break;
	case FIT_SR_DRIVE_OFFLINE:
		skdev->state = SKD_DRVR_STATE_IDLE;
		break;
	case FIT_SR_DRIVE_SOFT_RESET:
		switch (skdev->state) {
		case SKD_DRVR_STATE_STARTING:
		case SKD_DRVR_STATE_RESTARTING:
			/* Expected by a caller of skd_soft_reset() */
			break;
		default:
			skdev->state = SKD_DRVR_STATE_RESTARTING;
			break;
		}
		break;
	case FIT_SR_DRIVE_FW_BOOTING:
		dev_dbg(&skdev->pdev->dev, "ISR FIT_SR_DRIVE_FW_BOOTING\n");
		skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
		skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
		break;

	case FIT_SR_DRIVE_DEGRADED:
	case FIT_SR_PCIE_LINK_DOWN:
	case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
		break;

	case FIT_SR_DRIVE_FAULT:
		skd_drive_fault(skdev);
		skd_recover_requests(skdev);
		schedule_work(&skdev->start_queue);
		break;

	/* PCIe bus returned all Fs? */
	case 0xFF:
		dev_info(&skdev->pdev->dev, "state=0x%x sense=0x%x\n", state,
			 sense);
		skd_drive_disappeared(skdev);
		skd_recover_requests(skdev);
		schedule_work(&skdev->start_queue);
		break;
	default:
		/*
		 * Unknown FW state. Wait for a state we recognize.
		 */
		break;
	}
	dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
		skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
		skd_skdev_state_to_str(skdev->state), skdev->state);
}

static void skd_recover_request(struct request *req, void *data, bool reserved)
{
	struct skd_device *const skdev = data;
	struct skd_request_context *skreq = blk_mq_rq_to_pdu(req);

	if (skreq->state != SKD_REQ_STATE_BUSY)
		return;

	skd_log_skreq(skdev, skreq, "recover");

	/* Release DMA resources for the request. */
	if (skreq->n_sg > 0)
		skd_postop_sg_list(skdev, skreq);

	skreq->state = SKD_REQ_STATE_IDLE;
	skreq->status = BLK_STS_IOERR;
	blk_mq_complete_request(req);
}

static void skd_recover_requests(struct skd_device *skdev)
{
	blk_mq_tagset_busy_iter(&skdev->tag_set, skd_recover_request, skdev);
}

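/*
 * A sketch of the init handshake as implied by the case chain in
 * skd_isr_msg_from_dev() below: each acknowledged message triggers the next
 * one, FIT_MTD_FITFW_INIT -> GET_CMDQ_DEPTH -> SET_COMPQ_DEPTH ->
 * SET_COMPQ_ADDR -> CMD_LOG_HOST_ID -> CMD_LOG_TIME_STAMP_LO ->
 * CMD_LOG_TIME_STAMP_HI -> ARM_QUEUE, after which the drive is expected to
 * report FIT_SR_DRIVE_ONLINE.
 */
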
static void skd_isr_msg_from_dev(struct skd_device *skdev)
{
	u32 mfd;
	u32 mtd;
	u32 data;

	mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);

	dev_dbg(&skdev->pdev->dev, "mfd=0x%x last_mtd=0x%x\n", mfd,
		skdev->last_mtd);

	/* ignore any mtd that is an ack for something we didn't send */
	if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
		return;

	switch (FIT_MXD_TYPE(mfd)) {
	case FIT_MTD_FITFW_INIT:
		skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);

		if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
			dev_err(&skdev->pdev->dev, "protocol mismatch\n");
			dev_err(&skdev->pdev->dev, " got=%d support=%d\n",
				skdev->proto_ver, FIT_PROTOCOL_VERSION_1);
			dev_err(&skdev->pdev->dev, " please upgrade driver\n");
			skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
			skd_soft_reset(skdev);
			break;
		}
		mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_GET_CMDQ_DEPTH:
		skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
		mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
				   SKD_N_COMPLETION_ENTRY);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_SET_COMPQ_DEPTH:
		SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
		mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_SET_COMPQ_ADDR:
		skd_reset_skcomp(skdev);
		mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_CMD_LOG_HOST_ID:
		/* hardware interface overflows in y2106 */
		skdev->connect_time_stamp = (u32)ktime_get_real_seconds();
		data = skdev->connect_time_stamp & 0xFFFF;
		mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
		skdev->drive_jiffies = FIT_MXD_DATA(mfd);
		data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
		mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
		skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
		mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;

		dev_err(&skdev->pdev->dev, "Time sync driver=0x%x device=0x%x\n",
			skdev->connect_time_stamp, skdev->drive_jiffies);
		break;

	case FIT_MTD_ARM_QUEUE:
		skdev->last_mtd = 0;
		/*
		 * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
		 */
		break;

	default:
		break;
	}
}

static void skd_disable_interrupts(struct skd_device *skdev)
{
	u32 sense;

	sense = SKD_READL(skdev, FIT_CONTROL);
	sense &= ~FIT_CR_ENABLE_INTERRUPTS;
	SKD_WRITEL(skdev, sense, FIT_CONTROL);
	dev_dbg(&skdev->pdev->dev, "sense 0x%x\n", sense);

	/* Note that all 1s are written. A 1-bit means
	 * disable, a 0 means enable.
	 */
	SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
}

static void skd_enable_interrupts(struct skd_device *skdev)
{
	u32 val;

	/* unmask interrupts first */
	val = FIT_ISH_FW_STATE_CHANGE +
	      FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV;

	/* Note that the complement of the mask is written. A 1-bit means
	 * disable, a 0 means enable. */
	SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
	dev_dbg(&skdev->pdev->dev, "interrupt mask=0x%x\n", ~val);

	val = SKD_READL(skdev, FIT_CONTROL);
	val |= FIT_CR_ENABLE_INTERRUPTS;
	dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
	SKD_WRITEL(skdev, val, FIT_CONTROL);
}

/*
 *****************************************************************************
 * START, STOP, RESTART, QUIESCE, UNQUIESCE
 *****************************************************************************
 */

static void skd_soft_reset(struct skd_device *skdev)
{
	u32 val;

	val = SKD_READL(skdev, FIT_CONTROL);
	val |= (FIT_CR_SOFT_RESET);
	dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
	SKD_WRITEL(skdev, val, FIT_CONTROL);
}

static void skd_start_device(struct skd_device *skdev)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
u32 sense;
|
|
|
|
u32 state;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&skdev->lock, flags);
|
|
|
|
|
|
|
|
/* ack all ghost interrupts */
|
|
|
|
SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
|
|
|
|
|
|
|
|
sense = SKD_READL(skdev, FIT_STATUS);
|
|
|
|
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev, "initial status=0x%x\n", sense);
|
2013-10-16 04:19:07 +08:00
|
|
|
|
|
|
|
state = sense & FIT_SR_DRIVE_STATE_MASK;
|
|
|
|
skdev->drive_state = state;
|
|
|
|
skdev->last_mtd = 0;
|
|
|
|
|
|
|
|
skdev->state = SKD_DRVR_STATE_STARTING;
|
|
|
|
skdev->timer_countdown = SKD_STARTING_TIMO;
|
|
|
|
|
|
|
|
skd_enable_interrupts(skdev);
|
|
|
|
|
|
|
|
switch (skdev->drive_state) {
|
|
|
|
case FIT_SR_DRIVE_OFFLINE:
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_err(&skdev->pdev->dev, "Drive offline...\n");
|
2013-10-16 04:19:07 +08:00
|
|
|
break;
|
|
|
|
|
|
|
|
case FIT_SR_DRIVE_FW_BOOTING:
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev, "FIT_SR_DRIVE_FW_BOOTING\n");
|
2013-10-16 04:19:07 +08:00
|
|
|
skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
|
|
|
|
skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case FIT_SR_DRIVE_BUSY_SANITIZE:
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_info(&skdev->pdev->dev, "Start: BUSY_SANITIZE\n");
|
2013-10-16 04:19:07 +08:00
|
|
|
skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
|
|
|
|
skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case FIT_SR_DRIVE_BUSY_ERASE:
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_info(&skdev->pdev->dev, "Start: BUSY_ERASE\n");
|
2013-10-16 04:19:07 +08:00
|
|
|
skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
|
|
|
|
skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case FIT_SR_DRIVE_INIT:
|
|
|
|
case FIT_SR_DRIVE_ONLINE:
|
|
|
|
skd_soft_reset(skdev);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case FIT_SR_DRIVE_BUSY:
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_err(&skdev->pdev->dev, "Drive Busy...\n");
|
2013-10-16 04:19:07 +08:00
|
|
|
skdev->state = SKD_DRVR_STATE_BUSY;
|
|
|
|
skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case FIT_SR_DRIVE_SOFT_RESET:
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_err(&skdev->pdev->dev, "drive soft reset in prog\n");
|
2013-10-16 04:19:07 +08:00
|
|
|
break;
|
|
|
|
|
|
|
|
case FIT_SR_DRIVE_FAULT:
|
|
|
|
/* Fault state is bad; a soft reset won't clear it.
|
|
|
|
* A hard reset might, but does it even work on this device?
|
|
|
|
* For now, just fault so the system doesn't hang.
|
|
|
|
*/
|
|
|
|
skd_drive_fault(skdev);
|
|
|
|
/* start the queue so we can respond with errors to requests */
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev, "starting queue\n");
|
2017-08-18 04:13:32 +08:00
|
|
|
schedule_work(&skdev->start_queue);
|
2013-10-16 04:19:07 +08:00
|
|
|
skdev->gendisk_on = -1;
|
|
|
|
wake_up_interruptible(&skdev->waitq);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case 0xFF:
|
|
|
|
/* Most likely the device isn't there or isn't responding
|
|
|
|
* to the BAR1 addresses. */
|
|
|
|
skd_drive_disappeared(skdev);
|
|
|
|
/* start the queue so we can respond with errors to requests */
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev,
|
|
|
|
"starting queue to error-out reqs\n");
|
2017-08-18 04:13:32 +08:00
|
|
|
schedule_work(&skdev->start_queue);
|
2013-10-16 04:19:07 +08:00
|
|
|
skdev->gendisk_on = -1;
|
|
|
|
wake_up_interruptible(&skdev->waitq);
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_err(&skdev->pdev->dev, "Start: unknown state %x\n",
|
|
|
|
skdev->drive_state);
|
2013-10-16 04:19:07 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
state = SKD_READL(skdev, FIT_CONTROL);
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev, "FIT Control Status=0x%x\n", state);
|
2013-10-16 04:19:07 +08:00
|
|
|
|
|
|
|
state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev, "Intr Status=0x%x\n", state);
|
2013-10-16 04:19:07 +08:00
|
|
|
|
|
|
|
state = SKD_READL(skdev, FIT_INT_MASK_HOST);
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev, "Intr Mask=0x%x\n", state);
|
2013-10-16 04:19:07 +08:00
|
|
|
|
|
|
|
state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev, "Msg from Dev=0x%x\n", state);
|
2013-10-16 04:19:07 +08:00
|
|
|
|
|
|
|
state = SKD_READL(skdev, FIT_HW_VERSION);
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev, "HW version=0x%x\n", state);
|
2013-10-16 04:19:07 +08:00
|
|
|
|
|
|
|
spin_unlock_irqrestore(&skdev->lock, flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
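/*
 * Quiesce and shut the drive down: if the device is online and the
 * internal special context is idle, issue a SYNCHRONIZE CACHE and wait
 * up to ten seconds for it to complete, then kill the timer, disable
 * interrupts, clear pending interrupt status and soft reset the
 * device, polling up to one second for it to return to the INIT state.
 */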
static void skd_stop_device(struct skd_device *skdev)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
struct skd_special_context *skspcl = &skdev->internal_skspcl;
|
|
|
|
u32 dev_state;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&skdev->lock, flags);
|
|
|
|
|
|
|
|
if (skdev->state != SKD_DRVR_STATE_ONLINE) {
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_err(&skdev->pdev->dev, "%s not online no sync\n", __func__);
|
2013-10-16 04:19:07 +08:00
|
|
|
goto stop_out;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_err(&skdev->pdev->dev, "%s no special\n", __func__);
|
2013-10-16 04:19:07 +08:00
|
|
|
goto stop_out;
|
|
|
|
}
|
|
|
|
|
|
|
|
skdev->state = SKD_DRVR_STATE_SYNCING;
|
|
|
|
skdev->sync_done = 0;
|
|
|
|
|
|
|
|
skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
|
|
|
|
|
|
|
|
spin_unlock_irqrestore(&skdev->lock, flags);
|
|
|
|
|
|
|
|
wait_event_interruptible_timeout(skdev->waitq,
|
|
|
|
(skdev->sync_done), (10 * HZ));
|
|
|
|
|
|
|
|
spin_lock_irqsave(&skdev->lock, flags);
|
|
|
|
|
|
|
|
switch (skdev->sync_done) {
|
|
|
|
case 0:
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_err(&skdev->pdev->dev, "%s no sync\n", __func__);
|
2013-10-16 04:19:07 +08:00
|
|
|
break;
|
|
|
|
case 1:
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_err(&skdev->pdev->dev, "%s sync done\n", __func__);
|
2013-10-16 04:19:07 +08:00
|
|
|
break;
|
|
|
|
default:
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_err(&skdev->pdev->dev, "%s sync error\n", __func__);
|
2013-10-16 04:19:07 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
stop_out:
|
|
|
|
skdev->state = SKD_DRVR_STATE_STOPPING;
|
|
|
|
spin_unlock_irqrestore(&skdev->lock, flags);
|
|
|
|
|
|
|
|
skd_kill_timer(skdev);
|
|
|
|
|
|
|
|
spin_lock_irqsave(&skdev->lock, flags);
|
|
|
|
skd_disable_interrupts(skdev);
|
|
|
|
|
|
|
|
/* ensure all ints on device are cleared */
|
|
|
|
/* soft reset the device to unload with a clean slate */
|
|
|
|
SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
|
|
|
|
SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
|
|
|
|
|
|
|
|
spin_unlock_irqrestore(&skdev->lock, flags);
|
|
|
|
|
|
|
|
/* poll every 100ms, 1 second timeout */
|
|
|
|
for (i = 0; i < 10; i++) {
|
|
|
|
dev_state =
|
|
|
|
SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
|
|
|
|
if (dev_state == FIT_SR_DRIVE_INIT)
|
|
|
|
break;
|
|
|
|
set_current_state(TASK_INTERRUPTIBLE);
|
|
|
|
schedule_timeout(msecs_to_jiffies(100));
|
|
|
|
}
|
|
|
|
|
|
|
|
if (dev_state != FIT_SR_DRIVE_INIT)
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_err(&skdev->pdev->dev, "%s state error 0x%02x\n", __func__,
|
|
|
|
dev_state);
|
2013-10-16 04:19:07 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
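/*
 * Re-run the startup handshake after an error: ack stale interrupts,
 * re-sample the drive state, enter the RESTARTING state and soft reset
 * the device.
 */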
/* assume spinlock is held */
|
|
|
|
static void skd_restart_device(struct skd_device *skdev)
|
|
|
|
{
|
|
|
|
u32 state;
|
|
|
|
|
|
|
|
/* ack all ghost interrupts */
|
|
|
|
SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
|
|
|
|
|
|
|
|
state = SKD_READL(skdev, FIT_STATUS);
|
|
|
|
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev, "drive status=0x%x\n", state);
|
2013-10-16 04:19:07 +08:00
|
|
|
|
|
|
|
state &= FIT_SR_DRIVE_STATE_MASK;
|
|
|
|
skdev->drive_state = state;
|
|
|
|
skdev->last_mtd = 0;
|
|
|
|
|
|
|
|
skdev->state = SKD_DRVR_STATE_RESTARTING;
|
|
|
|
skdev->timer_countdown = SKD_RESTARTING_TIMO;
|
|
|
|
|
|
|
|
skd_soft_reset(skdev);
|
|
|
|
}
|
|
|
|
|
|
|
|
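/*
 * Stop the hardware queue while the drive is BUSY or BUSY_IMMINENT;
 * any other driver state is rejected with -EINVAL.
 */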
/* assume spinlock is held */
|
|
|
|
static int skd_quiesce_dev(struct skd_device *skdev)
|
|
|
|
{
|
|
|
|
int rc = 0;
|
|
|
|
|
|
|
|
switch (skdev->state) {
|
|
|
|
case SKD_DRVR_STATE_BUSY:
|
|
|
|
case SKD_DRVR_STATE_BUSY_IMMINENT:
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev, "stopping queue\n");
|
2017-08-18 04:13:32 +08:00
|
|
|
blk_mq_stop_hw_queues(skdev->queue);
|
2013-10-16 04:19:07 +08:00
|
|
|
break;
|
|
|
|
case SKD_DRVR_STATE_ONLINE:
|
|
|
|
case SKD_DRVR_STATE_STOPPING:
|
|
|
|
case SKD_DRVR_STATE_SYNCING:
|
|
|
|
case SKD_DRVR_STATE_PAUSING:
|
|
|
|
case SKD_DRVR_STATE_PAUSED:
|
|
|
|
case SKD_DRVR_STATE_STARTING:
|
|
|
|
case SKD_DRVR_STATE_RESTARTING:
|
|
|
|
case SKD_DRVR_STATE_RESUMING:
|
|
|
|
default:
|
|
|
|
rc = -EINVAL;
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev, "state [%d] not implemented\n",
|
|
|
|
skdev->state);
|
2013-10-16 04:19:07 +08:00
|
|
|
}
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
|
|
|
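/*
 * Bring the driver back ONLINE after a drive state change: if the
 * drive is not yet ONLINE the driver drops to the BUSY state and waits
 * for the next state change, otherwise the driver goes ONLINE and the
 * queue is restarted.
 */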
/* assume spinlock is held */
|
|
|
|
static int skd_unquiesce_dev(struct skd_device *skdev)
|
|
|
|
{
|
|
|
|
int prev_driver_state = skdev->state;
|
|
|
|
|
|
|
|
skd_log_skdev(skdev, "unquiesce");
|
|
|
|
if (skdev->state == SKD_DRVR_STATE_ONLINE) {
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev, "**** device already ONLINE\n");
|
2013-10-16 04:19:07 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
|
|
|
|
/*
|
|
|
|
* If there has been a state change to something other than
|
|
|
|
* ONLINE, we will rely on controller state change
|
|
|
|
* to come back online and restart the queue.
|
|
|
|
* The BUSY state means that the driver is ready to
|
|
|
|
* continue normal processing but is waiting for the controller
|
|
|
|
* to become available.
|
|
|
|
*/
|
|
|
|
skdev->state = SKD_DRVR_STATE_BUSY;
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev, "drive BUSY state\n");
|
2013-10-16 04:19:07 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The drive has just come online; the driver is either in startup,
|
|
|
|
* paused performing a task, or busy waiting for hardware.
|
|
|
|
*/
|
|
|
|
switch (skdev->state) {
|
|
|
|
case SKD_DRVR_STATE_PAUSED:
|
|
|
|
case SKD_DRVR_STATE_BUSY:
|
|
|
|
case SKD_DRVR_STATE_BUSY_IMMINENT:
|
|
|
|
case SKD_DRVR_STATE_BUSY_ERASE:
|
|
|
|
case SKD_DRVR_STATE_STARTING:
|
|
|
|
case SKD_DRVR_STATE_RESTARTING:
|
|
|
|
case SKD_DRVR_STATE_FAULT:
|
|
|
|
case SKD_DRVR_STATE_IDLE:
|
|
|
|
case SKD_DRVR_STATE_LOAD:
|
|
|
|
skdev->state = SKD_DRVR_STATE_ONLINE;
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
|
|
|
|
skd_skdev_state_to_str(prev_driver_state),
|
|
|
|
prev_driver_state, skd_skdev_state_to_str(skdev->state),
|
|
|
|
skdev->state);
|
|
|
|
dev_dbg(&skdev->pdev->dev,
|
|
|
|
"**** device ONLINE...starting block queue\n");
|
|
|
|
dev_dbg(&skdev->pdev->dev, "starting queue\n");
|
|
|
|
dev_info(&skdev->pdev->dev, "STEC s1120 ONLINE\n");
|
2017-08-18 04:13:32 +08:00
|
|
|
schedule_work(&skdev->start_queue);
|
2013-10-16 04:19:07 +08:00
|
|
|
skdev->gendisk_on = 1;
|
|
|
|
wake_up_interruptible(&skdev->waitq);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case SKD_DRVR_STATE_DISAPPEARED:
|
|
|
|
default:
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev,
|
|
|
|
"**** driver state %d, not implemented\n",
|
|
|
|
skdev->state);
|
2013-10-16 04:19:07 +08:00
|
|
|
return -EBUSY;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
*****************************************************************************
|
|
|
|
* PCIe MSI/MSI-X INTERRUPT HANDLERS
|
|
|
|
*****************************************************************************
|
|
|
|
*/
|
|
|
|
|
|
|
|
static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
|
|
|
|
{
|
|
|
|
struct skd_device *skdev = skd_host_data;
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&skdev->lock, flags);
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
|
|
|
|
SKD_READL(skdev, FIT_INT_STATUS_HOST));
|
|
|
|
dev_err(&skdev->pdev->dev, "MSIX reserved irq %d = 0x%x\n", irq,
|
|
|
|
SKD_READL(skdev, FIT_INT_STATUS_HOST));
|
2013-10-16 04:19:07 +08:00
|
|
|
SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
|
|
|
|
spin_unlock_irqrestore(&skdev->lock, flags);
|
|
|
|
return IRQ_HANDLED;
|
|
|
|
}
|
|
|
|
|
|
|
|
static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
|
|
|
|
{
|
|
|
|
struct skd_device *skdev = skd_host_data;
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&skdev->lock, flags);
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
|
|
|
|
SKD_READL(skdev, FIT_INT_STATUS_HOST));
|
2013-10-16 04:19:07 +08:00
|
|
|
SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
|
|
|
|
skd_isr_fwstate(skdev);
|
|
|
|
spin_unlock_irqrestore(&skdev->lock, flags);
|
|
|
|
return IRQ_HANDLED;
|
|
|
|
}
|
|
|
|
|
|
|
|
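/*
 * Completion queue MSI-X handler: ack the interrupt, process up to
 * skd_isr_comp_limit completions, then schedule the queue restart
 * and/or the deferred completion worker as required.
 */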
static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
|
|
|
|
{
|
|
|
|
struct skd_device *skdev = skd_host_data;
|
|
|
|
unsigned long flags;
|
|
|
|
int flush_enqueued = 0;
|
|
|
|
int deferred;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&skdev->lock, flags);
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
|
|
|
|
SKD_READL(skdev, FIT_INT_STATUS_HOST));
|
2013-10-16 04:19:07 +08:00
|
|
|
SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
|
|
|
|
deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
|
|
|
|
&flush_enqueued);
|
|
|
|
if (flush_enqueued)
|
2017-08-18 04:13:32 +08:00
|
|
|
schedule_work(&skdev->start_queue);
|
2013-10-16 04:19:07 +08:00
|
|
|
|
|
|
|
if (deferred)
|
|
|
|
schedule_work(&skdev->completion_worker);
|
|
|
|
else if (!flush_enqueued)
|
2017-08-18 04:13:32 +08:00
|
|
|
schedule_work(&skdev->start_queue);
|
2013-10-16 04:19:07 +08:00
|
|
|
|
|
|
|
spin_unlock_irqrestore(&skdev->lock, flags);
|
|
|
|
|
|
|
|
return IRQ_HANDLED;
|
|
|
|
}
|
|
|
|
|
|
|
|
static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
|
|
|
|
{
|
|
|
|
struct skd_device *skdev = skd_host_data;
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&skdev->lock, flags);
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
|
|
|
|
SKD_READL(skdev, FIT_INT_STATUS_HOST));
|
2013-10-16 04:19:07 +08:00
|
|
|
SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
|
|
|
|
skd_isr_msg_from_dev(skdev);
|
|
|
|
spin_unlock_irqrestore(&skdev->lock, flags);
|
|
|
|
return IRQ_HANDLED;
|
|
|
|
}
|
|
|
|
|
|
|
|
static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
|
|
|
|
{
|
|
|
|
struct skd_device *skdev = skd_host_data;
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&skdev->lock, flags);
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
|
|
|
|
SKD_READL(skdev, FIT_INT_STATUS_HOST));
|
2013-10-16 04:19:07 +08:00
|
|
|
SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
|
|
|
|
spin_unlock_irqrestore(&skdev->lock, flags);
|
|
|
|
return IRQ_HANDLED;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
*****************************************************************************
|
|
|
|
* PCIe MSI/MSI-X SETUP
|
|
|
|
*****************************************************************************
|
|
|
|
*/
|
|
|
|
|
|
|
|
struct skd_msix_entry {
|
|
|
|
char isr_name[30];
|
|
|
|
};
|
|
|
|
|
|
|
|
struct skd_init_msix_entry {
|
|
|
|
const char *name;
|
|
|
|
irq_handler_t handler;
|
|
|
|
};
|
|
|
|
|
|
|
|
#define SKD_MAX_MSIX_COUNT 13
|
|
|
|
#define SKD_MIN_MSIX_COUNT 7
|
|
|
|
#define SKD_BASE_MSIX_IRQ 4
|
|
|
|
|
|
|
|
static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
|
|
|
|
{ "(DMA 0)", skd_reserved_isr },
|
|
|
|
{ "(DMA 1)", skd_reserved_isr },
|
|
|
|
{ "(DMA 2)", skd_reserved_isr },
|
|
|
|
{ "(DMA 3)", skd_reserved_isr },
|
|
|
|
{ "(State Change)", skd_statec_isr },
|
|
|
|
{ "(COMPL_Q)", skd_comp_q },
|
|
|
|
{ "(MSG)", skd_msg_isr },
|
|
|
|
{ "(Reserved)", skd_reserved_isr },
|
|
|
|
{ "(Reserved)", skd_reserved_isr },
|
|
|
|
{ "(Queue Full 0)", skd_qfull_isr },
|
|
|
|
{ "(Queue Full 1)", skd_qfull_isr },
|
|
|
|
{ "(Queue Full 2)", skd_qfull_isr },
|
|
|
|
{ "(Queue Full 3)", skd_qfull_isr },
|
|
|
|
};
|
|
|
|
|
|
|
|
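/*
 * Allocate exactly SKD_MAX_MSIX_COUNT MSI-X vectors and register one
 * handler per vector according to the msix_entries[] table; on failure
 * any vectors registered so far are released again.
 */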
static int skd_acquire_msix(struct skd_device *skdev)
|
|
|
|
{
|
2014-02-19 16:58:21 +08:00
|
|
|
int i, rc;
|
2014-02-19 16:58:19 +08:00
|
|
|
struct pci_dev *pdev = skdev->pdev;
|
2013-10-16 04:19:07 +08:00
|
|
|
|
2016-11-08 03:14:07 +08:00
|
|
|
rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT,
|
|
|
|
PCI_IRQ_MSIX);
|
|
|
|
if (rc < 0) {
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_err(&skdev->pdev->dev, "failed to enable MSI-X %d\n", rc);
|
2016-11-09 20:55:34 +08:00
|
|
|
goto out;
|
2013-10-16 04:19:07 +08:00
|
|
|
}
|
2014-02-19 16:58:19 +08:00
|
|
|
|
2016-11-08 03:14:07 +08:00
|
|
|
skdev->msix_entries = kcalloc(SKD_MAX_MSIX_COUNT,
|
|
|
|
sizeof(struct skd_msix_entry), GFP_KERNEL);
|
2013-10-16 04:19:07 +08:00
|
|
|
if (!skdev->msix_entries) {
|
|
|
|
rc = -ENOMEM;
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_err(&skdev->pdev->dev, "msix table allocation error\n");
|
2016-11-09 20:55:34 +08:00
|
|
|
goto out;
|
2013-10-16 04:19:07 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Enable MSI-X vectors for the base queue */
|
2016-11-08 03:14:07 +08:00
|
|
|
for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
|
|
|
|
struct skd_msix_entry *qentry = &skdev->msix_entries[i];
|
|
|
|
|
2013-10-16 04:19:07 +08:00
|
|
|
snprintf(qentry->isr_name, sizeof(qentry->isr_name),
|
|
|
|
"%s%d-msix %s", DRV_NAME, skdev->devno,
|
|
|
|
msix_entries[i].name);
|
2016-11-08 03:14:07 +08:00
|
|
|
|
|
|
|
rc = devm_request_irq(&skdev->pdev->dev,
|
|
|
|
pci_irq_vector(skdev->pdev, i),
|
|
|
|
msix_entries[i].handler, 0,
|
|
|
|
qentry->isr_name, skdev);
|
2013-10-16 04:19:07 +08:00
|
|
|
if (rc) {
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_err(&skdev->pdev->dev,
|
|
|
|
"Unable to register(%d) MSI-X handler %d: %s\n",
|
|
|
|
rc, i, qentry->isr_name);
|
2013-10-16 04:19:07 +08:00
|
|
|
goto msix_out;
|
|
|
|
}
|
|
|
|
}
|
2016-11-08 03:14:07 +08:00
|
|
|
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev, "%d msix irq(s) enabled\n",
|
|
|
|
SKD_MAX_MSIX_COUNT);
|
2013-10-16 04:19:07 +08:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
msix_out:
|
2016-11-08 03:14:07 +08:00
|
|
|
while (--i >= 0)
|
|
|
|
devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), skdev);
|
2016-11-09 20:55:34 +08:00
|
|
|
out:
|
2016-11-08 03:14:07 +08:00
|
|
|
kfree(skdev->msix_entries);
|
|
|
|
skdev->msix_entries = NULL;
|
2013-10-16 04:19:07 +08:00
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
|
|
|
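/*
 * Interrupt setup: try MSI-X first (when configured), then fall back
 * to a single MSI or legacy INTx vector served by skd_isr().
 */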
static int skd_acquire_irq(struct skd_device *skdev)
|
|
|
|
{
|
2016-11-08 03:14:07 +08:00
|
|
|
struct pci_dev *pdev = skdev->pdev;
|
|
|
|
unsigned int irq_flag = PCI_IRQ_LEGACY;
|
2013-10-16 04:19:07 +08:00
|
|
|
int rc;
|
|
|
|
|
2016-11-08 03:14:07 +08:00
|
|
|
if (skd_isr_type == SKD_IRQ_MSIX) {
|
2013-10-16 04:19:07 +08:00
|
|
|
rc = skd_acquire_msix(skdev);
|
|
|
|
if (!rc)
|
2016-11-08 03:14:07 +08:00
|
|
|
return 0;
|
|
|
|
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_err(&skdev->pdev->dev,
|
|
|
|
"failed to enable MSI-X, re-trying with MSI %d\n", rc);
|
2013-10-16 04:19:07 +08:00
|
|
|
}
|
2016-11-08 03:14:07 +08:00
|
|
|
|
|
|
|
snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME,
|
|
|
|
skdev->devno);
|
|
|
|
|
|
|
|
if (skd_isr_type != SKD_IRQ_LEGACY)
|
|
|
|
irq_flag |= PCI_IRQ_MSI;
|
|
|
|
rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag);
|
|
|
|
if (rc < 0) {
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_err(&skdev->pdev->dev,
|
|
|
|
"failed to allocate the MSI interrupt %d\n", rc);
|
2016-11-08 03:14:07 +08:00
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
|
|
|
rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
|
|
|
|
pdev->msi_enabled ? 0 : IRQF_SHARED,
|
|
|
|
skdev->isr_name, skdev);
|
|
|
|
if (rc) {
|
|
|
|
pci_free_irq_vectors(pdev);
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_err(&skdev->pdev->dev, "failed to allocate interrupt %d\n",
|
|
|
|
rc);
|
2016-11-08 03:14:07 +08:00
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
2013-10-16 04:19:07 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void skd_release_irq(struct skd_device *skdev)
|
|
|
|
{
|
2016-11-08 03:14:07 +08:00
|
|
|
struct pci_dev *pdev = skdev->pdev;
|
|
|
|
|
|
|
|
if (skdev->msix_entries) {
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
|
|
|
|
devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i),
|
|
|
|
skdev);
|
|
|
|
}
|
|
|
|
|
|
|
|
kfree(skdev->msix_entries);
|
|
|
|
skdev->msix_entries = NULL;
|
|
|
|
} else {
|
|
|
|
devm_free_irq(&pdev->dev, pdev->irq, skdev);
|
2013-10-16 04:19:07 +08:00
|
|
|
}
|
2016-11-08 03:14:07 +08:00
|
|
|
|
|
|
|
pci_free_irq_vectors(pdev);
|
2013-10-16 04:19:07 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
*****************************************************************************
|
|
|
|
* CONSTRUCT
|
|
|
|
*****************************************************************************
|
|
|
|
*/
|
|
|
|
|
2017-08-18 04:13:35 +08:00
|
|
|
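/*
 * Allocate a buffer from the given kmem_cache and map the whole cache
 * object for streaming DMA in the requested direction; pair with
 * skd_free_dma() to unmap and free it.
 */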
static void *skd_alloc_dma(struct skd_device *skdev, struct kmem_cache *s,
|
|
|
|
dma_addr_t *dma_handle, gfp_t gfp,
|
|
|
|
enum dma_data_direction dir)
|
|
|
|
{
|
|
|
|
struct device *dev = &skdev->pdev->dev;
|
|
|
|
void *buf;
|
|
|
|
|
|
|
|
buf = kmem_cache_alloc(s, gfp);
|
|
|
|
if (!buf)
|
|
|
|
return NULL;
|
2018-02-02 23:03:04 +08:00
|
|
|
*dma_handle = dma_map_single(dev, buf,
|
|
|
|
kmem_cache_size(s), dir);
|
2017-08-18 04:13:35 +08:00
|
|
|
if (dma_mapping_error(dev, *dma_handle)) {
|
2017-10-09 05:30:28 +08:00
|
|
|
kmem_cache_free(s, buf);
|
2017-08-18 04:13:35 +08:00
|
|
|
buf = NULL;
|
|
|
|
}
|
|
|
|
return buf;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void skd_free_dma(struct skd_device *skdev, struct kmem_cache *s,
|
|
|
|
void *vaddr, dma_addr_t dma_handle,
|
|
|
|
enum dma_data_direction dir)
|
|
|
|
{
|
|
|
|
if (!vaddr)
|
|
|
|
return;
|
|
|
|
|
2018-02-02 23:03:04 +08:00
|
|
|
dma_unmap_single(&skdev->pdev->dev, dma_handle,
|
|
|
|
kmem_cache_size(s), dir);
|
2017-08-18 04:13:35 +08:00
|
|
|
kmem_cache_free(s, vaddr);
|
|
|
|
}
|
|
|
|
|
2013-10-16 04:19:07 +08:00
|
|
|
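/*
 * Allocate one coherent DMA region holding the completion ring
 * followed by the per-entry error info table.
 */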
static int skd_cons_skcomp(struct skd_device *skdev)
|
|
|
|
{
|
|
|
|
int rc = 0;
|
|
|
|
struct fit_completion_entry_v1 *skcomp;
|
|
|
|
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev,
|
2017-08-18 04:13:02 +08:00
|
|
|
"comp pci_alloc, total bytes %zd entries %d\n",
|
|
|
|
SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY);
|
2013-10-16 04:19:07 +08:00
|
|
|
|
2017-08-18 04:13:02 +08:00
|
|
|
skcomp = pci_zalloc_consistent(skdev->pdev, SKD_SKCOMP_SIZE,
|
2014-08-09 05:24:12 +08:00
|
|
|
&skdev->cq_dma_address);
|
2013-10-16 04:19:07 +08:00
|
|
|
|
|
|
|
if (skcomp == NULL) {
|
|
|
|
rc = -ENOMEM;
|
|
|
|
goto err_out;
|
|
|
|
}
|
|
|
|
|
|
|
|
skdev->skcomp_table = skcomp;
|
|
|
|
skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
|
|
|
|
sizeof(*skcomp) *
|
|
|
|
SKD_N_COMPLETION_ENTRY);
|
|
|
|
|
|
|
|
err_out:
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
|
|
|
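/*
 * Allocate the FIT message contexts and, for each, a DMA-coherent
 * message buffer; warn if a buffer is not FIT_QCMD_ALIGN aligned.
 */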
static int skd_cons_skmsg(struct skd_device *skdev)
|
|
|
|
{
|
|
|
|
int rc = 0;
|
|
|
|
u32 i;
|
|
|
|
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev,
|
2017-08-18 04:13:18 +08:00
|
|
|
"skmsg_table kcalloc, struct %lu, count %u total %lu\n",
|
2017-08-18 04:12:58 +08:00
|
|
|
sizeof(struct skd_fitmsg_context), skdev->num_fitmsg_context,
|
|
|
|
sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
|
2013-10-16 04:19:07 +08:00
|
|
|
|
2017-08-18 04:13:18 +08:00
|
|
|
skdev->skmsg_table = kcalloc(skdev->num_fitmsg_context,
|
|
|
|
sizeof(struct skd_fitmsg_context),
|
|
|
|
GFP_KERNEL);
|
2013-10-16 04:19:07 +08:00
|
|
|
if (skdev->skmsg_table == NULL) {
|
|
|
|
rc = -ENOMEM;
|
|
|
|
goto err_out;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < skdev->num_fitmsg_context; i++) {
|
|
|
|
struct skd_fitmsg_context *skmsg;
|
|
|
|
|
|
|
|
skmsg = &skdev->skmsg_table[i];
|
|
|
|
|
|
|
|
skmsg->id = i + SKD_ID_FIT_MSG;
|
|
|
|
|
|
|
|
skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
|
2017-08-18 04:13:06 +08:00
|
|
|
SKD_N_FITMSG_BYTES,
|
2013-10-16 04:19:07 +08:00
|
|
|
&skmsg->mb_dma_address);
|
|
|
|
|
|
|
|
if (skmsg->msg_buf == NULL) {
|
|
|
|
rc = -ENOMEM;
|
|
|
|
goto err_out;
|
|
|
|
}
|
|
|
|
|
2017-08-18 04:13:06 +08:00
|
|
|
WARN(((uintptr_t)skmsg->msg_buf | skmsg->mb_dma_address) &
|
|
|
|
(FIT_QCMD_ALIGN - 1),
|
|
|
|
"not aligned: msg_buf %p mb_dma_address %#llx\n",
|
|
|
|
skmsg->msg_buf, skmsg->mb_dma_address);
|
2013-10-16 04:19:07 +08:00
|
|
|
memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
|
|
|
|
}
|
|
|
|
|
|
|
|
err_out:
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2013-11-05 19:37:08 +08:00
|
|
|
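/*
 * Allocate an array of n_sg FIT SG descriptors from the sglist cache
 * and pre-link each descriptor's next_desc_ptr to the DMA address of
 * the descriptor that follows; the last entry is terminated with 0.
 */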
static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
|
|
|
|
u32 n_sg,
|
|
|
|
dma_addr_t *ret_dma_addr)
|
|
|
|
{
|
|
|
|
struct fit_sg_descriptor *sg_list;
|
|
|
|
|
2017-08-18 04:13:35 +08:00
|
|
|
sg_list = skd_alloc_dma(skdev, skdev->sglist_cache, ret_dma_addr,
|
|
|
|
GFP_DMA | __GFP_ZERO, DMA_TO_DEVICE);
|
2013-11-05 19:37:08 +08:00
|
|
|
|
|
|
|
if (sg_list != NULL) {
|
|
|
|
uint64_t dma_address = *ret_dma_addr;
|
|
|
|
u32 i;
|
|
|
|
|
|
|
|
for (i = 0; i < n_sg - 1; i++) {
|
|
|
|
uint64_t ndp_off;
|
|
|
|
ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
|
|
|
|
|
|
|
|
sg_list[i].next_desc_ptr = dma_address + ndp_off;
|
|
|
|
}
|
|
|
|
sg_list[i].next_desc_ptr = 0LL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return sg_list;
|
|
|
|
}
|
|
|
|
|
2017-08-18 04:13:30 +08:00
|
|
|
static void skd_free_sg_list(struct skd_device *skdev,
|
2017-08-18 04:13:35 +08:00
|
|
|
struct fit_sg_descriptor *sg_list,
|
2017-08-18 04:13:30 +08:00
|
|
|
dma_addr_t dma_addr)
|
|
|
|
{
|
|
|
|
if (WARN_ON_ONCE(!sg_list))
|
|
|
|
return;
|
|
|
|
|
2017-08-18 04:13:35 +08:00
|
|
|
skd_free_dma(skdev, skdev->sglist_cache, sg_list, dma_addr,
|
|
|
|
DMA_TO_DEVICE);
|
2017-08-18 04:13:30 +08:00
|
|
|
}
|
|
|
|
|
2017-08-18 04:13:32 +08:00
|
|
|
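/*
 * blk-mq .init_request callback: the request PDU holds the
 * skd_request_context followed by its scatterlist, and each request
 * gets a pre-allocated FIT SG descriptor list of its own.
 */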
static int skd_init_request(struct blk_mq_tag_set *set, struct request *rq,
|
|
|
|
unsigned int hctx_idx, unsigned int numa_node)
|
2013-10-16 04:19:07 +08:00
|
|
|
{
|
2017-08-18 04:13:32 +08:00
|
|
|
struct skd_device *skdev = set->driver_data;
|
2017-08-18 04:13:31 +08:00
|
|
|
struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);
|
2013-10-16 04:19:07 +08:00
|
|
|
|
2017-08-18 04:13:31 +08:00
|
|
|
skreq->state = SKD_REQ_STATE_IDLE;
|
|
|
|
skreq->sg = (void *)(skreq + 1);
|
|
|
|
sg_init_table(skreq->sg, skd_sgs_per_request);
|
|
|
|
skreq->sksg_list = skd_cons_sg_list(skdev, skd_sgs_per_request,
|
|
|
|
&skreq->sksg_dma_address);
|
2013-10-16 04:19:07 +08:00
|
|
|
|
2017-08-18 04:13:31 +08:00
|
|
|
return skreq->sksg_list ? 0 : -ENOMEM;
|
|
|
|
}
|
2013-10-16 04:19:07 +08:00
|
|
|
|
2017-08-18 04:13:32 +08:00
|
|
|
static void skd_exit_request(struct blk_mq_tag_set *set, struct request *rq,
|
|
|
|
unsigned int hctx_idx)
|
2017-08-18 04:13:31 +08:00
|
|
|
{
|
2017-08-18 04:13:32 +08:00
|
|
|
struct skd_device *skdev = set->driver_data;
|
2017-08-18 04:13:31 +08:00
|
|
|
struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);
|
2013-10-16 04:19:07 +08:00
|
|
|
|
2017-08-18 04:13:35 +08:00
|
|
|
skd_free_sg_list(skdev, skreq->sksg_list, skreq->sksg_dma_address);
|
2013-10-16 04:19:07 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int skd_cons_sksb(struct skd_device *skdev)
|
|
|
|
{
|
|
|
|
int rc = 0;
|
|
|
|
struct skd_special_context *skspcl;
|
|
|
|
|
|
|
|
skspcl = &skdev->internal_skspcl;
|
|
|
|
|
|
|
|
skspcl->req.id = 0 + SKD_ID_INTERNAL;
|
|
|
|
skspcl->req.state = SKD_REQ_STATE_IDLE;
|
|
|
|
|
2017-08-18 04:13:35 +08:00
|
|
|
skspcl->data_buf = skd_alloc_dma(skdev, skdev->databuf_cache,
|
|
|
|
&skspcl->db_dma_address,
|
|
|
|
GFP_DMA | __GFP_ZERO,
|
|
|
|
DMA_BIDIRECTIONAL);
|
2013-10-16 04:19:07 +08:00
|
|
|
if (skspcl->data_buf == NULL) {
|
|
|
|
rc = -ENOMEM;
|
|
|
|
goto err_out;
|
|
|
|
}
|
|
|
|
|
2017-08-18 04:13:35 +08:00
|
|
|
skspcl->msg_buf = skd_alloc_dma(skdev, skdev->msgbuf_cache,
|
|
|
|
&skspcl->mb_dma_address,
|
|
|
|
GFP_DMA | __GFP_ZERO, DMA_TO_DEVICE);
|
2013-10-16 04:19:07 +08:00
|
|
|
if (skspcl->msg_buf == NULL) {
|
|
|
|
rc = -ENOMEM;
|
|
|
|
goto err_out;
|
|
|
|
}
|
|
|
|
|
|
|
|
skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
|
|
|
|
&skspcl->req.sksg_dma_address);
|
|
|
|
if (skspcl->req.sksg_list == NULL) {
|
|
|
|
rc = -ENOMEM;
|
|
|
|
goto err_out;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!skd_format_internal_skspcl(skdev)) {
|
|
|
|
rc = -EINVAL;
|
|
|
|
goto err_out;
|
|
|
|
}
|
|
|
|
|
|
|
|
err_out:
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2017-08-18 04:13:32 +08:00
|
|
|
static const struct blk_mq_ops skd_mq_ops = {
|
|
|
|
.queue_rq = skd_mq_queue_rq,
|
2017-08-26 05:24:11 +08:00
|
|
|
.complete = skd_complete_rq,
|
2017-08-24 01:56:32 +08:00
|
|
|
.timeout = skd_timed_out,
|
2017-08-18 04:13:32 +08:00
|
|
|
.init_request = skd_init_request,
|
|
|
|
.exit_request = skd_exit_request,
|
|
|
|
};
|
|
|
|
|
2013-10-16 04:19:07 +08:00
|
|
|
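/*
 * Allocate the gendisk, set up the blk-mq tag set (single hardware
 * queue, FIFO tag allocation, per-request PDU sized for the
 * scatterlist) and the request queue, apply the queue limits and
 * leave the hardware queue stopped until the drive reports online.
 */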
static int skd_cons_disk(struct skd_device *skdev)
|
|
|
|
{
|
|
|
|
int rc = 0;
|
|
|
|
struct gendisk *disk;
|
|
|
|
struct request_queue *q;
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
disk = alloc_disk(SKD_MINORS_PER_DEVICE);
|
|
|
|
if (!disk) {
|
|
|
|
rc = -ENOMEM;
|
|
|
|
goto err_out;
|
|
|
|
}
|
|
|
|
|
|
|
|
skdev->disk = disk;
|
|
|
|
sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);
|
|
|
|
|
|
|
|
disk->major = skdev->major;
|
|
|
|
disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
|
|
|
|
disk->fops = &skd_blockdev_ops;
|
|
|
|
disk->private_data = skdev;
|
|
|
|
|
2017-08-18 04:13:32 +08:00
|
|
|
memset(&skdev->tag_set, 0, sizeof(skdev->tag_set));
|
|
|
|
skdev->tag_set.ops = &skd_mq_ops;
|
|
|
|
skdev->tag_set.nr_hw_queues = 1;
|
|
|
|
skdev->tag_set.queue_depth = skd_max_queue_depth;
|
|
|
|
skdev->tag_set.cmd_size = sizeof(struct skd_request_context) +
|
|
|
|
skdev->sgs_per_request * sizeof(struct scatterlist);
|
|
|
|
skdev->tag_set.numa_node = NUMA_NO_NODE;
|
|
|
|
skdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
|
|
|
|
BLK_MQ_F_SG_MERGE |
|
|
|
|
BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_FIFO);
|
|
|
|
skdev->tag_set.driver_data = skdev;
|
2017-08-23 19:20:57 +08:00
|
|
|
rc = blk_mq_alloc_tag_set(&skdev->tag_set);
|
|
|
|
if (rc)
|
|
|
|
goto err_out;
|
|
|
|
q = blk_mq_init_queue(&skdev->tag_set);
|
|
|
|
if (IS_ERR(q)) {
|
|
|
|
blk_mq_free_tag_set(&skdev->tag_set);
|
|
|
|
rc = PTR_ERR(q);
|
2013-10-16 04:19:07 +08:00
|
|
|
goto err_out;
|
|
|
|
}
|
2017-08-18 04:13:31 +08:00
|
|
|
q->queuedata = skdev;
|
2013-10-16 04:19:07 +08:00
|
|
|
|
|
|
|
skdev->queue = q;
|
|
|
|
disk->queue = q;
|
|
|
|
|
2016-03-31 00:11:42 +08:00
|
|
|
blk_queue_write_cache(q, true, true);
|
2013-10-16 04:19:07 +08:00
|
|
|
blk_queue_max_segments(q, skdev->sgs_per_request);
|
|
|
|
blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
|
|
|
|
|
2017-08-18 04:12:53 +08:00
|
|
|
/* set optimal I/O size to 8KB */
|
2013-10-16 04:19:07 +08:00
|
|
|
blk_queue_io_opt(q, 8192);
|
|
|
|
|
|
|
|
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
|
2014-10-05 00:55:32 +08:00
|
|
|
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
|
2013-10-16 04:19:07 +08:00
|
|
|
|
2017-08-18 04:13:33 +08:00
|
|
|
blk_queue_rq_timeout(q, 8 * HZ);
|
|
|
|
|
2013-10-16 04:19:07 +08:00
|
|
|
spin_lock_irqsave(&skdev->lock, flags);
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev, "stopping queue\n");
|
2017-08-18 04:13:32 +08:00
|
|
|
blk_mq_stop_hw_queues(skdev->queue);
|
2013-10-16 04:19:07 +08:00
|
|
|
spin_unlock_irqrestore(&skdev->lock, flags);
|
|
|
|
|
|
|
|
err_out:
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2013-11-05 19:37:08 +08:00
|
|
|
#define SKD_N_DEV_TABLE 16u
|
|
|
|
static u32 skd_next_devno;
|
2013-10-16 04:19:07 +08:00
|
|
|
|
2013-11-05 19:37:08 +08:00
|
|
|
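/*
 * Allocate the skd_device and everything hanging off it: the DMA
 * buffer caches, the completion ring, the FIT message contexts, the
 * internal special context and the gendisk/blk-mq queue.  Any failure
 * tears the partially built device back down via skd_destruct().
 */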
static struct skd_device *skd_construct(struct pci_dev *pdev)
|
2013-10-16 04:19:07 +08:00
|
|
|
{
|
2013-11-05 19:37:08 +08:00
|
|
|
struct skd_device *skdev;
|
|
|
|
int blk_major = skd_major;
|
2017-08-18 04:13:35 +08:00
|
|
|
size_t size;
|
2013-11-05 19:37:08 +08:00
|
|
|
int rc;
|
2013-10-16 04:19:07 +08:00
|
|
|
|
2013-11-05 19:37:08 +08:00
|
|
|
skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
|
2013-10-16 04:19:07 +08:00
|
|
|
|
2013-11-05 19:37:08 +08:00
|
|
|
if (!skdev) {
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_err(&pdev->dev, "memory alloc failure\n");
|
2013-11-05 19:37:08 +08:00
|
|
|
return NULL;
|
|
|
|
}
|
2013-10-16 04:19:07 +08:00
|
|
|
|
2013-11-05 19:37:08 +08:00
|
|
|
skdev->state = SKD_DRVR_STATE_LOAD;
|
|
|
|
skdev->pdev = pdev;
|
|
|
|
skdev->devno = skd_next_devno++;
|
|
|
|
skdev->major = blk_major;
|
|
|
|
skdev->dev_max_queue_depth = 0;
|
2013-10-16 04:19:07 +08:00
|
|
|
|
2013-11-05 19:37:08 +08:00
|
|
|
skdev->num_req_context = skd_max_queue_depth;
|
|
|
|
skdev->num_fitmsg_context = skd_max_queue_depth;
|
|
|
|
skdev->cur_max_queue_depth = 1;
|
|
|
|
skdev->queue_low_water_mark = 1;
|
|
|
|
skdev->proto_ver = 99;
|
|
|
|
skdev->sgs_per_request = skd_sgs_per_request;
|
|
|
|
skdev->dbg_level = skd_dbg_level;
|
2013-10-16 04:19:07 +08:00
|
|
|
|
2013-11-05 19:37:08 +08:00
|
|
|
spin_lock_init(&skdev->lock);
|
|
|
|
|
2017-08-18 04:13:32 +08:00
|
|
|
INIT_WORK(&skdev->start_queue, skd_start_queue);
|
2013-11-05 19:37:08 +08:00
|
|
|
INIT_WORK(&skdev->completion_worker, skd_completion_worker);
|
2013-10-16 04:19:07 +08:00
|
|
|
|
2017-08-18 04:13:35 +08:00
|
|
|
size = max(SKD_N_FITMSG_BYTES, SKD_N_SPECIAL_FITMSG_BYTES);
|
|
|
|
skdev->msgbuf_cache = kmem_cache_create("skd-msgbuf", size, 0,
|
|
|
|
SLAB_HWCACHE_ALIGN, NULL);
|
|
|
|
if (!skdev->msgbuf_cache)
|
|
|
|
goto err_out;
|
|
|
|
WARN_ONCE(kmem_cache_size(skdev->msgbuf_cache) < size,
|
|
|
|
"skd-msgbuf: %d < %zd\n",
|
|
|
|
kmem_cache_size(skdev->msgbuf_cache), size);
|
|
|
|
size = skd_sgs_per_request * sizeof(struct fit_sg_descriptor);
|
|
|
|
skdev->sglist_cache = kmem_cache_create("skd-sglist", size, 0,
|
|
|
|
SLAB_HWCACHE_ALIGN, NULL);
|
|
|
|
if (!skdev->sglist_cache)
|
|
|
|
goto err_out;
|
|
|
|
WARN_ONCE(kmem_cache_size(skdev->sglist_cache) < size,
|
|
|
|
"skd-sglist: %d < %zd\n",
|
|
|
|
kmem_cache_size(skdev->sglist_cache), size);
|
|
|
|
size = SKD_N_INTERNAL_BYTES;
|
|
|
|
skdev->databuf_cache = kmem_cache_create("skd-databuf", size, 0,
|
|
|
|
SLAB_HWCACHE_ALIGN, NULL);
|
|
|
|
if (!skdev->databuf_cache)
|
|
|
|
goto err_out;
|
|
|
|
WARN_ONCE(kmem_cache_size(skdev->databuf_cache) < size,
|
|
|
|
"skd-databuf: %d < %zd\n",
|
|
|
|
kmem_cache_size(skdev->databuf_cache), size);
|
|
|
|
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev, "skcomp\n");
|
2013-11-05 19:37:08 +08:00
|
|
|
rc = skd_cons_skcomp(skdev);
|
|
|
|
if (rc < 0)
|
|
|
|
goto err_out;
|
2013-10-16 04:19:07 +08:00
|
|
|
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev, "skmsg\n");
|
2013-11-05 19:37:08 +08:00
|
|
|
rc = skd_cons_skmsg(skdev);
|
|
|
|
if (rc < 0)
|
|
|
|
goto err_out;
|
|
|
|
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev, "sksb\n");
|
2013-11-05 19:37:08 +08:00
|
|
|
rc = skd_cons_sksb(skdev);
|
|
|
|
if (rc < 0)
|
|
|
|
goto err_out;
|
|
|
|
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev, "disk\n");
|
2013-11-05 19:37:08 +08:00
|
|
|
rc = skd_cons_disk(skdev);
|
|
|
|
if (rc < 0)
|
|
|
|
goto err_out;
|
|
|
|
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev, "VICTORY\n");
|
2013-11-05 19:37:08 +08:00
|
|
|
return skdev;
|
|
|
|
|
|
|
|
err_out:
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev, "construct failed\n");
|
2013-11-05 19:37:08 +08:00
|
|
|
skd_destruct(skdev);
|
|
|
|
return NULL;
|
2013-10-16 04:19:07 +08:00
|
|
|
}
|
|
|
|
|
2013-11-05 19:37:08 +08:00
|
|
|
/*
|
|
|
|
*****************************************************************************
|
|
|
|
* DESTRUCT (FREE)
|
|
|
|
*****************************************************************************
|
|
|
|
*/
|
|
|
|
|
2013-10-16 04:19:07 +08:00
|
|
|
static void skd_free_skcomp(struct skd_device *skdev)
|
|
|
|
{
|
2017-08-18 04:13:03 +08:00
|
|
|
if (skdev->skcomp_table)
|
|
|
|
pci_free_consistent(skdev->pdev, SKD_SKCOMP_SIZE,
|
2013-10-16 04:19:07 +08:00
|
|
|
skdev->skcomp_table, skdev->cq_dma_address);
|
|
|
|
|
|
|
|
skdev->skcomp_table = NULL;
|
|
|
|
skdev->cq_dma_address = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void skd_free_skmsg(struct skd_device *skdev)
|
|
|
|
{
|
|
|
|
u32 i;
|
|
|
|
|
|
|
|
if (skdev->skmsg_table == NULL)
|
|
|
|
return;
|
|
|
|
|
|
|
|
for (i = 0; i < skdev->num_fitmsg_context; i++) {
|
|
|
|
struct skd_fitmsg_context *skmsg;
|
|
|
|
|
|
|
|
skmsg = &skdev->skmsg_table[i];
|
|
|
|
|
|
|
|
if (skmsg->msg_buf != NULL) {
|
|
|
|
pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
|
|
|
|
skmsg->msg_buf,
|
|
|
|
skmsg->mb_dma_address);
|
|
|
|
}
|
|
|
|
skmsg->msg_buf = NULL;
|
|
|
|
skmsg->mb_dma_address = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
kfree(skdev->skmsg_table);
|
|
|
|
skdev->skmsg_table = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void skd_free_sksb(struct skd_device *skdev)
|
|
|
|
{
|
2017-08-18 04:13:35 +08:00
|
|
|
struct skd_special_context *skspcl = &skdev->internal_skspcl;
|
2013-10-16 04:19:07 +08:00
|
|
|
|
2017-08-18 04:13:35 +08:00
|
|
|
skd_free_dma(skdev, skdev->databuf_cache, skspcl->data_buf,
|
|
|
|
skspcl->db_dma_address, DMA_BIDIRECTIONAL);
|
2013-10-16 04:19:07 +08:00
|
|
|
|
|
|
|
skspcl->data_buf = NULL;
|
|
|
|
skspcl->db_dma_address = 0;
|
|
|
|
|
2017-08-18 04:13:35 +08:00
|
|
|
skd_free_dma(skdev, skdev->msgbuf_cache, skspcl->msg_buf,
|
|
|
|
skspcl->mb_dma_address, DMA_TO_DEVICE);
|
2013-10-16 04:19:07 +08:00
|
|
|
|
|
|
|
skspcl->msg_buf = NULL;
|
|
|
|
skspcl->mb_dma_address = 0;
|
|
|
|
|
2017-08-18 04:13:35 +08:00
|
|
|
skd_free_sg_list(skdev, skspcl->req.sksg_list,
|
2013-10-16 04:19:07 +08:00
|
|
|
skspcl->req.sksg_dma_address);
|
|
|
|
|
|
|
|
skspcl->req.sksg_list = NULL;
|
|
|
|
skspcl->req.sksg_dma_address = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void skd_free_disk(struct skd_device *skdev)
|
|
|
|
{
|
|
|
|
struct gendisk *disk = skdev->disk;
|
|
|
|
|
2017-08-18 04:12:45 +08:00
|
|
|
if (disk && (disk->flags & GENHD_FL_UP))
|
|
|
|
del_gendisk(disk);
|
|
|
|
|
|
|
|
if (skdev->queue) {
|
|
|
|
blk_cleanup_queue(skdev->queue);
|
|
|
|
skdev->queue = NULL;
|
2017-08-26 05:24:13 +08:00
|
|
|
if (disk)
|
|
|
|
disk->queue = NULL;
|
2013-10-16 04:19:07 +08:00
|
|
|
}
|
2017-08-18 04:12:45 +08:00
|
|
|
|
2017-08-18 04:13:32 +08:00
|
|
|
if (skdev->tag_set.tags)
|
|
|
|
blk_mq_free_tag_set(&skdev->tag_set);
|
|
|
|
|
2017-08-18 04:12:45 +08:00
|
|
|
put_disk(disk);
|
2013-10-16 04:19:07 +08:00
|
|
|
skdev->disk = NULL;
|
|
|
|
}
|
|
|
|
|
2013-11-05 19:37:08 +08:00
|
|
|
static void skd_destruct(struct skd_device *skdev)
|
|
|
|
{
|
|
|
|
if (skdev == NULL)
|
|
|
|
return;
|
|
|
|
|
2017-08-18 04:13:32 +08:00
|
|
|
cancel_work_sync(&skdev->start_queue);
|
|
|
|
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev, "disk\n");
|
2013-11-05 19:37:08 +08:00
|
|
|
skd_free_disk(skdev);
|
|
|
|
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev, "sksb\n");
|
2013-11-05 19:37:08 +08:00
|
|
|
skd_free_sksb(skdev);
|
|
|
|
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev, "skmsg\n");
|
2013-11-05 19:37:08 +08:00
|
|
|
skd_free_skmsg(skdev);
|
2013-10-16 04:19:07 +08:00
|
|
|
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev, "skcomp\n");
|
2013-11-05 19:37:08 +08:00
|
|
|
skd_free_skcomp(skdev);
|
|
|
|
|
2017-08-18 04:13:35 +08:00
|
|
|
kmem_cache_destroy(skdev->databuf_cache);
|
|
|
|
kmem_cache_destroy(skdev->sglist_cache);
|
|
|
|
kmem_cache_destroy(skdev->msgbuf_cache);
|
|
|
|
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev, "skdev\n");
|
2013-11-05 19:37:08 +08:00
|
|
|
kfree(skdev);
|
|
|
|
}
|
2013-10-16 04:19:07 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
*****************************************************************************
|
|
|
|
* BLOCK DEVICE (BDEV) GLUE
|
|
|
|
*****************************************************************************
|
|
|
|
*/
|
|
|
|
|
|
|
|
static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
|
|
|
|
{
|
|
|
|
struct skd_device *skdev;
|
|
|
|
u64 capacity;
|
|
|
|
|
|
|
|
skdev = bdev->bd_disk->private_data;
|
|
|
|
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev, "%s: CMD[%s] getgeo device\n",
|
|
|
|
bdev->bd_disk->disk_name, current->comm);
|
2013-10-16 04:19:07 +08:00
|
|
|
|
|
|
|
if (skdev->read_cap_is_valid) {
|
|
|
|
capacity = get_capacity(skdev->disk);
|
|
|
|
geo->heads = 64;
|
|
|
|
geo->sectors = 255;
|
|
|
|
geo->cylinders = (capacity) / (255 * 64);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
return -EIO;
|
|
|
|
}
|
|
|
|
|
2016-06-16 10:44:20 +08:00
|
|
|
static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
|
2013-10-16 04:19:07 +08:00
|
|
|
{
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev, "add_disk\n");
|
2016-06-16 10:44:20 +08:00
|
|
|
device_add_disk(parent, skdev->disk);
|
2013-10-16 04:19:07 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct block_device_operations skd_blockdev_ops = {
|
|
|
|
.owner = THIS_MODULE,
|
|
|
|
.getgeo = skd_bdev_getgeo,
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
*****************************************************************************
|
|
|
|
* PCIe DRIVER GLUE
|
|
|
|
*****************************************************************************
|
|
|
|
*/
|
|
|
|
|
2014-08-08 21:56:03 +08:00
|
|
|
static const struct pci_device_id skd_pci_tbl[] = {
|
2013-10-16 04:19:07 +08:00
|
|
|
{ PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
|
|
|
|
PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
|
|
|
|
{ 0 } /* terminate list */
|
|
|
|
};
|
|
|
|
|
|
|
|
MODULE_DEVICE_TABLE(pci, skd_pci_tbl);
|
|
|
|
|
|
|
|
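/*
 * Format the PCIe link speed and width by reading the Link Status
 * register (offset 0x12 from the PCIe capability) and decoding the
 * current link speed and negotiated link width fields.
 */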
static char *skd_pci_info(struct skd_device *skdev, char *str)
|
|
|
|
{
|
|
|
|
int pcie_reg;
|
|
|
|
|
|
|
|
strcpy(str, "PCIe (");
|
|
|
|
pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);
|
|
|
|
|
|
|
|
if (pcie_reg) {
|
|
|
|
|
|
|
|
char lwstr[6];
|
|
|
|
uint16_t pcie_lstat, lspeed, lwidth;
|
|
|
|
|
|
|
|
pcie_reg += 0x12;
|
|
|
|
pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
|
|
|
|
lspeed = pcie_lstat & (0xF);
|
|
|
|
lwidth = (pcie_lstat & 0x3F0) >> 4;
|
|
|
|
|
|
|
|
if (lspeed == 1)
|
|
|
|
strcat(str, "2.5GT/s ");
|
|
|
|
else if (lspeed == 2)
|
|
|
|
strcat(str, "5.0GT/s ");
|
|
|
|
else
|
|
|
|
strcat(str, "<unknown> ");
|
|
|
|
snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
|
|
|
|
strcat(str, lwstr);
|
|
|
|
}
|
|
|
|
return str;
|
|
|
|
}
|
|
|
|
|
|
|
|
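/*
 * PCI probe: enable the device, claim its regions, set a 64-bit (or
 * 32-bit fallback) DMA mask, register the block major on first use,
 * construct the skd_device, map the BARs, acquire interrupts, start
 * the device and wait up to SKD_START_WAIT_SECONDS for the drive to
 * come online before attaching the gendisk.
 */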
static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
int rc = 0;
|
|
|
|
char pci_str[32];
|
|
|
|
struct skd_device *skdev;
|
|
|
|
|
2017-08-18 04:13:38 +08:00
|
|
|
dev_dbg(&pdev->dev, "vendor=%04X device=%04x\n", pdev->vendor,
|
|
|
|
pdev->device);
|
2013-10-16 04:19:07 +08:00
|
|
|
|
|
|
|
rc = pci_enable_device(pdev);
|
|
|
|
if (rc)
|
|
|
|
return rc;
|
|
|
|
rc = pci_request_regions(pdev, DRV_NAME);
|
|
|
|
if (rc)
|
|
|
|
goto err_out;
|
|
|
|
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
|
|
|
|
if (!rc) {
|
|
|
|
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_err(&pdev->dev, "consistent DMA mask error %d\n",
|
|
|
|
rc);
|
2013-10-16 04:19:07 +08:00
|
|
|
}
|
|
|
|
} else {
|
2017-08-18 04:12:58 +08:00
|
|
|
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
|
2013-10-16 04:19:07 +08:00
|
|
|
if (rc) {
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_err(&pdev->dev, "DMA mask error %d\n", rc);
|
2013-10-16 04:19:07 +08:00
|
|
|
goto err_out_regions;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-11-05 19:37:02 +08:00
|
|
|
if (!skd_major) {
|
|
|
|
rc = register_blkdev(0, DRV_NAME);
|
|
|
|
if (rc < 0)
|
|
|
|
goto err_out_regions;
|
|
|
|
BUG_ON(!rc);
|
|
|
|
skd_major = rc;
|
|
|
|
}
|
|
|
|
|
2013-10-16 04:19:07 +08:00
|
|
|
skdev = skd_construct(pdev);
|
2013-10-30 13:23:53 +08:00
|
|
|
if (skdev == NULL) {
|
|
|
|
rc = -ENOMEM;
|
2013-10-16 04:19:07 +08:00
|
|
|
goto err_out_regions;
|
2013-10-30 13:23:53 +08:00
|
|
|
}
|
2013-10-16 04:19:07 +08:00
|
|
|
|
|
|
|
skd_pci_info(skdev, pci_str);
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_info(&pdev->dev, "%s 64bit\n", pci_str);
|
2013-10-16 04:19:07 +08:00
|
|
|
|
|
|
|
pci_set_master(pdev);
|
|
|
|
rc = pci_enable_pcie_error_reporting(pdev);
|
|
|
|
if (rc) {
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_err(&pdev->dev,
|
|
|
|
"bad enable of PCIe error reporting rc=%d\n", rc);
|
2013-10-16 04:19:07 +08:00
|
|
|
skdev->pcie_error_reporting_is_enabled = 0;
|
|
|
|
} else
|
|
|
|
skdev->pcie_error_reporting_is_enabled = 1;
|
|
|
|
|
|
|
|
pci_set_drvdata(pdev, skdev);
|
2013-11-05 19:37:05 +08:00
|
|
|
|
2013-10-16 04:19:07 +08:00
|
|
|
for (i = 0; i < SKD_MAX_BARS; i++) {
|
|
|
|
skdev->mem_phys[i] = pci_resource_start(pdev, i);
|
|
|
|
skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
|
|
|
|
skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
|
|
|
|
skdev->mem_size[i]);
|
|
|
|
if (!skdev->mem_map[i]) {
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_err(&pdev->dev,
|
|
|
|
"Unable to map adapter memory!\n");
|
2013-10-16 04:19:07 +08:00
|
|
|
rc = -ENODEV;
|
|
|
|
goto err_out_iounmap;
|
|
|
|
}
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
|
|
|
|
skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
|
|
|
|
skdev->mem_size[i]);
|
2013-10-16 04:19:07 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
rc = skd_acquire_irq(skdev);
|
|
|
|
if (rc) {
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
|
2013-10-16 04:19:07 +08:00
|
|
|
goto err_out_iounmap;
|
|
|
|
}
|
|
|
|
|
|
|
|
rc = skd_start_timer(skdev);
|
|
|
|
if (rc)
|
|
|
|
goto err_out_timer;
|
|
|
|
|
|
|
|
init_waitqueue_head(&skdev->waitq);
|
|
|
|
|
|
|
|
skd_start_device(skdev);
|
|
|
|
|
|
|
|
rc = wait_event_interruptible_timeout(skdev->waitq,
|
|
|
|
(skdev->gendisk_on),
|
|
|
|
(SKD_START_WAIT_SECONDS * HZ));
|
|
|
|
if (skdev->gendisk_on > 0) {
|
|
|
|
/* device came on-line after reset */
|
2016-06-16 10:44:20 +08:00
|
|
|
skd_bdev_attach(&pdev->dev, skdev);
|
2013-10-16 04:19:07 +08:00
|
|
|
rc = 0;
|
|
|
|
} else {
|
|
|
|
/* we timed out, something is wrong with the device,
|
|
|
|
don't add the disk structure */
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_err(&pdev->dev, "error: waiting for s1120 timed out %d!\n",
|
|
|
|
rc);
|
2013-10-16 04:19:07 +08:00
|
|
|
/* in case of no error; we timeout with ENXIO */
|
|
|
|
if (!rc)
|
|
|
|
rc = -ENXIO;
|
|
|
|
goto err_out_timer;
|
|
|
|
}
|
|
|
|
|
|
|
|
return rc;
|
|
|
|
|
|
|
|
err_out_timer:
|
|
|
|
skd_stop_device(skdev);
|
|
|
|
skd_release_irq(skdev);
|
|
|
|
|
|
|
|
err_out_iounmap:
|
|
|
|
for (i = 0; i < SKD_MAX_BARS; i++)
|
|
|
|
if (skdev->mem_map[i])
|
|
|
|
iounmap(skdev->mem_map[i]);
|
|
|
|
|
|
|
|
if (skdev->pcie_error_reporting_is_enabled)
|
|
|
|
pci_disable_pcie_error_reporting(pdev);
|
|
|
|
|
|
|
|
skd_destruct(skdev);
|
|
|
|
|
|
|
|
err_out_regions:
|
|
|
|
pci_release_regions(pdev);
|
|
|
|
|
|
|
|
err_out:
|
|
|
|
pci_disable_device(pdev);
|
|
|
|
pci_set_drvdata(pdev, NULL);
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void skd_pci_remove(struct pci_dev *pdev)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
struct skd_device *skdev;
|
|
|
|
|
|
|
|
skdev = pci_get_drvdata(pdev);
|
|
|
|
if (!skdev) {
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_err(&pdev->dev, "no device data for PCI\n");
|
2013-10-16 04:19:07 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
skd_stop_device(skdev);
|
|
|
|
skd_release_irq(skdev);
|
|
|
|
|
|
|
|
for (i = 0; i < SKD_MAX_BARS; i++)
|
|
|
|
if (skdev->mem_map[i])
|
2017-08-18 04:12:59 +08:00
|
|
|
iounmap(skdev->mem_map[i]);
|
2013-10-16 04:19:07 +08:00
|
|
|
|
|
|
|
if (skdev->pcie_error_reporting_is_enabled)
|
|
|
|
pci_disable_pcie_error_reporting(pdev);
|
|
|
|
|
|
|
|
skd_destruct(skdev);
|
|
|
|
|
|
|
|
pci_release_regions(pdev);
|
|
|
|
pci_disable_device(pdev);
|
|
|
|
pci_set_drvdata(pdev, NULL);
|
|
|
|
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
struct skd_device *skdev;
|
|
|
|
|
|
|
|
skdev = pci_get_drvdata(pdev);
|
|
|
|
if (!skdev) {
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_err(&pdev->dev, "no device data for PCI\n");
|
2013-10-16 04:19:07 +08:00
|
|
|
return -EIO;
|
|
|
|
}
|
|
|
|
|
|
|
|
skd_stop_device(skdev);
|
|
|
|
|
|
|
|
skd_release_irq(skdev);
|
|
|
|
|
|
|
|
for (i = 0; i < SKD_MAX_BARS; i++)
|
|
|
|
if (skdev->mem_map[i])
|
2017-08-18 04:12:59 +08:00
|
|
|
iounmap(skdev->mem_map[i]);
|
2013-10-16 04:19:07 +08:00
|
|
|
|
|
|
|
if (skdev->pcie_error_reporting_is_enabled)
|
|
|
|
pci_disable_pcie_error_reporting(pdev);
|
|
|
|
|
|
|
|
pci_release_regions(pdev);
|
|
|
|
pci_save_state(pdev);
|
|
|
|
pci_disable_device(pdev);
|
|
|
|
pci_set_power_state(pdev, pci_choose_state(pdev, state));
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int skd_pci_resume(struct pci_dev *pdev)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
int rc = 0;
|
|
|
|
struct skd_device *skdev;
|
|
|
|
|
|
|
|
skdev = pci_get_drvdata(pdev);
|
|
|
|
if (!skdev) {
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_err(&pdev->dev, "no device data for PCI\n");
|
2013-10-16 04:19:07 +08:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
pci_set_power_state(pdev, PCI_D0);
|
|
|
|
pci_enable_wake(pdev, PCI_D0, 0);
|
|
|
|
pci_restore_state(pdev);
|
|
|
|
|
|
|
|
rc = pci_enable_device(pdev);
|
|
|
|
if (rc)
|
|
|
|
return rc;
|
|
|
|
rc = pci_request_regions(pdev, DRV_NAME);
|
|
|
|
if (rc)
|
|
|
|
goto err_out;
|
|
|
|
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
|
|
|
|
if (!rc) {
|
|
|
|
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
|
|
|
|
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_err(&pdev->dev, "consistent DMA mask error %d\n",
|
|
|
|
rc);
|
2013-10-16 04:19:07 +08:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
|
|
|
|
if (rc) {
|
|
|
|
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_err(&pdev->dev, "DMA mask error %d\n", rc);
|
2013-10-16 04:19:07 +08:00
|
|
|
goto err_out_regions;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
pci_set_master(pdev);
|
|
|
|
rc = pci_enable_pcie_error_reporting(pdev);
|
|
|
|
if (rc) {
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_err(&pdev->dev,
|
|
|
|
"bad enable of PCIe error reporting rc=%d\n", rc);
|
2013-10-16 04:19:07 +08:00
|
|
|
skdev->pcie_error_reporting_is_enabled = 0;
|
|
|
|
} else
|
|
|
|
skdev->pcie_error_reporting_is_enabled = 1;
|
|
|
|
|
|
|
|
for (i = 0; i < SKD_MAX_BARS; i++) {
|
|
|
|
|
|
|
|
skdev->mem_phys[i] = pci_resource_start(pdev, i);
|
|
|
|
skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
|
|
|
|
skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
|
|
|
|
skdev->mem_size[i]);
|
|
|
|
if (!skdev->mem_map[i]) {
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_err(&pdev->dev, "Unable to map adapter memory!\n");
|
2013-10-16 04:19:07 +08:00
|
|
|
rc = -ENODEV;
|
|
|
|
goto err_out_iounmap;
|
|
|
|
}
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
|
|
|
|
skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
|
|
|
|
skdev->mem_size[i]);
|
2013-10-16 04:19:07 +08:00
|
|
|
}
|
|
|
|
rc = skd_acquire_irq(skdev);
|
|
|
|
if (rc) {
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
|
2013-10-16 04:19:07 +08:00
|
|
|
goto err_out_iounmap;
|
|
|
|
}
|
|
|
|
|
|
|
|
rc = skd_start_timer(skdev);
|
|
|
|
if (rc)
|
|
|
|
goto err_out_timer;
|
|
|
|
|
|
|
|
init_waitqueue_head(&skdev->waitq);
|
|
|
|
|
|
|
|
skd_start_device(skdev);
|
|
|
|
|
|
|
|
return rc;
|
|
|
|
|
|
|
|
err_out_timer:
|
|
|
|
skd_stop_device(skdev);
|
|
|
|
skd_release_irq(skdev);
|
|
|
|
|
|
|
|
err_out_iounmap:
|
|
|
|
for (i = 0; i < SKD_MAX_BARS; i++)
|
|
|
|
if (skdev->mem_map[i])
|
|
|
|
iounmap(skdev->mem_map[i]);
|
|
|
|
|
|
|
|
if (skdev->pcie_error_reporting_is_enabled)
|
|
|
|
pci_disable_pcie_error_reporting(pdev);
|
|
|
|
|
|
|
|
err_out_regions:
|
|
|
|
pci_release_regions(pdev);
|
|
|
|
|
|
|
|
err_out:
|
|
|
|
pci_disable_device(pdev);
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void skd_pci_shutdown(struct pci_dev *pdev)
|
|
|
|
{
|
|
|
|
struct skd_device *skdev;
|
|
|
|
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_err(&pdev->dev, "%s called\n", __func__);
|
2013-10-16 04:19:07 +08:00
|
|
|
|
|
|
|
skdev = pci_get_drvdata(pdev);
|
|
|
|
if (!skdev) {
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_err(&pdev->dev, "no device data for PCI\n");
|
2013-10-16 04:19:07 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_err(&pdev->dev, "calling stop\n");
|
2013-10-16 04:19:07 +08:00
|
|
|
skd_stop_device(skdev);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct pci_driver skd_driver = {
|
|
|
|
.name = DRV_NAME,
|
|
|
|
.id_table = skd_pci_tbl,
|
|
|
|
.probe = skd_pci_probe,
|
|
|
|
.remove = skd_pci_remove,
|
|
|
|
.suspend = skd_pci_suspend,
|
|
|
|
.resume = skd_pci_resume,
|
|
|
|
.shutdown = skd_pci_shutdown,
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
*****************************************************************************
|
|
|
|
* LOGGING SUPPORT
|
|
|
|
*****************************************************************************
|
|
|
|
*/
|
|
|
|
|
|
|
|
const char *skd_drive_state_to_str(int state)
|
|
|
|
{
|
|
|
|
switch (state) {
|
|
|
|
case FIT_SR_DRIVE_OFFLINE:
|
|
|
|
return "OFFLINE";
|
|
|
|
case FIT_SR_DRIVE_INIT:
|
|
|
|
return "INIT";
|
|
|
|
case FIT_SR_DRIVE_ONLINE:
|
|
|
|
return "ONLINE";
|
|
|
|
case FIT_SR_DRIVE_BUSY:
|
|
|
|
return "BUSY";
|
|
|
|
case FIT_SR_DRIVE_FAULT:
|
|
|
|
return "FAULT";
|
|
|
|
case FIT_SR_DRIVE_DEGRADED:
|
|
|
|
return "DEGRADED";
|
|
|
|
case FIT_SR_PCIE_LINK_DOWN:
|
|
|
|
return "INK_DOWN";
|
|
|
|
case FIT_SR_DRIVE_SOFT_RESET:
|
|
|
|
return "SOFT_RESET";
|
|
|
|
case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
|
|
|
|
return "NEED_FW";
|
|
|
|
case FIT_SR_DRIVE_INIT_FAULT:
|
|
|
|
return "INIT_FAULT";
|
|
|
|
case FIT_SR_DRIVE_BUSY_SANITIZE:
|
|
|
|
return "BUSY_SANITIZE";
|
|
|
|
case FIT_SR_DRIVE_BUSY_ERASE:
|
|
|
|
return "BUSY_ERASE";
|
|
|
|
case FIT_SR_DRIVE_FW_BOOTING:
|
|
|
|
return "FW_BOOTING";
|
|
|
|
default:
|
|
|
|
return "???";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
const char *skd_skdev_state_to_str(enum skd_drvr_state state)
|
|
|
|
{
|
|
|
|
switch (state) {
|
|
|
|
case SKD_DRVR_STATE_LOAD:
|
|
|
|
return "LOAD";
|
|
|
|
case SKD_DRVR_STATE_IDLE:
|
|
|
|
return "IDLE";
|
|
|
|
case SKD_DRVR_STATE_BUSY:
|
|
|
|
return "BUSY";
|
|
|
|
case SKD_DRVR_STATE_STARTING:
|
|
|
|
return "STARTING";
|
|
|
|
case SKD_DRVR_STATE_ONLINE:
|
|
|
|
return "ONLINE";
|
|
|
|
case SKD_DRVR_STATE_PAUSING:
|
|
|
|
return "PAUSING";
|
|
|
|
case SKD_DRVR_STATE_PAUSED:
|
|
|
|
return "PAUSED";
|
|
|
|
case SKD_DRVR_STATE_RESTARTING:
|
|
|
|
return "RESTARTING";
|
|
|
|
case SKD_DRVR_STATE_RESUMING:
|
|
|
|
return "RESUMING";
|
|
|
|
case SKD_DRVR_STATE_STOPPING:
|
|
|
|
return "STOPPING";
|
|
|
|
case SKD_DRVR_STATE_SYNCING:
|
|
|
|
return "SYNCING";
|
|
|
|
case SKD_DRVR_STATE_FAULT:
|
|
|
|
return "FAULT";
|
|
|
|
case SKD_DRVR_STATE_DISAPPEARED:
|
|
|
|
return "DISAPPEARED";
|
|
|
|
case SKD_DRVR_STATE_BUSY_ERASE:
|
|
|
|
return "BUSY_ERASE";
|
|
|
|
case SKD_DRVR_STATE_BUSY_SANITIZE:
|
|
|
|
return "BUSY_SANITIZE";
|
|
|
|
case SKD_DRVR_STATE_BUSY_IMMINENT:
|
|
|
|
return "BUSY_IMMINENT";
|
|
|
|
case SKD_DRVR_STATE_WAIT_BOOT:
|
|
|
|
return "WAIT_BOOT";
|
|
|
|
|
|
|
|
default:
|
|
|
|
return "???";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-12-19 17:32:22 +08:00
|
|
|
static const char *skd_skreq_state_to_str(enum skd_req_state state)
|
2013-10-16 04:19:07 +08:00
|
|
|
{
|
|
|
|
switch (state) {
|
|
|
|
case SKD_REQ_STATE_IDLE:
|
|
|
|
return "IDLE";
|
|
|
|
case SKD_REQ_STATE_SETUP:
|
|
|
|
return "SETUP";
|
|
|
|
case SKD_REQ_STATE_BUSY:
|
|
|
|
return "BUSY";
|
|
|
|
case SKD_REQ_STATE_COMPLETED:
|
|
|
|
return "COMPLETED";
|
|
|
|
case SKD_REQ_STATE_TIMEOUT:
|
|
|
|
return "TIMEOUT";
|
|
|
|
default:
|
|
|
|
return "???";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void skd_log_skdev(struct skd_device *skdev, const char *event)
|
|
|
|
{
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev, "skdev=%p event='%s'\n", skdev, event);
|
|
|
|
dev_dbg(&skdev->pdev->dev, " drive_state=%s(%d) driver_state=%s(%d)\n",
|
|
|
|
skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
|
|
|
|
skd_skdev_state_to_str(skdev->state), skdev->state);
|
|
|
|
dev_dbg(&skdev->pdev->dev, " busy=%d limit=%d dev=%d lowat=%d\n",
|
2017-08-18 04:13:34 +08:00
|
|
|
skd_in_flight(skdev), skdev->cur_max_queue_depth,
|
2017-08-18 04:12:58 +08:00
|
|
|
skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
|
2017-08-18 04:13:33 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev, " cycle=%d cycle_ix=%d\n",
|
|
|
|
skdev->skcomp_cycle, skdev->skcomp_ix);
|
2013-10-16 04:19:07 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void skd_log_skreq(struct skd_device *skdev,
|
|
|
|
struct skd_request_context *skreq, const char *event)
|
|
|
|
{
|
2017-08-18 04:13:31 +08:00
|
|
|
struct request *req = blk_mq_rq_from_pdu(skreq);
|
|
|
|
u32 lba = blk_rq_pos(req);
|
|
|
|
u32 count = blk_rq_sectors(req);
|
|
|
|
|
2017-08-18 04:12:58 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev, "skreq=%p event='%s'\n", skreq, event);
|
|
|
|
dev_dbg(&skdev->pdev->dev, " state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
|
|
|
|
skd_skreq_state_to_str(skreq->state), skreq->state, skreq->id,
|
|
|
|
skreq->fitmsg_id);
|
2017-08-18 04:13:33 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev, " sg_dir=%d n_sg=%d\n",
|
|
|
|
skreq->data_dir, skreq->n_sg);
|
2017-08-18 04:13:32 +08:00
|
|
|
|
2017-08-18 04:13:31 +08:00
|
|
|
dev_dbg(&skdev->pdev->dev,
|
|
|
|
"req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba, lba,
|
|
|
|
count, count, (int)rq_data_dir(req));
|
2013-10-16 04:19:07 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
*****************************************************************************
|
|
|
|
* MODULE GLUE
|
|
|
|
*****************************************************************************
|
|
|
|
*/
|
|
|
|
|
|
|
|
static int __init skd_init(void)
|
|
|
|
{
|
2017-08-18 04:13:08 +08:00
|
|
|
BUILD_BUG_ON(sizeof(struct fit_completion_entry_v1) != 8);
|
|
|
|
BUILD_BUG_ON(sizeof(struct fit_comp_error_info) != 32);
|
|
|
|
BUILD_BUG_ON(sizeof(struct skd_command_header) != 16);
|
|
|
|
BUILD_BUG_ON(sizeof(struct skd_scsi_request) != 32);
|
|
|
|
BUILD_BUG_ON(sizeof(struct driver_inquiry_data) != 44);
|
2017-08-18 04:13:07 +08:00
|
|
|
BUILD_BUG_ON(offsetof(struct skd_msg_buf, fmh) != 0);
|
|
|
|
BUILD_BUG_ON(offsetof(struct skd_msg_buf, scsi) != 64);
|
|
|
|
BUILD_BUG_ON(sizeof(struct skd_msg_buf) != SKD_N_FITMSG_BYTES);
|
2017-08-18 04:13:01 +08:00
|
|
|
|
2013-10-16 04:19:07 +08:00
|
|
|
switch (skd_isr_type) {
|
|
|
|
case SKD_IRQ_LEGACY:
|
|
|
|
case SKD_IRQ_MSI:
|
|
|
|
case SKD_IRQ_MSIX:
|
|
|
|
break;
|
|
|
|
default:
|
2013-11-05 19:37:01 +08:00
|
|
|
pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n",
|
2013-10-16 04:19:07 +08:00
|
|
|
skd_isr_type, SKD_IRQ_DEFAULT);
|
|
|
|
skd_isr_type = SKD_IRQ_DEFAULT;
|
|
|
|
}
|
|
|
|
|
2013-11-05 19:37:01 +08:00
|
|
|
if (skd_max_queue_depth < 1 ||
|
|
|
|
skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
|
|
|
|
pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
|
2013-10-16 04:19:07 +08:00
|
|
|
skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
|
|
|
|
skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
|
|
|
|
}
|
|
|
|
|
2017-08-18 04:13:01 +08:00
|
|
|
if (skd_max_req_per_msg < 1 ||
|
|
|
|
skd_max_req_per_msg > SKD_MAX_REQ_PER_MSG) {
|
2013-11-05 19:37:01 +08:00
|
|
|
pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
|
2013-10-16 04:19:07 +08:00
|
|
|
skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
|
|
|
|
skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
|
2013-11-05 19:37:01 +08:00
|
|
|
pr_err(PFX "skd_sg_per_request %d invalid, re-set to %d\n",
|
2013-10-16 04:19:07 +08:00
|
|
|
skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
|
|
|
|
skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (skd_dbg_level < 0 || skd_dbg_level > 2) {
|
2013-11-05 19:37:01 +08:00
|
|
|
pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n",
|
2013-10-16 04:19:07 +08:00
|
|
|
skd_dbg_level, 0);
|
|
|
|
skd_dbg_level = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (skd_isr_comp_limit < 0) {
|
2013-11-05 19:37:01 +08:00
|
|
|
pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n",
|
2013-10-16 04:19:07 +08:00
|
|
|
skd_isr_comp_limit, 0);
|
|
|
|
skd_isr_comp_limit = 0;
|
|
|
|
}
|
|
|
|
|
2013-11-05 19:37:02 +08:00
|
|
|
return pci_register_driver(&skd_driver);
|
2013-10-16 04:19:07 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void __exit skd_exit(void)
|
|
|
|
{
|
|
|
|
pci_unregister_driver(&skd_driver);
|
2013-11-05 19:37:02 +08:00
|
|
|
|
|
|
|
if (skd_major)
|
|
|
|
unregister_blkdev(skd_major, DRV_NAME);
|
2013-10-16 04:19:07 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
module_init(skd_init);
|
|
|
|
module_exit(skd_exit);
|