Merge branch 'x86/cpufeature' into x86/urgent, because it's ready

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar 2015-08-31 19:47:03 +02:00
commit 0050ae57cd
23 changed files with 162 additions and 354 deletions

View File

@@ -239,6 +239,7 @@
#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */
#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */
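The new bit is consumed through the usual cpufeature accessors once word 9 is populated from CPUID leaf 7. A minimal sketch of how a driver might gate a SHA-NI code path (hypothetical function name; boot_cpu_has() is the standard test macro):

/* Hypothetical init hook: bail out to generic code when the CPU
 * lacks SHA-NI. boot_cpu_has() tests the bit filled in from CPUID. */
static int sha_ni_init(void)
{
        if (!boot_cpu_has(X86_FEATURE_SHA_NI))
                return -ENODEV; /* generic SHA implementation stays in use */
        /* register the SHA-NI accelerated transforms here */
        return 0;
}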

View File

@@ -92,7 +92,7 @@ static inline u32 brcm_sata_readreg(void __iomem *addr)
* Other architectures (e.g., ARM) either do not support big endian, or
* else leave I/O in little endian mode.
*/
if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
return __raw_readl(addr);
else
return readl_relaxed(addr);
@@ -101,7 +101,7 @@ static inline u32 brcm_sata_readreg(void __iomem *addr)
static inline void brcm_sata_writereg(u32 val, void __iomem *addr)
{
/* See brcm_sata_readreg() comments */
if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
__raw_writel(val, addr);
else
writel_relaxed(val, addr);
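The point of this fix: IS_ENABLED() only yields 1 for macros that expand to exactly 1, which is how Kconfig emits enabled options. __BIG_ENDIAN is a byte-order constant (4321 on big-endian kernel builds, undefined on little-endian ones), so IS_ENABLED(__BIG_ENDIAN) was always 0 and big-endian MIPS never took the __raw_readl()/__raw_writel() path. A standalone sketch of the macro trick, simplified from include/linux/kconfig.h (the kernel version also accepts CONFIG_FOO_MODULE):

/* Userspace demo of the IS_ENABLED() mechanism. Only a macro defined
 * to exactly 1 makes the placeholder paste produce a leading "0," arg,
 * which shifts the 1 into the second-argument slot. */
#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define IS_ENABLED(option) ___is_defined(option)

#define CONFIG_CPU_BIG_ENDIAN 1 /* what Kconfig generates when the option is set */
#define __BIG_ENDIAN 4321       /* byte-order constant, never 1 */

int main(void)
{
        printf("%d\n", IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)); /* prints 1 */
        printf("%d\n", IS_ENABLED(__BIG_ENDIAN));          /* prints 0 */
        return 0;
}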
@@ -209,6 +209,7 @@ static void brcm_sata_init(struct brcm_ahci_priv *priv)
priv->top_ctrl + SATA_TOP_CTRL_BUS_CTRL);
}
#ifdef CONFIG_PM_SLEEP
static int brcm_ahci_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
@@ -231,6 +232,7 @@ static int brcm_ahci_resume(struct device *dev)
brcm_sata_phys_enable(priv);
return ahci_platform_resume(dev);
}
#endif
static struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
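The #ifdef CONFIG_PM_SLEEP guard added around the suspend/resume pair avoids "defined but not used" warnings on kernels built without sleep support, because the macro that wires the callbacks into the driver's dev_pm_ops only references them when CONFIG_PM_SLEEP is set. The usual shape of the pattern, as a generic sketch (assuming SIMPLE_DEV_PM_OPS / SET_SYSTEM_SLEEP_PM_OPS, as in most platform drivers):

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev) { /* quiesce hardware */ return 0; }
static int foo_resume(struct device *dev)  { /* reinit hardware */  return 0; }
#endif

/* SET_SYSTEM_SLEEP_PM_OPS() inside this macro expands to nothing when
 * CONFIG_PM_SLEEP is off, leaving the two functions above unreferenced,
 * which is why they need the matching #ifdef. */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);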

View File

@@ -694,11 +694,11 @@ static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
* RETURNS:
* Block address read from @tf.
*/
u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
u64 block = 0;
if (!dev || tf->flags & ATA_TFLAG_LBA) {
if (tf->flags & ATA_TFLAG_LBA) {
if (tf->flags & ATA_TFLAG_LBA48) {
block |= (u64)tf->hob_lbah << 40;
block |= (u64)tf->hob_lbam << 32;
@@ -2147,24 +2147,6 @@ static int ata_dev_config_ncq(struct ata_device *dev,
return 0;
}
static void ata_dev_config_sense_reporting(struct ata_device *dev)
{
unsigned int err_mask;
if (!ata_id_has_sense_reporting(dev->id))
return;
if (ata_id_sense_reporting_enabled(dev->id))
return;
err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
if (err_mask) {
ata_dev_dbg(dev,
"failed to enable Sense Data Reporting, Emask 0x%x\n",
err_mask);
}
}
/**
* ata_dev_configure - Configure the specified ATA/ATAPI device
* @dev: Target device to configure
@@ -2387,7 +2369,7 @@ int ata_dev_configure(struct ata_device *dev)
dev->devslp_timing[i] = sata_setting[j];
}
}
ata_dev_config_sense_reporting(dev);
dev->cdb_len = 16;
}

View File

@@ -1592,8 +1592,6 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
tf->hob_lbah = buf[10];
tf->nsect = buf[12];
tf->hob_nsect = buf[13];
if (ata_id_has_ncq_autosense(dev->id))
tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
return 0;
}
@@ -1629,70 +1627,6 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
return err_mask;
}
/**
* ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
* @dev: device to perform REQUEST_SENSE_DATA_EXT to
* @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
* @dfl_sense_key: default sense key to use
*
* Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
* SENSE. This function is an EH helper.
*
* LOCKING:
* Kernel thread context (may sleep).
*
* RETURNS:
* encoded sense data on success, 0 on failure or if sense data
* is not available.
*/
static u32 ata_eh_request_sense(struct ata_queued_cmd *qc,
struct scsi_cmnd *cmd)
{
struct ata_device *dev = qc->dev;
struct ata_taskfile tf;
unsigned int err_mask;
if (!cmd)
return 0;
DPRINTK("ATA request sense\n");
ata_dev_warn(dev, "request sense\n");
if (!ata_id_sense_reporting_enabled(dev->id)) {
ata_dev_warn(qc->dev, "sense data reporting disabled\n");
return 0;
}
ata_tf_init(dev, &tf);
tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
tf.command = ATA_CMD_REQ_SENSE_DATA;
tf.protocol = ATA_PROT_NODATA;
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
/*
* ACS-4 states:
* The device may set the SENSE DATA AVAILABLE bit to one in the
* STATUS field and clear the ERROR bit to zero in the STATUS field
* to indicate that the command returned completion without an error
* and the sense data described in table 306 is available.
*
* IOW the 'ATA_SENSE' bit might not be set even though valid
* sense data is available.
* So check for both.
*/
if ((tf.command & ATA_SENSE) ||
tf.lbah != 0 || tf.lbam != 0 || tf.lbal != 0) {
ata_scsi_set_sense(cmd, tf.lbah, tf.lbam, tf.lbal);
qc->flags |= ATA_QCFLAG_SENSE_VALID;
ata_dev_warn(dev, "sense data %02x/%02x/%02x\n",
tf.lbah, tf.lbam, tf.lbal);
} else {
ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
tf.command, err_mask);
}
return err_mask;
}
/**
* atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
* @dev: device to perform REQUEST_SENSE to
@@ -1855,19 +1789,6 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
memcpy(&qc->result_tf, &tf, sizeof(tf));
qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
if (qc->result_tf.auxiliary) {
char sense_key, asc, ascq;
sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
asc = (qc->result_tf.auxiliary >> 8) & 0xff;
ascq = qc->result_tf.auxiliary & 0xff;
ata_dev_dbg(dev, "NCQ Autosense %02x/%02x/%02x\n",
sense_key, asc, ascq);
ata_scsi_set_sense(qc->scsicmd, sense_key, asc, ascq);
ata_scsi_set_sense_information(qc->scsicmd, &qc->result_tf);
qc->flags |= ATA_QCFLAG_SENSE_VALID;
}
ehc->i.err_mask &= ~AC_ERR_DEV;
}
@@ -1897,27 +1818,6 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
return ATA_EH_RESET;
}
/*
* Sense data reporting does not work if the
* device fault bit is set.
*/
if ((stat & ATA_SENSE) && !(stat & ATA_DF) &&
!(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
tmp = ata_eh_request_sense(qc, qc->scsicmd);
if (tmp)
qc->err_mask |= tmp;
else
ata_scsi_set_sense_information(qc->scsicmd, tf);
} else {
ata_dev_warn(qc->dev, "sense data available but port frozen\n");
}
}
/* Set by NCQ autosense or request sense above */
if (qc->flags & ATA_QCFLAG_SENSE_VALID)
return 0;
if (stat & (ATA_ERR | ATA_DF))
qc->err_mask |= AC_ERR_DEV;
else
@@ -2661,15 +2561,14 @@ static void ata_eh_link_report(struct ata_link *link)
#ifdef CONFIG_ATA_VERBOSE_ERROR
if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
ATA_SENSE | ATA_ERR)) {
ATA_ERR)) {
if (res->command & ATA_BUSY)
ata_dev_err(qc->dev, "status: { Busy }\n");
else
ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n",
ata_dev_err(qc->dev, "status: { %s%s%s%s}\n",
res->command & ATA_DRDY ? "DRDY " : "",
res->command & ATA_DF ? "DF " : "",
res->command & ATA_DRQ ? "DRQ " : "",
res->command & ATA_SENSE ? "SENSE " : "",
res->command & ATA_ERR ? "ERR " : "");
}

View File

@@ -270,28 +270,13 @@ DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
ata_scsi_park_show, ata_scsi_park_store);
EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
{
if (!cmd)
return;
cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq);
}
void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
const struct ata_taskfile *tf)
{
u64 information;
if (!cmd)
return;
information = ata_tf_read_block(tf, NULL);
scsi_set_sense_information(cmd->sense_buffer, information);
}
static ssize_t
ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -1792,9 +1777,7 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
((cdb[2] & 0x20) || need_sense)) {
ata_gen_passthru_sense(qc);
} else {
if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
cmd->result = SAM_STAT_CHECK_CONDITION;
} else if (!need_sense) {
if (!need_sense) {
cmd->result = SAM_STAT_GOOD;
} else {
/* TODO: decide which descriptor format to use

View File

@@ -67,8 +67,7 @@ extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag);
extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
u64 block, u32 n_block, unsigned int tf_flags,
unsigned int tag);
extern u64 ata_tf_read_block(const struct ata_taskfile *tf,
struct ata_device *dev);
extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev);
extern unsigned ata_exec_internal(struct ata_device *dev,
struct ata_taskfile *tf, const u8 *cdb,
int dma_dir, void *buf, unsigned int buflen,
@@ -138,9 +137,6 @@ extern int ata_scsi_add_hosts(struct ata_host *host,
struct scsi_host_template *sht);
extern void ata_scsi_scan_host(struct ata_port *ap, int sync);
extern int ata_scsi_offline_dev(struct ata_device *dev);
extern void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq);
extern void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
const struct ata_taskfile *tf);
extern void ata_scsi_media_change_notify(struct ata_device *dev);
extern void ata_scsi_hotplug(struct work_struct *work);
extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);

View File

@@ -1238,8 +1238,12 @@ static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
readl(mmio + PDC_SDRAM_CONTROL);
/* Turn on for ECC */
pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
PDC_DIMM_SPD_TYPE, &spd0);
if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
PDC_DIMM_SPD_TYPE, &spd0)) {
pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
return 1;
}
if (spd0 == 0x02) {
data |= (0x01 << 16);
writel(data, mmio + PDC_SDRAM_CONTROL);
@@ -1380,8 +1384,12 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
/* ECC initialization. */
pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
PDC_DIMM_SPD_TYPE, &spd0);
if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
PDC_DIMM_SPD_TYPE, &spd0)) {
pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
return 1;
}
if (spd0 == 0x02) {
void *buf;
VPRINTK("Start ECC initialization\n");

View File

@@ -689,6 +689,10 @@ struct dma_chan *dma_request_slave_channel(struct device *dev,
struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
if (IS_ERR(ch))
return NULL;
dma_cap_set(DMA_PRIVATE, ch->device->cap_mask);
ch->device->privatecnt++;
return ch;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);
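Rationale for the two added lines: dma_release_channel() decrements privatecnt and clears DMA_PRIVATE from the device's cap_mask when the count reaches zero, so a channel handed out here must take the matching reference, or the accounting goes negative and the capability bit can be cleared while other private channels are still outstanding. From a client driver, the pairing looks roughly like this (hypothetical "rx" slave name):

struct dma_chan *chan;

chan = dma_request_slave_channel(dev, "rx"); /* sets DMA_PRIVATE, privatecnt++ */
if (chan) {
        /* ...prepare and submit descriptors... */
        dma_release_channel(chan); /* privatecnt--, clears DMA_PRIVATE at zero */
}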

View File

@@ -873,9 +873,10 @@ static void drm_dp_destroy_port(struct kref *kref)
from an EDID retrieval */
if (port->connector) {
mutex_lock(&mgr->destroy_connector_lock);
list_add(&port->connector->destroy_list, &mgr->destroy_connector_list);
list_add(&port->next, &mgr->destroy_connector_list);
mutex_unlock(&mgr->destroy_connector_lock);
schedule_work(&mgr->destroy_connector_work);
return;
}
drm_dp_port_teardown_pdt(port, port->pdt);
@@ -2659,7 +2660,7 @@ static void drm_dp_tx_work(struct work_struct *work)
static void drm_dp_destroy_connector_work(struct work_struct *work)
{
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
struct drm_connector *connector;
struct drm_dp_mst_port *port;
/*
* Not a regular list traverse as we have to drop the destroy
@@ -2668,15 +2669,21 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
*/
for (;;) {
mutex_lock(&mgr->destroy_connector_lock);
connector = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_connector, destroy_list);
if (!connector) {
port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
if (!port) {
mutex_unlock(&mgr->destroy_connector_lock);
break;
}
list_del(&connector->destroy_list);
list_del(&port->next);
mutex_unlock(&mgr->destroy_connector_lock);
mgr->cbs->destroy_connector(mgr, connector);
mgr->cbs->destroy_connector(mgr, port->connector);
drm_dp_port_teardown_pdt(port, port->pdt);
if (!port->input && port->vcpi.vcpi > 0)
drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
kfree(port);
}
}
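The loop above is the standard pop-one-then-drop-the-lock pattern: the mutex protects only the pending list, while per-port teardown may sleep or take other locks, so each item is unlinked under the lock and destroyed outside it. In generic form (illustrative names):

/* Generic sketch of the traversal used above. */
for (;;) {
        mutex_lock(&pending_lock);
        item = list_first_entry_or_null(&pending, struct item, next);
        if (item)
                list_del(&item->next);
        mutex_unlock(&pending_lock);

        if (!item)
                break;
        teardown(item); /* safe: item unlinked, lock no longer held */
}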

View File

@@ -129,8 +129,9 @@ int intel_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state,
bool async)
{
int ret;
int i;
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
int ret, i;
if (async) {
DRM_DEBUG_KMS("i915 does not yet support async commit\n");
@@ -142,48 +143,18 @@ int intel_atomic_commit(struct drm_device *dev,
return ret;
/* Point of no return */
/*
* FIXME: The proper sequence here will eventually be:
*
* drm_atomic_helper_swap_state(dev, state)
* drm_atomic_helper_commit_modeset_disables(dev, state);
* drm_atomic_helper_commit_planes(dev, state);
* drm_atomic_helper_commit_modeset_enables(dev, state);
* drm_atomic_helper_wait_for_vblanks(dev, state);
* drm_atomic_helper_cleanup_planes(dev, state);
* drm_atomic_state_free(state);
*
* once we have full atomic modeset. For now, just manually update
* plane states to avoid clobbering good states with dummy states
* while nuclear pageflipping.
*/
for (i = 0; i < dev->mode_config.num_total_plane; i++) {
struct drm_plane *plane = state->planes[i];
if (!plane)
continue;
plane->state->state = state;
swap(state->plane_states[i], plane->state);
plane->state->state = NULL;
}
drm_atomic_helper_swap_state(dev, state);
/* swap crtc_scaler_state */
for (i = 0; i < dev->mode_config.num_crtc; i++) {
struct drm_crtc *crtc = state->crtcs[i];
if (!crtc) {
continue;
}
to_intel_crtc(crtc)->config->scaler_state =
to_intel_crtc_state(state->crtc_states[i])->scaler_state;
for_each_crtc_in_state(state, crtc, crtc_state, i) {
to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
if (INTEL_INFO(dev)->gen >= 9)
skl_detach_scalers(to_intel_crtc(crtc));
drm_atomic_helper_commit_planes_on_crtc(crtc_state);
}
drm_atomic_helper_commit_planes(dev, state);
drm_atomic_helper_wait_for_vblanks(dev, state);
drm_atomic_helper_cleanup_planes(dev, state);
drm_atomic_state_free(state);

View File

@@ -11826,7 +11826,9 @@ encoder_retry:
goto encoder_retry;
}
pipe_config->dither = pipe_config->pipe_bpp != base_bpp;
/* Dithering seems to not pass through bits correctly when it should, so
* only enable it on 6bpc panels. */
pipe_config->dither = pipe_config->pipe_bpp == 6*3;
DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
@@ -12624,17 +12626,17 @@ static int __intel_set_mode(struct drm_crtc *modeset_crtc,
modeset_update_crtc_power_domains(state);
drm_atomic_helper_commit_planes(dev, state);
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
for_each_crtc_in_state(state, crtc, crtc_state, i) {
if (!needs_modeset(crtc->state) || !crtc->state->enable)
if (!needs_modeset(crtc->state) || !crtc->state->enable) {
drm_atomic_helper_commit_planes_on_crtc(crtc_state);
continue;
}
update_scanline_offset(to_intel_crtc(crtc));
dev_priv->display.crtc_enable(crtc);
intel_crtc_enable_planes(crtc);
drm_atomic_helper_commit_planes_on_crtc(crtc_state);
}
/* FIXME: add subpixel order */
@@ -12891,20 +12893,11 @@ intel_modeset_stage_output_state(struct drm_device *dev,
return 0;
}
static bool primary_plane_visible(struct drm_crtc *crtc)
{
struct intel_plane_state *plane_state =
to_intel_plane_state(crtc->primary->state);
return plane_state->visible;
}
static int intel_crtc_set_config(struct drm_mode_set *set)
{
struct drm_device *dev;
struct drm_atomic_state *state = NULL;
struct intel_crtc_state *pipe_config;
bool primary_plane_was_visible;
int ret;
BUG_ON(!set);
@@ -12943,38 +12936,8 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
intel_update_pipe_size(to_intel_crtc(set->crtc));
primary_plane_was_visible = primary_plane_visible(set->crtc);
ret = intel_set_mode_with_config(set->crtc, pipe_config, true);
if (ret == 0 &&
pipe_config->base.enable &&
pipe_config->base.planes_changed &&
!needs_modeset(&pipe_config->base)) {
struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
/*
* We need to make sure the primary plane is re-enabled if it
* has previously been turned off.
*/
if (ret == 0 && !primary_plane_was_visible &&
primary_plane_visible(set->crtc)) {
WARN_ON(!intel_crtc->active);
intel_post_enable_primary(set->crtc);
}
/*
* In the fastboot case this may be our only check of the
* state after boot. It would be better to only do it on
* the first update, but we don't have a nice way of doing that
* (and really, set_config isn't used much for high freq page
* flipping, so increasing its cost here shouldn't be a big
* deal).
*/
if (i915.fastboot && ret == 0)
intel_modeset_check_state(set->crtc->dev);
}
if (ret) {
DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
set->crtc->base.id, ret);
@@ -13305,6 +13268,9 @@ intel_check_primary_plane(struct drm_plane *plane,
*/
if (IS_BROADWELL(dev))
intel_crtc->atomic.wait_vblank = true;
if (crtc_state)
intel_crtc->atomic.post_enable_primary = true;
}
/*
@@ -13317,6 +13283,10 @@ intel_check_primary_plane(struct drm_plane *plane,
if (!state->visible || !fb)
intel_crtc->atomic.disable_ips = true;
if (!state->visible && old_state->visible &&
crtc_state && !needs_modeset(&crtc_state->base))
intel_crtc->atomic.pre_disable_primary = true;
intel_crtc->atomic.fb_bits |=
INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
@@ -15034,6 +15004,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
struct intel_plane_state *plane_state;
memset(crtc->config, 0, sizeof(*crtc->config));
crtc->config->base.crtc = &crtc->base;
crtc->config->quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;

View File

@@ -814,7 +814,7 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
printk(KERN_ERR MOD
"Unexpected cqe_status 0x%x for QPID=0x%0x\n",
CQE_STATUS(&cqe), CQE_QPID(&cqe));
ret = -EINVAL;
wc->status = IB_WC_FATAL_ERR;
}
}
out:

View File

@@ -26,7 +26,6 @@
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -2523,33 +2522,3 @@ void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq)
}
}
EXPORT_SYMBOL(scsi_build_sense_buffer);
/**
* scsi_set_sense_information - set the information field in a
* formatted sense data buffer
* @buf: Where to build sense data
* @info: 64-bit information value to be set
*
**/
void scsi_set_sense_information(u8 *buf, u64 info)
{
if ((buf[0] & 0x7f) == 0x72) {
u8 *ucp, len;
len = buf[7];
ucp = (char *)scsi_sense_desc_find(buf, len + 8, 0);
if (!ucp) {
buf[7] = len + 0xa;
ucp = buf + 8 + len;
}
ucp[0] = 0;
ucp[1] = 0xa;
ucp[2] = 0x80; /* Valid bit */
ucp[3] = 0;
put_unaligned_be64(info, &ucp[4]);
} else if ((buf[0] & 0x7f) == 0x70) {
buf[0] |= 0x80;
put_unaligned_be64(info, &buf[3]);
}
}
EXPORT_SYMBOL(scsi_set_sense_information);

View File

@@ -968,9 +968,9 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;
conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
if (hdr->flags & ISCSI_FLAG_CMD_READ) {
if (hdr->flags & ISCSI_FLAG_CMD_READ)
cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
} else if (hdr->flags & ISCSI_FLAG_CMD_WRITE)
else
cmd->targ_xfer_tag = 0xFFFFFFFF;
cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);

View File

@@ -457,8 +457,15 @@ void target_unregister_template(const struct target_core_fabric_ops *fo)
if (!strcmp(t->tf_ops->name, fo->name)) {
BUG_ON(atomic_read(&t->tf_access_cnt));
list_del(&t->tf_list);
mutex_unlock(&g_tf_lock);
/*
* Wait for any outstanding fabric se_deve_entry->rcu_head
* callbacks to complete post kfree_rcu(), before allowing
* fabric driver unload of TFO->module to proceed.
*/
rcu_barrier();
kfree(t);
break;
return;
}
}
mutex_unlock(&g_tf_lock);

View File

@@ -84,8 +84,16 @@ void target_backend_unregister(const struct target_backend_ops *ops)
list_for_each_entry(tb, &backend_list, list) {
if (tb->ops == ops) {
list_del(&tb->list);
mutex_unlock(&backend_mutex);
/*
* Wait for any outstanding backend driver ->rcu_head
* callbacks to complete post TBO->free_device() ->
* call_rcu(), before allowing backend driver module
* unload of target_backend_ops->owner to proceed.
*/
rcu_barrier();
kfree(tb);
break;
return;
}
}
mutex_unlock(&backend_mutex);
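Both unregister paths above were reworked the same way: unlink the entry under the lock, drop the lock (rcu_barrier() can block for a full grace period and must not be called with it held), wait for outstanding RCU callbacks, then free. Condensed to its skeleton, with illustrative types and names:

struct entry {
        struct list_head list;
};

static void unregister_entry(struct mutex *lock, struct entry *e)
{
        mutex_lock(lock);
        list_del(&e->list);
        mutex_unlock(lock); /* don't sleep in rcu_barrier() under the lock */

        rcu_barrier();      /* every queued call_rcu()/kfree_rcu() callback
                             * has run; module code can now safely go away */
        kfree(e);
}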

View File

@@ -1203,17 +1203,13 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
struct se_dev_entry *deve;
struct se_session *sess = cmd->se_sess;
struct se_node_acl *nacl;
struct scsi_lun slun;
unsigned char *buf;
u32 lun_count = 0, offset = 8;
if (cmd->data_length < 16) {
pr_warn("REPORT LUNS allocation length %u too small\n",
cmd->data_length);
return TCM_INVALID_CDB_FIELD;
}
__be32 len;
buf = transport_kmap_data_sg(cmd);
if (!buf)
if (cmd->data_length && !buf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
/*
@@ -1221,11 +1217,9 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
* coming via a target_core_mod PASSTHROUGH op, and not through
* a $FABRIC_MOD. In that case, report LUN=0 only.
*/
if (!sess) {
int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
lun_count = 1;
if (!sess)
goto done;
}
nacl = sess->se_node_acl;
rcu_read_lock();
@@ -1236,10 +1230,12 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
* See SPC2-R20 7.19.
*/
lun_count++;
if ((offset + 8) > cmd->data_length)
if (offset >= cmd->data_length)
continue;
int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
int_to_scsilun(deve->mapped_lun, &slun);
memcpy(buf + offset, &slun,
min(8u, cmd->data_length - offset));
offset += 8;
}
rcu_read_unlock();
@@ -1248,12 +1244,22 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
* See SPC3 r07, page 159.
*/
done:
lun_count *= 8;
buf[0] = ((lun_count >> 24) & 0xff);
buf[1] = ((lun_count >> 16) & 0xff);
buf[2] = ((lun_count >> 8) & 0xff);
buf[3] = (lun_count & 0xff);
transport_kunmap_data_sg(cmd);
/*
* If no LUNs are accessible, report virtual LUN 0.
*/
if (lun_count == 0) {
int_to_scsilun(0, &slun);
if (cmd->data_length > 8)
memcpy(buf + offset, &slun,
min(8u, cmd->data_length - offset));
lun_count = 1;
}
if (buf) {
len = cpu_to_be32(lun_count * 8);
memcpy(buf, &len, min_t(int, sizeof len, cmd->data_length));
transport_kunmap_data_sg(cmd);
}
target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8);
return 0;
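For reference, the REPORT LUNS parameter data assembled here (SPC-3/SPC-4) is a 4-byte big-endian LUN list length (bytes of LUN entries, i.e. lun_count * 8), 4 reserved bytes, then 8-byte LUN entries from offset 8; hence the 8 + lun_count * 8 completion length. A standalone sketch of the header encoding (hypothetical helper, userspace-style for clarity):

#include <stdint.h>
#include <string.h>

/* Fill bytes 0-7 of a REPORT LUNS response: big-endian list length
 * (lun_count * 8) followed by four reserved zero bytes. */
static void report_luns_header(uint8_t *buf, uint32_t lun_count)
{
        uint32_t len = lun_count * 8;

        buf[0] = (len >> 24) & 0xff;
        buf[1] = (len >> 16) & 0xff;
        buf[2] = (len >> 8) & 0xff;
        buf[3] = len & 0xff;
        memset(&buf[4], 0, 4); /* reserved */
}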

View File

@ -68,7 +68,7 @@ struct power_table {
* registered cooling device.
* @cpufreq_state: integer value representing the current state of cpufreq
* cooling devices.
* @cpufreq_val: integer value representing the absolute value of the clipped
* @clipped_freq: integer value representing the absolute value of the clipped
* frequency.
* @max_level: maximum cooling level. One less than total number of valid
* cpufreq frequencies.
@@ -91,7 +91,7 @@ struct cpufreq_cooling_device {
int id;
struct thermal_cooling_device *cool_dev;
unsigned int cpufreq_state;
unsigned int cpufreq_val;
unsigned int clipped_freq;
unsigned int max_level;
unsigned int *freq_table; /* In descending order */
struct cpumask allowed_cpus;
@@ -107,6 +107,9 @@ struct cpufreq_cooling_device {
static DEFINE_IDR(cpufreq_idr);
static DEFINE_MUTEX(cooling_cpufreq_lock);
static unsigned int cpufreq_dev_count;
static DEFINE_MUTEX(cooling_list_lock);
static LIST_HEAD(cpufreq_dev_list);
/**
@@ -185,14 +188,14 @@ unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
{
struct cpufreq_cooling_device *cpufreq_dev;
mutex_lock(&cooling_cpufreq_lock);
mutex_lock(&cooling_list_lock);
list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) {
mutex_unlock(&cooling_cpufreq_lock);
mutex_unlock(&cooling_list_lock);
return get_level(cpufreq_dev, freq);
}
}
mutex_unlock(&cooling_cpufreq_lock);
mutex_unlock(&cooling_list_lock);
pr_err("%s: cpu:%d not part of any cooling device\n", __func__, cpu);
return THERMAL_CSTATE_INVALID;
@@ -215,29 +218,35 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
unsigned long event, void *data)
{
struct cpufreq_policy *policy = data;
unsigned long max_freq = 0;
unsigned long clipped_freq;
struct cpufreq_cooling_device *cpufreq_dev;
switch (event) {
case CPUFREQ_ADJUST:
mutex_lock(&cooling_cpufreq_lock);
list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
if (!cpumask_test_cpu(policy->cpu,
&cpufreq_dev->allowed_cpus))
continue;
max_freq = cpufreq_dev->cpufreq_val;
if (policy->max != max_freq)
cpufreq_verify_within_limits(policy, 0,
max_freq);
}
mutex_unlock(&cooling_cpufreq_lock);
break;
default:
if (event != CPUFREQ_ADJUST)
return NOTIFY_DONE;
mutex_lock(&cooling_list_lock);
list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
if (!cpumask_test_cpu(policy->cpu, &cpufreq_dev->allowed_cpus))
continue;
/*
* policy->max is the maximum allowed frequency defined by user
* and clipped_freq is the maximum that thermal constraints
* allow.
*
* If clipped_freq is lower than policy->max, then we need to
* readjust policy->max.
*
* But, if clipped_freq is greater than policy->max, we don't
* need to do anything.
*/
clipped_freq = cpufreq_dev->clipped_freq;
if (policy->max > clipped_freq)
cpufreq_verify_within_limits(policy, 0, clipped_freq);
break;
}
mutex_unlock(&cooling_list_lock);
return NOTIFY_OK;
}
@@ -519,7 +528,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
clip_freq = cpufreq_device->freq_table[state];
cpufreq_device->cpufreq_state = state;
cpufreq_device->cpufreq_val = clip_freq;
cpufreq_device->clipped_freq = clip_freq;
cpufreq_update_policy(cpu);
@@ -861,17 +870,19 @@ __cpufreq_cooling_register(struct device_node *np,
pr_debug("%s: freq:%u KHz\n", __func__, freq);
}
cpufreq_dev->cpufreq_val = cpufreq_dev->freq_table[0];
cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0];
cpufreq_dev->cool_dev = cool_dev;
mutex_lock(&cooling_cpufreq_lock);
mutex_lock(&cooling_list_lock);
list_add(&cpufreq_dev->node, &cpufreq_dev_list);
mutex_unlock(&cooling_list_lock);
/* Register the notifier for first cpufreq cooling device */
if (list_empty(&cpufreq_dev_list))
if (!cpufreq_dev_count++)
cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
list_add(&cpufreq_dev->node, &cpufreq_dev_list);
mutex_unlock(&cooling_cpufreq_lock);
return cool_dev;
@@ -1013,13 +1024,17 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
return;
cpufreq_dev = cdev->devdata;
mutex_lock(&cooling_cpufreq_lock);
list_del(&cpufreq_dev->node);
/* Unregister the notifier for the last cpufreq cooling device */
if (list_empty(&cpufreq_dev_list))
mutex_lock(&cooling_cpufreq_lock);
if (!--cpufreq_dev_count)
cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
mutex_lock(&cooling_list_lock);
list_del(&cpufreq_dev->node);
mutex_unlock(&cooling_list_lock);
mutex_unlock(&cooling_cpufreq_lock);
thermal_cooling_device_unregister(cpufreq_dev->cool_dev);

View File

@@ -334,7 +334,7 @@ static int allocate_power(struct thermal_zone_device *tz,
max_allocatable_power, current_temp,
(s32)control_temp - (s32)current_temp);
devm_kfree(&tz->device, req_power);
kfree(req_power);
unlock:
mutex_unlock(&tz->lock);
@@ -426,7 +426,7 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
return -EINVAL;
}
params = devm_kzalloc(&tz->device, sizeof(*params), GFP_KERNEL);
params = kzalloc(sizeof(*params), GFP_KERNEL);
if (!params)
return -ENOMEM;
@@ -468,14 +468,14 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
return 0;
free:
devm_kfree(&tz->device, params);
kfree(params);
return ret;
}
static void power_allocator_unbind(struct thermal_zone_device *tz)
{
dev_dbg(&tz->device, "Unbinding from thermal zone %d\n", tz->id);
devm_kfree(&tz->device, tz->governor_data);
kfree(tz->governor_data);
tz->governor_data = NULL;
}

View File

@@ -743,8 +743,6 @@ struct drm_connector {
uint8_t num_h_tile, num_v_tile;
uint8_t tile_h_loc, tile_v_loc;
uint16_t tile_h_size, tile_v_size;
struct list_head destroy_list;
};
/**

View File

@@ -385,8 +385,6 @@ enum {
SATA_SSP = 0x06, /* Software Settings Preservation */
SATA_DEVSLP = 0x09, /* Device Sleep */
SETFEATURE_SENSE_DATA = 0xC3, /* Sense Data Reporting feature */
/* feature values for SET_MAX */
ATA_SET_MAX_ADDR = 0x00,
ATA_SET_MAX_PASSWD = 0x01,
@@ -530,8 +528,6 @@ struct ata_bmdma_prd {
#define ata_id_cdb_intr(id) (((id)[ATA_ID_CONFIG] & 0x60) == 0x20)
#define ata_id_has_da(id) ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4))
#define ata_id_has_devslp(id) ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8))
#define ata_id_has_ncq_autosense(id) \
((id)[ATA_ID_FEATURE_SUPP] & (1 << 7))
static inline bool ata_id_has_hipm(const u16 *id)
{
@@ -720,20 +716,6 @@ static inline bool ata_id_has_read_log_dma_ext(const u16 *id)
return false;
}
static inline bool ata_id_has_sense_reporting(const u16 *id)
{
if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
return false;
return id[ATA_ID_COMMAND_SET_3] & (1 << 6);
}
static inline bool ata_id_sense_reporting_enabled(const u16 *id)
{
if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
return false;
return id[ATA_ID_COMMAND_SET_4] & (1 << 6);
}
/**
* ata_id_major_version - get ATA level of drive
* @id: Identify data

View File

@@ -28,7 +28,6 @@ extern int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
u64 * info_out);
extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq);
extern void scsi_set_sense_information(u8 *buf, u64 info);
extern int scsi_ioctl_reset(struct scsi_device *, int __user *);

View File

@@ -1223,7 +1223,7 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
spin_unlock_irq(&callback_lock);
/* use trialcs->mems_allowed as a temp variable */
update_nodemasks_hier(cs, &cs->mems_allowed);
update_nodemasks_hier(cs, &trialcs->mems_allowed);
done:
return retval;
}