Linux 4.2-rc8

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABAgAGBQJV2pUkAAoJEHm+PkMAQRiGCIoH/Rb29ZjdCoZJp9OtnjAG+qRc
bG3YuomIdib86x7xHRKKaLWBa7din7IYjuwT/X4S4duO5a1R5Lp1sRG3IlGfhT0W
nBNbjFl4q4bOyiTPtTRTYyh4g5UQv4IuyCnCmZyCTJyVi/O6HVM9TWKUzm68P2dJ
30LwLUcQJ+mHueGJwFBAXe2BaojEpvYCdSX6tvbrQ/8X3FrVExZXuJl4uMYNFYNK
ZwG/v5t7tYOiAe76JGbrEuVFPZWLPEW7amHOWR0T4Ye4nWTlBgx7fENiNRlfgcvI
CM16l/xkyrZQ3Q5jZy1qYDfdHYF++dyEDysX4w1ae/X0aaLZn7l+u5VQD6WpkQQ=
=IF6I
-----END PGP SIGNATURE-----

Merge tag 'v4.2-rc8' into drm-next

Linux 4.2-rc8

Backmerge required for Intel so they can fix their -next tree up properly.
commit 3732ce72b4

@@ -0,0 +1 @@
+Christoph Hellwig <hch@lst.de>
Makefile

@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc8
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
@@ -312,6 +312,9 @@ INSTALL_TARGETS = zinstall uinstall install
 
 PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
 
+bootpImage uImage: zImage
+zImage: Image
+
 $(BOOT_TARGETS): vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
 
@@ -96,7 +96,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 	}
 
 	/* the mmap semaphore is taken only if not in an atomic context */
-	atomic = in_atomic();
+	atomic = faulthandler_disabled();
 
 	if (!atomic)
 		down_read(&current->mm->mmap_sem);
@@ -392,6 +392,7 @@ static struct irq_chip wakeupgen_chip = {
 	.irq_mask = wakeupgen_mask,
 	.irq_unmask = wakeupgen_unmask,
 	.irq_retrigger = irq_chip_retrigger_hierarchy,
+	.irq_set_type = irq_chip_set_type_parent,
 	.flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
 #ifdef CONFIG_SMP
 	.irq_set_affinity = irq_chip_set_affinity_parent,
@@ -407,7 +407,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
 	.set	noat
 	SAVE_ALL
 	FEXPORT(handle_\exception\ext)
-	__BUILD_clear_\clear
+	__build_clear_\clear
 	.set	at
 	__BUILD_\verbose \exception
 	move	a0, sp
@@ -79,12 +79,12 @@ do { \
 #else /* CONFIG_X86_32 */
 
 /* frame pointer must be last for get_wchan */
-#define SAVE_CONTEXT "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
-#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\t"
+#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
+#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
 
 #define __EXTRA_CLOBBER \
 	, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
-	  "r12", "r13", "r14", "r15", "flags"
+	  "r12", "r13", "r14", "r15"
 
 #ifdef CONFIG_CC_STACKPROTECTOR
 #define __switch_canary \

@@ -100,11 +100,7 @@ do { \
 #define __switch_canary_iparam
 #endif /* CC_STACKPROTECTOR */
 
-/*
- * There is no need to save or restore flags, because flags are always
- * clean in kernel mode, with the possible exception of IOPL.  Kernel IOPL
- * has no effect.
- */
+/* Save restore flags to clear handle leaking NT */
 #define switch_to(prev, next, last) \
 	asm volatile(SAVE_CONTEXT \
 	     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
@@ -322,7 +322,7 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
 		irq_data->chip = &lapic_controller;
 		irq_data->chip_data = data;
 		irq_data->hwirq = virq + i;
-		err = assign_irq_vector_policy(virq, irq_data->node, data,
+		err = assign_irq_vector_policy(virq + i, irq_data->node, data,
 					       info);
 		if (err)
 			goto error;
@@ -270,7 +270,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 	dst_fpu->fpregs_active = 0;
 	dst_fpu->last_cpu = -1;
 
-	if (src_fpu->fpstate_active)
+	if (src_fpu->fpstate_active && cpu_has_fpu)
 		fpu_copy(dst_fpu, src_fpu);
 
 	return 0;
@@ -40,7 +40,12 @@ static void fpu__init_cpu_generic(void)
 		write_cr0(cr0);
 
 	/* Flush out any pending x87 state: */
-	asm volatile ("fninit");
+#ifdef CONFIG_MATH_EMULATION
+	if (!cpu_has_fpu)
+		fpstate_init_soft(&current->thread.fpu.state.soft);
+	else
+#endif
+		asm volatile ("fninit");
 }
 
 /*
@@ -408,6 +408,7 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
 static void mwait_idle(void)
 {
 	if (!current_set_polling_and_test()) {
+		trace_cpu_idle_rcuidle(1, smp_processor_id());
 		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
 			smp_mb(); /* quirk */
 			clflush((void *)&current_thread_info()->flags);

@@ -419,6 +420,7 @@ static void mwait_idle(void)
 			__sti_mwait(0, 0);
 		else
 			local_irq_enable();
+		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 	} else {
 		local_irq_enable();
 	}
@@ -8,7 +8,7 @@ config XEN
 	select PARAVIRT_CLOCK
 	select XEN_HAVE_PVMMU
 	depends on X86_64 || (X86_32 && X86_PAE)
-	depends on X86_TSC
+	depends on X86_LOCAL_APIC && X86_TSC
 	help
 	  This is the Linux Xen port.  Enabling this will allow the
 	  kernel to boot in a paravirtualized environment under the

@@ -17,7 +17,7 @@ config XEN
 config XEN_DOM0
 	def_bool y
 	depends on XEN && PCI_XEN && SWIOTLB_XEN
-	depends on X86_LOCAL_APIC && X86_IO_APIC && ACPI && PCI
+	depends on X86_IO_APIC && ACPI && PCI
 
 config XEN_PVHVM
 	def_bool y
@ -393,8 +393,6 @@ static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
|
|||
struct scatterlist *cipher = areq_ctx->cipher;
|
||||
struct scatterlist *hsg = areq_ctx->hsg;
|
||||
struct scatterlist *tsg = areq_ctx->tsg;
|
||||
struct scatterlist *assoc1;
|
||||
struct scatterlist *assoc2;
|
||||
unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
|
||||
unsigned int cryptlen = req->cryptlen;
|
||||
struct page *dstp;
|
||||
|
@ -412,27 +410,19 @@ static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
|
|||
cryptlen += ivsize;
|
||||
}
|
||||
|
||||
if (sg_is_last(assoc))
|
||||
return -EINVAL;
|
||||
|
||||
assoc1 = assoc + 1;
|
||||
if (sg_is_last(assoc1))
|
||||
return -EINVAL;
|
||||
|
||||
assoc2 = assoc + 2;
|
||||
if (!sg_is_last(assoc2))
|
||||
if (assoc->length < 12)
|
||||
return -EINVAL;
|
||||
|
||||
sg_init_table(hsg, 2);
|
||||
sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
|
||||
sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);
|
||||
sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
|
||||
sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);
|
||||
|
||||
sg_init_table(tsg, 1);
|
||||
sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);
|
||||
sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);
|
||||
|
||||
areq_ctx->cryptlen = cryptlen;
|
||||
areq_ctx->headlen = assoc->length + assoc2->length;
|
||||
areq_ctx->trailen = assoc1->length;
|
||||
areq_ctx->headlen = 8;
|
||||
areq_ctx->trailen = 4;
|
||||
areq_ctx->sg = dst;
|
||||
|
||||
areq_ctx->complete = authenc_esn_geniv_ahash_done;
|
||||
|
@ -563,8 +553,6 @@ static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
|
|||
struct scatterlist *cipher = areq_ctx->cipher;
|
||||
struct scatterlist *hsg = areq_ctx->hsg;
|
||||
struct scatterlist *tsg = areq_ctx->tsg;
|
||||
struct scatterlist *assoc1;
|
||||
struct scatterlist *assoc2;
|
||||
unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
|
||||
struct page *srcp;
|
||||
u8 *vsrc;
|
||||
|
@ -580,27 +568,19 @@ static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
|
|||
cryptlen += ivsize;
|
||||
}
|
||||
|
||||
if (sg_is_last(assoc))
|
||||
return -EINVAL;
|
||||
|
||||
assoc1 = assoc + 1;
|
||||
if (sg_is_last(assoc1))
|
||||
return -EINVAL;
|
||||
|
||||
assoc2 = assoc + 2;
|
||||
if (!sg_is_last(assoc2))
|
||||
if (assoc->length < 12)
|
||||
return -EINVAL;
|
||||
|
||||
sg_init_table(hsg, 2);
|
||||
sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
|
||||
sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);
|
||||
sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
|
||||
sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);
|
||||
|
||||
sg_init_table(tsg, 1);
|
||||
sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);
|
||||
sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);
|
||||
|
||||
areq_ctx->cryptlen = cryptlen;
|
||||
areq_ctx->headlen = assoc->length + assoc2->length;
|
||||
areq_ctx->trailen = assoc1->length;
|
||||
areq_ctx->headlen = 8;
|
||||
areq_ctx->trailen = 4;
|
||||
areq_ctx->sg = src;
|
||||
|
||||
areq_ctx->complete = authenc_esn_verify_ahash_done;
|
||||
|
|
|
@@ -32,6 +32,7 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/types.h>
+#include <linux/workqueue.h>
 #include <acpi/video.h>
 
 ACPI_MODULE_NAME("video");

@@ -41,6 +42,7 @@ void acpi_video_unregister_backlight(void);
 
 static bool backlight_notifier_registered;
 static struct notifier_block backlight_nb;
+static struct work_struct backlight_notify_work;
 
 static enum acpi_backlight_type acpi_backlight_cmdline = acpi_backlight_undef;
 static enum acpi_backlight_type acpi_backlight_dmi = acpi_backlight_undef;

@@ -262,6 +264,13 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
 	{ },
 };
 
+/* This uses a workqueue to avoid various locking ordering issues */
+static void acpi_video_backlight_notify_work(struct work_struct *work)
+{
+	if (acpi_video_get_backlight_type() != acpi_backlight_video)
+		acpi_video_unregister_backlight();
+}
+
 static int acpi_video_backlight_notify(struct notifier_block *nb,
 				       unsigned long val, void *bd)
 {

@@ -269,9 +278,8 @@ static int acpi_video_backlight_notify(struct notifier_block *nb,
 
 	/* A raw bl registering may change video -> native */
 	if (backlight->props.type == BACKLIGHT_RAW &&
-	    val == BACKLIGHT_REGISTERED &&
-	    acpi_video_get_backlight_type() != acpi_backlight_video)
-		acpi_video_unregister_backlight();
+	    val == BACKLIGHT_REGISTERED)
+		schedule_work(&backlight_notify_work);
 
 	return NOTIFY_OK;
 }

@@ -304,6 +312,8 @@ enum acpi_backlight_type acpi_video_get_backlight_type(void)
 		acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
 				    ACPI_UINT32_MAX, find_video, NULL,
 				    &video_caps, NULL);
+		INIT_WORK(&backlight_notify_work,
+			  acpi_video_backlight_notify_work);
 		backlight_nb.notifier_call = acpi_video_backlight_notify;
 		backlight_nb.priority = 0;
 		if (backlight_register_notifier(&backlight_nb) == 0)
@@ -92,7 +92,7 @@ static inline u32 brcm_sata_readreg(void __iomem *addr)
 	 * Other architectures (e.g., ARM) either do not support big endian, or
 	 * else leave I/O in little endian mode.
 	 */
-	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
 		return __raw_readl(addr);
 	else
 		return readl_relaxed(addr);

@@ -101,7 +101,7 @@ static inline u32 brcm_sata_readreg(void __iomem *addr)
 static inline void brcm_sata_writereg(u32 val, void __iomem *addr)
 {
 	/* See brcm_sata_readreg() comments */
-	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
 		__raw_writel(val, addr);
 	else
 		writel_relaxed(val, addr);

@@ -209,6 +209,7 @@ static void brcm_sata_init(struct brcm_ahci_priv *priv)
 		  priv->top_ctrl + SATA_TOP_CTRL_BUS_CTRL);
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int brcm_ahci_suspend(struct device *dev)
 {
 	struct ata_host *host = dev_get_drvdata(dev);

@@ -231,6 +232,7 @@ static int brcm_ahci_resume(struct device *dev)
 	brcm_sata_phys_enable(priv);
 	return ahci_platform_resume(dev);
 }
+#endif
 
 static struct scsi_host_template ahci_platform_sht = {
 	AHCI_SHT(DRV_NAME),
|
@ -694,11 +694,11 @@ static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
|
|||
* RETURNS:
|
||||
* Block address read from @tf.
|
||||
*/
|
||||
u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
|
||||
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
|
||||
{
|
||||
u64 block = 0;
|
||||
|
||||
if (!dev || tf->flags & ATA_TFLAG_LBA) {
|
||||
if (tf->flags & ATA_TFLAG_LBA) {
|
||||
if (tf->flags & ATA_TFLAG_LBA48) {
|
||||
block |= (u64)tf->hob_lbah << 40;
|
||||
block |= (u64)tf->hob_lbam << 32;
|
||||
|
@ -2147,24 +2147,6 @@ static int ata_dev_config_ncq(struct ata_device *dev,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void ata_dev_config_sense_reporting(struct ata_device *dev)
|
||||
{
|
||||
unsigned int err_mask;
|
||||
|
||||
if (!ata_id_has_sense_reporting(dev->id))
|
||||
return;
|
||||
|
||||
if (ata_id_sense_reporting_enabled(dev->id))
|
||||
return;
|
||||
|
||||
err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
|
||||
if (err_mask) {
|
||||
ata_dev_dbg(dev,
|
||||
"failed to enable Sense Data Reporting, Emask 0x%x\n",
|
||||
err_mask);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* ata_dev_configure - Configure the specified ATA/ATAPI device
|
||||
* @dev: Target device to configure
|
||||
|
@ -2387,7 +2369,7 @@ int ata_dev_configure(struct ata_device *dev)
|
|||
dev->devslp_timing[i] = sata_setting[j];
|
||||
}
|
||||
}
|
||||
ata_dev_config_sense_reporting(dev);
|
||||
|
||||
dev->cdb_len = 16;
|
||||
}
|
||||
|
||||
|
|
|
@ -1592,8 +1592,6 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
|
|||
tf->hob_lbah = buf[10];
|
||||
tf->nsect = buf[12];
|
||||
tf->hob_nsect = buf[13];
|
||||
if (ata_id_has_ncq_autosense(dev->id))
|
||||
tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1629,70 +1627,6 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
|
|||
return err_mask;
|
||||
}
|
||||
|
||||
/**
|
||||
* ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
|
||||
* @dev: device to perform REQUEST_SENSE_SENSE_DATA_EXT to
|
||||
* @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
|
||||
* @dfl_sense_key: default sense key to use
|
||||
*
|
||||
* Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
|
||||
* SENSE. This function is EH helper.
|
||||
*
|
||||
* LOCKING:
|
||||
* Kernel thread context (may sleep).
|
||||
*
|
||||
* RETURNS:
|
||||
* encoded sense data on success, 0 on failure or if sense data
|
||||
* is not available.
|
||||
*/
|
||||
static u32 ata_eh_request_sense(struct ata_queued_cmd *qc,
|
||||
struct scsi_cmnd *cmd)
|
||||
{
|
||||
struct ata_device *dev = qc->dev;
|
||||
struct ata_taskfile tf;
|
||||
unsigned int err_mask;
|
||||
|
||||
if (!cmd)
|
||||
return 0;
|
||||
|
||||
DPRINTK("ATA request sense\n");
|
||||
ata_dev_warn(dev, "request sense\n");
|
||||
if (!ata_id_sense_reporting_enabled(dev->id)) {
|
||||
ata_dev_warn(qc->dev, "sense data reporting disabled\n");
|
||||
return 0;
|
||||
}
|
||||
ata_tf_init(dev, &tf);
|
||||
|
||||
tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
|
||||
tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
|
||||
tf.command = ATA_CMD_REQ_SENSE_DATA;
|
||||
tf.protocol = ATA_PROT_NODATA;
|
||||
|
||||
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
|
||||
/*
|
||||
* ACS-4 states:
|
||||
* The device may set the SENSE DATA AVAILABLE bit to one in the
|
||||
* STATUS field and clear the ERROR bit to zero in the STATUS field
|
||||
* to indicate that the command returned completion without an error
|
||||
* and the sense data described in table 306 is available.
|
||||
*
|
||||
* IOW the 'ATA_SENSE' bit might not be set even though valid
|
||||
* sense data is available.
|
||||
* So check for both.
|
||||
*/
|
||||
if ((tf.command & ATA_SENSE) ||
|
||||
tf.lbah != 0 || tf.lbam != 0 || tf.lbal != 0) {
|
||||
ata_scsi_set_sense(cmd, tf.lbah, tf.lbam, tf.lbal);
|
||||
qc->flags |= ATA_QCFLAG_SENSE_VALID;
|
||||
ata_dev_warn(dev, "sense data %02x/%02x/%02x\n",
|
||||
tf.lbah, tf.lbam, tf.lbal);
|
||||
} else {
|
||||
ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
|
||||
tf.command, err_mask);
|
||||
}
|
||||
return err_mask;
|
||||
}
|
||||
|
||||
/**
|
||||
* atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
|
||||
* @dev: device to perform REQUEST_SENSE to
|
||||
|
@ -1855,19 +1789,6 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
|
|||
memcpy(&qc->result_tf, &tf, sizeof(tf));
|
||||
qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
|
||||
qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
|
||||
if (qc->result_tf.auxiliary) {
|
||||
char sense_key, asc, ascq;
|
||||
|
||||
sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
|
||||
asc = (qc->result_tf.auxiliary >> 8) & 0xff;
|
||||
ascq = qc->result_tf.auxiliary & 0xff;
|
||||
ata_dev_dbg(dev, "NCQ Autosense %02x/%02x/%02x\n",
|
||||
sense_key, asc, ascq);
|
||||
ata_scsi_set_sense(qc->scsicmd, sense_key, asc, ascq);
|
||||
ata_scsi_set_sense_information(qc->scsicmd, &qc->result_tf);
|
||||
qc->flags |= ATA_QCFLAG_SENSE_VALID;
|
||||
}
|
||||
|
||||
ehc->i.err_mask &= ~AC_ERR_DEV;
|
||||
}
|
||||
|
||||
|
@ -1897,27 +1818,6 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
|
|||
return ATA_EH_RESET;
|
||||
}
|
||||
|
||||
/*
|
||||
* Sense data reporting does not work if the
|
||||
* device fault bit is set.
|
||||
*/
|
||||
if ((stat & ATA_SENSE) && !(stat & ATA_DF) &&
|
||||
!(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
|
||||
if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
|
||||
tmp = ata_eh_request_sense(qc, qc->scsicmd);
|
||||
if (tmp)
|
||||
qc->err_mask |= tmp;
|
||||
else
|
||||
ata_scsi_set_sense_information(qc->scsicmd, tf);
|
||||
} else {
|
||||
ata_dev_warn(qc->dev, "sense data available but port frozen\n");
|
||||
}
|
||||
}
|
||||
|
||||
/* Set by NCQ autosense or request sense above */
|
||||
if (qc->flags & ATA_QCFLAG_SENSE_VALID)
|
||||
return 0;
|
||||
|
||||
if (stat & (ATA_ERR | ATA_DF))
|
||||
qc->err_mask |= AC_ERR_DEV;
|
||||
else
|
||||
|
@ -2661,15 +2561,14 @@ static void ata_eh_link_report(struct ata_link *link)
|
|||
|
||||
#ifdef CONFIG_ATA_VERBOSE_ERROR
|
||||
if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
|
||||
ATA_SENSE | ATA_ERR)) {
|
||||
ATA_ERR)) {
|
||||
if (res->command & ATA_BUSY)
|
||||
ata_dev_err(qc->dev, "status: { Busy }\n");
|
||||
else
|
||||
ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n",
|
||||
ata_dev_err(qc->dev, "status: { %s%s%s%s}\n",
|
||||
res->command & ATA_DRDY ? "DRDY " : "",
|
||||
res->command & ATA_DF ? "DF " : "",
|
||||
res->command & ATA_DRQ ? "DRQ " : "",
|
||||
res->command & ATA_SENSE ? "SENSE " : "",
|
||||
res->command & ATA_ERR ? "ERR " : "");
|
||||
}
|
||||
|
||||
|
|
|
@ -270,28 +270,13 @@ DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
|
|||
ata_scsi_park_show, ata_scsi_park_store);
|
||||
EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
|
||||
|
||||
void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
|
||||
static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
|
||||
{
|
||||
if (!cmd)
|
||||
return;
|
||||
|
||||
cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
|
||||
|
||||
scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq);
|
||||
}
|
||||
|
||||
void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
|
||||
const struct ata_taskfile *tf)
|
||||
{
|
||||
u64 information;
|
||||
|
||||
if (!cmd)
|
||||
return;
|
||||
|
||||
information = ata_tf_read_block(tf, NULL);
|
||||
scsi_set_sense_information(cmd->sense_buffer, information);
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
|
@ -1792,9 +1777,7 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
|
|||
((cdb[2] & 0x20) || need_sense)) {
|
||||
ata_gen_passthru_sense(qc);
|
||||
} else {
|
||||
if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
|
||||
cmd->result = SAM_STAT_CHECK_CONDITION;
|
||||
} else if (!need_sense) {
|
||||
if (!need_sense) {
|
||||
cmd->result = SAM_STAT_GOOD;
|
||||
} else {
|
||||
/* TODO: decide which descriptor format to use
|
||||
|
|
|
@ -67,8 +67,7 @@ extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag);
|
|||
extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
|
||||
u64 block, u32 n_block, unsigned int tf_flags,
|
||||
unsigned int tag);
|
||||
extern u64 ata_tf_read_block(const struct ata_taskfile *tf,
|
||||
struct ata_device *dev);
|
||||
extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev);
|
||||
extern unsigned ata_exec_internal(struct ata_device *dev,
|
||||
struct ata_taskfile *tf, const u8 *cdb,
|
||||
int dma_dir, void *buf, unsigned int buflen,
|
||||
|
@ -138,9 +137,6 @@ extern int ata_scsi_add_hosts(struct ata_host *host,
|
|||
struct scsi_host_template *sht);
|
||||
extern void ata_scsi_scan_host(struct ata_port *ap, int sync);
|
||||
extern int ata_scsi_offline_dev(struct ata_device *dev);
|
||||
extern void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq);
|
||||
extern void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
|
||||
const struct ata_taskfile *tf);
|
||||
extern void ata_scsi_media_change_notify(struct ata_device *dev);
|
||||
extern void ata_scsi_hotplug(struct work_struct *work);
|
||||
extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
|
||||
|
|
|
@@ -1238,8 +1238,12 @@ static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
 	readl(mmio + PDC_SDRAM_CONTROL);
 
 	/* Turn on for ECC */
-	pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
-			  PDC_DIMM_SPD_TYPE, &spd0);
+	if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
+			       PDC_DIMM_SPD_TYPE, &spd0)) {
+		pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
+		       PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
+		return 1;
+	}
 	if (spd0 == 0x02) {
 		data |= (0x01 << 16);
 		writel(data, mmio + PDC_SDRAM_CONTROL);

@@ -1380,8 +1384,12 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
 
 	/* ECC initiliazation. */
 
-	pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
-			  PDC_DIMM_SPD_TYPE, &spd0);
+	if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
+			       PDC_DIMM_SPD_TYPE, &spd0)) {
+		pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
+		       PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
+		return 1;
+	}
 	if (spd0 == 0x02) {
 		void *buf;
 		VPRINTK("Start ECC initialization\n");
@@ -462,6 +462,7 @@ void __init mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type)
 	BUG_ON(!imxtm->base);
 
 	imxtm->type = type;
+	imxtm->irq = irq;
 
 	_mxc_timer_init(imxtm);
 }
@@ -180,7 +180,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
 		ret = exynos5250_cpufreq_init(exynos_info);
 	} else {
 		pr_err("%s: Unknown SoC type\n", __func__);
-		return -ENODEV;
+		ret = -ENODEV;
 	}
 
 	if (ret)

@@ -188,12 +188,14 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
 
 	if (exynos_info->set_freq == NULL) {
 		dev_err(&pdev->dev, "No set_freq function (ERR)\n");
+		ret = -EINVAL;
 		goto err_vdd_arm;
 	}
 
 	arm_regulator = regulator_get(NULL, "vdd_arm");
 	if (IS_ERR(arm_regulator)) {
 		dev_err(&pdev->dev, "failed to get resource vdd_arm\n");
+		ret = -EINVAL;
 		goto err_vdd_arm;
 	}
 

@@ -225,7 +227,7 @@ err_cpufreq_reg:
 	regulator_put(arm_regulator);
 err_vdd_arm:
 	kfree(exynos_info);
-	return -EINVAL;
+	return ret;
 }
 
 static struct platform_driver exynos_cpufreq_platdrv = {
|
|||
state->buflen_1;
|
||||
u32 *sh_desc = ctx->sh_desc_fin, *desc;
|
||||
dma_addr_t ptr = ctx->sh_desc_fin_dma;
|
||||
int sec4_sg_bytes;
|
||||
int sec4_sg_bytes, sec4_sg_src_index;
|
||||
int digestsize = crypto_ahash_digestsize(ahash);
|
||||
struct ahash_edesc *edesc;
|
||||
int ret = 0;
|
||||
int sh_len;
|
||||
|
||||
sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
|
||||
sec4_sg_src_index = 1 + (buflen ? 1 : 0);
|
||||
sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
|
||||
|
||||
/* allocate space for base edesc and hw desc commands, link tables */
|
||||
edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
|
||||
|
@ -942,7 +943,7 @@ static int ahash_final_ctx(struct ahash_request *req)
|
|||
state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
|
||||
buf, state->buf_dma, buflen,
|
||||
last_buflen);
|
||||
(edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
|
||||
(edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;
|
||||
|
||||
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
|
||||
sec4_sg_bytes, DMA_TO_DEVICE);
|
||||
|
|
|
@ -71,7 +71,6 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
|
|||
struct sha256_state *sctx = shash_desc_ctx(desc);
|
||||
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
|
||||
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
|
||||
struct nx_sg *in_sg;
|
||||
struct nx_sg *out_sg;
|
||||
u64 to_process = 0, leftover, total;
|
||||
unsigned long irq_flags;
|
||||
|
@ -97,7 +96,6 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
|
|||
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
|
||||
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
|
||||
|
||||
in_sg = nx_ctx->in_sg;
|
||||
max_sg_len = min_t(u64, nx_ctx->ap->sglen,
|
||||
nx_driver.of.max_sg_len/sizeof(struct nx_sg));
|
||||
max_sg_len = min_t(u64, max_sg_len,
|
||||
|
@ -114,17 +112,12 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
|
|||
}
|
||||
|
||||
do {
|
||||
/*
|
||||
* to_process: the SHA256_BLOCK_SIZE data chunk to process in
|
||||
* this update. This value is also restricted by the sg list
|
||||
* limits.
|
||||
*/
|
||||
to_process = total - to_process;
|
||||
to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
|
||||
int used_sgs = 0;
|
||||
struct nx_sg *in_sg = nx_ctx->in_sg;
|
||||
|
||||
if (buf_len) {
|
||||
data_len = buf_len;
|
||||
in_sg = nx_build_sg_list(nx_ctx->in_sg,
|
||||
in_sg = nx_build_sg_list(in_sg,
|
||||
(u8 *) sctx->buf,
|
||||
&data_len,
|
||||
max_sg_len);
|
||||
|
@ -133,15 +126,27 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
|
|||
rc = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
used_sgs = in_sg - nx_ctx->in_sg;
|
||||
}
|
||||
|
||||
/* to_process: SHA256_BLOCK_SIZE aligned chunk to be
|
||||
* processed in this iteration. This value is restricted
|
||||
* by sg list limits and number of sgs we already used
|
||||
* for leftover data. (see above)
|
||||
* In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
|
||||
* but because data may not be aligned, we need to account
|
||||
* for that too. */
|
||||
to_process = min_t(u64, total,
|
||||
(max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
|
||||
to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
|
||||
|
||||
data_len = to_process - buf_len;
|
||||
in_sg = nx_build_sg_list(in_sg, (u8 *) data,
|
||||
&data_len, max_sg_len);
|
||||
|
||||
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
|
||||
|
||||
to_process = (data_len + buf_len);
|
||||
to_process = data_len + buf_len;
|
||||
leftover = total - to_process;
|
||||
|
||||
/*
|
||||
|
|
|
@ -71,7 +71,6 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
|
|||
struct sha512_state *sctx = shash_desc_ctx(desc);
|
||||
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
|
||||
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
|
||||
struct nx_sg *in_sg;
|
||||
struct nx_sg *out_sg;
|
||||
u64 to_process, leftover = 0, total;
|
||||
unsigned long irq_flags;
|
||||
|
@ -97,7 +96,6 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
|
|||
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
|
||||
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
|
||||
|
||||
in_sg = nx_ctx->in_sg;
|
||||
max_sg_len = min_t(u64, nx_ctx->ap->sglen,
|
||||
nx_driver.of.max_sg_len/sizeof(struct nx_sg));
|
||||
max_sg_len = min_t(u64, max_sg_len,
|
||||
|
@ -114,18 +112,12 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
|
|||
}
|
||||
|
||||
do {
|
||||
/*
|
||||
* to_process: the SHA512_BLOCK_SIZE data chunk to process in
|
||||
* this update. This value is also restricted by the sg list
|
||||
* limits.
|
||||
*/
|
||||
to_process = total - leftover;
|
||||
to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
|
||||
leftover = total - to_process;
|
||||
int used_sgs = 0;
|
||||
struct nx_sg *in_sg = nx_ctx->in_sg;
|
||||
|
||||
if (buf_len) {
|
||||
data_len = buf_len;
|
||||
in_sg = nx_build_sg_list(nx_ctx->in_sg,
|
||||
in_sg = nx_build_sg_list(in_sg,
|
||||
(u8 *) sctx->buf,
|
||||
&data_len, max_sg_len);
|
||||
|
||||
|
@ -133,8 +125,20 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
|
|||
rc = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
used_sgs = in_sg - nx_ctx->in_sg;
|
||||
}
|
||||
|
||||
/* to_process: SHA512_BLOCK_SIZE aligned chunk to be
|
||||
* processed in this iteration. This value is restricted
|
||||
* by sg list limits and number of sgs we already used
|
||||
* for leftover data. (see above)
|
||||
* In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
|
||||
* but because data may not be aligned, we need to account
|
||||
* for that too. */
|
||||
to_process = min_t(u64, total,
|
||||
(max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
|
||||
to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
|
||||
|
||||
data_len = to_process - buf_len;
|
||||
in_sg = nx_build_sg_list(in_sg, (u8 *) data,
|
||||
&data_len, max_sg_len);
|
||||
|
@ -146,7 +150,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
|
|||
goto out;
|
||||
}
|
||||
|
||||
to_process = (data_len + buf_len);
|
||||
to_process = data_len + buf_len;
|
||||
leftover = total - to_process;
|
||||
|
||||
/*
|
||||
|
|
|
@@ -689,6 +689,10 @@ struct dma_chan *dma_request_slave_channel(struct device *dev,
 	struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
 	if (IS_ERR(ch))
 		return NULL;
+
+	dma_cap_set(DMA_PRIVATE, ch->device->cap_mask);
+	ch->device->privatecnt++;
+
 	return ch;
 }
 EXPORT_SYMBOL_GPL(dma_request_slave_channel);
@@ -245,4 +245,4 @@ char *bcm47xx_nvram_get_contents(size_t *nvram_size)
 }
 EXPORT_SYMBOL(bcm47xx_nvram_get_contents);
 
-MODULE_LICENSE("GPLv2");
+MODULE_LICENSE("GPL v2");
@@ -773,7 +773,7 @@ static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev)
 	return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int atmel_hlcdc_dc_drm_suspend(struct device *dev)
 {
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
@ -873,9 +873,10 @@ static void drm_dp_destroy_port(struct kref *kref)
|
|||
from an EDID retrieval */
|
||||
if (port->connector) {
|
||||
mutex_lock(&mgr->destroy_connector_lock);
|
||||
list_add(&port->connector->destroy_list, &mgr->destroy_connector_list);
|
||||
list_add(&port->next, &mgr->destroy_connector_list);
|
||||
mutex_unlock(&mgr->destroy_connector_lock);
|
||||
schedule_work(&mgr->destroy_connector_work);
|
||||
return;
|
||||
}
|
||||
drm_dp_port_teardown_pdt(port, port->pdt);
|
||||
|
||||
|
@ -2669,7 +2670,7 @@ static void drm_dp_tx_work(struct work_struct *work)
|
|||
static void drm_dp_destroy_connector_work(struct work_struct *work)
|
||||
{
|
||||
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
|
||||
struct drm_connector *connector;
|
||||
struct drm_dp_mst_port *port;
|
||||
|
||||
/*
|
||||
* Not a regular list traverse as we have to drop the destroy
|
||||
|
@ -2678,15 +2679,21 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
|
|||
*/
|
||||
for (;;) {
|
||||
mutex_lock(&mgr->destroy_connector_lock);
|
||||
connector = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_connector, destroy_list);
|
||||
if (!connector) {
|
||||
port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
|
||||
if (!port) {
|
||||
mutex_unlock(&mgr->destroy_connector_lock);
|
||||
break;
|
||||
}
|
||||
list_del(&connector->destroy_list);
|
||||
list_del(&port->next);
|
||||
mutex_unlock(&mgr->destroy_connector_lock);
|
||||
|
||||
mgr->cbs->destroy_connector(mgr, connector);
|
||||
mgr->cbs->destroy_connector(mgr, port->connector);
|
||||
|
||||
drm_dp_port_teardown_pdt(port, port->pdt);
|
||||
|
||||
if (!port->input && port->vcpi.vcpi > 0)
|
||||
drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
|
||||
kfree(port);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -1051,34 +1051,15 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
|
|||
const union child_device_config *p_child;
|
||||
union child_device_config *child_dev_ptr;
|
||||
int i, child_device_num, count;
|
||||
u8 expected_size;
|
||||
u16 block_size;
|
||||
u16 block_size;
|
||||
|
||||
p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
|
||||
if (!p_defs) {
|
||||
DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
|
||||
return;
|
||||
}
|
||||
if (bdb->version < 195) {
|
||||
expected_size = 33;
|
||||
} else if (bdb->version == 195) {
|
||||
expected_size = 37;
|
||||
} else if (bdb->version <= 197) {
|
||||
expected_size = 38;
|
||||
} else {
|
||||
expected_size = 38;
|
||||
DRM_DEBUG_DRIVER("Expected child_device_config size for BDB version %u not known; assuming %u\n",
|
||||
expected_size, bdb->version);
|
||||
}
|
||||
|
||||
if (expected_size > sizeof(*p_child)) {
|
||||
DRM_ERROR("child_device_config cannot fit in p_child\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if (p_defs->child_dev_size != expected_size) {
|
||||
DRM_ERROR("Size mismatch; child_device_config size=%u (expected %u); bdb->version: %u\n",
|
||||
p_defs->child_dev_size, expected_size, bdb->version);
|
||||
if (p_defs->child_dev_size < sizeof(*p_child)) {
|
||||
DRM_ERROR("General definiton block child device size is too small.\n");
|
||||
return;
|
||||
}
|
||||
/* get the block size of general definitions */
|
||||
|
@ -1125,7 +1106,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
|
|||
|
||||
child_dev_ptr = dev_priv->vbt.child_dev + count;
|
||||
count++;
|
||||
memcpy(child_dev_ptr, p_child, p_defs->child_dev_size);
|
||||
memcpy(child_dev_ptr, p_child, sizeof(*p_child));
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
|
|
@ -95,9 +95,6 @@ static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
|
|||
324000, 432000, 540000 };
|
||||
static const int skl_rates[] = { 162000, 216000, 270000,
|
||||
324000, 432000, 540000 };
|
||||
static const int chv_rates[] = { 162000, 202500, 210000, 216000,
|
||||
243000, 270000, 324000, 405000,
|
||||
420000, 432000, 540000 };
|
||||
static const int default_rates[] = { 162000, 270000, 540000 };
|
||||
|
||||
/**
|
||||
|
@ -1210,6 +1207,19 @@ intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
|
|||
return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
|
||||
}
|
||||
|
||||
static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
|
||||
{
|
||||
/* WaDisableHBR2:skl */
|
||||
if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
|
||||
return false;
|
||||
|
||||
if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
|
||||
(INTEL_INFO(dev)->gen >= 9))
|
||||
return true;
|
||||
else
|
||||
return false;
|
||||
}
|
||||
|
||||
static int
|
||||
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
|
||||
{
|
||||
|
@ -1219,18 +1229,12 @@ intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
|
|||
} else if (IS_SKYLAKE(dev)) {
|
||||
*source_rates = skl_rates;
|
||||
return ARRAY_SIZE(skl_rates);
|
||||
} else if (IS_CHERRYVIEW(dev)) {
|
||||
*source_rates = chv_rates;
|
||||
return ARRAY_SIZE(chv_rates);
|
||||
}
|
||||
|
||||
*source_rates = default_rates;
|
||||
|
||||
if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
|
||||
/* WaDisableHBR2:skl */
|
||||
return (DP_LINK_BW_2_7 >> 3) + 1;
|
||||
else if (INTEL_INFO(dev)->gen >= 8 ||
|
||||
(IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
|
||||
/* This depends on the fact that 5.4 is last value in the array */
|
||||
if (intel_dp_source_supports_hbr2(dev))
|
||||
return (DP_LINK_BW_5_4 >> 3) + 1;
|
||||
else
|
||||
return (DP_LINK_BW_2_7 >> 3) + 1;
|
||||
|
@ -3902,10 +3906,15 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
|
|||
}
|
||||
}
|
||||
|
||||
/* Training Pattern 3 support, both source and sink */
|
||||
/* Training Pattern 3 support, Intel platforms that support HBR2 alone
|
||||
* have support for TP3 hence that check is used along with dpcd check
|
||||
* to ensure TP3 can be enabled.
|
||||
* SKL < B0: due it's WaDisableHBR2 is the only exception where TP3 is
|
||||
* supported but still not enabled.
|
||||
*/
|
||||
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
|
||||
intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
|
||||
(IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
|
||||
intel_dp_source_supports_hbr2(dev)) {
|
||||
intel_dp->use_tps3 = true;
|
||||
DRM_DEBUG_KMS("Displayport TPS3 supported\n");
|
||||
} else
|
||||
|
|
|
@@ -1003,6 +1003,8 @@ static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
 		ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
 		if (ret)
 			goto unpin_ctx_obj;
+
+		ctx_obj->dirty = true;
 	}
 
 	return ret;
@@ -79,6 +79,11 @@ static void radeon_hotplug_work_func(struct work_struct *work)
 	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct drm_connector *connector;
 
+	/* we can race here at startup, some boards seem to trigger
+	 * hotplug irqs when they shouldn't. */
+	if (!rdev->mode_info.mode_config_initialized)
+		return;
+
 	mutex_lock(&mode_config->mutex);
 	if (mode_config->num_connector) {
 		list_for_each_entry(connector, &mode_config->connector_list, head)
@@ -814,7 +814,7 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
 			printk(KERN_ERR MOD
 			       "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
 			       CQE_STATUS(&cqe), CQE_QPID(&cqe));
-			ret = -EINVAL;
+			wc->status = IB_WC_FATAL_ERR;
 		}
 	}
 out:
@@ -246,7 +246,7 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
 			 * convert it to descriptor.
 			 */
 			if (!button->gpiod && gpio_is_valid(button->gpio)) {
-				unsigned flags = 0;
+				unsigned flags = GPIOF_IN;
 
 				if (button->active_low)
 					flags |= GPIOF_ACTIVE_LOW;
@ -68,7 +68,9 @@ static struct irq_chip crossbar_chip = {
|
|||
.irq_mask = irq_chip_mask_parent,
|
||||
.irq_unmask = irq_chip_unmask_parent,
|
||||
.irq_retrigger = irq_chip_retrigger_hierarchy,
|
||||
.irq_set_wake = irq_chip_set_wake_parent,
|
||||
.irq_set_type = irq_chip_set_type_parent,
|
||||
.flags = IRQCHIP_MASK_ON_SUSPEND |
|
||||
IRQCHIP_SKIP_SET_WAKE,
|
||||
#ifdef CONFIG_SMP
|
||||
.irq_set_affinity = irq_chip_set_affinity_parent,
|
||||
#endif
|
||||
|
|
|
@@ -240,7 +240,7 @@ config DVB_SI21XX
 
 config DVB_TS2020
 	tristate "Montage Tehnology TS2020 based tuners"
-	depends on DVB_CORE
+	depends on DVB_CORE && I2C
 	select REGMAP_I2C
 	default m if !MEDIA_SUBDRV_AUTOSELECT
 	help
@@ -2,6 +2,7 @@ config VIDEO_COBALT
 	tristate "Cisco Cobalt support"
 	depends on VIDEO_V4L2 && I2C && MEDIA_CONTROLLER
 	depends on PCI_MSI && MTD_COMPLEX_MAPPINGS && GPIOLIB
+	depends on SND
 	select I2C_ALGOBIT
 	select VIDEO_ADV7604
 	select VIDEO_ADV7511
@@ -139,7 +139,7 @@ done:
 	   also know about dropped frames. */
 	cb->vb.v4l2_buf.sequence = s->sequence++;
 	vb2_buffer_done(&cb->vb, (skip || s->unstable_frame) ?
-			VB2_BUF_STATE_QUEUED : VB2_BUF_STATE_DONE);
+			VB2_BUF_STATE_REQUEUEING : VB2_BUF_STATE_DONE);
 }
 
 irqreturn_t cobalt_irq_handler(int irq, void *dev_id)
@@ -130,10 +130,11 @@ err:
 
 int mantis_dma_init(struct mantis_pci *mantis)
 {
-	int err = 0;
+	int err;
 
 	dprintk(MANTIS_DEBUG, 1, "Mantis DMA init");
-	if (mantis_alloc_buffers(mantis) < 0) {
+	err = mantis_alloc_buffers(mantis);
+	if (err < 0) {
 		dprintk(MANTIS_ERROR, 1, "Error allocating DMA buffer");
 
 		/* Stop RISC Engine */
@ -184,125 +184,9 @@ out:
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
static struct ir_raw_timings_manchester ir_rc5_timings = {
|
||||
.leader = RC5_UNIT,
|
||||
.pulse_space_start = 0,
|
||||
.clock = RC5_UNIT,
|
||||
.trailer_space = RC5_UNIT * 10,
|
||||
};
|
||||
|
||||
static struct ir_raw_timings_manchester ir_rc5x_timings[2] = {
|
||||
{
|
||||
.leader = RC5_UNIT,
|
||||
.pulse_space_start = 0,
|
||||
.clock = RC5_UNIT,
|
||||
.trailer_space = RC5X_SPACE,
|
||||
},
|
||||
{
|
||||
.clock = RC5_UNIT,
|
||||
.trailer_space = RC5_UNIT * 10,
|
||||
},
|
||||
};
|
||||
|
||||
static struct ir_raw_timings_manchester ir_rc5_sz_timings = {
|
||||
.leader = RC5_UNIT,
|
||||
.pulse_space_start = 0,
|
||||
.clock = RC5_UNIT,
|
||||
.trailer_space = RC5_UNIT * 10,
|
||||
};
|
||||
|
||||
static int ir_rc5_validate_filter(const struct rc_scancode_filter *scancode,
|
||||
unsigned int important_bits)
|
||||
{
|
||||
/* all important bits of scancode should be set in mask */
|
||||
if (~scancode->mask & important_bits)
|
||||
return -EINVAL;
|
||||
/* extra bits in mask should be zero in data */
|
||||
if (scancode->mask & scancode->data & ~important_bits)
|
||||
return -EINVAL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* ir_rc5_encode() - Encode a scancode as a stream of raw events
|
||||
*
|
||||
* @protocols: allowed protocols
|
||||
* @scancode: scancode filter describing scancode (helps distinguish between
|
||||
* protocol subtypes when scancode is ambiguous)
|
||||
* @events: array of raw ir events to write into
|
||||
* @max: maximum size of @events
|
||||
*
|
||||
* Returns: The number of events written.
|
||||
* -ENOBUFS if there isn't enough space in the array to fit the
|
||||
* encoding. In this case all @max events will have been written.
|
||||
* -EINVAL if the scancode is ambiguous or invalid.
|
||||
*/
|
||||
static int ir_rc5_encode(u64 protocols,
|
||||
const struct rc_scancode_filter *scancode,
|
||||
struct ir_raw_event *events, unsigned int max)
|
||||
{
|
||||
int ret;
|
||||
struct ir_raw_event *e = events;
|
||||
unsigned int data, xdata, command, commandx, system;
|
||||
|
||||
/* Detect protocol and convert scancode to raw data */
|
||||
if (protocols & RC_BIT_RC5 &&
|
||||
!ir_rc5_validate_filter(scancode, 0x1f7f)) {
|
||||
/* decode scancode */
|
||||
command = (scancode->data & 0x003f) >> 0;
|
||||
commandx = (scancode->data & 0x0040) >> 6;
|
||||
system = (scancode->data & 0x1f00) >> 8;
|
||||
/* encode data */
|
||||
data = !commandx << 12 | system << 6 | command;
|
||||
|
||||
/* Modulate the data */
|
||||
ret = ir_raw_gen_manchester(&e, max, &ir_rc5_timings, RC5_NBITS,
|
||||
data);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
} else if (protocols & RC_BIT_RC5X &&
|
||||
!ir_rc5_validate_filter(scancode, 0x1f7f3f)) {
|
||||
/* decode scancode */
|
||||
xdata = (scancode->data & 0x00003f) >> 0;
|
||||
command = (scancode->data & 0x003f00) >> 8;
|
||||
commandx = (scancode->data & 0x004000) >> 14;
|
||||
system = (scancode->data & 0x1f0000) >> 16;
|
||||
/* commandx and system overlap, bits must match when encoded */
|
||||
if (commandx == (system & 0x1))
|
||||
return -EINVAL;
|
||||
/* encode data */
|
||||
data = 1 << 18 | system << 12 | command << 6 | xdata;
|
||||
|
||||
/* Modulate the data */
|
||||
ret = ir_raw_gen_manchester(&e, max, &ir_rc5x_timings[0],
|
||||
CHECK_RC5X_NBITS,
|
||||
data >> (RC5X_NBITS-CHECK_RC5X_NBITS));
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
ret = ir_raw_gen_manchester(&e, max - (e - events),
|
||||
&ir_rc5x_timings[1],
|
||||
RC5X_NBITS - CHECK_RC5X_NBITS,
|
||||
data);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
} else if (protocols & RC_BIT_RC5_SZ &&
|
||||
!ir_rc5_validate_filter(scancode, 0x2fff)) {
|
||||
/* RC5-SZ scancode is raw enough for Manchester as it is */
|
||||
ret = ir_raw_gen_manchester(&e, max, &ir_rc5_sz_timings,
|
||||
RC5_SZ_NBITS, scancode->data & 0x2fff);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
} else {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return e - events;
|
||||
}
|
||||
|
||||
static struct ir_raw_handler rc5_handler = {
|
||||
.protocols = RC_BIT_RC5 | RC_BIT_RC5X | RC_BIT_RC5_SZ,
|
||||
.decode = ir_rc5_decode,
|
||||
.encode = ir_rc5_encode,
|
||||
};
|
||||
|
||||
static int __init ir_rc5_decode_init(void)
|
||||
|
|
|
@ -291,133 +291,11 @@ out:
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
static struct ir_raw_timings_manchester ir_rc6_timings[4] = {
|
||||
{
|
||||
.leader = RC6_PREFIX_PULSE,
|
||||
.pulse_space_start = 0,
|
||||
.clock = RC6_UNIT,
|
||||
.invert = 1,
|
||||
.trailer_space = RC6_PREFIX_SPACE,
|
||||
},
|
||||
{
|
||||
.clock = RC6_UNIT,
|
||||
.invert = 1,
|
||||
},
|
||||
{
|
||||
.clock = RC6_UNIT * 2,
|
||||
.invert = 1,
|
||||
},
|
||||
{
|
||||
.clock = RC6_UNIT,
|
||||
.invert = 1,
|
||||
.trailer_space = RC6_SUFFIX_SPACE,
|
||||
},
|
||||
};
|
||||
|
||||
static int ir_rc6_validate_filter(const struct rc_scancode_filter *scancode,
|
||||
unsigned int important_bits)
|
||||
{
|
||||
/* all important bits of scancode should be set in mask */
|
||||
if (~scancode->mask & important_bits)
|
||||
return -EINVAL;
|
||||
/* extra bits in mask should be zero in data */
|
||||
if (scancode->mask & scancode->data & ~important_bits)
|
||||
return -EINVAL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* ir_rc6_encode() - Encode a scancode as a stream of raw events
|
||||
*
|
||||
* @protocols: allowed protocols
|
||||
* @scancode: scancode filter describing scancode (helps distinguish between
|
||||
* protocol subtypes when scancode is ambiguous)
|
||||
* @events: array of raw ir events to write into
|
||||
* @max: maximum size of @events
|
||||
*
|
||||
* Returns: The number of events written.
|
||||
* -ENOBUFS if there isn't enough space in the array to fit the
|
||||
* encoding. In this case all @max events will have been written.
|
||||
* -EINVAL if the scancode is ambiguous or invalid.
|
||||
*/
|
||||
static int ir_rc6_encode(u64 protocols,
|
||||
const struct rc_scancode_filter *scancode,
|
||||
struct ir_raw_event *events, unsigned int max)
|
||||
{
|
||||
int ret;
|
||||
struct ir_raw_event *e = events;
|
||||
|
||||
if (protocols & RC_BIT_RC6_0 &&
|
||||
!ir_rc6_validate_filter(scancode, 0xffff)) {
|
||||
|
||||
/* Modulate the preamble */
|
||||
ret = ir_raw_gen_manchester(&e, max, &ir_rc6_timings[0], 0, 0);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
/* Modulate the header (Start Bit & Mode-0) */
|
||||
ret = ir_raw_gen_manchester(&e, max - (e - events),
|
||||
&ir_rc6_timings[1],
|
||||
RC6_HEADER_NBITS, (1 << 3));
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
/* Modulate Trailer Bit */
|
||||
ret = ir_raw_gen_manchester(&e, max - (e - events),
|
||||
&ir_rc6_timings[2], 1, 0);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
/* Modulate rest of the data */
|
||||
ret = ir_raw_gen_manchester(&e, max - (e - events),
|
||||
&ir_rc6_timings[3], RC6_0_NBITS,
|
||||
scancode->data);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
} else if (protocols & (RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 |
|
||||
RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE) &&
|
||||
!ir_rc6_validate_filter(scancode, 0x8fffffff)) {
|
||||
|
||||
/* Modulate the preamble */
|
||||
ret = ir_raw_gen_manchester(&e, max, &ir_rc6_timings[0], 0, 0);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
/* Modulate the header (Start Bit & Header-version 6 */
|
||||
ret = ir_raw_gen_manchester(&e, max - (e - events),
|
||||
&ir_rc6_timings[1],
|
||||
RC6_HEADER_NBITS, (1 << 3 | 6));
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
/* Modulate Trailer Bit */
|
||||
ret = ir_raw_gen_manchester(&e, max - (e - events),
|
||||
&ir_rc6_timings[2], 1, 0);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
/* Modulate rest of the data */
|
||||
ret = ir_raw_gen_manchester(&e, max - (e - events),
|
||||
&ir_rc6_timings[3],
|
||||
fls(scancode->mask),
|
||||
scancode->data);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
} else {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return e - events;
|
||||
}
|
||||
|
||||
static struct ir_raw_handler rc6_handler = {
|
||||
.protocols = RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 |
|
||||
RC_BIT_RC6_6A_24 | RC_BIT_RC6_6A_32 |
|
||||
RC_BIT_RC6_MCE,
|
||||
.decode = ir_rc6_decode,
|
||||
.encode = ir_rc6_encode,
|
||||
};
|
||||
|
||||
static int __init ir_rc6_decode_init(void)
|
||||
|
|
|
@ -526,130 +526,6 @@ static int nvt_set_tx_carrier(struct rc_dev *dev, u32 carrier)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int nvt_write_wakeup_codes(struct rc_dev *dev,
|
||||
const u8 *wakeup_sample_buf, int count)
|
||||
{
|
||||
int i = 0;
|
||||
u8 reg, reg_learn_mode;
|
||||
unsigned long flags;
|
||||
struct nvt_dev *nvt = dev->priv;
|
||||
|
||||
nvt_dbg_wake("writing wakeup samples");
|
||||
|
||||
reg = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);
|
||||
reg_learn_mode = reg & ~CIR_WAKE_IRCON_MODE0;
|
||||
reg_learn_mode |= CIR_WAKE_IRCON_MODE1;
|
||||
|
||||
/* Lock the learn area to prevent racing with wake-isr */
|
||||
spin_lock_irqsave(&nvt->nvt_lock, flags);
|
||||
|
||||
/* Enable fifo writes */
|
||||
nvt_cir_wake_reg_write(nvt, reg_learn_mode, CIR_WAKE_IRCON);
|
||||
|
||||
/* Clear cir wake rx fifo */
|
||||
nvt_clear_cir_wake_fifo(nvt);
|
||||
|
||||
if (count > WAKE_FIFO_LEN) {
|
||||
nvt_dbg_wake("HW FIFO too small for all wake samples");
|
||||
count = WAKE_FIFO_LEN;
|
||||
}
|
||||
|
||||
if (count)
|
||||
pr_info("Wake samples (%d) =", count);
|
||||
else
|
||||
pr_info("Wake sample fifo cleared");
|
||||
|
||||
/* Write wake samples to fifo */
|
||||
for (i = 0; i < count; i++) {
|
||||
pr_cont(" %02x", wakeup_sample_buf[i]);
|
||||
nvt_cir_wake_reg_write(nvt, wakeup_sample_buf[i],
|
||||
CIR_WAKE_WR_FIFO_DATA);
|
||||
}
|
||||
pr_cont("\n");
|
||||
|
||||
/* Switch cir to wakeup mode and disable fifo writing */
|
||||
nvt_cir_wake_reg_write(nvt, reg, CIR_WAKE_IRCON);
|
||||
|
||||
/* Set number of bytes needed for wake */
|
||||
nvt_cir_wake_reg_write(nvt, count ? count :
|
||||
CIR_WAKE_FIFO_CMP_BYTES,
|
||||
CIR_WAKE_FIFO_CMP_DEEP);
|
||||
|
||||
spin_unlock_irqrestore(&nvt->nvt_lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int nvt_ir_raw_set_wakeup_filter(struct rc_dev *dev,
|
||||
struct rc_scancode_filter *sc_filter)
|
||||
{
|
||||
u8 *reg_buf;
|
||||
u8 buf_val;
|
||||
int i, ret, count;
|
||||
unsigned int val;
|
||||
struct ir_raw_event *raw;
|
||||
bool complete;
|
||||
|
||||
/* Require both mask and data to be set before actually committing */
|
||||
if (!sc_filter->mask || !sc_filter->data)
|
||||
return 0;
|
||||
|
||||
raw = kmalloc_array(WAKE_FIFO_LEN, sizeof(*raw), GFP_KERNEL);
|
||||
if (!raw)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = ir_raw_encode_scancode(dev->enabled_wakeup_protocols, sc_filter,
|
||||
raw, WAKE_FIFO_LEN);
|
||||
complete = (ret != -ENOBUFS);
|
||||
if (!complete)
|
||||
ret = WAKE_FIFO_LEN;
|
||||
else if (ret < 0)
|
||||
goto out_raw;
|
||||
|
||||
reg_buf = kmalloc_array(WAKE_FIFO_LEN, sizeof(*reg_buf), GFP_KERNEL);
|
||||
if (!reg_buf) {
|
||||
ret = -ENOMEM;
|
||||
goto out_raw;
|
||||
}
|
||||
|
||||
/* Inspect the ir samples */
|
||||
for (i = 0, count = 0; i < ret && count < WAKE_FIFO_LEN; ++i) {
|
||||
val = NS_TO_US((raw[i]).duration) / SAMPLE_PERIOD;
|
||||
|
||||
/* Split too large values into several smaller ones */
|
||||
while (val > 0 && count < WAKE_FIFO_LEN) {
|
||||
|
||||
/* Skip last value for better comparison tolerance */
|
||||
if (complete && i == ret - 1 && val < BUF_LEN_MASK)
|
||||
break;
|
||||
|
||||
/* Clamp values to BUF_LEN_MASK at most */
|
||||
buf_val = (val > BUF_LEN_MASK) ? BUF_LEN_MASK : val;
|
||||
|
||||
reg_buf[count] = buf_val;
|
||||
val -= buf_val;
|
||||
if ((raw[i]).pulse)
|
||||
reg_buf[count] |= BUF_PULSE_BIT;
|
||||
count++;
|
||||
}
|
||||
}
|
||||
|
||||
ret = nvt_write_wakeup_codes(dev, reg_buf, count);
|
||||
|
||||
kfree(reg_buf);
|
||||
out_raw:
|
||||
kfree(raw);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Dummy implementation. nuvoton is agnostic to the protocol used */
|
||||
static int nvt_ir_raw_change_wakeup_protocol(struct rc_dev *dev,
|
||||
u64 *rc_type)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* nvt_tx_ir
|
||||
*
|
||||
|
@ -1167,14 +1043,11 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
|
|||
/* Set up the rc device */
|
||||
rdev->priv = nvt;
|
||||
rdev->driver_type = RC_DRIVER_IR_RAW;
|
||||
rdev->encode_wakeup = true;
|
||||
rdev->allowed_protocols = RC_BIT_ALL;
|
||||
rdev->open = nvt_open;
|
||||
rdev->close = nvt_close;
|
||||
rdev->tx_ir = nvt_tx_ir;
|
||||
rdev->s_tx_carrier = nvt_set_tx_carrier;
|
||||
rdev->s_wakeup_filter = nvt_ir_raw_set_wakeup_filter;
|
||||
rdev->change_wakeup_protocol = nvt_ir_raw_change_wakeup_protocol;
|
||||
rdev->input_name = "Nuvoton w836x7hg Infrared Remote Transceiver";
|
||||
rdev->input_phys = "nuvoton/cir0";
|
||||
rdev->input_id.bustype = BUS_HOST;
|
||||
|
|
|
@@ -63,7 +63,6 @@ static int debug;
  */
 #define TX_BUF_LEN 256
 #define RX_BUF_LEN 32
-#define WAKE_FIFO_LEN 67
 
 struct nvt_dev {
 	struct pnp_dev *pdev;
@ -25,8 +25,6 @@ struct ir_raw_handler {

u64 protocols; /* which are handled by this handler */
int (*decode)(struct rc_dev *dev, struct ir_raw_event event);
int (*encode)(u64 protocols, const struct rc_scancode_filter *scancode,
struct ir_raw_event *events, unsigned int max);

/* These two should only be used by the lirc decoder */
int (*raw_register)(struct rc_dev *dev);

@ -152,44 +150,10 @@ static inline bool is_timing_event(struct ir_raw_event ev)
#define TO_US(duration) DIV_ROUND_CLOSEST((duration), 1000)
#define TO_STR(is_pulse) ((is_pulse) ? "pulse" : "space")

/* functions for IR encoders */

static inline void init_ir_raw_event_duration(struct ir_raw_event *ev,
unsigned int pulse,
u32 duration)
{
init_ir_raw_event(ev);
ev->duration = duration;
ev->pulse = pulse;
}

/**
* struct ir_raw_timings_manchester - Manchester coding timings
* @leader: duration of leader pulse (if any) 0 if continuing
* existing signal (see @pulse_space_start)
* @pulse_space_start: 1 for starting with pulse (0 for starting with space)
* @clock: duration of each pulse/space in ns
* @invert: if set clock logic is inverted
* (0 = space + pulse, 1 = pulse + space)
* @trailer_space: duration of trailer space in ns
*/
struct ir_raw_timings_manchester {
unsigned int leader;
unsigned int pulse_space_start:1;
unsigned int clock;
unsigned int invert:1;
unsigned int trailer_space;
};

int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
const struct ir_raw_timings_manchester *timings,
unsigned int n, unsigned int data);

/*
* Routines from rc-raw.c to be used internally and by decoders
*/
u64 ir_raw_get_allowed_protocols(void);
u64 ir_raw_get_encode_protocols(void);
int ir_raw_event_register(struct rc_dev *dev);
void ir_raw_event_unregister(struct rc_dev *dev);
int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler);

@ -30,7 +30,6 @@ static LIST_HEAD(ir_raw_client_list);
static DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);
static u64 available_protocols;
static u64 encode_protocols;

static int ir_raw_event_thread(void *data)
{

@ -241,146 +240,12 @@ ir_raw_get_allowed_protocols(void)
return protocols;
}

/* used internally by the sysfs interface */
u64
ir_raw_get_encode_protocols(void)
{
u64 protocols;

mutex_lock(&ir_raw_handler_lock);
protocols = encode_protocols;
mutex_unlock(&ir_raw_handler_lock);
return protocols;
}

static int change_protocol(struct rc_dev *dev, u64 *rc_type)
{
/* the caller will update dev->enabled_protocols */
return 0;
}

/**
* ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation.
* @ev: Pointer to pointer to next free event. *@ev is incremented for
* each raw event filled.
* @max: Maximum number of raw events to fill.
* @timings: Manchester modulation timings.
* @n: Number of bits of data.
* @data: Data bits to encode.
*
* Encodes the @n least significant bits of @data using Manchester (bi-phase)
* modulation with the timing characteristics described by @timings, writing up
* to @max raw IR events using the *@ev pointer.
*
* Returns: 0 on success.
* -ENOBUFS if there isn't enough space in the array to fit the
* full encoded data. In this case all @max events will have been
* written.
*/
int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
const struct ir_raw_timings_manchester *timings,
unsigned int n, unsigned int data)
{
bool need_pulse;
unsigned int i;
int ret = -ENOBUFS;

i = 1 << (n - 1);

if (timings->leader) {
if (!max--)
return ret;
if (timings->pulse_space_start) {
init_ir_raw_event_duration((*ev)++, 1, timings->leader);

if (!max--)
return ret;
init_ir_raw_event_duration((*ev), 0, timings->leader);
} else {
init_ir_raw_event_duration((*ev), 1, timings->leader);
}
i >>= 1;
} else {
/* continue existing signal */
--(*ev);
}
/* from here on *ev will point to the last event rather than the next */

while (n && i > 0) {
need_pulse = !(data & i);
if (timings->invert)
need_pulse = !need_pulse;
if (need_pulse == !!(*ev)->pulse) {
(*ev)->duration += timings->clock;
} else {
if (!max--)
goto nobufs;
init_ir_raw_event_duration(++(*ev), need_pulse,
timings->clock);
}

if (!max--)
goto nobufs;
init_ir_raw_event_duration(++(*ev), !need_pulse,
timings->clock);
i >>= 1;
}

if (timings->trailer_space) {
if (!(*ev)->pulse)
(*ev)->duration += timings->trailer_space;
else if (!max--)
goto nobufs;
else
init_ir_raw_event_duration(++(*ev), 0,
timings->trailer_space);
}

ret = 0;
nobufs:
/* point to the next event rather than last event before returning */
++(*ev);
return ret;
}
EXPORT_SYMBOL(ir_raw_gen_manchester);

/**
* ir_raw_encode_scancode() - Encode a scancode as raw events
*
* @protocols: permitted protocols
* @scancode: scancode filter describing a single scancode
* @events: array of raw events to write into
* @max: max number of raw events
*
* Attempts to encode the scancode as raw events.
*
* Returns: The number of events written.
* -ENOBUFS if there isn't enough space in the array to fit the
* encoding. In this case all @max events will have been written.
* -EINVAL if the scancode is ambiguous or invalid, or if no
* compatible encoder was found.
*/
int ir_raw_encode_scancode(u64 protocols,
const struct rc_scancode_filter *scancode,
struct ir_raw_event *events, unsigned int max)
{
struct ir_raw_handler *handler;
int ret = -EINVAL;

mutex_lock(&ir_raw_handler_lock);
list_for_each_entry(handler, &ir_raw_handler_list, list) {
if (handler->protocols & protocols && handler->encode) {
ret = handler->encode(protocols, scancode, events, max);
if (ret >= 0 || ret == -ENOBUFS)
break;
}
}
mutex_unlock(&ir_raw_handler_lock);

return ret;
}
EXPORT_SYMBOL(ir_raw_encode_scancode);

/*
* Used to (un)register raw event clients
*/

@ -463,8 +328,6 @@ int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
list_for_each_entry(raw, &ir_raw_client_list, list)
ir_raw_handler->raw_register(raw->dev);
available_protocols |= ir_raw_handler->protocols;
if (ir_raw_handler->encode)
encode_protocols |= ir_raw_handler->protocols;
mutex_unlock(&ir_raw_handler_lock);

return 0;

@ -481,8 +344,6 @@ void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
list_for_each_entry(raw, &ir_raw_client_list, list)
ir_raw_handler->raw_unregister(raw->dev);
available_protocols &= ~ir_raw_handler->protocols;
if (ir_raw_handler->encode)
encode_protocols &= ~ir_raw_handler->protocols;
mutex_unlock(&ir_raw_handler_lock);
}
EXPORT_SYMBOL(ir_raw_handler_unregister);

@ -26,7 +26,6 @@
#include <linux/device.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <media/rc-core.h>

#define DRIVER_NAME "rc-loopback"

@ -177,39 +176,6 @@ static int loop_set_carrier_report(struct rc_dev *dev, int enable)
return 0;
}

static int loop_set_wakeup_filter(struct rc_dev *dev,
struct rc_scancode_filter *sc_filter)
{
static const unsigned int max = 512;
struct ir_raw_event *raw;
int ret;
int i;

/* fine to disable filter */
if (!sc_filter->mask)
return 0;

/* encode the specified filter and loop it back */
raw = kmalloc_array(max, sizeof(*raw), GFP_KERNEL);
ret = ir_raw_encode_scancode(dev->enabled_wakeup_protocols, sc_filter,
raw, max);
/* still loop back the partial raw IR even if it's incomplete */
if (ret == -ENOBUFS)
ret = max;
if (ret >= 0) {
/* do the loopback */
for (i = 0; i < ret; ++i)
ir_raw_event_store(dev, &raw[i]);
ir_raw_event_handle(dev);

ret = 0;
}

kfree(raw);

return ret;
}

static int __init loop_init(void)
{
struct rc_dev *rc;

@ -229,7 +195,6 @@ static int __init loop_init(void)
rc->map_name = RC_MAP_EMPTY;
rc->priv = &loopdev;
rc->driver_type = RC_DRIVER_IR_RAW;
rc->encode_wakeup = true;
rc->allowed_protocols = RC_BIT_ALL;
rc->timeout = 100 * 1000 * 1000; /* 100 ms */
rc->min_timeout = 1;

@ -244,7 +209,6 @@ static int __init loop_init(void)
rc->s_idle = loop_set_idle;
rc->s_learning_mode = loop_set_learning_mode;
rc->s_carrier_report = loop_set_carrier_report;
rc->s_wakeup_filter = loop_set_wakeup_filter;

loopdev.txmask = RXMASK_REGULAR;
loopdev.txcarrier = 36000;

@ -865,8 +865,6 @@ static ssize_t show_protocols(struct device *device,
|
|||
} else {
|
||||
enabled = dev->enabled_wakeup_protocols;
|
||||
allowed = dev->allowed_wakeup_protocols;
|
||||
if (dev->encode_wakeup && !allowed)
|
||||
allowed = ir_raw_get_encode_protocols();
|
||||
}
|
||||
|
||||
mutex_unlock(&dev->lock);
|
||||
|
@ -1408,16 +1406,13 @@ int rc_register_device(struct rc_dev *dev)
|
|||
path ? path : "N/A");
|
||||
kfree(path);
|
||||
|
||||
if (dev->driver_type == RC_DRIVER_IR_RAW || dev->encode_wakeup) {
|
||||
if (dev->driver_type == RC_DRIVER_IR_RAW) {
|
||||
/* Load raw decoders, if they aren't already */
|
||||
if (!raw_init) {
|
||||
IR_dprintk(1, "Loading raw decoders\n");
|
||||
ir_raw_init();
|
||||
raw_init = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (dev->driver_type == RC_DRIVER_IR_RAW) {
|
||||
/* calls ir_register_device so unlock mutex here*/
|
||||
mutex_unlock(&dev->lock);
|
||||
rc = ir_raw_event_register(dev);
|
||||
|
|
|
@ -715,6 +715,7 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
|
|||
break;
|
||||
case VB2_BUF_STATE_PREPARING:
|
||||
case VB2_BUF_STATE_DEQUEUED:
|
||||
case VB2_BUF_STATE_REQUEUEING:
|
||||
/* nothing */
|
||||
break;
|
||||
}
|
||||
|
@ -1182,7 +1183,8 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
|
|||
|
||||
if (WARN_ON(state != VB2_BUF_STATE_DONE &&
|
||||
state != VB2_BUF_STATE_ERROR &&
|
||||
state != VB2_BUF_STATE_QUEUED))
|
||||
state != VB2_BUF_STATE_QUEUED &&
|
||||
state != VB2_BUF_STATE_REQUEUEING))
|
||||
state = VB2_BUF_STATE_ERROR;
|
||||
|
||||
#ifdef CONFIG_VIDEO_ADV_DEBUG
|
||||
|
@ -1199,22 +1201,30 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
|
|||
for (plane = 0; plane < vb->num_planes; ++plane)
|
||||
call_void_memop(vb, finish, vb->planes[plane].mem_priv);
|
||||
|
||||
/* Add the buffer to the done buffers list */
|
||||
spin_lock_irqsave(&q->done_lock, flags);
|
||||
vb->state = state;
|
||||
if (state != VB2_BUF_STATE_QUEUED)
|
||||
if (state == VB2_BUF_STATE_QUEUED ||
|
||||
state == VB2_BUF_STATE_REQUEUEING) {
|
||||
vb->state = VB2_BUF_STATE_QUEUED;
|
||||
} else {
|
||||
/* Add the buffer to the done buffers list */
|
||||
list_add_tail(&vb->done_entry, &q->done_list);
|
||||
vb->state = state;
|
||||
}
|
||||
atomic_dec(&q->owned_by_drv_count);
|
||||
spin_unlock_irqrestore(&q->done_lock, flags);
|
||||
|
||||
if (state == VB2_BUF_STATE_QUEUED) {
|
||||
switch (state) {
|
||||
case VB2_BUF_STATE_QUEUED:
|
||||
return;
|
||||
case VB2_BUF_STATE_REQUEUEING:
|
||||
if (q->start_streaming_called)
|
||||
__enqueue_in_driver(vb);
|
||||
return;
|
||||
default:
|
||||
/* Inform any processes that may be waiting for buffers */
|
||||
wake_up(&q->done_wq);
|
||||
break;
|
||||
}
|
||||
|
||||
/* Inform any processes that may be waiting for buffers */
|
||||
wake_up(&q->done_wq);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(vb2_buffer_done);
|
||||
|
||||
|
@ -1244,19 +1254,19 @@ EXPORT_SYMBOL_GPL(vb2_discard_done);
|
|||
|
||||
static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
|
||||
{
|
||||
static bool __check_once __read_mostly;
|
||||
static bool check_once;
|
||||
|
||||
if (__check_once)
|
||||
if (check_once)
|
||||
return;
|
||||
|
||||
__check_once = true;
|
||||
__WARN();
|
||||
check_once = true;
|
||||
WARN_ON(1);
|
||||
|
||||
pr_warn_once("use of bytesused == 0 is deprecated and will be removed in the future,\n");
|
||||
pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n");
|
||||
if (vb->vb2_queue->allow_zero_bytesused)
|
||||
pr_warn_once("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
|
||||
pr_warn("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
|
||||
else
|
||||
pr_warn_once("use the actual size instead.\n");
|
||||
pr_warn("use the actual size instead.\n");
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -5174,7 +5174,7 @@ static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
|
|||
struct device *dev = &adapter->pdev->dev;
|
||||
int status;
|
||||
|
||||
if (lancer_chip(adapter) || BEx_chip(adapter))
|
||||
if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
|
||||
return;
|
||||
|
||||
if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
|
||||
|
@ -5221,7 +5221,7 @@ static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
|
|||
{
|
||||
struct be_adapter *adapter = netdev_priv(netdev);
|
||||
|
||||
if (lancer_chip(adapter) || BEx_chip(adapter))
|
||||
if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
|
||||
return;
|
||||
|
||||
if (adapter->vxlan_port != port)
|
||||
|
|
|
@ -2102,6 +2102,11 @@ int startup_gfar(struct net_device *ndev)
|
|||
/* Start Rx/Tx DMA and enable the interrupts */
|
||||
gfar_start(priv);
|
||||
|
||||
/* force link state update after mac reset */
|
||||
priv->oldlink = 0;
|
||||
priv->oldspeed = 0;
|
||||
priv->oldduplex = -1;
|
||||
|
||||
phy_start(priv->phydev);
|
||||
|
||||
enable_napi(priv);
|
||||
|
|
|
@ -216,7 +216,7 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
|
|||
|
||||
static inline bool fm10k_page_is_reserved(struct page *page)
|
||||
{
|
||||
return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
|
||||
return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
|
||||
}
|
||||
|
||||
static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
|
||||
|
|
|
@ -6566,7 +6566,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
|
|||
|
||||
static inline bool igb_page_is_reserved(struct page *page)
|
||||
{
|
||||
return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
|
||||
return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
|
||||
}
|
||||
|
||||
static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
|
||||
|
|
|
@ -1832,7 +1832,7 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
|
|||
|
||||
static inline bool ixgbe_page_is_reserved(struct page *page)
|
||||
{
|
||||
return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
|
||||
return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -765,7 +765,7 @@ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
|
|||
|
||||
static inline bool ixgbevf_page_is_reserved(struct page *page)
|
||||
{
|
||||
return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
|
||||
return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -952,9 +952,8 @@ static int ks8842_alloc_dma_bufs(struct net_device *netdev)
|
|||
|
||||
sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev,
|
||||
tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE);
|
||||
err = dma_mapping_error(adapter->dev,
|
||||
sg_dma_address(&tx_ctl->sg));
|
||||
if (err) {
|
||||
if (dma_mapping_error(adapter->dev, sg_dma_address(&tx_ctl->sg))) {
|
||||
err = -ENOMEM;
|
||||
sg_dma_address(&tx_ctl->sg) = 0;
|
||||
goto err;
|
||||
}
|
||||
|
|
|
@ -811,6 +811,7 @@ void phy_state_machine(struct work_struct *work)
|
|||
bool needs_aneg = false, do_suspend = false;
|
||||
enum phy_state old_state;
|
||||
int err = 0;
|
||||
int old_link;
|
||||
|
||||
mutex_lock(&phydev->lock);
|
||||
|
||||
|
@ -896,11 +897,18 @@ void phy_state_machine(struct work_struct *work)
|
|||
phydev->adjust_link(phydev->attached_dev);
|
||||
break;
|
||||
case PHY_RUNNING:
|
||||
/* Only register a CHANGE if we are
|
||||
* polling or ignoring interrupts
|
||||
/* Only register a CHANGE if we are polling or ignoring
|
||||
* interrupts and link changed since latest checking.
|
||||
*/
|
||||
if (!phy_interrupt_is_valid(phydev))
|
||||
phydev->state = PHY_CHANGELINK;
|
||||
if (!phy_interrupt_is_valid(phydev)) {
|
||||
old_link = phydev->link;
|
||||
err = phy_read_status(phydev);
|
||||
if (err)
|
||||
break;
|
||||
|
||||
if (old_link != phydev->link)
|
||||
phydev->state = PHY_CHANGELINK;
|
||||
}
|
||||
break;
|
||||
case PHY_CHANGELINK:
|
||||
err = phy_read_status(phydev);
|
||||
|
|
|
@ -91,19 +91,18 @@ static int lan911x_config_init(struct phy_device *phydev)
|
|||
}
|
||||
|
||||
/*
|
||||
* The LAN8710/LAN8720 requires a minimum of 2 link pulses within 64ms of each
|
||||
* other in order to set the ENERGYON bit and exit EDPD mode. If a link partner
|
||||
* does send the pulses within this interval, the PHY will remained powered
|
||||
* down.
|
||||
*
|
||||
* This workaround will manually toggle the PHY on/off upon calls to read_status
|
||||
* in order to generate link test pulses if the link is down. If a link partner
|
||||
* is present, it will respond to the pulses, which will cause the ENERGYON bit
|
||||
* to be set and will cause the EDPD mode to be exited.
|
||||
* The LAN87xx suffers from rare absence of the ENERGYON-bit when Ethernet cable
|
||||
* plugs in while LAN87xx is in Energy Detect Power-Down mode. This leads to
|
||||
* unstable detection of plugging in Ethernet cable.
|
||||
* This workaround disables Energy Detect Power-Down mode and waiting for
|
||||
* response on link pulses to detect presence of plugged Ethernet cable.
|
||||
* The Energy Detect Power-Down mode is enabled again in the end of procedure to
|
||||
* save approximately 220 mW of power if cable is unplugged.
|
||||
*/
|
||||
static int lan87xx_read_status(struct phy_device *phydev)
|
||||
{
|
||||
int err = genphy_read_status(phydev);
|
||||
int i;
|
||||
|
||||
if (!phydev->link) {
|
||||
/* Disable EDPD to wake up PHY */
|
||||
|
@ -116,8 +115,16 @@ static int lan87xx_read_status(struct phy_device *phydev)
|
|||
if (rc < 0)
|
||||
return rc;
|
||||
|
||||
/* Sleep 64 ms to allow ~5 link test pulses to be sent */
|
||||
msleep(64);
|
||||
/* Wait max 640 ms to detect energy */
|
||||
for (i = 0; i < 64; i++) {
|
||||
/* Sleep to allow link test pulses to be sent */
|
||||
msleep(10);
|
||||
rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
|
||||
if (rc < 0)
|
||||
return rc;
|
||||
if (rc & MII_LAN83C185_ENERGYON)
|
||||
break;
|
||||
}
|
||||
|
||||
/* Re-enable EDPD */
|
||||
rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
|
||||
|
@ -191,7 +198,7 @@ static struct phy_driver smsc_phy_driver[] = {
|
|||
|
||||
/* basic functions */
|
||||
.config_aneg = genphy_config_aneg,
|
||||
.read_status = genphy_read_status,
|
||||
.read_status = lan87xx_read_status,
|
||||
.config_init = smsc_phy_config_init,
|
||||
.soft_reset = smsc_phy_reset,
|
||||
|
||||
|
|
|
@ -269,9 +269,9 @@ static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
|
|||
static void ppp_ccp_closed(struct ppp *ppp);
|
||||
static struct compressor *find_compressor(int type);
|
||||
static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
|
||||
static struct ppp *ppp_create_interface(struct net *net, int unit, int *retp);
|
||||
static struct ppp *ppp_create_interface(struct net *net, int unit,
|
||||
struct file *file, int *retp);
|
||||
static void init_ppp_file(struct ppp_file *pf, int kind);
|
||||
static void ppp_shutdown_interface(struct ppp *ppp);
|
||||
static void ppp_destroy_interface(struct ppp *ppp);
|
||||
static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
|
||||
static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
|
||||
|
@ -392,8 +392,10 @@ static int ppp_release(struct inode *unused, struct file *file)
|
|||
file->private_data = NULL;
|
||||
if (pf->kind == INTERFACE) {
|
||||
ppp = PF_TO_PPP(pf);
|
||||
rtnl_lock();
|
||||
if (file == ppp->owner)
|
||||
ppp_shutdown_interface(ppp);
|
||||
unregister_netdevice(ppp->dev);
|
||||
rtnl_unlock();
|
||||
}
|
||||
if (atomic_dec_and_test(&pf->refcnt)) {
|
||||
switch (pf->kind) {
|
||||
|
@ -593,8 +595,10 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
|||
mutex_lock(&ppp_mutex);
|
||||
if (pf->kind == INTERFACE) {
|
||||
ppp = PF_TO_PPP(pf);
|
||||
rtnl_lock();
|
||||
if (file == ppp->owner)
|
||||
ppp_shutdown_interface(ppp);
|
||||
unregister_netdevice(ppp->dev);
|
||||
rtnl_unlock();
|
||||
}
|
||||
if (atomic_long_read(&file->f_count) < 2) {
|
||||
ppp_release(NULL, file);
|
||||
|
@ -838,11 +842,10 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
|
|||
/* Create a new ppp unit */
|
||||
if (get_user(unit, p))
|
||||
break;
|
||||
ppp = ppp_create_interface(net, unit, &err);
|
||||
ppp = ppp_create_interface(net, unit, file, &err);
|
||||
if (!ppp)
|
||||
break;
|
||||
file->private_data = &ppp->file;
|
||||
ppp->owner = file;
|
||||
err = -EFAULT;
|
||||
if (put_user(ppp->file.index, p))
|
||||
break;
|
||||
|
@ -916,6 +919,16 @@ static __net_init int ppp_init_net(struct net *net)
|
|||
static __net_exit void ppp_exit_net(struct net *net)
|
||||
{
|
||||
struct ppp_net *pn = net_generic(net, ppp_net_id);
|
||||
struct ppp *ppp;
|
||||
LIST_HEAD(list);
|
||||
int id;
|
||||
|
||||
rtnl_lock();
|
||||
idr_for_each_entry(&pn->units_idr, ppp, id)
|
||||
unregister_netdevice_queue(ppp->dev, &list);
|
||||
|
||||
unregister_netdevice_many(&list);
|
||||
rtnl_unlock();
|
||||
|
||||
idr_destroy(&pn->units_idr);
|
||||
}
|
||||
|
@ -1088,8 +1101,28 @@ static int ppp_dev_init(struct net_device *dev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void ppp_dev_uninit(struct net_device *dev)
|
||||
{
|
||||
struct ppp *ppp = netdev_priv(dev);
|
||||
struct ppp_net *pn = ppp_pernet(ppp->ppp_net);
|
||||
|
||||
ppp_lock(ppp);
|
||||
ppp->closing = 1;
|
||||
ppp_unlock(ppp);
|
||||
|
||||
mutex_lock(&pn->all_ppp_mutex);
|
||||
unit_put(&pn->units_idr, ppp->file.index);
|
||||
mutex_unlock(&pn->all_ppp_mutex);
|
||||
|
||||
ppp->owner = NULL;
|
||||
|
||||
ppp->file.dead = 1;
|
||||
wake_up_interruptible(&ppp->file.rwait);
|
||||
}
|
||||
|
||||
static const struct net_device_ops ppp_netdev_ops = {
|
||||
.ndo_init = ppp_dev_init,
|
||||
.ndo_uninit = ppp_dev_uninit,
|
||||
.ndo_start_xmit = ppp_start_xmit,
|
||||
.ndo_do_ioctl = ppp_net_ioctl,
|
||||
.ndo_get_stats64 = ppp_get_stats64,
|
||||
|
@ -2667,8 +2700,8 @@ ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
|
|||
* or if there is already a unit with the requested number.
|
||||
* unit == -1 means allocate a new number.
|
||||
*/
|
||||
static struct ppp *
|
||||
ppp_create_interface(struct net *net, int unit, int *retp)
|
||||
static struct ppp *ppp_create_interface(struct net *net, int unit,
|
||||
struct file *file, int *retp)
|
||||
{
|
||||
struct ppp *ppp;
|
||||
struct ppp_net *pn;
|
||||
|
@ -2688,6 +2721,7 @@ ppp_create_interface(struct net *net, int unit, int *retp)
|
|||
ppp->mru = PPP_MRU;
|
||||
init_ppp_file(&ppp->file, INTERFACE);
|
||||
ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */
|
||||
ppp->owner = file;
|
||||
for (i = 0; i < NUM_NP; ++i)
|
||||
ppp->npmode[i] = NPMODE_PASS;
|
||||
INIT_LIST_HEAD(&ppp->channels);
|
||||
|
@ -2775,34 +2809,6 @@ init_ppp_file(struct ppp_file *pf, int kind)
|
|||
init_waitqueue_head(&pf->rwait);
|
||||
}
|
||||
|
||||
/*
|
||||
* Take down a ppp interface unit - called when the owning file
|
||||
* (the one that created the unit) is closed or detached.
|
||||
*/
|
||||
static void ppp_shutdown_interface(struct ppp *ppp)
|
||||
{
|
||||
struct ppp_net *pn;
|
||||
|
||||
pn = ppp_pernet(ppp->ppp_net);
|
||||
mutex_lock(&pn->all_ppp_mutex);
|
||||
|
||||
/* This will call dev_close() for us. */
|
||||
ppp_lock(ppp);
|
||||
if (!ppp->closing) {
|
||||
ppp->closing = 1;
|
||||
ppp_unlock(ppp);
|
||||
unregister_netdev(ppp->dev);
|
||||
unit_put(&pn->units_idr, ppp->file.index);
|
||||
} else
|
||||
ppp_unlock(ppp);
|
||||
|
||||
ppp->file.dead = 1;
|
||||
ppp->owner = NULL;
|
||||
wake_up_interruptible(&ppp->file.rwait);
|
||||
|
||||
mutex_unlock(&pn->all_ppp_mutex);
|
||||
}
|
||||
|
||||
/*
|
||||
* Free the memory used by a ppp unit. This is only called once
|
||||
* there are no channels connected to the unit and no file structs
|
||||
|
|
|
@ -785,6 +785,7 @@ static const struct usb_device_id products[] = {
|
|||
{QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
|
||||
{QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
|
||||
{QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
|
||||
{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
|
||||
{QMI_FIXED_INTF(0x03f0, 0x581d, 4)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
|
||||
|
||||
/* 4. Gobi 1000 devices */
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
# PCI configuration
|
||||
#
|
||||
config PCI_BUS_ADDR_T_64BIT
|
||||
def_bool y if (ARCH_DMA_ADDR_T_64BIT || 64BIT)
|
||||
def_bool y if (ARCH_DMA_ADDR_T_64BIT || (64BIT && !PARISC))
|
||||
depends on PCI
|
||||
|
||||
config PCI_MSI
|
||||
|
|
|
@ -997,7 +997,12 @@ void set_pcie_port_type(struct pci_dev *pdev)
|
|||
else if (type == PCI_EXP_TYPE_UPSTREAM ||
|
||||
type == PCI_EXP_TYPE_DOWNSTREAM) {
|
||||
parent = pci_upstream_bridge(pdev);
|
||||
if (!parent->has_secondary_link)
|
||||
|
||||
/*
|
||||
* Usually there's an upstream device (Root Port or Switch
|
||||
* Downstream Port), but we can't assume one exists.
|
||||
*/
|
||||
if (parent && !parent->has_secondary_link)
|
||||
pdev->has_secondary_link = 1;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -39,7 +39,7 @@
|
|||
|
||||
#define DRV_NAME "fnic"
|
||||
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
|
||||
#define DRV_VERSION "1.6.0.17"
|
||||
#define DRV_VERSION "1.6.0.17a"
|
||||
#define PFX DRV_NAME ": "
|
||||
#define DFX DRV_NAME "%d: "
|
||||
|
||||
|
|
|
@ -425,6 +425,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
|
|||
unsigned long ptr;
|
||||
struct fc_rport_priv *rdata;
|
||||
spinlock_t *io_lock = NULL;
|
||||
int io_lock_acquired = 0;
|
||||
|
||||
if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
|
||||
return SCSI_MLQUEUE_HOST_BUSY;
|
||||
|
@ -518,6 +519,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
|
|||
spin_lock_irqsave(io_lock, flags);
|
||||
|
||||
/* initialize rest of io_req */
|
||||
io_lock_acquired = 1;
|
||||
io_req->port_id = rport->port_id;
|
||||
io_req->start_time = jiffies;
|
||||
CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
|
||||
|
@ -571,7 +573,7 @@ out:
|
|||
(((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc)));
|
||||
|
||||
/* if only we issued IO, will we have the io lock */
|
||||
if (CMD_FLAGS(sc) & FNIC_IO_INITIALIZED)
|
||||
if (io_lock_acquired)
|
||||
spin_unlock_irqrestore(io_lock, flags);
|
||||
|
||||
atomic_dec(&fnic->in_flight);
|
||||
|
|
|
@ -26,7 +26,6 @@
|
|||
#include <linux/blkdev.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/jiffies.h>
|
||||
#include <asm/unaligned.h>
|
||||
|
||||
#include <scsi/scsi.h>
|
||||
#include <scsi/scsi_cmnd.h>
|
||||
|
@ -2523,33 +2522,3 @@ void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq)
|
|||
}
|
||||
}
|
||||
EXPORT_SYMBOL(scsi_build_sense_buffer);
|
||||
|
||||
/**
|
||||
* scsi_set_sense_information - set the information field in a
|
||||
* formatted sense data buffer
|
||||
* @buf: Where to build sense data
|
||||
* @info: 64-bit information value to be set
|
||||
*
|
||||
**/
|
||||
void scsi_set_sense_information(u8 *buf, u64 info)
|
||||
{
|
||||
if ((buf[0] & 0x7f) == 0x72) {
|
||||
u8 *ucp, len;
|
||||
|
||||
len = buf[7];
|
||||
ucp = (char *)scsi_sense_desc_find(buf, len + 8, 0);
|
||||
if (!ucp) {
|
||||
buf[7] = len + 0xa;
|
||||
ucp = buf + 8 + len;
|
||||
}
|
||||
ucp[0] = 0;
|
||||
ucp[1] = 0xa;
|
||||
ucp[2] = 0x80; /* Valid bit */
|
||||
ucp[3] = 0;
|
||||
put_unaligned_be64(info, &ucp[4]);
|
||||
} else if ((buf[0] & 0x7f) == 0x70) {
|
||||
buf[0] |= 0x80;
|
||||
put_unaligned_be64(info, &buf[3]);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(scsi_set_sense_information);
|
||||
|
|
|
@ -217,15 +217,15 @@ static int sdev_runtime_suspend(struct device *dev)
|
|||
{
|
||||
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
|
||||
struct scsi_device *sdev = to_scsi_device(dev);
|
||||
int err;
|
||||
int err = 0;
|
||||
|
||||
err = blk_pre_runtime_suspend(sdev->request_queue);
|
||||
if (err)
|
||||
return err;
|
||||
if (pm && pm->runtime_suspend)
|
||||
if (pm && pm->runtime_suspend) {
|
||||
err = blk_pre_runtime_suspend(sdev->request_queue);
|
||||
if (err)
|
||||
return err;
|
||||
err = pm->runtime_suspend(dev);
|
||||
blk_post_runtime_suspend(sdev->request_queue, err);
|
||||
|
||||
blk_post_runtime_suspend(sdev->request_queue, err);
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -248,11 +248,11 @@ static int sdev_runtime_resume(struct device *dev)
|
|||
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
|
||||
int err = 0;
|
||||
|
||||
blk_pre_runtime_resume(sdev->request_queue);
|
||||
if (pm && pm->runtime_resume)
|
||||
if (pm && pm->runtime_resume) {
|
||||
blk_pre_runtime_resume(sdev->request_queue);
|
||||
err = pm->runtime_resume(dev);
|
||||
blk_post_runtime_resume(sdev->request_queue, err);
|
||||
|
||||
blk_post_runtime_resume(sdev->request_queue, err);
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
|
|
|
@ -968,9 +968,9 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
|
|||
cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;
|
||||
|
||||
conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
|
||||
if (hdr->flags & ISCSI_FLAG_CMD_READ) {
|
||||
if (hdr->flags & ISCSI_FLAG_CMD_READ)
|
||||
cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
|
||||
} else if (hdr->flags & ISCSI_FLAG_CMD_WRITE)
|
||||
else
|
||||
cmd->targ_xfer_tag = 0xFFFFFFFF;
|
||||
cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
|
||||
cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
|
||||
|
|
|
@ -457,8 +457,15 @@ void target_unregister_template(const struct target_core_fabric_ops *fo)
|
|||
if (!strcmp(t->tf_ops->name, fo->name)) {
|
||||
BUG_ON(atomic_read(&t->tf_access_cnt));
|
||||
list_del(&t->tf_list);
|
||||
mutex_unlock(&g_tf_lock);
|
||||
/*
|
||||
* Wait for any outstanding fabric se_deve_entry->rcu_head
|
||||
* callbacks to complete post kfree_rcu(), before allowing
|
||||
* fabric driver unload of TFO->module to proceed.
|
||||
*/
|
||||
rcu_barrier();
|
||||
kfree(t);
|
||||
break;
|
||||
return;
|
||||
}
|
||||
}
|
||||
mutex_unlock(&g_tf_lock);
|
||||
|
|
|
@ -84,8 +84,16 @@ void target_backend_unregister(const struct target_backend_ops *ops)
|
|||
list_for_each_entry(tb, &backend_list, list) {
|
||||
if (tb->ops == ops) {
|
||||
list_del(&tb->list);
|
||||
mutex_unlock(&backend_mutex);
|
||||
/*
|
||||
* Wait for any outstanding backend driver ->rcu_head
|
||||
* callbacks to complete post TBO->free_device() ->
|
||||
* call_rcu(), before allowing backend driver module
|
||||
* unload of target_backend_ops->owner to proceed.
|
||||
*/
|
||||
rcu_barrier();
|
||||
kfree(tb);
|
||||
break;
|
||||
return;
|
||||
}
|
||||
}
|
||||
mutex_unlock(&backend_mutex);
|
||||
|
|
|
@ -1203,17 +1203,13 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
|
|||
struct se_dev_entry *deve;
|
||||
struct se_session *sess = cmd->se_sess;
|
||||
struct se_node_acl *nacl;
|
||||
struct scsi_lun slun;
|
||||
unsigned char *buf;
|
||||
u32 lun_count = 0, offset = 8;
|
||||
|
||||
if (cmd->data_length < 16) {
|
||||
pr_warn("REPORT LUNS allocation length %u too small\n",
|
||||
cmd->data_length);
|
||||
return TCM_INVALID_CDB_FIELD;
|
||||
}
|
||||
__be32 len;
|
||||
|
||||
buf = transport_kmap_data_sg(cmd);
|
||||
if (!buf)
|
||||
if (cmd->data_length && !buf)
|
||||
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
|
||||
|
||||
/*
|
||||
|
@ -1221,11 +1217,9 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
|
|||
* coming via a target_core_mod PASSTHROUGH op, and not through
|
||||
* a $FABRIC_MOD. In that case, report LUN=0 only.
|
||||
*/
|
||||
if (!sess) {
|
||||
int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
|
||||
lun_count = 1;
|
||||
if (!sess)
|
||||
goto done;
|
||||
}
|
||||
|
||||
nacl = sess->se_node_acl;
|
||||
|
||||
rcu_read_lock();
|
||||
|
@ -1236,10 +1230,12 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
|
|||
* See SPC2-R20 7.19.
|
||||
*/
|
||||
lun_count++;
|
||||
if ((offset + 8) > cmd->data_length)
|
||||
if (offset >= cmd->data_length)
|
||||
continue;
|
||||
|
||||
int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
|
||||
int_to_scsilun(deve->mapped_lun, &slun);
|
||||
memcpy(buf + offset, &slun,
|
||||
min(8u, cmd->data_length - offset));
|
||||
offset += 8;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
@ -1248,12 +1244,22 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
|
|||
* See SPC3 r07, page 159.
|
||||
*/
|
||||
done:
|
||||
lun_count *= 8;
|
||||
buf[0] = ((lun_count >> 24) & 0xff);
|
||||
buf[1] = ((lun_count >> 16) & 0xff);
|
||||
buf[2] = ((lun_count >> 8) & 0xff);
|
||||
buf[3] = (lun_count & 0xff);
|
||||
transport_kunmap_data_sg(cmd);
|
||||
/*
|
||||
* If no LUNs are accessible, report virtual LUN 0.
|
||||
*/
|
||||
if (lun_count == 0) {
|
||||
int_to_scsilun(0, &slun);
|
||||
if (cmd->data_length > 8)
|
||||
memcpy(buf + offset, &slun,
|
||||
min(8u, cmd->data_length - offset));
|
||||
lun_count = 1;
|
||||
}
|
||||
|
||||
if (buf) {
|
||||
len = cpu_to_be32(lun_count * 8);
|
||||
memcpy(buf, &len, min_t(int, sizeof len, cmd->data_length));
|
||||
transport_kunmap_data_sg(cmd);
|
||||
}
|
||||
|
||||
target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8);
|
||||
return 0;
|
||||
|
|
|
@ -68,7 +68,7 @@ struct power_table {
|
|||
* registered cooling device.
|
||||
* @cpufreq_state: integer value representing the current state of cpufreq
|
||||
* cooling devices.
|
||||
* @cpufreq_val: integer value representing the absolute value of the clipped
|
||||
* @clipped_freq: integer value representing the absolute value of the clipped
|
||||
* frequency.
|
||||
* @max_level: maximum cooling level. One less than total number of valid
|
||||
* cpufreq frequencies.
|
||||
|
@ -91,7 +91,7 @@ struct cpufreq_cooling_device {
|
|||
int id;
|
||||
struct thermal_cooling_device *cool_dev;
|
||||
unsigned int cpufreq_state;
|
||||
unsigned int cpufreq_val;
|
||||
unsigned int clipped_freq;
|
||||
unsigned int max_level;
|
||||
unsigned int *freq_table; /* In descending order */
|
||||
struct cpumask allowed_cpus;
|
||||
|
@ -107,6 +107,9 @@ struct cpufreq_cooling_device {
|
|||
static DEFINE_IDR(cpufreq_idr);
|
||||
static DEFINE_MUTEX(cooling_cpufreq_lock);
|
||||
|
||||
static unsigned int cpufreq_dev_count;
|
||||
|
||||
static DEFINE_MUTEX(cooling_list_lock);
|
||||
static LIST_HEAD(cpufreq_dev_list);
|
||||
|
||||
/**
|
||||
|
@ -185,14 +188,14 @@ unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
|
|||
{
|
||||
struct cpufreq_cooling_device *cpufreq_dev;
|
||||
|
||||
mutex_lock(&cooling_cpufreq_lock);
|
||||
mutex_lock(&cooling_list_lock);
|
||||
list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
|
||||
if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) {
|
||||
mutex_unlock(&cooling_cpufreq_lock);
|
||||
mutex_unlock(&cooling_list_lock);
|
||||
return get_level(cpufreq_dev, freq);
|
||||
}
|
||||
}
|
||||
mutex_unlock(&cooling_cpufreq_lock);
|
||||
mutex_unlock(&cooling_list_lock);
|
||||
|
||||
pr_err("%s: cpu:%d not part of any cooling device\n", __func__, cpu);
|
||||
return THERMAL_CSTATE_INVALID;
|
||||
|
@ -215,29 +218,35 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
|
|||
unsigned long event, void *data)
|
||||
{
|
||||
struct cpufreq_policy *policy = data;
|
||||
unsigned long max_freq = 0;
|
||||
unsigned long clipped_freq;
|
||||
struct cpufreq_cooling_device *cpufreq_dev;
|
||||
|
||||
switch (event) {
|
||||
|
||||
case CPUFREQ_ADJUST:
|
||||
mutex_lock(&cooling_cpufreq_lock);
|
||||
list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
|
||||
if (!cpumask_test_cpu(policy->cpu,
|
||||
&cpufreq_dev->allowed_cpus))
|
||||
continue;
|
||||
|
||||
max_freq = cpufreq_dev->cpufreq_val;
|
||||
|
||||
if (policy->max != max_freq)
|
||||
cpufreq_verify_within_limits(policy, 0,
|
||||
max_freq);
|
||||
}
|
||||
mutex_unlock(&cooling_cpufreq_lock);
|
||||
break;
|
||||
default:
|
||||
if (event != CPUFREQ_ADJUST)
|
||||
return NOTIFY_DONE;
|
||||
|
||||
mutex_lock(&cooling_list_lock);
|
||||
list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
|
||||
if (!cpumask_test_cpu(policy->cpu, &cpufreq_dev->allowed_cpus))
|
||||
continue;
|
||||
|
||||
/*
|
||||
* policy->max is the maximum allowed frequency defined by user
|
||||
* and clipped_freq is the maximum that thermal constraints
|
||||
* allow.
|
||||
*
|
||||
* If clipped_freq is lower than policy->max, then we need to
|
||||
* readjust policy->max.
|
||||
*
|
||||
* But, if clipped_freq is greater than policy->max, we don't
|
||||
* need to do anything.
|
||||
*/
|
||||
clipped_freq = cpufreq_dev->clipped_freq;
|
||||
|
||||
if (policy->max > clipped_freq)
|
||||
cpufreq_verify_within_limits(policy, 0, clipped_freq);
|
||||
break;
|
||||
}
|
||||
mutex_unlock(&cooling_list_lock);
|
||||
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
|
@ -519,7 +528,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
|
|||
|
||||
clip_freq = cpufreq_device->freq_table[state];
|
||||
cpufreq_device->cpufreq_state = state;
|
||||
cpufreq_device->cpufreq_val = clip_freq;
|
||||
cpufreq_device->clipped_freq = clip_freq;
|
||||
|
||||
cpufreq_update_policy(cpu);
|
||||
|
||||
|
@ -861,17 +870,19 @@ __cpufreq_cooling_register(struct device_node *np,
|
|||
pr_debug("%s: freq:%u KHz\n", __func__, freq);
|
||||
}
|
||||
|
||||
cpufreq_dev->cpufreq_val = cpufreq_dev->freq_table[0];
|
||||
cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0];
|
||||
cpufreq_dev->cool_dev = cool_dev;
|
||||
|
||||
mutex_lock(&cooling_cpufreq_lock);
|
||||
|
||||
mutex_lock(&cooling_list_lock);
|
||||
list_add(&cpufreq_dev->node, &cpufreq_dev_list);
|
||||
mutex_unlock(&cooling_list_lock);
|
||||
|
||||
/* Register the notifier for first cpufreq cooling device */
|
||||
if (list_empty(&cpufreq_dev_list))
|
||||
if (!cpufreq_dev_count++)
|
||||
cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
|
||||
CPUFREQ_POLICY_NOTIFIER);
|
||||
list_add(&cpufreq_dev->node, &cpufreq_dev_list);
|
||||
|
||||
mutex_unlock(&cooling_cpufreq_lock);
|
||||
|
||||
return cool_dev;
|
||||
|
@ -1013,13 +1024,17 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
|
|||
return;
|
||||
|
||||
cpufreq_dev = cdev->devdata;
|
||||
mutex_lock(&cooling_cpufreq_lock);
|
||||
list_del(&cpufreq_dev->node);
|
||||
|
||||
/* Unregister the notifier for the last cpufreq cooling device */
|
||||
if (list_empty(&cpufreq_dev_list))
|
||||
mutex_lock(&cooling_cpufreq_lock);
|
||||
if (!--cpufreq_dev_count)
|
||||
cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
|
||||
CPUFREQ_POLICY_NOTIFIER);
|
||||
|
||||
mutex_lock(&cooling_list_lock);
|
||||
list_del(&cpufreq_dev->node);
|
||||
mutex_unlock(&cooling_list_lock);
|
||||
|
||||
mutex_unlock(&cooling_cpufreq_lock);
|
||||
|
||||
thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
|
||||
|
|
|
@ -334,7 +334,7 @@ static int allocate_power(struct thermal_zone_device *tz,
|
|||
max_allocatable_power, current_temp,
|
||||
(s32)control_temp - (s32)current_temp);
|
||||
|
||||
devm_kfree(&tz->device, req_power);
|
||||
kfree(req_power);
|
||||
unlock:
|
||||
mutex_unlock(&tz->lock);
|
||||
|
||||
|
@ -426,7 +426,7 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
params = devm_kzalloc(&tz->device, sizeof(*params), GFP_KERNEL);
|
||||
params = kzalloc(sizeof(*params), GFP_KERNEL);
|
||||
if (!params)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -468,14 +468,14 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
|
|||
return 0;
|
||||
|
||||
free:
|
||||
devm_kfree(&tz->device, params);
|
||||
kfree(params);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void power_allocator_unbind(struct thermal_zone_device *tz)
|
||||
{
|
||||
dev_dbg(&tz->device, "Unbinding from thermal zone %d\n", tz->id);
|
||||
devm_kfree(&tz->device, tz->governor_data);
|
||||
kfree(tz->governor_data);
|
||||
tz->governor_data = NULL;
|
||||
}
|
||||
|
||||
|
|
|
@ -745,8 +745,6 @@ struct drm_connector {
|
|||
uint8_t num_h_tile, num_v_tile;
|
||||
uint8_t tile_h_loc, tile_v_loc;
|
||||
uint16_t tile_h_size, tile_v_size;
|
||||
|
||||
struct list_head destroy_list;
|
||||
};
|
||||
|
||||
/**
|
||||
|
|
|
@ -347,6 +347,25 @@ static inline int drm_eld_mnl(const uint8_t *eld)
|
|||
return (eld[DRM_ELD_CEA_EDID_VER_MNL] & DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT;
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_eld_sad - Get ELD SAD structures.
|
||||
* @eld: pointer to an eld memory structure with sad_count set
|
||||
*/
|
||||
static inline const uint8_t *drm_eld_sad(const uint8_t *eld)
|
||||
{
|
||||
unsigned int ver, mnl;
|
||||
|
||||
ver = (eld[DRM_ELD_VER] & DRM_ELD_VER_MASK) >> DRM_ELD_VER_SHIFT;
|
||||
if (ver != 2 && ver != 31)
|
||||
return NULL;
|
||||
|
||||
mnl = drm_eld_mnl(eld);
|
||||
if (mnl > 16)
|
||||
return NULL;
|
||||
|
||||
return eld + DRM_ELD_CEA_SAD(mnl, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_eld_sad_count - Get ELD SAD count.
|
||||
* @eld: pointer to an eld memory structure with sad_count set
|
||||
|
|
|
@ -385,8 +385,6 @@ enum {
|
|||
SATA_SSP = 0x06, /* Software Settings Preservation */
|
||||
SATA_DEVSLP = 0x09, /* Device Sleep */
|
||||
|
||||
SETFEATURE_SENSE_DATA = 0xC3, /* Sense Data Reporting feature */
|
||||
|
||||
/* feature values for SET_MAX */
|
||||
ATA_SET_MAX_ADDR = 0x00,
|
||||
ATA_SET_MAX_PASSWD = 0x01,
|
||||
|
@ -530,8 +528,6 @@ struct ata_bmdma_prd {
|
|||
#define ata_id_cdb_intr(id) (((id)[ATA_ID_CONFIG] & 0x60) == 0x20)
|
||||
#define ata_id_has_da(id) ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4))
|
||||
#define ata_id_has_devslp(id) ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8))
|
||||
#define ata_id_has_ncq_autosense(id) \
|
||||
((id)[ATA_ID_FEATURE_SUPP] & (1 << 7))
|
||||
|
||||
static inline bool ata_id_has_hipm(const u16 *id)
|
||||
{
|
||||
|
@ -720,20 +716,6 @@ static inline bool ata_id_has_read_log_dma_ext(const u16 *id)
|
|||
return false;
|
||||
}
|
||||
|
||||
static inline bool ata_id_has_sense_reporting(const u16 *id)
|
||||
{
|
||||
if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
|
||||
return false;
|
||||
return id[ATA_ID_COMMAND_SET_3] & (1 << 6);
|
||||
}
|
||||
|
||||
static inline bool ata_id_sense_reporting_enabled(const u16 *id)
|
||||
{
|
||||
if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
|
||||
return false;
|
||||
return id[ATA_ID_COMMAND_SET_4] & (1 << 6);
|
||||
}
|
||||
|
||||
/**
|
||||
* ata_id_major_version - get ATA level of drive
|
||||
* @id: Identify data
|
||||
|
|
|
@ -484,6 +484,7 @@ extern int irq_chip_set_affinity_parent(struct irq_data *data,
|
|||
extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on);
|
||||
extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data,
|
||||
void *vcpu_info);
|
||||
extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type);
|
||||
#endif
|
||||
|
||||
/* Handling of unhandled and spurious interrupts: */
|
||||
|
|
|
@ -1002,6 +1002,34 @@ static inline int page_mapped(struct page *page)
|
|||
return atomic_read(&(page)->_mapcount) >= 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Return true only if the page has been allocated with
|
||||
* ALLOC_NO_WATERMARKS and the low watermark was not
|
||||
* met implying that the system is under some pressure.
|
||||
*/
|
||||
static inline bool page_is_pfmemalloc(struct page *page)
|
||||
{
|
||||
/*
|
||||
* Page index cannot be this large so this must be
|
||||
* a pfmemalloc page.
|
||||
*/
|
||||
return page->index == -1UL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Only to be called by the page allocator on a freshly allocated
|
||||
* page.
|
||||
*/
|
||||
static inline void set_page_pfmemalloc(struct page *page)
|
||||
{
|
||||
page->index = -1UL;
|
||||
}
|
||||
|
||||
static inline void clear_page_pfmemalloc(struct page *page)
|
||||
{
|
||||
page->index = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Different kinds of faults, as returned by handle_mm_fault().
|
||||
* Used to decide whether a process gets delivered SIGBUS or
|
||||
|
|
|
@ -63,15 +63,6 @@ struct page {
|
|||
union {
|
||||
pgoff_t index; /* Our offset within mapping. */
|
||||
void *freelist; /* sl[aou]b first free object */
|
||||
bool pfmemalloc; /* If set by the page allocator,
|
||||
* ALLOC_NO_WATERMARKS was set
|
||||
* and the low watermark was not
|
||||
* met implying that the system
|
||||
* is under some pressure. The
|
||||
* caller should try ensure
|
||||
* this page is only used to
|
||||
* free other pages.
|
||||
*/
|
||||
};
|
||||
|
||||
union {
|
||||
|
|
|
@ -1602,20 +1602,16 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
|
|||
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
|
||||
|
||||
/*
|
||||
* Propagate page->pfmemalloc to the skb if we can. The problem is
|
||||
* that not all callers have unique ownership of the page. If
|
||||
* pfmemalloc is set, we check the mapping as a mapping implies
|
||||
* page->index is set (index and pfmemalloc share space).
|
||||
* If it's a valid mapping, we cannot use page->pfmemalloc but we
|
||||
* do not lose pfmemalloc information as the pages would not be
|
||||
* allocated using __GFP_MEMALLOC.
|
||||
* Propagate page pfmemalloc to the skb if we can. The problem is
|
||||
* that not all callers have unique ownership of the page but rely
|
||||
* on page_is_pfmemalloc doing the right thing(tm).
|
||||
*/
|
||||
frag->page.p = page;
|
||||
frag->page_offset = off;
|
||||
skb_frag_size_set(frag, size);
|
||||
|
||||
page = compound_head(page);
|
||||
if (page->pfmemalloc && !page->mapping)
|
||||
if (page_is_pfmemalloc(page))
|
||||
skb->pfmemalloc = true;
|
||||
}
|
||||
|
||||
|
@ -2263,7 +2259,7 @@ static inline struct page *dev_alloc_page(void)
|
|||
static inline void skb_propagate_pfmemalloc(struct page *page,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
if (page && page->pfmemalloc)
|
||||
if (page_is_pfmemalloc(page))
|
||||
skb->pfmemalloc = true;
|
||||
}
|
||||
|
||||
|
|
|
@ -74,8 +74,6 @@ enum rc_filter_type {
|
|||
* @input_dev: the input child device used to communicate events to userspace
|
||||
* @driver_type: specifies if protocol decoding is done in hardware or software
|
||||
* @idle: used to keep track of RX state
|
||||
* @encode_wakeup: wakeup filtering uses IR encode API, therefore the allowed
|
||||
* wakeup protocols is the set of all raw encoders
|
||||
* @allowed_protocols: bitmask with the supported RC_BIT_* protocols
|
||||
* @enabled_protocols: bitmask with the enabled RC_BIT_* protocols
|
||||
* @allowed_wakeup_protocols: bitmask with the supported RC_BIT_* wakeup protocols
|
||||
|
@ -136,7 +134,6 @@ struct rc_dev {
|
|||
struct input_dev *input_dev;
|
||||
enum rc_driver_type driver_type;
|
||||
bool idle;
|
||||
bool encode_wakeup;
|
||||
u64 allowed_protocols;
|
||||
u64 enabled_protocols;
|
||||
u64 allowed_wakeup_protocols;
|
||||
|
@ -246,7 +243,6 @@ static inline void init_ir_raw_event(struct ir_raw_event *ev)
|
|||
#define US_TO_NS(usec) ((usec) * 1000)
|
||||
#define MS_TO_US(msec) ((msec) * 1000)
|
||||
#define MS_TO_NS(msec) ((msec) * 1000 * 1000)
|
||||
#define NS_TO_US(nsec) DIV_ROUND_UP(nsec, 1000L)
|
||||
|
||||
void ir_raw_event_handle(struct rc_dev *dev);
|
||||
int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev);
|
||||
|
@ -254,9 +250,6 @@ int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type);
|
|||
int ir_raw_event_store_with_filter(struct rc_dev *dev,
|
||||
struct ir_raw_event *ev);
|
||||
void ir_raw_event_set_idle(struct rc_dev *dev, bool idle);
|
||||
int ir_raw_encode_scancode(u64 protocols,
|
||||
const struct rc_scancode_filter *scancode,
|
||||
struct ir_raw_event *events, unsigned int max);
|
||||
|
||||
static inline void ir_raw_event_reset(struct rc_dev *dev)
|
||||
{
|
||||
|
|
|
@ -139,6 +139,7 @@ enum vb2_io_modes {
|
|||
* @VB2_BUF_STATE_PREPARING: buffer is being prepared in videobuf
|
||||
* @VB2_BUF_STATE_PREPARED: buffer prepared in videobuf and by the driver
|
||||
* @VB2_BUF_STATE_QUEUED: buffer queued in videobuf, but not in driver
|
||||
* @VB2_BUF_STATE_REQUEUEING: re-queue a buffer to the driver
|
||||
* @VB2_BUF_STATE_ACTIVE: buffer queued in driver and possibly used
|
||||
* in a hardware operation
|
||||
* @VB2_BUF_STATE_DONE: buffer returned from driver to videobuf, but
|
||||
|
@ -152,6 +153,7 @@ enum vb2_buffer_state {
|
|||
VB2_BUF_STATE_PREPARING,
|
||||
VB2_BUF_STATE_PREPARED,
|
||||
VB2_BUF_STATE_QUEUED,
|
||||
VB2_BUF_STATE_REQUEUEING,
|
||||
VB2_BUF_STATE_ACTIVE,
|
||||
VB2_BUF_STATE_DONE,
|
||||
VB2_BUF_STATE_ERROR,
|
||||
|
|
|
@ -28,7 +28,6 @@ extern int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
|
|||
u64 * info_out);
|
||||
|
||||
extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq);
|
||||
extern void scsi_set_sense_information(u8 *buf, u64 info);
|
||||
|
||||
extern int scsi_ioctl_reset(struct scsi_device *, int __user *);
|
||||
|
||||
|
|
|
@ -141,6 +141,8 @@ struct snd_soc_tplg_ops {
|
|||
int io_ops_count;
|
||||
};
|
||||
|
||||
#ifdef CONFIG_SND_SOC_TOPOLOGY
|
||||
|
||||
/* gets a pointer to data from the firmware block header */
|
||||
static inline const void *snd_soc_tplg_get_data(struct snd_soc_tplg_hdr *hdr)
|
||||
{
|
||||
|
@ -165,4 +167,14 @@ int snd_soc_tplg_widget_bind_event(struct snd_soc_dapm_widget *w,
|
|||
const struct snd_soc_tplg_widget_events *events, int num_events,
|
||||
u16 event_type);
|
||||
|
||||
#else
|
||||
|
||||
static inline int snd_soc_tplg_component_remove(struct snd_soc_component *comp,
|
||||
u32 index)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
|
|
@ -18,6 +18,12 @@
|
|||
#include <linux/types.h>
|
||||
#include <sound/asound.h>
|
||||
|
||||
#ifndef __KERNEL__
|
||||
#error This API is an early revision and not enabled in the current
|
||||
#error kernel release, it will be enabled in a future kernel version
|
||||
#error with incompatible changes to what is here.
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Maximum number of channels topology kcontrol can represent.
|
||||
*/
|
||||
|
|
|
@ -1223,7 +1223,7 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
|
|||
spin_unlock_irq(&callback_lock);
|
||||
|
||||
/* use trialcs->mems_allowed as a temp variable */
|
||||
update_nodemasks_hier(cs, &cs->mems_allowed);
|
||||
update_nodemasks_hier(cs, &trialcs->mems_allowed);
|
||||
done:
|
||||
return retval;
|
||||
}
|
||||
|
|
|
@ -984,6 +984,23 @@ int irq_chip_set_affinity_parent(struct irq_data *data,
|
|||
return -ENOSYS;
|
||||
}
|
||||
|
||||
/**
|
||||
* irq_chip_set_type_parent - Set IRQ type on the parent interrupt
|
||||
* @data: Pointer to interrupt specific data
|
||||
* @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
|
||||
*
|
||||
* Conditional, as the underlying parent chip might not implement it.
|
||||
*/
|
||||
int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
|
||||
{
|
||||
data = data->parent_data;
|
||||
|
||||
if (data->chip->irq_set_type)
|
||||
return data->chip->irq_set_type(data, type);
|
||||
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
/**
|
||||
* irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
|
||||
* @data: Pointer to interrupt specific data
|
||||
|
@ -997,7 +1014,7 @@ int irq_chip_retrigger_hierarchy(struct irq_data *data)
|
|||
if (data->chip && data->chip->irq_retrigger)
|
||||
return data->chip->irq_retrigger(data);
|
||||
|
||||
return -ENOSYS;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -807,8 +807,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
|
|||
spin_unlock(&base->lock);
|
||||
base = new_base;
|
||||
spin_lock(&base->lock);
|
||||
timer->flags &= ~TIMER_BASEMASK;
|
||||
timer->flags |= base->cpu;
|
||||
WRITE_ONCE(timer->flags,
|
||||
(timer->flags & ~TIMER_BASEMASK) | base->cpu);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -1343,12 +1343,15 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
|
|||
set_page_owner(page, order, gfp_flags);
|
||||
|
||||
/*
|
||||
* page->pfmemalloc is set when ALLOC_NO_WATERMARKS was necessary to
|
||||
* page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
|
||||
* allocate the page. The expectation is that the caller is taking
|
||||
* steps that will free more memory. The caller should avoid the page
|
||||
* being used for !PFMEMALLOC purposes.
|
||||
*/
|
||||
page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
|
||||
if (alloc_flags & ALLOC_NO_WATERMARKS)
|
||||
set_page_pfmemalloc(page);
|
||||
else
|
||||
clear_page_pfmemalloc(page);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -3345,7 +3348,7 @@ refill:
|
|||
atomic_add(size - 1, &page->_count);
|
||||
|
||||
/* reset page count bias and offset to start of new frag */
|
||||
nc->pfmemalloc = page->pfmemalloc;
|
||||
nc->pfmemalloc = page_is_pfmemalloc(page);
|
||||
nc->pagecnt_bias = size;
|
||||
nc->offset = size;
|
||||
}
|
||||
|
|
|
@ -1603,7 +1603,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
|
|||
}
|
||||
|
||||
/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
|
||||
if (unlikely(page->pfmemalloc))
|
||||
if (page_is_pfmemalloc(page))
|
||||
pfmemalloc_active = true;
|
||||
|
||||
nr_pages = (1 << cachep->gfporder);
|
||||
|
@ -1614,7 +1614,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
|
|||
add_zone_page_state(page_zone(page),
|
||||
NR_SLAB_UNRECLAIMABLE, nr_pages);
|
||||
__SetPageSlab(page);
|
||||
if (page->pfmemalloc)
|
||||
if (page_is_pfmemalloc(page))
|
||||
SetPageSlabPfmemalloc(page);
|
||||
|
||||
if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
|
||||
|
|
|
@ -1427,7 +1427,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
|
|||
inc_slabs_node(s, page_to_nid(page), page->objects);
|
||||
page->slab_cache = s;
|
||||
__SetPageSlab(page);
|
||||
if (page->pfmemalloc)
|
||||
if (page_is_pfmemalloc(page))
|
||||
SetPageSlabPfmemalloc(page);
|
||||
|
||||
start = page_address(page);
|
||||
|
|
|
@ -1541,6 +1541,7 @@ p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err)
|
|||
struct p9_client *clnt = fid->clnt;
|
||||
struct p9_req_t *req;
|
||||
int total = 0;
|
||||
*err = 0;
|
||||
|
||||
p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n",
|
||||
fid->fid, (unsigned long long) offset, (int)iov_iter_count(to));
|
||||
|
@ -1620,6 +1621,7 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
|
|||
struct p9_client *clnt = fid->clnt;
|
||||
struct p9_req_t *req;
|
||||
int total = 0;
|
||||
*err = 0;
|
||||
|
||||
p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %zd\n",
|
||||
fid->fid, (unsigned long long) offset,
|
||||
|
|
|
@ -595,8 +595,11 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
|
|||
/* increase the refcounter of the related vlan */
|
||||
vlan = batadv_softif_vlan_get(bat_priv, vid);
|
||||
if (WARN(!vlan, "adding TT local entry %pM to non-existent VLAN %d",
|
||||
addr, BATADV_PRINT_VID(vid)))
|
||||
addr, BATADV_PRINT_VID(vid))) {
|
||||
kfree(tt_local);
|
||||
tt_local = NULL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
batadv_dbg(BATADV_DBG_TT, bat_priv,
|
||||
"Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
|
||||
|
|
|
@ -1591,7 +1591,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
|
|||
break;
|
||||
}
|
||||
|
||||
if (skb_trimmed)
|
||||
if (skb_trimmed && skb_trimmed != skb)
|
||||
kfree_skb(skb_trimmed);
|
||||
|
||||
return err;
|
||||
|
@ -1636,7 +1636,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
|
|||
break;
|
||||
}
|
||||
|
||||
if (skb_trimmed)
|
||||
if (skb_trimmed && skb_trimmed != skb)
|
||||
kfree_skb(skb_trimmed);
|
||||
|
||||
return err;
|
||||
|
|
|
@@ -340,7 +340,7 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)

         if (skb && frag_size) {
                 skb->head_frag = 1;
-                if (virt_to_head_page(data)->pfmemalloc)
+                if (page_is_pfmemalloc(virt_to_head_page(data)))
                         skb->pfmemalloc = 1;
         }
         return skb;
@@ -4022,8 +4022,8 @@ EXPORT_SYMBOL(skb_checksum_setup);
  * Otherwise returns the provided skb. Returns NULL in error cases
  * (e.g. transport_len exceeds skb length or out-of-memory).
  *
- * Caller needs to set the skb transport header and release the returned skb.
- * Provided skb is consumed.
+ * Caller needs to set the skb transport header and free any returned skb if it
+ * differs from the provided skb.
  */
 static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
                                                unsigned int transport_len)
@@ -4032,16 +4032,12 @@ static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
         unsigned int len = skb_transport_offset(skb) + transport_len;
         int ret;

-        if (skb->len < len) {
-                kfree_skb(skb);
+        if (skb->len < len)
                 return NULL;
-        } else if (skb->len == len) {
+        else if (skb->len == len)
                 return skb;
-        }

         skb_chk = skb_clone(skb, GFP_ATOMIC);
-        kfree_skb(skb);
-
         if (!skb_chk)
                 return NULL;

@@ -4066,8 +4062,8 @@ static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
  * If the skb has data beyond the given transport length, then a
  * trimmed & cloned skb is checked and returned.
  *
- * Caller needs to set the skb transport header and release the returned skb.
- * Provided skb is consumed.
+ * Caller needs to set the skb transport header and free any returned skb if it
+ * differs from the provided skb.
  */
 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
                                      unsigned int transport_len,
@@ -4079,23 +4075,26 @@ struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,

         skb_chk = skb_checksum_maybe_trim(skb, transport_len);
         if (!skb_chk)
-                return NULL;
+                goto err;

-        if (!pskb_may_pull(skb_chk, offset)) {
-                kfree_skb(skb_chk);
-                return NULL;
-        }
+        if (!pskb_may_pull(skb_chk, offset))
+                goto err;

         __skb_pull(skb_chk, offset);
         ret = skb_chkf(skb_chk);
         __skb_push(skb_chk, offset);

-        if (ret) {
-                kfree_skb(skb_chk);
-                return NULL;
-        }
+        if (ret)
+                goto err;

         return skb_chk;
+
+err:
+        if (skb_chk && skb_chk != skb)
+                kfree_skb(skb_chk);
+
+        return NULL;
+
 }
 EXPORT_SYMBOL(skb_checksum_trimmed);

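The two comment hunks above change the documented contract of skb_checksum_maybe_trim() and skb_checksum_trimmed(): the passed-in skb is no longer consumed, so a caller frees the returned buffer only when it is a trimmed clone rather than the original. A minimal caller sketch under that contract (the wrapper function and its name are illustrative, not part of this diff):

/* Sketch only: run a checksum validator and release a trimmed clone, if any. */
static int example_validate(struct sk_buff *skb, unsigned int transport_len,
                            __sum16 (*chkf)(struct sk_buff *))
{
        struct sk_buff *skb_chk;

        skb_chk = skb_checksum_trimmed(skb, transport_len, chkf);
        if (!skb_chk)
                return -EINVAL;

        /* ... inspect skb_chk here; it may or may not be the original skb ... */

        if (skb_chk != skb)     /* only a trimmed clone is ours to free */
                kfree_skb(skb_chk);

        return 0;
}
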
@@ -2465,7 +2465,7 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
                 key = l->key + 1;
                 iter->pos++;

-                if (pos-- <= 0)
+                if (--pos <= 0)
                         break;

                 l = NULL;

@@ -1435,33 +1435,35 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
         struct sk_buff *skb_chk;
         unsigned int transport_len;
         unsigned int len = skb_transport_offset(skb) + sizeof(struct igmphdr);
-        int ret;
+        int ret = -EINVAL;

         transport_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);

-        skb_get(skb);
         skb_chk = skb_checksum_trimmed(skb, transport_len,
                                        ip_mc_validate_checksum);
         if (!skb_chk)
-                return -EINVAL;
+                goto err;

-        if (!pskb_may_pull(skb_chk, len)) {
-                kfree_skb(skb_chk);
-                return -EINVAL;
-        }
+        if (!pskb_may_pull(skb_chk, len))
+                goto err;

         ret = ip_mc_check_igmp_msg(skb_chk);
-        if (ret) {
-                kfree_skb(skb_chk);
-                return ret;
-        }
+        if (ret)
+                goto err;

         if (skb_trimmed)
                 *skb_trimmed = skb_chk;
-        else
+        /* free now unneeded clone */
+        else if (skb_chk != skb)
                 kfree_skb(skb_chk);

-        return 0;
+        ret = 0;
+
+err:
+        if (ret && skb_chk && skb_chk != skb)
+                kfree_skb(skb_chk);
+
+        return ret;
 }

 /**
@@ -1470,7 +1472,7 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
  * @skb_trimmed: to store an skb pointer trimmed to IPv4 packet tail (optional)
  *
  * Checks whether an IPv4 packet is a valid IGMP packet. If so sets
- * skb network and transport headers accordingly and returns zero.
+ * skb transport header accordingly and returns zero.
  *
  * -EINVAL: A broken packet was detected, i.e. it violates some internet
  *  standard
@@ -1485,7 +1487,8 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
  * to leave the original skb and its full frame unchanged (which might be
  * desirable for layer 2 frame jugglers).
  *
- * The caller needs to release a reference count from any returned skb_trimmed.
+ * Caller needs to set the skb network header and free any returned skb if it
+ * differs from the provided skb.
  */
 int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
 {

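The same rule now applies to ip_mc_check_igmp(): it no longer takes an extra reference on the skb it is handed, and a caller that requests skb_trimmed frees it only when it differs from the original, exactly as the br_multicast hunks earlier in this diff do. A minimal sketch of that calling pattern (the wrapper function is illustrative):

/* Sketch only: parse an IGMP packet under the reworked caller contract. */
static int example_handle_igmp(struct sk_buff *skb)
{
        struct sk_buff *skb_trimmed = NULL;
        int err;

        err = ip_mc_check_igmp(skb, &skb_trimmed);
        if (err)
                return err;

        /* ... act on the (possibly trimmed) IGMP message ... */

        if (skb_trimmed && skb_trimmed != skb)
                kfree_skb(skb_trimmed);

        return 0;
}
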
@@ -593,7 +593,7 @@ static bool reqsk_queue_unlink(struct request_sock_queue *queue,
         }

         spin_unlock(&queue->syn_wait_lock);
-        if (del_timer_sync(&req->rsk_timer))
+        if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
                 reqsk_put(req);
         return found;
 }

@@ -41,8 +41,6 @@ static int tcp_syn_retries_min = 1;
 static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
 static int ip_ping_group_range_min[] = { 0, 0 };
 static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
-static int min_sndbuf = SOCK_MIN_SNDBUF;
-static int min_rcvbuf = SOCK_MIN_RCVBUF;

 /* Update system visible IP port range */
 static void set_local_port_range(struct net *net, int range[2])
@@ -530,7 +528,7 @@ static struct ctl_table ipv4_table[] = {
                 .maxlen = sizeof(sysctl_tcp_wmem),
                 .mode = 0644,
                 .proc_handler = proc_dointvec_minmax,
-                .extra1 = &min_sndbuf,
+                .extra1 = &one,
         },
         {
                 .procname = "tcp_notsent_lowat",
@@ -545,7 +543,7 @@ static struct ctl_table ipv4_table[] = {
                 .maxlen = sizeof(sysctl_tcp_rmem),
                 .mode = 0644,
                 .proc_handler = proc_dointvec_minmax,
-                .extra1 = &min_rcvbuf,
+                .extra1 = &one,
         },
         {
                 .procname = "tcp_app_win",
@@ -758,7 +756,7 @@ static struct ctl_table ipv4_table[] = {
                 .maxlen = sizeof(sysctl_udp_rmem_min),
                 .mode = 0644,
                 .proc_handler = proc_dointvec_minmax,
-                .extra1 = &min_rcvbuf,
+                .extra1 = &one
         },
         {
                 .procname = "udp_wmem_min",
@@ -766,7 +764,7 @@ static struct ctl_table ipv4_table[] = {
                 .maxlen = sizeof(sysctl_udp_wmem_min),
                 .mode = 0644,
                 .proc_handler = proc_dointvec_minmax,
-                .extra1 = &min_sndbuf,
+                .extra1 = &one
         },
         { }
 };

@@ -172,6 +172,8 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
                         *ppcpu_rt = NULL;
                 }
         }
+
+        non_pcpu_rt->rt6i_pcpu = NULL;
 }

 static void rt6_release(struct rt6_info *rt)

@@ -143,34 +143,36 @@ static int __ipv6_mc_check_mld(struct sk_buff *skb,
         struct sk_buff *skb_chk = NULL;
         unsigned int transport_len;
         unsigned int len = skb_transport_offset(skb) + sizeof(struct mld_msg);
-        int ret;
+        int ret = -EINVAL;

         transport_len = ntohs(ipv6_hdr(skb)->payload_len);
         transport_len -= skb_transport_offset(skb) - sizeof(struct ipv6hdr);

-        skb_get(skb);
         skb_chk = skb_checksum_trimmed(skb, transport_len,
                                        ipv6_mc_validate_checksum);
         if (!skb_chk)
-                return -EINVAL;
+                goto err;

-        if (!pskb_may_pull(skb_chk, len)) {
-                kfree_skb(skb_chk);
-                return -EINVAL;
-        }
+        if (!pskb_may_pull(skb_chk, len))
+                goto err;

         ret = ipv6_mc_check_mld_msg(skb_chk);
-        if (ret) {
-                kfree_skb(skb_chk);
-                return ret;
-        }
+        if (ret)
+                goto err;

         if (skb_trimmed)
                 *skb_trimmed = skb_chk;
-        else
+        /* free now unneeded clone */
+        else if (skb_chk != skb)
                 kfree_skb(skb_chk);

-        return 0;
+        ret = 0;
+
+err:
+        if (ret && skb_chk && skb_chk != skb)
+                kfree_skb(skb_chk);
+
+        return ret;
 }

 /**
@@ -179,7 +181,7 @@ static int __ipv6_mc_check_mld(struct sk_buff *skb,
  * @skb_trimmed: to store an skb pointer trimmed to IPv6 packet tail (optional)
  *
  * Checks whether an IPv6 packet is a valid MLD packet. If so sets
- * skb network and transport headers accordingly and returns zero.
+ * skb transport header accordingly and returns zero.
  *
  * -EINVAL: A broken packet was detected, i.e. it violates some internet
  *  standard
@@ -194,7 +196,8 @@ static int __ipv6_mc_check_mld(struct sk_buff *skb,
  * to leave the original skb and its full frame unchanged (which might be
  * desirable for layer 2 frame jugglers).
  *
- * The caller needs to release a reference count from any returned skb_trimmed.
+ * Caller needs to set the skb network header and free any returned skb if it
+ * differs from the provided skb.
  */
 int ipv6_mc_check_mld(struct sk_buff *skb, struct sk_buff **skb_trimmed)
 {

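The MLD helper is reworked the same way as the IGMP one: ipv6_mc_check_mld() now sets only the skb transport header (the caller sets the network header), takes no extra reference on the passed skb, and any skb_trimmed it returns is freed by the caller only when it differs from the original, which is the pattern the br_multicast_ipv6_rcv hunk earlier in this diff already follows.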