Merge branch 'linus' into locking/core, to resolve conflicts

Conflicts:
	kernel/locking/lockdep.c

Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit e2d6f8a5f5

@@ -7,7 +7,7 @@ This is the authoritative documentation on the design, interface and
 conventions of cgroup v2. It describes all userland-visible aspects
 of cgroup including core and specific controller behaviors. All
 future changes must be reflected in this document. Documentation for
-v1 is available under Documentation/cgroup-legacy/.
+v1 is available under Documentation/cgroup-v1/.

 CONTENTS

@@ -8,6 +8,7 @@ OHCI and EHCI controllers.
 Required properties:
 - compatible: "renesas,pci-r8a7790" for the R8A7790 SoC;
	       "renesas,pci-r8a7791" for the R8A7791 SoC;
+	       "renesas,pci-r8a7793" for the R8A7793 SoC;
	       "renesas,pci-r8a7794" for the R8A7794 SoC;
	       "renesas,pci-rcar-gen2" for a generic R-Car Gen2 compatible device

@@ -4,6 +4,7 @@ Required properties:
 compatible: "renesas,pcie-r8a7779" for the R8A7779 SoC;
	     "renesas,pcie-r8a7790" for the R8A7790 SoC;
	     "renesas,pcie-r8a7791" for the R8A7791 SoC;
+	     "renesas,pcie-r8a7793" for the R8A7793 SoC;
	     "renesas,pcie-r8a7795" for the R8A7795 SoC;
	     "renesas,pcie-rcar-gen2" for a generic R-Car Gen2 compatible device.

@@ -30,6 +30,8 @@ The compatible list for this generic sound card currently:
 "fsl,imx-audio-sgtl5000"
 (compatible with Documentation/devicetree/bindings/sound/imx-audio-sgtl5000.txt)

+ "fsl,imx-audio-wm8960"
+
 Required properties:

 - compatible : Contains one of entries in the compatible list.

@@ -4235,6 +4235,17 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
			The default value of this parameter is determined by
			the config option CONFIG_WQ_POWER_EFFICIENT_DEFAULT.

+	workqueue.debug_force_rr_cpu
+			Workqueue used to implicitly guarantee that work
+			items queued without explicit CPU specified are put
+			on the local CPU. This guarantee is no longer true
+			and while local CPU is still preferred work items
+			may be put on foreign CPUs. This debug option
+			forces round-robin CPU selection to flush out
+			usages which depend on the now broken guarantee.
+			When enabled, memory and cache locality will be
+			impacted.
+
	x2apic_phys	[X86-64,APIC] Use x2apic physical mode instead of
			default x2apic cluster mode on platforms
			supporting x2apic.

@@ -9787,10 +9787,11 @@ S:	Supported
F:	drivers/scsi/be2iscsi/

Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER
-M:	Sathya Perla <sathya.perla@avagotech.com>
-M:	Ajit Khaparde <ajit.khaparde@avagotech.com>
-M:	Padmanabh Ratnakar <padmanabh.ratnakar@avagotech.com>
-M:	Sriharsha Basavapatna <sriharsha.basavapatna@avagotech.com>
+M:	Sathya Perla <sathya.perla@broadcom.com>
+M:	Ajit Khaparde <ajit.khaparde@broadcom.com>
+M:	Padmanabh Ratnakar <padmanabh.ratnakar@broadcom.com>
+M:	Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
+M:	Somnath Kotur <somnath.kotur@broadcom.com>
L:	netdev@vger.kernel.org
W:	http://www.emulex.com
S:	Supported

@@ -292,24 +292,23 @@ CONFIG_FB=y
CONFIG_FIRMWARE_EDID=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_TILEBLITTING=y
-CONFIG_OMAP2_DSS=m
-CONFIG_OMAP5_DSS_HDMI=y
-CONFIG_OMAP2_DSS_SDI=y
-CONFIG_OMAP2_DSS_DSI=y
+CONFIG_FB_OMAP5_DSS_HDMI=y
+CONFIG_FB_OMAP2_DSS_SDI=y
+CONFIG_FB_OMAP2_DSS_DSI=y
CONFIG_FB_OMAP2=m
-CONFIG_DISPLAY_ENCODER_TFP410=m
-CONFIG_DISPLAY_ENCODER_TPD12S015=m
-CONFIG_DISPLAY_CONNECTOR_DVI=m
-CONFIG_DISPLAY_CONNECTOR_HDMI=m
-CONFIG_DISPLAY_CONNECTOR_ANALOG_TV=m
-CONFIG_DISPLAY_PANEL_DPI=m
-CONFIG_DISPLAY_PANEL_DSI_CM=m
-CONFIG_DISPLAY_PANEL_SONY_ACX565AKM=m
-CONFIG_DISPLAY_PANEL_LGPHILIPS_LB035Q02=m
-CONFIG_DISPLAY_PANEL_SHARP_LS037V7DW01=m
-CONFIG_DISPLAY_PANEL_TPO_TD028TTEC1=m
-CONFIG_DISPLAY_PANEL_TPO_TD043MTEA1=m
-CONFIG_DISPLAY_PANEL_NEC_NL8048HL11=m
+CONFIG_FB_OMAP2_ENCODER_TFP410=m
+CONFIG_FB_OMAP2_ENCODER_TPD12S015=m
+CONFIG_FB_OMAP2_CONNECTOR_DVI=m
+CONFIG_FB_OMAP2_CONNECTOR_HDMI=m
+CONFIG_FB_OMAP2_CONNECTOR_ANALOG_TV=m
+CONFIG_FB_OMAP2_PANEL_DPI=m
+CONFIG_FB_OMAP2_PANEL_DSI_CM=m
+CONFIG_FB_OMAP2_PANEL_SONY_ACX565AKM=m
+CONFIG_FB_OMAP2_PANEL_LGPHILIPS_LB035Q02=m
+CONFIG_FB_OMAP2_PANEL_SHARP_LS037V7DW01=m
+CONFIG_FB_OMAP2_PANEL_TPO_TD028TTEC1=m
+CONFIG_FB_OMAP2_PANEL_TPO_TD043MTEA1=m
+CONFIG_FB_OMAP2_PANEL_NEC_NL8048HL11=m
CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_LCD_CLASS_DEVICE=y
CONFIG_LCD_PLATFORM=y

@@ -475,6 +475,7 @@ config X86_UV
	depends on X86_64
	depends on X86_EXTENDED_PLATFORM
	depends on NUMA
	depends on EFI
	depends on X86_X2APIC
+	depends on PCI
	---help---

@@ -2455,14 +2455,16 @@ struct request *blk_peek_request(struct request_queue *q)

			rq = NULL;
			break;
-		} else if (ret == BLKPREP_KILL) {
+		} else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) {
+			int err = (ret == BLKPREP_INVALID) ? -EREMOTEIO : -EIO;
+
			rq->cmd_flags |= REQ_QUIET;
			/*
			 * Mark this request as started so we don't trigger
			 * any debug logic in the end I/O path.
			 */
			blk_start_request(rq);
-			__blk_end_request_all(rq, -EIO);
+			__blk_end_request_all(rq, err);
		} else {
			printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
			break;

@@ -65,18 +65,10 @@ struct skcipher_async_req {
	struct skcipher_async_rsgl first_sgl;
	struct list_head list;
	struct scatterlist *tsg;
-	char iv[];
+	atomic_t *inflight;
+	struct skcipher_request req;
};

-#define GET_SREQ(areq, ctx) (struct skcipher_async_req *)((char *)areq + \
-	crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req)))
-
-#define GET_REQ_SIZE(ctx) \
-	crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req))
-
-#define GET_IV_SIZE(ctx) \
-	crypto_skcipher_ivsize(crypto_skcipher_reqtfm(&ctx->req))
-
#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
		      sizeof(struct scatterlist) - 1)

@@ -102,15 +94,12 @@ static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)

static void skcipher_async_cb(struct crypto_async_request *req, int err)
{
-	struct sock *sk = req->data;
-	struct alg_sock *ask = alg_sk(sk);
-	struct skcipher_ctx *ctx = ask->private;
-	struct skcipher_async_req *sreq = GET_SREQ(req, ctx);
+	struct skcipher_async_req *sreq = req->data;
	struct kiocb *iocb = sreq->iocb;

-	atomic_dec(&ctx->inflight);
+	atomic_dec(sreq->inflight);
	skcipher_free_async_sgls(sreq);
-	kfree(req);
+	kzfree(sreq);
	iocb->ki_complete(iocb, err, err);
}

@@ -306,8 +295,11 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
+	struct sock *psk = ask->parent;
+	struct alg_sock *pask = alg_sk(psk);
	struct skcipher_ctx *ctx = ask->private;
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);
+	struct skcipher_tfm *skc = pask->private;
+	struct crypto_skcipher *tfm = skc->skcipher;
	unsigned ivsize = crypto_skcipher_ivsize(tfm);
	struct skcipher_sg_list *sgl;
	struct af_alg_control con = {};

@@ -509,37 +501,43 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
+	struct sock *psk = ask->parent;
+	struct alg_sock *pask = alg_sk(psk);
	struct skcipher_ctx *ctx = ask->private;
+	struct skcipher_tfm *skc = pask->private;
+	struct crypto_skcipher *tfm = skc->skcipher;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	struct skcipher_async_req *sreq;
	struct skcipher_request *req;
	struct skcipher_async_rsgl *last_rsgl = NULL;
-	unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx);
-	unsigned int reqlen = sizeof(struct skcipher_async_req) +
-			      GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx);
+	unsigned int txbufs = 0, len = 0, tx_nents;
+	unsigned int reqsize = crypto_skcipher_reqsize(tfm);
+	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	int err = -ENOMEM;
	bool mark = false;
+	char *iv;
+
+	sreq = kzalloc(sizeof(*sreq) + reqsize + ivsize, GFP_KERNEL);
+	if (unlikely(!sreq))
+		goto out;
+
+	req = &sreq->req;
+	iv = (char *)(req + 1) + reqsize;
+	sreq->iocb = msg->msg_iocb;
+	INIT_LIST_HEAD(&sreq->list);
+	sreq->inflight = &ctx->inflight;

	lock_sock(sk);
-	req = kmalloc(reqlen, GFP_KERNEL);
-	if (unlikely(!req))
-		goto unlock;
-
-	sreq = GET_SREQ(req, ctx);
-	sreq->iocb = msg->msg_iocb;
-	memset(&sreq->first_sgl, '\0', sizeof(struct skcipher_async_rsgl));
-	INIT_LIST_HEAD(&sreq->list);
+	tx_nents = skcipher_all_sg_nents(ctx);
	sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
-	if (unlikely(!sreq->tsg)) {
-		kfree(req);
+	if (unlikely(!sreq->tsg))
		goto unlock;
-	}
	sg_init_table(sreq->tsg, tx_nents);
-	memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx));
-	skcipher_request_set_tfm(req, crypto_skcipher_reqtfm(&ctx->req));
-	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-				      skcipher_async_cb, sk);
+	memcpy(iv, ctx->iv, ivsize);
+	skcipher_request_set_tfm(req, tfm);
+	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
+				      skcipher_async_cb, sreq);

	while (iov_iter_count(&msg->msg_iter)) {
		struct skcipher_async_rsgl *rsgl;

@@ -615,20 +613,22 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
	sg_mark_end(sreq->tsg + txbufs - 1);

	skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
-				   len, sreq->iv);
+				   len, iv);
	err = ctx->enc ? crypto_skcipher_encrypt(req) :
			 crypto_skcipher_decrypt(req);
	if (err == -EINPROGRESS) {
		atomic_inc(&ctx->inflight);
		err = -EIOCBQUEUED;
+		sreq = NULL;
		goto unlock;
	}
free:
	skcipher_free_async_sgls(sreq);
-	kfree(req);
unlock:
	skcipher_wmem_wakeup(sk);
	release_sock(sk);
+	kzfree(sreq);
+out:
	return err;
}

@@ -637,9 +637,12 @@ static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
+	struct sock *psk = ask->parent;
+	struct alg_sock *pask = alg_sk(psk);
	struct skcipher_ctx *ctx = ask->private;
-	unsigned bs = crypto_skcipher_blocksize(crypto_skcipher_reqtfm(
-		&ctx->req));
+	struct skcipher_tfm *skc = pask->private;
+	struct crypto_skcipher *tfm = skc->skcipher;
+	unsigned bs = crypto_skcipher_blocksize(tfm);
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int err = -EAGAIN;

@@ -947,7 +950,8 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
	ask->private = ctx;

	skcipher_request_set_tfm(&ctx->req, skcipher);
-	skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+	skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_SLEEP |
+						 CRYPTO_TFM_REQ_MAY_BACKLOG,
				      af_alg_complete, &ctx->completion);

	sk->sk_destruct = skcipher_sock_destruct;

@@ -499,6 +499,7 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
		if (link->dump == NULL)
			return -EINVAL;

+		down_read(&crypto_alg_sem);
		list_for_each_entry(alg, &crypto_alg_list, cra_list)
			dump_alloc += CRYPTO_REPORT_MAXSIZE;

@@ -508,8 +509,11 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
				.done = link->done,
				.min_dump_alloc = dump_alloc,
			};
-			return netlink_dump_start(crypto_nlsk, skb, nlh, &c);
+			err = netlink_dump_start(crypto_nlsk, skb, nlh, &c);
		}
+		up_read(&crypto_alg_sem);
+
+		return err;
	}

	err = nlmsg_parse(nlh, crypto_msg_min[type], attrs, CRYPTOCFGA_MAX,

@@ -264,6 +264,26 @@ static const struct pci_device_id ahci_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
	{ PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
	{ PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b2), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b3), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b4), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b5), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b6), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b7), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19bE), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19bF), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c0), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c1), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c2), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c3), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c4), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c5), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c6), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c7), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */
	{ PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
	{ PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
	{ PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */

@@ -250,6 +250,7 @@ enum {
	AHCI_HFLAG_MULTI_MSI		= 0,
	AHCI_HFLAG_MULTI_MSIX		= 0,
#endif
+	AHCI_HFLAG_WAKE_BEFORE_STOP	= (1 << 22), /* wake before DMA stop */

	/* ap->flags bits */

@@ -317,6 +317,7 @@ static int brcm_ahci_probe(struct platform_device *pdev)
	if (IS_ERR(hpriv))
		return PTR_ERR(hpriv);
	hpriv->plat_data = priv;
+	hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP;

	brcm_sata_alpm_init(hpriv);

@@ -496,8 +496,8 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
		}
	}

-	/* fabricate port_map from cap.nr_ports */
-	if (!port_map) {
+	/* fabricate port_map from cap.nr_ports for < AHCI 1.3 */
+	if (!port_map && vers < 0x10300) {
		port_map = (1 << ahci_nr_ports(cap)) - 1;
		dev_warn(dev, "forcing PORTS_IMPL to 0x%x\n", port_map);

@@ -593,8 +593,22 @@ EXPORT_SYMBOL_GPL(ahci_start_engine);
int ahci_stop_engine(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
+	struct ahci_host_priv *hpriv = ap->host->private_data;
	u32 tmp;

+	/*
+	 * On some controllers, stopping a port's DMA engine while the port
+	 * is in ALPM state (partial or slumber) results in failures on
+	 * subsequent DMA engine starts.  For those controllers, put the
+	 * port back in active state before stopping its DMA engine.
+	 */
+	if ((hpriv->flags & AHCI_HFLAG_WAKE_BEFORE_STOP) &&
+	    (ap->link.lpm_policy > ATA_LPM_MAX_POWER) &&
+	    ahci_set_lpm(&ap->link, ATA_LPM_MAX_POWER, ATA_LPM_WAKE_ONLY)) {
+		dev_err(ap->host->dev, "Failed to wake up port before engine stop\n");
+		return -EIO;
+	}
+
	tmp = readl(port_mmio + PORT_CMD);

	/* check if the HBA is idle */

@@ -689,6 +703,9 @@ static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
	void __iomem *port_mmio = ahci_port_base(ap);

	if (policy != ATA_LPM_MAX_POWER) {
+		/* wakeup flag only applies to the max power policy */
+		hints &= ~ATA_LPM_WAKE_ONLY;
+
		/*
		 * Disable interrupts on Phy Ready. This keeps us from
		 * getting woken up due to spurious phy ready

@@ -704,7 +721,8 @@ static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
		u32 cmd = readl(port_mmio + PORT_CMD);

		if (policy == ATA_LPM_MAX_POWER || !(hints & ATA_LPM_HIPM)) {
-			cmd &= ~(PORT_CMD_ASP | PORT_CMD_ALPE);
+			if (!(hints & ATA_LPM_WAKE_ONLY))
+				cmd &= ~(PORT_CMD_ASP | PORT_CMD_ALPE);
			cmd |= PORT_CMD_ICC_ACTIVE;

			writel(cmd, port_mmio + PORT_CMD);

@@ -712,6 +730,9 @@ static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,

			/* wait 10ms to be sure we've come out of LPM state */
			ata_msleep(ap, 10);
+
+			if (hints & ATA_LPM_WAKE_ONLY)
+				return 0;
		} else {
			cmd |= PORT_CMD_ALPE;
			if (policy == ATA_LPM_MIN_POWER)

@@ -4125,6 +4125,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
	{ " 2GB ATA Flash Disk", "ADMA428M",	ATA_HORKAGE_NODMA },
+	{ "VRFDFC22048UCHC-TE*", NULL,		ATA_HORKAGE_NODMA },
	/* Odd clown on sil3726/4726 PMPs */
	{ "Config Disk",	NULL,		ATA_HORKAGE_DISABLE },

@@ -997,12 +997,9 @@ static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
-	unsigned long flags;

	if (ap->ops->error_handler) {
		if (in_wq) {
-			spin_lock_irqsave(ap->lock, flags);
-
			/* EH might have kicked in while host lock is
			 * released.
			 */

@@ -1014,8 +1011,6 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
			} else
				ata_port_freeze(ap);
		}
-
-		spin_unlock_irqrestore(ap->lock, flags);
	} else {
		if (likely(!(qc->err_mask & AC_ERR_HSM)))
			ata_qc_complete(qc);

@@ -1024,10 +1019,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
		}
	} else {
		if (in_wq) {
-			spin_lock_irqsave(ap->lock, flags);
			ata_sff_irq_on(ap);
			ata_qc_complete(qc);
-			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}

@@ -1048,9 +1041,10 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
{
	struct ata_link *link = qc->dev->link;
	struct ata_eh_info *ehi = &link->eh_info;
-	unsigned long flags = 0;
	int poll_next;

+	lockdep_assert_held(ap->lock);
+
	WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_sff_qc_issue() does not throw things

@@ -1112,14 +1106,6 @@ fsm_start:
			}
		}

-		/* Send the CDB (atapi) or the first data block (ata pio out).
-		 * During the state transition, interrupt handler shouldn't
-		 * be invoked before the data transfer is complete and
-		 * hsm_task_state is changed. Hence, the following locking.
-		 */
-		if (in_wq)
-			spin_lock_irqsave(ap->lock, flags);
-
		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.

@@ -1135,9 +1121,6 @@ fsm_start:
			/* send CDB */
			atapi_send_cdb(ap, qc);

-			if (in_wq)
-				spin_unlock_irqrestore(ap->lock, flags);
-
			/* if polling, ata_sff_pio_task() handles the rest.
			 * otherwise, interrupt handler takes over from here.
			 */

@@ -1296,7 +1279,8 @@ fsm_start:
		break;
	default:
		poll_next = 0;
-		BUG();
+		WARN(true, "ata%d: SFF host state machine in invalid state %d",
+		     ap->print_id, ap->hsm_task_state);
	}

	return poll_next;

@@ -1361,12 +1345,14 @@ static void ata_sff_pio_task(struct work_struct *work)
	u8 status;
	int poll_next;

+	spin_lock_irq(ap->lock);
+
	BUG_ON(ap->sff_pio_task_link == NULL);
	/* qc can be NULL if timeout occurred */
	qc = ata_qc_from_tag(ap, link->active_tag);
	if (!qc) {
		ap->sff_pio_task_link = NULL;
-		return;
+		goto out_unlock;
	}

fsm_start:

@@ -1381,11 +1367,14 @@ fsm_start:
	 */
	status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
+		spin_unlock_irq(ap->lock);
		ata_msleep(ap, 2);
+		spin_lock_irq(ap->lock);
+
		status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
-			return;
+			goto out_unlock;
		}
	}

@@ -1402,6 +1391,8 @@ fsm_start:
	 */
	if (poll_next)
		goto fsm_start;
+out_unlock:
+	spin_unlock_irq(ap->lock);
}

/**

@@ -782,7 +782,7 @@ static void atmel_sha_finish_req(struct ahash_request *req, int err)
	dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
		       SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY);

-	clk_disable_unprepare(dd->iclk);
+	clk_disable(dd->iclk);

	if (req->base.complete)
		req->base.complete(&req->base, err);

@@ -795,7 +795,7 @@ static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
{
	int err;

-	err = clk_prepare_enable(dd->iclk);
+	err = clk_enable(dd->iclk);
	if (err)
		return err;

@@ -822,7 +822,7 @@ static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
	dev_info(dd->dev,
		 "version: 0x%x\n", dd->hw_version);

-	clk_disable_unprepare(dd->iclk);
+	clk_disable(dd->iclk);
}

static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,

@@ -1410,6 +1410,10 @@ static int atmel_sha_probe(struct platform_device *pdev)
		goto res_err;
	}

+	err = clk_prepare(sha_dd->iclk);
+	if (err)
+		goto res_err;
+
	atmel_sha_hw_version_init(sha_dd);

	atmel_sha_get_cap(sha_dd);

@@ -1421,12 +1425,12 @@ static int atmel_sha_probe(struct platform_device *pdev)
		if (IS_ERR(pdata)) {
			dev_err(&pdev->dev, "platform data not available\n");
			err = PTR_ERR(pdata);
-			goto res_err;
+			goto iclk_unprepare;
		}
	}
	if (!pdata->dma_slave) {
		err = -ENXIO;
-		goto res_err;
+		goto iclk_unprepare;
	}
	err = atmel_sha_dma_init(sha_dd, pdata);
	if (err)

@@ -1457,6 +1461,8 @@ err_algs:
	if (sha_dd->caps.has_dma)
		atmel_sha_dma_cleanup(sha_dd);
err_sha_dma:
+iclk_unprepare:
+	clk_unprepare(sha_dd->iclk);
res_err:
	tasklet_kill(&sha_dd->done_task);
sha_dd_err:

@@ -1483,12 +1489,7 @@ static int atmel_sha_remove(struct platform_device *pdev)
	if (sha_dd->caps.has_dma)
		atmel_sha_dma_cleanup(sha_dd);

-	iounmap(sha_dd->io_base);
-
-	clk_put(sha_dd->iclk);
-
	if (sha_dd->irq >= 0)
		free_irq(sha_dd->irq, sha_dd);
+	clk_unprepare(sha_dd->iclk);

	return 0;
}

@@ -306,7 +306,7 @@ static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
		return -ENOMEM;

	dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0);
-	if (!dma->cache_pool)
+	if (!dma->padding_pool)
		return -ENOMEM;

	cesa->dma = dma;

@@ -312,8 +312,8 @@ static int altera_gpio_probe(struct platform_device *pdev)
		handle_simple_irq, IRQ_TYPE_NONE);

	if (ret) {
-		dev_info(&pdev->dev, "could not add irqchip\n");
-		return ret;
+		dev_err(&pdev->dev, "could not add irqchip\n");
+		goto teardown;
	}

	gpiochip_set_chained_irqchip(&altera_gc->mmchip.gc,

@@ -326,6 +326,7 @@ static int altera_gpio_probe(struct platform_device *pdev)
skip_irq:
	return 0;
teardown:
+	of_mm_gpiochip_remove(&altera_gc->mmchip);
	pr_err("%s: registration failed with status %d\n",
	       node->full_name, ret);

@@ -195,7 +195,7 @@ static int davinci_gpio_of_xlate(struct gpio_chip *gc,
static int davinci_gpio_probe(struct platform_device *pdev)
{
	int i, base;
-	unsigned ngpio;
+	unsigned ngpio, nbank;
	struct davinci_gpio_controller *chips;
	struct davinci_gpio_platform_data *pdata;
	struct davinci_gpio_regs __iomem *regs;

@@ -224,8 +224,9 @@ static int davinci_gpio_probe(struct platform_device *pdev)
	if (WARN_ON(ARCH_NR_GPIOS < ngpio))
		ngpio = ARCH_NR_GPIOS;

+	nbank = DIV_ROUND_UP(ngpio, 32);
	chips = devm_kzalloc(dev,
-			     ngpio * sizeof(struct davinci_gpio_controller),
+			     nbank * sizeof(struct davinci_gpio_controller),
			     GFP_KERNEL);
	if (!chips)
		return -ENOMEM;

@@ -511,7 +512,7 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
		return irq;
	}

-	irq_domain = irq_domain_add_legacy(NULL, ngpio, irq, 0,
+	irq_domain = irq_domain_add_legacy(dev->of_node, ngpio, irq, 0,
					   &davinci_gpio_irq_ops,
					   chips);
	if (!irq_domain) {

@@ -87,6 +87,8 @@ extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern int amdgpu_enable_semaphores;
extern int amdgpu_powerplay;
+extern unsigned amdgpu_pcie_gen_cap;
+extern unsigned amdgpu_pcie_lane_cap;

#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS		3000
#define AMDGPU_MAX_USEC_TIMEOUT			100000	/* 100 ms */

@@ -132,47 +134,6 @@ extern int amdgpu_powerplay;
#define AMDGPU_RESET_VCE			(1 << 13)
#define AMDGPU_RESET_VCE1			(1 << 14)

-/* CG block flags */
-#define AMDGPU_CG_BLOCK_GFX			(1 << 0)
-#define AMDGPU_CG_BLOCK_MC			(1 << 1)
-#define AMDGPU_CG_BLOCK_SDMA			(1 << 2)
-#define AMDGPU_CG_BLOCK_UVD			(1 << 3)
-#define AMDGPU_CG_BLOCK_VCE			(1 << 4)
-#define AMDGPU_CG_BLOCK_HDP			(1 << 5)
-#define AMDGPU_CG_BLOCK_BIF			(1 << 6)
-
-/* CG flags */
-#define AMDGPU_CG_SUPPORT_GFX_MGCG		(1 << 0)
-#define AMDGPU_CG_SUPPORT_GFX_MGLS		(1 << 1)
-#define AMDGPU_CG_SUPPORT_GFX_CGCG		(1 << 2)
-#define AMDGPU_CG_SUPPORT_GFX_CGLS		(1 << 3)
-#define AMDGPU_CG_SUPPORT_GFX_CGTS		(1 << 4)
-#define AMDGPU_CG_SUPPORT_GFX_CGTS_LS		(1 << 5)
-#define AMDGPU_CG_SUPPORT_GFX_CP_LS		(1 << 6)
-#define AMDGPU_CG_SUPPORT_GFX_RLC_LS		(1 << 7)
-#define AMDGPU_CG_SUPPORT_MC_LS			(1 << 8)
-#define AMDGPU_CG_SUPPORT_MC_MGCG		(1 << 9)
-#define AMDGPU_CG_SUPPORT_SDMA_LS		(1 << 10)
-#define AMDGPU_CG_SUPPORT_SDMA_MGCG		(1 << 11)
-#define AMDGPU_CG_SUPPORT_BIF_LS		(1 << 12)
-#define AMDGPU_CG_SUPPORT_UVD_MGCG		(1 << 13)
-#define AMDGPU_CG_SUPPORT_VCE_MGCG		(1 << 14)
-#define AMDGPU_CG_SUPPORT_HDP_LS		(1 << 15)
-#define AMDGPU_CG_SUPPORT_HDP_MGCG		(1 << 16)
-
-/* PG flags */
-#define AMDGPU_PG_SUPPORT_GFX_PG		(1 << 0)
-#define AMDGPU_PG_SUPPORT_GFX_SMG		(1 << 1)
-#define AMDGPU_PG_SUPPORT_GFX_DMG		(1 << 2)
-#define AMDGPU_PG_SUPPORT_UVD			(1 << 3)
-#define AMDGPU_PG_SUPPORT_VCE			(1 << 4)
-#define AMDGPU_PG_SUPPORT_CP			(1 << 5)
-#define AMDGPU_PG_SUPPORT_GDS			(1 << 6)
-#define AMDGPU_PG_SUPPORT_RLC_SMU_HS		(1 << 7)
-#define AMDGPU_PG_SUPPORT_SDMA			(1 << 8)
-#define AMDGPU_PG_SUPPORT_ACP			(1 << 9)
-#define AMDGPU_PG_SUPPORT_SAMU			(1 << 10)
-
/* GFX current status */
#define AMDGPU_GFX_NORMAL_MODE			0x00000000L
#define AMDGPU_GFX_SAFE_MODE			0x00000001L

@@ -606,8 +567,6 @@ struct amdgpu_sa_manager {
	uint32_t		align;
};

-struct amdgpu_sa_bo;
-
/* sub-allocation buffer */
struct amdgpu_sa_bo {
	struct list_head		olist;

@@ -2360,6 +2319,8 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags);
bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
+bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
+				  unsigned long end);
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_mem_reg *mem);

@@ -795,6 +795,12 @@ static int amdgpu_cgs_query_system_info(void *cgs_device,
	case CGS_SYSTEM_INFO_PCIE_MLW:
		sys_info->value = adev->pm.pcie_mlw_mask;
		break;
+	case CGS_SYSTEM_INFO_CG_FLAGS:
+		sys_info->value = adev->cg_flags;
+		break;
+	case CGS_SYSTEM_INFO_PG_FLAGS:
+		sys_info->value = adev->pg_flags;
+		break;
	default:
		return -ENODEV;
	}

@@ -1795,15 +1795,20 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
	}

	/* post card */
-	amdgpu_atom_asic_init(adev->mode_info.atom_context);
+	if (!amdgpu_card_posted(adev))
+		amdgpu_atom_asic_init(adev->mode_info.atom_context);

	r = amdgpu_resume(adev);
	if (r)
		DRM_ERROR("amdgpu_resume failed (%d).\n", r);

	amdgpu_fence_driver_resume(adev);

-	r = amdgpu_ib_ring_tests(adev);
-	if (r)
-		DRM_ERROR("ib ring test failed (%d).\n", r);
+	if (resume) {
+		r = amdgpu_ib_ring_tests(adev);
+		if (r)
+			DRM_ERROR("ib ring test failed (%d).\n", r);
+	}

	r = amdgpu_late_init(adev);
	if (r)

@@ -1933,80 +1938,97 @@ retry:
	return r;
}

+#define AMDGPU_DEFAULT_PCIE_GEN_MASK 0x30007  /* gen: chipset 1/2, asic 1/2/3 */
+#define AMDGPU_DEFAULT_PCIE_MLW_MASK 0x2f0000 /* 1/2/4/8/16 lanes */
+
void amdgpu_get_pcie_info(struct amdgpu_device *adev)
{
	u32 mask;
	int ret;

-	if (pci_is_root_bus(adev->pdev->bus))
+	if (amdgpu_pcie_gen_cap)
+		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
+
+	if (amdgpu_pcie_lane_cap)
+		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
+
+	/* covers APUs as well */
+	if (pci_is_root_bus(adev->pdev->bus)) {
+		if (adev->pm.pcie_gen_mask == 0)
+			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
+		if (adev->pm.pcie_mlw_mask == 0)
+			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		return;
+	}

-	if (amdgpu_pcie_gen2 == 0)
-		return;
-
-	if (adev->flags & AMD_IS_APU)
-		return;
-
-	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
-	if (!ret) {
-		adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
-					  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
-					  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
-
-		if (mask & DRM_PCIE_SPEED_25)
-			adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
-		if (mask & DRM_PCIE_SPEED_50)
-			adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
-		if (mask & DRM_PCIE_SPEED_80)
-			adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
-	}
-	ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
-	if (!ret) {
-		switch (mask) {
-		case 32:
-			adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
-			break;
-		case 16:
-			adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
-			break;
-		case 12:
-			adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
-			break;
-		case 8:
-			adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
-			break;
-		case 4:
-			adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
-			break;
-		case 2:
-			adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
-			break;
-		case 1:
-			adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
-			break;
-		default:
-			break;
-		}
-	}
+	if (adev->pm.pcie_gen_mask == 0) {
+		ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
+		if (!ret) {
+			adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
+
+			if (mask & DRM_PCIE_SPEED_25)
+				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
+			if (mask & DRM_PCIE_SPEED_50)
+				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
+			if (mask & DRM_PCIE_SPEED_80)
+				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
+		} else {
+			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
+		}
+	}
+	if (adev->pm.pcie_mlw_mask == 0) {
+		ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
+		if (!ret) {
+			switch (mask) {
+			case 32:
+				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+				break;
+			case 16:
+				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+				break;
+			case 12:
+				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+				break;
+			case 8:
+				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+				break;
+			case 4:
+				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+				break;
+			case 2:
+				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+				break;
+			case 1:
+				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
+				break;
+			default:
+				break;
+			}
+		} else {
+			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
+		}
+	}
}

@@ -83,6 +83,8 @@ int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;
int amdgpu_enable_semaphores = 0;
int amdgpu_powerplay = -1;
+unsigned amdgpu_pcie_gen_cap = 0;
+unsigned amdgpu_pcie_lane_cap = 0;

MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);

@@ -170,6 +172,12 @@ MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 =
module_param_named(powerplay, amdgpu_powerplay, int, 0444);
#endif

+MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
+module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);
+
+MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))");
+module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444);
+
static struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_CIK
	/* Kaveri */

@@ -142,7 +142,8 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,

		list_for_each_entry(bo, &node->bos, mn_list) {

-			if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
+			if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start,
+							  end))
				continue;

			r = amdgpu_bo_reserve(bo, true);

@@ -354,12 +354,15 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,

	for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i)
		if (fences[i])
-			fences[count++] = fences[i];
+			fences[count++] = fence_get(fences[i]);

	if (count) {
		spin_unlock(&sa_manager->wq.lock);
		t = fence_wait_any_timeout(fences, count, false,
					   MAX_SCHEDULE_TIMEOUT);
+		for (i = 0; i < count; ++i)
+			fence_put(fences[i]);
+
		r = (t > 0) ? 0 : t;
		spin_lock(&sa_manager->wq.lock);
	} else {

@@ -783,6 +783,25 @@ bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm)
	return !!gtt->userptr;
}

+bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
+				  unsigned long end)
+{
+	struct amdgpu_ttm_tt *gtt = (void *)ttm;
+	unsigned long size;
+
+	if (gtt == NULL)
+		return false;
+
+	if (gtt->ttm.ttm.state != tt_bound || !gtt->userptr)
+		return false;
+
+	size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
+	if (gtt->userptr > end || gtt->userptr + size <= start)
+		return false;
+
+	return true;
+}
+
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

@@ -31,6 +31,7 @@
#include "ci_dpm.h"
#include "gfx_v7_0.h"
#include "atom.h"
+#include "amd_pcie.h"
#include <linux/seq_file.h>

#include "smu/smu_7_0_1_d.h"

@@ -5835,18 +5836,16 @@ static int ci_dpm_init(struct amdgpu_device *adev)
	u8 frev, crev;
	struct ci_power_info *pi;
	int ret;
-	u32 mask;

	pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
	if (pi == NULL)
		return -ENOMEM;
	adev->pm.dpm.priv = pi;

-	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
-	if (ret)
-		pi->sys_pcie_mask = 0;
-	else
-		pi->sys_pcie_mask = mask;
+	pi->sys_pcie_mask =
+		(adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
+		CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;

	pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;

	pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1;

@@ -1762,6 +1762,9 @@ static void cik_program_aspm(struct amdgpu_device *adev)
	if (amdgpu_aspm == 0)
		return;

+	if (pci_is_root_bus(adev->pdev->bus))
+		return;
+
	/* XXX double check APUs */
	if (adev->flags & AMD_IS_APU)
		return;

@@ -2332,72 +2335,72 @@ static int cik_common_early_init(void *handle)
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		adev->cg_flags =
-			AMDGPU_CG_SUPPORT_GFX_MGCG |
-			AMDGPU_CG_SUPPORT_GFX_MGLS |
-			/*AMDGPU_CG_SUPPORT_GFX_CGCG |*/
-			AMDGPU_CG_SUPPORT_GFX_CGLS |
-			AMDGPU_CG_SUPPORT_GFX_CGTS |
-			AMDGPU_CG_SUPPORT_GFX_CGTS_LS |
-			AMDGPU_CG_SUPPORT_GFX_CP_LS |
-			AMDGPU_CG_SUPPORT_MC_LS |
-			AMDGPU_CG_SUPPORT_MC_MGCG |
-			AMDGPU_CG_SUPPORT_SDMA_MGCG |
-			AMDGPU_CG_SUPPORT_SDMA_LS |
-			AMDGPU_CG_SUPPORT_BIF_LS |
-			AMDGPU_CG_SUPPORT_VCE_MGCG |
-			AMDGPU_CG_SUPPORT_UVD_MGCG |
-			AMDGPU_CG_SUPPORT_HDP_LS |
-			AMDGPU_CG_SUPPORT_HDP_MGCG;
+			AMD_CG_SUPPORT_GFX_MGCG |
+			AMD_CG_SUPPORT_GFX_MGLS |
+			/*AMD_CG_SUPPORT_GFX_CGCG |*/
+			AMD_CG_SUPPORT_GFX_CGLS |
+			AMD_CG_SUPPORT_GFX_CGTS |
+			AMD_CG_SUPPORT_GFX_CGTS_LS |
+			AMD_CG_SUPPORT_GFX_CP_LS |
+			AMD_CG_SUPPORT_MC_LS |
+			AMD_CG_SUPPORT_MC_MGCG |
+			AMD_CG_SUPPORT_SDMA_MGCG |
+			AMD_CG_SUPPORT_SDMA_LS |
+			AMD_CG_SUPPORT_BIF_LS |
+			AMD_CG_SUPPORT_VCE_MGCG |
+			AMD_CG_SUPPORT_UVD_MGCG |
+			AMD_CG_SUPPORT_HDP_LS |
+			AMD_CG_SUPPORT_HDP_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_HAWAII:
		adev->cg_flags =
-			AMDGPU_CG_SUPPORT_GFX_MGCG |
-			AMDGPU_CG_SUPPORT_GFX_MGLS |
-			/*AMDGPU_CG_SUPPORT_GFX_CGCG |*/
-			AMDGPU_CG_SUPPORT_GFX_CGLS |
-			AMDGPU_CG_SUPPORT_GFX_CGTS |
-			AMDGPU_CG_SUPPORT_GFX_CP_LS |
-			AMDGPU_CG_SUPPORT_MC_LS |
-			AMDGPU_CG_SUPPORT_MC_MGCG |
-			AMDGPU_CG_SUPPORT_SDMA_MGCG |
-			AMDGPU_CG_SUPPORT_SDMA_LS |
-			AMDGPU_CG_SUPPORT_BIF_LS |
-			AMDGPU_CG_SUPPORT_VCE_MGCG |
-			AMDGPU_CG_SUPPORT_UVD_MGCG |
-			AMDGPU_CG_SUPPORT_HDP_LS |
-			AMDGPU_CG_SUPPORT_HDP_MGCG;
+			AMD_CG_SUPPORT_GFX_MGCG |
+			AMD_CG_SUPPORT_GFX_MGLS |
+			/*AMD_CG_SUPPORT_GFX_CGCG |*/
+			AMD_CG_SUPPORT_GFX_CGLS |
+			AMD_CG_SUPPORT_GFX_CGTS |
+			AMD_CG_SUPPORT_GFX_CP_LS |
+			AMD_CG_SUPPORT_MC_LS |
+			AMD_CG_SUPPORT_MC_MGCG |
+			AMD_CG_SUPPORT_SDMA_MGCG |
+			AMD_CG_SUPPORT_SDMA_LS |
+			AMD_CG_SUPPORT_BIF_LS |
+			AMD_CG_SUPPORT_VCE_MGCG |
+			AMD_CG_SUPPORT_UVD_MGCG |
+			AMD_CG_SUPPORT_HDP_LS |
+			AMD_CG_SUPPORT_HDP_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x28;
		break;
	case CHIP_KAVERI:
		adev->cg_flags =
-			AMDGPU_CG_SUPPORT_GFX_MGCG |
-			AMDGPU_CG_SUPPORT_GFX_MGLS |
-			/*AMDGPU_CG_SUPPORT_GFX_CGCG |*/
-			AMDGPU_CG_SUPPORT_GFX_CGLS |
-			AMDGPU_CG_SUPPORT_GFX_CGTS |
-			AMDGPU_CG_SUPPORT_GFX_CGTS_LS |
-			AMDGPU_CG_SUPPORT_GFX_CP_LS |
-			AMDGPU_CG_SUPPORT_SDMA_MGCG |
-			AMDGPU_CG_SUPPORT_SDMA_LS |
-			AMDGPU_CG_SUPPORT_BIF_LS |
-			AMDGPU_CG_SUPPORT_VCE_MGCG |
-			AMDGPU_CG_SUPPORT_UVD_MGCG |
-			AMDGPU_CG_SUPPORT_HDP_LS |
-			AMDGPU_CG_SUPPORT_HDP_MGCG;
+			AMD_CG_SUPPORT_GFX_MGCG |
+			AMD_CG_SUPPORT_GFX_MGLS |
+			/*AMD_CG_SUPPORT_GFX_CGCG |*/
+			AMD_CG_SUPPORT_GFX_CGLS |
+			AMD_CG_SUPPORT_GFX_CGTS |
+			AMD_CG_SUPPORT_GFX_CGTS_LS |
+			AMD_CG_SUPPORT_GFX_CP_LS |
+			AMD_CG_SUPPORT_SDMA_MGCG |
+			AMD_CG_SUPPORT_SDMA_LS |
+			AMD_CG_SUPPORT_BIF_LS |
+			AMD_CG_SUPPORT_VCE_MGCG |
+			AMD_CG_SUPPORT_UVD_MGCG |
+			AMD_CG_SUPPORT_HDP_LS |
+			AMD_CG_SUPPORT_HDP_MGCG;
		adev->pg_flags =
-			/*AMDGPU_PG_SUPPORT_GFX_PG |
-			  AMDGPU_PG_SUPPORT_GFX_SMG |
-			  AMDGPU_PG_SUPPORT_GFX_DMG |*/
-			AMDGPU_PG_SUPPORT_UVD |
-			/*AMDGPU_PG_SUPPORT_VCE |
-			  AMDGPU_PG_SUPPORT_CP |
-			  AMDGPU_PG_SUPPORT_GDS |
-			  AMDGPU_PG_SUPPORT_RLC_SMU_HS |
-			  AMDGPU_PG_SUPPORT_ACP |
-			  AMDGPU_PG_SUPPORT_SAMU |*/
+			/*AMD_PG_SUPPORT_GFX_PG |
+			  AMD_PG_SUPPORT_GFX_SMG |
+			  AMD_PG_SUPPORT_GFX_DMG |*/
+			AMD_PG_SUPPORT_UVD |
+			/*AMD_PG_SUPPORT_VCE |
+			  AMD_PG_SUPPORT_CP |
+			  AMD_PG_SUPPORT_GDS |
+			  AMD_PG_SUPPORT_RLC_SMU_HS |
+			  AMD_PG_SUPPORT_ACP |
+			  AMD_PG_SUPPORT_SAMU |*/
			0;
		if (adev->pdev->device == 0x1312 ||
		    adev->pdev->device == 0x1316 ||

@@ -2409,29 +2412,29 @@ static int cik_common_early_init(void *handle)
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->cg_flags =
-			AMDGPU_CG_SUPPORT_GFX_MGCG |
-			AMDGPU_CG_SUPPORT_GFX_MGLS |
-			/*AMDGPU_CG_SUPPORT_GFX_CGCG |*/
-			AMDGPU_CG_SUPPORT_GFX_CGLS |
-			AMDGPU_CG_SUPPORT_GFX_CGTS |
-			AMDGPU_CG_SUPPORT_GFX_CGTS_LS |
-			AMDGPU_CG_SUPPORT_GFX_CP_LS |
-			AMDGPU_CG_SUPPORT_SDMA_MGCG |
-			AMDGPU_CG_SUPPORT_SDMA_LS |
-			AMDGPU_CG_SUPPORT_BIF_LS |
-			AMDGPU_CG_SUPPORT_VCE_MGCG |
-			AMDGPU_CG_SUPPORT_UVD_MGCG |
-			AMDGPU_CG_SUPPORT_HDP_LS |
-			AMDGPU_CG_SUPPORT_HDP_MGCG;
+			AMD_CG_SUPPORT_GFX_MGCG |
+			AMD_CG_SUPPORT_GFX_MGLS |
+			/*AMD_CG_SUPPORT_GFX_CGCG |*/
+			AMD_CG_SUPPORT_GFX_CGLS |
+			AMD_CG_SUPPORT_GFX_CGTS |
+			AMD_CG_SUPPORT_GFX_CGTS_LS |
+			AMD_CG_SUPPORT_GFX_CP_LS |
+			AMD_CG_SUPPORT_SDMA_MGCG |
+			AMD_CG_SUPPORT_SDMA_LS |
+			AMD_CG_SUPPORT_BIF_LS |
+			AMD_CG_SUPPORT_VCE_MGCG |
+			AMD_CG_SUPPORT_UVD_MGCG |
+			AMD_CG_SUPPORT_HDP_LS |
+			AMD_CG_SUPPORT_HDP_MGCG;
		adev->pg_flags =
-			/*AMDGPU_PG_SUPPORT_GFX_PG |
-			  AMDGPU_PG_SUPPORT_GFX_SMG | */
-			AMDGPU_PG_SUPPORT_UVD |
-			/*AMDGPU_PG_SUPPORT_VCE |
-			  AMDGPU_PG_SUPPORT_CP |
-			  AMDGPU_PG_SUPPORT_GDS |
-			  AMDGPU_PG_SUPPORT_RLC_SMU_HS |
-			  AMDGPU_PG_SUPPORT_SAMU |*/
+			/*AMD_PG_SUPPORT_GFX_PG |
+			  AMD_PG_SUPPORT_GFX_SMG | */
+			AMD_PG_SUPPORT_UVD |
+			/*AMD_PG_SUPPORT_VCE |
+			  AMD_PG_SUPPORT_CP |
+			  AMD_PG_SUPPORT_GDS |
+			  AMD_PG_SUPPORT_RLC_SMU_HS |
+			  AMD_PG_SUPPORT_SAMU |*/
			0;
		if (adev->asic_type == CHIP_KABINI) {
			if (adev->rev_id == 0)

@@ -885,7 +885,7 @@ static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
{
	u32 orig, data;

-	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_MGCG)) {
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
		WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100);
		WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100);
	} else {

@@ -906,7 +906,7 @@ static void cik_enable_sdma_mgls(struct amdgpu_device *adev,
{
	u32 orig, data;

-	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_LS)) {
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
		data |= 0x100;
		if (orig != data)

@@ -445,13 +445,13 @@ static int cz_dpm_init(struct amdgpu_device *adev)
	pi->gfx_pg_threshold = 500;
	pi->caps_fps = true;
	/* uvd */
-	pi->caps_uvd_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_UVD) ? true : false;
+	pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false;
	pi->caps_uvd_dpm = true;
	/* vce */
-	pi->caps_vce_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_VCE) ? true : false;
+	pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false;
	pi->caps_vce_dpm = true;
	/* acp */
-	pi->caps_acp_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_ACP) ? true : false;
+	pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false;
	pi->caps_acp_dpm = true;

	pi->caps_stable_power_state = false;

@@ -4109,7 +4109,7 @@ static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)

	orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL);

-	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGCG)) {
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
		gfx_v7_0_enable_gui_idle_interrupt(adev, true);

		tmp = gfx_v7_0_halt_rlc(adev);

@@ -4147,9 +4147,9 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
{
	u32 data, orig, tmp = 0;

-	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGCG)) {
-		if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGLS) {
-			if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CP_LS) {
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
+		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
+			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
				orig = data = RREG32(mmCP_MEM_SLP_CNTL);
				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
				if (orig != data)

@@ -4176,14 +4176,14 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)

		gfx_v7_0_update_rlc(adev, tmp);

-		if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGTS) {
+		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) {
			orig = data = RREG32(mmCGTS_SM_CTRL_REG);
			data &= ~CGTS_SM_CTRL_REG__SM_MODE_MASK;
			data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
			data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
			data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
-			if ((adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGLS) &&
-			    (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGTS_LS))
+			if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) &&
+			    (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS))
				data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
			data &= ~CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK;
			data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;

@@ -4249,7 +4249,7 @@ static void gfx_v7_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev,
	u32 data, orig;

	orig = data = RREG32(mmRLC_PG_CNTL);
-	if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_RLC_SMU_HS))
+	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
		data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
	else
		data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;

@@ -4263,7 +4263,7 @@ static void gfx_v7_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev,
	u32 data, orig;

	orig = data = RREG32(mmRLC_PG_CNTL);
-	if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_RLC_SMU_HS))
+	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
		data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
	else
		data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;

@@ -4276,7 +4276,7 @@ static void gfx_v7_0_enable_cp_pg(struct amdgpu_device *adev, bool enable)
	u32 data, orig;

	orig = data = RREG32(mmRLC_PG_CNTL);
-	if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_CP))
+	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_CP))
		data &= ~0x8000;
	else
		data |= 0x8000;

@@ -4289,7 +4289,7 @@ static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
	u32 data, orig;

	orig = data = RREG32(mmRLC_PG_CNTL);
-	if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GDS))
+	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GDS))
		data &= ~0x2000;
	else
		data |= 0x2000;

@@ -4370,7 +4370,7 @@ static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
{
	u32 data, orig;

-	if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG)) {
+	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
		orig = data = RREG32(mmRLC_PG_CNTL);
		data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
		if (orig != data)

@@ -4442,7 +4442,7 @@ static void gfx_v7_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
	u32 data, orig;

	orig = data = RREG32(mmRLC_PG_CNTL);
-	if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_SMG))
+	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG))
		data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
	else
		data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;

@@ -4456,7 +4456,7 @@ static void gfx_v7_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev,
	u32 data, orig;

	orig = data = RREG32(mmRLC_PG_CNTL);
-	if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_DMG))
+	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG))
		data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
	else
		data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;

@@ -4623,15 +4623,15 @@ static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev,

static void gfx_v7_0_init_pg(struct amdgpu_device *adev)
{
-	if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG |
-			      AMDGPU_PG_SUPPORT_GFX_SMG |
-			      AMDGPU_PG_SUPPORT_GFX_DMG |
-			      AMDGPU_PG_SUPPORT_CP |
-			      AMDGPU_PG_SUPPORT_GDS |
-			      AMDGPU_PG_SUPPORT_RLC_SMU_HS)) {
+	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
+			      AMD_PG_SUPPORT_GFX_SMG |
+			      AMD_PG_SUPPORT_GFX_DMG |
+			      AMD_PG_SUPPORT_CP |
+			      AMD_PG_SUPPORT_GDS |
+			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
		gfx_v7_0_enable_sclk_slowdown_on_pu(adev, true);
		gfx_v7_0_enable_sclk_slowdown_on_pd(adev, true);
-		if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) {
+		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
			gfx_v7_0_init_gfx_cgpg(adev);
			gfx_v7_0_enable_cp_pg(adev, true);
			gfx_v7_0_enable_gds_pg(adev, true);

@@ -4643,14 +4643,14 @@ static void gfx_v7_0_init_pg(struct amdgpu_device *adev)

static void gfx_v7_0_fini_pg(struct amdgpu_device *adev)
{
-	if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG |
-			      AMDGPU_PG_SUPPORT_GFX_SMG |
-			      AMDGPU_PG_SUPPORT_GFX_DMG |
-			      AMDGPU_PG_SUPPORT_CP |
-			      AMDGPU_PG_SUPPORT_GDS |
-			      AMDGPU_PG_SUPPORT_RLC_SMU_HS)) {
+	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
+			      AMD_PG_SUPPORT_GFX_SMG |
+			      AMD_PG_SUPPORT_GFX_DMG |
+			      AMD_PG_SUPPORT_CP |
+			      AMD_PG_SUPPORT_GDS |
+			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
		gfx_v7_0_update_gfx_pg(adev, false);
-		if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) {
+		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
			gfx_v7_0_enable_cp_pg(adev, false);
			gfx_v7_0_enable_gds_pg(adev, false);
		}

@@ -5527,14 +5527,14 @@ static int gfx_v7_0_set_powergating_state(void *handle,
	if (state == AMD_PG_STATE_GATE)
		gate = true;

-	if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG |
-			      AMDGPU_PG_SUPPORT_GFX_SMG |
-			      AMDGPU_PG_SUPPORT_GFX_DMG |
-			      AMDGPU_PG_SUPPORT_CP |
-			      AMDGPU_PG_SUPPORT_GDS |
-			      AMDGPU_PG_SUPPORT_RLC_SMU_HS)) {
+	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
+			      AMD_PG_SUPPORT_GFX_SMG |
+			      AMD_PG_SUPPORT_GFX_DMG |
+			      AMD_PG_SUPPORT_CP |
+			      AMD_PG_SUPPORT_GDS |
+			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
		gfx_v7_0_update_gfx_pg(adev, gate);
-		if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) {
+		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
			gfx_v7_0_enable_cp_pg(adev, gate);
			gfx_v7_0_enable_gds_pg(adev, gate);
		}

@@ -792,7 +792,7 @@ static void gmc_v7_0_enable_mc_ls(struct amdgpu_device *adev,

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
-		if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS))
+		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
			data |= mc_cg_ls_en[i];
		else
			data &= ~mc_cg_ls_en[i];

@@ -809,7 +809,7 @@ static void gmc_v7_0_enable_mc_mgcg(struct amdgpu_device *adev,

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
-		if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG))
+		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
			data |= mc_cg_en[i];
		else
			data &= ~mc_cg_en[i];

@@ -825,7 +825,7 @@ static void gmc_v7_0_enable_bif_mgls(struct amdgpu_device *adev,

	orig = data = RREG32_PCIE(ixPCIE_CNTL2);

-	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) {
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);

@ -848,7 +848,7 @@ static void gmc_v7_0_enable_hdp_mgcg(struct amdgpu_device *adev,
|
|||
|
||||
orig = data = RREG32(mmHDP_HOST_PATH_CNTL);
|
||||
|
||||
if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
|
||||
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
|
||||
data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
|
||||
else
|
||||
data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);
|
||||
|
@ -864,7 +864,7 @@ static void gmc_v7_0_enable_hdp_ls(struct amdgpu_device *adev,
|
|||
|
||||
orig = data = RREG32(mmHDP_MEM_POWER_LS);
|
||||
|
||||
if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
|
||||
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
|
||||
data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
|
||||
else
|
||||
data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);
|
||||
|
|
|
@@ -2859,11 +2859,11 @@ static int kv_dpm_init(struct amdgpu_device *adev)
 	pi->voltage_drop_t = 0;
 	pi->caps_sclk_throttle_low_notification = false;
 	pi->caps_fps = false; /* true? */
-	pi->caps_uvd_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_UVD) ? true : false;
+	pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false;
 	pi->caps_uvd_dpm = true;
-	pi->caps_vce_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_VCE) ? true : false;
-	pi->caps_samu_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_SAMU) ? true : false;
-	pi->caps_acp_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_ACP) ? true : false;
+	pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false;
+	pi->caps_samu_pg = (adev->pg_flags & AMD_PG_SUPPORT_SAMU) ? true : false;
+	pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false;
 	pi->caps_stable_p_state = false;
 
 	ret = kv_parse_sys_info_table(adev);
@@ -611,7 +611,7 @@ static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
 {
 	u32 orig, data;
 
-	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG)) {
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
 		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
 		data = 0xfff;
 		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

@@ -830,6 +830,9 @@ static int uvd_v4_2_set_clockgating_state(void *handle,
 	bool gate = false;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
+		return 0;
+
 	if (state == AMD_CG_STATE_GATE)
 		gate = true;
 
@@ -848,7 +851,10 @@ static int uvd_v4_2_set_powergating_state(void *handle,
 	 * revisit this when there is a cleaner line between
 	 * the smc and the hw blocks
 	 */
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
+		return 0;
+
 	if (state == AMD_PG_STATE_GATE) {
 		uvd_v4_2_stop(adev);
@@ -774,6 +774,11 @@ static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
 static int uvd_v5_0_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
+		return 0;
+
 	return 0;
 }
 
@@ -789,6 +794,9 @@ static int uvd_v5_0_set_powergating_state(void *handle,
 	 */
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
+		return 0;
+
 	if (state == AMD_PG_STATE_GATE) {
 		uvd_v5_0_stop(adev);
 		return 0;
@@ -532,7 +532,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
 	uvd_v6_0_mc_resume(adev);
 
 	/* Set dynamic clock gating in S/W control mode */
-	if (adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG) {
+	if (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG) {
 		if (adev->flags & AMD_IS_APU)
 			cz_set_uvd_clock_gating_branches(adev, false);
 		else

@@ -1000,7 +1000,7 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
 
-	if (!(adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG))
+	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
 		return 0;
 
 	if (enable) {

@@ -1030,6 +1030,9 @@ static int uvd_v6_0_set_powergating_state(void *handle,
 	 */
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
+		return 0;
+
 	if (state == AMD_PG_STATE_GATE) {
 		uvd_v6_0_stop(adev);
 		return 0;
@@ -373,7 +373,7 @@ static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
 {
 	bool sw_cg = false;
 
-	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG)) {
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) {
 		if (sw_cg)
 			vce_v2_0_set_sw_cg(adev, true);
 		else

@@ -608,6 +608,9 @@ static int vce_v2_0_set_powergating_state(void *handle,
 	 */
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
+		return 0;
+
 	if (state == AMD_PG_STATE_GATE)
 		/* XXX do we need a vce_v2_0_stop()? */
 		return 0;
@@ -277,7 +277,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
 	WREG32_P(mmVCE_STATUS, 0, ~1);
 
 	/* Set Clock-Gating off */
-	if (adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG)
+	if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
 		vce_v3_0_set_vce_sw_clock_gating(adev, false);
 
 	if (r) {

@@ -676,7 +676,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
 	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
 	int i;
 
-	if (!(adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG))
+	if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
 		return 0;
 
 	mutex_lock(&adev->grbm_idx_mutex);

@@ -728,6 +728,9 @@ static int vce_v3_0_set_powergating_state(void *handle,
 	 */
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
+		return 0;
+
 	if (state == AMD_PG_STATE_GATE)
 		/* XXX do we need a vce_v3_0_stop()? */
 		return 0;
@@ -1457,8 +1457,7 @@ static int vi_common_early_init(void *handle)
 	case CHIP_STONEY:
 		adev->has_uvd = true;
 		adev->cg_flags = 0;
-		/* Disable UVD pg */
-		adev->pg_flags = /* AMDGPU_PG_SUPPORT_UVD | */AMDGPU_PG_SUPPORT_VCE;
+		adev->pg_flags = 0;
 		adev->external_rev_id = adev->rev_id + 0x1;
 		break;
 	default:
@@ -85,6 +85,38 @@ enum amd_powergating_state {
 	AMD_PG_STATE_UNGATE,
 };
 
+/* CG flags */
+#define AMD_CG_SUPPORT_GFX_MGCG (1 << 0)
+#define AMD_CG_SUPPORT_GFX_MGLS (1 << 1)
+#define AMD_CG_SUPPORT_GFX_CGCG (1 << 2)
+#define AMD_CG_SUPPORT_GFX_CGLS (1 << 3)
+#define AMD_CG_SUPPORT_GFX_CGTS (1 << 4)
+#define AMD_CG_SUPPORT_GFX_CGTS_LS (1 << 5)
+#define AMD_CG_SUPPORT_GFX_CP_LS (1 << 6)
+#define AMD_CG_SUPPORT_GFX_RLC_LS (1 << 7)
+#define AMD_CG_SUPPORT_MC_LS (1 << 8)
+#define AMD_CG_SUPPORT_MC_MGCG (1 << 9)
+#define AMD_CG_SUPPORT_SDMA_LS (1 << 10)
+#define AMD_CG_SUPPORT_SDMA_MGCG (1 << 11)
+#define AMD_CG_SUPPORT_BIF_LS (1 << 12)
+#define AMD_CG_SUPPORT_UVD_MGCG (1 << 13)
+#define AMD_CG_SUPPORT_VCE_MGCG (1 << 14)
+#define AMD_CG_SUPPORT_HDP_LS (1 << 15)
+#define AMD_CG_SUPPORT_HDP_MGCG (1 << 16)
+
+/* PG flags */
+#define AMD_PG_SUPPORT_GFX_PG (1 << 0)
+#define AMD_PG_SUPPORT_GFX_SMG (1 << 1)
+#define AMD_PG_SUPPORT_GFX_DMG (1 << 2)
+#define AMD_PG_SUPPORT_UVD (1 << 3)
+#define AMD_PG_SUPPORT_VCE (1 << 4)
+#define AMD_PG_SUPPORT_CP (1 << 5)
+#define AMD_PG_SUPPORT_GDS (1 << 6)
+#define AMD_PG_SUPPORT_RLC_SMU_HS (1 << 7)
+#define AMD_PG_SUPPORT_SDMA (1 << 8)
+#define AMD_PG_SUPPORT_ACP (1 << 9)
+#define AMD_PG_SUPPORT_SAMU (1 << 10)
+
 enum amd_pm_state_type {
 	/* not used for dpm */
 	POWER_STATE_TYPE_DEFAULT,
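The hunks above all consume these masks the same way. The following is an editorial sketch of that check pattern, not code from the patch; the helper name is invented, while the flag names and the pg_flags field come from the hunks themselves.

    /* Sketch: pg_flags is a plain capability bitmask; each IP block
     * tests its bit before touching power-gating registers. */
    static bool sketch_uvd_can_powergate(struct amdgpu_device *adev)
    {
            return (adev->pg_flags & AMD_PG_SUPPORT_UVD) != 0;
    }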
@@ -109,6 +109,8 @@ enum cgs_system_info_id {
 	CGS_SYSTEM_INFO_ADAPTER_BDF_ID = 1,
 	CGS_SYSTEM_INFO_PCIE_GEN_INFO,
 	CGS_SYSTEM_INFO_PCIE_MLW,
+	CGS_SYSTEM_INFO_CG_FLAGS,
+	CGS_SYSTEM_INFO_PG_FLAGS,
 	CGS_SYSTEM_INFO_ID_MAXIMUM,
 };
 
@@ -174,6 +174,8 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
 {
 	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
 	uint32_t i;
+	struct cgs_system_info sys_info = {0};
+	int result;
 
 	cz_hwmgr->gfx_ramp_step = 256*25/100;
 

@@ -247,6 +249,22 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
 				PHM_PlatformCaps_DisableVoltageIsland);
 
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+		      PHM_PlatformCaps_UVDPowerGating);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+		      PHM_PlatformCaps_VCEPowerGating);
+	sys_info.size = sizeof(struct cgs_system_info);
+	sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
+	result = cgs_query_system_info(hwmgr->device, &sys_info);
+	if (!result) {
+		if (sys_info.value & AMD_PG_SUPPORT_UVD)
+			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+				    PHM_PlatformCaps_UVDPowerGating);
+		if (sys_info.value & AMD_PG_SUPPORT_VCE)
+			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+				    PHM_PlatformCaps_VCEPowerGating);
+	}
+
 	return 0;
 }
 
@@ -4451,6 +4451,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 	pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
 	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
 	phw_tonga_ulv_parm *ulv;
+	struct cgs_system_info sys_info = {0};
 
 	PP_ASSERT_WITH_CODE((NULL != hwmgr),
 		"Invalid Parameter!", return -1;);

@@ -4615,9 +4616,23 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 
 	data->vddc_phase_shed_control = 0;
 
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+		      PHM_PlatformCaps_UVDPowerGating);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+		      PHM_PlatformCaps_VCEPowerGating);
+	sys_info.size = sizeof(struct cgs_system_info);
+	sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
+	result = cgs_query_system_info(hwmgr->device, &sys_info);
+	if (!result) {
+		if (sys_info.value & AMD_PG_SUPPORT_UVD)
+			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+				    PHM_PlatformCaps_UVDPowerGating);
+		if (sys_info.value & AMD_PG_SUPPORT_VCE)
+			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+				    PHM_PlatformCaps_VCEPowerGating);
+	}
+
 	if (0 == result) {
-		struct cgs_system_info sys_info = {0};
-
 		data->is_tlu_enabled = 0;
 		hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
 			TONGA_MAX_HARDWARE_POWERLEVELS;
@@ -349,8 +349,13 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
 			/* see if we can skip over some allocations */
 		} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
 
+		for (i = 0; i < RADEON_NUM_RINGS; ++i)
+			radeon_fence_ref(fences[i]);
+
 		spin_unlock(&sa_manager->wq.lock);
 		r = radeon_fence_wait_any(rdev, fences, false);
+		for (i = 0; i < RADEON_NUM_RINGS; ++i)
+			radeon_fence_unref(&fences[i]);
 		spin_lock(&sa_manager->wq.lock);
 		/* if we have nothing to wait for block */
 		if (r == -ENOENT) {
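A minimal sketch of the rule this radeon hunk enforces: an object used after its protecting lock is dropped must hold its own reference. The function below is hypothetical; the radeon_fence_ref()/radeon_fence_unref() calls and the lock choreography are taken from the hunk, and the NULL guards are an added assumption since a ring may have no pending fence.

    static int sketch_wait_fences_unlocked(struct radeon_device *rdev,
                                           struct radeon_fence **fences,
                                           spinlock_t *lock)
    {
            int i, r;

            /* pin every fence before sleeping outside the lock */
            for (i = 0; i < RADEON_NUM_RINGS; ++i)
                    if (fences[i])
                            radeon_fence_ref(fences[i]);
            spin_unlock(lock);
            r = radeon_fence_wait_any(rdev, fences, false);
            for (i = 0; i < RADEON_NUM_RINGS; ++i)
                    radeon_fence_unref(&fences[i]);  /* balance the refs */
            spin_lock(lock);
            return r;
    }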
@@ -336,7 +336,6 @@ static ssize_t _show_port_gid_attr(struct ib_port *p,
 	union ib_gid gid;
 	struct ib_gid_attr gid_attr = {};
 	ssize_t ret;
-	va_list args;
 
 	ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid,
 			   &gid_attr);

@@ -348,7 +347,6 @@ static ssize_t _show_port_gid_attr(struct ib_port *p,
 err:
 	if (gid_attr.ndev)
 		dev_put(gid_attr.ndev);
-	va_end(args);
 	return ret;
 }
 
@@ -228,6 +228,11 @@ static int ocrdma_alloc_resources(struct ocrdma_dev *dev)
 
 	ocrdma_alloc_pd_pool(dev);
 
+	if (!ocrdma_alloc_stats_resources(dev)) {
+		pr_err("%s: stats resource allocation failed\n", __func__);
+		goto alloc_err;
+	}
+
 	spin_lock_init(&dev->av_tbl.lock);
 	spin_lock_init(&dev->flush_q_lock);
 	return 0;

@@ -238,6 +243,7 @@ alloc_err:
 
 static void ocrdma_free_resources(struct ocrdma_dev *dev)
 {
+	ocrdma_release_stats_resources(dev);
 	kfree(dev->stag_arr);
 	kfree(dev->qp_tbl);
 	kfree(dev->cq_tbl);
@@ -64,10 +64,11 @@ static int ocrdma_add_stat(char *start, char *pcur,
 	return cpy_len;
 }
 
-static bool ocrdma_alloc_stats_mem(struct ocrdma_dev *dev)
+bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev)
 {
 	struct stats_mem *mem = &dev->stats_mem;
 
+	mutex_init(&dev->stats_lock);
 	/* Alloc mbox command mem*/
 	mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req),
 			sizeof(struct ocrdma_rdma_stats_resp));

@@ -91,13 +92,14 @@ static bool ocrdma_alloc_stats_mem(struct ocrdma_dev *dev)
 	return true;
 }
 
-static void ocrdma_release_stats_mem(struct ocrdma_dev *dev)
+void ocrdma_release_stats_resources(struct ocrdma_dev *dev)
 {
 	struct stats_mem *mem = &dev->stats_mem;
 
 	if (mem->va)
 		dma_free_coherent(&dev->nic_info.pdev->dev, mem->size,
 				  mem->va, mem->pa);
+	mem->va = NULL;
 	kfree(mem->debugfs_mem);
 }
 
@@ -838,15 +840,9 @@ void ocrdma_add_port_stats(struct ocrdma_dev *dev)
 			      &dev->reset_stats, &ocrdma_dbg_ops))
 		goto err;
 
-	/* Now create dma_mem for stats mbx command */
-	if (!ocrdma_alloc_stats_mem(dev))
-		goto err;
-
-	mutex_init(&dev->stats_lock);
-
 	return;
 err:
-	ocrdma_release_stats_mem(dev);
 	debugfs_remove_recursive(dev->dir);
 	dev->dir = NULL;
 }

@@ -855,9 +851,7 @@ void ocrdma_rem_port_stats(struct ocrdma_dev *dev)
 {
 	if (!dev->dir)
 		return;
-	debugfs_remove(dev->dir);
 	mutex_destroy(&dev->stats_lock);
-	ocrdma_release_stats_mem(dev);
+	debugfs_remove_recursive(dev->dir);
 }
 
 void ocrdma_init_debugfs(void)
@@ -65,6 +65,8 @@ enum OCRDMA_STATS_TYPE {
 
 void ocrdma_rem_debugfs(void);
 void ocrdma_init_debugfs(void);
+bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev);
+void ocrdma_release_stats_resources(struct ocrdma_dev *dev);
 void ocrdma_rem_port_stats(struct ocrdma_dev *dev);
 void ocrdma_add_port_stats(struct ocrdma_dev *dev);
 int ocrdma_pma_counters(struct ocrdma_dev *dev,
@@ -125,8 +125,8 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
 					IB_DEVICE_SYS_IMAGE_GUID |
 					IB_DEVICE_LOCAL_DMA_LKEY |
 					IB_DEVICE_MEM_MGT_EXTENSIONS;
-	attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge);
-	attr->max_sge_rd = 0;
+	attr->max_sge = dev->attr.max_send_sge;
+	attr->max_sge_rd = attr->max_sge;
 	attr->max_cq = dev->attr.max_cq;
 	attr->max_cqe = dev->attr.max_cqe;
 	attr->max_mr = dev->attr.max_mr;
@@ -2726,8 +2726,7 @@ static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe)
 		OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
 	ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
 						OCRDMA_CQE_SRCQP_MASK;
-	ibwc->pkey_index = le32_to_cpu(cqe->ud.rxlen_pkey) &
-						OCRDMA_CQE_PKEY_MASK;
+	ibwc->pkey_index = 0;
 	ibwc->wc_flags = IB_WC_GRH;
 	ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
 					OCRDMA_CQE_UD_XFER_LEN_SHIFT);
@@ -245,8 +245,6 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 	skb_reset_mac_header(skb);
 	skb_pull(skb, IPOIB_ENCAP_LEN);
 
-	skb->truesize = SKB_TRUESIZE(skb->len);
-
 	++dev->stats.rx_packets;
 	dev->stats.rx_bytes += skb->len;
 
@@ -1207,7 +1207,6 @@ static void xpad_led_disconnect(struct usb_xpad *xpad)
 #else
 static int xpad_led_probe(struct usb_xpad *xpad) { return 0; }
 static void xpad_led_disconnect(struct usb_xpad *xpad) { }
-static void xpad_identify_controller(struct usb_xpad *xpad) { }
 #endif
 
 static int xpad_start_input(struct usb_xpad *xpad)
@@ -235,7 +235,7 @@ struct adp5589_kpad {
 	unsigned short gpimapsize;
 	unsigned extend_cfg;
 	bool is_adp5585;
-	bool adp5585_support_row5;
+	bool support_row5;
 #ifdef CONFIG_GPIOLIB
 	unsigned char gpiomap[ADP5589_MAXGPIO];
 	bool export_gpio;

@@ -485,7 +485,7 @@ static int adp5589_build_gpiomap(struct adp5589_kpad *kpad,
 	if (kpad->extend_cfg & C4_EXTEND_CFG)
 		pin_used[kpad->var->c4_extend_cfg] = true;
 
-	if (!kpad->adp5585_support_row5)
+	if (!kpad->support_row5)
 		pin_used[5] = true;
 
 	for (i = 0; i < kpad->var->maxgpio; i++)

@@ -884,12 +884,13 @@ static int adp5589_probe(struct i2c_client *client,
 
 	switch (id->driver_data) {
 	case ADP5585_02:
-		kpad->adp5585_support_row5 = true;
+		kpad->support_row5 = true;
 	case ADP5585_01:
 		kpad->is_adp5585 = true;
 		kpad->var = &const_adp5585;
 		break;
 	case ADP5589:
+		kpad->support_row5 = true;
 		kpad->var = &const_adp5589;
 		break;
 	}
@@ -304,8 +304,10 @@ static int cap11xx_init_leds(struct device *dev,
 		led->cdev.brightness = LED_OFF;
 
 		error = of_property_read_u32(child, "reg", &reg);
-		if (error != 0 || reg >= num_leds)
+		if (error != 0 || reg >= num_leds) {
+			of_node_put(child);
 			return -EINVAL;
+		}
 
 		led->reg = reg;
 		led->priv = priv;

@@ -313,8 +315,10 @@ static int cap11xx_init_leds(struct device *dev,
 		INIT_WORK(&led->work, cap11xx_led_work);
 
 		error = devm_led_classdev_register(dev, &led->cdev);
-		if (error)
+		if (error) {
+			of_node_put(child);
 			return error;
+		}
 
 		priv->num_leds++;
 		led++;
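A sketch of the reference-counting rule the cap11xx fix applies: for_each_child_of_node() takes a reference on each child node, and any early exit from the loop must drop it by hand. The function below is illustrative, not from the patch; only the OF helpers are real kernel API.

    #include <linux/of.h>

    static int sketch_count_children_with_reg(struct device_node *parent)
    {
            struct device_node *child;
            u32 reg;
            int n = 0;

            for_each_child_of_node(parent, child) {
                    if (of_property_read_u32(child, "reg", &reg)) {
                            of_node_put(child);  /* balance the iterator's get */
                            return -EINVAL;
                    }
                    n++;
            }
            return n;
    }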
@@ -733,7 +733,7 @@ config INPUT_XEN_KBDDEV_FRONTEND
 	  module will be called xen-kbdfront.
 
 config INPUT_SIRFSOC_ONKEY
-	bool "CSR SiRFSoC power on/off/suspend key support"
+	tristate "CSR SiRFSoC power on/off/suspend key support"
 	depends on ARCH_SIRF && OF
 	default y
 	help
@@ -101,7 +101,7 @@ static void sirfsoc_pwrc_close(struct input_dev *input)
 static const struct of_device_id sirfsoc_pwrc_of_match[] = {
 	{ .compatible = "sirf,prima2-pwrc" },
 	{},
-}
+};
 MODULE_DEVICE_TABLE(of, sirfsoc_pwrc_of_match);
 
 static int sirfsoc_pwrc_probe(struct platform_device *pdev)
@@ -458,8 +458,6 @@ int vmmouse_init(struct psmouse *psmouse)
 	priv->abs_dev = abs_dev;
 	psmouse->private = priv;
 
-	input_set_capability(rel_dev, EV_REL, REL_WHEEL);
-
 	/* Set up and register absolute device */
 	snprintf(priv->phys, sizeof(priv->phys), "%s/input1",
 		 psmouse->ps2dev.serio->phys);

@@ -475,10 +473,6 @@ int vmmouse_init(struct psmouse *psmouse)
 	abs_dev->id.version = psmouse->model;
 	abs_dev->dev.parent = &psmouse->ps2dev.serio->dev;
 
-	error = input_register_device(priv->abs_dev);
-	if (error)
-		goto init_fail;
-
 	/* Set absolute device capabilities */
 	input_set_capability(abs_dev, EV_KEY, BTN_LEFT);
 	input_set_capability(abs_dev, EV_KEY, BTN_RIGHT);

@@ -488,6 +482,13 @@ int vmmouse_init(struct psmouse *psmouse)
 	input_set_abs_params(abs_dev, ABS_X, 0, VMMOUSE_MAX_X, 0, 0);
 	input_set_abs_params(abs_dev, ABS_Y, 0, VMMOUSE_MAX_Y, 0, 0);
 
+	error = input_register_device(priv->abs_dev);
+	if (error)
+		goto init_fail;
+
+	/* Add wheel capability to the relative device */
+	input_set_capability(rel_dev, EV_REL, REL_WHEEL);
+
 	psmouse->protocol_handler = vmmouse_process_byte;
 	psmouse->disconnect = vmmouse_disconnect;
 	psmouse->reconnect = vmmouse_reconnect;
@@ -134,7 +134,7 @@ static void serio_find_driver(struct serio *serio)
 	int error;
 
 	error = device_attach(&serio->dev);
-	if (error < 0)
+	if (error < 0 && error != -EPROBE_DEFER)
 		dev_warn(&serio->dev,
 			 "device_attach() failed for %s (%s), error: %d\n",
 			 serio->phys, serio->name, error);
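A sketch of the idea behind the serio change: -EPROBE_DEFER means "retry later once dependencies appear", not "broken device", so it should not be logged as a failure. The wrapper below is hypothetical; device_attach() and dev_warn() are real kernel API.

    static void sketch_attach_quietly(struct device *dev)
    {
            int error = device_attach(dev);

            /* deferral is an expected, retried outcome: stay silent */
            if (error < 0 && error != -EPROBE_DEFER)
                    dev_warn(dev, "device_attach() failed, error: %d\n", error);
    }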
@@ -21,6 +21,7 @@
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
@@ -822,16 +822,22 @@ static void edt_ft5x06_ts_get_defaults(struct device *dev,
 	int error;
 
 	error = device_property_read_u32(dev, "threshold", &val);
-	if (!error)
-		reg_addr->reg_threshold = val;
+	if (!error) {
+		edt_ft5x06_register_write(tsdata, reg_addr->reg_threshold, val);
+		tsdata->threshold = val;
+	}
 
 	error = device_property_read_u32(dev, "gain", &val);
-	if (!error)
-		reg_addr->reg_gain = val;
+	if (!error) {
+		edt_ft5x06_register_write(tsdata, reg_addr->reg_gain, val);
+		tsdata->gain = val;
+	}
 
 	error = device_property_read_u32(dev, "offset", &val);
-	if (!error)
-		reg_addr->reg_offset = val;
+	if (!error) {
+		edt_ft5x06_register_write(tsdata, reg_addr->reg_offset, val);
+		tsdata->offset = val;
+	}
 }
 
 static void
@@ -47,13 +47,10 @@
 #include "queue.h"
 
 MODULE_ALIAS("mmc:block");
-
-#ifdef KERNEL
 #ifdef MODULE_PARAM_PREFIX
 #undef MODULE_PARAM_PREFIX
 #endif
 #define MODULE_PARAM_PREFIX "mmcblk."
-#endif
 
 #define INAND_CMD38_ARG_EXT_CSD 113
 #define INAND_CMD38_ARG_ERASE 0x00
@@ -655,8 +652,10 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
 	}
 
 	md = mmc_blk_get(bdev->bd_disk);
-	if (!md)
+	if (!md) {
+		err = -EINVAL;
 		goto cmd_err;
+	}
 
 	card = md->queue.card;
 	if (IS_ERR(card)) {
@@ -925,6 +925,10 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
 
 		dma_addr = dma_map_page(dma_dev, sg_page(sg), 0,
 					PAGE_SIZE, dir);
+		if (dma_mapping_error(dma_dev, dma_addr)) {
+			data->error = -EFAULT;
+			break;
+		}
 		if (direction == DMA_TO_DEVICE)
 			t->tx_dma = dma_addr + sg->offset;
 		else

@@ -1393,10 +1397,12 @@ static int mmc_spi_probe(struct spi_device *spi)
 		host->dma_dev = dev;
 		host->ones_dma = dma_map_single(dev, ones,
 				MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, host->ones_dma))
+			goto fail_ones_dma;
 		host->data_dma = dma_map_single(dev, host->data,
 				sizeof(*host->data), DMA_BIDIRECTIONAL);
-
-		/* REVISIT in theory those map operations can fail... */
+		if (dma_mapping_error(dev, host->data_dma))
+			goto fail_data_dma;
 
 		dma_sync_single_for_cpu(host->dma_dev,
 				host->data_dma, sizeof(*host->data),

@@ -1462,6 +1468,11 @@ fail_glue_init:
 	if (host->dma_dev)
 		dma_unmap_single(host->dma_dev, host->data_dma,
 				sizeof(*host->data), DMA_BIDIRECTIONAL);
+fail_data_dma:
+	if (host->dma_dev)
+		dma_unmap_single(host->dma_dev, host->ones_dma,
+				MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
+fail_ones_dma:
 	kfree(host->data);
 
 fail_nobuf1:
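The mmc_spi hunks enforce one rule: every DMA mapping can fail and must be validated with dma_mapping_error() before the address is used, unwinding earlier mappings on failure. A minimal, hypothetical helper showing the check; dma_map_single() and dma_mapping_error() are real kernel API.

    static int sketch_map_one(struct device *dev, void *buf, size_t len,
                              dma_addr_t *dma)
    {
            *dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, *dma))
                    return -ENOMEM;  /* caller unwinds prior mappings */
            return 0;
    }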
@@ -86,7 +86,7 @@ struct pxamci_host {
 static inline void pxamci_init_ocr(struct pxamci_host *host)
 {
 #ifdef CONFIG_REGULATOR
-	host->vcc = regulator_get_optional(mmc_dev(host->mmc), "vmmc");
+	host->vcc = devm_regulator_get_optional(mmc_dev(host->mmc), "vmmc");
 
 	if (IS_ERR(host->vcc))
 		host->vcc = NULL;

@@ -654,12 +654,8 @@ static int pxamci_probe(struct platform_device *pdev)
 
 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	irq = platform_get_irq(pdev, 0);
-	if (!r || irq < 0)
-		return -ENXIO;
-
-	r = request_mem_region(r->start, SZ_4K, DRIVER_NAME);
-	if (!r)
-		return -EBUSY;
+	if (irq < 0)
+		return irq;
 
 	mmc = mmc_alloc_host(sizeof(struct pxamci_host), &pdev->dev);
 	if (!mmc) {

@@ -695,7 +691,7 @@ static int pxamci_probe(struct platform_device *pdev)
 	host->pdata = pdev->dev.platform_data;
 	host->clkrt = CLKRT_OFF;
 
-	host->clk = clk_get(&pdev->dev, NULL);
+	host->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(host->clk)) {
 		ret = PTR_ERR(host->clk);
 		host->clk = NULL;

@@ -727,9 +723,9 @@ static int pxamci_probe(struct platform_device *pdev)
 	host->irq = irq;
 	host->imask = MMC_I_MASK_ALL;
 
-	host->base = ioremap(r->start, SZ_4K);
-	if (!host->base) {
-		ret = -ENOMEM;
+	host->base = devm_ioremap_resource(&pdev->dev, r);
+	if (IS_ERR(host->base)) {
+		ret = PTR_ERR(host->base);
 		goto out;
 	}
 

@@ -742,7 +738,8 @@ static int pxamci_probe(struct platform_device *pdev)
 	writel(64, host->base + MMC_RESTO);
 	writel(host->imask, host->base + MMC_I_MASK);
 
-	ret = request_irq(host->irq, pxamci_irq, 0, DRIVER_NAME, host);
+	ret = devm_request_irq(&pdev->dev, host->irq, pxamci_irq, 0,
+			       DRIVER_NAME, host);
 	if (ret)
 		goto out;
 

@@ -804,7 +801,7 @@ static int pxamci_probe(struct platform_device *pdev)
 			dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
 			goto out;
 		} else {
-			mmc->caps |= host->pdata->gpio_card_ro_invert ?
+			mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
 				0 : MMC_CAP2_RO_ACTIVE_HIGH;
 		}
 

@@ -833,14 +830,9 @@ out:
 			dma_release_channel(host->dma_chan_rx);
 		if (host->dma_chan_tx)
 			dma_release_channel(host->dma_chan_tx);
-		if (host->base)
-			iounmap(host->base);
-		if (host->clk)
-			clk_put(host->clk);
 	}
 	if (mmc)
 		mmc_free_host(mmc);
-	release_resource(r);
 	return ret;
 }
 

@@ -859,9 +851,6 @@ static int pxamci_remove(struct platform_device *pdev)
 		gpio_ro = host->pdata->gpio_card_ro;
 		gpio_power = host->pdata->gpio_power;
 	}
-	if (host->vcc)
-		regulator_put(host->vcc);
 
 	if (host->pdata && host->pdata->exit)
 		host->pdata->exit(&pdev->dev, mmc);
 

@@ -870,16 +859,10 @@ static int pxamci_remove(struct platform_device *pdev)
 		       END_CMD_RES|PRG_DONE|DATA_TRAN_DONE,
 		       host->base + MMC_I_MASK);
 
-		free_irq(host->irq, host);
 		dmaengine_terminate_all(host->dma_chan_rx);
 		dmaengine_terminate_all(host->dma_chan_tx);
 		dma_release_channel(host->dma_chan_rx);
 		dma_release_channel(host->dma_chan_tx);
-		iounmap(host->base);
-
-		clk_put(host->clk);
-
-		release_resource(host->res);
 
 		mmc_free_host(mmc);
 	}
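The whole pxamci series above has one shape: resources acquired through devm_* helpers are released automatically by the device core on probe failure and on remove, so the manual iounmap()/clk_put()/free_irq()/release_resource() unwind code can simply be deleted. A minimal sketch under that assumption; the probe function is hypothetical, the devm_* calls are real kernel API.

    static int sketch_probe(struct platform_device *pdev)
    {
            struct resource *r;
            void __iomem *base;
            struct clk *clk;

            r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            base = devm_ioremap_resource(&pdev->dev, r);  /* also requests the region */
            if (IS_ERR(base))
                    return PTR_ERR(base);

            clk = devm_clk_get(&pdev->dev, NULL);
            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            return 0;  /* nothing to undo by hand on any exit path */
    }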
@@ -146,6 +146,33 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
 	.ops = &sdhci_acpi_ops_int,
 };
 
+static int bxt_get_cd(struct mmc_host *mmc)
+{
+	int gpio_cd = mmc_gpio_get_cd(mmc);
+	struct sdhci_host *host = mmc_priv(mmc);
+	unsigned long flags;
+	int ret = 0;
+
+	if (!gpio_cd)
+		return 0;
+
+	pm_runtime_get_sync(mmc->parent);
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	if (host->flags & SDHCI_DEVICE_DEAD)
+		goto out;
+
+	ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
+out:
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	pm_runtime_mark_last_busy(mmc->parent);
+	pm_runtime_put_autosuspend(mmc->parent);
+
+	return ret;
+}
+
 static int sdhci_acpi_emmc_probe_slot(struct platform_device *pdev,
 				      const char *hid, const char *uid)
 {

@@ -196,6 +223,9 @@ static int sdhci_acpi_sd_probe_slot(struct platform_device *pdev,
 
 	/* Platform specific code during sd probe slot goes here */
 
+	if (hid && !strcmp(hid, "80865ACA"))
+		host->mmc_host_ops.get_cd = bxt_get_cd;
+
 	return 0;
 }
 
@@ -217,6 +217,7 @@ static int sdhci_at91_probe(struct platform_device *pdev)
 pm_runtime_disable:
 	pm_runtime_disable(&pdev->dev);
 	pm_runtime_set_suspended(&pdev->dev);
+	pm_runtime_put_noidle(&pdev->dev);
 clocks_disable_unprepare:
 	clk_disable_unprepare(priv->gck);
 	clk_disable_unprepare(priv->mainck);
@@ -330,6 +330,33 @@ static void spt_read_drive_strength(struct sdhci_host *host)
 	sdhci_pci_spt_drive_strength = 0x10 | ((val >> 12) & 0xf);
 }
 
+static int bxt_get_cd(struct mmc_host *mmc)
+{
+	int gpio_cd = mmc_gpio_get_cd(mmc);
+	struct sdhci_host *host = mmc_priv(mmc);
+	unsigned long flags;
+	int ret = 0;
+
+	if (!gpio_cd)
+		return 0;
+
+	pm_runtime_get_sync(mmc->parent);
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	if (host->flags & SDHCI_DEVICE_DEAD)
+		goto out;
+
+	ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
+out:
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	pm_runtime_mark_last_busy(mmc->parent);
+	pm_runtime_put_autosuspend(mmc->parent);
+
+	return ret;
+}
+
 static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
 {
 	slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |

@@ -362,6 +389,10 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
 	slot->cd_con_id = NULL;
 	slot->cd_idx = 0;
 	slot->cd_override_level = true;
+	if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
+	    slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD)
+		slot->host->mmc_host_ops.get_cd = bxt_get_cd;
+
 	return 0;
 }
 
@@ -1360,7 +1360,7 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 	sdhci_runtime_pm_get(host);
 
 	/* Firstly check card presence */
-	present = sdhci_do_get_cd(host);
+	present = mmc->ops->get_cd(mmc);
 
 	spin_lock_irqsave(&host->lock, flags);
 
@@ -2849,6 +2849,8 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
 
 	host = mmc_priv(mmc);
 	host->mmc = mmc;
+	host->mmc_host_ops = sdhci_ops;
+	mmc->ops = &host->mmc_host_ops;
 
 	return host;
 }

@@ -3037,7 +3039,6 @@ int sdhci_add_host(struct sdhci_host *host)
 	/*
 	 * Set host parameters.
 	 */
-	mmc->ops = &sdhci_ops;
 	max_clk = host->max_clk;
 
 	if (host->ops->get_min_clock)
@@ -430,6 +430,7 @@ struct sdhci_host {
 
 	/* Internal data */
 	struct mmc_host *mmc;	/* MMC structure */
+	struct mmc_host_ops mmc_host_ops;	/* MMC host ops */
 	u64 dma_mask;		/* custom DMA mask */
 
 #if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
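The sdhci hunks above all serve one mechanism: instead of pointing every host at a shared const ops table, each host gets a writable per-instance copy, so a platform probe hook can override a single callback (here get_cd) without mutating the template other hosts use. A self-contained, generic sketch of that idiom; all names below are invented for illustration.

    struct sketch_ops {
            int (*get_cd)(void *host);
    };

    static int sketch_generic_get_cd(void *host) { return 1; }
    static int sketch_quirk_get_cd(void *host)   { return 0; }

    static const struct sketch_ops sketch_default_ops = {
            .get_cd = sketch_generic_get_cd,
    };

    struct sketch_host {
            struct sketch_ops ops;          /* writable per-host copy */
    };

    static void sketch_host_init(struct sketch_host *h, bool quirky)
    {
            h->ops = sketch_default_ops;    /* struct assignment of template */
            if (quirky)
                    h->ops.get_cd = sketch_quirk_get_cd;  /* per-device override */
    }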
@@ -445,7 +445,7 @@ static void sh_mmcif_request_dma(struct sh_mmcif_host *host)
 						pdata->slave_id_rx);
 	} else {
 		host->chan_tx = dma_request_slave_channel(dev, "tx");
-		host->chan_tx = dma_request_slave_channel(dev, "rx");
+		host->chan_rx = dma_request_slave_channel(dev, "rx");
 	}
 	dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx,
 		host->chan_rx);
@@ -7831,6 +7831,14 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
 	return ret;
 }
 
+static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
+{
+	/* Check if we will never have enough descriptors,
+	 * as gso_segs can be more than current ring size
+	 */
+	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
+}
+
 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
 
 /* Use GSO to workaround all TSO packets that meet HW bug conditions

@@ -7934,14 +7942,19 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		 * vlan encapsulated.
 		 */
 		if (skb->protocol == htons(ETH_P_8021Q) ||
-		    skb->protocol == htons(ETH_P_8021AD))
-			return tg3_tso_bug(tp, tnapi, txq, skb);
+		    skb->protocol == htons(ETH_P_8021AD)) {
+			if (tg3_tso_bug_gso_check(tnapi, skb))
+				return tg3_tso_bug(tp, tnapi, txq, skb);
+			goto drop;
+		}
 
 		if (!skb_is_gso_v6(skb)) {
 			if (unlikely((ETH_HLEN + hdr_len) > 80) &&
-			    tg3_flag(tp, TSO_BUG))
-				return tg3_tso_bug(tp, tnapi, txq, skb);
-
+			    tg3_flag(tp, TSO_BUG)) {
+				if (tg3_tso_bug_gso_check(tnapi, skb))
+					return tg3_tso_bug(tp, tnapi, txq, skb);
+				goto drop;
+			}
 			ip_csum = iph->check;
 			ip_tot_len = iph->tot_len;
 			iph->check = 0;

@@ -8073,7 +8086,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (would_hit_hwbug) {
 		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
 
-		if (mss) {
+		if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
 			/* If it's a TSO packet, do GSO instead of
 			 * allocating and copying to a large linear SKB
 			 */
@@ -33,7 +33,7 @@
 
 #define DRV_NAME		"enic"
 #define DRV_DESCRIPTION		"Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION		"2.3.0.12"
+#define DRV_VERSION		"2.3.0.20"
 #define DRV_COPYRIGHT		"Copyright 2008-2013 Cisco Systems, Inc"
 
 #define ENIC_BARS_MAX		6
@@ -298,7 +298,8 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
 			  int wait)
 {
 	struct devcmd2_controller *dc2c = vdev->devcmd2;
-	struct devcmd2_result *result = dc2c->result + dc2c->next_result;
+	struct devcmd2_result *result;
+	u8 color;
 	unsigned int i;
 	int delay, err;
 	u32 fetch_index, new_posted;

@@ -336,13 +337,17 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
 	if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
 		return 0;
 
+	result = dc2c->result + dc2c->next_result;
+	color = dc2c->color;
+
+	dc2c->next_result++;
+	if (dc2c->next_result == dc2c->result_size) {
+		dc2c->next_result = 0;
+		dc2c->color = dc2c->color ? 0 : 1;
+	}
+
 	for (delay = 0; delay < wait; delay++) {
-		if (result->color == dc2c->color) {
-			dc2c->next_result++;
-			if (dc2c->next_result == dc2c->result_size) {
-				dc2c->next_result = 0;
-				dc2c->color = dc2c->color ? 0 : 1;
-			}
+		if (result->color == color) {
 			if (result->error) {
 				err = result->error;
 				if (err != ERR_ECMDUNKNOWN ||
@@ -1880,9 +1880,9 @@ static int dwceqos_open(struct net_device *ndev)
 	}
 	netdev_reset_queue(ndev);
 
+	dwceqos_init_hw(lp);
 	napi_enable(&lp->napi);
 	phy_start(lp->phy_dev);
-	dwceqos_init_hw(lp);
 
 	netif_start_queue(ndev);
 	tasklet_enable(&lp->tx_bdreclaim_tasklet);
@@ -1039,6 +1039,17 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
 	return geneve_xmit_skb(skb, dev, info);
 }
 
+static int geneve_change_mtu(struct net_device *dev, int new_mtu)
+{
+	/* GENEVE overhead is not fixed, so we can't enforce a more
+	 * precise max MTU.
+	 */
+	if (new_mtu < 68 || new_mtu > IP_MAX_MTU)
+		return -EINVAL;
+	dev->mtu = new_mtu;
+	return 0;
+}
+
 static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
 {
 	struct ip_tunnel_info *info = skb_tunnel_info(skb);

@@ -1083,7 +1094,7 @@ static const struct net_device_ops geneve_netdev_ops = {
 	.ndo_stop		= geneve_stop,
 	.ndo_start_xmit		= geneve_xmit,
 	.ndo_get_stats64	= ip_tunnel_get_stats64,
-	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_change_mtu		= geneve_change_mtu,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_mac_address	= eth_mac_addr,
 	.ndo_fill_metadata_dst	= geneve_fill_metadata_dst,

@@ -1442,11 +1453,21 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
 
 	err = geneve_configure(net, dev, &geneve_remote_unspec,
 			       0, 0, 0, htons(dst_port), true, 0);
-	if (err) {
-		free_netdev(dev);
-		return ERR_PTR(err);
-	}
+	if (err)
+		goto err;
+
+	/* openvswitch users expect packet sizes to be unrestricted,
+	 * so set the largest MTU we can.
+	 */
+	err = geneve_change_mtu(dev, IP_MAX_MTU);
+	if (err)
+		goto err;
+
 	return dev;
+
+err:
+	free_netdev(dev);
+	return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(geneve_dev_create_fb);
 
@@ -2367,27 +2367,41 @@ static void vxlan_set_multicast_list(struct net_device *dev)
 {
 }
 
+static int __vxlan_change_mtu(struct net_device *dev,
+			      struct net_device *lowerdev,
+			      struct vxlan_rdst *dst, int new_mtu, bool strict)
+{
+	int max_mtu = IP_MAX_MTU;
+
+	if (lowerdev)
+		max_mtu = lowerdev->mtu;
+
+	if (dst->remote_ip.sa.sa_family == AF_INET6)
+		max_mtu -= VXLAN6_HEADROOM;
+	else
+		max_mtu -= VXLAN_HEADROOM;
+
+	if (new_mtu < 68)
+		return -EINVAL;
+
+	if (new_mtu > max_mtu) {
+		if (strict)
+			return -EINVAL;
+
+		new_mtu = max_mtu;
+	}
+
+	dev->mtu = new_mtu;
+	return 0;
+}
+
 static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
 {
 	struct vxlan_dev *vxlan = netdev_priv(dev);
 	struct vxlan_rdst *dst = &vxlan->default_dst;
-	struct net_device *lowerdev;
-	int max_mtu;
-
-	lowerdev = __dev_get_by_index(vxlan->net, dst->remote_ifindex);
-	if (lowerdev == NULL)
-		return eth_change_mtu(dev, new_mtu);
-
-	if (dst->remote_ip.sa.sa_family == AF_INET6)
-		max_mtu = lowerdev->mtu - VXLAN6_HEADROOM;
-	else
-		max_mtu = lowerdev->mtu - VXLAN_HEADROOM;
-
-	if (new_mtu < 68 || new_mtu > max_mtu)
-		return -EINVAL;
-
-	dev->mtu = new_mtu;
-	return 0;
+	struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
+							 dst->remote_ifindex);
+	return __vxlan_change_mtu(dev, lowerdev, dst, new_mtu, true);
 }
 
 static int egress_ipv4_tun_info(struct net_device *dev, struct sk_buff *skb,

@@ -2765,6 +2779,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
 	int err;
 	bool use_ipv6 = false;
 	__be16 default_port = vxlan->cfg.dst_port;
+	struct net_device *lowerdev = NULL;
 
 	vxlan->net = src_net;
 

@@ -2785,9 +2800,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
 	}
 
 	if (conf->remote_ifindex) {
-		struct net_device *lowerdev
-			 = __dev_get_by_index(src_net, conf->remote_ifindex);
-
+		lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex);
 		dst->remote_ifindex = conf->remote_ifindex;
 
 		if (!lowerdev) {

@@ -2811,6 +2824,12 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
 		needed_headroom = lowerdev->hard_header_len;
 	}
 
+	if (conf->mtu) {
+		err = __vxlan_change_mtu(dev, lowerdev, dst, conf->mtu, false);
+		if (err)
+			return err;
+	}
+
 	if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
 		needed_headroom += VXLAN6_HEADROOM;
 	else
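The geneve and vxlan changes share one shape: a single bounds-checking MTU helper that is strict when userspace changes the MTU at runtime but clamps instead of failing at device setup. A hypothetical distillation of that shape; the constant 68 is the minimum IPv4 MTU used in the hunks.

    static int sketch_tunnel_set_mtu(int *dev_mtu, int new_mtu, int max_mtu,
                                     bool strict)
    {
            if (new_mtu < 68)
                    return -EINVAL;
            if (new_mtu > max_mtu) {
                    if (strict)
                            return -EINVAL;
                    new_mtu = max_mtu;  /* clamp at setup time */
            }
            *dev_mtu = new_mtu;
            return 0;
    }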
@@ -154,6 +154,7 @@ static const struct of_device_id whitelist_phys[] = {
 	{ .compatible = "marvell,88E1111", },
 	{ .compatible = "marvell,88e1116", },
 	{ .compatible = "marvell,88e1118", },
+	{ .compatible = "marvell,88e1145", },
 	{ .compatible = "marvell,88e1149r", },
 	{ .compatible = "marvell,88e1310", },
 	{ .compatible = "marvell,88E1510", },
@@ -64,7 +64,6 @@
 #define OARR_SIZE_CFG	      BIT(OARR_SIZE_CFG_SHIFT)
 
 #define MAX_NUM_OB_WINDOWS    2
-#define MAX_NUM_PAXC_PF	      4
 
 #define IPROC_PCIE_REG_INVALID 0xffff
 

@@ -170,20 +169,6 @@ static inline void iproc_pcie_ob_write(struct iproc_pcie *pcie,
 	writel(val, pcie->base + offset + (window * 8));
 }
 
-static inline bool iproc_pcie_device_is_valid(struct iproc_pcie *pcie,
-					      unsigned int slot,
-					      unsigned int fn)
-{
-	if (slot > 0)
-		return false;
-
-	/* PAXC can only support limited number of functions */
-	if (pcie->type == IPROC_PCIE_PAXC && fn >= MAX_NUM_PAXC_PF)
-		return false;
-
-	return true;
-}
-
 /**
  * Note access to the configuration registers are protected at the higher layer
  * by 'pci_lock' in drivers/pci/access.c

@@ -199,11 +184,11 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus,
 	u32 val;
 	u16 offset;
 
-	if (!iproc_pcie_device_is_valid(pcie, slot, fn))
-		return NULL;
-
 	/* root complex access */
 	if (busno == 0) {
+		if (slot > 0 || fn > 0)
+			return NULL;
+
 		iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_IND_ADDR,
 				     where & CFG_IND_ADDR_MASK);
 		offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_IND_DATA);

@@ -213,6 +198,14 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus,
 		return (pcie->base + offset);
 	}
 
+	/*
+	 * PAXC is connected to an internally emulated EP within the SoC.  It
+	 * allows only one device.
+	 */
+	if (pcie->type == IPROC_PCIE_PAXC)
+		if (slot > 0)
+			return NULL;
+
 	/* EP device access */
 	val = (busno << CFG_ADDR_BUS_NUM_SHIFT) |
 		(slot << CFG_ADDR_DEV_NUM_SHIFT) |
@@ -262,7 +262,6 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
 	rpc->rpd = dev;
 	INIT_WORK(&rpc->dpc_handler, aer_isr);
 	mutex_init(&rpc->rpc_mutex);
-	init_waitqueue_head(&rpc->wait_release);
 
 	/* Use PCIe bus function to store rpc into PCIe device */
 	set_service_data(dev, rpc);

@@ -285,8 +284,7 @@ static void aer_remove(struct pcie_device *dev)
 		if (rpc->isr)
 			free_irq(dev->irq, dev);
 
-		wait_event(rpc->wait_release, rpc->prod_idx == rpc->cons_idx);
-
+		flush_work(&rpc->dpc_handler);
 		aer_disable_rootport(rpc);
 		kfree(rpc);
 		set_service_data(dev, NULL);

@@ -72,7 +72,6 @@ struct aer_rpc {
 					 * recovery on the same
 					 * root port hierarchy
 					 */
-	wait_queue_head_t wait_release;
 };
 
 struct aer_broadcast_data {

@@ -811,8 +811,6 @@ void aer_isr(struct work_struct *work)
 	while (get_e_source(rpc, &e_src))
 		aer_isr_one_error(p_device, &e_src);
 	mutex_unlock(&rpc->rpc_mutex);
-
-	wake_up(&rpc->wait_release);
 }
 
 /**
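The AER hunks replace a hand-rolled waitqueue/counter handshake with a single call, because flush_work() already provides the "wait until the handler has finished" guarantee at teardown. A minimal sketch of that usage; the surrounding function is hypothetical, flush_work() is real kernel API.

    static void sketch_teardown(struct work_struct *handler)
    {
            flush_work(handler);  /* returns once any queued/running instance is done */
            /* only now is it safe to free state the work item touches */
    }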
@@ -41,8 +41,7 @@ static const struct key_entry intel_hid_keymap[] = {
 	{ KE_KEY, 4, { KEY_HOME } },
 	{ KE_KEY, 5, { KEY_END } },
 	{ KE_KEY, 6, { KEY_PAGEUP } },
-	{ KE_KEY, 4, { KEY_PAGEDOWN } },
-	{ KE_KEY, 4, { KEY_HOME } },
+	{ KE_KEY, 7, { KEY_PAGEDOWN } },
 	{ KE_KEY, 8, { KEY_RFKILL } },
 	{ KE_KEY, 9, { KEY_POWER } },
 	{ KE_KEY, 11, { KEY_SLEEP } },
@@ -49,7 +49,7 @@ struct scu_ipc_data {
 
 static int scu_reg_access(u32 cmd, struct scu_ipc_data *data)
 {
-	int count = data->count;
+	unsigned int count = data->count;
 
 	if (count == 0 || count == 3 || count > 4)
 		return -EINVAL;
@@ -562,7 +562,7 @@ static int mode_select_handle_sense(struct scsi_device *sdev,
 			/*
 			 * Command Lock contention
 			 */
-			err = SCSI_DH_RETRY;
+			err = SCSI_DH_IMM_RETRY;
 			break;
 		default:
 			break;

@@ -612,6 +612,8 @@ retry:
 		err = mode_select_handle_sense(sdev, h->sense);
 		if (err == SCSI_DH_RETRY && retry_cnt--)
 			goto retry;
+		if (err == SCSI_DH_IMM_RETRY)
+			goto retry;
 	}
 	if (err == SCSI_DH_OK) {
 		h->state = RDAC_STATE_ACTIVE;
@@ -1,6 +1,6 @@
 config SCSI_HISI_SAS
 	tristate "HiSilicon SAS"
-	depends on HAS_DMA
+	depends on HAS_DMA && HAS_IOMEM
 	depends on ARM64 || COMPILE_TEST
 	select SCSI_SAS_LIBSAS
 	select BLK_DEV_INTEGRITY

@@ -1289,13 +1289,10 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
 		goto out;
 	}
 
-	if (cmplt_hdr_data & CMPLT_HDR_ERR_RCRD_XFRD_MSK) {
-		if (!(cmplt_hdr_data & CMPLT_HDR_CMD_CMPLT_MSK) ||
-		    !(cmplt_hdr_data & CMPLT_HDR_RSPNS_XFRD_MSK))
-			ts->stat = SAS_DATA_OVERRUN;
-		else
-			slot_err_v1_hw(hisi_hba, task, slot);
+	if (cmplt_hdr_data & CMPLT_HDR_ERR_RCRD_XFRD_MSK &&
+	    !(cmplt_hdr_data & CMPLT_HDR_RSPNS_XFRD_MSK)) {
 
+		slot_err_v1_hw(hisi_hba, task, slot);
 		goto out;
 	}
 
@@ -205,6 +205,7 @@ static struct {
 	{"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC},
 	{"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
 	{"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN},
+	{"Marvell", "Console", NULL, BLIST_SKIP_VPD_PAGES},
 	{"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
 	{"MATSHITA", "DMC-LC5", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
 	{"MATSHITA", "DMC-LC40", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
@@ -761,7 +761,7 @@ static int sd_setup_discard_cmnd(struct scsi_cmnd *cmd)
 		break;
 
 	default:
-		ret = BLKPREP_KILL;
+		ret = BLKPREP_INVALID;
 		goto out;
 	}
 

@@ -839,7 +839,7 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
 	int ret;
 
 	if (sdkp->device->no_write_same)
-		return BLKPREP_KILL;
+		return BLKPREP_INVALID;
 
 	BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
 
@@ -42,6 +42,7 @@
 #include <scsi/scsi_devinfo.h>
 #include <scsi/scsi_dbg.h>
 #include <scsi/scsi_transport_fc.h>
+#include <scsi/scsi_transport.h>
 
 /*
  * All wire protocol details (storage protocol between the guest and the host)

@@ -477,19 +478,18 @@ struct hv_host_device {
 struct storvsc_scan_work {
 	struct work_struct work;
 	struct Scsi_Host *host;
-	uint lun;
+	u8 lun;
+	u8 tgt_id;
 };
 
 static void storvsc_device_scan(struct work_struct *work)
 {
 	struct storvsc_scan_work *wrk;
-	uint lun;
 	struct scsi_device *sdev;
 
 	wrk = container_of(work, struct storvsc_scan_work, work);
-	lun = wrk->lun;
 
-	sdev = scsi_device_lookup(wrk->host, 0, 0, lun);
+	sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun);
 	if (!sdev)
 		goto done;
 	scsi_rescan_device(&sdev->sdev_gendev);

@@ -540,7 +540,7 @@ static void storvsc_remove_lun(struct work_struct *work)
 	if (!scsi_host_get(wrk->host))
 		goto done;
 
-	sdev = scsi_device_lookup(wrk->host, 0, 0, wrk->lun);
+	sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun);
 
 	if (sdev) {
 		scsi_remove_device(sdev);

@@ -940,6 +940,7 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
 
 	wrk->host = host;
 	wrk->lun = vm_srb->lun;
+	wrk->tgt_id = vm_srb->target_id;
 	INIT_WORK(&wrk->work, process_err_fn);
 	schedule_work(&wrk->work);
 }

@@ -1770,6 +1771,11 @@ static int __init storvsc_drv_init(void)
 	fc_transport_template = fc_attach_transport(&fc_transport_functions);
 	if (!fc_transport_template)
 		return -ENODEV;
+
+	/*
+	 * Install Hyper-V specific timeout handler.
+	 */
+	fc_transport_template->eh_timed_out = storvsc_eh_timed_out;
 #endif
 
 	ret = vmbus_driver_register(&storvsc_drv);
@@ -152,7 +152,7 @@ static void lcdc_write(unsigned int val, unsigned int addr)
 
 struct da8xx_fb_par {
 	struct device		*dev;
-	resource_size_t p_palette_base;
+	dma_addr_t p_palette_base;
 	unsigned char *v_palette_base;
 	dma_addr_t vram_phys;
 	unsigned long vram_size;

@@ -1428,7 +1428,7 @@ static int fb_probe(struct platform_device *device)
 
 	par->vram_virt = dma_alloc_coherent(NULL,
 					    par->vram_size,
-					    (resource_size_t *) &par->vram_phys,
+					    &par->vram_phys,
 					    GFP_KERNEL | GFP_DMA);
 	if (!par->vram_virt) {
 		dev_err(&device->dev,

@@ -1448,7 +1448,7 @@ static int fb_probe(struct platform_device *device)
 
 	/* allocate palette buffer */
 	par->v_palette_base = dma_zalloc_coherent(NULL, PALETTE_SIZE,
-						  (resource_size_t *)&par->p_palette_base,
+						  &par->p_palette_base,
 						  GFP_KERNEL | GFP_DMA);
 	if (!par->v_palette_base) {
 		dev_err(&device->dev,
@@ -829,8 +829,7 @@ static int s6e8ax0_probe(struct mipi_dsim_lcd_device *dsim_dev)
 	return 0;
 }
 
-#ifdef CONFIG_PM
-static int s6e8ax0_suspend(struct mipi_dsim_lcd_device *dsim_dev)
+static int __maybe_unused s6e8ax0_suspend(struct mipi_dsim_lcd_device *dsim_dev)
 {
 	struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev);
 

@@ -843,7 +842,7 @@ static int s6e8ax0_suspend(struct mipi_dsim_lcd_device *dsim_dev)
 	return 0;
 }
 
-static int s6e8ax0_resume(struct mipi_dsim_lcd_device *dsim_dev)
+static int __maybe_unused s6e8ax0_resume(struct mipi_dsim_lcd_device *dsim_dev)
 {
 	struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev);
 

@@ -855,10 +854,6 @@ static int s6e8ax0_resume(struct mipi_dsim_lcd_device *dsim_dev)
 
 	return 0;
 }
-#else
-#define s6e8ax0_suspend		NULL
-#define s6e8ax0_resume		NULL
-#endif
 
 static struct mipi_dsim_lcd_driver s6e8ax0_dsim_ddi_driver = {
 	.name = "s6e8ax0",

@@ -867,8 +862,8 @@ static struct mipi_dsim_lcd_driver s6e8ax0_dsim_ddi_driver = {
 	.power_on = s6e8ax0_power_on,
 	.set_sequence = s6e8ax0_set_sequence,
 	.probe = s6e8ax0_probe,
-	.suspend = s6e8ax0_suspend,
-	.resume = s6e8ax0_resume,
+	.suspend = IS_ENABLED(CONFIG_PM) ? s6e8ax0_suspend : NULL,
+	.resume = IS_ENABLED(CONFIG_PM) ? s6e8ax0_resume : NULL,
 };
 
 static int s6e8ax0_init(void)
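The s6e8ax0 hunks demonstrate the standard CONFIG_PM cleanup: mark the callbacks __maybe_unused so they always compile (catching bitrot in !PM builds), drop the #ifdef/#else block, and let IS_ENABLED() select them at build time. A self-contained sketch of the idiom with invented names; __maybe_unused and IS_ENABLED() are the real kernel facilities.

    struct sketch_pm_ops {            /* stand-in for a driver's ops type */
            int (*suspend)(void *dev);
            int (*resume)(void *dev);
    };

    static int __maybe_unused sketch_suspend(void *dev) { return 0; }
    static int __maybe_unused sketch_resume(void *dev)  { return 0; }

    static const struct sketch_pm_ops sketch_ops = {
            /* compiled either way, dead code eliminated when PM is off */
            .suspend = IS_ENABLED(CONFIG_PM) ? sketch_suspend : NULL,
            .resume  = IS_ENABLED(CONFIG_PM) ? sketch_resume : NULL,
    };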
@@ -902,6 +902,21 @@ static int imxfb_probe(struct platform_device *pdev)
 		goto failed_getclock;
 	}
 
+	/*
+	 * The LCDC controller does not have an enable bit. The
+	 * controller starts directly when the clocks are enabled.
+	 * If the clocks are enabled when the controller is not yet
+	 * programmed with proper register values (enabled at the
+	 * bootloader, for example) then it just goes into some undefined
+	 * state.
+	 * To avoid this issue, let's enable and disable LCDC IPG clock
+	 * so that we force some kind of 'reset' to the LCDC block.
+	 */
+	ret = clk_prepare_enable(fbi->clk_ipg);
+	if (ret)
+		goto failed_getclock;
+	clk_disable_unprepare(fbi->clk_ipg);
+
 	fbi->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
 	if (IS_ERR(fbi->clk_ahb)) {
 		ret = PTR_ERR(fbi->clk_ahb);
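The comment in the imxfb hunk explains the trick; isolated, it is simply a clock pulse used as a reset. A minimal sketch of that pattern, with a hypothetical wrapper around the real clk API calls from the hunk.

    static int sketch_pulse_reset(struct clk *clk_ipg)
    {
            int ret = clk_prepare_enable(clk_ipg);

            if (ret)
                    return ret;
            /* gating the clock again forces the clockless-enable block
             * through a stop, leaving it in a known state */
            clk_disable_unprepare(clk_ipg);
            return 0;
    }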
@@ -503,8 +503,7 @@ static int mmphw_probe(struct platform_device *pdev)
 	ctrl->reg_base = devm_ioremap_nocache(ctrl->dev,
 			res->start, resource_size(res));
 	if (ctrl->reg_base == NULL) {
-		dev_err(ctrl->dev, "%s: res %x - %x map failed\n", __func__,
-			res->start, res->end);
+		dev_err(ctrl->dev, "%s: res %pR map failed\n", __func__, res);
 		ret = -ENOMEM;
 		goto failed;
 	}
@@ -123,11 +123,11 @@ static int ocfb_setupfb(struct ocfb_dev *fbdev)
 
 	/* Horizontal timings */
 	ocfb_writereg(fbdev, OCFB_HTIM, (var->hsync_len - 1) << 24 |
-		      (var->right_margin - 1) << 16 | (var->xres - 1));
+		      (var->left_margin - 1) << 16 | (var->xres - 1));
 
 	/* Vertical timings */
 	ocfb_writereg(fbdev, OCFB_VTIM, (var->vsync_len - 1) << 24 |
-		      (var->lower_margin - 1) << 16 | (var->yres - 1));
+		      (var->upper_margin - 1) << 16 | (var->yres - 1));
 
 	/* Total length of frame */
 	hlen = var->left_margin + var->right_margin + var->hsync_len +
@@ -1406,7 +1406,8 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
 		read_extent_buffer(eb, dest + bytes_left,
 				   name_off, name_len);
 		if (eb != eb_in) {
-			btrfs_tree_read_unlock_blocking(eb);
+			if (!path->skip_locking)
+				btrfs_tree_read_unlock_blocking(eb);
 			free_extent_buffer(eb);
 		}
 		ret = btrfs_find_item(fs_root, path, parent, 0,

@@ -1426,9 +1427,10 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
 		eb = path->nodes[0];
 		/* make sure we can use eb after releasing the path */
 		if (eb != eb_in) {
-			atomic_inc(&eb->refs);
-			btrfs_tree_read_lock(eb);
-			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+			if (!path->skip_locking)
+				btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+			path->nodes[0] = NULL;
+			path->locks[0] = 0;
 		}
 		btrfs_release_path(path);
 		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
@@ -637,11 +637,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	faili = nr_pages - 1;
 	cb->nr_pages = nr_pages;
 
-	/* In the parent-locked case, we only locked the range we are
-	 * interested in.  In all other cases, we can opportunistically
-	 * cache decompressed data that goes beyond the requested range. */
-	if (!(bio_flags & EXTENT_BIO_PARENT_LOCKED))
-		add_ra_bio_pages(inode, em_start + em_len, cb);
+	add_ra_bio_pages(inode, em_start + em_len, cb);
 
 	/* include any pages we added in add_ra-bio_pages */
 	uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
@ -1689,7 +1689,7 @@ int btrfs_should_delete_dir_index(struct list_head *del_list,
 *
 */
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
                                    struct list_head *ins_list)
                                    struct list_head *ins_list, bool *emitted)
{
    struct btrfs_dir_item *di;
    struct btrfs_delayed_item *curr, *next;

@ -1733,6 +1733,7 @@ int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,

        if (over)
            return 1;
        *emitted = true;
    }
    return 0;
}
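The new bool *emitted out-parameter records whether btrfs_readdir_delayed_dir_index() handed at least one entry to dir_emit(). That lets the caller choose a truthful ctx->pos afterwards instead of unconditionally advancing to an end-of-directory value. A paraphrased sketch of caller usage; this is not the actual btrfs_real_readdir() code:

    static int readdir_delayed(struct dir_context *ctx,
                               struct list_head *ins_list)
    {
        bool emitted = false;
        int ret;

        ret = btrfs_readdir_delayed_dir_index(ctx, ins_list, &emitted);
        if (ret)
            return ret;

        /* Advance ctx->pos only if at least one entry was emitted;
         * otherwise the next getdents() call could wrongly see an
         * end-of-directory position.
         */
        if (emitted)
            ctx->pos++;
        return 0;
    }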
@ -144,7 +144,7 @@ void btrfs_put_delayed_items(struct list_head *ins_list,
int btrfs_should_delete_dir_index(struct list_head *del_list,
                                  u64 index);
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
                                    struct list_head *ins_list);
                                    struct list_head *ins_list, bool *emitted);

/* for init */
int __init btrfs_delayed_inode_init(void);
@ -2897,12 +2897,11 @@ static int __do_readpage(struct extent_io_tree *tree,
    struct block_device *bdev;
    int ret;
    int nr = 0;
    int parent_locked = *bio_flags & EXTENT_BIO_PARENT_LOCKED;
    size_t pg_offset = 0;
    size_t iosize;
    size_t disk_io_size;
    size_t blocksize = inode->i_sb->s_blocksize;
    unsigned long this_bio_flag = *bio_flags & EXTENT_BIO_PARENT_LOCKED;
    unsigned long this_bio_flag = 0;

    set_page_extent_mapped(page);
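All of the __do_readpage() hunks that follow repeat one mechanical change: the guard in front of each unlock disappears. parent_locked was derived from EXTENT_BIO_PARENT_LOCKED, a mode in which the caller promised it already held the extent range locked; the btrfs_submit_compressed_read hunk above and the extent_read_full_page_nolock removal below are the other halves of the same cleanup. A minimal before/after sketch, with hypothetical helper names and the unlock_extent() declaration paraphrased from extent_io.h:

    #include <linux/types.h>

    struct extent_io_tree;  /* opaque here; real definition in extent_io.h */
    int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end);

    /* Before: skip the unlock when the caller pre-locked the range. */
    static void finish_cur_before(struct extent_io_tree *tree, u64 cur,
                                  u64 iosize, int parent_locked)
    {
        if (!parent_locked)
            unlock_extent(tree, cur, cur + iosize - 1);
    }

    /* After: the parent-locked mode is gone, unlock unconditionally. */
    static void finish_cur_after(struct extent_io_tree *tree, u64 cur,
                                 u64 iosize)
    {
        unlock_extent(tree, cur, cur + iosize - 1);
    }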
@ -2942,18 +2941,16 @@ static int __do_readpage(struct extent_io_tree *tree,
            kunmap_atomic(userpage);
            set_extent_uptodate(tree, cur, cur + iosize - 1,
                        &cached, GFP_NOFS);
            if (!parent_locked)
                unlock_extent_cached(tree, cur,
                             cur + iosize - 1,
                             &cached, GFP_NOFS);
            unlock_extent_cached(tree, cur,
                         cur + iosize - 1,
                         &cached, GFP_NOFS);
            break;
        }
        em = __get_extent_map(inode, page, pg_offset, cur,
                      end - cur + 1, get_extent, em_cached);
        if (IS_ERR_OR_NULL(em)) {
            SetPageError(page);
            if (!parent_locked)
                unlock_extent(tree, cur, end);
            unlock_extent(tree, cur, end);
            break;
        }
        extent_offset = cur - em->start;
@ -3038,12 +3035,9 @@ static int __do_readpage(struct extent_io_tree *tree,

            set_extent_uptodate(tree, cur, cur + iosize - 1,
                        &cached, GFP_NOFS);
            if (parent_locked)
                free_extent_state(cached);
            else
                unlock_extent_cached(tree, cur,
                             cur + iosize - 1,
                             &cached, GFP_NOFS);
            unlock_extent_cached(tree, cur,
                         cur + iosize - 1,
                         &cached, GFP_NOFS);
            cur = cur + iosize;
            pg_offset += iosize;
            continue;
@ -3052,8 +3046,7 @@ static int __do_readpage(struct extent_io_tree *tree,
        if (test_range_bit(tree, cur, cur_end,
                   EXTENT_UPTODATE, 1, NULL)) {
            check_page_uptodate(tree, page);
            if (!parent_locked)
                unlock_extent(tree, cur, cur + iosize - 1);
            unlock_extent(tree, cur, cur + iosize - 1);
            cur = cur + iosize;
            pg_offset += iosize;
            continue;
@ -3063,8 +3056,7 @@ static int __do_readpage(struct extent_io_tree *tree,
         */
        if (block_start == EXTENT_MAP_INLINE) {
            SetPageError(page);
            if (!parent_locked)
                unlock_extent(tree, cur, cur + iosize - 1);
            unlock_extent(tree, cur, cur + iosize - 1);
            cur = cur + iosize;
            pg_offset += iosize;
            continue;
@ -3083,8 +3075,7 @@ static int __do_readpage(struct extent_io_tree *tree,
            *bio_flags = this_bio_flag;
        } else {
            SetPageError(page);
            if (!parent_locked)
                unlock_extent(tree, cur, cur + iosize - 1);
            unlock_extent(tree, cur, cur + iosize - 1);
        }
        cur = cur + iosize;
        pg_offset += iosize;
@ -3213,20 +3204,6 @@ int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
    return ret;
}

int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page,
                                 get_extent_t *get_extent, int mirror_num)
{
    struct bio *bio = NULL;
    unsigned long bio_flags = EXTENT_BIO_PARENT_LOCKED;
    int ret;

    ret = __do_readpage(tree, page, get_extent, NULL, &bio, mirror_num,
                &bio_flags, READ, NULL);
    if (bio)
        ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
    return ret;
}

static noinline void update_nr_written(struct page *page,
                       struct writeback_control *wbc,
                       unsigned long nr_written)
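As the deleted body shows, extent_read_full_page_nolock() differed from extent_read_full_page() only in seeding bio_flags with EXTENT_BIO_PARENT_LOCKED so that __do_readpage() would leave the extent range locked for the caller. With that mode retired, the plain entry point is the single page-read API left; its prototype, as it survives in the header hunk below:

    int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
                              get_extent_t *get_extent, int mirror_num);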
@ -29,7 +29,6 @@
 */
#define EXTENT_BIO_COMPRESSED 1
#define EXTENT_BIO_TREE_LOG 2
#define EXTENT_BIO_PARENT_LOCKED 4
#define EXTENT_BIO_FLAG_SHIFT 16

/* these are bit numbers for test/set bit */
@ -210,8 +209,6 @@ static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
              get_extent_t *get_extent, int mirror_num);
int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page,
                 get_extent_t *get_extent, int mirror_num);
int __init extent_io_init(void);
void extent_io_exit(void);
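For context, the bio-flags word these defines describe packs boolean flags into its low bits and the compression type into the high bits, so retiring bit value 4 simply frees a flag slot. The two helpers below are recalled from the same header and shown only as an illustrative sketch of that layout:

    #define EXTENT_BIO_COMPRESSED 1
    #define EXTENT_BIO_TREE_LOG 2
    /* bit value 4 (EXTENT_BIO_PARENT_LOCKED) is now unused */
    #define EXTENT_BIO_FLAG_SHIFT 16

    /* The compression type rides above the flag bits: */
    static inline void extent_set_compress_type(unsigned long *bio_flags,
                                                int compress_type)
    {
        *bio_flags |= compress_type << EXTENT_BIO_FLAG_SHIFT;
    }

    static inline int extent_compress_type(unsigned long bio_flags)
    {
        return bio_flags >> EXTENT_BIO_FLAG_SHIFT;
    }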