USB / Thunderbolt fixes for 5.13-rc4
Here are a number of tiny USB and Thunderbolt driver fixes for 5.13-rc4.

They consist of:

 - thunderbolt fixes for some NVM bound issues
 - xhci fixes for reported problems
 - control-request fixups
 - documentation build warning fixes
 - new usb-serial driver device ids
 - typec bugfixes for reported issues
 - usbfs warning fixups (could be triggered from userspace)
 - other tiny fixes for reported problems

All of these have been in linux-next with no reported issues.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

-----BEGIN PGP SIGNATURE-----

iG0EABECAC0WIQT0tgzFv3jCIUoxPcsxR9QN2y37KQUCYLJOvA8cZ3JlZ0Brcm9h
aC5jb20ACgkQMUfUDdst+ymoGACg1Fu7eGouMBaHMykdR04lHgixb20An2Pna0fu
MPOqJSrKIF4/D6rUcsep
=wA32
-----END PGP SIGNATURE-----

Merge tag 'usb-5.13-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb

Pull USB / Thunderbolt fixes from Greg KH:
 "Here are a number of tiny USB and Thunderbolt driver fixes for
  5.13-rc4. They consist of:

   - thunderbolt fixes for some NVM bound issues
   - xhci fixes for reported problems
   - control-request fixups
   - documentation build warning fixes
   - new usb-serial driver device ids
   - typec bugfixes for reported issues
   - usbfs warning fixups (could be triggered from userspace)
   - other tiny fixes for reported problems

  All of these have been in linux-next with no reported issues"

* tag 'usb-5.13-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb: (22 commits)
  xhci: Fix 5.12 regression of missing xHC cache clearing command after a Stall
  xhci: fix giving back URB with incorrect status regression in 5.12
  usb: gadget: udc: renesas_usb3: Fix a race in usb3_start_pipen()
  usb: typec: tcpm: Respond Not_Supported if no snk_vdo
  usb: typec: tcpm: Properly interrupt VDM AMS
  USB: trancevibrator: fix control-request direction
  usb: Restore the usb_header label
  usb: typec: tcpm: Use LE to CPU conversion when accessing msg->header
  usb: typec: ucsi: Clear pending after acking connector change
  usb: typec: mux: Fix matching with typec_altmode_desc
  misc/uss720: fix memory leak in uss720_probe
  usb: dwc3: gadget: Properly track pending and queued SG
  USB: usbfs: Don't WARN about excessively large memory allocations
  thunderbolt: usb4: Fix NVM read buffer bounds and offset issue
  thunderbolt: dma_port: Fix NVM read buffer bounds and offset issue
  usb: chipidea: udc: assign interrupt number to USB gadget structure
  usb: cdnsp: Fix lack of removing request from pending list.
  usb: cdns3: Fix runtime PM imbalance on error
  USB: serial: pl2303: add device id for ADLINK ND-6530 GC
  USB: serial: ti_usb_3410_5052: add startech.com device id
  ...
commit 523d0b1e9c
@@ -123,6 +123,8 @@ are in ``drivers/usb/common/common.c``.
In addition, some functions useful for creating debugging output are
defined in ``drivers/usb/common/debug.c``.

.. _usb_header:

Host-Side Data Types and Macros
===============================

@@ -366,15 +366,15 @@ int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
            void *buf, size_t size)
{
    unsigned int retries = DMA_PORT_RETRIES;
    unsigned int offset;

    offset = address & 3;
    address = address & ~3;

    do {
        u32 nbytes = min_t(u32, size, MAIL_DATA_DWORDS * 4);
        unsigned int offset;
        size_t nbytes;
        int ret;

        offset = address & 3;
        nbytes = min_t(size_t, size + offset, MAIL_DATA_DWORDS * 4);

        ret = dma_port_flash_read_block(dma, address, dma->buf,
                        ALIGN(nbytes, 4));
        if (ret) {

@@ -386,6 +386,7 @@ int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
            return ret;
        }

        nbytes -= offset;
        memcpy(buf, dma->buf + offset, nbytes);

        size -= nbytes;

@@ -68,15 +68,15 @@ static int usb4_do_read_data(u16 address, void *buf, size_t size,
    unsigned int retries = USB4_DATA_RETRIES;
    unsigned int offset;

    offset = address & 3;
    address = address & ~3;

    do {
        size_t nbytes = min_t(size_t, size, USB4_DATA_DWORDS * 4);
        unsigned int dwaddress, dwords;
        u8 data[USB4_DATA_DWORDS * 4];
        size_t nbytes;
        int ret;

        offset = address & 3;
        nbytes = min_t(size_t, size + offset, USB4_DATA_DWORDS * 4);

        dwaddress = address / 4;
        dwords = ALIGN(nbytes, 4) / 4;

@@ -87,6 +87,7 @@ static int usb4_do_read_data(u16 address, void *buf, size_t size,
            return ret;
        }

        nbytes -= offset;
        memcpy(buf, data + offset, nbytes);

        size -= nbytes;

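Both thunderbolt hunks above converge on the same chunked-read pattern: keep the low two address bits as an offset into a dword-aligned chunk, bound each read by size + offset rather than size, and subtract the offset again before copying. Below is a small standalone C sketch of that logic; CHUNK_BYTES, read_block_sim() and the simulated nvm[] array are illustrative stand-ins, not driver API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CHUNK_BYTES 64                 /* stand-in for MAIL_DATA_DWORDS * 4 */

static uint8_t nvm[256];               /* simulated NVM contents */

/* Simulated dword-aligned block read: 'address' and 'len' are 4-byte aligned. */
static void read_block_sim(unsigned int address, void *buf, size_t len)
{
    memcpy(buf, nvm + address, len);
}

/* Copy 'size' bytes starting at an arbitrary (unaligned) 'address' into 'buf'. */
static void chunked_read(unsigned int address, void *buf, size_t size)
{
    while (size > 0) {
        uint8_t data[CHUNK_BYTES];
        /* Offset of the first requested byte inside the aligned chunk. */
        unsigned int offset = address & 3;
        /* Read enough to cover the offset, but never past the chunk buffer. */
        size_t nbytes = size + offset < CHUNK_BYTES ? size + offset : CHUNK_BYTES;

        read_block_sim(address & ~3U, data, (nbytes + 3) & ~(size_t)3);

        nbytes -= offset;              /* usable payload bytes in this chunk */
        memcpy(buf, data + offset, nbytes);

        size -= nbytes;
        address += nbytes;
        buf = (uint8_t *)buf + nbytes;
    }
}

int main(void)
{
    uint8_t out[10];

    for (unsigned int i = 0; i < sizeof(nvm); i++)
        nvm[i] = (uint8_t)i;

    chunked_read(7, out, sizeof(out)); /* unaligned start address */
    for (size_t i = 0; i < sizeof(out); i++)
        printf("%u ", out[i]);
    printf("\n");                      /* expect: 7 8 9 ... 16 */
    return 0;
}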
@@ -3268,8 +3268,10 @@ static int __cdns3_gadget_init(struct cdns *cdns)
    pm_runtime_get_sync(cdns->dev);

    ret = cdns3_gadget_start(cdns);
    if (ret)
    if (ret) {
        pm_runtime_put_sync(cdns->dev);
        return ret;
    }

    /*
     * Because interrupt line can be shared with other components in

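The cdns3 hunk pairs the pm_runtime_get_sync() taken at the top of the function with a pm_runtime_put_sync() on the newly added error path. The toy program below sketches that acquire/release balance with a plain usage counter; pm_get(), pm_put() and gadget_start() are stand-ins, not the kernel's runtime-PM API.

#include <stdio.h>

/* Toy stand-ins: the real helpers keep a usage count that decides when the
 * device is allowed to runtime-suspend. */
static int usage_count;

static void pm_get(void) { usage_count++; }
static void pm_put(void) { usage_count--; }

/* Pretend device start that can fail. */
static int gadget_start(int fail) { return fail ? -1 : 0; }

static int gadget_init(int fail)
{
    int ret;

    pm_get();

    ret = gadget_start(fail);
    if (ret) {
        pm_put();      /* the fix: drop the reference on the error path too */
        return ret;
    }

    /* ... more init; here the reference is dropped at the end as well ... */
    pm_put();
    return 0;
}

int main(void)
{
    gadget_init(1);    /* failing path */
    gadget_init(0);    /* succeeding path */
    printf("usage_count = %d (expect 0)\n", usage_count);
    return 0;
}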
@@ -422,17 +422,17 @@ unmap:
int cdnsp_ep_dequeue(struct cdnsp_ep *pep, struct cdnsp_request *preq)
{
    struct cdnsp_device *pdev = pep->pdev;
    int ret;
    int ret_stop = 0;
    int ret_rem;

    trace_cdnsp_request_dequeue(preq);

    if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_RUNNING) {
        ret = cdnsp_cmd_stop_ep(pdev, pep);
        if (ret)
            return ret;
    }
    if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_RUNNING)
        ret_stop = cdnsp_cmd_stop_ep(pdev, pep);

    return cdnsp_remove_request(pdev, preq, pep);
    ret_rem = cdnsp_remove_request(pdev, preq, pep);

    return ret_rem ? ret_rem : ret_stop;
}

static void cdnsp_zero_in_ctx(struct cdnsp_device *pdev)

@@ -2061,6 +2061,7 @@ static int udc_start(struct ci_hdrc *ci)
    ci->gadget.name = ci->platdata->name;
    ci->gadget.otg_caps = otg_caps;
    ci->gadget.sg_supported = 1;
    ci->gadget.irq = ci->irq;

    if (ci->platdata->flags & CI_HDRC_REQUIRES_ALIGNED_DMA)
        ci->gadget.quirk_avoids_skb_reserve = 1;

@@ -1218,7 +1218,12 @@ static int do_proc_bulk(struct usb_dev_state *ps,
    ret = usbfs_increase_memory_usage(len1 + sizeof(struct urb));
    if (ret)
        return ret;
    tbuf = kmalloc(len1, GFP_KERNEL);

    /*
     * len1 can be almost arbitrarily large. Don't WARN if it's
     * too big, just fail the request.
     */
    tbuf = kmalloc(len1, GFP_KERNEL | __GFP_NOWARN);
    if (!tbuf) {
        ret = -ENOMEM;
        goto done;

@@ -1696,7 +1701,7 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
    if (num_sgs) {
        as->urb->sg = kmalloc_array(num_sgs,
                        sizeof(struct scatterlist),
                        GFP_KERNEL);
                        GFP_KERNEL | __GFP_NOWARN);
        if (!as->urb->sg) {
            ret = -ENOMEM;
            goto error;

@@ -1731,7 +1736,7 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
                (uurb_start - as->usbm->vm_start);
    } else {
        as->urb->transfer_buffer = kmalloc(uurb->buffer_length,
                        GFP_KERNEL);
                        GFP_KERNEL | __GFP_NOWARN);
        if (!as->urb->transfer_buffer) {
            ret = -ENOMEM;
            goto error;

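All three usbfs hunks make the same point: the allocation size comes straight from userspace, so an oversized request should simply fail with -ENOMEM instead of tripping the allocator's warning, which is why __GFP_NOWARN is added. A userspace analogue of that attitude, with malloc() standing in for kmalloc(); do_bulk() is an illustrative sketch, not the usbfs code.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * The transfer length comes from an untrusted caller, so allocation failure
 * is an expected outcome that must be reported as an error, not treated as
 * a bug.
 */
static int do_bulk(size_t len, const void *data)
{
    void *tbuf;

    if (len == 0)
        return -EINVAL;

    tbuf = malloc(len);            /* len is caller-controlled */
    if (!tbuf)
        return -ENOMEM;            /* fail the request, don't assert */

    memcpy(tbuf, data, len);
    /* ... submit the transfer ... */
    free(tbuf);
    return 0;
}

int main(void)
{
    char payload[] = "ping";
    int ret = do_bulk(sizeof(payload), payload);

    printf("do_bulk: %d\n", ret);
    return 0;
}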
@@ -1244,6 +1244,7 @@ static int dwc3_prepare_trbs_sg(struct dwc3_ep *dep,
        req->start_sg = sg_next(s);

        req->num_queued_sgs++;
        req->num_pending_sgs--;

        /*
         * The number of pending SG entries may not correspond to the

@@ -1251,7 +1252,7 @@ static int dwc3_prepare_trbs_sg(struct dwc3_ep *dep,
         * don't include unused SG entries.
         */
        if (length == 0) {
            req->num_pending_sgs -= req->request.num_mapped_sgs - req->num_queued_sgs;
            req->num_pending_sgs = 0;
            break;
        }

@@ -2873,15 +2874,15 @@ static int dwc3_gadget_ep_reclaim_trb_sg(struct dwc3_ep *dep,
    struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue];
    struct scatterlist *sg = req->sg;
    struct scatterlist *s;
    unsigned int pending = req->num_pending_sgs;
    unsigned int num_queued = req->num_queued_sgs;
    unsigned int i;
    int ret = 0;

    for_each_sg(sg, s, pending, i) {
    for_each_sg(sg, s, num_queued, i) {
        trb = &dep->trb_pool[dep->trb_dequeue];

        req->sg = sg_next(s);
        req->num_pending_sgs--;
        req->num_queued_sgs--;

        ret = dwc3_gadget_ep_reclaim_completed_trb(dep, req,
                trb, event, status, true);

@@ -2904,7 +2905,7 @@ static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep,

static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req)
{
    return req->num_pending_sgs == 0;
    return req->num_pending_sgs == 0 && req->num_queued_sgs == 0;
}

static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,

@@ -2913,7 +2914,7 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
{
    int ret;

    if (req->num_pending_sgs)
    if (req->request.num_mapped_sgs)
        ret = dwc3_gadget_ep_reclaim_trb_sg(dep, req, event,
                status);
    else

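The dwc3 hunks keep two counters per request in step: scatterlist entries not yet turned into TRBs (num_pending_sgs) and entries already queued to the controller (num_queued_sgs), and only treat the request as finished once both reach zero. The toy model below mirrors that bookkeeping; it borrows the field names but is not the driver.

#include <stdbool.h>
#include <stdio.h>

struct request {
    unsigned int num_mapped_sgs;   /* total entries in the scatterlist */
    unsigned int num_pending_sgs;  /* not yet turned into TRBs */
    unsigned int num_queued_sgs;   /* TRBs queued, completion outstanding */
};

/* Prepare up to 'budget' TRBs from the pending entries. */
static void prepare(struct request *req, unsigned int budget)
{
    while (budget-- && req->num_pending_sgs) {
        req->num_pending_sgs--;
        req->num_queued_sgs++;
    }
}

/* Controller completed 'done' TRBs: reclaim the matching queued entries. */
static void complete(struct request *req, unsigned int done)
{
    while (done-- && req->num_queued_sgs)
        req->num_queued_sgs--;
}

/* A request is only finished once nothing is pending *and* nothing is queued. */
static bool request_completed(const struct request *req)
{
    return req->num_pending_sgs == 0 && req->num_queued_sgs == 0;
}

int main(void)
{
    struct request req = { .num_mapped_sgs = 4, .num_pending_sgs = 4 };

    prepare(&req, 2);              /* only part of the list fits this time */
    complete(&req, 2);
    printf("done after first pass? %d (expect 0)\n", request_completed(&req));

    prepare(&req, 2);
    complete(&req, 2);
    printf("done after second pass? %d (expect 1)\n", request_completed(&req));
    return 0;
}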
@@ -1488,7 +1488,7 @@ static void usb3_start_pipen(struct renesas_usb3_ep *usb3_ep,
                struct renesas_usb3_request *usb3_req)
{
    struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
    struct renesas_usb3_request *usb3_req_first = usb3_get_request(usb3_ep);
    struct renesas_usb3_request *usb3_req_first;
    unsigned long flags;
    int ret = -EAGAIN;
    u32 enable_bits = 0;

@@ -1496,7 +1496,8 @@ static void usb3_start_pipen(struct renesas_usb3_ep *usb3_ep,
    spin_lock_irqsave(&usb3->lock, flags);
    if (usb3_ep->halt || usb3_ep->started)
        goto out;
    if (usb3_req != usb3_req_first)
    usb3_req_first = __usb3_get_request(usb3_ep);
    if (!usb3_req_first || usb3_req != usb3_req_first)
        goto out;

    if (usb3_pn_change(usb3, usb3_ep->num) < 0)

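The renesas_usb3 hunks stop sampling the first queued request before the spinlock is taken; the head of the queue is re-read (and NULL-checked) with the lock held, so a concurrent dequeue cannot leave usb3_start_pipen() acting on a stale pointer. A minimal userspace sketch of that "check the head only under the lock" rule, using a pthread mutex in place of the driver's spinlock; all names here are illustrative.

#include <pthread.h>
#include <stdio.h>

/* Minimal mutex-protected FIFO head, standing in for the endpoint queue. */
struct queue {
    pthread_mutex_t lock;
    struct node *head;
};

struct node {
    int id;
    struct node *next;
};

static struct queue q = { .lock = PTHREAD_MUTEX_INITIALIZER };

/* Must be called with q.lock held. */
static struct node *first_locked(struct queue *queue)
{
    return queue->head;
}

/*
 * Only start 'req' if it is still the first request in the queue. The check
 * happens after taking the lock; looking at the head before locking (as the
 * old driver code effectively did) races with a concurrent dequeue.
 */
static int start_if_first(struct queue *queue, struct node *req)
{
    struct node *first;
    int started = 0;

    pthread_mutex_lock(&queue->lock);
    first = first_locked(queue);
    if (first && first == req) {
        /* ... program the hardware for 'req' ... */
        started = 1;
    }
    pthread_mutex_unlock(&queue->lock);
    return started;
}

int main(void)
{
    struct node a = { .id = 1 }, b = { .id = 2 };

    a.next = &b;
    q.head = &a;
    printf("start a: %d (expect 1)\n", start_if_first(&q, &a));
    printf("start b: %d (expect 0)\n", start_if_first(&q, &b));
    return 0;
}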
@@ -828,14 +828,10 @@ static void xhci_giveback_invalidated_tds(struct xhci_virt_ep *ep)
    list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
            cancelled_td_list) {

        /*
         * Doesn't matter what we pass for status, since the core will
         * just overwrite it (because the URB has been unlinked).
         */
        ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);

        if (td->cancel_status == TD_CLEARED)
            xhci_td_cleanup(ep->xhci, td, ring, 0);
            xhci_td_cleanup(ep->xhci, td, ring, td->status);

        if (ep->xhci->xhc_state & XHCI_STATE_DYING)
            return;

@@ -937,14 +933,18 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
            continue;
        }
        /*
         * If ring stopped on the TD we need to cancel, then we have to
         * If a ring stopped on the TD we need to cancel then we have to
         * move the xHC endpoint ring dequeue pointer past this TD.
         * Rings halted due to STALL may show hw_deq is past the stalled
         * TD, but still require a set TR Deq command to flush xHC cache.
         */
        hw_deq = xhci_get_hw_deq(xhci, ep->vdev, ep->ep_index,
                    td->urb->stream_id);
        hw_deq &= ~0xf;

        if (trb_in_td(xhci, td->start_seg, td->first_trb,
        if (td->cancel_status == TD_HALTED) {
            cached_td = td;
        } else if (trb_in_td(xhci, td->start_seg, td->first_trb,
                td->last_trb, hw_deq, false)) {
            switch (td->cancel_status) {
            case TD_CLEARED: /* TD is already no-op */

@@ -61,9 +61,9 @@ static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
    /* Set speed */
    retval = usb_control_msg(tv->udev, usb_sndctrlpipe(tv->udev, 0),
                 0x01, /* vendor request: set speed */
                 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER,
                 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
                 tv->speed, /* speed value */
                 0, NULL, 0, USB_CTRL_GET_TIMEOUT);
                 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
    if (retval) {
        tv->speed = old;
        dev_dbg(&tv->udev->dev, "retval = %d\n", retval);

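The trancevibrator hunk flips the request from USB_DIR_IN to USB_DIR_OUT: a vendor request that carries its value in wValue and has no data stage is host-to-device, and the direction bit has to agree with the control pipe (usb_sndctrlpipe). For illustration only, the same kind of request expressed from userspace with libusb rather than the in-kernel API; the device-open code is omitted and everything beyond the 0x01 request and the speed value is an assumption of this sketch.

#include <stdio.h>
#include <libusb-1.0/libusb.h>

static int set_speed(libusb_device_handle *h, unsigned char speed)
{
    /* Host-to-device (OUT) vendor request with no data stage. */
    uint8_t bmRequestType = LIBUSB_ENDPOINT_OUT |
                LIBUSB_REQUEST_TYPE_VENDOR |
                LIBUSB_RECIPIENT_OTHER;

    return libusb_control_transfer(h, bmRequestType,
                       0x01,     /* bRequest: set speed */
                       speed,    /* wValue */
                       0,        /* wIndex */
                       NULL, 0,  /* no data stage */
                       1000);    /* timeout, ms */
}

int main(void)
{
    libusb_context *ctx;

    if (libusb_init(&ctx) != 0) {
        fprintf(stderr, "libusb_init failed\n");
        return 1;
    }
    /* Opening the actual device is omitted; set_speed() just shows the request. */
    (void)set_speed;
    libusb_exit(ctx);
    return 0;
}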
@@ -736,6 +736,7 @@ static int uss720_probe(struct usb_interface *intf,
    parport_announce_port(pp);

    usb_set_intfdata(intf, pp);
    usb_put_dev(usbdev);
    return 0;

probe_abort:

@@ -1034,6 +1034,9 @@ static const struct usb_device_id id_table_combined[] = {
    /* Sienna devices */
    { USB_DEVICE(FTDI_VID, FTDI_SIENNA_PID) },
    { USB_DEVICE(ECHELON_VID, ECHELON_U20_PID) },
    /* IDS GmbH devices */
    { USB_DEVICE(IDS_VID, IDS_SI31A_PID) },
    { USB_DEVICE(IDS_VID, IDS_CM31A_PID) },
    /* U-Blox devices */
    { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ZED_PID) },
    { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ODIN_PID) },

@@ -1567,6 +1567,13 @@
#define UNJO_VID			0x22B7
#define UNJO_ISODEBUG_V1_PID		0x150D

/*
 * IDS GmbH
 */
#define IDS_VID				0x2CAF
#define IDS_SI31A_PID			0x13A2
#define IDS_CM31A_PID			0x13A3

/*
 * U-Blox products (http://www.u-blox.com).
 */

@@ -1240,6 +1240,10 @@ static const struct usb_device_id option_ids[] = {
      .driver_info = NCTRL(0) | RSVD(1) },
    { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff),	/* Telit LN940 (MBIM) */
      .driver_info = NCTRL(0) },
    { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7010, 0xff),	/* Telit LE910-S1 (RNDIS) */
      .driver_info = NCTRL(2) },
    { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7011, 0xff),	/* Telit LE910-S1 (ECM) */
      .driver_info = NCTRL(2) },
    { USB_DEVICE(TELIT_VENDOR_ID, 0x9010),				/* Telit SBL FN980 flashing device */
      .driver_info = NCTRL(0) | ZLP },
    { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */

@@ -113,6 +113,7 @@ static const struct usb_device_id id_table[] = {
    { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
    { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
    { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
    { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530GC_PRODUCT_ID) },
    { USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) },
    { USB_DEVICE(AT_VENDOR_ID, AT_VTKIT3_PRODUCT_ID) },
    { }					/* Terminating entry */

@@ -158,6 +158,7 @@
/* ADLINK ND-6530 RS232,RS485 and RS422 adapter */
#define ADLINK_VENDOR_ID		0x0b63
#define ADLINK_ND6530_PRODUCT_ID	0x6530
#define ADLINK_ND6530GC_PRODUCT_ID	0x653a

/* SMART USB Serial Adapter */
#define SMART_VENDOR_ID			0x0b8c

@@ -37,6 +37,7 @@
/* Vendor and product ids */
#define TI_VENDOR_ID			0x0451
#define IBM_VENDOR_ID			0x04b3
#define STARTECH_VENDOR_ID		0x14b0
#define TI_3410_PRODUCT_ID		0x3410
#define IBM_4543_PRODUCT_ID		0x4543
#define IBM_454B_PRODUCT_ID		0x454b

@@ -370,6 +371,7 @@ static const struct usb_device_id ti_id_table_3410[] = {
    { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1131_PRODUCT_ID) },
    { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1150_PRODUCT_ID) },
    { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1151_PRODUCT_ID) },
    { USB_DEVICE(STARTECH_VENDOR_ID, TI_3410_PRODUCT_ID) },
    { }	/* terminator */
};

@@ -408,6 +410,7 @@ static const struct usb_device_id ti_id_table_combined[] = {
    { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1131_PRODUCT_ID) },
    { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1150_PRODUCT_ID) },
    { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1151_PRODUCT_ID) },
    { USB_DEVICE(STARTECH_VENDOR_ID, TI_3410_PRODUCT_ID) },
    { }	/* terminator */
};

@@ -191,6 +191,7 @@ static void *typec_mux_match(struct fwnode_handle *fwnode, const char *id,
    bool match;
    int nval;
    u16 *val;
    int ret;
    int i;

    /*

@@ -218,10 +219,10 @@ static void *typec_mux_match(struct fwnode_handle *fwnode, const char *id,
    if (!val)
        return ERR_PTR(-ENOMEM);

    nval = fwnode_property_read_u16_array(fwnode, "svid", val, nval);
    if (nval < 0) {
    ret = fwnode_property_read_u16_array(fwnode, "svid", val, nval);
    if (ret < 0) {
        kfree(val);
        return ERR_PTR(nval);
        return ERR_PTR(ret);
    }

    for (i = 0; i < nval; i++) {

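The typec mux hunks keep the element count from the first fwnode_property_read_u16_array() call (made earlier with a NULL buffer) in nval and put the second call's return code in a separate ret; reusing nval for that return value had zeroed the loop bound on success, so nothing ever matched. A userspace sketch of the two-call convention follows; read_u16_array() and its hard-coded SVIDs are stand-ins, not the kernel helper.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

/*
 * Stand-in for the "call once with a NULL buffer to get the element count,
 * call again to fill the buffer" convention of the fwnode property API.
 * Returns the count when buf is NULL, 0 on a successful read, negative on error.
 */
static int read_u16_array(uint16_t *buf, size_t nval)
{
    static const uint16_t svids[] = { 0xff01, 0x8087 };

    if (!buf)
        return (int)(sizeof(svids) / sizeof(svids[0]));
    if (nval < sizeof(svids) / sizeof(svids[0]))
        return -22;                    /* -EINVAL */
    memcpy(buf, svids, sizeof(svids));
    return 0;
}

int main(void)
{
    int nval, ret, i;
    uint16_t *val;

    nval = read_u16_array(NULL, 0);    /* 1st call: how many entries? */
    if (nval <= 0)
        return 1;

    val = calloc(nval, sizeof(*val));
    if (!val)
        return 1;

    /* 2nd call: keep the return code in 'ret' and do NOT overwrite 'nval';
     * a successful read returns 0, which would make the loop a no-op. */
    ret = read_u16_array(val, nval);
    if (ret < 0) {
        free(val);
        return 1;
    }

    for (i = 0; i < nval; i++)
        printf("svid[%d] = 0x%04x\n", i, val[i]);
    free(val);
    return 0;
}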
@@ -1550,6 +1550,8 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
        if (PD_VDO_SVDM_VER(p[0]) < svdm_version)
            typec_partner_set_svdm_version(port->partner,
                               PD_VDO_SVDM_VER(p[0]));

        tcpm_ams_start(port, DISCOVER_IDENTITY);
        /* 6.4.4.3.1: Only respond as UFP (device) */
        if (port->data_role == TYPEC_DEVICE &&
            port->nr_snk_vdo) {

@@ -1568,14 +1570,19 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
        }
        break;
    case CMD_DISCOVER_SVID:
        tcpm_ams_start(port, DISCOVER_SVIDS);
        break;
    case CMD_DISCOVER_MODES:
        tcpm_ams_start(port, DISCOVER_MODES);
        break;
    case CMD_ENTER_MODE:
        tcpm_ams_start(port, DFP_TO_UFP_ENTER_MODE);
        break;
    case CMD_EXIT_MODE:
        tcpm_ams_start(port, DFP_TO_UFP_EXIT_MODE);
        break;
    case CMD_ATTENTION:
        tcpm_ams_start(port, ATTENTION);
        /* Attention command does not have response */
        *adev_action = ADEV_ATTENTION;
        return 0;

@@ -2287,6 +2294,12 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
    bool frs_enable;
    int ret;

    if (tcpm_vdm_ams(port) && type != PD_DATA_VENDOR_DEF) {
        port->vdm_state = VDM_STATE_ERR_BUSY;
        tcpm_ams_finish(port);
        mod_vdm_delayed_work(port, 0);
    }

    switch (type) {
    case PD_DATA_SOURCE_CAP:
        for (i = 0; i < cnt; i++)

@@ -2417,7 +2430,10 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
                    NONE_AMS);
        break;
    case PD_DATA_VENDOR_DEF:
        tcpm_handle_vdm_request(port, msg->payload, cnt);
        if (tcpm_vdm_ams(port) || port->nr_snk_vdo)
            tcpm_handle_vdm_request(port, msg->payload, cnt);
        else if (port->negotiated_rev > PD_REV20)
            tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
        break;
    case PD_DATA_BIST:
        port->bist_request = le32_to_cpu(msg->payload[0]);

@@ -2459,6 +2475,16 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
    enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
    enum tcpm_state next_state;

    /*
     * Stop VDM state machine if interrupted by other Messages while NOT_SUPP is allowed in
     * VDM AMS if waiting for VDM responses and will be handled later.
     */
    if (tcpm_vdm_ams(port) && type != PD_CTRL_NOT_SUPP && type != PD_CTRL_GOOD_CRC) {
        port->vdm_state = VDM_STATE_ERR_BUSY;
        tcpm_ams_finish(port);
        mod_vdm_delayed_work(port, 0);
    }

    switch (type) {
    case PD_CTRL_GOOD_CRC:
    case PD_CTRL_PING:

|
|||
enum pd_ext_msg_type type = pd_header_type_le(msg->header);
|
||||
unsigned int data_size = pd_ext_header_data_size_le(msg->ext_msg.header);
|
||||
|
||||
if (!(msg->ext_msg.header & PD_EXT_HDR_CHUNKED)) {
|
||||
/* stopping VDM state machine if interrupted by other Messages */
|
||||
if (tcpm_vdm_ams(port)) {
|
||||
port->vdm_state = VDM_STATE_ERR_BUSY;
|
||||
tcpm_ams_finish(port);
|
||||
mod_vdm_delayed_work(port, 0);
|
||||
}
|
||||
|
||||
if (!(le16_to_cpu(msg->ext_msg.header) & PD_EXT_HDR_CHUNKED)) {
|
||||
tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
|
||||
tcpm_log(port, "Unchunked extended messages unsupported");
|
||||
return;
|
||||
|
@ -2811,7 +2844,7 @@ static void tcpm_pd_rx_handler(struct kthread_work *work)
|
|||
"Data role mismatch, initiating error recovery");
|
||||
tcpm_set_state(port, ERROR_RECOVERY, 0);
|
||||
} else {
|
||||
if (msg->header & PD_HEADER_EXT_HDR)
|
||||
if (le16_to_cpu(msg->header) & PD_HEADER_EXT_HDR)
|
||||
tcpm_pd_ext_msg_request(port, msg);
|
||||
else if (cnt)
|
||||
tcpm_pd_data_request(port, msg);
|
||||
|
|
|
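The last two tcpm hunks convert the message header from its little-endian wire format before testing flag bits; on a big-endian CPU the raw in-memory value has its bytes swapped and the test hits the wrong bit. A short endian-safe sketch follows; get_le16() plays the role that le16_to_cpu() plays in the kernel, and HDR_EXT is a stand-in for PD_HEADER_EXT_HDR.

#include <stdint.h>
#include <stdio.h>

#define HDR_EXT (1u << 15)             /* stand-in for PD_HEADER_EXT_HDR */

/* Assemble a little-endian 16-bit wire value from bytes, regardless of the
 * host's endianness; this is the guarantee le16_to_cpu() gives in the kernel. */
static uint16_t get_le16(const uint8_t *p)
{
    return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
    /* A PD message header as it arrives on the wire (little-endian). */
    uint8_t wire[2] = { 0x42, 0x80 };  /* 0x8042: extended bit set */
    uint16_t header = get_le16(wire);

    if (header & HDR_EXT)
        printf("extended message (header = 0x%04x)\n", header);
    else
        printf("regular message (header = 0x%04x)\n", header);
    return 0;
}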
@@ -717,8 +717,8 @@ static void ucsi_handle_connector_change(struct work_struct *work)
        ucsi_send_command(con->ucsi, command, NULL, 0);

    /* 3. ACK connector change */
    clear_bit(EVENT_PENDING, &ucsi->flags);
    ret = ucsi_acknowledge_connector_change(ucsi);
    clear_bit(EVENT_PENDING, &ucsi->flags);
    if (ret) {
        dev_err(ucsi->dev, "%s: ACK failed (%d)", __func__, ret);
        goto out_unlock;
