usb: typec: tcpm: Send DISCOVER_IDENTITY from dedicated work

In the current design, DISCOVER_IDENTITY is queued to the VDM state
machine immediately in the Ready states and is never retried if the AMS
fails. Move the process to a delayed work so that when it fails for some
reason (e.g. Sink Tx No Go), it can be retried by queueing the work
again. Also fix a problem where vdm_state is not set to a proper state
when the command is blocked by the Collision Avoidance mechanism.
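
For context, below is a standalone sketch (not part of this patch; the
foo_* names and the foo_try_send() helper are hypothetical) of the
hrtimer-plus-kthread_work retry pattern that the diff applies to
DISCOVER_IDENTITY: the timer callback only queues the work, and the work
re-arms the timer whenever the attempt has to be retried.

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/hrtimer.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/ktime.h>

#define FOO_RETRY_MS	100

struct foo_port {
	struct kthread_worker *wq;
	struct hrtimer retry_timer;
	struct kthread_work retry_work;
};

/* Hypothetical operation that may ask to be retried later. */
static int foo_try_send(struct foo_port *port)
{
	return -EAGAIN;
}

/* delay_ms == 0: run the work now; otherwise arm the timer. */
static void foo_mod_delayed_work(struct foo_port *port, unsigned int delay_ms)
{
	if (delay_ms) {
		hrtimer_start(&port->retry_timer, ms_to_ktime(delay_ms),
			      HRTIMER_MODE_REL);
	} else {
		hrtimer_cancel(&port->retry_timer);
		kthread_queue_work(port->wq, &port->retry_work);
	}
}

/* Timer expiry only queues the work; the work runs in the worker thread. */
static enum hrtimer_restart foo_timer_handler(struct hrtimer *timer)
{
	struct foo_port *port = container_of(timer, struct foo_port, retry_timer);

	kthread_queue_work(port->wq, &port->retry_work);
	return HRTIMER_NORESTART;
}

/* The work retries itself by re-arming the timer when the send is refused. */
static void foo_retry_work(struct kthread_work *work)
{
	struct foo_port *port = container_of(work, struct foo_port, retry_work);

	if (foo_try_send(port) == -EAGAIN)
		foo_mod_delayed_work(port, FOO_RETRY_MS);
}

static int foo_init(struct foo_port *port)
{
	port->wq = kthread_create_worker(0, "foo-retry");
	if (IS_ERR(port->wq))
		return PTR_ERR(port->wq);

	kthread_init_work(&port->retry_work, foo_retry_work);
	hrtimer_init(&port->retry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	port->retry_timer.function = foo_timer_handler;
	return 0;
}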

Reviewed-by: Guenter Roeck <linux@roeck-us.net>
Acked-by: Heikki Krogerus <heikki.krogerus@linux.intel.com>
Signed-off-by: Kyle Tso <kyletso@google.com>
Link: https://lore.kernel.org/r/20210507062300.1945009-2-kyletso@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit c34e85fa69
parent 1f4642b72b
Author:    Kyle Tso <kyletso@google.com>
Date:      2021-05-07 14:22:59 +08:00
Committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 changed file with 75 additions and 10 deletions

--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c

@@ -259,6 +259,7 @@ enum frs_typec_current {
 #define ALTMODE_DISCOVERY_MAX	(SVID_DISCOVERY_MAX * MODE_DISCOVERY_MAX)
 
 #define GET_SINK_CAP_RETRY_MS	100
+#define SEND_DISCOVER_RETRY_MS	100
 
 struct pd_mode_data {
 	int svid_index;			/* current SVID index */
@@ -366,6 +367,8 @@ struct tcpm_port {
 	struct kthread_work vdm_state_machine;
 	struct hrtimer enable_frs_timer;
 	struct kthread_work enable_frs;
+	struct hrtimer send_discover_timer;
+	struct kthread_work send_discover_work;
 
 	bool state_machine_running;
 	bool vdm_sm_running;
@@ -1178,6 +1181,16 @@ static void mod_enable_frs_delayed_work(struct tcpm_port *port, unsigned int del
 	}
 }
 
+static void mod_send_discover_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
+{
+	if (delay_ms) {
+		hrtimer_start(&port->send_discover_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
+	} else {
+		hrtimer_cancel(&port->send_discover_timer);
+		kthread_queue_work(port->wq, &port->send_discover_work);
+	}
+}
+
 static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state,
 			   unsigned int delay_ms)
 {
@@ -1855,6 +1868,9 @@ static void vdm_run_state_machine(struct tcpm_port *port)
 				res = tcpm_ams_start(port, DISCOVER_IDENTITY);
 				if (res == 0)
 					port->send_discover = false;
+				else if (res == -EAGAIN)
+					mod_send_discover_delayed_work(port,
+								       SEND_DISCOVER_RETRY_MS);
 				break;
 			case CMD_DISCOVER_SVID:
 				res = tcpm_ams_start(port, DISCOVER_SVIDS);
@@ -1880,6 +1896,7 @@ static void vdm_run_state_machine(struct tcpm_port *port)
 			}
 
 			if (res < 0) {
+				port->vdm_state = VDM_STATE_ERR_BUSY;
 				port->vdm_sm_running = false;
 				return;
 			}
@@ -3682,14 +3699,6 @@ static inline enum tcpm_state unattached_state(struct tcpm_port *port)
 	return SNK_UNATTACHED;
 }
 
-static void tcpm_check_send_discover(struct tcpm_port *port)
-{
-	if ((port->data_role == TYPEC_HOST || port->negotiated_rev > PD_REV20) &&
-	    port->send_discover && port->pd_capable)
-		tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
-	port->send_discover = false;
-}
-
 static void tcpm_swap_complete(struct tcpm_port *port, int result)
 {
 	if (port->swap_pending) {
@@ -3926,7 +3935,18 @@ static void run_state_machine(struct tcpm_port *port)
 			break;
 		}
 
-		tcpm_check_send_discover(port);
+		/*
+		 * 6.4.4.3.1 Discover Identity
+		 * "The Discover Identity Command Shall only be sent to SOP when there is an
+		 * Explicit Contract."
+		 * For now, this driver only supports SOP for DISCOVER_IDENTITY, thus using
+		 * port->explicit_contract to decide whether to send the command.
+		 */
+		if (port->explicit_contract)
+			mod_send_discover_delayed_work(port, 0);
+		else
+			port->send_discover = false;
+
 		/*
 		 * 6.3.5
 		 * Sending ping messages is not necessary if
@@ -4194,7 +4214,18 @@ static void run_state_machine(struct tcpm_port *port)
 			break;
 		}
 
-		tcpm_check_send_discover(port);
+		/*
+		 * 6.4.4.3.1 Discover Identity
+		 * "The Discover Identity Command Shall only be sent to SOP when there is an
+		 * Explicit Contract."
+		 * For now, this driver only supports SOP for DISCOVER_IDENTITY, thus using
+		 * port->explicit_contract.
+		 */
+		if (port->explicit_contract)
+			mod_send_discover_delayed_work(port, 0);
+		else
+			port->send_discover = false;
+
 		power_supply_changed(port->psy);
 		break;
@@ -5288,6 +5319,29 @@ unlock:
 	mutex_unlock(&port->lock);
 }
 
+static void tcpm_send_discover_work(struct kthread_work *work)
+{
+	struct tcpm_port *port = container_of(work, struct tcpm_port, send_discover_work);
+
+	mutex_lock(&port->lock);
+	/* No need to send DISCOVER_IDENTITY anymore */
+	if (!port->send_discover)
+		goto unlock;
+
+	/* Retry if the port is not idle */
+	if ((port->state != SRC_READY && port->state != SNK_READY) || port->vdm_sm_running) {
+		mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
+		goto unlock;
+	}
+
+	/* Only send the Message if the port is host for PD rev2.0 */
+	if (port->data_role == TYPEC_HOST || port->negotiated_rev > PD_REV20)
+		tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
+
+unlock:
+	mutex_unlock(&port->lock);
+}
+
 static int tcpm_dr_set(struct typec_port *p, enum typec_data_role data)
 {
 	struct tcpm_port *port = typec_get_drvdata(p);
@@ -6093,6 +6147,14 @@ static enum hrtimer_restart enable_frs_timer_handler(struct hrtimer *timer)
 	return HRTIMER_NORESTART;
 }
 
+static enum hrtimer_restart send_discover_timer_handler(struct hrtimer *timer)
+{
+	struct tcpm_port *port = container_of(timer, struct tcpm_port, send_discover_timer);
+
+	kthread_queue_work(port->wq, &port->send_discover_work);
+	return HRTIMER_NORESTART;
+}
+
 struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
 {
 	struct tcpm_port *port;
@@ -6123,12 +6185,15 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
 	kthread_init_work(&port->vdm_state_machine, vdm_state_machine_work);
 	kthread_init_work(&port->event_work, tcpm_pd_event_handler);
 	kthread_init_work(&port->enable_frs, tcpm_enable_frs_work);
+	kthread_init_work(&port->send_discover_work, tcpm_send_discover_work);
 	hrtimer_init(&port->state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	port->state_machine_timer.function = state_machine_timer_handler;
 	hrtimer_init(&port->vdm_state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	port->vdm_state_machine_timer.function = vdm_state_machine_timer_handler;
 	hrtimer_init(&port->enable_frs_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	port->enable_frs_timer.function = enable_frs_timer_handler;
+	hrtimer_init(&port->send_discover_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	port->send_discover_timer.function = send_discover_timer_handler;
 	spin_lock_init(&port->pd_event_lock);