xhci: USB 3.0 BW checking.
The Intel Panther Point xHCI host tracks SuperSpeed endpoints in a
different way than USB 2.0/1.1 endpoints.

The bandwidth interval tables are not used; instead the bandwidth is
calculated in a very simple way.  Bandwidth for SuperSpeed endpoints is
tracked individually in each direction, since each direction has the full
USB 3.0 bandwidth available.  10% of the bus bandwidth is reserved for
non-periodic transfers.

This checking would be more complex if we had USB 3.0 LPM enabled,
because an additional latency for isochronous ping times would need to be
taken into account.  However, we don't have USB 3.0 LPM support in Linux
yet.

Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
commit 2b69899934
parent 170c026347
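For context, below is a minimal standalone sketch of the per-direction check that this patch introduces. The limit and reserve values are illustrative placeholders (not the driver's actual SS_BW_LIMIT_IN/OUT constants), and the helper names are made up for the example:

/* sketch_ss_bw_check.c -- hedged illustration of the 10% reservation check.
 * SKETCH_SS_BW_LIMIT and SKETCH_SS_BW_RESERVED are assumed example values.
 */
#include <errno.h>
#include <stdio.h>

#define SKETCH_SS_BW_LIMIT	1000	/* assumed per-direction budget, abstract units */
#define SKETCH_SS_BW_RESERVED	10	/* percent held back for non-periodic traffic */

/* Round-up integer division, mirroring the kernel's DIV_ROUND_UP(). */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Each direction is checked on its own, since SuperSpeed is dual-simplex:
 * the IN budget and the OUT budget do not share bandwidth.
 */
static int check_ss_direction(unsigned int periodic_bw_in_use)
{
	unsigned int bw_reserved =
		DIV_ROUND_UP(SKETCH_SS_BW_RESERVED * SKETCH_SS_BW_LIMIT, 100);

	/* Reject the configuration if periodic traffic would eat into the
	 * 10% reserved for non-periodic transfers.
	 */
	if (periodic_bw_in_use > (SKETCH_SS_BW_LIMIT - bw_reserved))
		return -ENOMEM;
	return 0;
}

int main(void)
{
	/* 950 > 1000 - 100, so this direction is over budget (-ENOMEM). */
	printf("IN  direction: %d\n", check_ss_direction(950));
	/* 850 <= 900, so this direction still fits (0). */
	printf("OUT direction: %d\n", check_ss_direction(850));
	return 0;
}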
@@ -1810,6 +1810,22 @@ static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
 	return 0;
 }
 
+static int xhci_check_ss_bw(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev)
+{
+	unsigned int bw_reserved;
+
+	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
+	if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
+		return -ENOMEM;
+
+	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
+	if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
+		return -ENOMEM;
+
+	return 0;
+}
+
 /*
  * This algorithm is a very conservative estimate of the worst-case scheduling
  * scenario for any one interval.  The hardware dynamically schedules the
@@ -1866,6 +1882,9 @@ static int xhci_check_bw_table(struct xhci_hcd *xhci,
 	unsigned int packets_remaining = 0;
 	unsigned int i;
 
+	if (virt_dev->udev->speed == USB_SPEED_SUPER)
+		return xhci_check_ss_bw(xhci, virt_dev);
+
 	if (virt_dev->udev->speed == USB_SPEED_HIGH) {
 		max_bandwidth = HS_BW_LIMIT;
 		/* Convert percent of bus BW reserved to blocks reserved */
@@ -2028,6 +2047,25 @@ static bool xhci_is_async_ep(unsigned int ep_type)
 			ep_type != INT_IN_EP);
 }
 
+static bool xhci_is_sync_in_ep(unsigned int ep_type)
+{
+	return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
+}
+
+static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
+{
+	unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);
+
+	if (ep_bw->ep_interval == 0)
+		return SS_OVERHEAD_BURST +
+			(ep_bw->mult * ep_bw->num_packets *
+					(SS_OVERHEAD + mps));
+	return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
+				(SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
+			1 << ep_bw->ep_interval);
+
+}
+
 void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
 		struct xhci_bw_info *ep_bw,
 		struct xhci_interval_bw_table *bw_table,
@@ -2038,10 +2076,24 @@ void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
 	struct xhci_interval_bw *interval_bw;
 	int normalized_interval;
 
-	if (xhci_is_async_ep(ep_bw->type) ||
-			list_empty(&virt_ep->bw_endpoint_list))
+	if (xhci_is_async_ep(ep_bw->type))
 		return;
 
+	if (udev->speed == USB_SPEED_SUPER) {
+		if (xhci_is_sync_in_ep(ep_bw->type))
+			xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
+				xhci_get_ss_bw_consumed(ep_bw);
+		else
+			xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
+				xhci_get_ss_bw_consumed(ep_bw);
+		return;
+	}
+
+	/* SuperSpeed endpoints never get added to intervals in the table, so
+	 * this check is only valid for HS/FS/LS devices.
+	 */
+	if (list_empty(&virt_ep->bw_endpoint_list))
+		return;
 	/* For LS/FS devices, we need to translate the interval expressed in
 	 * microframes to frames.
 	 */
@@ -2091,6 +2143,16 @@ static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
 	if (xhci_is_async_ep(ep_bw->type))
 		return;
 
+	if (udev->speed == USB_SPEED_SUPER) {
+		if (xhci_is_sync_in_ep(ep_bw->type))
+			xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
+				xhci_get_ss_bw_consumed(ep_bw);
+		else
+			xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
+				xhci_get_ss_bw_consumed(ep_bw);
+		return;
+	}
+
 	/* For LS/FS devices, we need to translate the interval expressed in
 	 * microframes to frames.
 	 */
@@ -2169,9 +2231,6 @@ static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
 	struct xhci_input_control_ctx *ctrl_ctx;
 	int old_active_eps = 0;
 
-	if (virt_dev->udev->speed == USB_SPEED_SUPER)
-		return 0;
-
 	if (virt_dev->tt_info)
 		old_active_eps = virt_dev->tt_info->active_eps;
 
@@ -799,6 +799,7 @@ struct xhci_bw_info {
 /* Percentage of bus bandwidth reserved for non-periodic transfers */
 #define FS_BW_RESERVED		10
 #define HS_BW_RESERVED		20
+#define SS_BW_RESERVED		10
 
 struct xhci_virt_ep {
 	struct xhci_ring		*ring;
@@ -869,6 +870,8 @@ struct xhci_interval_bw_table {
 	struct xhci_interval_bw		interval_bw[XHCI_MAX_INTERVAL];
 	/* Includes reserved bandwidth for async endpoints */
 	unsigned int		bw_used;
+	unsigned int		ss_bw_in;
+	unsigned int		ss_bw_out;
 };
 
 