xen: bug fixes for 4.4-rc5
- XSA-155 security fixes to backend drivers.
- XSA-157 security fixes to pciback.
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABAgAGBQJWdDrXAAoJEFxbo/MsZsTR3N0H/0Lvz6MWBARCje7livbz7nqE
PS0Bea+2yAfNhCDDiDlpV0lor8qlyfWDF6lGhLjItldAzahag3ZDKDf1Z/lcQvhf
3MwFOcOVZE8lLtvLT6LGnPuehi1Mfdi1Qk1/zQhPhsq6+FLPLT2y+whmBihp8mMh
C12f7KRg5r3U7eZXNB6MEtGA0RFrOp0lBdvsiZx3qyVLpezj9mIe0NueQqwY3QCS
xQ0fILp/x2EnZNZuzgghFTPRxMAx5ReOezgn9Rzvq4aThD+irz1y6ghkYN4rG2s2
tyYOTqBnjJEJEQ+wmYMhnfCwVvDffztG+uI9hqN31QFJiNB0xsjSWFCkDAWchiU=
=Argz
-----END PGP SIGNATURE-----

Merge tag 'for-linus-4.4-rc5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen bug fixes from David Vrabel:

 - XSA-155 security fixes to backend drivers.
 - XSA-157 security fixes to pciback.

* tag 'for-linus-4.4-rc5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen-pciback: fix up cleanup path when alloc fails
  xen/pciback: Don't allow MSI-X ops if PCI_COMMAND_MEMORY is not set.
  xen/pciback: For XEN_PCI_OP_disable_msi[|x] only disable if device has MSI(X) enabled.
  xen/pciback: Do not install an IRQ handler for MSI interrupts.
  xen/pciback: Return error on XEN_PCI_OP_enable_msix when device has MSI or MSI-X enabled
  xen/pciback: Return error on XEN_PCI_OP_enable_msi when device has MSI or MSI-X enabled
  xen/pciback: Save xen_pci_op commands before processing it
  xen-scsiback: safely copy requests
  xen-blkback: read from indirect descriptors only once
  xen-blkback: only read request operation from shared ring once
  xen-netback: use RING_COPY_REQUEST() throughout
  xen-netback: don't use last request to determine minimum Tx credit
  xen: Add RING_COPY_REQUEST()
  xen/x86/pvh: Use HVM's flush_tlb_others op
  xen: Resume PMU from non-atomic context
  xen/events/fifo: Consume unprocessed events when a CPU dies
This commit is contained in:
commit 3273cba195
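The unifying bug class in the XSA-155 fixes below is a double fetch from memory shared with the frontend: the backend validates a field that lives in the shared ring and then reads it again, so a racing frontend can change the value after the check has passed. The sketch below is illustrative only (user-space C, invented names, not taken from the patch); it contrasts the unsafe pattern with the snapshot pattern that the READ_ONCE() and RING_COPY_REQUEST() conversions apply.

#include <stdint.h>
#include <stdio.h>

struct demo_req {                      /* stands in for a request on a shared ring */
        uint8_t first_sect;
        uint8_t last_sect;
};

/* UNSAFE: last_sect is read twice, so the other domain can enlarge it after
 * the bounds check has already passed (the bug class behind XSA-155).
 */
static int seg_len_unsafe(volatile struct demo_req *shared)
{
        if (shared->last_sect < shared->first_sect)
                return -1;
        return shared->last_sect - shared->first_sect + 1;
}

/* SAFE: read each field exactly once into locals, then check and use only the
 * locals -- the pattern the READ_ONCE()/RING_COPY_REQUEST() changes apply.
 */
static int seg_len_safe(volatile struct demo_req *shared)
{
        uint8_t first = shared->first_sect;
        uint8_t last = shared->last_sect;

        if (last < first)
                return -1;
        return last - first + 1;
}

int main(void)
{
        struct demo_req r = { .first_sect = 1, .last_sect = 4 };

        printf("unsafe=%d safe=%d\n", seg_len_unsafe(&r), seg_len_safe(&r));
        return 0;
}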
@@ -2495,14 +2495,9 @@ void __init xen_init_mmu_ops(void)
 {
         x86_init.paging.pagetable_init = xen_pagetable_init;
 
-        /* Optimization - we can use the HVM one but it has no idea which
-         * VCPUs are descheduled - which means that it will needlessly IPI
-         * them. Xen knows so let it do the job.
-         */
-        if (xen_feature(XENFEAT_auto_translated_physmap)) {
-                pv_mmu_ops.flush_tlb_others = xen_flush_tlb_others;
+        if (xen_feature(XENFEAT_auto_translated_physmap))
                 return;
-        }
+
         pv_mmu_ops = xen_mmu_ops;
 
         memset(dummy_mapping, 0xff, PAGE_SIZE);
@@ -68,26 +68,16 @@ static void xen_pv_post_suspend(int suspend_cancelled)
 
 void xen_arch_pre_suspend(void)
 {
-        int cpu;
-
-        for_each_online_cpu(cpu)
-                xen_pmu_finish(cpu);
-
         if (xen_pv_domain())
                 xen_pv_pre_suspend();
 }
 
 void xen_arch_post_suspend(int cancelled)
 {
-        int cpu;
-
         if (xen_pv_domain())
                 xen_pv_post_suspend(cancelled);
         else
                 xen_hvm_post_suspend(cancelled);
-
-        for_each_online_cpu(cpu)
-                xen_pmu_init(cpu);
 }
 
 static void xen_vcpu_notify_restore(void *data)
@@ -106,10 +96,20 @@ static void xen_vcpu_notify_suspend(void *data)
 
 void xen_arch_resume(void)
 {
+        int cpu;
+
         on_each_cpu(xen_vcpu_notify_restore, NULL, 1);
+
+        for_each_online_cpu(cpu)
+                xen_pmu_init(cpu);
 }
 
 void xen_arch_suspend(void)
 {
+        int cpu;
+
+        for_each_online_cpu(cpu)
+                xen_pmu_finish(cpu);
+
         on_each_cpu(xen_vcpu_notify_suspend, NULL, 1);
 }
@@ -950,6 +950,8 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
                 goto unmap;
 
         for (n = 0, i = 0; n < nseg; n++) {
+                uint8_t first_sect, last_sect;
+
                 if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
                         /* Map indirect segments */
                         if (segments)
@@ -957,15 +959,18 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
                         segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
                 }
                 i = n % SEGS_PER_INDIRECT_FRAME;
+
                 pending_req->segments[n]->gref = segments[i].gref;
-                seg[n].nsec = segments[i].last_sect -
-                        segments[i].first_sect + 1;
-                seg[n].offset = (segments[i].first_sect << 9);
-                if ((segments[i].last_sect >= (XEN_PAGE_SIZE >> 9)) ||
-                    (segments[i].last_sect < segments[i].first_sect)) {
+
+                first_sect = READ_ONCE(segments[i].first_sect);
+                last_sect = READ_ONCE(segments[i].last_sect);
+                if (last_sect >= (XEN_PAGE_SIZE >> 9) || last_sect < first_sect) {
                         rc = -EINVAL;
                         goto unmap;
                 }
+
+                seg[n].nsec = last_sect - first_sect + 1;
+                seg[n].offset = first_sect << 9;
                 preq->nr_sects += seg[n].nsec;
         }
 
@@ -408,8 +408,8 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
                                         struct blkif_x86_32_request *src)
 {
         int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
-        dst->operation = src->operation;
-        switch (src->operation) {
+        dst->operation = READ_ONCE(src->operation);
+        switch (dst->operation) {
         case BLKIF_OP_READ:
         case BLKIF_OP_WRITE:
         case BLKIF_OP_WRITE_BARRIER:
@@ -456,8 +456,8 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
                                         struct blkif_x86_64_request *src)
 {
         int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
-        dst->operation = src->operation;
-        switch (src->operation) {
+        dst->operation = READ_ONCE(src->operation);
+        switch (dst->operation) {
         case BLKIF_OP_READ:
         case BLKIF_OP_WRITE:
         case BLKIF_OP_WRITE_BARRIER:
@@ -258,18 +258,18 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
                                                  struct netrx_pending_operations *npo)
 {
         struct xenvif_rx_meta *meta;
-        struct xen_netif_rx_request *req;
+        struct xen_netif_rx_request req;
 
-        req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
+        RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
 
         meta = npo->meta + npo->meta_prod++;
         meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
         meta->gso_size = 0;
         meta->size = 0;
-        meta->id = req->id;
+        meta->id = req.id;
 
         npo->copy_off = 0;
-        npo->copy_gref = req->gref;
+        npo->copy_gref = req.gref;
 
         return meta;
 }
@@ -424,7 +424,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
         struct xenvif *vif = netdev_priv(skb->dev);
         int nr_frags = skb_shinfo(skb)->nr_frags;
         int i;
-        struct xen_netif_rx_request *req;
+        struct xen_netif_rx_request req;
         struct xenvif_rx_meta *meta;
         unsigned char *data;
         int head = 1;
@@ -443,15 +443,15 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 
         /* Set up a GSO prefix descriptor, if necessary */
         if ((1 << gso_type) & vif->gso_prefix_mask) {
-                req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
+                RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
                 meta = npo->meta + npo->meta_prod++;
                 meta->gso_type = gso_type;
                 meta->gso_size = skb_shinfo(skb)->gso_size;
                 meta->size = 0;
-                meta->id = req->id;
+                meta->id = req.id;
         }
 
-        req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
+        RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
         meta = npo->meta + npo->meta_prod++;
 
         if ((1 << gso_type) & vif->gso_mask) {
@@ -463,9 +463,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
         }
 
         meta->size = 0;
-        meta->id = req->id;
+        meta->id = req.id;
         npo->copy_off = 0;
-        npo->copy_gref = req->gref;
+        npo->copy_gref = req.gref;
 
         data = skb->data;
         while (data < skb_tail_pointer(skb)) {
@@ -679,9 +679,7 @@ static void tx_add_credit(struct xenvif_queue *queue)
          * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
          * Otherwise the interface can seize up due to insufficient credit.
          */
-        max_burst = RING_GET_REQUEST(&queue->tx, queue->tx.req_cons)->size;
-        max_burst = min(max_burst, 131072UL);
-        max_burst = max(max_burst, queue->credit_bytes);
+        max_burst = max(131072UL, queue->credit_bytes);
 
         /* Take care that adding a new chunk of credit doesn't wrap to zero. */
         max_credit = queue->remaining_credit + queue->credit_bytes;
@@ -711,7 +709,7 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
                 spin_unlock_irqrestore(&queue->response_lock, flags);
                 if (cons == end)
                         break;
-                txp = RING_GET_REQUEST(&queue->tx, cons++);
+                RING_COPY_REQUEST(&queue->tx, cons++, txp);
         } while (1);
         queue->tx.req_cons = cons;
 }
@@ -778,8 +776,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
                 if (drop_err)
                         txp = &dropped_tx;
 
-                memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots),
-                       sizeof(*txp));
+                RING_COPY_REQUEST(&queue->tx, cons + slots, txp);
 
                 /* If the guest submitted a frame >= 64 KiB then
                  * first->size overflowed and following slots will
@@ -1112,8 +1109,7 @@ static int xenvif_get_extras(struct xenvif_queue *queue,
                         return -EBADR;
                 }
 
-                memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons),
-                       sizeof(extra));
+                RING_COPY_REQUEST(&queue->tx, cons, &extra);
                 if (unlikely(!extra.type ||
                              extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
                         queue->tx.req_cons = ++cons;
@@ -1322,7 +1318,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 
                 idx = queue->tx.req_cons;
                 rmb(); /* Ensure that we see the request before we copy it. */
-                memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq));
+                RING_COPY_REQUEST(&queue->tx, idx, &txreq);
 
                 /* Credit-based scheduling. */
                 if (txreq.size > queue->remaining_credit &&
@@ -281,7 +281,8 @@ static void handle_irq_for_port(unsigned port)
 
 static void consume_one_event(unsigned cpu,
                               struct evtchn_fifo_control_block *control_block,
-                              unsigned priority, unsigned long *ready)
+                              unsigned priority, unsigned long *ready,
+                              bool drop)
 {
         struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
         uint32_t head;
@@ -313,13 +314,17 @@ static void consume_one_event(unsigned cpu,
         if (head == 0)
                 clear_bit(priority, ready);
 
-        if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port))
-                handle_irq_for_port(port);
+        if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) {
+                if (unlikely(drop))
+                        pr_warn("Dropping pending event for port %u\n", port);
+                else
+                        handle_irq_for_port(port);
+        }
 
         q->head[priority] = head;
 }
 
-static void evtchn_fifo_handle_events(unsigned cpu)
+static void __evtchn_fifo_handle_events(unsigned cpu, bool drop)
 {
         struct evtchn_fifo_control_block *control_block;
         unsigned long ready;
@@ -331,11 +336,16 @@ static void evtchn_fifo_handle_events(unsigned cpu)
 
         while (ready) {
                 q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES);
-                consume_one_event(cpu, control_block, q, &ready);
+                consume_one_event(cpu, control_block, q, &ready, drop);
                 ready |= xchg(&control_block->ready, 0);
         }
 }
 
+static void evtchn_fifo_handle_events(unsigned cpu)
+{
+        __evtchn_fifo_handle_events(cpu, false);
+}
+
 static void evtchn_fifo_resume(void)
 {
         unsigned cpu;
@@ -420,6 +430,9 @@ static int evtchn_fifo_cpu_notification(struct notifier_block *self,
                 if (!per_cpu(cpu_control_block, cpu))
                         ret = evtchn_fifo_alloc_control_block(cpu);
                 break;
+        case CPU_DEAD:
+                __evtchn_fifo_handle_events(cpu, true);
+                break;
         default:
                 break;
         }
@@ -37,6 +37,7 @@ struct xen_pcibk_device {
         struct xen_pci_sharedinfo *sh_info;
         unsigned long flags;
         struct work_struct op_work;
+        struct xen_pci_op op;
 };
 
 struct xen_pcibk_dev_data {
@@ -70,6 +70,13 @@ static void xen_pcibk_control_isr(struct pci_dev *dev, int reset)
                 enable ? "enable" : "disable");
 
         if (enable) {
+                /*
+                 * The MSI or MSI-X should not have an IRQ handler. Otherwise
+                 * if the guest terminates we BUG_ON in free_msi_irqs.
+                 */
+                if (dev->msi_enabled || dev->msix_enabled)
+                        goto out;
+
                 rc = request_irq(dev_data->irq,
                                 xen_pcibk_guest_interrupt, IRQF_SHARED,
                                 dev_data->irq_name, dev);
@@ -144,7 +151,12 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
         if (unlikely(verbose_request))
                 printk(KERN_DEBUG DRV_NAME ": %s: enable MSI\n", pci_name(dev));
 
-        status = pci_enable_msi(dev);
+        if (dev->msi_enabled)
+                status = -EALREADY;
+        else if (dev->msix_enabled)
+                status = -ENXIO;
+        else
+                status = pci_enable_msi(dev);
 
         if (status) {
                 pr_warn_ratelimited("%s: error enabling MSI for guest %u: err %d\n",
@@ -173,20 +185,23 @@ static
 int xen_pcibk_disable_msi(struct xen_pcibk_device *pdev,
                           struct pci_dev *dev, struct xen_pci_op *op)
 {
-        struct xen_pcibk_dev_data *dev_data;
-
         if (unlikely(verbose_request))
                 printk(KERN_DEBUG DRV_NAME ": %s: disable MSI\n",
                        pci_name(dev));
-        pci_disable_msi(dev);
 
+        if (dev->msi_enabled) {
+                struct xen_pcibk_dev_data *dev_data;
+
+                pci_disable_msi(dev);
+
+                dev_data = pci_get_drvdata(dev);
+                if (dev_data)
+                        dev_data->ack_intr = 1;
+        }
         op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
         if (unlikely(verbose_request))
                 printk(KERN_DEBUG DRV_NAME ": %s: MSI: %d\n", pci_name(dev),
                        op->value);
-        dev_data = pci_get_drvdata(dev);
-        if (dev_data)
-                dev_data->ack_intr = 1;
         return 0;
 }
 
@@ -197,13 +212,26 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
         struct xen_pcibk_dev_data *dev_data;
         int i, result;
         struct msix_entry *entries;
+        u16 cmd;
 
         if (unlikely(verbose_request))
                 printk(KERN_DEBUG DRV_NAME ": %s: enable MSI-X\n",
                        pci_name(dev));
+
         if (op->value > SH_INFO_MAX_VEC)
                 return -EINVAL;
 
+        if (dev->msix_enabled)
+                return -EALREADY;
+
+        /*
+         * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able
+         * to access the BARs where the MSI-X entries reside.
+         */
+        pci_read_config_word(dev, PCI_COMMAND, &cmd);
+        if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY))
+                return -ENXIO;
+
         entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL);
         if (entries == NULL)
                 return -ENOMEM;
@@ -245,23 +273,27 @@ static
 int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev,
                           struct pci_dev *dev, struct xen_pci_op *op)
 {
-        struct xen_pcibk_dev_data *dev_data;
         if (unlikely(verbose_request))
                 printk(KERN_DEBUG DRV_NAME ": %s: disable MSI-X\n",
                        pci_name(dev));
-        pci_disable_msix(dev);
 
+        if (dev->msix_enabled) {
+                struct xen_pcibk_dev_data *dev_data;
+
+                pci_disable_msix(dev);
+
+                dev_data = pci_get_drvdata(dev);
+                if (dev_data)
+                        dev_data->ack_intr = 1;
+        }
         /*
          * SR-IOV devices (which don't have any legacy IRQ) have
          * an undefined IRQ value of zero.
          */
         op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
         if (unlikely(verbose_request))
-                printk(KERN_DEBUG DRV_NAME ": %s: MSI-X: %d\n", pci_name(dev),
-                       op->value);
-        dev_data = pci_get_drvdata(dev);
-        if (dev_data)
-                dev_data->ack_intr = 1;
+                printk(KERN_DEBUG DRV_NAME ": %s: MSI-X: %d\n",
+                       pci_name(dev), op->value);
         return 0;
 }
 #endif
@@ -298,9 +330,11 @@ void xen_pcibk_do_op(struct work_struct *data)
                 container_of(data, struct xen_pcibk_device, op_work);
         struct pci_dev *dev;
         struct xen_pcibk_dev_data *dev_data = NULL;
-        struct xen_pci_op *op = &pdev->sh_info->op;
+        struct xen_pci_op *op = &pdev->op;
         int test_intx = 0;
 
+        *op = pdev->sh_info->op;
+        barrier();
         dev = xen_pcibk_get_pci_dev(pdev, op->domain, op->bus, op->devfn);
 
         if (dev == NULL)
@@ -342,6 +376,17 @@ void xen_pcibk_do_op(struct work_struct *data)
                 if ((dev_data->enable_intx != test_intx))
                         xen_pcibk_control_isr(dev, 0 /* no reset */);
         }
+        pdev->sh_info->op.err = op->err;
+        pdev->sh_info->op.value = op->value;
+#ifdef CONFIG_PCI_MSI
+        if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) {
+                unsigned int i;
+
+                for (i = 0; i < op->value; i++)
+                        pdev->sh_info->op.msix_entries[i].vector =
+                                op->msix_entries[i].vector;
+        }
+#endif
         /* Tell the driver domain that we're done. */
         wmb();
         clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
@@ -44,7 +44,6 @@ static struct xen_pcibk_device *alloc_pdev(struct xenbus_device *xdev)
         dev_dbg(&xdev->dev, "allocated pdev @ 0x%p\n", pdev);
 
         pdev->xdev = xdev;
-        dev_set_drvdata(&xdev->dev, pdev);
 
         mutex_init(&pdev->dev_lock);
 
@@ -58,6 +57,9 @@ static struct xen_pcibk_device *alloc_pdev(struct xenbus_device *xdev)
                 kfree(pdev);
                 pdev = NULL;
         }
+
+        dev_set_drvdata(&xdev->dev, pdev);
+
 out:
         return pdev;
 }
@@ -726,7 +726,7 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
                 if (!pending_req)
                         return 1;
 
-                ring_req = *RING_GET_REQUEST(ring, rc);
+                RING_COPY_REQUEST(ring, rc, &ring_req);
                 ring->req_cons = ++rc;
 
                 err = prepare_pending_reqs(info, &ring_req, pending_req);
@@ -181,6 +181,20 @@ struct __name##_back_ring { \
 #define RING_GET_REQUEST(_r, _idx) \
     (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
 
+/*
+ * Get a local copy of a request.
+ *
+ * Use this in preference to RING_GET_REQUEST() so all processing is
+ * done on a local copy that cannot be modified by the other end.
+ *
+ * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
+ * to be ineffective where _req is a struct which consists of only bitfields.
+ */
+#define RING_COPY_REQUEST(_r, _idx, _req) do {                          \
+        /* Use volatile to force the copy into _req. */                 \
+        *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx);   \
+} while (0)
+
 #define RING_GET_RESPONSE(_r, _idx) \
     (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
 
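For reference, here is a standalone sketch (user-space C, invented names, not kernel code) of the approach RING_COPY_REQUEST() takes: snapshot the whole request through a volatile-qualified pointer into a private variable, then validate and use only that private copy.

#include <stdint.h>
#include <stdio.h>

struct demo_request {                  /* stands in for a blkif/netif request */
        uint16_t id;
        uint16_t size;
        uint32_t gref;
};

/* Copy through a volatile pointer so the compiler performs one real read of
 * the shared slot and cannot re-fetch individual fields from it later.
 */
#define DEMO_COPY_REQUEST(_shared, _req) do {                           \
        *(_req) = *(volatile struct demo_request *)(_shared);          \
} while (0)

static int process(struct demo_request *shared)
{
        struct demo_request req;

        DEMO_COPY_REQUEST(shared, &req);        /* single snapshot */

        if (req.size > 4096)                    /* validate the copy ... */
                return -1;

        printf("id=%u size=%u gref=%u\n",       /* ... and use the same copy */
               (unsigned)req.id, (unsigned)req.size, (unsigned)req.gref);
        return 0;
}

int main(void)
{
        struct demo_request shared = { .id = 1, .size = 64, .gref = 42 };

        return process(&shared);
}

The volatile cast is the same device the in-tree macro in the hunk above relies on to keep the compiler from re-reading the shared ring slot after validation.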