/* pci_sun4v_asm: Hypervisor calls for PCI support.
 *
 * Copyright (C) 2006, 2008 David S. Miller <davem@davemloft.net>
*/

#include <linux/linkage.h>
#include <asm/hypervisor.h>
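
/* Every entry point below follows the same sun4v fast-trap calling
 * convention: the arguments arrive in %o0-%o4 exactly where the
 * hypervisor expects them, the stub loads the HV_FAST_PCI_* function
 * number into %o5 and executes "ta HV_FAST_TRAP".  On return the
 * hypervisor leaves the status in %o0 and any output values in %o1
 * onward, which the "get" style stubs store through caller-supplied
 * pointers before returning the status.
 */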

/* %o0: devhandle
 * %o1: tsbid
 * %o2: num ttes
 * %o3: io_attributes
 * %o4: io_page_list phys address
 *
 * returns %o0: -status if status was non-zero, else
 *         %o0: num pages mapped
 */
ENTRY(pci_sun4v_iommu_map)
	mov	%o5, %g1
	mov	HV_FAST_PCI_IOMMU_MAP, %o5
	ta	HV_FAST_TRAP
	brnz,pn	%o0, 1f
	 sub	%g0, %o0, %o0
	mov	%o1, %o0
1:	retl
	 nop
ENDPROC(pci_sun4v_iommu_map)
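
/* Seen from C this stub is an ordinary function call; roughly (a
 * sketch, the real declaration lives in the sun4v PCI header):
 *
 *	long pci_sun4v_iommu_map(unsigned long devhandle,
 *				 unsigned long tsbid,
 *				 unsigned long num_ttes,
 *				 unsigned long io_attributes,
 *				 unsigned long io_page_list_pa);
 *
 * A negative return value is the negated hypervisor status, a
 * non-negative one is the number of pages actually mapped.
 */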

/* %o0: devhandle
 * %o1: tsbid
 * %o2: num ttes
 *
 * returns %o0: num ttes demapped
 */
ENTRY(pci_sun4v_iommu_demap)
	mov	HV_FAST_PCI_IOMMU_DEMAP, %o5
	ta	HV_FAST_TRAP
	retl
	 mov	%o1, %o0
ENDPROC(pci_sun4v_iommu_demap)

/* %o0: devhandle
 * %o1: tsbid
 * %o2: &io_attributes
 * %o3: &real_address
 *
 * returns %o0: status
 */
ENTRY(pci_sun4v_iommu_getmap)
	mov	%o2, %o4
	mov	HV_FAST_PCI_IOMMU_GETMAP, %o5
	ta	HV_FAST_TRAP
	stx	%o1, [%o4]
	stx	%o2, [%o3]
	retl
	 mov	%o0, %o0
ENDPROC(pci_sun4v_iommu_getmap)
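
/* The two config space accessors below use an annulled branch for
 * their error path: with the ",a" bit set, the instruction in the
 * delay slot executes only when the branch is taken, so the
 * "mov -1, ..." runs only when the hypervisor reported an error.
 */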

/* %o0: devhandle
 * %o1: pci_device
 * %o2: pci_config_offset
 * %o3: size
 *
 * returns %o0: data
 *
 * If there is an error, the data will be returned
 * as all 1's.
 */
ENTRY(pci_sun4v_config_get)
	mov	HV_FAST_PCI_CONFIG_GET, %o5
	ta	HV_FAST_TRAP
	brnz,a,pn %o1, 1f
	 mov	-1, %o2
1:	retl
	 mov	%o2, %o0
ENDPROC(pci_sun4v_config_get)

/* %o0: devhandle
 * %o1: pci_device
 * %o2: pci_config_offset
 * %o3: size
 * %o4: data
 *
 * returns %o0: status
 *
 * status will be zero if the operation completed
 * successfully, else -1 if not
 */
ENTRY(pci_sun4v_config_put)
	mov	HV_FAST_PCI_CONFIG_PUT, %o5
	ta	HV_FAST_TRAP
	brnz,a,pn %o1, 1f
	 mov	-1, %o1
1:	retl
	 mov	%o1, %o0
ENDPROC(pci_sun4v_config_put)
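
/* The remaining entry points wrap the sun4v MSI and MSI queue
 * hypervisor calls.  They all follow the same shape: "set" style
 * calls pass their arguments straight through and return the status,
 * while "get" style calls additionally store the value the hypervisor
 * returned in %o1 through the caller-supplied pointer.
 */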

/* %o0: devhandle
 * %o1: msiqid
 * %o2: msiq phys address
 * %o3: num entries
 *
 * returns %o0: status
 *
 * status will be zero if the operation completed
 * successfully, else -1 if not
 */
ENTRY(pci_sun4v_msiq_conf)
	mov	HV_FAST_PCI_MSIQ_CONF, %o5
	ta	HV_FAST_TRAP
	retl
	 mov	%o0, %o0
ENDPROC(pci_sun4v_msiq_conf)

/* %o0: devhandle
 * %o1: msiqid
 * %o2: &msiq_phys_addr
 * %o3: &msiq_num_entries
 *
 * returns %o0: status
 */
ENTRY(pci_sun4v_msiq_info)
	mov	%o2, %o4
	mov	HV_FAST_PCI_MSIQ_INFO, %o5
	ta	HV_FAST_TRAP
	stx	%o1, [%o4]
	stx	%o2, [%o3]
	retl
	 mov	%o0, %o0
ENDPROC(pci_sun4v_msiq_info)

/* %o0: devhandle
 * %o1: msiqid
 * %o2: &valid
 *
 * returns %o0: status
 */
ENTRY(pci_sun4v_msiq_getvalid)
	mov	HV_FAST_PCI_MSIQ_GETVALID, %o5
	ta	HV_FAST_TRAP
	stx	%o1, [%o2]
	retl
	 mov	%o0, %o0
ENDPROC(pci_sun4v_msiq_getvalid)

/* %o0: devhandle
 * %o1: msiqid
 * %o2: valid
 *
 * returns %o0: status
 */
ENTRY(pci_sun4v_msiq_setvalid)
	mov	HV_FAST_PCI_MSIQ_SETVALID, %o5
	ta	HV_FAST_TRAP
	retl
	 mov	%o0, %o0
ENDPROC(pci_sun4v_msiq_setvalid)

/* %o0: devhandle
 * %o1: msiqid
 * %o2: &state
 *
 * returns %o0: status
 */
ENTRY(pci_sun4v_msiq_getstate)
	mov	HV_FAST_PCI_MSIQ_GETSTATE, %o5
	ta	HV_FAST_TRAP
	stx	%o1, [%o2]
	retl
	 mov	%o0, %o0
ENDPROC(pci_sun4v_msiq_getstate)

/* %o0: devhandle
 * %o1: msiqid
 * %o2: state
 *
 * returns %o0: status
 */
ENTRY(pci_sun4v_msiq_setstate)
	mov	HV_FAST_PCI_MSIQ_SETSTATE, %o5
	ta	HV_FAST_TRAP
	retl
	 mov	%o0, %o0
ENDPROC(pci_sun4v_msiq_setstate)

/* %o0: devhandle
 * %o1: msiqid
 * %o2: &head
 *
 * returns %o0: status
 */
ENTRY(pci_sun4v_msiq_gethead)
	mov	HV_FAST_PCI_MSIQ_GETHEAD, %o5
	ta	HV_FAST_TRAP
	stx	%o1, [%o2]
	retl
	 mov	%o0, %o0
ENDPROC(pci_sun4v_msiq_gethead)
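
/* A "get" stub as used from C, as a sketch (argument and result types
 * assumed to be plain unsigned longs, HV_EOK is the zero success
 * status from asm/hypervisor.h):
 *
 *	unsigned long head, err;
 *
 *	err = pci_sun4v_msiq_gethead(devhandle, msiqid, &head);
 *	if (err != HV_EOK)
 *		... report the failure ...
 */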

/* %o0: devhandle
 * %o1: msiqid
 * %o2: head
 *
 * returns %o0: status
 */
ENTRY(pci_sun4v_msiq_sethead)
	mov	HV_FAST_PCI_MSIQ_SETHEAD, %o5
	ta	HV_FAST_TRAP
	retl
	 mov	%o0, %o0
ENDPROC(pci_sun4v_msiq_sethead)

/* %o0: devhandle
 * %o1: msiqid
 * %o2: &tail
 *
 * returns %o0: status
 */
ENTRY(pci_sun4v_msiq_gettail)
	mov	HV_FAST_PCI_MSIQ_GETTAIL, %o5
	ta	HV_FAST_TRAP
	stx	%o1, [%o2]
	retl
	 mov	%o0, %o0
ENDPROC(pci_sun4v_msiq_gettail)

/* %o0: devhandle
 * %o1: msinum
 * %o2: &valid
 *
 * returns %o0: status
 */
ENTRY(pci_sun4v_msi_getvalid)
	mov	HV_FAST_PCI_MSI_GETVALID, %o5
	ta	HV_FAST_TRAP
	stx	%o1, [%o2]
	retl
	 mov	%o0, %o0
ENDPROC(pci_sun4v_msi_getvalid)

/* %o0: devhandle
 * %o1: msinum
 * %o2: valid
 *
 * returns %o0: status
 */
ENTRY(pci_sun4v_msi_setvalid)
	mov	HV_FAST_PCI_MSI_SETVALID, %o5
	ta	HV_FAST_TRAP
	retl
	 mov	%o0, %o0
ENDPROC(pci_sun4v_msi_setvalid)

/* %o0: devhandle
 * %o1: msinum
 * %o2: &msiq
 *
 * returns %o0: status
 */
ENTRY(pci_sun4v_msi_getmsiq)
	mov	HV_FAST_PCI_MSI_GETMSIQ, %o5
	ta	HV_FAST_TRAP
	stx	%o1, [%o2]
	retl
	 mov	%o0, %o0
ENDPROC(pci_sun4v_msi_getmsiq)

/* %o0: devhandle
 * %o1: msinum
 * %o2: msitype
 * %o3: msiq
 *
 * returns %o0: status
 */
ENTRY(pci_sun4v_msi_setmsiq)
	mov	HV_FAST_PCI_MSI_SETMSIQ, %o5
	ta	HV_FAST_TRAP
	retl
	 mov	%o0, %o0
ENDPROC(pci_sun4v_msi_setmsiq)

/* %o0: devhandle
 * %o1: msinum
 * %o2: &state
 *
 * returns %o0: status
 */
ENTRY(pci_sun4v_msi_getstate)
	mov	HV_FAST_PCI_MSI_GETSTATE, %o5
	ta	HV_FAST_TRAP
	stx	%o1, [%o2]
	retl
	 mov	%o0, %o0
ENDPROC(pci_sun4v_msi_getstate)

/* %o0: devhandle
 * %o1: msinum
 * %o2: state
 *
 * returns %o0: status
 */
ENTRY(pci_sun4v_msi_setstate)
	mov	HV_FAST_PCI_MSI_SETSTATE, %o5
	ta	HV_FAST_TRAP
	retl
	 mov	%o0, %o0
ENDPROC(pci_sun4v_msi_setstate)

/* %o0: devhandle
 * %o1: msinum
 * %o2: &msiq
 *
 * returns %o0: status
 */
ENTRY(pci_sun4v_msg_getmsiq)
	mov	HV_FAST_PCI_MSG_GETMSIQ, %o5
	ta	HV_FAST_TRAP
	stx	%o1, [%o2]
	retl
	 mov	%o0, %o0
ENDPROC(pci_sun4v_msg_getmsiq)
/* %o0: devhandle
 * %o1: msinum
 * %o2: msiq
 *
 * returns %o0: status
 */
ENTRY(pci_sun4v_msg_setmsiq)
	mov HV_FAST_PCI_MSG_SETMSIQ, %o5
	ta HV_FAST_TRAP
	retl
	 mov %o0, %o0
ENDPROC(pci_sun4v_msg_setmsiq)
/* %o0: devhandle
 * %o1: msinum
 * %o2: &valid
 *
 * returns %o0: status
 */
ENTRY(pci_sun4v_msg_getvalid)
	mov HV_FAST_PCI_MSG_GETVALID, %o5
	ta HV_FAST_TRAP
	stx %o1, [%o2]
	retl
	 mov %o0, %o0
ENDPROC(pci_sun4v_msg_getvalid)
/* %o0: devhandle
 * %o1: msinum
 * %o2: valid
 *
 * returns %o0: status
 */
ENTRY(pci_sun4v_msg_setvalid)
	mov HV_FAST_PCI_MSG_SETVALID, %o5
	ta HV_FAST_TRAP
	retl
	 mov %o0, %o0
ENDPROC(pci_sun4v_msg_setvalid)
/*
 * %o0: devhandle
 * %o1: r_addr
 * %o2: size
 * %o3: pagesize
 * %o4: virt
 * %o5: &iotsb_num/&iotsb_handle
 *
 * returns %o0: status
 *         %o1: iotsb_num/iotsb_handle
 */
ENTRY(pci_sun4v_iotsb_conf)
	mov %o5, %g1
	mov HV_FAST_PCI_IOTSB_CONF, %o5
	ta HV_FAST_TRAP
	retl
	 stx %o1, [%g1]
ENDPROC(pci_sun4v_iotsb_conf)
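/* Note: %o5 has to carry HV_FAST_PCI_IOTSB_CONF for the fast trap, so
 * the incoming &iotsb_num/&iotsb_handle pointer is parked in %g1 first;
 * the IOTSB number the hypervisor returns in %o1 is then stored through
 * that pointer in the retl delay slot.
 */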
/*
 * %o0: devhandle
 * %o1: iotsb_num/iotsb_handle
 * %o2: pci_device
 *
 * returns %o0: status
 */
ENTRY(pci_sun4v_iotsb_bind)
	mov HV_FAST_PCI_IOTSB_BIND, %o5
	ta HV_FAST_TRAP
	retl
	 nop
ENDPROC(pci_sun4v_iotsb_bind)
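/* Note: iotsb_bind has no output value besides the status, which the
 * hypervisor already leaves in %o0, so the retl delay slot only needs
 * a nop.
 */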
/*
 * %o0: devhandle
 * %o1: iotsb_num/iotsb_handle
 * %o2: index_count
 * %o3: iotte_attributes
 * %o4: io_page_list_p
 * %o5: &mapped
 *
 * returns %o0: status
 *         %o1: #mapped
 */
ENTRY(pci_sun4v_iotsb_map)
	mov %o5, %g1
	mov HV_FAST_PCI_IOTSB_MAP, %o5
	ta HV_FAST_TRAP
	retl
	 stx %o1, [%g1]
ENDPROC(pci_sun4v_iotsb_map)
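/* Note: same pattern as pci_sun4v_iotsb_conf: &mapped is moved to %g1
 * because %o5 must hold HV_FAST_PCI_IOTSB_MAP, and the number of
 * entries actually mapped (%o1) is written back in the delay slot.
 */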
/*
 * %o0: devhandle
 * %o1: iotsb_num/iotsb_handle
 * %o2: iotsb_index
 * %o3: #iottes
 * %o4: &demapped
 *
 * returns %o0: status
 *         %o1: #demapped
 */
ENTRY(pci_sun4v_iotsb_demap)
	mov HV_FAST_PCI_IOTSB_DEMAP, %o5
	ta HV_FAST_TRAP
	retl
	 stx %o1, [%o4]
ENDPROC(pci_sun4v_iotsb_demap)
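/* Note: here the result pointer (&demapped) arrives in %o4, which is
 * not needed for the function number and, as this code relies on,
 * survives the trap, so no %g1 shuffle is required before storing
 * #demapped.
 */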