powerpc fixes for 4.3

- Fix 32-bit TCE table init in kdump kernel from Nish
- Fix kdump with non-power-of-2 crashkernel= from Nish
- Abort cxl_pci_enable_device_hook() if PCI channel is offline from Andrew
- Fix to release DRC when configure_connector() fails from Bharata
- Wire up sys_userfaultfd()
- Fix race condition in tearing down MSI interrupts from Paul
- Fix unbalanced pci_dev_get() in cxl_probe() from Daniel
- Fix cxl build failure due to -Wunused-variable gcc behaviour change from Ian
- Tell the toolchain to use ABI v2 when building an LE boot wrapper from Benh
- Fix THP to recompute hash value after a failed update from Aneesh
- 32-bit memcpy/memset: only use dcbz once cache is enabled from Christophe

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABAgAGBQJV+h5EAAoJEFHr6jzI4aWADboP/3jc6vUtOmFZ1GBGwXrftOc0
PGtkTtYnGbaNsp+ZBl39Y+CFsSfhFVaDgylm/8G5NMKZaSBVdcFJwfU1w6Ymn49R
nZkAT5PC9KgT5RTuRTZ3DO/Y2RC9vg2T9pXjEn8NGYcV8GgUkc3dZAn48S3AFgnV
4jQI5sbxvwU12XkCUn+DkETh13g3gLYtRxwehBu/S/ovED5iNHKJwnXRzxyAA969
dARNriSeyLBVMLamJ+rJB1S5hVTZTMbughFVVFbgriyIGuC/C1g9b9GN86dCGS6w
T6VrKveK/iVCLUB16KV8+inbfvUrXItOxhGJWPHw9uAJGLZTz5G+yLHRRPX8onyC
pgDesJDDpP/P7sAnKto3tF1Vzi7lwVtVPC1dT1Fc9VAWJGPYC/d16EKGIpNqqlnc
mAIJ7wcI5c/HxvqXR2rdRV6fMer+aY7utwMsh4o/gDs7ArQUcuCrOKSW0jvHGmyr
MumARXnUGDwPnGD8IfYI2vDOvwisv9g6XACwsM+pi499SfowaiLuK3utsMccagGZ
INFtaqS7gpcj8TTu3kymw1TbKW/tqG9T81RRZ0rFH1q3aSfvwQ9QvsXctSeqOl/n
lkxR3Mk0CT0oupXNKV6pjqsRwLroA0AF5tGxuw4ap1Rt4i8G7WHaPgRp7SPZxtkR
qVBryfK5bKqYtWp4ztVK
=Dgn5
-----END PGP SIGNATURE-----

Merge tag 'powerpc-4.3-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:

 - Fix 32-bit TCE table init in kdump kernel from Nish
 - Fix kdump with non-power-of-2 crashkernel= from Nish
 - Abort cxl_pci_enable_device_hook() if PCI channel is offline from Andrew
 - Fix to release DRC when configure_connector() fails from Bharata
 - Wire up sys_userfaultfd()
 - Fix race condition in tearing down MSI interrupts from Paul
 - Fix unbalanced pci_dev_get() in cxl_probe() from Daniel
 - Fix cxl build failure due to -Wunused-variable gcc behaviour change from Ian
 - Tell the toolchain to use ABI v2 when building an LE boot wrapper from Benh
 - Fix THP to recompute hash value after a failed update from Aneesh
 - 32-bit memcpy/memset: only use dcbz once cache is enabled from Christophe

* tag 'powerpc-4.3-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc32: memset: only use dcbz once cache is enabled
  powerpc32: memcpy: only use dcbz once cache is enabled
  powerpc/mm: Recompute hash value after a failed update
  powerpc/boot: Specify ABI v2 when building an LE boot wrapper
  cxl: Fix build failure due to -Wunused-variable behaviour change
  cxl: Fix unbalanced pci_dev_get in cxl_probe
  powerpc/MSI: Fix race condition in tearing down MSI interrupts
  powerpc: Wire up sys_userfaultfd()
  powerpc/pseries: Release DRC when configure_connector fails
  cxl: abort cxl_pci_enable_device_hook() if PCI channel is offline
  powerpc/powernv/pci-ioda: fix kdump with non-power-of-2 crashkernel=
  powerpc/powernv/pci-ioda: fix 32-bit TCE table init in kdump kernel
commit f240bdd2a5
@@ -28,6 +28,9 @@ BOOTCFLAGS += -m64
 endif
 ifdef CONFIG_CPU_BIG_ENDIAN
 BOOTCFLAGS += -mbig-endian
+else
+BOOTCFLAGS += -mlittle-endian
+BOOTCFLAGS += $(call cc-option,-mabi=elfv2)
 endif

 BOOTAFLAGS := -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc

@@ -368,3 +368,4 @@ SYSCALL_SPU(memfd_create)
 SYSCALL_SPU(bpf)
 COMPAT_SYS(execveat)
 PPC64ONLY(switch_endian)
+SYSCALL_SPU(userfaultfd)

@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>


-#define __NR_syscalls 364
+#define __NR_syscalls 365

 #define __NR__exit __NR_exit
 #define NR_syscalls __NR_syscalls

@@ -386,5 +386,6 @@
 #define __NR_bpf 361
 #define __NR_execveat 362
 #define __NR_switch_endian 363
+#define __NR_userfaultfd 364

 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */

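The syscall-table hunks above only wire the existing userfaultfd() implementation into powerpc; nothing arch-specific is added beyond the table entry and the number 364. For illustration only (not part of the series), a minimal userspace smoke test could look like the sketch below; the fallback define mirrors the number added above and is only needed when the C library headers do not yet know the syscall.

/* Hypothetical smoke test, assuming a powerpc kernel with this series applied. */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_userfaultfd
#define __NR_userfaultfd 364	/* powerpc number added in the hunk above */
#endif

int main(void)
{
	long fd = syscall(__NR_userfaultfd, 0);

	if (fd < 0)
		perror("userfaultfd");	/* ENOSYS would mean the syscall is not wired up */
	else
		printf("userfaultfd fd = %ld\n", fd);
	return 0;
}

On a kernel without the patch the call returns -1 with errno set to ENOSYS.
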
@@ -38,6 +38,7 @@
 #include <asm/udbg.h>
 #include <asm/mmu_context.h>
 #include <asm/epapr_hcalls.h>
+#include <asm/code-patching.h>

 #define DBG(fmt...)

@@ -109,6 +110,8 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)
 * This is called very early on the boot process, after a minimal
 * MMU environment has been set up but before MMU_init is called.
 */
+extern unsigned int memset_nocache_branch; /* Insn to be replaced by NOP */
+
 notrace void __init machine_init(u64 dt_ptr)
 {
 lockdep_init();

@@ -116,6 +119,9 @@ notrace void __init machine_init(u64 dt_ptr)
 /* Enable early debugging if any specified (see udbg.h) */
 udbg_early_init();

+patch_instruction((unsigned int *)&memcpy, PPC_INST_NOP);
+patch_instruction(&memset_nocache_branch, PPC_INST_NOP);
+
 /* Do some early initialization based on the flat device tree */
 early_init_devtree(__va(dt_ptr));

@@ -73,6 +73,10 @@ CACHELINE_MASK = (L1_CACHE_BYTES-1)
 * Use dcbz on the complete cache lines in the destination
 * to set them to zero. This requires that the destination
 * area is cacheable. -- paulus
+*
+* During early init, cache might not be active yet, so dcbz cannot be used.
+* We therefore skip the optimised bloc that uses dcbz. This jump is
+* replaced by a nop once cache is active. This is done in machine_init()
 */
 _GLOBAL(memset)
 rlwimi r4,r4,8,16,23

@@ -88,6 +92,8 @@ _GLOBAL(memset)
 subf r6,r0,r6
 cmplwi 0,r4,0
 bne 2f /* Use normal procedure if r4 is not zero */
+_GLOBAL(memset_nocache_branch)
+b 2f /* Skip optimised bloc until cache is enabled */

 clrlwi r7,r6,32-LG_CACHELINE_BYTES
 add r8,r7,r5

@@ -128,6 +134,10 @@ _GLOBAL(memset)
 * the destination area is cacheable.
 * We only use this version if the source and dest don't overlap.
 * -- paulus.
+*
+* During early init, cache might not be active yet, so dcbz cannot be used.
+* We therefore jump to generic_memcpy which doesn't use dcbz. This jump is
+* replaced by a nop once cache is active. This is done in machine_init()
 */
 _GLOBAL(memmove)
 cmplw 0,r3,r4

@@ -135,6 +145,7 @@ _GLOBAL(memmove)
 /* fall through */

 _GLOBAL(memcpy)
+b generic_memcpy
 add r7,r3,r5 /* test if the src & dst overlap */
 add r8,r4,r5
 cmplw 0,r4,r7

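The two copy_32.S hunks above make the 32-bit memset()/memcpy() safe to call before the data cache is enabled: memset gains a labelled branch that skips the dcbz loop, and memcpy now starts by branching to generic_memcpy. The setup_32.c hunk earlier in this diff undoes both once the cache is up. Below is a condensed sketch of that re-enable step; the symbols are the ones used in the hunks, while the wrapper function name is only illustrative (in the series the two calls live directly in machine_init()).

/* Sketch only: the patch_instruction() calls are the ones added to machine_init()
 * in the setup_32.c hunk; reenable_dcbz_fast_paths() is an illustrative name. */
#include <linux/init.h>
#include <linux/string.h>		/* memcpy declaration */
#include <asm/code-patching.h>		/* patch_instruction() */
#include <asm/ppc-opcode.h>		/* PPC_INST_NOP */

extern unsigned int memset_nocache_branch;	/* label added in the memset hunk */

static void __init reenable_dcbz_fast_paths(void)
{
	/* memcpy now begins with "b generic_memcpy"; overwrite that branch with a NOP */
	patch_instruction((unsigned int *)&memcpy, PPC_INST_NOP);
	/* memset's "b 2f" skip over the dcbz loop is NOPed the same way */
	patch_instruction(&memset_nocache_branch, PPC_INST_NOP);
}
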
@@ -85,7 +85,6 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
 BUG_ON(index >= 4096);

 vpn = hpt_vpn(ea, vsid, ssize);
-hash = hpt_hash(vpn, shift, ssize);
 hpte_slot_array = get_hpte_slot_array(pmdp);
 if (psize == MMU_PAGE_4K) {
 /*

@@ -101,6 +100,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
 valid = hpte_valid(hpte_slot_array, index);
 if (valid) {
 /* update the hpte bits */
+hash = hpt_hash(vpn, shift, ssize);
 hidx = hpte_hash_index(hpte_slot_array, index);
 if (hidx & _PTEIDX_SECONDARY)
 hash = ~hash;

@@ -126,6 +126,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
 if (!valid) {
 unsigned long hpte_group;

+hash = hpt_hash(vpn, shift, ssize);
 /* insert new entry */
 pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
 new_pmd |= _PAGE_HASHPTE;

@@ -63,6 +63,7 @@ static struct irq_chip mpic_pasemi_msi_chip = {
 static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
 {
 struct msi_desc *entry;
+irq_hw_number_t hwirq;

 pr_debug("pasemi_msi_teardown_msi_irqs, pdev %p\n", pdev);

@@ -70,10 +71,10 @@ static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
 if (entry->irq == NO_IRQ)
 continue;

+hwirq = virq_to_hw(entry->irq);
 irq_set_msi_desc(entry->irq, NULL);
-msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
-virq_to_hw(entry->irq), ALLOC_CHUNK);
 irq_dispose_mapping(entry->irq);
+msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, ALLOC_CHUNK);
 }

 return;

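This hunk and the powernv, fsl, u3 and 4xx MSI hunks further down all apply the same reordering: the hwirq is looked up and cached while the virq mapping still exists, the mapping is disposed of, and only then is the hwirq returned to the bitmap, so a concurrent allocation cannot be handed a hwirq that is still mapped. A minimal sketch of that ordering, using the kernel interfaces visible in the hunks (the helper name itself is illustrative):

#include <linux/irq.h>		/* irq_set_msi_desc() */
#include <linux/irqdomain.h>	/* irq_dispose_mapping(), irq_hw_number_t */
#include <asm/irq.h>		/* virq_to_hw() */
#include <asm/msi_bitmap.h>	/* struct msi_bitmap, msi_bitmap_free_hwirqs() */

/* Illustrative helper showing the teardown order the fixes converge on. */
static void teardown_one_msi(struct msi_bitmap *bmp, unsigned int virq)
{
	irq_hw_number_t hwirq = virq_to_hw(virq);	/* cache while the mapping exists */

	irq_set_msi_desc(virq, NULL);
	irq_dispose_mapping(virq);			/* after this, virq_to_hw(virq) is gone */
	msi_bitmap_free_hwirqs(bmp, hwirq, 1);		/* hwirq is only reusable once unmapped */
}

The per-driver hunks differ only in which bitmap they free into and in the offset or chunk arguments (ALLOC_CHUNK, phb->msi_base).
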
@@ -2049,9 +2049,23 @@ static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe)
 struct iommu_table *tbl = NULL;
 long rc;

+/*
+* crashkernel= specifies the kdump kernel's maximum memory at
+* some offset and there is no guaranteed the result is a power
+* of 2, which will cause errors later.
+*/
+const u64 max_memory = __rounddown_pow_of_two(memory_hotplug_max());
+
+/*
+* In memory constrained environments, e.g. kdump kernel, the
+* DMA window can be larger than available memory, which will
+* cause errors later.
+*/
+const u64 window_size = min((u64)pe->table_group.tce32_size, max_memory);
+
 rc = pnv_pci_ioda2_create_table(&pe->table_group, 0,
 IOMMU_PAGE_SHIFT_4K,
-pe->table_group.tce32_size,
+window_size,
 POWERNV_IOMMU_DEFAULT_LEVELS, &tbl);
 if (rc) {
 pe_err(pe, "Failed to create 32-bit TCE table, err %ld",

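For a concrete feel of what the two added constants do, the standalone illustration below uses made-up numbers (a crashkernel reservation of 230 MB and a 2 GB default 32-bit window); the helper simply mimics the kernel's __rounddown_pow_of_two() for non-zero input, and only the rounding-then-clamping logic mirrors the hunk.

#include <stdio.h>
#include <stdint.h>

/* Same result as the kernel's __rounddown_pow_of_two() for non-zero input. */
static uint64_t rounddown_pow_of_two(uint64_t n)
{
	while (n & (n - 1))
		n &= n - 1;	/* clear the lowest set bit until one bit remains */
	return n;
}

int main(void)
{
	uint64_t max_memory  = rounddown_pow_of_two(230ULL << 20);	/* e.g. crashkernel=230M -> 128M */
	uint64_t tce32_size  = 2ULL << 30;				/* hypothetical 2G default window */
	uint64_t window_size = tce32_size < max_memory ? tce32_size : max_memory;

	printf("window size = %llu MB\n", (unsigned long long)(window_size >> 20));
	return 0;
}

With these numbers the DMA window is clamped to 128 MB instead of the 2 GB that pnv_pci_ioda2_create_table() would otherwise be asked to cover.
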
@@ -99,6 +99,7 @@ void pnv_teardown_msi_irqs(struct pci_dev *pdev)
 struct pci_controller *hose = pci_bus_to_host(pdev->bus);
 struct pnv_phb *phb = hose->private_data;
 struct msi_desc *entry;
+irq_hw_number_t hwirq;

 if (WARN_ON(!phb))
 return;

@@ -106,10 +107,10 @@ void pnv_teardown_msi_irqs(struct pci_dev *pdev)
 for_each_pci_msi_entry(entry, pdev) {
 if (entry->irq == NO_IRQ)
 continue;
+hwirq = virq_to_hw(entry->irq);
 irq_set_msi_desc(entry->irq, NULL);
-msi_bitmap_free_hwirqs(&phb->msi_bmp,
-virq_to_hw(entry->irq) - phb->msi_base, 1);
 irq_dispose_mapping(entry->irq);
+msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1);
 }
 }
 #endif /* CONFIG_PCI_MSI */

@@ -422,8 +422,10 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)

 dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent);
 of_node_put(parent);
-if (!dn)
+if (!dn) {
+dlpar_release_drc(drc_index);
 return -EINVAL;
+}

 rc = dlpar_attach_node(dn);
 if (rc) {

@@ -128,15 +128,16 @@ static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
 {
 struct msi_desc *entry;
 struct fsl_msi *msi_data;
+irq_hw_number_t hwirq;

 for_each_pci_msi_entry(entry, pdev) {
 if (entry->irq == NO_IRQ)
 continue;
+hwirq = virq_to_hw(entry->irq);
 msi_data = irq_get_chip_data(entry->irq);
 irq_set_msi_desc(entry->irq, NULL);
-msi_bitmap_free_hwirqs(&msi_data->bitmap,
-virq_to_hw(entry->irq), 1);
 irq_dispose_mapping(entry->irq);
+msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
 }

 return;

@@ -107,15 +107,16 @@ static u64 find_u4_magic_addr(struct pci_dev *pdev, unsigned int hwirq)
 static void u3msi_teardown_msi_irqs(struct pci_dev *pdev)
 {
 struct msi_desc *entry;
+irq_hw_number_t hwirq;

 for_each_pci_msi_entry(entry, pdev) {
 if (entry->irq == NO_IRQ)
 continue;

+hwirq = virq_to_hw(entry->irq);
 irq_set_msi_desc(entry->irq, NULL);
-msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
-virq_to_hw(entry->irq), 1);
 irq_dispose_mapping(entry->irq);
+msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, 1);
 }

 return;

@@ -124,16 +124,17 @@ void ppc4xx_teardown_msi_irqs(struct pci_dev *dev)
 {
 struct msi_desc *entry;
 struct ppc4xx_msi *msi_data = &ppc4xx_msi;
+irq_hw_number_t hwirq;

 dev_dbg(&dev->dev, "PCIE-MSI: tearing down msi irqs\n");

 for_each_pci_msi_entry(entry, dev) {
 if (entry->irq == NO_IRQ)
 continue;
+hwirq = virq_to_hw(entry->irq);
 irq_set_msi_desc(entry->irq, NULL);
-msi_bitmap_free_hwirqs(&msi_data->bitmap,
-virq_to_hw(entry->irq), 1);
 irq_dispose_mapping(entry->irq);
+msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
 }
 }

@@ -1,4 +1,4 @@
-ccflags-y := -Werror
+ccflags-y := -Werror -Wno-unused-const-variable

 cxl-y += main.o file.o irq.o fault.o native.o
 cxl-y += context.o sysfs.o debugfs.o pci.o trace.o

@@ -1249,8 +1249,6 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
 int slice;
 int rc;

-pci_dev_get(dev);
-
 if (cxl_verbose)
 dump_cxl_config_space(dev);

@@ -48,6 +48,12 @@ static bool cxl_pci_enable_device_hook(struct pci_dev *dev)

 phb = pci_bus_to_host(dev->bus);
 afu = (struct cxl_afu *)phb->private_data;
+
+if (!cxl_adapter_link_ok(afu->adapter)) {
+dev_warn(&dev->dev, "%s: Device link is down, refusing to enable AFU\n", __func__);
+return false;
+}
+
 set_dma_ops(&dev->dev, &dma_direct_ops);
 set_dma_offset(&dev->dev, PAGE_OFFSET);
