Merge ock linux-6.6/devel branch into tk5 release branch

This commit is contained in:
Jianping Liu 2024-11-12 14:24:07 +08:00
commit 681c35f961
87 changed files with 12848 additions and 550 deletions

View File

@ -3948,9 +3948,10 @@
vulnerability. System may allow data leaks with this
option.
no-steal-acc [X86,PV_OPS,ARM64,PPC/PSERIES] Disable paravirtualized
steal time accounting. steal time is computed, but
won't influence scheduler behaviour
no-steal-acc [X86,PV_OPS,ARM64,PPC/PSERIES,RISCV,LOONGARCH,EARLY]
Disable paravirtualized steal time accounting. steal time
is computed, but won't influence scheduler behaviour
nosync [HW,M68K] Disables sync negotiation for all devices.

View File

@ -11551,6 +11551,7 @@ L: kvm@vger.kernel.org
L: loongarch@lists.linux.dev
S: Maintained
T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
F: Documentation/virt/kvm/loongarch/
F: arch/loongarch/include/asm/kvm*
F: arch/loongarch/include/uapi/asm/kvm*
F: arch/loongarch/kvm/

View File

@ -619,6 +619,26 @@ config RANDOMIZE_BASE_MAX_OFFSET
This is limited by the size of the lower address memory, 256MB.
config PARAVIRT
bool "Enable paravirtualization code"
depends on AS_HAS_LVZ_EXTENSION
help
This changes the kernel so it can modify itself when it is run
under a hypervisor, potentially improving performance significantly
over full virtualization. However, when run without a hypervisor
the kernel is theoretically slower and slightly larger.
config PARAVIRT_TIME_ACCOUNTING
bool "Paravirtual steal time accounting"
depends on PARAVIRT
help
Select this option to enable fine granularity task steal time
accounting. Time spent executing other tasks in parallel with
the current vCPU is discounted from the vCPU power. To account for
that, there can be a small performance impact.
If in doubt, say N here.
endmenu
config ARCH_SELECT_MEMORY_MODEL
@ -671,6 +691,17 @@ source "drivers/cpufreq/Kconfig"
source "kernel/power/Kconfig"
source "drivers/acpi/Kconfig"
config PARAVIRT_TIME_ACCOUNTING
bool "Paravirtual steal time accounting"
depends on PARAVIRT
help
Select this option to enable fine granularity task steal time
accounting. Time spent executing other tasks in parallel with
the current vCPU is discounted from the vCPU power. To account for
that, there can be a small performance impact.
If in doubt, say N here.
endmenu
source "arch/loongarch/kvm/Kconfig"

View File

@ -13,6 +13,8 @@ CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=18
CONFIG_NUMA_BALANCING=y
CONFIG_MEMCG=y
@ -47,7 +49,6 @@ CONFIG_RANDOMIZE_BASE=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_STAT=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_LOONGSON3_ACPI_CPUFREQ=y
CONFIG_HIBERNATION=y
CONFIG_ACPI_SPCR_TABLE=y
CONFIG_ACPI_TAD=y
@ -1469,7 +1470,7 @@ CONFIG_DRM_AMDGPU_USERPTR=y
CONFIG_DRM_NOUVEAU=m
CONFIG_DRM_VKMS=m
CONFIG_DRM_UDL=m
CONFIG_DRM_AST=y
CONFIG_DRM_AST_LOONGSON=y
CONFIG_DRM_MGAG200=m
CONFIG_DRM_QXL=m
CONFIG_DRM_VIRTIO_GPU=m
@ -2199,3 +2200,6 @@ CONFIG_RCU_CPU_STALL_TIMEOUT=60
# CONFIG_RCU_TRACE is not set
# CONFIG_STRICT_DEVMEM is not set
# CONFIG_RUNTIME_TESTING_MENU is not set
CONFIG_LOONGARCH_IOMMU=m
CONFIG_CMDLINE_EXTEND=y
CONFIG_CMDLINE="vfio_iommu_type1.allow_unsafe_interrupts=1 nokaslr"

View File

@ -4,7 +4,6 @@ generic-y += mcs_spinlock.h
generic-y += parport.h
generic-y += early_ioremap.h
generic-y += qrwlock.h
generic-y += qspinlock.h
generic-y += rwsem.h
generic-y += segment.h
generic-y += user.h

View File

@ -0,0 +1,36 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Arch specific extensions to struct device
*
* This file is released under the GPLv2
* Copyright (C) 2020 Loongson Technology Corporation Limited
*/
#ifndef _ASM_LOONGARCH_DEVICE_H
#define _ASM_LOONGARCH_DEVICE_H
struct dev_archdata {
/* hook for IOMMU specific extension */
void *iommu;
struct bus_dma_region *dma_range_map;
/*
* On some old 7A chipsets, the DMA address differs from the
* physical address; the main difference is the node id. In the
* DMA address the node id starts at bit 36, in the physical
* address it starts at bit 44. The address bits below the node
* id are the same in both views.
*/
unsigned long dma_node_mask;
unsigned int dma_node_off;
};
struct pdev_archdata {
};
struct dma_domain {
struct list_head node;
const struct dma_map_ops *dma_ops;
int domain_nr;
};
void add_dma_domain(struct dma_domain *domain);
void del_dma_domain(struct dma_domain *domain);
#endif /* _ASM_LOONGARCH_DEVICE_H */

View File

@ -0,0 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef _LOONGARCH_DMA_DIRECT_H
#define _LOONGARCH_DMA_DIRECT_H
dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
#endif /* _LOONGARCH_DMA_DIRECT_H */
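The two declarations above, together with the dma_node_mask/dma_node_off fields in device.h, imply a node-id relocation between the physical and DMA views of an address. A minimal illustrative sketch, assuming a 4-bit node id at bit 44 of the physical address and bit 36 of the DMA address; the field width and the example_* names are assumptions for illustration, not taken from the patch:

/* Illustrative sketch only: relocate the node id between the two views. */
#define EX_PHYS_NODE_SHIFT	44	/* node id position in a physical address */
#define EX_DMA_NODE_SHIFT	36	/* node id position in a DMA address */
#define EX_NODE_ID_MASK		0xfUL	/* assumed 4-bit node id */

static inline u64 example_phys_to_dma(u64 paddr)
{
	u64 nid = (paddr >> EX_PHYS_NODE_SHIFT) & EX_NODE_ID_MASK;

	/* keep the bits below the node id, re-insert the node id lower down */
	return (paddr & ((1UL << EX_DMA_NODE_SHIFT) - 1)) | (nid << EX_DMA_NODE_SHIFT);
}

static inline u64 example_dma_to_phys(u64 daddr)
{
	u64 nid = (daddr >> EX_DMA_NODE_SHIFT) & EX_NODE_ID_MASK;

	return (daddr & ((1UL << EX_DMA_NODE_SHIFT) - 1)) | (nid << EX_PHYS_NODE_SHIFT);
}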

View File

@ -30,6 +30,7 @@
: [val] "+r" (__v) \
: [reg] "i" (csr) \
: "memory"); \
__v; \
})
#define gcsr_xchg(v, m, csr) \
@ -181,6 +182,8 @@ __BUILD_GCSR_OP(tlbidx)
#define kvm_save_hw_gcsr(csr, gid) (csr->csrs[gid] = gcsr_read(gid))
#define kvm_restore_hw_gcsr(csr, gid) (gcsr_write(csr->csrs[gid], gid))
#define kvm_read_clear_hw_gcsr(csr, gid) (csr->csrs[gid] = gcsr_write(0, gid))
int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu);
static __always_inline unsigned long kvm_read_sw_gcsr(struct loongarch_csrs *csr, int gid)
@ -208,9 +211,7 @@ static __always_inline void kvm_change_sw_gcsr(struct loongarch_csrs *csr,
csr->csrs[gid] |= val & _mask;
}
#define KVM_PMU_PLV_ENABLE (CSR_PERFCTRL_PLV0 | \
CSR_PERFCTRL_PLV1 | \
CSR_PERFCTRL_PLV2 | \
CSR_PERFCTRL_PLV3)
#define KVM_PMU_EVENT_ENABLED (CSR_PERFCTRL_PLV0 | CSR_PERFCTRL_PLV1 | \
CSR_PERFCTRL_PLV2 | CSR_PERFCTRL_PLV3)
#endif /* __ASM_LOONGARCH_KVM_CSR_H__ */

View File

@ -0,0 +1,95 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2024 Loongson Technology Corporation Limited
*/
#ifndef LOONGARCH_EXTIOI_H
#define LOONGARCH_EXTIOI_H
#include <kvm/iodev.h>
#define EXTIOI_IRQS 256
#define EXTIOI_ROUTE_MAX_VCPUS 256
#define EXTIOI_IRQS_U8_NUMS (EXTIOI_IRQS / 8)
#define EXTIOI_IRQS_U32_NUMS (EXTIOI_IRQS_U8_NUMS / 4)
#define EXTIOI_IRQS_U64_NUMS (EXTIOI_IRQS_U32_NUMS / 2)
/* map to ipnum per 32 irqs */
#define EXTIOI_IRQS_NODETYPE_COUNT 16
#define EXTIOI_BASE 0x1400
#define EXTIOI_SIZE 0x900
#define EXTIOI_NODETYPE_START 0xa0
#define EXTIOI_NODETYPE_END 0xbf
#define EXTIOI_IPMAP_START 0xc0
#define EXTIOI_IPMAP_END 0xc7
#define EXTIOI_ENABLE_START 0x200
#define EXTIOI_ENABLE_END 0x21f
#define EXTIOI_BOUNCE_START 0x280
#define EXTIOI_BOUNCE_END 0x29f
#define EXTIOI_ISR_START 0x300
#define EXTIOI_ISR_END 0x31f
#define EXTIOI_COREISR_START 0x400
#define EXTIOI_COREISR_END 0x71f
#define EXTIOI_COREMAP_START 0x800
#define EXTIOI_COREMAP_END 0x8ff
#define LS3A_INTC_IP 8
#define EXTIOI_SW_COREMAP_FLAG (1 << 0)
struct loongarch_extioi {
spinlock_t lock;
struct kvm *kvm;
struct kvm_io_device device;
/* hardware state */
union nodetype {
u64 reg_u64[EXTIOI_IRQS_NODETYPE_COUNT / 4];
u32 reg_u32[EXTIOI_IRQS_NODETYPE_COUNT / 2];
uint16_t reg_u16[EXTIOI_IRQS_NODETYPE_COUNT];
u8 reg_u8[EXTIOI_IRQS_NODETYPE_COUNT * 2];
} nodetype;
/* one bit shows the state of one irq */
union bounce {
u64 reg_u64[EXTIOI_IRQS_U64_NUMS];
u32 reg_u32[EXTIOI_IRQS_U32_NUMS];
u8 reg_u8[EXTIOI_IRQS_U8_NUMS];
} bounce;
union isr {
u64 reg_u64[EXTIOI_IRQS_U64_NUMS];
u32 reg_u32[EXTIOI_IRQS_U32_NUMS];
u8 reg_u8[EXTIOI_IRQS_U8_NUMS];
} isr;
union coreisr {
u64 reg_u64[EXTIOI_ROUTE_MAX_VCPUS][EXTIOI_IRQS_U64_NUMS];
u32 reg_u32[EXTIOI_ROUTE_MAX_VCPUS][EXTIOI_IRQS_U32_NUMS];
u8 reg_u8[EXTIOI_ROUTE_MAX_VCPUS][EXTIOI_IRQS_U8_NUMS];
} coreisr;
union enable {
u64 reg_u64[EXTIOI_IRQS_U64_NUMS];
u32 reg_u32[EXTIOI_IRQS_U32_NUMS];
u8 reg_u8[EXTIOI_IRQS_U8_NUMS];
} enable;
/* use one byte to config ipmap for 32 irqs at once */
union ipmap {
u64 reg_u64;
u32 reg_u32[EXTIOI_IRQS_U32_NUMS / 4];
u8 reg_u8[EXTIOI_IRQS_U8_NUMS / 4];
} ipmap;
/* use one byte to config coremap for one irq */
union coremap {
u64 reg_u64[EXTIOI_IRQS / 8];
u32 reg_u32[EXTIOI_IRQS / 4];
u8 reg_u8[EXTIOI_IRQS];
} coremap;
DECLARE_BITMAP(sw_coreisr[EXTIOI_ROUTE_MAX_VCPUS][LS3A_INTC_IP], EXTIOI_IRQS);
uint8_t sw_coremap[EXTIOI_IRQS];
};
void extioi_set_irq(struct loongarch_extioi *s, int irq, int level);
int kvm_loongarch_register_extioi_device(void);
#endif /* LOONGARCH_EXTIOI_H */
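The unions above expose the same register state at u64/u32/u8 granularity; per the layout comments, enable/bounce/isr/coreisr hold one bit per irq, ipmap one byte per 32 irqs, and coremap one byte per irq. A small sketch of the index arithmetic this layout implies (the ex_* helper names are illustrative only, not part of the patch):

/* Illustrative index helpers for the register layout above. */
static inline int ex_irq_byte(int irq)     { return irq / 8; }        /* enable/isr/bounce/coreisr */
static inline u8  ex_irq_bit(int irq)      { return 1U << (irq % 8); }
static inline int ex_ipmap_byte(int irq)   { return irq / 32; }       /* one ipmap byte covers 32 irqs */
static inline int ex_coremap_byte(int irq) { return irq; }            /* one coremap byte per irq */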

View File

@ -19,6 +19,9 @@
#include <asm/inst.h>
#include <asm/kvm_mmu.h>
#include <asm/loongarch.h>
#include <asm/kvm_ipi.h>
#include <asm/kvm_extioi.h>
#include <asm/kvm_pch_pic.h>
/* Loongarch KVM register ids */
#define KVM_GET_IOC_CSR_IDX(id) ((id & KVM_CSR_IDX_MASK) >> LOONGARCH_REG_SHIFT)
@ -26,18 +29,49 @@
#define KVM_MAX_VCPUS 256
#define KVM_MAX_CPUCFG_REGS 21
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 0
#define KVM_HALT_POLL_NS_DEFAULT 500000
#define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(1)
#define KVM_REQ_TLB_FLUSH_GPA KVM_ARCH_REQ(0)
#define KVM_REQ_STEAL_UPDATE KVM_ARCH_REQ(1)
#define KVM_REQ_PMU KVM_ARCH_REQ(2)
#define KVM_GUESTDBG_SW_BP_MASK \
(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)
#define KVM_GUESTDBG_VALID_MASK \
(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP | KVM_GUESTDBG_SINGLESTEP)
/* KVM_IRQ_LINE irq field index values */
#define KVM_LOONGARCH_IRQ_TYPE_SHIFT 24
#define KVM_LOONGARCH_IRQ_TYPE_MASK 0xff
#define KVM_LOONGARCH_IRQ_VCPU_SHIFT 16
#define KVM_LOONGARCH_IRQ_VCPU_MASK 0xff
#define KVM_LOONGARCH_IRQ_NUM_SHIFT 0
#define KVM_LOONGARCH_IRQ_NUM_MASK 0xffff
/* irq_type field */
#define KVM_LOONGARCH_IRQ_TYPE_CPU_IP 0
#define KVM_LOONGARCH_IRQ_TYPE_CPU_IO 1
#define KVM_LOONGARCH_IRQ_TYPE_HT 2
#define KVM_LOONGARCH_IRQ_TYPE_MSI 3
#define KVM_LOONGARCH_IRQ_TYPE_IOAPIC 4
#define KVM_LOONGARCH_IRQ_TYPE_ROUTE 5
#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
KVM_GUESTDBG_USE_SW_BP | KVM_GUESTDBG_SINGLESTEP)
#define KVM_DIRTY_LOG_MANUAL_CAPS \
(KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | KVM_DIRTY_LOG_INITIALLY_SET)
struct kvm_vm_stat {
struct kvm_vm_stat_generic generic;
u64 pages;
u64 hugepages;
u64 ipi_read_exits;
u64 ipi_write_exits;
u64 extioi_read_exits;
u64 extioi_write_exits;
u64 pch_pic_read_exits;
u64 pch_pic_write_exits;
};
struct kvm_vcpu_stat {
@ -55,9 +89,13 @@ struct kvm_arch_memory_slot {
unsigned long flags;
};
#define HOST_MAX_PMNUM 16
struct kvm_context {
unsigned long vpid_cache;
struct kvm_vcpu *last_vcpu;
/* Host PMU CSR */
u64 perf_ctrl[HOST_MAX_PMNUM];
u64 perf_cntr[HOST_MAX_PMNUM];
};
struct kvm_world_switch {
@ -69,12 +107,13 @@ struct kvm_world_switch {
#define MAX_PGTABLE_LEVELS 4
/*
* Physical cpu id is used for interrupt routing, there are different
* Physical CPUID is used for interrupt routing, there are different
* definitions about physical cpuid on different hardware.
* For LOONGARCH_CSR_CPUID register, max cpuid size is 512
* For IPI HW, max dest CPUID size is 1024
* For extioi interrupt controller, max dest CPUID size is 256
* For MSI interrupt controller, max supported CPUID size is 65536
*
* For LOONGARCH_CSR_CPUID register, max CPUID size is 512
* For IPI hardware, max destination CPUID size is 1024
* For extioi interrupt controller, max destination CPUID size is 256
* For msgint interrupt controller, max supported CPUID size is 65536
*
* Currently max CPUID is defined as 256 for KVM hypervisor, in future
* it will be expanded to 4096, including 16 packages at most. And every
@ -101,9 +140,14 @@ struct kvm_arch {
unsigned int root_level;
spinlock_t phyid_map_lock;
struct kvm_phyid_map *phyid_map;
/* Enabled PV features */
unsigned long pv_features;
s64 time_offset;
struct kvm_context __percpu *vmcs;
struct loongarch_ipi *ipi;
struct loongarch_extioi *extioi;
struct loongarch_pch_pic *pch_pic;
};
#define CSR_MAX_NUMS 0x800
@ -127,9 +171,15 @@ enum emulation_result {
#define KVM_LARCH_FPU (0x1 << 0)
#define KVM_LARCH_LSX (0x1 << 1)
#define KVM_LARCH_LASX (0x1 << 2)
#define KVM_LARCH_SWCSR_LATEST (0x1 << 3)
#define KVM_LARCH_HWCSR_USABLE (0x1 << 4)
#define KVM_LARCH_PERF (0x1 << 5)
#define KVM_LARCH_LBT (0x1 << 3)
#define KVM_LARCH_PMU (0x1 << 4)
#define KVM_LARCH_SWCSR_LATEST (0x1 << 5)
#define KVM_LARCH_HWCSR_USABLE (0x1 << 6)
#define LOONGARCH_PV_FEAT_UPDATED BIT_ULL(63)
#define LOONGARCH_PV_FEAT_MASK (BIT(KVM_FEATURE_IPI) | \
BIT(KVM_FEATURE_STEAL_TIME) | \
BIT(KVM_FEATURE_VIRT_EXTIOI))
struct kvm_vcpu_arch {
/*
@ -163,10 +213,14 @@ struct kvm_vcpu_arch {
/* FPU state */
struct loongarch_fpu fpu FPU_ALIGN;
struct loongarch_lbt lbt;
/* CSR state */
struct loongarch_csrs *csr;
/* Guest max PMU CSR id */
int max_pmu_csrid;
/* GPR used as IO source/target */
u32 io_gpr;
@ -188,6 +242,7 @@ struct kvm_vcpu_arch {
/* vcpu's vpid */
u64 vpid;
gpa_t flush_gpa;
/* Frequency of stable timer in Hz */
u64 timer_mhz;
@ -197,6 +252,8 @@ struct kvm_vcpu_arch {
int last_sched_cpu;
/* mp state */
struct kvm_mp_state mp_state;
/* ipi state */
struct ipi_state ipi_state;
/* cpucfg */
u32 cpucfg[KVM_MAX_CPUCFG_REGS];
/* paravirt steal time */
@ -235,6 +292,11 @@ static inline bool kvm_guest_has_lasx(struct kvm_vcpu_arch *arch)
return arch->cpucfg[2] & CPUCFG2_LASX;
}
static inline bool kvm_guest_has_lbt(struct kvm_vcpu_arch *arch)
{
return arch->cpucfg[2] & (CPUCFG2_X86BT | CPUCFG2_ARMBT | CPUCFG2_MIPSBT);
}
static inline bool kvm_guest_has_pmu(struct kvm_vcpu_arch *arch)
{
return arch->cpucfg[6] & CPUCFG6_PMP;

View File

@ -0,0 +1,52 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2024 Loongson Technology Corporation Limited
*/
#ifndef __LS3A_KVM_IPI_H
#define __LS3A_KVM_IPI_H
#include <kvm/iodev.h>
#define LARCH_INT_IPI 12
struct loongarch_ipi {
spinlock_t lock;
struct kvm *kvm;
struct kvm_io_device device;
struct kvm_io_device mail_dev;
};
struct ipi_state {
spinlock_t lock;
uint32_t status;
uint32_t en;
uint32_t set;
uint32_t clear;
uint64_t buf[4];
};
#define SMP_MAILBOX 0x1000
#define KVM_IOCSR_IPI_ADDR_SIZE 0x48
#define CORE_STATUS_OFF 0x000
#define CORE_EN_OFF 0x004
#define CORE_SET_OFF 0x008
#define CORE_CLEAR_OFF 0x00c
#define CORE_BUF_20 0x020
#define CORE_BUF_28 0x028
#define CORE_BUF_30 0x030
#define CORE_BUF_38 0x038
#define IOCSR_IPI_SEND 0x040
#define IOCSR_MAIL_SEND 0x048
#define IOCSR_ANY_SEND 0x158
#define MAIL_SEND_ADDR (SMP_MAILBOX + IOCSR_MAIL_SEND)
#define KVM_IOCSR_MAIL_ADDR_SIZE 0x118
#define MAIL_SEND_OFFSET 0
#define ANY_SEND_OFFSET (IOCSR_ANY_SEND - IOCSR_MAIL_SEND)
int kvm_loongarch_register_ipi_device(void);
#endif

View File

@ -2,28 +2,34 @@
#ifndef _ASM_LOONGARCH_KVM_PARA_H
#define _ASM_LOONGARCH_KVM_PARA_H
#include <uapi/asm/kvm_para.h>
/*
* Hypercall code field
*/
#define HYPERVISOR_KVM 1
#define HYPERVISOR_VENDOR_SHIFT 8
#define HYPERCALL_CODE(vendor, code) ((vendor << HYPERVISOR_VENDOR_SHIFT) + code)
#define KVM_HCALL_CODE_PV_SERVICE 0
#define HYPERCALL_ENCODE(vendor, code) ((vendor << HYPERVISOR_VENDOR_SHIFT) + code)
#define KVM_HCALL_CODE_SERVICE 0
#define KVM_HCALL_CODE_SWDBG 1
#define KVM_HCALL_PV_SERVICE HYPERCALL_CODE(HYPERVISOR_KVM, KVM_HCALL_CODE_PV_SERVICE)
#define KVM_HCALL_FUNC_PV_IPI 1
#define KVM_HCALL_SERVICE HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SERVICE)
#define KVM_HCALL_FUNC_IPI 1
#define KVM_HCALL_FUNC_NOTIFY 2
#define KVM_HCALL_SWDBG HYPERCALL_CODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SWDBG)
#define KVM_HCALL_SWDBG HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SWDBG)
/*
* LoongArch hypercall return code
*/
#define KVM_HCALL_STATUS_SUCCESS 0
#define KVM_HCALL_SUCCESS 0
#define KVM_HCALL_INVALID_CODE -1UL
#define KVM_HCALL_INVALID_PARAMETER -2UL
#define KVM_STEAL_PHYS_VALID BIT_ULL(0)
#define KVM_STEAL_PHYS_MASK GENMASK_ULL(63, 6)
struct kvm_steal_time {
__u64 steal;
__u32 version;
@ -31,21 +37,22 @@ struct kvm_steal_time {
__u32 pad[12];
};
/*
* Hypercall interface for KVM hypervisor
*
* a0: function identifier
* a1-a6: args
* Return value will be placed in v0.
* Up to 6 arguments are passed in a1, a2, a3, a4, a5, a6.
* a1-a5: args
* Return value will be placed in a0.
* Up to 5 arguments are passed in a1, a2, a3, a4, a5.
*/
static __always_inline long kvm_hypercall(u64 fid)
static __always_inline long kvm_hypercall0(u64 fid)
{
register long ret asm("v0");
register long ret asm("a0");
register unsigned long fun asm("a0") = fid;
__asm__ __volatile__(
"hvcl "__stringify(KVM_HCALL_PV_SERVICE)
"hvcl "__stringify(KVM_HCALL_SERVICE)
: "=r" (ret)
: "r" (fun)
: "memory"
@ -56,12 +63,12 @@ static __always_inline long kvm_hypercall(u64 fid)
static __always_inline long kvm_hypercall1(u64 fid, unsigned long arg0)
{
register long ret asm("v0");
register long ret asm("a0");
register unsigned long fun asm("a0") = fid;
register unsigned long a1 asm("a1") = arg0;
__asm__ __volatile__(
"hvcl "__stringify(KVM_HCALL_PV_SERVICE)
"hvcl "__stringify(KVM_HCALL_SERVICE)
: "=r" (ret)
: "r" (fun), "r" (a1)
: "memory"
@ -73,17 +80,17 @@ static __always_inline long kvm_hypercall1(u64 fid, unsigned long arg0)
static __always_inline long kvm_hypercall2(u64 fid,
unsigned long arg0, unsigned long arg1)
{
register long ret asm("v0");
register long ret asm("a0");
register unsigned long fun asm("a0") = fid;
register unsigned long a1 asm("a1") = arg0;
register unsigned long a2 asm("a2") = arg1;
__asm__ __volatile__(
"hvcl "__stringify(KVM_HCALL_PV_SERVICE)
: "=r" (ret)
: "r" (fun), "r" (a1), "r" (a2)
: "memory"
);
"hvcl "__stringify(KVM_HCALL_SERVICE)
: "=r" (ret)
: "r" (fun), "r" (a1), "r" (a2)
: "memory"
);
return ret;
}
@ -91,14 +98,14 @@ static __always_inline long kvm_hypercall2(u64 fid,
static __always_inline long kvm_hypercall3(u64 fid,
unsigned long arg0, unsigned long arg1, unsigned long arg2)
{
register long ret asm("v0");
register long ret asm("a0");
register unsigned long fun asm("a0") = fid;
register unsigned long a1 asm("a1") = arg0;
register unsigned long a2 asm("a2") = arg1;
register unsigned long a3 asm("a3") = arg2;
__asm__ __volatile__(
"hvcl "__stringify(KVM_HCALL_PV_SERVICE)
"hvcl "__stringify(KVM_HCALL_SERVICE)
: "=r" (ret)
: "r" (fun), "r" (a1), "r" (a2), "r" (a3)
: "memory"
@ -108,10 +115,10 @@ static __always_inline long kvm_hypercall3(u64 fid,
}
static __always_inline long kvm_hypercall4(u64 fid,
unsigned long arg0, unsigned long arg1, unsigned long arg2,
unsigned long arg3)
unsigned long arg0, unsigned long arg1,
unsigned long arg2, unsigned long arg3)
{
register long ret asm("v0");
register long ret asm("a0");
register unsigned long fun asm("a0") = fid;
register unsigned long a1 asm("a1") = arg0;
register unsigned long a2 asm("a2") = arg1;
@ -119,7 +126,7 @@ static __always_inline long kvm_hypercall4(u64 fid,
register unsigned long a4 asm("a4") = arg3;
__asm__ __volatile__(
"hvcl "__stringify(KVM_HCALL_PV_SERVICE)
"hvcl "__stringify(KVM_HCALL_SERVICE)
: "=r" (ret)
: "r"(fun), "r" (a1), "r" (a2), "r" (a3), "r" (a4)
: "memory"
@ -129,10 +136,10 @@ static __always_inline long kvm_hypercall4(u64 fid,
}
static __always_inline long kvm_hypercall5(u64 fid,
unsigned long arg0, unsigned long arg1, unsigned long arg2,
unsigned long arg3, unsigned long arg4)
unsigned long arg0, unsigned long arg1,
unsigned long arg2, unsigned long arg3, unsigned long arg4)
{
register long ret asm("v0");
register long ret asm("a0");
register unsigned long fun asm("a0") = fid;
register unsigned long a1 asm("a1") = arg0;
register unsigned long a2 asm("a2") = arg1;
@ -141,7 +148,7 @@ static __always_inline long kvm_hypercall5(u64 fid,
register unsigned long a5 asm("a5") = arg4;
__asm__ __volatile__(
"hvcl "__stringify(KVM_HCALL_PV_SERVICE)
"hvcl "__stringify(KVM_HCALL_SERVICE)
: "=r" (ret)
: "r"(fun), "r" (a1), "r" (a2), "r" (a3), "r" (a4), "r" (a5)
: "memory"
@ -150,11 +157,20 @@ static __always_inline long kvm_hypercall5(u64 fid,
return ret;
}
#ifdef CONFIG_PARAVIRT
bool kvm_para_available(void);
unsigned int kvm_arch_para_features(void);
#else
static inline bool kvm_para_available(void)
{
return false;
}
static inline unsigned int kvm_arch_para_features(void)
{
return 0;
}
#endif
static inline unsigned int kvm_arch_para_hints(void)
{
@ -165,4 +181,5 @@ static inline bool kvm_check_and_clear_guest_paused(void)
{
return false;
}
#endif /* _ASM_LOONGARCH_KVM_PARA_H */
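As the interface comment above states, the function id goes in a0, up to five arguments in a1-a5, and the result comes back in a0. A hedged usage sketch built from the wrappers in this header, mirroring how the handler in exit.c reads the pv IPI arguments (a1/a2 = 128-bit vcpu bitmap, a3 = starting vcpu id); example_send_pv_ipi is an illustrative name, not a function from the patch:

/* Sketch: fid in a0, bitmap halves in a1/a2, starting vcpu id in a3. */
static long example_send_pv_ipi(unsigned long bitmap_lo,
				unsigned long bitmap_hi, int min)
{
	return kvm_hypercall3(KVM_HCALL_FUNC_IPI, bitmap_lo, bitmap_hi, min);
}

On success the hypervisor places KVM_HCALL_SUCCESS in a0; an unrecognized function id comes back as KVM_HCALL_INVALID_CODE, per the return codes defined above.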

View File

@ -0,0 +1,61 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2024 Loongson Technology Corporation Limited
*/
#ifndef LOONGARCH_PCH_PIC_H
#define LOONGARCH_PCH_PIC_H
#include <kvm/iodev.h>
#define PCH_PIC_SIZE 0x3e8
#define PCH_PIC_INT_ID_START 0x0
#define PCH_PIC_INT_ID_END 0x7
#define PCH_PIC_MASK_START 0x20
#define PCH_PIC_MASK_END 0x27
#define PCH_PIC_HTMSI_EN_START 0x40
#define PCH_PIC_HTMSI_EN_END 0x47
#define PCH_PIC_EDGE_START 0x60
#define PCH_PIC_EDGE_END 0x67
#define PCH_PIC_CLEAR_START 0x80
#define PCH_PIC_CLEAR_END 0x87
#define PCH_PIC_AUTO_CTRL0_START 0xc0
#define PCH_PIC_AUTO_CTRL0_END 0xc7
#define PCH_PIC_AUTO_CTRL1_START 0xe0
#define PCH_PIC_AUTO_CTRL1_END 0xe7
#define PCH_PIC_ROUTE_ENTRY_START 0x100
#define PCH_PIC_ROUTE_ENTRY_END 0x13f
#define PCH_PIC_HTMSI_VEC_START 0x200
#define PCH_PIC_HTMSI_VEC_END 0x23f
#define PCH_PIC_INT_IRR_START 0x380
#define PCH_PIC_INT_IRR_END 0x38f
#define PCH_PIC_INT_ISR_START 0x3a0
#define PCH_PIC_INT_ISR_END 0x3af
#define PCH_PIC_POLARITY_START 0x3e0
#define PCH_PIC_POLARITY_END 0x3e7
#define PCH_PIC_INT_ID_VAL 0x7000000UL
#define PCH_PIC_INT_ID_VER 0x1UL
struct loongarch_pch_pic {
spinlock_t lock;
struct kvm *kvm;
struct kvm_io_device device;
uint64_t mask; /* 1:disable irq, 0:enable irq */
uint64_t htmsi_en; /* 1:msi */
uint64_t edge; /* 1:edge triggered, 0:level triggered */
uint64_t auto_ctrl0; /* only use default value 00b */
uint64_t auto_ctrl1; /* only use default value 00b */
uint64_t last_intirr; /* edge detection */
uint64_t irr; /* interrupt request register */
uint64_t isr; /* interrupt service register */
uint64_t polarity; /* 0: high level trigger, 1: low level trigger */
uint8_t route_entry[64]; /* default value 0, route to int0: extioi */
uint8_t htmsi_vector[64]; /* irq route table for routing to extioi */
uint64_t pch_pic_base;
};
void pch_pic_set_irq(struct loongarch_pch_pic *s, int irq, int level);
void pch_msi_set_irq(struct kvm *kvm, int irq, int level);
int kvm_loongarch_register_pch_pic_device(void);
#endif /* LOONGARCH_PCH_PIC_H */
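The struct comments above sketch the trigger model: a set edge bit means edge-triggered, last_intirr exists for edge detection, and irr/isr track request and service state. A hedged sketch of how last_intirr could be used to latch only rising edges; this is assumed logic for illustration, not the emulation code itself:

/* Illustrative sketch of rising-edge detection using last_intirr. */
static void example_pch_pic_update_irr(struct loongarch_pch_pic *s, int irq, int level)
{
	u64 mask = 1ULL << irq;

	if (s->edge & mask) {
		/* edge-triggered: latch IRR only on a 0 -> 1 transition */
		if (level && !(s->last_intirr & mask))
			s->irr |= mask;
	} else {
		/* level-triggered: IRR follows the line */
		if (level)
			s->irr |= mask;
		else
			s->irr &= ~mask;
	}

	if (level)
		s->last_intirr |= mask;
	else
		s->last_intirr &= ~mask;
}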

View File

@ -75,10 +75,13 @@ static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { }
static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { }
#endif
int kvm_own_pmu(struct kvm_vcpu *vcpu);
#ifdef CONFIG_CPU_HAS_LBT
int kvm_own_lbt(struct kvm_vcpu *vcpu);
#else
static inline int kvm_own_lbt(struct kvm_vcpu *vcpu) { return -EINVAL; }
#endif
void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
void kvm_reset_timer(struct kvm_vcpu *vcpu);
void kvm_save_timer(struct kvm_vcpu *vcpu);
void kvm_restore_timer(struct kvm_vcpu *vcpu);
@ -112,4 +115,24 @@ static inline int kvm_queue_exception(struct kvm_vcpu *vcpu,
return -1;
}
static inline unsigned long kvm_read_reg(struct kvm_vcpu *vcpu, int num)
{
return vcpu->arch.gprs[num];
}
static inline void kvm_write_reg(struct kvm_vcpu *vcpu, int num, unsigned long val)
{
vcpu->arch.gprs[num] = val;
}
static inline bool kvm_pvtime_supported(void)
{
return !!sched_info_on();
}
static inline bool kvm_guest_has_pv_feature(struct kvm_vcpu *vcpu, unsigned int feature)
{
return vcpu->kvm->arch.pv_features & BIT(feature);
}
#endif /* __ASM_LOONGARCH_KVM_VCPU_H__ */

View File

@ -161,15 +161,8 @@
/*
* cpucfg index area: 0x40000000 -- 0x400000ff
* SW emulation for KVM hypervisor
* SW emulation for KVM hypervisor, see arch/loongarch/include/uapi/asm/kvm_para.h
*/
#define CPUCFG_KVM_BASE 0x40000000UL
#define CPUCFG_KVM_SIZE 0x100
#define CPUCFG_KVM_SIG CPUCFG_KVM_BASE
#define KVM_SIGNATURE "KVM\0"
#define CPUCFG_KVM_FEATURE (CPUCFG_KVM_BASE + 4)
#define KVM_FEATURE_PV_IPI BIT(1)
#define KVM_FEATURE_STEAL_TIME BIT(2)
#ifndef __ASSEMBLY__

View File

@ -18,6 +18,7 @@ static inline u64 paravirt_steal_clock(int cpu)
int pv_ipi_init(void);
int __init pv_time_init(void);
int __init pv_spinlock_init(void);
#else
static inline int pv_ipi_init(void)
{
@ -28,5 +29,11 @@ static inline int pv_time_init(void)
{
return 0;
}
static inline int pv_spinlock_init(void)
{
return 0;
}
#endif // CONFIG_PARAVIRT
#endif

View File

@ -0,0 +1,41 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_LOONGARCH_QSPINLOCK_H
#define _ASM_LOONGARCH_QSPINLOCK_H
#include <linux/jump_label.h>
#ifdef CONFIG_PARAVIRT
DECLARE_STATIC_KEY_FALSE(virt_spin_lock_key);
#define virt_spin_lock virt_spin_lock
static inline bool virt_spin_lock(struct qspinlock *lock)
{
int val;
if (!static_branch_unlikely(&virt_spin_lock_key))
return false;
/*
* On hypervisors without PARAVIRT_SPINLOCKS support we fall
* back to a Test-and-Set spinlock, because fair locks have
* horrible lock 'holder' preemption issues.
*/
__retry:
val = atomic_read(&lock->val);
if (val || !atomic_try_cmpxchg(&lock->val, &val, _Q_LOCKED_VAL)) {
cpu_relax();
goto __retry;
}
return true;
}
#endif /* CONFIG_PARAVIRT */
#include <asm-generic/qspinlock.h>
#endif // _ASM_LOONGARCH_QSPINLOCK_H

View File

@ -19,6 +19,7 @@
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_DIRTY_LOG_PAGE_OFFSET 64
#define __KVM_HAVE_IRQ_LINE
#define KVM_GUESTDBG_USE_SW_BP 0x00010000
/*
@ -66,6 +67,7 @@ struct kvm_fpu {
#define KVM_REG_LOONGARCH_KVM (KVM_REG_LOONGARCH | 0x20000ULL)
#define KVM_REG_LOONGARCH_FPSIMD (KVM_REG_LOONGARCH | 0x30000ULL)
#define KVM_REG_LOONGARCH_CPUCFG (KVM_REG_LOONGARCH | 0x40000ULL)
#define KVM_REG_LOONGARCH_LBT (KVM_REG_LOONGARCH | 0x50000ULL)
#define KVM_REG_LOONGARCH_MASK (KVM_REG_LOONGARCH | 0x70000ULL)
#define KVM_CSR_IDX_MASK 0x7fff
#define KVM_CPUCFG_IDX_MASK 0x7fff
@ -79,11 +81,30 @@ struct kvm_fpu {
/* Debugging: Special instruction for software breakpoint */
#define KVM_REG_LOONGARCH_DEBUG_INST (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 3)
/* LBT registers */
#define KVM_REG_LOONGARCH_LBT_SCR0 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 1)
#define KVM_REG_LOONGARCH_LBT_SCR1 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 2)
#define KVM_REG_LOONGARCH_LBT_SCR2 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 3)
#define KVM_REG_LOONGARCH_LBT_SCR3 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 4)
#define KVM_REG_LOONGARCH_LBT_EFLAGS (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 5)
#define KVM_REG_LOONGARCH_LBT_FTOP (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 6)
#define LOONGARCH_REG_SHIFT 3
#define LOONGARCH_REG_64(TYPE, REG) (TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT))
#define KVM_IOC_CSRID(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CSR, REG)
#define KVM_IOC_CPUCFG(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CPUCFG, REG)
/* Device Control API on vm fd */
#define KVM_LOONGARCH_VM_FEAT_CTRL 0
#define KVM_LOONGARCH_VM_FEAT_LSX 0
#define KVM_LOONGARCH_VM_FEAT_LASX 1
#define KVM_LOONGARCH_VM_FEAT_X86BT 2
#define KVM_LOONGARCH_VM_FEAT_ARMBT 3
#define KVM_LOONGARCH_VM_FEAT_MIPSBT 4
#define KVM_LOONGARCH_VM_FEAT_PMU 5
#define KVM_LOONGARCH_VM_FEAT_PV_IPI 6
#define KVM_LOONGARCH_VM_FEAT_PV_STEALTIME 7
/* Device Control API on vcpu fd */
#define KVM_LOONGARCH_VCPU_CPUCFG 0
#define KVM_LOONGARCH_VCPU_PVTIME_CTRL 1
@ -114,4 +135,15 @@ struct kvm_iocsr_entry {
#define KVM_IRQCHIP_NUM_PINS 64
#define KVM_MAX_CORES 256
#define KVM_LOONGARCH_VM_HAVE_IRQCHIP 0x40000001
#define KVM_DEV_LOONGARCH_IPI_GRP_REGS 0x40000002
#define KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS 0x40000003
#define KVM_DEV_LOONGARCH_PCH_PIC_GRP_CTRL 0x40000004
#define KVM_DEV_LOONGARCH_PCH_PIC_CTRL_INIT 0
#define KVM_DEV_LOONGARCH_PCH_PIC_GRP_REGS 0x40000005
#endif /* __UAPI_ASM_LOONGARCH_KVM_H */

View File

@ -0,0 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_ASM_KVM_PARA_H
#define _UAPI_ASM_KVM_PARA_H
#include <linux/types.h>
/*
* CPUCFG index area: 0x40000000 -- 0x400000ff
* SW emulation for KVM hypervisor
*/
#define CPUCFG_KVM_BASE 0x40000000
#define CPUCFG_KVM_SIZE 0x100
#define CPUCFG_KVM_SIG (CPUCFG_KVM_BASE + 0)
#define KVM_SIGNATURE "KVM\0"
#define CPUCFG_KVM_FEATURE (CPUCFG_KVM_BASE + 4)
#define KVM_FEATURE_IPI 1
#define KVM_FEATURE_STEAL_TIME 2
/* BIT 24 - 31 are features configurable by user space vmm */
#define KVM_FEATURE_VIRT_EXTIOI 24
#endif /* _UAPI_ASM_KVM_PARA_H */
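Note that these feature numbers are bit positions, not masks: kvm_arch_para_features() in paravirt.c returns the raw CPUCFG_KVM_FEATURE word, and the generic kvm_para_has_feature() tests BIT(feature) within it. A minimal usage sketch (example_wants_pv_ipi is an illustrative name, not from the patch):

#include <linux/kvm_para.h>

/* Sketch: gate guest PV setup on the CPUCFG-advertised feature bits. */
static bool example_wants_pv_ipi(void)
{
	return kvm_para_available() && kvm_para_has_feature(KVM_FEATURE_IPI);
}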

View File

@ -8,10 +8,11 @@
#include <linux/reboot.h>
#include <linux/static_call.h>
static int has_steal_clock;
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock;
DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key);
static u64 native_steal_clock(int cpu)
{
@ -71,6 +72,62 @@ static int pv_register_steal_time(void)
return 0;
}
static bool steal_acc = true;
static int __init parse_no_stealacc(char *arg)
{
steal_acc = false;
return 0;
}
early_param("no-steal-acc", parse_no_stealacc);
static u64 paravt_steal_clock(int cpu)
{
int version;
u64 steal;
struct kvm_steal_time *src;
src = &per_cpu(steal_time, cpu);
do {
version = src->version;
virt_rmb(); /* Make sure that the version is read before the steal */
steal = src->steal;
virt_rmb(); /* Make sure that the steal is read before the next version */
} while ((version & 1) || (version != src->version));
return steal;
}
#ifdef CONFIG_SMP
static void pv_send_ipi_single(int cpu, unsigned int action)
{
@ -192,11 +249,14 @@ static int pv_cpu_down_prepare(unsigned int cpu)
}
#endif
static bool kvm_para_available(void)
bool kvm_para_available(void)
{
static int hypervisor_type;
int config;
if (!cpu_has_hypervisor)
return false;
if (!hypervisor_type) {
config = read_cpucfg(CPUCFG_KVM_SIG);
if (!memcmp(&config, KVM_SIGNATURE, 4))
@ -206,28 +266,31 @@ static bool kvm_para_available(void)
return hypervisor_type == HYPERVISOR_KVM;
}
int __init pv_ipi_init(void)
unsigned int kvm_arch_para_features(void)
{
int feature;
static unsigned int feature;
if (!cpu_has_hypervisor)
return 0;
if (!kvm_para_available())
return 0;
/*
* check whether KVM hypervisor supports pv_ipi or not
*/
feature = read_cpucfg(CPUCFG_KVM_FEATURE);
if (!feature)
feature = read_cpucfg(CPUCFG_KVM_FEATURE);
return feature;
}
int __init pv_ipi_init(void)
{
if (!kvm_para_has_feature(KVM_FEATURE_IPI))
return 0;
#ifdef CONFIG_SMP
if (feature & KVM_FEATURE_PV_IPI) {
smp_ops.init_ipi = pv_init_ipi;
smp_ops.send_ipi_single = pv_send_ipi_single;
smp_ops.send_ipi_mask = pv_send_ipi_mask;
}
smp_ops.init_ipi = pv_init_ipi;
smp_ops.send_ipi_single = pv_send_ipi_single;
smp_ops.send_ipi_mask = pv_send_ipi_mask;
#endif
return 1;
return 0;
}
static void pv_cpu_reboot(void *unused)
@ -279,3 +342,235 @@ int __init pv_time_init(void)
pr_info("Using stolen time PV\n");
return 0;
}
static int pv_enable_steal_time(void)
{
int cpu = smp_processor_id();
unsigned long addr;
struct kvm_steal_time *st;
if (!has_steal_clock)
return -EPERM;
st = &per_cpu(steal_time, cpu);
addr = per_cpu_ptr_to_phys(st);
/* The whole structure kvm_steal_time should be in one page */
if (PFN_DOWN(addr) != PFN_DOWN(addr + sizeof(*st))) {
pr_warn("Illegal PV steal time addr %lx\n", addr);
return -EFAULT;
}
addr |= KVM_STEAL_PHYS_VALID;
kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, BIT(KVM_FEATURE_STEAL_TIME), addr);
return 0;
}
static void pv_disable_steal_time(void)
{
if (has_steal_clock)
kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, BIT(KVM_FEATURE_STEAL_TIME), 0);
}
#ifdef CONFIG_SMP
static int pv_time_cpu_online(unsigned int cpu)
{
unsigned long flags;
local_irq_save(flags);
pv_enable_steal_time();
local_irq_restore(flags);
return 0;
}
static int pv_time_cpu_down_prepare(unsigned int cpu)
{
unsigned long flags;
local_irq_save(flags);
pv_disable_steal_time();
local_irq_restore(flags);
return 0;
}
#endif
static void pv_cpu_reboot(void *unused)
{
pv_disable_steal_time();
}
static int pv_reboot_notify(struct notifier_block *nb, unsigned long code, void *unused)
{
on_each_cpu(pv_cpu_reboot, NULL, 1);
return NOTIFY_DONE;
}
static struct notifier_block pv_reboot_nb = {
.notifier_call = pv_reboot_notify,
};
int __init pv_time_init(void)
{
int r;
if (!kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
return 0;
has_steal_clock = 1;
r = pv_enable_steal_time();
if (r < 0) {
has_steal_clock = 0;
return 0;
}
register_reboot_notifier(&pv_reboot_nb);
#ifdef CONFIG_SMP
r = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
"loongarch/pv_time:online",
pv_time_cpu_online, pv_time_cpu_down_prepare);
if (r < 0) {
has_steal_clock = 0;
pr_err("Failed to install cpu hotplug callbacks\n");
return r;
}
#endif
static_call_update(pv_steal_clock, paravt_steal_clock);
static_key_slow_inc(&paravirt_steal_enabled);
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
if (steal_acc)
static_key_slow_inc(&paravirt_steal_rq_enabled);
#endif
pr_info("Using paravirt steal-time\n");
return 0;
}
int __init pv_spinlock_init(void)
{
if (!cpu_has_hypervisor)
return 0;
static_branch_enable(&virt_spin_lock_key);
return 0;
}

View File

@ -665,6 +665,8 @@ void __init setup_arch(char **cmdline_p)
arch_mem_init(cmdline_p);
resource_init();
jump_label_init(); /* Initialise the static keys for paravirtualization */
#ifdef CONFIG_SMP
plat_smp_setup();
prefill_possible_map();

View File

@ -477,7 +477,7 @@ core_initcall(ipi_pm_init);
#endif
/* Preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
void __init smp_prepare_boot_cpu(void)
{
unsigned int cpu, node, rr_node;
@ -510,6 +510,8 @@ void smp_prepare_boot_cpu(void)
rr_node = next_node_in(rr_node, node_online_map);
}
}
pv_spinlock_init();
}
/* called from main before smp_init() */

View File

@ -24,12 +24,18 @@ config KVM
select HAVE_KVM_DIRTY_RING_ACQ_REL
select HAVE_KVM_EVENTFD
select HAVE_KVM_VCPU_ASYNC_IOCTL
select HAVE_KVM_IRQ_ROUTING
select HAVE_KVM_IRQCHIP
select HAVE_KVM_IRQFD
select HAVE_KVM_MSI
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
select KVM_GENERIC_HARDWARE_ENABLING
select KVM_MMIO
select KVM_XFER_TO_GUEST_WORK
select SCHED_INFO
select MMU_NOTIFIER
select PREEMPT_NOTIFIERS
select KVM_VFIO
help
Support hosting virtualized guest machines using
hardware virtualization extensions. You will need

View File

@ -18,5 +18,9 @@ kvm-y += timer.o
kvm-y += tlb.o
kvm-y += vcpu.o
kvm-y += vm.o
kvm-y += intc/ipi.o
kvm-y += intc/extioi.o
kvm-y += intc/pch_pic.o
kvm-y += irqfd.o
CFLAGS_exit.o += $(call cc-option,-Wno-override-init,)

View File

@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/vmalloc.h>
#include <trace/events/kvm.h>
#include <asm/fpu.h>
#include <asm/inst.h>
#include <asm/loongarch.h>
@ -20,6 +21,47 @@
#include <asm/kvm_vcpu.h>
#include "trace.h"
static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst)
{
int rd, rj;
unsigned int index, ret;
if (inst.reg2_format.opcode != cpucfg_op)
return EMULATE_FAIL;
rd = inst.reg2_format.rd;
rj = inst.reg2_format.rj;
++vcpu->stat.cpucfg_exits;
index = vcpu->arch.gprs[rj];
/*
* By LoongArch Reference Manual 2.2.10.5
* Return value is 0 for undefined CPUCFG index
*
* Disable preemption since hw gcsr is accessed
*/
preempt_disable();
switch (index) {
case 0 ... (KVM_MAX_CPUCFG_REGS - 1):
vcpu->arch.gprs[rd] = vcpu->arch.cpucfg[index];
break;
case CPUCFG_KVM_SIG:
/* CPUCFG emulation between 0x40000000 -- 0x400000ff */
vcpu->arch.gprs[rd] = *(unsigned int *)KVM_SIGNATURE;
break;
case CPUCFG_KVM_FEATURE:
ret = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK;
vcpu->arch.gprs[rd] = ret;
break;
default:
vcpu->arch.gprs[rd] = 0;
break;
}
preempt_enable();
return EMULATE_DONE;
}
static unsigned long kvm_emu_read_csr(struct kvm_vcpu *vcpu, int csrid)
{
unsigned long val = 0;
@ -83,9 +125,10 @@ static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst)
rj = inst.reg2csr_format.rj;
csrid = inst.reg2csr_format.csr;
if (csrid >= LOONGARCH_CSR_PERFCTRL0 && csrid <= LOONGARCH_CSR_PERFCNTR3) {
if (!kvm_own_pmu(vcpu)) {
if (csrid >= LOONGARCH_CSR_PERFCTRL0 && csrid <= vcpu->arch.max_pmu_csrid) {
if (kvm_guest_has_pmu(&vcpu->arch)) {
vcpu->arch.pc -= 4;
kvm_make_request(KVM_REQ_PMU, vcpu);
return EMULATE_DONE;
}
}
@ -114,7 +157,7 @@ static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst)
int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu)
{
int ret;
unsigned long val;
unsigned long *val;
u32 addr, rd, rj, opcode;
/*
@ -127,6 +170,7 @@ int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu)
ret = EMULATE_DO_IOCSR;
run->iocsr_io.phys_addr = addr;
run->iocsr_io.is_write = 0;
val = &vcpu->arch.gprs[rd];
/* LoongArch is Little endian */
switch (opcode) {
@ -158,17 +202,26 @@ int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu)
run->iocsr_io.len = 8;
run->iocsr_io.is_write = 1;
break;
case CPUCFG_KVM_FEATURE:
vcpu->arch.gprs[rd] = KVM_FEATURE_IPI;
break;
default:
ret = EMULATE_FAIL;
break;
return ret;
}
if (ret == EMULATE_DO_IOCSR) {
if (run->iocsr_io.is_write) {
val = vcpu->arch.gprs[rd];
memcpy(run->iocsr_io.data, &val, run->iocsr_io.len);
}
vcpu->arch.io_gpr = rd;
if (run->iocsr_io.is_write) {
if (!kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val))
ret = EMULATE_DONE;
else
/* Save data and let user space write it */
memcpy(run->iocsr_io.data, val, run->iocsr_io.len);
} else {
if (!kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val))
ret = EMULATE_DONE;
else
/* Save register id for iocsr read completion */
vcpu->arch.io_gpr = rd;
}
return ret;
@ -213,57 +266,6 @@ int kvm_emu_idle(struct kvm_vcpu *vcpu)
return EMULATE_DONE;
}
static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst)
{
int rd, rj;
unsigned int index, ret;
unsigned long plv;
rd = inst.reg2_format.rd;
rj = inst.reg2_format.rj;
++vcpu->stat.cpucfg_exits;
index = vcpu->arch.gprs[rj];
/*
* By LoongArch Reference Manual 2.2.10.5
* Return value is 0 for undefined cpucfg index
*
* Disable preemption since hw gcsr is accessed
*/
preempt_disable();
plv = kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD) >> CSR_CRMD_PLV_SHIFT;
switch (index) {
case 0 ... (KVM_MAX_CPUCFG_REGS - 1):
vcpu->arch.gprs[rd] = vcpu->arch.cpucfg[index];
break;
case CPUCFG_KVM_SIG:
/*
* Cpucfg emulation between 0x40000000 -- 0x400000ff
* Return value with 0 if executed in user mode
*/
if ((plv & CSR_CRMD_PLV) == PLV_KERN)
vcpu->arch.gprs[rd] = *(unsigned int *)KVM_SIGNATURE;
else
vcpu->arch.gprs[rd] = 0;
break;
case CPUCFG_KVM_FEATURE:
ret = 0;
if ((plv & CSR_CRMD_PLV) == PLV_KERN) {
ret = KVM_FEATURE_PV_IPI;
if (sched_info_on())
ret |= KVM_FEATURE_STEAL_TIME;
}
vcpu->arch.gprs[rd] = ret;
break;
default:
vcpu->arch.gprs[rd] = 0;
break;
}
preempt_enable();
return EMULATE_DONE;
}
static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu)
{
unsigned long curr_pc;
@ -280,8 +282,7 @@ static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu)
er = EMULATE_FAIL;
switch (((inst.word >> 24) & 0xff)) {
case 0x0: /* CPUCFG GSPR */
if (inst.reg2_format.opcode == cpucfg_op)
er = kvm_emu_cpucfg(vcpu, inst);
er = kvm_emu_cpucfg(vcpu, inst);
break;
case 0x4: /* CSR{RD,WR,XCHG} GSPR */
er = kvm_handle_csr(vcpu, inst);
@ -456,17 +457,33 @@ int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst)
}
if (ret == EMULATE_DO_MMIO) {
/*
* if an mmio device such as the pch pic is emulated in KVM,
* there is no need to return to user space to handle the
* mmio exception.
*/
ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, vcpu->arch.badv,
run->mmio.len, &vcpu->arch.gprs[rd]);
if (!ret) {
update_pc(&vcpu->arch);
vcpu->mmio_needed = 0;
return EMULATE_DONE;
}
/* Set for kvm_complete_mmio_read() use */
vcpu->arch.io_gpr = rd;
run->mmio.is_write = 0;
vcpu->mmio_is_write = 0;
} else {
kvm_err("Read not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
inst.word, vcpu->arch.pc, vcpu->arch.badv);
kvm_arch_vcpu_dump_regs(vcpu);
vcpu->mmio_needed = 0;
trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, run->mmio.len,
run->mmio.phys_addr, NULL);
return EMULATE_DO_MMIO;
}
kvm_err("Read not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
inst.word, vcpu->arch.pc, vcpu->arch.badv);
kvm_arch_vcpu_dump_regs(vcpu);
vcpu->mmio_needed = 0;
return ret;
}
@ -506,6 +523,9 @@ int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
break;
}
trace_kvm_mmio(KVM_TRACE_MMIO_READ, run->mmio.len,
run->mmio.phys_addr, run->mmio.data);
return er;
}
@ -604,17 +624,30 @@ int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst)
}
if (ret == EMULATE_DO_MMIO) {
/*
* if an mmio device such as the pch pic is emulated in KVM,
* there is no need to return to user space to handle the
* mmio exception.
*/
ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, vcpu->arch.badv,
run->mmio.len, data);
if (!ret)
return EMULATE_DONE;
run->mmio.is_write = 1;
vcpu->mmio_needed = 1;
vcpu->mmio_is_write = 1;
} else {
vcpu->arch.pc = curr_pc;
kvm_err("Write not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
inst.word, vcpu->arch.pc, vcpu->arch.badv);
kvm_arch_vcpu_dump_regs(vcpu);
/* Rollback PC if emulation was unsuccessful */
trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, run->mmio.len,
run->mmio.phys_addr, data);
return EMULATE_DO_MMIO;
}
vcpu->arch.pc = curr_pc;
kvm_err("Write not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
inst.word, vcpu->arch.pc, vcpu->arch.badv);
kvm_arch_vcpu_dump_regs(vcpu);
/* Rollback PC if emulation was unsuccessful */
return ret;
}
@ -698,6 +731,31 @@ static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu)
return RESUME_GUEST;
}
static long kvm_save_notify(struct kvm_vcpu *vcpu)
{
unsigned long id, data;
id = kvm_read_reg(vcpu, LOONGARCH_GPR_A1);
data = kvm_read_reg(vcpu, LOONGARCH_GPR_A2);
switch (id) {
case BIT(KVM_FEATURE_STEAL_TIME):
if (data & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
return KVM_HCALL_INVALID_PARAMETER;
vcpu->arch.st.guest_addr = data;
if (!(data & KVM_STEAL_PHYS_VALID))
return 0;
vcpu->arch.st.last_steal = current->sched_info.run_delay;
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
return 0;
default:
return KVM_HCALL_INVALID_CODE;
};
return KVM_HCALL_INVALID_CODE;
};
/*
* kvm_handle_lsx_disabled() - Guest used LSX while disabled in root.
* @vcpu: Virtual CPU context.
@ -728,29 +786,34 @@ static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu)
return RESUME_GUEST;
}
static int kvm_pv_send_ipi(struct kvm_vcpu *vcpu)
static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu)
{
if (kvm_own_lbt(vcpu))
kvm_queue_exception(vcpu, EXCCODE_INE, 0);
return RESUME_GUEST;
}
static int kvm_send_pv_ipi(struct kvm_vcpu *vcpu)
{
unsigned long ipi_bitmap;
unsigned int min, cpu, i;
unsigned long ipi_bitmap;
struct kvm_vcpu *dest;
min = vcpu->arch.gprs[LOONGARCH_GPR_A3];
min = kvm_read_reg(vcpu, LOONGARCH_GPR_A3);
for (i = 0; i < 2; i++, min += BITS_PER_LONG) {
ipi_bitmap = vcpu->arch.gprs[LOONGARCH_GPR_A1 + i];
ipi_bitmap = kvm_read_reg(vcpu, LOONGARCH_GPR_A1 + i);
if (!ipi_bitmap)
continue;
cpu = find_first_bit((void *)&ipi_bitmap, BITS_PER_LONG);
while (cpu < BITS_PER_LONG) {
dest = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu + min);
cpu = find_next_bit((void *)&ipi_bitmap, BITS_PER_LONG,
cpu + 1);
cpu = find_next_bit((void *)&ipi_bitmap, BITS_PER_LONG, cpu + 1);
if (!dest)
continue;
/*
* Send SWI0 to dest vcpu to emulate IPI interrupt
*/
/* Send SWI0 to dest vcpu to emulate IPI interrupt */
kvm_queue_irq(dest, INT_SWI0);
kvm_vcpu_kick(dest);
}
@ -759,75 +822,58 @@ static int kvm_pv_send_ipi(struct kvm_vcpu *vcpu)
return 0;
}
static int kvm_save_notify(struct kvm_vcpu *vcpu)
{
unsigned long id, data;
id = vcpu->arch.gprs[LOONGARCH_GPR_A1];
data = vcpu->arch.gprs[LOONGARCH_GPR_A2];
switch (id) {
case KVM_FEATURE_STEAL_TIME:
vcpu->arch.st.guest_addr = data;
vcpu->arch.st.last_steal = current->sched_info.run_delay;
kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);
break;
default:
break;
};
return 0;
};
/*
* hypercall emulation always return to guest, Caller should check retval.
* Hypercall emulation always returns to the guest, the caller should check retval.
*/
static void kvm_handle_pv_service(struct kvm_vcpu *vcpu)
static void kvm_handle_service(struct kvm_vcpu *vcpu)
{
unsigned long func = vcpu->arch.gprs[LOONGARCH_GPR_A0];
long ret;
long ret = KVM_HCALL_INVALID_CODE;
unsigned long func = kvm_read_reg(vcpu, LOONGARCH_GPR_A0);
switch (func) {
case KVM_HCALL_FUNC_PV_IPI:
kvm_pv_send_ipi(vcpu);
ret = KVM_HCALL_STATUS_SUCCESS;
case KVM_HCALL_FUNC_IPI:
if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_IPI)) {
kvm_send_pv_ipi(vcpu);
ret = KVM_HCALL_SUCCESS;
}
break;
case KVM_HCALL_FUNC_NOTIFY:
ret = kvm_save_notify(vcpu);
if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME))
ret = kvm_save_notify(vcpu);
break;
default:
ret = KVM_HCALL_INVALID_CODE;
break;
};
}
vcpu->arch.gprs[LOONGARCH_GPR_A0] = ret;
kvm_write_reg(vcpu, LOONGARCH_GPR_A0, ret);
}
static int kvm_handle_hypercall(struct kvm_vcpu *vcpu)
{
int ret;
larch_inst inst;
unsigned int code;
int ret;
inst.word = vcpu->arch.badi;
code = inst.reg0i15_format.immediate;
ret = RESUME_GUEST;
switch (code) {
case KVM_HCALL_PV_SERVICE:
case KVM_HCALL_SERVICE:
vcpu->stat.hypercall_exits++;
kvm_handle_pv_service(vcpu);
kvm_handle_service(vcpu);
break;
case KVM_HCALL_SWDBG:
/* KVM_HC_SWDBG only in effective when SW_BP is enabled */
if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
/* KVM_HCALL_SWDBG is only effective when SW_BP is enabled */
if (vcpu->guest_debug & KVM_GUESTDBG_SW_BP_MASK) {
vcpu->run->exit_reason = KVM_EXIT_DEBUG;
ret = RESUME_HOST;
} else
vcpu->arch.gprs[LOONGARCH_GPR_A0] = KVM_HCALL_INVALID_CODE;
break;
break;
}
fallthrough;
default:
/* Treat it as a noop instruction, only set return value */
vcpu->arch.gprs[LOONGARCH_GPR_A0] = KVM_HCALL_INVALID_CODE;
kvm_write_reg(vcpu, LOONGARCH_GPR_A0, KVM_HCALL_INVALID_CODE);
break;
}
@ -867,6 +913,7 @@ static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = {
[EXCCODE_FPDIS] = kvm_handle_fpu_disabled,
[EXCCODE_LSXDIS] = kvm_handle_lsx_disabled,
[EXCCODE_LASXDIS] = kvm_handle_lasx_disabled,
[EXCCODE_BTDIS] = kvm_handle_lbt_disabled,
[EXCCODE_GSPR] = kvm_handle_gspr,
[EXCCODE_HVC] = kvm_handle_hypercall,
};

View File

@ -0,0 +1,783 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2024 Loongson Technology Corporation Limited
*/
#include <asm/kvm_extioi.h>
#include <asm/kvm_vcpu.h>
#include <linux/count_zeros.h>
#define loongarch_ext_irq_lock(s, flags) spin_lock_irqsave(&s->lock, flags)
#define loongarch_ext_irq_unlock(s, flags) spin_unlock_irqrestore(&s->lock, flags)
static void extioi_update_irq(struct loongarch_extioi *s, int irq, int level)
{
int ipnum, cpu, found, irq_index, irq_mask;
struct kvm_interrupt vcpu_irq;
struct kvm_vcpu *vcpu;
ipnum = s->ipmap.reg_u8[irq / 32];
ipnum = count_trailing_zeros(ipnum);
ipnum = (ipnum >= 0 && ipnum < 4) ? ipnum : 0;
cpu = s->sw_coremap[irq];
vcpu = kvm_get_vcpu(s->kvm, cpu);
irq_index = irq / 32;
/* length of accessing core isr is 4 bytes */
irq_mask = 1 << (irq & 0x1f);
if (level) {
/* if the irq is not enabled, there is nothing to update */
if (((s->enable.reg_u32[irq_index]) & irq_mask) == 0)
return;
s->coreisr.reg_u32[cpu][irq_index] |= irq_mask;
found = find_first_bit(s->sw_coreisr[cpu][ipnum], EXTIOI_IRQS);
set_bit(irq, s->sw_coreisr[cpu][ipnum]);
} else {
s->coreisr.reg_u32[cpu][irq_index] &= ~irq_mask;
clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
found = find_first_bit(s->sw_coreisr[cpu][ipnum], EXTIOI_IRQS);
}
if (found < EXTIOI_IRQS)
/* another irq on this ip line is still pending, parent irq level stays unchanged */
return;
vcpu_irq.irq = level ? INT_HWI0 + ipnum : -(INT_HWI0 + ipnum);
kvm_vcpu_ioctl_interrupt(vcpu, &vcpu_irq);
}
static void extioi_set_sw_coreisr(struct loongarch_extioi *s)
{
int ipnum, cpu, irq_index, irq_mask, irq;
for (irq = 0; irq < EXTIOI_IRQS; irq++) {
ipnum = s->ipmap.reg_u8[irq / 32];
ipnum = count_trailing_zeros(ipnum);
ipnum = (ipnum >= 0 && ipnum < 4) ? ipnum : 0;
irq_index = irq / 32;
/* length of accessing core isr is 4 bytes */
irq_mask = 1 << (irq & 0x1f);
cpu = s->coremap.reg_u8[irq];
if (!!(s->coreisr.reg_u32[cpu][irq_index] & irq_mask))
set_bit(irq, s->sw_coreisr[cpu][ipnum]);
else
clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
}
}
void extioi_set_irq(struct loongarch_extioi *s, int irq, int level)
{
unsigned long *isr = (unsigned long *)s->isr.reg_u8;
unsigned long flags;
level ? set_bit(irq, isr) : clear_bit(irq, isr);
if (!level)
return;
loongarch_ext_irq_lock(s, flags);
extioi_update_irq(s, irq, level);
loongarch_ext_irq_unlock(s, flags);
}
static inline void extioi_enable_irq(struct kvm_vcpu *vcpu, struct loongarch_extioi *s,
int index, u8 mask, int level)
{
u8 val;
int irq;
val = mask & s->isr.reg_u8[index];
irq = ffs(val);
while (irq != 0) {
/*
* enable bit change from 0 to 1,
* need to update irq by pending bits
*/
extioi_update_irq(s, irq - 1 + index * 8, level);
val &= ~(1 << (irq - 1));
irq = ffs(val);
}
}
static int loongarch_extioi_writeb(struct kvm_vcpu *vcpu,
struct loongarch_extioi *s,
gpa_t addr, int len, const void *val)
{
int index, irq, ret = 0;
u8 data, old_data, cpu;
u8 coreisr, old_coreisr;
gpa_t offset;
data = *(u8 *)val;
offset = addr - EXTIOI_BASE;
switch (offset) {
case EXTIOI_NODETYPE_START ... EXTIOI_NODETYPE_END:
index = (offset - EXTIOI_NODETYPE_START);
s->nodetype.reg_u8[index] = data;
break;
case EXTIOI_IPMAP_START ... EXTIOI_IPMAP_END:
/*
* ipmap cannot be set at runtime; it is set only once when the
* interrupt driver initializes, so no upper irq level update is needed
*/
index = (offset - EXTIOI_IPMAP_START);
s->ipmap.reg_u8[index] = data;
break;
case EXTIOI_ENABLE_START ... EXTIOI_ENABLE_END:
index = (offset - EXTIOI_ENABLE_START);
old_data = s->enable.reg_u8[index];
s->enable.reg_u8[index] = data;
/*
* 1: enable irq.
* update irq when isr is set.
*/
data = s->enable.reg_u8[index] & ~old_data & s->isr.reg_u8[index];
extioi_enable_irq(vcpu, s, index, data, 1);
/*
* 0: disable irq.
* update irq when isr is set.
*/
data = ~s->enable.reg_u8[index] & old_data & s->isr.reg_u8[index];
extioi_enable_irq(vcpu, s, index, data, 0);
break;
case EXTIOI_BOUNCE_START ... EXTIOI_BOUNCE_END:
/* do not emulate hw bounced irq routing */
index = offset - EXTIOI_BOUNCE_START;
s->bounce.reg_u8[index] = data;
break;
case EXTIOI_COREISR_START ... EXTIOI_COREISR_END:
/* length of accessing core isr is 8 bytes */
index = (offset - EXTIOI_COREISR_START);
/* using attrs to get current cpu index */
cpu = vcpu->vcpu_id;
coreisr = data;
old_coreisr = s->coreisr.reg_u8[cpu][index];
/* write 1 to clear interrupt */
s->coreisr.reg_u8[cpu][index] = old_coreisr & ~coreisr;
coreisr &= old_coreisr;
irq = ffs(coreisr);
while (irq != 0) {
extioi_update_irq(s, irq - 1 + index * 8, 0);
coreisr &= ~(1 << (irq - 1));
irq = ffs(coreisr);
}
break;
case EXTIOI_COREMAP_START ... EXTIOI_COREMAP_END:
irq = offset - EXTIOI_COREMAP_START;
index = irq;
s->coremap.reg_u8[index] = data;
cpu = data & 0xff;
cpu = ffs(cpu) - 1;
cpu = (cpu >= 4) ? 0 : cpu;
if (s->sw_coremap[irq] == cpu)
break;
if (test_bit(irq, (unsigned long *)s->isr.reg_u8)) {
/*
* lower irq at old cpu and raise irq at new cpu
*/
extioi_update_irq(s, irq, 0);
s->sw_coremap[irq] = cpu;
extioi_update_irq(s, irq, 1);
} else
s->sw_coremap[irq] = cpu;
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
static int loongarch_extioi_writew(struct kvm_vcpu *vcpu,
struct loongarch_extioi *s,
gpa_t addr, int len, const void *val)
{
int i, index, irq, ret = 0;
u8 cpu;
u32 data, old_data;
u32 coreisr, old_coreisr;
gpa_t offset;
data = *(u32 *)val;
offset = addr - EXTIOI_BASE;
switch (offset) {
case EXTIOI_NODETYPE_START ... EXTIOI_NODETYPE_END:
index = (offset - EXTIOI_NODETYPE_START) >> 2;
s->nodetype.reg_u32[index] = data;
break;
case EXTIOI_IPMAP_START ... EXTIOI_IPMAP_END:
/*
* ipmap cannot be set at runtime; it is set only once when the
* interrupt driver initializes, so no upper irq level update is needed
*/
index = (offset - EXTIOI_IPMAP_START) >> 2;
s->ipmap.reg_u32[index] = data;
break;
case EXTIOI_ENABLE_START ... EXTIOI_ENABLE_END:
index = (offset - EXTIOI_ENABLE_START) >> 2;
old_data = s->enable.reg_u32[index];
s->enable.reg_u32[index] = data;
/*
* 1: enable irq.
* update irq when isr is set.
*/
data = s->enable.reg_u32[index] & ~old_data & s->isr.reg_u32[index];
index = index << 2;
for (i = 0; i < sizeof(data); i++) {
u8 mask = (data >> (i * 8)) & 0xff;
extioi_enable_irq(vcpu, s, index + i, mask, 1);
}
/*
* 0: disable irq.
* update irq when isr is set.
*/
data = ~s->enable.reg_u32[index] & old_data & s->isr.reg_u32[index];
for (i = 0; i < sizeof(data); i++) {
u8 mask = (data >> (i * 8)) & 0xff;
extioi_enable_irq(vcpu, s, index, mask, 0);
}
break;
case EXTIOI_BOUNCE_START ... EXTIOI_BOUNCE_END:
/* do not emulate hw bounced irq routing */
index = (offset - EXTIOI_BOUNCE_START) >> 2;
s->bounce.reg_u32[index] = data;
break;
case EXTIOI_COREISR_START ... EXTIOI_COREISR_END:
/* length of accessing core isr is 8 bytes */
index = (offset - EXTIOI_COREISR_START) >> 2;
/* using attrs to get current cpu index */
cpu = vcpu->vcpu_id;
coreisr = data;
old_coreisr = s->coreisr.reg_u32[cpu][index];
/* write 1 to clear interrupt */
s->coreisr.reg_u32[cpu][index] = old_coreisr & ~coreisr;
coreisr &= old_coreisr;
irq = ffs(coreisr);
while (irq != 0) {
extioi_update_irq(s, irq - 1 + index * 32, 0);
coreisr &= ~(1 << (irq - 1));
irq = ffs(coreisr);
}
break;
case EXTIOI_COREMAP_START ... EXTIOI_COREMAP_END:
irq = offset - EXTIOI_COREMAP_START;
index = irq >> 2;
s->coremap.reg_u32[index] = data;
for (i = 0; i < sizeof(data); i++) {
cpu = data & 0xff;
cpu = ffs(cpu) - 1;
cpu = (cpu >= 4) ? 0 : cpu;
data = data >> 8;
if (s->sw_coremap[irq + i] == cpu)
continue;
if (test_bit(irq, (unsigned long *)s->isr.reg_u8)) {
/*
* lower irq at old cpu and raise irq at new cpu
*/
extioi_update_irq(s, irq + i, 0);
s->sw_coremap[irq + i] = cpu;
extioi_update_irq(s, irq + i, 1);
} else
s->sw_coremap[irq + i] = cpu;
}
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
static int loongarch_extioi_writel(struct kvm_vcpu *vcpu,
struct loongarch_extioi *s,
gpa_t addr, int len, const void *val)
{
int i, index, irq, bits, ret = 0;
u8 cpu;
u64 data, old_data;
u64 coreisr, old_coreisr;
gpa_t offset;
data = *(u64 *)val;
offset = addr - EXTIOI_BASE;
switch (offset) {
case EXTIOI_NODETYPE_START ... EXTIOI_NODETYPE_END:
index = (offset - EXTIOI_NODETYPE_START) >> 3;
s->nodetype.reg_u64[index] = data;
break;
case EXTIOI_IPMAP_START ... EXTIOI_IPMAP_END:
/*
* ipmap cannot be set at runtime; it is set only once when the
* interrupt driver initializes, so no upper irq level update is needed
*/
index = (offset - EXTIOI_IPMAP_START) >> 3;
s->ipmap.reg_u64 = data;
break;
case EXTIOI_ENABLE_START ... EXTIOI_ENABLE_END:
index = (offset - EXTIOI_ENABLE_START) >> 3;
old_data = s->enable.reg_u64[index];
s->enable.reg_u64[index] = data;
/*
* 1: enable irq.
* update irq when isr is set.
*/
data = s->enable.reg_u64[index] & ~old_data & s->isr.reg_u64[index];
/*
* 0: disable irq.
* update irq when isr is set.
* compute this mask before index becomes a byte index.
*/
old_data = ~s->enable.reg_u64[index] & old_data & s->isr.reg_u64[index];
index = index << 3;
for (i = 0; i < sizeof(data); i++) {
u8 mask = (data >> (i * 8)) & 0xff;
extioi_enable_irq(vcpu, s, index + i, mask, 1);
}
for (i = 0; i < sizeof(data); i++) {
u8 mask = (old_data >> (i * 8)) & 0xff;
extioi_enable_irq(vcpu, s, index + i, mask, 0);
}
break;
case EXTIOI_BOUNCE_START ... EXTIOI_BOUNCE_END:
/* do not emulate hw bounced irq routing */
index = (offset - EXTIOI_BOUNCE_START) >> 3;
s->bounce.reg_u64[index] = data;
break;
case EXTIOI_COREISR_START ... EXTIOI_COREISR_END:
/* core isr is banked per cpu */
index = (offset - EXTIOI_COREISR_START) >> 3;
/* use the current vcpu id as the cpu index */
cpu = vcpu->vcpu_id;
coreisr = data;
old_coreisr = s->coreisr.reg_u64[cpu][index];
/* write 1 to clear interrupt */
s->coreisr.reg_u64[cpu][index] = old_coreisr & ~coreisr;
coreisr &= old_coreisr;
bits = sizeof(u64) * 8;
irq = find_first_bit((void *)&coreisr, bits);
while (irq < bits) {
extioi_update_irq(s, irq + index * bits, 0);
bitmap_clear((void *)&coreisr, irq, 1);
irq = find_first_bit((void *)&coreisr, bits);
}
break;
case EXTIOI_COREMAP_START ... EXTIOI_COREMAP_END:
irq = offset - EXTIOI_COREMAP_START;
index = irq >> 3;
s->coremap.reg_u64[index] = data;
for (i = 0; i < sizeof(data); i++) {
cpu = data & 0xff;
cpu = ffs(cpu) - 1;
cpu = (cpu >= 4) ? 0 : cpu;
data = data >> 8;
if (s->sw_coremap[irq + i] == cpu)
continue;
if (test_bit(irq + i, (unsigned long *)s->isr.reg_u8)) {
/*
* lower irq at old cpu and raise irq at new cpu
*/
extioi_update_irq(s, irq + i, 0);
s->sw_coremap[irq + i] = cpu;
extioi_update_irq(s, irq + i, 1);
} else
s->sw_coremap[irq + i] = cpu;
}
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
static int kvm_loongarch_extioi_write(struct kvm_vcpu *vcpu,
struct kvm_io_device *dev,
gpa_t addr, int len, const void *val)
{
int ret;
struct loongarch_extioi *extioi = vcpu->kvm->arch.extioi;
unsigned long flags;
if (!extioi) {
kvm_err("%s: extioi irqchip not valid!\n", __func__);
return -EINVAL;
}
vcpu->kvm->stat.extioi_write_exits++;
loongarch_ext_irq_lock(extioi, flags);
switch (len) {
case 1:
ret = loongarch_extioi_writeb(vcpu, extioi, addr, len, val);
break;
case 4:
ret = loongarch_extioi_writew(vcpu, extioi, addr, len, val);
break;
case 8:
ret = loongarch_extioi_writel(vcpu, extioi, addr, len, val);
break;
default:
WARN_ONCE(1, "%s: abnormal address access: addr 0x%llx, size %d\n",
__func__, addr, len);
ret = -EINVAL;
}
loongarch_ext_irq_unlock(extioi, flags);
return ret;
}
static int loongarch_extioi_readb(struct kvm_vcpu *vcpu, struct loongarch_extioi *s,
gpa_t addr, int len, void *val)
{
int index, ret = 0;
gpa_t offset;
u64 data;
offset = addr - EXTIOI_BASE;
switch (offset) {
case EXTIOI_NODETYPE_START ... EXTIOI_NODETYPE_END:
index = offset - EXTIOI_NODETYPE_START;
data = s->nodetype.reg_u8[index];
break;
case EXTIOI_IPMAP_START ... EXTIOI_IPMAP_END:
index = offset - EXTIOI_IPMAP_START;
data = s->ipmap.reg_u8[index];
break;
case EXTIOI_ENABLE_START ... EXTIOI_ENABLE_END:
index = offset - EXTIOI_ENABLE_START;
data = s->enable.reg_u8[index];
break;
case EXTIOI_BOUNCE_START ... EXTIOI_BOUNCE_END:
index = offset - EXTIOI_BOUNCE_START;
data = s->bounce.reg_u8[index];
break;
case EXTIOI_COREISR_START ... EXTIOI_COREISR_END:
/* core isr is banked per cpu */
index = offset - EXTIOI_COREISR_START;
data = s->coreisr.reg_u8[vcpu->vcpu_id][index];
break;
case EXTIOI_COREMAP_START ... EXTIOI_COREMAP_END:
index = offset - EXTIOI_COREMAP_START;
data = s->coremap.reg_u8[index];
break;
default:
ret = -EINVAL;
break;
}
*(u8 *)val = data;
return ret;
}
static int loongarch_extioi_readw(struct kvm_vcpu *vcpu, struct loongarch_extioi *s,
gpa_t addr, int len, void *val)
{
int index, ret = 0;
gpa_t offset;
u64 data;
offset = addr - EXTIOI_BASE;
switch (offset) {
case EXTIOI_NODETYPE_START ... EXTIOI_NODETYPE_END:
index = (offset - EXTIOI_NODETYPE_START) >> 2;
data = s->nodetype.reg_u32[index];
break;
case EXTIOI_IPMAP_START ... EXTIOI_IPMAP_END:
index = (offset - EXTIOI_IPMAP_START) >> 2;
data = s->ipmap.reg_u32[index];
break;
case EXTIOI_ENABLE_START ... EXTIOI_ENABLE_END:
index = (offset - EXTIOI_ENABLE_START) >> 2;
data = s->enable.reg_u32[index];
break;
case EXTIOI_BOUNCE_START ... EXTIOI_BOUNCE_END:
index = (offset - EXTIOI_BOUNCE_START) >> 2;
data = s->bounce.reg_u32[index];
break;
case EXTIOI_COREISR_START ... EXTIOI_COREISR_END:
/* core isr is banked per cpu */
index = (offset - EXTIOI_COREISR_START) >> 2;
data = s->coreisr.reg_u32[vcpu->vcpu_id][index];
break;
case EXTIOI_COREMAP_START ... EXTIOI_COREMAP_END:
index = (offset - EXTIOI_COREMAP_START) >> 2;
data = s->coremap.reg_u32[index];
break;
default:
ret = -EINVAL;
break;
}
*(u32 *)val = data;
return ret;
}
static int loongarch_extioi_readl(struct kvm_vcpu *vcpu, struct loongarch_extioi *s,
gpa_t addr, int len, void *val)
{
int index, ret = 0;
gpa_t offset;
u64 data;
offset = addr - EXTIOI_BASE;
switch (offset) {
case EXTIOI_NODETYPE_START ... EXTIOI_NODETYPE_END:
index = (offset - EXTIOI_NODETYPE_START) >> 3;
data = s->nodetype.reg_u64[index];
break;
case EXTIOI_IPMAP_START ... EXTIOI_IPMAP_END:
index = (offset - EXTIOI_IPMAP_START) >> 3;
data = s->ipmap.reg_u64;
break;
case EXTIOI_ENABLE_START ... EXTIOI_ENABLE_END:
index = (offset - EXTIOI_ENABLE_START) >> 3;
data = s->enable.reg_u64[index];
break;
case EXTIOI_BOUNCE_START ... EXTIOI_BOUNCE_END:
index = (offset - EXTIOI_BOUNCE_START) >> 3;
data = s->bounce.reg_u64[index];
break;
case EXTIOI_COREISR_START ... EXTIOI_COREISR_END:
/* core isr is banked per cpu */
index = (offset - EXTIOI_COREISR_START) >> 3;
data = s->coreisr.reg_u64[vcpu->vcpu_id][index];
break;
case EXTIOI_COREMAP_START ... EXTIOI_COREMAP_END:
index = (offset - EXTIOI_COREMAP_START) >> 3;
data = s->coremap.reg_u64[index];
break;
default:
ret = -EINVAL;
break;
}
*(u64 *)val = data;
return ret;
}
static int kvm_loongarch_extioi_read(struct kvm_vcpu *vcpu,
struct kvm_io_device *dev,
gpa_t addr, int len, void *val)
{
int ret;
struct loongarch_extioi *extioi = vcpu->kvm->arch.extioi;
unsigned long flags;
if (!extioi) {
kvm_err("%s: extioi irqchip not valid!\n", __func__);
return -EINVAL;
}
vcpu->kvm->stat.extioi_read_exits++;
loongarch_ext_irq_lock(extioi, flags);
switch (len) {
case 1:
ret = loongarch_extioi_readb(vcpu, extioi, addr, len, val);
break;
case 4:
ret = loongarch_extioi_readw(vcpu, extioi, addr, len, val);
break;
case 8:
ret = loongarch_extioi_readl(vcpu, extioi, addr, len, val);
break;
default:
WARN_ONCE(1, "%s: abnormal address access: addr 0x%llx, size %d\n",
__func__, addr, len);
ret = -EINVAL;
}
loongarch_ext_irq_unlock(extioi, flags);
return ret;
}
static const struct kvm_io_device_ops kvm_loongarch_extioi_ops = {
.read = kvm_loongarch_extioi_read,
.write = kvm_loongarch_extioi_write,
};
static int kvm_loongarch_extioi_regs_access(struct kvm_device *dev,
struct kvm_device_attr *attr,
bool is_write)
{
int len, addr;
void __user *data;
void *p = NULL;
struct loongarch_extioi *s;
unsigned long flags;
s = dev->kvm->arch.extioi;
addr = attr->attr;
data = (void __user *)attr->addr;
loongarch_ext_irq_lock(s, flags);
switch (addr) {
case EXTIOI_NODETYPE_START:
p = s->nodetype.reg_u8;
len = sizeof(s->nodetype);
break;
case EXTIOI_IPMAP_START:
p = s->ipmap.reg_u8;
len = sizeof(s->ipmap);
break;
case EXTIOI_ENABLE_START:
p = s->enable.reg_u8;
len = sizeof(s->enable);
break;
case EXTIOI_BOUNCE_START:
p = s->bounce.reg_u8;
len = sizeof(s->bounce);
break;
case EXTIOI_ISR_START:
p = s->isr.reg_u8;
len = sizeof(s->isr);
break;
case EXTIOI_COREISR_START:
p = s->coreisr.reg_u8;
len = sizeof(s->coreisr);
break;
case EXTIOI_COREMAP_START:
p = s->coremap.reg_u8;
len = sizeof(s->coremap);
break;
case EXTIOI_SW_COREMAP_FLAG:
p = s->sw_coremap;
len = sizeof(s->sw_coremap);
break;
default:
loongarch_ext_irq_unlock(s, flags);
kvm_err("%s: unknown extioi register, addr = %d\n", __func__, addr);
return -EINVAL;
}
loongarch_ext_irq_unlock(s, flags);
if (is_write) {
if (copy_from_user(p, data, len))
return -EFAULT;
} else {
if (copy_to_user(data, p, len))
return -EFAULT;
}
if ((addr == EXTIOI_COREISR_START) && is_write) {
loongarch_ext_irq_lock(s, flags);
extioi_set_sw_coreisr(s);
loongarch_ext_irq_unlock(s, flags);
}
return 0;
}
static int kvm_loongarch_extioi_get_attr(struct kvm_device *dev,
struct kvm_device_attr *attr)
{
if (attr->group == KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS)
return kvm_loongarch_extioi_regs_access(dev, attr, false);
return -EINVAL;
}
static int kvm_loongarch_extioi_set_attr(struct kvm_device *dev,
struct kvm_device_attr *attr)
{
if (attr->group == KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS)
return kvm_loongarch_extioi_regs_access(dev, attr, true);
return -EINVAL;
}
static void kvm_loongarch_extioi_destroy(struct kvm_device *dev)
{
struct kvm *kvm;
struct loongarch_extioi *extioi;
struct kvm_io_device *device;
if (!dev)
return;
kvm = dev->kvm;
if (!kvm)
return;
extioi = kvm->arch.extioi;
if (!extioi)
return;
device = &extioi->device;
kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, device);
kfree(extioi);
}
static int kvm_loongarch_extioi_create(struct kvm_device *dev, u32 type)
{
int ret;
struct loongarch_extioi *s;
struct kvm_io_device *device;
struct kvm *kvm = dev->kvm;
/* the extioi device has already been created */
if (kvm->arch.extioi)
return -EINVAL;
s = kzalloc(sizeof(struct loongarch_extioi), GFP_KERNEL);
if (!s)
return -ENOMEM;
spin_lock_init(&s->lock);
s->kvm = kvm;
/*
* Initialize IOCSR device
*/
device = &s->device;
kvm_iodevice_init(device, &kvm_loongarch_extioi_ops);
mutex_lock(&kvm->slots_lock);
ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS, EXTIOI_BASE, EXTIOI_SIZE, device);
mutex_unlock(&kvm->slots_lock);
if (ret < 0) {
kfree(s);
return -EFAULT;
}
kvm->arch.extioi = s;
kvm_info("created extioi device successfully\n");
return 0;
}
static struct kvm_device_ops kvm_loongarch_extioi_dev_ops = {
.name = "kvm-loongarch-extioi",
.create = kvm_loongarch_extioi_create,
.destroy = kvm_loongarch_extioi_destroy,
.set_attr = kvm_loongarch_extioi_set_attr,
.get_attr = kvm_loongarch_extioi_get_attr,
};
int kvm_loongarch_register_extioi_device(void)
{
return kvm_register_device_ops(&kvm_loongarch_extioi_dev_ops,
KVM_DEV_TYPE_LA_EXTIOI);
}
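
For context, a VMM would instantiate this in-kernel device through the generic KVM device API. A hedged userspace sketch (error handling trimmed; KVM_DEV_TYPE_LA_EXTIOI is the type registered above, and vm_fd is assumed to be an already-created VM file descriptor):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    int create_extioi(int vm_fd)
    {
        struct kvm_create_device cd = {
            .type = KVM_DEV_TYPE_LA_EXTIOI, /* registered by this patch */
        };

        if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
            return -1;
        /* cd.fd can now be used with KVM_SET/GET_DEVICE_ATTR on the
         * KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS group */
        return cd.fd;
    }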

View File

@ -0,0 +1,538 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2024 Loongson Technology Corporation Limited
*/
#include <linux/kvm_host.h>
#include <asm/kvm_ipi.h>
#include <asm/kvm_vcpu.h>
static void ipi_send(struct kvm *kvm, uint64_t data)
{
struct kvm_vcpu *vcpu;
struct kvm_interrupt irq;
int cpu, action, status;
cpu = ((data & 0xffffffff) >> 16) & 0x3ff;
vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu);
if (unlikely(vcpu == NULL)) {
kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
return;
}
action = 1 << (data & 0x1f);
spin_lock(&vcpu->arch.ipi_state.lock);
status = vcpu->arch.ipi_state.status;
vcpu->arch.ipi_state.status |= action;
if (status == 0) {
irq.irq = LARCH_INT_IPI;
kvm_vcpu_ioctl_interrupt(vcpu, &irq);
}
spin_unlock(&vcpu->arch.ipi_state.lock);
}
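
ipi_send() unpacks a single 64-bit IOCSR value; the field positions used above (target cpu in bits 25:16, action bit index in bits 4:0) can be restated as a small self-contained decoder sketch:

    #include <stdint.h>

    struct ipi_send_fields {
        unsigned int cpu;    /* target vcpu id           */
        unsigned int action; /* bit ORed into the status */
    };

    static inline struct ipi_send_fields decode_ipi_send(uint64_t data)
    {
        struct ipi_send_fields f;

        f.cpu    = ((data & 0xffffffff) >> 16) & 0x3ff; /* bits 25:16 */
        f.action = 1u << (data & 0x1f);                 /* bits 4:0   */
        return f;
    }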
static void ipi_clear(struct kvm_vcpu *vcpu, uint64_t data)
{
struct kvm_interrupt irq;
spin_lock(&vcpu->arch.ipi_state.lock);
vcpu->arch.ipi_state.status &= ~data;
if (!vcpu->arch.ipi_state.status) {
irq.irq = -LARCH_INT_IPI;
kvm_vcpu_ioctl_interrupt(vcpu, &irq);
}
spin_unlock(&vcpu->arch.ipi_state.lock);
}
static uint64_t read_mailbox(struct kvm_vcpu *vcpu, int offset, int len)
{
void *pbuf;
uint64_t ret = 0;
spin_lock(&vcpu->arch.ipi_state.lock);
pbuf = (void *)vcpu->arch.ipi_state.buf + (offset - 0x20);
if (len == 1)
ret = *(unsigned char *)pbuf;
else if (len == 2)
ret = *(unsigned short *)pbuf;
else if (len == 4)
ret = *(unsigned int *)pbuf;
else if (len == 8)
ret = *(unsigned long *)pbuf;
else
kvm_err("%s: unknown data len: %d\n", __func__, len);
spin_unlock(&vcpu->arch.ipi_state.lock);
return ret;
}
static void write_mailbox(struct kvm_vcpu *vcpu, int offset,
uint64_t data, int len)
{
void *pbuf;
spin_lock(&vcpu->arch.ipi_state.lock);
pbuf = (void *)vcpu->arch.ipi_state.buf + (offset - 0x20);
if (len == 1)
*(unsigned char *)pbuf = (unsigned char)data;
else if (len == 2)
*(unsigned short *)pbuf = (unsigned short)data;
else if (len == 4)
*(unsigned int *)pbuf = (unsigned int)data;
else if (len == 8)
*(unsigned long *)pbuf = (unsigned long)data;
else
kvm_err("%s: unknown data len: %d\n", __func__, len);
spin_unlock(&vcpu->arch.ipi_state.lock);
}
static int loongarch_ipi_writel(struct kvm_vcpu *vcpu, gpa_t addr,
int len, const void *val)
{
uint64_t data;
uint32_t offset;
int ret = 0;
data = *(uint64_t *)val;
offset = (uint32_t)(addr & 0xff);
WARN_ON_ONCE(offset & (len - 1));
switch (offset) {
case CORE_STATUS_OFF:
kvm_err("CORE_STATUS_OFF can't be written\n");
ret = -EINVAL;
break;
case CORE_EN_OFF:
spin_lock(&vcpu->arch.ipi_state.lock);
vcpu->arch.ipi_state.en = data;
spin_unlock(&vcpu->arch.ipi_state.lock);
break;
case IOCSR_IPI_SEND:
ipi_send(vcpu->kvm, data);
break;
case CORE_SET_OFF:
kvm_info("CORE_SET_OFF emulation is not implemented\n");
ret = -EINVAL;
break;
case CORE_CLEAR_OFF:
/* Just clear the status of the current vcpu */
ipi_clear(vcpu, data);
break;
case CORE_BUF_20 ... CORE_BUF_38 + 7:
if (offset + len > CORE_BUF_38 + 8) {
kvm_err("%s: invalid offset or len: offset = %d, len = %d\n",
__func__, offset, len);
ret = -EINVAL;
break;
}
write_mailbox(vcpu, offset, data, len);
break;
default:
kvm_err("%s: unknown addr: %llx\n", __func__, addr);
ret = -EINVAL;
break;
}
return ret;
}
static int loongarch_ipi_readl(struct kvm_vcpu *vcpu, gpa_t addr,
int len, void *val)
{
uint32_t offset;
uint64_t res = 0;
int ret = 0;
offset = (uint32_t)(addr & 0xff);
WARN_ON_ONCE(offset & (len - 1));
switch (offset) {
case CORE_STATUS_OFF:
spin_lock(&vcpu->arch.ipi_state.lock);
res = vcpu->arch.ipi_state.status;
spin_unlock(&vcpu->arch.ipi_state.lock);
break;
case CORE_EN_OFF:
spin_lock(&vcpu->arch.ipi_state.lock);
res = vcpu->arch.ipi_state.en;
spin_unlock(&vcpu->arch.ipi_state.lock);
break;
case CORE_SET_OFF:
res = 0;
break;
case CORE_CLEAR_OFF:
res = 0;
break;
case CORE_BUF_20 ... CORE_BUF_38 + 7:
if (offset + len > CORE_BUF_38 + 8) {
kvm_err("%s: invalid offset or len: offset = %d, len = %d\n",
__func__, offset, len);
ret = -EINVAL;
break;
}
res = read_mailbox(vcpu, offset, len);
break;
default:
kvm_err("%s: unknown addr: %llx\n", __func__, addr);
ret = -EINVAL;
break;
}
*(uint64_t *)val = res;
return ret;
}
static int kvm_loongarch_ipi_write(struct kvm_vcpu *vcpu,
struct kvm_io_device *dev,
gpa_t addr, int len, const void *val)
{
struct loongarch_ipi *ipi;
int ret;
ipi = vcpu->kvm->arch.ipi;
if (!ipi) {
kvm_err("%s: ipi irqchip not valid!\n", __func__);
return -EINVAL;
}
ipi->kvm->stat.ipi_write_exits++;
ret = loongarch_ipi_writel(vcpu, addr, len, val);
return ret;
}
static int kvm_loongarch_ipi_read(struct kvm_vcpu *vcpu,
struct kvm_io_device *dev,
gpa_t addr, int len, void *val)
{
struct loongarch_ipi *ipi;
int ret;
ipi = vcpu->kvm->arch.ipi;
if (!ipi) {
kvm_err("%s: ipi irqchip not valid!\n", __func__);
return -EINVAL;
}
ipi->kvm->stat.ipi_read_exits++;
ret = loongarch_ipi_readl(vcpu, addr, len, val);
return ret;
}
static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data)
{
int i, ret;
uint32_t val = 0, mask = 0;
/*
* Bits 27-30 form the per-byte write mask.
* If the mask is 0, there is no old data to preserve.
*/
if ((data >> 27) & 0xf) {
/* Read the old val */
ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val);
if (unlikely(ret)) {
kvm_err("%s: read data from addr %llx failed\n", __func__, addr);
return ret;
}
/* Construct the mask by scanning the bit 27-30 */
for (i = 0; i < 4; i++) {
if (data & (0x1 << (27 + i)))
mask |= (0xff << (i * 8));
}
/* Save the old part of val */
val &= mask;
}
val |= ((uint32_t)(data >> 32) & ~mask);
ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val);
if (unlikely(ret))
kvm_err("%s: write data to addr %llx failed\n", __func__, addr);
return ret;
}
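
A worked example of the byte-mask logic in send_ipi_data(), as a standalone sketch with made-up values: when mask bits 27 and 28 are set, the two low bytes of the old 32-bit word are preserved and only the upper two bytes take the new data from bits 63:32.

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        uint64_t data = ((uint64_t)0xaabbccdd << 32) | (0x3ull << 27);
        uint32_t old  = 0x11223344, mask = 0, val;
        int i;

        for (i = 0; i < 4; i++)
            if (data & (1ull << (27 + i)))
                mask |= 0xffu << (i * 8);

        /* preserved bytes come from old, the rest from data >> 32 */
        val = (old & mask) | ((uint32_t)(data >> 32) & ~mask);
        assert(val == 0xaabb3344);
        return 0;
    }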
static int mail_send(struct kvm *kvm, uint64_t data)
{
struct kvm_vcpu *vcpu;
int cpu, mailbox;
int offset, ret;
cpu = ((data & 0xffffffff) >> 16) & 0x3ff;
vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu);
if (unlikely(vcpu == NULL)) {
kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
return -EINVAL;
}
mailbox = ((data & 0xffffffff) >> 2) & 0x7;
offset = SMP_MAILBOX + CORE_BUF_20 + mailbox * 4;
ret = send_ipi_data(vcpu, offset, data);
return ret;
}
static int any_send(struct kvm *kvm, uint64_t data)
{
struct kvm_vcpu *vcpu;
int cpu, offset, ret;
cpu = ((data & 0xffffffff) >> 16) & 0x3ff;
vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu);
if (unlikely(vcpu == NULL)) {
kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
return -EINVAL;
}
offset = data & 0xffff;
ret = send_ipi_data(vcpu, offset, data);
return ret;
}
static int kvm_loongarch_mail_write(struct kvm_vcpu *vcpu,
struct kvm_io_device *dev,
gpa_t addr, int len, const void *val)
{
struct loongarch_ipi *ipi;
int ret;
ipi = vcpu->kvm->arch.ipi;
if (!ipi) {
kvm_err("%s: ipi irqchip not valid!\n", __func__);
return -EINVAL;
}
addr &= 0xfff;
addr -= IOCSR_MAIL_SEND;
switch (addr) {
case MAIL_SEND_OFFSET:
ret = mail_send(vcpu->kvm, *(uint64_t *)val);
break;
case ANY_SEND_OFFSET:
ret = any_send(vcpu->kvm, *(uint64_t *)val);
break;
default:
kvm_err("%s: invalid addr %llx!\n", __func__, addr);
ret = -EINVAL;
break;
}
return ret;
}
static const struct kvm_io_device_ops kvm_loongarch_ipi_ops = {
.read = kvm_loongarch_ipi_read,
.write = kvm_loongarch_ipi_write,
};
static const struct kvm_io_device_ops kvm_loongarch_mail_ops = {
.write = kvm_loongarch_mail_write,
};
static int kvm_loongarch_ipi_regs_access(struct kvm_device *dev,
struct kvm_device_attr *attr,
bool is_write)
{
uint64_t val;
int cpu, addr;
void *p = NULL;
int len = 4;
struct kvm_vcpu *vcpu;
cpu = (attr->attr >> 16) & 0x3ff;
addr = attr->attr & 0xff;
vcpu = kvm_get_vcpu(dev->kvm, cpu);
if (unlikely(vcpu == NULL)) {
kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
return -EINVAL;
}
switch (addr) {
case CORE_STATUS_OFF:
p = &vcpu->arch.ipi_state.status;
break;
case CORE_EN_OFF:
p = &vcpu->arch.ipi_state.en;
break;
case CORE_SET_OFF:
p = &vcpu->arch.ipi_state.set;
break;
case CORE_CLEAR_OFF:
p = &vcpu->arch.ipi_state.clear;
break;
case CORE_BUF_20:
p = &vcpu->arch.ipi_state.buf[0];
len = 8;
break;
case CORE_BUF_28:
p = &vcpu->arch.ipi_state.buf[1];
len = 8;
break;
case CORE_BUF_30:
p = &vcpu->arch.ipi_state.buf[2];
len = 8;
break;
case CORE_BUF_38:
p = &vcpu->arch.ipi_state.buf[3];
len = 8;
break;
default:
kvm_err("%s: unknown ipi register, addr = %d\n", __func__, addr);
return -EINVAL;
}
if (is_write) {
if (len == 4) {
if (get_user(val, (uint32_t __user *)attr->addr))
return -EFAULT;
*(uint32_t *)p = (uint32_t)val;
} else if (len == 8) {
if (get_user(val, (uint64_t __user *)attr->addr))
return -EFAULT;
*(uint64_t *)p = val;
}
} else {
if (len == 4) {
val = *(uint32_t *)p;
return put_user(val, (uint32_t __user *)attr->addr);
} else if (len == 8) {
val = *(uint64_t *)p;
return put_user(val, (uint64_t __user *)attr->addr);
}
}
return 0;
}
static int kvm_loongarch_ipi_get_attr(struct kvm_device *dev,
struct kvm_device_attr *attr)
{
switch (attr->group) {
case KVM_DEV_LOONGARCH_IPI_GRP_REGS:
return kvm_loongarch_ipi_regs_access(dev, attr, false);
default:
kvm_err("%s: unknown group (%d)\n", __func__, attr->group);
return -EINVAL;
}
}
static int kvm_loongarch_ipi_set_attr(struct kvm_device *dev,
struct kvm_device_attr *attr)
{
switch (attr->group) {
case KVM_DEV_LOONGARCH_IPI_GRP_REGS:
return kvm_loongarch_ipi_regs_access(dev, attr, true);
default:
kvm_err("%s: unknown group (%d)\n", __func__, attr->group);
return -EINVAL;
}
}
static void kvm_loongarch_ipi_destroy(struct kvm_device *dev)
{
struct kvm *kvm;
struct loongarch_ipi *ipi;
struct kvm_io_device *device;
if (!dev)
return;
kvm = dev->kvm;
if (!kvm)
return;
ipi = kvm->arch.ipi;
if (!ipi)
return;
device = &ipi->device;
kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, device);
device = &ipi->mail_dev;
kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, device);
kfree(ipi);
}
static int kvm_loongarch_ipi_create(struct kvm_device *dev, u32 type)
{
struct kvm *kvm;
struct loongarch_ipi *s;
unsigned long addr;
struct kvm_io_device *device;
int ret;
kvm_info("begin to create loongarch ipi device in kvm\n");
if (!dev) {
kvm_err("%s: kvm_device ptr is invalid!\n", __func__);
return -EINVAL;
}
kvm = dev->kvm;
if (kvm->arch.ipi) {
kvm_err("%s: loongarch ipi has been created!\n", __func__);
return -EINVAL;
}
s = kzalloc(sizeof(struct loongarch_ipi), GFP_KERNEL);
if (!s)
return -ENOMEM;
spin_lock_init(&s->lock);
s->kvm = kvm;
/*
* Initialize IOCSR device
*/
device = &s->device;
kvm_iodevice_init(device, &kvm_loongarch_ipi_ops);
addr = SMP_MAILBOX;
mutex_lock(&kvm->slots_lock);
ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS, addr,
KVM_IOCSR_IPI_ADDR_SIZE, device);
mutex_unlock(&kvm->slots_lock);
if (ret < 0) {
kvm_err("%s: initialize IOCSR dev failed, ret = %d\n", __func__, ret);
goto err;
}
device = &s->mail_dev;
kvm_iodevice_init(device, &kvm_loongarch_mail_ops);
addr = MAIL_SEND_ADDR;
mutex_lock(&kvm->slots_lock);
ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS, addr,
KVM_IOCSR_MAIL_ADDR_SIZE, device);
mutex_unlock(&kvm->slots_lock);
if (ret < 0) {
device = &s->device;
kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, device);
kvm_err("%s: initialize mail box dev failed, ret = %d\n", __func__, ret);
goto err;
}
kvm->arch.ipi = s;
kvm_info("loongarch ipi device created successfully\n");
return 0;
err:
kfree(s);
return -EFAULT;
}
static struct kvm_device_ops kvm_loongarch_ipi_dev_ops = {
.name = "kvm-loongarch-ipi",
.create = kvm_loongarch_ipi_create,
.destroy = kvm_loongarch_ipi_destroy,
.set_attr = kvm_loongarch_ipi_set_attr,
.get_attr = kvm_loongarch_ipi_get_attr,
};
int kvm_loongarch_register_ipi_device(void)
{
return kvm_register_device_ops(&kvm_loongarch_ipi_dev_ops,
KVM_DEV_TYPE_LA_IPI);
}

View File

@ -0,0 +1,540 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2024 Loongson Technology Corporation Limited
*/
#include <asm/kvm_extioi.h>
#include <asm/kvm_pch_pic.h>
#include <asm/kvm_vcpu.h>
#include <linux/count_zeros.h>
/* update the isr according to irq level and route irq to extioi */
static void pch_pic_update_irq(struct loongarch_pch_pic *s, int irq, int level)
{
u64 mask = 1ULL << irq;
/*
* set isr and route irq to extioi and
* the route table is in htmsi_vector[]
*/
if (level) {
if (mask & s->irr & ~s->mask) {
s->isr |= mask;
irq = s->htmsi_vector[irq];
extioi_set_irq(s->kvm->arch.extioi, irq, level);
}
} else {
if (mask & s->isr & ~s->irr) {
s->isr &= ~mask;
irq = s->htmsi_vector[irq];
extioi_set_irq(s->kvm->arch.extioi, irq, level);
}
}
}
/* msi irq handler */
void pch_msi_set_irq(struct kvm *kvm, int irq, int level)
{
extioi_set_irq(kvm->arch.extioi, irq, level);
}
/* called when a irq is triggered in pch pic */
void pch_pic_set_irq(struct loongarch_pch_pic *s, int irq, int level)
{
u64 mask = 1ULL << irq;
spin_lock(&s->lock);
if (level) {
/* set irr */
s->irr |= mask;
} else {
/*
* A low level on an edge-triggered irq does not clear it:
* the irr bit is only cleared when the cpu writes to the
* PCH_PIC_CLEAR_START address area
*/
if (s->edge & mask) {
spin_unlock(&s->lock);
return;
}
s->irr &= ~mask;
}
pch_pic_update_irq(s, irq, level);
spin_unlock(&s->lock);
}
/* update batch irqs, the irq_mask is a bitmap of irqs */
static void pch_pic_update_batch_irqs(struct loongarch_pch_pic *s, u64 irq_mask, int level)
{
int irq, bits;
/* find each irq by irqs bitmap and update each irq */
bits = sizeof(irq_mask) * 8;
irq = find_first_bit((void *)&irq_mask, bits);
while (irq < bits) {
pch_pic_update_irq(s, irq, level);
bitmap_clear((void *)&irq_mask, irq, 1);
irq = find_first_bit((void *)&irq_mask, bits);
}
}
/*
* The pch pic registers are 64-bit but are accessed in 32-bit
* halves; 'high' selects whether the low or the high 32 bits
* are read.
*/
static u32 pch_pic_read_reg(u64 *s, int high)
{
u64 val = *s;
/* read the high 32 bits when the high is 1 */
return high ? (u32)(val >> 32) : (u32)val;
}
/*
* The pch pic registers are 64-bit but are accessed in 32-bit
* halves; 'high' selects whether the low or the high 32 bits
* are written.
*/
static u32 pch_pic_write_reg(u64 *s, int high, u32 v)
{
u64 val = *s, data = v;
if (high) {
/*
* clear the high 32 bits of val and
* write the new high 32 bits
*/
*s = (val << 32 >> 32) | (data << 32);
val >>= 32;
} else {
/*
* clear the low 32 bits of val and
* write the new low 32 bits
*/
*s = (val >> 32 << 32) | v;
}
return (u32)val;
}
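
The two helpers maintain a simple invariant: the 64-bit register is the concatenation of two independently accessible 32-bit halves. A minimal standalone check of that invariant:

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        uint64_t reg = 0;

        /* write the low half (high == 0), then the high half (high == 1) */
        reg = (reg >> 32 << 32) | 0x11111111u;
        reg = (reg << 32 >> 32) | ((uint64_t)0x22222222u << 32);
        assert(reg == 0x2222222211111111ull);

        /* read back each half */
        assert((uint32_t)reg == 0x11111111u);
        assert((uint32_t)(reg >> 32) == 0x22222222u);
        return 0;
    }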
static int loongarch_pch_pic_write(struct loongarch_pch_pic *s, gpa_t addr,
int len, const void *val)
{
u32 old, data, offset, index;
u64 irq;
int ret;
ret = 0;
data = *(u32 *)val;
offset = addr - s->pch_pic_base;
spin_lock(&s->lock);
switch (offset) {
case PCH_PIC_MASK_START ... PCH_PIC_MASK_END:
offset -= PCH_PIC_MASK_START;
/* get whether high or low 32 bits we want to write */
index = offset >> 2;
old = pch_pic_write_reg(&s->mask, index, data);
/* enable irq when mask value change to 0 */
irq = (old & ~data) << (32 * index);
pch_pic_update_batch_irqs(s, irq, 1);
/* disable irq when mask value change to 1 */
irq = (~old & data) << (32 * index);
pch_pic_update_batch_irqs(s, irq, 0);
break;
case PCH_PIC_HTMSI_EN_START ... PCH_PIC_HTMSI_EN_END:
offset -= PCH_PIC_HTMSI_EN_START;
index = offset >> 2;
pch_pic_write_reg(&s->htmsi_en, index, data);
break;
case PCH_PIC_EDGE_START ... PCH_PIC_EDGE_END:
offset -= PCH_PIC_EDGE_START;
index = offset >> 2;
/* 1: edge triggered, 0: level triggered */
pch_pic_write_reg(&s->edge, index, data);
break;
case PCH_PIC_CLEAR_START ... PCH_PIC_CLEAR_END:
offset -= PCH_PIC_CLEAR_START;
index = offset >> 2;
/* write 1 to clear edge irq */
old = pch_pic_read_reg(&s->irr, index);
/*
* get the irq bitmap which is edge triggered and
* already set and to be cleared
*/
irq = old & pch_pic_read_reg(&s->edge, index) & data;
/* write irr to the new state where irqs have been cleared */
pch_pic_write_reg(&s->irr, index, old & ~irq);
/* update cleared irqs */
pch_pic_update_batch_irqs(s, irq, 0);
break;
case PCH_PIC_AUTO_CTRL0_START ... PCH_PIC_AUTO_CTRL0_END:
offset -= PCH_PIC_AUTO_CTRL0_START;
index = offset >> 2;
/* we only use default mode: fixed interrupt distribution mode */
pch_pic_write_reg(&s->auto_ctrl0, index, 0);
break;
case PCH_PIC_AUTO_CTRL1_START ... PCH_PIC_AUTO_CTRL1_END:
offset -= PCH_PIC_AUTO_CTRL1_START;
index = offset >> 2;
/* we only use default mode: fixed interrupt distribution mode */
pch_pic_write_reg(&s->auto_ctrl1, index, 0);
break;
case PCH_PIC_ROUTE_ENTRY_START ... PCH_PIC_ROUTE_ENTRY_END:
offset -= PCH_PIC_ROUTE_ENTRY_START;
/* only route to int0: extioi */
s->route_entry[offset] = 1;
break;
case PCH_PIC_HTMSI_VEC_START ... PCH_PIC_HTMSI_VEC_END:
/* route table to extioi */
offset -= PCH_PIC_HTMSI_VEC_START;
s->htmsi_vector[offset] = (u8)data;
break;
case PCH_PIC_POLARITY_START ... PCH_PIC_POLARITY_END:
offset -= PCH_PIC_POLARITY_START;
index = offset >> 2;
/* we only use default value 0: high level triggered */
pch_pic_write_reg(&s->polarity, index, 0);
break;
default:
ret = -EINVAL;
break;
}
spin_unlock(&s->lock);
return ret;
}
static int kvm_loongarch_pch_pic_write(struct kvm_vcpu *vcpu,
struct kvm_io_device *dev,
gpa_t addr, int len, const void *val)
{
int ret;
struct loongarch_pch_pic *s = vcpu->kvm->arch.pch_pic;
if (!s) {
kvm_err("%s: pch pic irqchip not valid!\n", __func__);
return -EINVAL;
}
/* statistics of pch pic writing */
vcpu->kvm->stat.pch_pic_write_exits++;
ret = loongarch_pch_pic_write(s, addr, len, val);
return ret;
}
static int loongarch_pch_pic_read(struct loongarch_pch_pic *s, gpa_t addr, int len, void *val)
{
int offset, index, ret = 0;
u32 data = 0;
u64 int_id = 0;
offset = addr - s->pch_pic_base;
spin_lock(&s->lock);
switch (offset) {
case PCH_PIC_INT_ID_START ... PCH_PIC_INT_ID_END:
/* int id version */
int_id |= (u64)PCH_PIC_INT_ID_VER << 32;
/* irq number */
int_id |= (u64)31 << (32 + 16);
/* int id value */
int_id |= PCH_PIC_INT_ID_VAL;
*(u64 *)val = int_id;
break;
case PCH_PIC_MASK_START ... PCH_PIC_MASK_END:
offset -= PCH_PIC_MASK_START;
index = offset >> 2;
/* read mask reg */
data = pch_pic_read_reg(&s->mask, index);
*(u32 *)val = data;
break;
case PCH_PIC_HTMSI_EN_START ... PCH_PIC_HTMSI_EN_END:
offset -= PCH_PIC_HTMSI_EN_START;
index = offset >> 2;
/* read htmsi enable reg */
data = pch_pic_read_reg(&s->htmsi_en, index);
*(u32 *)val = data;
break;
case PCH_PIC_EDGE_START ... PCH_PIC_EDGE_END:
offset -= PCH_PIC_EDGE_START;
index = offset >> 2;
/* read edge enable reg */
data = pch_pic_read_reg(&s->edge, index);
*(u32 *)val = data;
break;
case PCH_PIC_AUTO_CTRL0_START ... PCH_PIC_AUTO_CTRL0_END:
case PCH_PIC_AUTO_CTRL1_START ... PCH_PIC_AUTO_CTRL1_END:
/* we only use default mode: fixed interrupt distribution mode */
*(u32 *)val = 0;
break;
case PCH_PIC_ROUTE_ENTRY_START ... PCH_PIC_ROUTE_ENTRY_END:
/* only route to int0: extioi */
*(u8 *)val = 1;
break;
case PCH_PIC_HTMSI_VEC_START ... PCH_PIC_HTMSI_VEC_END:
offset -= PCH_PIC_HTMSI_VEC_START;
/* read htmsi vector */
data = s->htmsi_vector[offset];
*(u8 *)val = data;
break;
case PCH_PIC_POLARITY_START ... PCH_PIC_POLARITY_END:
/* we only use default value 0: high level triggered */
*(u32 *)val = 0;
break;
default:
ret = -EINVAL;
}
spin_unlock(&s->lock);
return ret;
}
static int kvm_loongarch_pch_pic_read(struct kvm_vcpu *vcpu,
struct kvm_io_device *dev,
gpa_t addr, int len, void *val)
{
int ret;
struct loongarch_pch_pic *s = vcpu->kvm->arch.pch_pic;
if (!s) {
kvm_err("%s: pch pic irqchip not valid!\n", __func__);
return -EINVAL;
}
/* statistics of pch pic reading */
vcpu->kvm->stat.pch_pic_read_exits++;
ret = loongarch_pch_pic_read(s, addr, len, val);
return ret;
}
static const struct kvm_io_device_ops kvm_loongarch_pch_pic_ops = {
.read = kvm_loongarch_pch_pic_read,
.write = kvm_loongarch_pch_pic_write,
};
static int kvm_loongarch_pch_pic_init(struct kvm_device *dev, u64 addr)
{
int ret;
struct loongarch_pch_pic *s = dev->kvm->arch.pch_pic;
struct kvm_io_device *device;
struct kvm *kvm = dev->kvm;
s->pch_pic_base = addr;
device = &s->device;
/* init device by pch pic writing and reading ops */
kvm_iodevice_init(device, &kvm_loongarch_pch_pic_ops);
mutex_lock(&kvm->slots_lock);
/* register pch pic device */
ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, PCH_PIC_SIZE, device);
mutex_unlock(&kvm->slots_lock);
if (ret < 0)
return -EFAULT;
return 0;
}
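
Userspace has to supply the MMIO base through the control group before the device is reachable; a hedged sketch of that call (dev_fd is assumed to be the fd returned by KVM_CREATE_DEVICE for this device, and the group/attr constants come from this patch's uapi headers):

    #include <linux/kvm.h>
    #include <stdint.h>
    #include <sys/ioctl.h>

    int pch_pic_init(int dev_fd, uint64_t base)
    {
        struct kvm_device_attr attr = {
            .group = KVM_DEV_LOONGARCH_PCH_PIC_GRP_CTRL,
            .attr  = KVM_DEV_LOONGARCH_PCH_PIC_CTRL_INIT,
            .addr  = (uint64_t)&base, /* kernel copies the base from here */
        };

        return ioctl(dev_fd, KVM_SET_DEVICE_ATTR, &attr);
    }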
/* used by user space to get or set pch pic registers */
static int kvm_loongarch_pch_pic_regs_access(struct kvm_device *dev,
struct kvm_device_attr *attr,
bool is_write)
{
int addr, len = 8, ret = 0;
void __user *data;
void *p = NULL;
struct loongarch_pch_pic *s;
s = dev->kvm->arch.pch_pic;
addr = attr->attr;
data = (void __user *)attr->addr;
spin_lock(&s->lock);
/* get pointer to pch pic register by addr */
switch (addr) {
case PCH_PIC_MASK_START:
p = &s->mask;
break;
case PCH_PIC_HTMSI_EN_START:
p = &s->htmsi_en;
break;
case PCH_PIC_EDGE_START:
p = &s->edge;
break;
case PCH_PIC_AUTO_CTRL0_START:
p = &s->auto_ctrl0;
break;
case PCH_PIC_AUTO_CTRL1_START:
p = &s->auto_ctrl1;
break;
case PCH_PIC_ROUTE_ENTRY_START:
p = s->route_entry;
len = 64;
break;
case PCH_PIC_HTMSI_VEC_START:
p = s->htmsi_vector;
len = 64;
break;
case PCH_PIC_INT_IRR_START:
p = &s->irr;
break;
case PCH_PIC_INT_ISR_START:
p = &s->isr;
break;
case PCH_PIC_POLARITY_START:
p = &s->polarity;
break;
default:
ret = -EINVAL;
}
/* write or read value according to is_write */
if (is_write) {
if (copy_from_user(p, data, len))
ret = -EFAULT;
} else {
if (copy_to_user(data, p, len))
ret = -EFAULT;
}
spin_unlock(&s->lock);
return ret;
}
static int kvm_loongarch_pch_pic_get_attr(struct kvm_device *dev,
struct kvm_device_attr *attr)
{
/* only support pch pic group registers */
if (attr->group == KVM_DEV_LOONGARCH_PCH_PIC_GRP_REGS)
return kvm_loongarch_pch_pic_regs_access(dev, attr, false);
return -EINVAL;
}
static int kvm_loongarch_pch_pic_set_attr(struct kvm_device *dev,
struct kvm_device_attr *attr)
{
int ret = -EINVAL;
u64 addr;
void __user *uaddr = (void __user *)(long)attr->addr;
switch (attr->group) {
case KVM_DEV_LOONGARCH_PCH_PIC_GRP_CTRL:
switch (attr->attr) {
case KVM_DEV_LOONGARCH_PCH_PIC_CTRL_INIT:
if (copy_from_user(&addr, uaddr, sizeof(addr)))
return -EFAULT;
if (!dev->kvm->arch.pch_pic) {
kvm_err("%s: please create pch_pic irqchip first!\n", __func__);
ret = -EFAULT;
break;
}
ret = kvm_loongarch_pch_pic_init(dev, addr);
break;
default:
kvm_err("%s: unknown group (%d) attr (%lld)\n", __func__, attr->group,
attr->attr);
ret = -EINVAL;
break;
}
break;
case KVM_DEV_LOONGARCH_PCH_PIC_GRP_REGS:
ret = kvm_loongarch_pch_pic_regs_access(dev, attr, true);
break;
default:
break;
}
return ret;
}
static int kvm_setup_default_irq_routing(struct kvm *kvm)
{
struct kvm_irq_routing_entry *entries;
u32 nr = KVM_IRQCHIP_NUM_PINS;
int i, ret;
entries = kcalloc(nr, sizeof(*entries), GFP_KERNEL);
if (!entries)
return -ENOMEM;
for (i = 0; i < nr; i++) {
entries[i].gsi = i;
entries[i].type = KVM_IRQ_ROUTING_IRQCHIP;
entries[i].u.irqchip.irqchip = 0;
entries[i].u.irqchip.pin = i;
}
ret = kvm_set_irq_routing(kvm, entries, nr, 0);
kfree(entries);
return ret;
}
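
The default table is a pure identity map: gsi n drives pin n of irqchip 0. If userspace wanted to install an equivalent table itself, the stock KVM_SET_GSI_ROUTING ioctl would look roughly like this sketch:

    #include <linux/kvm.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>

    int set_identity_routing(int vm_fd, unsigned int nr)
    {
        struct kvm_irq_routing *table;
        unsigned int i;
        int ret;

        table = calloc(1, sizeof(*table) + nr * sizeof(table->entries[0]));
        if (!table)
            return -1;
        table->nr = nr;
        for (i = 0; i < nr; i++) {
            table->entries[i].gsi = i;
            table->entries[i].type = KVM_IRQ_ROUTING_IRQCHIP;
            table->entries[i].u.irqchip.irqchip = 0;
            table->entries[i].u.irqchip.pin = i;
        }
        ret = ioctl(vm_fd, KVM_SET_GSI_ROUTING, table);
        free(table);
        return ret;
    }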
static void kvm_loongarch_pch_pic_destroy(struct kvm_device *dev)
{
struct kvm *kvm;
struct loongarch_pch_pic *s;
struct kvm_io_device *device;
if (!dev)
return;
kvm = dev->kvm;
if (!kvm)
return;
s = kvm->arch.pch_pic;
if (!s)
return;
device = &s->device;
/* unregister pch pic device and free it's memory */
kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, device);
kfree(s);
}
static int kvm_loongarch_pch_pic_create(struct kvm_device *dev, u32 type)
{
int ret;
struct loongarch_pch_pic *s;
struct kvm *kvm = dev->kvm;
/* the pch pic device must not have been created already */
if (kvm->arch.pch_pic)
return -EINVAL;
ret = kvm_setup_default_irq_routing(kvm);
if (ret)
return ret;
s = kzalloc(sizeof(struct loongarch_pch_pic), GFP_KERNEL);
if (!s)
return -ENOMEM;
spin_lock_init(&s->lock);
s->kvm = kvm;
kvm->arch.pch_pic = s;
kvm_info("created pch pic device successfully\n");
return 0;
}
static struct kvm_device_ops kvm_loongarch_pch_pic_dev_ops = {
.name = "kvm-loongarch-pch-pic",
.create = kvm_loongarch_pch_pic_create,
.destroy = kvm_loongarch_pch_pic_destroy,
.set_attr = kvm_loongarch_pch_pic_set_attr,
.get_attr = kvm_loongarch_pch_pic_get_attr,
};
int kvm_loongarch_register_pch_pic_device(void)
{
return kvm_register_device_ops(&kvm_loongarch_pch_pic_dev_ops,
KVM_DEV_TYPE_LA_IOAPIC);
}

View File

@ -0,0 +1,87 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2024 Loongson Technology Corporation Limited
*/
#include <linux/kvm_host.h>
#include <trace/events/kvm.h>
#include <asm/kvm_pch_pic.h>
static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int irq_source_id,
int level, bool line_status)
{
/* ioapic pin (0 ~ 63) <---> gsi (0 ~ 63) */
pch_pic_set_irq(kvm->arch.pch_pic, e->irqchip.pin, level);
return 0;
}
/*
* kvm_set_routing_entry: populate a kvm routing entry
* from a user routing entry
*
* @kvm: the VM this entry is applied to
* @e: kvm kernel routing entry handle
* @ue: user api routing entry handle
* Return: 0 on success, -EINVAL on errors.
*/
int kvm_set_routing_entry(struct kvm *kvm,
struct kvm_kernel_irq_routing_entry *e,
const struct kvm_irq_routing_entry *ue)
{
int r = -EINVAL;
switch (ue->type) {
case KVM_IRQ_ROUTING_IRQCHIP:
e->set = kvm_set_ioapic_irq;
e->irqchip.irqchip = ue->u.irqchip.irqchip;
e->irqchip.pin = ue->u.irqchip.pin;
if (e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS)
goto out;
break;
case KVM_IRQ_ROUTING_MSI:
e->set = kvm_set_msi;
e->msi.address_lo = ue->u.msi.address_lo;
e->msi.address_hi = ue->u.msi.address_hi;
e->msi.data = ue->u.msi.data;
break;
default:
goto out;
}
r = 0;
out:
return r;
}
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int irq_source_id,
int level, bool line_status)
{
if (e->type == KVM_IRQ_ROUTING_MSI) {
pch_msi_set_irq(kvm, e->msi.data, 1);
return 0;
}
return -EWOULDBLOCK;
}
/**
* kvm_set_msi: inject the MSI corresponding to the
* MSI routing entry
*
* This is the entry point for irqfd MSI injection
* and userspace MSI injection.
*/
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int irq_source_id,
int level, bool line_status)
{
if (!level)
return -1;
pch_msi_set_irq(kvm, e->msi.data, level);
return 0;
}
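
Because kvm_set_msi() is the irqfd entry point as well, a VMM can bind an eventfd to a GSI that was routed as KVM_IRQ_ROUTING_MSI and inject the MSI by signalling the eventfd. A hedged sketch using the standard KVM_IRQFD ioctl:

    #include <linux/kvm.h>
    #include <sys/eventfd.h>
    #include <sys/ioctl.h>

    int bind_msi_irqfd(int vm_fd, unsigned int gsi)
    {
        int efd = eventfd(0, EFD_CLOEXEC);
        struct kvm_irqfd irqfd = {
            .fd  = efd,
            .gsi = gsi, /* must already be routed as KVM_IRQ_ROUTING_MSI */
        };

        if (efd < 0 || ioctl(vm_fd, KVM_IRQFD, &irqfd) < 0)
            return -1;
        return efd; /* a write to efd now injects the MSI */
    }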

View File

@ -9,6 +9,8 @@
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/kvm_csr.h>
#include <asm/kvm_extioi.h>
#include <asm/kvm_pch_pic.h>
#include "trace.h"
unsigned long vpid_mask;
@ -242,6 +244,7 @@ void kvm_check_vpid(struct kvm_vcpu *vcpu)
kvm_update_vpid(vcpu, cpu);
trace_kvm_vpid_change(vcpu, vcpu->arch.vpid);
vcpu->cpu = cpu;
kvm_clear_request(KVM_REQ_TLB_FLUSH_GPA, vcpu);
}
/* Restore GSTAT(0x50).vpid */
@ -312,7 +315,7 @@ void kvm_arch_hardware_disable(void)
static int kvm_loongarch_env_init(void)
{
int cpu, order;
int cpu, order, ret;
void *addr;
struct kvm_context *context;
@ -367,7 +370,20 @@ static int kvm_loongarch_env_init(void)
kvm_init_gcsr_flag();
return 0;
/* Register loongarch ipi interrupt controller interface. */
ret = kvm_loongarch_register_ipi_device();
if (ret)
return ret;
/* Register loongarch extioi interrupt controller interface. */
ret = kvm_loongarch_register_extioi_device();
if (ret)
return ret;
/* Register loongarch pch pic interrupt controller interface. */
ret = kvm_loongarch_register_pch_pic_device();
return ret;
}
static void kvm_loongarch_env_exit(void)

View File

@ -163,6 +163,7 @@ static kvm_pte_t *kvm_populate_gpa(struct kvm *kvm,
child = kvm_mmu_memory_cache_alloc(cache);
_kvm_pte_init(child, ctx.invalid_ptes[ctx.level - 1]);
smp_wmb(); /* Make pte visible before pmd */
kvm_set_pte(entry, __pa(child));
} else if (kvm_pte_huge(*entry)) {
return entry;
@ -444,6 +445,17 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
enum kvm_mr_change change)
{
int needs_flush;
u32 old_flags = old ? old->flags : 0;
u32 new_flags = new ? new->flags : 0;
bool log_dirty_pages = new_flags & KVM_MEM_LOG_DIRTY_PAGES;
/* Only track memslot flags changed */
if (change != KVM_MR_FLAGS_ONLY)
return;
/* Discard dirty page tracking on readonly memslot */
if ((old_flags & new_flags) & KVM_MEM_READONLY)
return;
/*
* If dirty page logging is enabled, write protect all pages in the slot
@ -454,9 +466,14 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
* MOVE/DELETE: The old mappings will already have been cleaned up by
* kvm_arch_flush_shadow_memslot()
*/
if (change == KVM_MR_FLAGS_ONLY &&
(!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
new->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
if (!(old_flags & KVM_MEM_LOG_DIRTY_PAGES) && log_dirty_pages) {
/*
* Initially-all-set does not require write protecting any page
* because they're all assumed to be dirty.
*/
if (kvm_dirty_log_manual_protect_and_init_set(kvm))
return;
spin_lock(&kvm->mmu_lock);
/* Write protect GPA page table entries */
needs_flush = kvm_mkclean_gpa_pt(kvm, new->base_gfn,
@ -572,6 +589,7 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ
gfn_t gfn = gpa >> PAGE_SHIFT;
struct kvm *kvm = vcpu->kvm;
struct kvm_memory_slot *slot;
struct page *page;
spin_lock(&kvm->mmu_lock);
@ -583,10 +601,8 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ
}
/* Track access to pages marked old */
new = *ptep;
if (!kvm_pte_young(new))
new = kvm_pte_mkyoung(new);
/* call kvm_set_pfn_accessed() after unlock */
new = kvm_pte_mkyoung(*ptep);
/* call kvm_set_pfn_accessed() after unlock */
if (write && !kvm_pte_dirty(new)) {
if (!kvm_pte_write(new)) {
@ -614,19 +630,22 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ
if (changed) {
kvm_set_pte(ptep, new);
pfn = kvm_pte_pfn(new);
page = kvm_pfn_to_refcounted_page(pfn);
if (page)
get_page(page);
}
spin_unlock(&kvm->mmu_lock);
/*
* Fixme: pfn may be freed after mmu_lock
* kvm_try_get_pfn(pfn)/kvm_release_pfn pair to prevent this?
*/
if (kvm_pte_young(changed))
kvm_set_pfn_accessed(pfn);
if (changed) {
if (kvm_pte_young(changed))
kvm_set_pfn_accessed(pfn);
if (kvm_pte_dirty(changed)) {
mark_page_dirty(kvm, gfn);
kvm_set_pfn_dirty(pfn);
if (kvm_pte_dirty(changed)) {
mark_page_dirty(kvm, gfn);
kvm_set_pfn_dirty(pfn);
}
if (page)
put_page(page);
}
return ret;
out:
@ -769,6 +788,7 @@ static kvm_pte_t *kvm_split_huge(struct kvm_vcpu *vcpu, kvm_pte_t *ptep, gfn_t g
val += PAGE_SIZE;
}
smp_wmb(); /* Make pte visible before pmd */
/* The later kvm_flush_tlb_gpa() will flush hugepage tlb */
kvm_set_pte(ptep, __pa(child));
@ -890,10 +910,20 @@ retry:
/* Disable dirty logging on HugePages */
level = 0;
if (!fault_supports_huge_mapping(memslot, hva, write)) {
level = 0;
} else {
if (fault_supports_huge_mapping(memslot, hva, write)) {
/* Check page level about host mmu */
level = host_pfn_mapping_level(kvm, gfn, memslot);
if (level == 1) {
/*
* Check the page level on the secondary mmu:
* disable hugepage if it is already mapped as
* a normal page there
*/
ptep = kvm_populate_gpa(kvm, NULL, gpa, 0);
if (ptep && !kvm_pte_huge(*ptep))
level = 0;
}
if (level == 1) {
gfn = gfn & ~(PTRS_PER_PTE - 1);
pfn = pfn & ~(PTRS_PER_PTE - 1);
@ -924,7 +954,6 @@ retry:
kvm_set_pfn_dirty(pfn);
}
kvm_set_pfn_accessed(pfn);
kvm_release_pfn_clean(pfn);
out:
srcu_read_unlock(&kvm->srcu, srcu_idx);
@ -940,7 +969,8 @@ int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
return ret;
/* Invalidate this entry in the TLB */
kvm_flush_tlb_gpa(vcpu, gpa);
vcpu->arch.flush_gpa = gpa;
kvm_make_request(KVM_REQ_TLB_FLUSH_GPA, vcpu);
return 0;
}
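
The flush is now deferred to the vcpu-run path, where interrupts are disabled (matching the lockdep assertion added to kvm_flush_tlb_gpa() below). A fragment sketching the expected request handling, under the assumption that it runs in the vcpu entry loop before guest entry:

    /* sketch: vcpu entry path, irqs already disabled */
    if (kvm_check_request(KVM_REQ_TLB_FLUSH_GPA, vcpu))
        kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa);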

View File

@ -213,12 +213,6 @@ SYM_FUNC_START(kvm_enter_guest)
/* Save host GPRs */
kvm_save_host_gpr a2
/* Save host CRMD, PRMD to stack */
csrrd a3, LOONGARCH_CSR_CRMD
st.d a3, a2, PT_CRMD
csrrd a3, LOONGARCH_CSR_PRMD
st.d a3, a2, PT_PRMD
addi.d a2, a1, KVM_VCPU_ARCH
st.d sp, a2, KVM_ARCH_HSP
st.d tp, a2, KVM_ARCH_HTP

View File

@ -188,10 +188,3 @@ void kvm_save_timer(struct kvm_vcpu *vcpu)
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
preempt_enable();
}
void kvm_reset_timer(struct kvm_vcpu *vcpu)
{
write_gcsr_timercfg(0);
kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TCFG, 0);
hrtimer_cancel(&vcpu->arch.swtimer);
}

View File

@ -23,10 +23,7 @@ void kvm_flush_tlb_all(void)
void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa)
{
unsigned long flags;
local_irq_save(flags);
lockdep_assert_irqs_disabled();
gpa &= (PAGE_MASK << 1);
invtlb(INVTLB_GID_ADDR, read_csr_gstat() & CSR_GSTAT_GID, gpa);
local_irq_restore(flags);
}

View File

@ -19,14 +19,16 @@ DECLARE_EVENT_CLASS(kvm_transition,
TP_PROTO(struct kvm_vcpu *vcpu),
TP_ARGS(vcpu),
TP_STRUCT__entry(
__field(unsigned int, vcpu_id)
__field(unsigned long, pc)
),
TP_fast_assign(
__entry->vcpu_id = vcpu->vcpu_id;
__entry->pc = vcpu->arch.pc;
),
TP_printk("PC: 0x%08lx", __entry->pc)
TP_printk("vcpu %u PC: 0x%08lx", __entry->vcpu_id, __entry->pc)
);
DEFINE_EVENT(kvm_transition, kvm_enter,
@ -54,19 +56,22 @@ DECLARE_EVENT_CLASS(kvm_exit,
TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
TP_ARGS(vcpu, reason),
TP_STRUCT__entry(
__field(unsigned int, vcpu_id)
__field(unsigned long, pc)
__field(unsigned int, reason)
),
TP_fast_assign(
__entry->vcpu_id = vcpu->vcpu_id;
__entry->pc = vcpu->arch.pc;
__entry->reason = reason;
),
TP_printk("[%s]PC: 0x%08lx",
__print_symbolic(__entry->reason,
kvm_trace_symbol_exit_types),
__entry->pc)
TP_printk("vcpu %u [%s] PC: 0x%08lx",
__entry->vcpu_id,
__print_symbolic(__entry->reason,
kvm_trace_symbol_exit_types),
__entry->pc)
);
DEFINE_EVENT(kvm_exit, kvm_exit_idle,
@ -85,14 +90,17 @@ TRACE_EVENT(kvm_exit_gspr,
TP_PROTO(struct kvm_vcpu *vcpu, unsigned int inst_word),
TP_ARGS(vcpu, inst_word),
TP_STRUCT__entry(
__field(unsigned int, vcpu_id)
__field(unsigned int, inst_word)
),
TP_fast_assign(
__entry->vcpu_id = vcpu->vcpu_id;
__entry->inst_word = inst_word;
),
TP_printk("Inst word: 0x%08x", __entry->inst_word)
TP_printk("vcpu %u Inst word: 0x%08x", __entry->vcpu_id,
__entry->inst_word)
);
#define KVM_TRACE_AUX_SAVE 0

File diff suppressed because it is too large

View File

@ -5,6 +5,9 @@
#include <linux/kvm_host.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_vcpu.h>
#include <asm/kvm_extioi.h>
#include <asm/kvm_pch_pic.h>
const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
KVM_GENERIC_VM_STATS(),
@ -30,15 +33,21 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
if (!kvm->arch.pgd)
return -ENOMEM;
kvm->arch.phyid_map = kvzalloc(sizeof(struct kvm_phyid_map),
GFP_KERNEL_ACCOUNT);
kvm->arch.phyid_map = kvzalloc(sizeof(struct kvm_phyid_map), GFP_KERNEL_ACCOUNT);
if (!kvm->arch.phyid_map) {
free_page((unsigned long)kvm->arch.pgd);
kvm->arch.pgd = NULL;
return -ENOMEM;
}
spin_lock_init(&kvm->arch.phyid_map_lock);
kvm_init_vmcs(kvm);
/* Enable all PV features by default */
kvm->arch.pv_features = BIT(KVM_FEATURE_IPI);
if (kvm_pvtime_supported())
kvm->arch.pv_features |= BIT(KVM_FEATURE_STEAL_TIME);
kvm->arch.gpa_size = BIT(cpu_vabits - 1);
kvm->arch.root_level = CONFIG_PGTABLE_LEVELS - 1;
kvm->arch.invalid_ptes[0] = 0;
@ -52,7 +61,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
for (i = 0; i <= kvm->arch.root_level; i++)
kvm->arch.pte_shifts[i] = PAGE_SHIFT + i * (PAGE_SHIFT - 3);
spin_lock_init(&kvm->arch.phyid_map_lock);
return 0;
}
@ -60,8 +68,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
{
kvm_destroy_vcpus(kvm);
free_page((unsigned long)kvm->arch.pgd);
kvfree(kvm->arch.phyid_map);
kvm->arch.pgd = NULL;
kvfree(kvm->arch.phyid_map);
kvm->arch.phyid_map = NULL;
}
@ -70,6 +78,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
int r;
switch (ext) {
case KVM_CAP_IRQCHIP:
case KVM_CAP_ONE_REG:
case KVM_CAP_ENABLE_CAP:
case KVM_CAP_READONLY_MEM:
@ -78,6 +87,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_IOEVENTFD:
case KVM_CAP_MP_STATE:
case KVM_CAP_SET_GUEST_DEBUG:
case KVM_CAP_VM_ATTRIBUTES:
r = 1;
break;
case KVM_CAP_NR_VCPUS:
@ -100,7 +110,114 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
return r;
}
static int kvm_vm_feature_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
switch (attr->attr) {
case KVM_LOONGARCH_VM_FEAT_LSX:
if (cpu_has_lsx)
return 0;
return -ENXIO;
case KVM_LOONGARCH_VM_FEAT_LASX:
if (cpu_has_lasx)
return 0;
return -ENXIO;
case KVM_LOONGARCH_VM_FEAT_X86BT:
if (cpu_has_lbt_x86)
return 0;
return -ENXIO;
case KVM_LOONGARCH_VM_FEAT_ARMBT:
if (cpu_has_lbt_arm)
return 0;
return -ENXIO;
case KVM_LOONGARCH_VM_FEAT_MIPSBT:
if (cpu_has_lbt_mips)
return 0;
return -ENXIO;
case KVM_LOONGARCH_VM_FEAT_PMU:
if (cpu_has_pmp)
return 0;
return -ENXIO;
case KVM_LOONGARCH_VM_FEAT_PV_IPI:
return 0;
case KVM_LOONGARCH_VM_FEAT_PV_STEALTIME:
if (kvm_pvtime_supported())
return 0;
return -ENXIO;
default:
return -ENXIO;
}
}
static int kvm_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
switch (attr->group) {
case KVM_LOONGARCH_VM_FEAT_CTRL:
return kvm_vm_feature_has_attr(kvm, attr);
case KVM_LOONGARCH_VM_HAVE_IRQCHIP:
return 0;
default:
return -ENXIO;
}
}
int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
return -ENOIOCTLCMD;
int r;
void __user *argp = (void __user *)arg;
struct kvm *kvm = filp->private_data;
struct kvm_device_attr attr;
switch (ioctl) {
case KVM_CREATE_IRQCHIP: {
r = 1;
break;
}
case KVM_HAS_DEVICE_ATTR: {
if (copy_from_user(&attr, argp, sizeof(attr)))
return -EFAULT;
return kvm_vm_has_attr(kvm, &attr);
}
default:
return -EINVAL;
}
return r;
}
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *data,
bool line_status)
{
bool level;
struct loongarch_pch_pic *s;
int type, vcpu, irq, vcpus, val, ret = 0;
level = data->level;
val = data->irq;
s = kvm->arch.pch_pic;
vcpus = atomic_read(&kvm->online_vcpus);
type = (val >> KVM_LOONGARCH_IRQ_TYPE_SHIFT) & KVM_LOONGARCH_IRQ_TYPE_MASK;
vcpu = (val >> KVM_LOONGARCH_IRQ_VCPU_SHIFT) & KVM_LOONGARCH_IRQ_VCPU_MASK;
irq = (val >> KVM_LOONGARCH_IRQ_NUM_SHIFT) & KVM_LOONGARCH_IRQ_NUM_MASK;
switch (type) {
case KVM_LOONGARCH_IRQ_TYPE_IOAPIC:
if (irq < KVM_IRQCHIP_NUM_PINS)
pch_pic_set_irq(s, irq, level);
else if (irq < 256)
pch_msi_set_irq(kvm, irq, level);
else
ret = -EINVAL;
break;
default:
ret = -EINVAL;
}
return ret;
}
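
Userspace drives this handler with the KVM_IRQ_LINE ioctl, packing the same fields the code above unpacks. A hedged encoder sketch (the shift constants are the ones referenced above, from this patch's uapi headers):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    int set_ioapic_line(int vm_fd, unsigned int irq, int level)
    {
        struct kvm_irq_level data = {
            .irq = (KVM_LOONGARCH_IRQ_TYPE_IOAPIC << KVM_LOONGARCH_IRQ_TYPE_SHIFT) |
                   (irq << KVM_LOONGARCH_IRQ_NUM_SHIFT),
            .level = level,
        };

        return ioctl(vm_fd, KVM_IRQ_LINE, &data);
    }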
bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
{
return kvm->arch.extioi && kvm->arch.pch_pic;
}

View File

@ -134,6 +134,7 @@ KVM_X86_OP(msr_filter_changed)
KVM_X86_OP(complete_emulated_msr)
KVM_X86_OP(vcpu_deliver_sipi_vector)
KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
KVM_X86_OP_OPTIONAL(get_untagged_addr)
KVM_X86_OP_OPTIONAL(vm_attestation)
KVM_X86_OP_OPTIONAL(control_pre_system_reset)
KVM_X86_OP_OPTIONAL(control_post_system_reset)

View File

@ -125,7 +125,8 @@
| X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
| X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
| X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_VMXE \
| X86_CR4_SMAP | X86_CR4_PKE | X86_CR4_UMIP))
| X86_CR4_SMAP | X86_CR4_PKE | X86_CR4_UMIP \
| X86_CR4_LAM_SUP))
#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
@ -1768,6 +1769,8 @@ struct kvm_x86_ops {
*/
unsigned long (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *vcpu);
gva_t (*get_untagged_addr)(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags);
/*
* Interfaces for HYGON CSV guest
*/

View File

@ -2124,15 +2124,11 @@ static __always_inline void exc_machine_check_user(struct pt_regs *regs)
{
irqentry_state_t irq_state;
irq_state = irqentry_nmi_enter(regs);
irqentry_enter_from_user_mode(regs);
irq_state = irqentry_nmi_enter(regs);
do_machine_check(regs);
irqentry_exit_to_user_mode(regs);
irqentry_nmi_exit(regs, irq_state);
irqentry_exit_to_user_mode(regs);
}
#ifdef CONFIG_X86_64

View File

@ -680,7 +680,7 @@ void kvm_set_cpu_caps(void)
kvm_cpu_cap_mask(CPUID_7_1_EAX,
F(AVX_VNNI) | F(AVX512_BF16) | F(CMPCCXADD) |
F(FZRM) | F(FSRS) | F(FSRC) |
F(AMX_FP16) | F(AVX_IFMA)
F(AMX_FP16) | F(AVX_IFMA) | F(LAM)
);
kvm_cpu_cap_init_kvm_defined(CPUID_7_1_EDX,

View File

@ -47,11 +47,6 @@ static inline bool kvm_vcpu_is_legal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
return !(gpa & vcpu->arch.reserved_gpa_bits);
}
static inline bool kvm_vcpu_is_illegal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
return !kvm_vcpu_is_legal_gpa(vcpu, gpa);
}
static inline bool kvm_vcpu_is_legal_aligned_gpa(struct kvm_vcpu *vcpu,
gpa_t gpa, gpa_t alignment)
{
@ -288,4 +283,12 @@ static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu,
vcpu->arch.governed_features.enabled);
}
static inline bool kvm_vcpu_is_legal_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
if (guest_can_use(vcpu, X86_FEATURE_LAM))
cr3 &= ~(X86_CR3_LAM_U48 | X86_CR3_LAM_U57);
return kvm_vcpu_is_legal_gpa(vcpu, cr3);
}
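
The helper's only effect is to ignore the two LAM control bits before the reserved-GPA check. A standalone illustration, with the architectural bit positions written out (LAM_U57 is CR3 bit 61, LAM_U48 is CR3 bit 62):

    #include <stdint.h>
    #include <assert.h>

    #define CR3_LAM_U57 (1ull << 61)
    #define CR3_LAM_U48 (1ull << 62)

    int main(void)
    {
        /* hypothetical vcpu: physical-address bits above 51 reserved */
        uint64_t reserved_gpa_bits = ~((1ull << 52) - 1);
        uint64_t cr3 = 0x12345000ull | CR3_LAM_U57;

        /* without masking, the LAM bit would trip the reserved check */
        assert((cr3 & reserved_gpa_bits) != 0);

        /* with LAM governed, the bits are stripped first */
        cr3 &= ~(CR3_LAM_U48 | CR3_LAM_U57);
        assert((cr3 & reserved_gpa_bits) == 0);
        return 0;
    }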
#endif

View File

@ -687,8 +687,8 @@ static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
unsigned *max_size, unsigned size,
bool write, bool fetch,
enum x86emul_mode mode, ulong *linear)
enum x86emul_mode mode, ulong *linear,
unsigned int flags)
{
struct desc_struct desc;
bool usable;
@ -701,7 +701,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
*max_size = 0;
switch (mode) {
case X86EMUL_MODE_PROT64:
*linear = la;
*linear = la = ctxt->ops->get_untagged_addr(ctxt, la, flags);
va_bits = ctxt_virt_addr_bits(ctxt);
if (!__is_canonical_address(la, va_bits))
goto bad;
@ -717,11 +717,11 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
if (!usable)
goto bad;
/* code segment in protected mode or read-only data segment */
if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
|| !(desc.type & 2)) && write)
if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8)) || !(desc.type & 2)) &&
(flags & X86EMUL_F_WRITE))
goto bad;
/* unreadable code segment */
if (!fetch && (desc.type & 8) && !(desc.type & 2))
if (!(flags & X86EMUL_F_FETCH) && (desc.type & 8) && !(desc.type & 2))
goto bad;
lim = desc_limit_scaled(&desc);
if (!(desc.type & 8) && (desc.type & 4)) {
@ -757,8 +757,8 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
ulong *linear)
{
unsigned max_size;
return __linearize(ctxt, addr, &max_size, size, write, false,
ctxt->mode, linear);
return __linearize(ctxt, addr, &max_size, size, ctxt->mode, linear,
write ? X86EMUL_F_WRITE : 0);
}
static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
@ -771,7 +771,8 @@ static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
if (ctxt->op_bytes != sizeof(unsigned long))
addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode, &linear);
rc = __linearize(ctxt, addr, &max_size, 1, ctxt->mode, &linear,
X86EMUL_F_FETCH);
if (rc == X86EMUL_CONTINUE)
ctxt->_eip = addr.ea;
return rc;
@ -907,8 +908,8 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
* boundary check itself. Instead, we use max_size to check
* against op_size.
*/
rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
&linear);
rc = __linearize(ctxt, addr, &max_size, 0, ctxt->mode, &linear,
X86EMUL_F_FETCH);
if (unlikely(rc != X86EMUL_CONTINUE))
return rc;
@ -3439,8 +3440,10 @@ static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
int rc;
ulong linear;
unsigned int max_size;
rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
rc = __linearize(ctxt, ctxt->src.addr.mem, &max_size, 1, ctxt->mode,
&linear, X86EMUL_F_INVLPG);
if (rc == X86EMUL_CONTINUE)
ctxt->ops->invlpg(ctxt, linear);
/* Disable writeback. */

View File

@ -16,6 +16,7 @@ KVM_GOVERNED_X86_FEATURE(PAUSEFILTER)
KVM_GOVERNED_X86_FEATURE(PFTHRESHOLD)
KVM_GOVERNED_X86_FEATURE(VGIF)
KVM_GOVERNED_X86_FEATURE(VNMI)
KVM_GOVERNED_X86_FEATURE(LAM)
#undef KVM_GOVERNED_X86_FEATURE
#undef KVM_GOVERNED_FEATURE

View File

@ -88,6 +88,12 @@ struct x86_instruction_info {
#define X86EMUL_IO_NEEDED 5 /* IO is needed to complete emulation */
#define X86EMUL_INTERCEPTED 6 /* Intercepted by nested VMCB/VMCS */
/* x86-specific emulation flags */
#define X86EMUL_F_WRITE BIT(0)
#define X86EMUL_F_FETCH BIT(1)
#define X86EMUL_F_IMPLICIT BIT(2)
#define X86EMUL_F_INVLPG BIT(3)
struct x86_emulate_ops {
void (*vm_bugged)(struct x86_emulate_ctxt *ctxt);
/*
@ -224,6 +230,9 @@ struct x86_emulate_ops {
int (*leave_smm)(struct x86_emulate_ctxt *ctxt);
void (*triple_fault)(struct x86_emulate_ctxt *ctxt);
int (*set_xcr)(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr);
gva_t (*get_untagged_addr)(struct x86_emulate_ctxt *ctxt, gva_t addr,
unsigned int flags);
};
/* Type, address-of, and value of an instruction's operand. */

View File

@ -146,6 +146,14 @@ static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu));
}
static inline unsigned long kvm_get_active_cr3_lam_bits(struct kvm_vcpu *vcpu)
{
if (!guest_can_use(vcpu, X86_FEATURE_LAM))
return 0;
return kvm_read_cr3(vcpu) & (X86_CR3_LAM_U48 | X86_CR3_LAM_U57);
}
static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
{
u64 root_hpa = vcpu->arch.mmu->root.hpa;

View File

@ -3774,7 +3774,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
hpa_t root;
root_pgd = kvm_mmu_get_guest_pgd(vcpu, mmu);
root_gfn = root_pgd >> PAGE_SHIFT;
root_gfn = (root_pgd & __PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) {
mmu->root.hpa = kvm_mmu_get_dummy_root();

View File

@ -13,6 +13,7 @@
#endif
/* Page table builder macros common to shadow (host) PTEs and guest PTEs. */
#define __PT_BASE_ADDR_MASK GENMASK_ULL(51, 12)
#define __PT_LEVEL_SHIFT(level, bits_per_level) \
(PAGE_SHIFT + ((level) - 1) * (bits_per_level))
#define __PT_INDEX(address, level, bits_per_level) \

View File

@ -62,7 +62,7 @@
#endif
/* Common logic, but per-type values. These also need to be undefined. */
#define PT_BASE_ADDR_MASK ((pt_element_t)(((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1)))
#define PT_BASE_ADDR_MASK ((pt_element_t)__PT_BASE_ADDR_MASK)
#define PT_LVL_ADDR_MASK(lvl) __PT_LVL_ADDR_MASK(PT_BASE_ADDR_MASK, lvl, PT_LEVEL_BITS)
#define PT_LVL_OFFSET_MASK(lvl) __PT_LVL_OFFSET_MASK(PT_BASE_ADDR_MASK, lvl, PT_LEVEL_BITS)
#define PT_INDEX(addr, lvl) __PT_INDEX(addr, lvl, PT_LEVEL_BITS)

View File

@ -296,7 +296,7 @@ static bool __nested_vmcb_check_save(struct kvm_vcpu *vcpu,
if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) {
if (CC(!(save->cr4 & X86_CR4_PAE)) ||
CC(!(save->cr0 & X86_CR0_PE)) ||
CC(kvm_vcpu_is_illegal_gpa(vcpu, save->cr3)))
CC(!kvm_vcpu_is_legal_cr3(vcpu, save->cr3)))
return false;
}
@ -505,7 +505,7 @@ static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
bool nested_npt, bool reload_pdptrs)
{
if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3)))
if (CC(!kvm_vcpu_is_legal_cr3(vcpu, cr3)))
return -EINVAL;
if (reload_pdptrs && !nested_npt && is_pae_paging(vcpu) &&

View File

@ -1086,7 +1086,7 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
bool nested_ept, bool reload_pdptrs,
enum vm_entry_failure_code *entry_failure_code)
{
if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3))) {
if (CC(!kvm_vcpu_is_legal_cr3(vcpu, cr3))) {
*entry_failure_code = ENTRY_FAIL_DEFAULT;
return -EINVAL;
}
@ -2720,7 +2720,7 @@ static bool nested_vmx_check_eptp(struct kvm_vcpu *vcpu, u64 new_eptp)
}
/* Reserved bits should not be set */
if (CC(kvm_vcpu_is_illegal_gpa(vcpu, new_eptp) || ((new_eptp >> 7) & 0x1f)))
if (CC(!kvm_vcpu_is_legal_gpa(vcpu, new_eptp) || ((new_eptp >> 7) & 0x1f)))
return false;
/* AD, if set, should be supported */
@ -2915,7 +2915,7 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) ||
CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) ||
CC(kvm_vcpu_is_illegal_gpa(vcpu, vmcs12->host_cr3)))
CC(!kvm_vcpu_is_legal_cr3(vcpu, vmcs12->host_cr3)))
return -EINVAL;
if (CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu)) ||
@ -5048,6 +5048,7 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
else
*ret = off;
*ret = vmx_get_untagged_addr(vcpu, *ret, 0);
/* Long mode: #GP(0)/#SS(0) if the memory address is in a
* non-canonical form. This is the only check on the memory
* destination for long mode!
@ -5865,6 +5866,10 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
vpid02 = nested_get_vpid02(vcpu);
switch (type) {
case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
/*
* LAM doesn't apply to addresses that are inputs to TLB
* invalidation.
*/
if (!operand.vpid ||
is_noncanonical_address(operand.gla, vcpu))
return nested_vmx_fail(vcpu,

View File

@ -37,6 +37,7 @@ static int sgx_get_encls_gva(struct kvm_vcpu *vcpu, unsigned long offset,
if (!IS_ALIGNED(*gva, alignment)) {
fault = true;
} else if (likely(is_64_bit_mode(vcpu))) {
*gva = vmx_get_untagged_addr(vcpu, *gva, 0);
fault = is_noncanonical_address(*gva, vcpu);
} else {
*gva &= 0xffffffff;

View File

@ -3430,7 +3430,8 @@ static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
update_guest_cr3 = false;
vmx_ept_load_pdptrs(vcpu);
} else {
guest_cr3 = root_hpa | kvm_get_active_pcid(vcpu);
guest_cr3 = root_hpa | kvm_get_active_pcid(vcpu) |
kvm_get_active_cr3_lam_bits(vcpu);
}
if (update_guest_cr3)
@ -5851,7 +5852,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
* would also use advanced VM-exit information for EPT violations to
* reconstruct the page fault error code.
*/
if (unlikely(allow_smaller_maxphyaddr && kvm_vcpu_is_illegal_gpa(vcpu, gpa)))
if (unlikely(allow_smaller_maxphyaddr && !kvm_vcpu_is_legal_gpa(vcpu, gpa)))
return kvm_emulate_instruction(vcpu, 0);
return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
@ -7791,6 +7792,9 @@ static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
cr4_fixed1_update(X86_CR4_UMIP, ecx, feature_bit(UMIP));
cr4_fixed1_update(X86_CR4_LA57, ecx, feature_bit(LA57));
entry = kvm_find_cpuid_entry_index(vcpu, 0x7, 1);
cr4_fixed1_update(X86_CR4_LAM_SUP, eax, feature_bit(LAM));
#undef cr4_fixed1_update
}
@ -7877,6 +7881,7 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_XSAVES);
kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_VMX);
kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_LAM);
vmx_setup_uret_msrs(vmx);
@ -8354,6 +8359,50 @@ static void vmx_vm_destroy(struct kvm *kvm)
free_pages((unsigned long)kvm_vmx->pid_table, vmx_get_pid_table_order(kvm));
}
/*
* Note, the SDM states that the linear address is masked *after* the modified
* canonicality check, whereas KVM masks (untags) the address and then performs
* a "normal" canonicality check. Functionally, the two methods are identical,
* and when the masking occurs relative to the canonicality check isn't visible
* to software, i.e. KVM's behavior doesn't violate the SDM.
*/
gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags)
{
int lam_bit;
unsigned long cr3_bits;
if (flags & (X86EMUL_F_FETCH | X86EMUL_F_IMPLICIT | X86EMUL_F_INVLPG))
return gva;
if (!is_64_bit_mode(vcpu))
return gva;
/*
* Bit 63 determines if the address should be treated as user address
* or a supervisor address.
*/
if (!(gva & BIT_ULL(63))) {
cr3_bits = kvm_get_active_cr3_lam_bits(vcpu);
if (!(cr3_bits & (X86_CR3_LAM_U57 | X86_CR3_LAM_U48)))
return gva;
/* LAM_U48 is ignored if LAM_U57 is set. */
lam_bit = cr3_bits & X86_CR3_LAM_U57 ? 56 : 47;
} else {
if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_LAM_SUP))
return gva;
lam_bit = kvm_is_cr4_bit_set(vcpu, X86_CR4_LA57) ? 56 : 47;
}
/*
* Untag the address by sign-extending the lam_bit, but NOT to bit 63.
* Bit 63 is retained from the raw virtual address so that untagging
* doesn't change a user access to a supervisor access, and vice versa.
*/
return (sign_extend64(gva, lam_bit) & ~BIT_ULL(63)) | (gva & BIT_ULL(63));
}
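/*
 * Illustrative sketch (not part of this patch): the untagging arithmetic
 * above, applied to a hypothetical LAM_U57 user pointer. Assumes the
 * kernel's sign_extend64() and BIT_ULL() helpers.
 */
static inline u64 lam_untag_example(void)
{
	u64 gva = 0x3e00007f12345678ULL;	/* bits 62:57 carry a tag */
	int lam_bit = 56;			/* LAM_U57 */
	/* Sign-extend from the LAM bit, then restore the original bit 63. */
	return (sign_extend64(gva, lam_bit) & ~BIT_ULL(63)) | (gva & BIT_ULL(63));
	/* Result: 0x0000007f12345678; the tag in bits 62:57 is cleared. */
}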
static struct kvm_x86_ops vmx_x86_ops __initdata = {
.name = KBUILD_MODNAME,
@ -8492,6 +8541,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.complete_emulated_msr = kvm_complete_insn_gp,
.vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
.get_untagged_addr = vmx_get_untagged_addr,
};
static unsigned int vmx_handle_intel_pt_intr(void)

View File

@ -421,6 +421,8 @@ void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags);
static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
int type, bool value)
{

View File

@ -1288,7 +1288,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
* stuff CR3, e.g. for RSM emulation, and there is no guarantee that
* the current vCPU mode is accurate.
*/
if (kvm_vcpu_is_illegal_gpa(vcpu, cr3))
if (!kvm_vcpu_is_legal_cr3(vcpu, cr3))
return 1;
if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, cr3))
@ -8385,6 +8385,15 @@ static void emulator_vm_bugged(struct x86_emulate_ctxt *ctxt)
kvm_vm_bugged(kvm);
}
static gva_t emulator_get_untagged_addr(struct x86_emulate_ctxt *ctxt,
gva_t addr, unsigned int flags)
{
if (!kvm_x86_ops.get_untagged_addr)
return addr;
return static_call(kvm_x86_get_untagged_addr)(emul_to_vcpu(ctxt), addr, flags);
}
static const struct x86_emulate_ops emulate_ops = {
.vm_bugged = emulator_vm_bugged,
.read_gpr = emulator_read_gpr,
@ -8429,6 +8438,7 @@ static const struct x86_emulate_ops emulate_ops = {
.leave_smm = emulator_leave_smm,
.triple_fault = emulator_triple_fault,
.set_xcr = emulator_set_xcr,
.get_untagged_addr = emulator_get_untagged_addr,
};
static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
@ -11665,7 +11675,7 @@ static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
*/
if (!(sregs->cr4 & X86_CR4_PAE) || !(sregs->efer & EFER_LMA))
return false;
if (kvm_vcpu_is_illegal_gpa(vcpu, sregs->cr3))
if (!kvm_vcpu_is_legal_cr3(vcpu, sregs->cr3))
return false;
} else {
/*
@ -13513,6 +13523,10 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
switch (type) {
case INVPCID_TYPE_INDIV_ADDR:
/*
* LAM doesn't apply to addresses that are inputs to TLB
* invalidation.
*/
if ((!pcid_enabled && (operand.pcid != 0)) ||
is_noncanonical_address(operand.gla, vcpu)) {
kvm_inject_gp(vcpu, 0);

View File

@ -524,6 +524,8 @@ bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);
__reserved_bits |= X86_CR4_VMXE; \
if (!__cpu_has(__c, X86_FEATURE_PCID)) \
__reserved_bits |= X86_CR4_PCIDE; \
if (!__cpu_has(__c, X86_FEATURE_LAM)) \
__reserved_bits |= X86_CR4_LAM_SUP; \
__reserved_bits; \
})

View File

@ -1257,6 +1257,8 @@ static int loongson3_cpufreq_cpu_init(struct cpufreq_policy *policy)
if (has_boost_freq() && boost_supported())
loongson3_cpufreq_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
policy->cur = core->normal_max_freq * 1000;
pr_info("CPU%u - ACPI performance management activated.\n", cpu);
for (i = 0; i < perf->state_count; i++)
pr_debug(" %cP%d: %d MHz, %d mW, %d uS %d level\n",

View File

@ -15,3 +15,4 @@ config DRM_LOONGSON
If "M" is selected, the module will be called loongson.
If in doubt, say "N".
source "drivers/gpu/drm/loongson/ast/Kconfig"

View File

@ -20,3 +20,4 @@ loongson-y += loongson_device.o \
loongson_module.o
obj-$(CONFIG_DRM_LOONGSON) += loongson.o
obj-$(CONFIG_DRM_AST_LOONGSON) += ast/

View File

@ -0,0 +1,14 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_AST_LOONGSON
tristate "AST server chips for Loongson Platform"
depends on DRM && PCI && MMU && LOONGARCH
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
select DRM_TTM
select DRM_TTM_HELPER
help
Say yes for experimental AST GPU driver. Do not enable
this driver without having a working -modesetting,
and a version of AST that knows to fail if KMS
is bound to the driver. These GPUs are commonly found
in server chipsets.

View File

@ -0,0 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
#
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
ast-y := ast_drv.o ast_i2c.o ast_main.o ast_mm.o ast_mode.o ast_post.o ast_dp501.o ast_dp.o
obj-$(CONFIG_DRM_AST_LOONGSON) := ast.o

View File

@ -0,0 +1,299 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2021, ASPEED Technology Inc.
// Authors: KuoHsiang Chou <kuohsiang_chou@aspeedtech.com>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <drm/drm_print.h>
#include "ast_drv.h"
int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata)
{
struct ast_private *ast = to_ast_private(dev);
u8 i = 0, j = 0;
/*
* CRD1[b5]: DP MCU FW is executing
* CRDC[b0]: DP link success
* CRDF[b0]: DP HPD
* CRE5[b0]: Host reading EDID process is done
*/
if (!(ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1,
ASTDP_MCU_FW_EXECUTING) &&
ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDC,
ASTDP_LINK_SUCCESS) &&
ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDF, ASTDP_HPD) &&
ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5,
ASTDP_HOST_EDID_READ_DONE_MASK))) {
goto err_astdp_edid_not_ready;
}
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5,
(u8)~ASTDP_HOST_EDID_READ_DONE_MASK, 0x00);
for (i = 0; i < 32; i++) {
/*
* CRE4[7:0]: Read-Pointer for EDID (Unit: 4bytes); valid range: 0~64
*/
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE4,
ASTDP_AND_CLEAR_MASK, (u8)i);
j = 0;
/*
* CRD7[b0]: valid flag for EDID
* CRD6[b0]: mirror read pointer for EDID
*/
while ((ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD7,
ASTDP_EDID_VALID_FLAG_MASK) !=
0x01) ||
(ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD6,
ASTDP_EDID_READ_POINTER_MASK) !=
i)) {
/*
* Delays get longer with each retry.
* 1. The delay is often 2 loops when users request "Display Settings"
* via right-click of the mouse.
* 2. The delays are often much longer when the system resumes from S3/S4.
*/
mdelay(j + 1);
if (!(ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT,
0xD1,
ASTDP_MCU_FW_EXECUTING) &&
ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT,
0xDC,
ASTDP_LINK_SUCCESS) &&
ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT,
0xDF, ASTDP_HPD))) {
goto err_astdp_jump_out_loop_of_edid;
}
j++;
if (j > 200)
goto err_astdp_jump_out_loop_of_edid;
}
*(ediddata) = ast_get_index_reg_mask(
ast, AST_IO_CRTC_PORT, 0xD8, ASTDP_EDID_READ_DATA_MASK);
*(ediddata + 1) = ast_get_index_reg_mask(
ast, AST_IO_CRTC_PORT, 0xD9, ASTDP_EDID_READ_DATA_MASK);
*(ediddata + 2) = ast_get_index_reg_mask(
ast, AST_IO_CRTC_PORT, 0xDA, ASTDP_EDID_READ_DATA_MASK);
*(ediddata + 3) = ast_get_index_reg_mask(
ast, AST_IO_CRTC_PORT, 0xDB, ASTDP_EDID_READ_DATA_MASK);
if (i == 31) {
/*
* For 128-byte EDID 1.3:
* 1. Add the value of byte 126 to byte 127.
* Byte 127 is the checksum; the sum of all 128 bytes
* should equal 0 (mod 256).
* 2. Set byte 126 to 0.
* Byte 126 indicates the number of extensions to
* follow; 0 represents no extensions.
*/
*(ediddata + 3) = *(ediddata + 3) + *(ediddata + 2);
*(ediddata + 2) = 0;
}
ediddata += 4;
}
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5,
(u8)~ASTDP_HOST_EDID_READ_DONE_MASK,
ASTDP_HOST_EDID_READ_DONE);
return 0;
err_astdp_jump_out_loop_of_edid:
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5,
(u8)~ASTDP_HOST_EDID_READ_DONE_MASK,
ASTDP_HOST_EDID_READ_DONE);
return (~(j + 256) + 1);
err_astdp_edid_not_ready:
if (!(ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1,
ASTDP_MCU_FW_EXECUTING)))
return (~0xD1 + 1);
if (!(ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDC,
ASTDP_LINK_SUCCESS)))
return (~0xDC + 1);
if (!(ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDF, ASTDP_HPD)))
return (~0xDF + 1);
if (!(ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5,
ASTDP_HOST_EDID_READ_DONE_MASK)))
return (~0xE5 + 1);
return 0;
}
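/*
 * Side note (illustrative, not part of this patch): the fixup above relies
 * on the EDID rule that all 128 bytes of a block sum to 0 modulo 256.
 * Adding byte 126 into byte 127 and zeroing byte 126 leaves that sum
 * unchanged, so the patched block still validates. A minimal check:
 */
static bool edid_block_sums_to_zero(const u8 *edid)
{
	u8 sum = 0;
	int i;
	for (i = 0; i < 128; i++)
		sum += edid[i];	/* u8 arithmetic wraps modulo 256 */
	return sum == 0;
}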
/*
* Launch Aspeed DP
*/
void ast_dp_launch(struct drm_device *dev, u8 bPower)
{
u32 i = 0, j = 0, WaitCount = 1;
u8 bDPTX = 0;
u8 bDPExecute = 1;
struct ast_private *ast = to_ast_private(dev);
// After resume from S3, more time is needed until the BMC is ready.
if (bPower)
WaitCount = 300;
// The total wait count differs by condition.
for (j = 0; j < WaitCount; j++) {
bDPTX = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1,
TX_TYPE_MASK);
if (bDPTX)
break;
msleep(100);
}
// 0xE : ASTDP with DPMCU FW handling
if (bDPTX == ASTDP_DPMCU_TX) {
// Wait one second then timeout.
i = 0;
while (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1,
COPROCESSOR_LAUNCH) !=
COPROCESSOR_LAUNCH) {
i++;
// wait 100 ms
msleep(100);
if (i >= 10) {
// DP would not be ready.
bDPExecute = 0;
break;
}
}
if (bDPExecute)
ast->tx_chip_types |= BIT(AST_TX_ASTDP);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5,
(u8)~ASTDP_HOST_EDID_READ_DONE_MASK,
ASTDP_HOST_EDID_READ_DONE);
}
}
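/*
 * Illustrative sketch (not part of this patch): ast_dp_launch() above is an
 * instance of the usual poll-with-timeout idiom. In generic form, with
 * hypothetical names and msleep() from <linux/delay.h>:
 */
static bool poll_until_ms(bool (*cond)(void *), void *arg,
			  unsigned int poll_ms, unsigned int timeout_ms)
{
	unsigned int waited = 0;
	while (!cond(arg)) {
		if (waited >= timeout_ms)
			return false;	/* deadline exceeded */
		msleep(poll_ms);
		waited += poll_ms;
	}
	return true;	/* condition met before the deadline */
}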
void ast_dp_power_on_off(struct drm_device *dev, bool on)
{
struct ast_private *ast = to_ast_private(dev);
// Read and Turn off DP PHY sleep
u8 bE3 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE3,
AST_DP_VIDEO_ENABLE);
// Turn on DP PHY sleep
if (!on)
bE3 |= AST_DP_PHY_SLEEP;
// DP Power on/off
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE3,
(u8)~AST_DP_PHY_SLEEP, bE3);
}
void ast_dp_set_on_off(struct drm_device *dev, bool on)
{
struct ast_private *ast = to_ast_private(dev);
u8 video_on_off = on;
// Video On/Off
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE3,
(u8)~AST_DP_VIDEO_ENABLE, on);
// If DP is plugged in and the link is up, check the video on/off status
if (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDC,
ASTDP_LINK_SUCCESS) &&
ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDF, ASTDP_HPD)) {
video_on_off <<= 4;
while (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDF,
ASTDP_MIRROR_VIDEO_ENABLE) !=
video_on_off) {
// wait 1 ms
mdelay(1);
}
}
}
void ast_dp_set_mode(struct drm_crtc *crtc,
struct ast_vbios_mode_info *vbios_mode)
{
struct ast_private *ast = to_ast_private(crtc->dev);
u32 ulRefreshRateIndex;
u8 ModeIdx;
ulRefreshRateIndex = vbios_mode->enh_table->refresh_rate_index - 1;
switch (crtc->mode.crtc_hdisplay) {
case 320:
ModeIdx = ASTDP_320x240_60;
break;
case 400:
ModeIdx = ASTDP_400x300_60;
break;
case 512:
ModeIdx = ASTDP_512x384_60;
break;
case 640:
ModeIdx = (ASTDP_640x480_60 + (u8)ulRefreshRateIndex);
break;
case 800:
ModeIdx = (ASTDP_800x600_56 + (u8)ulRefreshRateIndex);
break;
case 1024:
ModeIdx = (ASTDP_1024x768_60 + (u8)ulRefreshRateIndex);
break;
case 1152:
ModeIdx = ASTDP_1152x864_75;
break;
case 1280:
if (crtc->mode.crtc_vdisplay == 800)
ModeIdx =
(ASTDP_1280x800_60_RB - (u8)ulRefreshRateIndex);
else // 1024
ModeIdx = (ASTDP_1280x1024_60 + (u8)ulRefreshRateIndex);
break;
case 1360:
case 1366:
ModeIdx = ASTDP_1366x768_60;
break;
case 1440:
ModeIdx = (ASTDP_1440x900_60_RB - (u8)ulRefreshRateIndex);
break;
case 1600:
if (crtc->mode.crtc_vdisplay == 900)
ModeIdx =
(ASTDP_1600x900_60_RB - (u8)ulRefreshRateIndex);
else //1200
ModeIdx = ASTDP_1600x1200_60;
break;
case 1680:
ModeIdx = (ASTDP_1680x1050_60_RB - (u8)ulRefreshRateIndex);
break;
case 1920:
if (crtc->mode.crtc_vdisplay == 1080)
ModeIdx = ASTDP_1920x1080_60;
else //1200
ModeIdx = ASTDP_1920x1200_60;
break;
default:
return;
}
/*
* CRE0[7:0]: MISC0 (0x00: 18-bpp or 0x20: 24-bpp)
* CRE1[7:0]: MISC1 (default: 0x00)
* CRE2[7:0]: video format index (0x00 ~ 0x20 or 0x40 ~ 0x50)
*/
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE0,
ASTDP_AND_CLEAR_MASK, ASTDP_MISC0_24bpp);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE1,
ASTDP_AND_CLEAR_MASK, ASTDP_MISC1);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE2,
ASTDP_AND_CLEAR_MASK, ModeIdx);
}

View File

@ -0,0 +1,429 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include "ast_drv.h"
MODULE_FIRMWARE("ast_dp501_fw.bin");
static void ast_release_firmware(void *data)
{
struct ast_private *ast = data;
release_firmware(ast->dp501_fw);
ast->dp501_fw = NULL;
}
static int ast_load_dp501_microcode(struct drm_device *dev)
{
struct ast_private *ast = to_ast_private(dev);
int ret;
ret = request_firmware(&ast->dp501_fw, "ast_dp501_fw.bin", dev->dev);
if (ret)
return ret;
return devm_add_action_or_reset(dev->dev, ast_release_firmware, ast);
}
static void send_ack(struct ast_private *ast)
{
u8 sendack;
sendack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0xff);
sendack |= 0x80;
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0x00, sendack);
}
static void send_nack(struct ast_private *ast)
{
u8 sendack;
sendack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0xff);
sendack &= ~0x80;
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0x00, sendack);
}
static bool wait_ack(struct ast_private *ast)
{
u8 waitack;
u32 retry = 0;
do {
waitack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd2,
0xff);
waitack &= 0x80;
udelay(100);
} while ((!waitack) && (retry++ < 1000));
if (retry < 1000)
return true;
else
return false;
}
static bool wait_nack(struct ast_private *ast)
{
u8 waitack;
u32 retry = 0;
do {
waitack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd2,
0xff);
waitack &= 0x80;
udelay(100);
} while ((waitack) && (retry++ < 1000));
if (retry < 1000)
return true;
else
return false;
}
static void set_cmd_trigger(struct ast_private *ast)
{
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, ~0x40, 0x40);
}
static void clear_cmd_trigger(struct ast_private *ast)
{
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, ~0x40, 0x00);
}
static bool ast_write_cmd(struct drm_device *dev, u8 data)
{
struct ast_private *ast = to_ast_private(dev);
int retry = 0;
if (wait_nack(ast)) {
send_nack(ast);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9a, 0x00, data);
send_ack(ast);
set_cmd_trigger(ast);
do {
if (wait_ack(ast)) {
clear_cmd_trigger(ast);
send_nack(ast);
return true;
}
} while (retry++ < 100);
}
clear_cmd_trigger(ast);
send_nack(ast);
return false;
}
static bool ast_write_data(struct drm_device *dev, u8 data)
{
struct ast_private *ast = to_ast_private(dev);
if (wait_nack(ast)) {
send_nack(ast);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9a, 0x00, data);
send_ack(ast);
if (wait_ack(ast)) {
send_nack(ast);
return true;
}
}
send_nack(ast);
return false;
}
void ast_set_dp501_video_output(struct drm_device *dev, u8 mode)
{
ast_write_cmd(dev, 0x40);
ast_write_data(dev, mode);
/*
* msleep() with an argument < 20ms can sleep for up to 20ms;
* see Documentation/timers/timers-howto.rst
*/
msleep(20);
}
static u32 get_fw_base(struct ast_private *ast)
{
return ast_mindwm(ast, 0x1e6e2104) & 0x7fffffff;
}
bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size)
{
struct ast_private *ast = to_ast_private(dev);
u32 i, data;
u32 boot_address;
if (ast->config_mode != ast_use_p2a)
return false;
data = ast_mindwm(ast, 0x1e6e2100) & 0x01;
if (data) {
boot_address = get_fw_base(ast);
for (i = 0; i < size; i += 4)
*(u32 *)(addr + i) = ast_mindwm(ast, boot_address + i);
return true;
}
return false;
}
static bool ast_launch_m68k(struct drm_device *dev)
{
struct ast_private *ast = to_ast_private(dev);
u32 i, data, len = 0;
u32 boot_address;
u8 *fw_addr = NULL;
u8 jreg;
if (ast->config_mode != ast_use_p2a)
return false;
data = ast_mindwm(ast, 0x1e6e2100) & 0x01;
if (!data) {
if (ast->dp501_fw_addr) {
fw_addr = ast->dp501_fw_addr;
len = 32 * 1024;
} else {
if (!ast->dp501_fw && ast_load_dp501_microcode(dev) < 0)
return false;
fw_addr = (u8 *)ast->dp501_fw->data;
len = ast->dp501_fw->size;
}
/* Get BootAddress */
ast_moutdwm(ast, 0x1e6e2000, 0x1688a8a8);
data = ast_mindwm(ast, 0x1e6e0004);
switch (data & 0x03) {
case 0:
boot_address = 0x44000000;
break;
default:
case 1:
boot_address = 0x48000000;
break;
case 2:
boot_address = 0x50000000;
break;
case 3:
boot_address = 0x60000000;
break;
}
boot_address -= 0x200000; /* -2MB */
/* copy image to buffer */
for (i = 0; i < len; i += 4) {
data = *(u32 *)(fw_addr + i);
ast_moutdwm(ast, boot_address + i, data);
}
/* Init SCU */
ast_moutdwm(ast, 0x1e6e2000, 0x1688a8a8);
/* Launch FW */
ast_moutdwm(ast, 0x1e6e2104, 0x80000000 + boot_address);
ast_moutdwm(ast, 0x1e6e2100, 1);
/* Update Scratch */
data = ast_mindwm(ast, 0x1e6e2040) &
0xfffff1ff; /* D[11:9] = 100b: UEFI handling */
data |= 0x800;
ast_moutdwm(ast, 0x1e6e2040, data);
jreg = ast_get_index_reg_mask(
ast, AST_IO_CRTC_PORT, 0x99,
0xfc); /* D[1:0]: Reserved Video Buffer */
jreg |= 0x02;
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x99, jreg);
}
return true;
}
bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata)
{
struct ast_private *ast = to_ast_private(dev);
u32 i, boot_address, offset, data;
u32 *pEDIDidx;
if (ast->config_mode == ast_use_p2a) {
boot_address = get_fw_base(ast);
/* validate FW version */
offset = AST_DP501_GBL_VERSION;
data = ast_mindwm(ast, boot_address + offset);
if ((data & AST_DP501_FW_VERSION_MASK) !=
AST_DP501_FW_VERSION_1)
return false;
/* validate PnP Monitor */
offset = AST_DP501_PNPMONITOR;
data = ast_mindwm(ast, boot_address + offset);
if (!(data & AST_DP501_PNP_CONNECTED))
return false;
/* Read EDID */
offset = AST_DP501_EDID_DATA;
for (i = 0; i < 128; i += 4) {
data = ast_mindwm(ast, boot_address + offset + i);
pEDIDidx = (u32 *)(ediddata + i);
*pEDIDidx = data;
}
} else {
if (!ast->dp501_fw_buf)
return false;
/* dummy read */
offset = 0x0000;
data = readl(ast->dp501_fw_buf + offset);
/* validate FW version */
offset = AST_DP501_GBL_VERSION;
data = readl(ast->dp501_fw_buf + offset);
if ((data & AST_DP501_FW_VERSION_MASK) !=
AST_DP501_FW_VERSION_1)
return false;
/* validate PnP Monitor */
offset = AST_DP501_PNPMONITOR;
data = readl(ast->dp501_fw_buf + offset);
if (!(data & AST_DP501_PNP_CONNECTED))
return false;
/* Read EDID */
offset = AST_DP501_EDID_DATA;
for (i = 0; i < 128; i += 4) {
data = readl(ast->dp501_fw_buf + offset + i);
pEDIDidx = (u32 *)(ediddata + i);
*pEDIDidx = data;
}
}
return true;
}
static bool ast_init_dvo(struct drm_device *dev)
{
struct ast_private *ast = to_ast_private(dev);
u8 jreg;
u32 data;
ast_write32(ast, 0xf004, 0x1e6e0000);
ast_write32(ast, 0xf000, 0x1);
ast_write32(ast, 0x12000, 0x1688a8a8);
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
if (!(jreg & 0x80)) {
/* Init SCU DVO Settings */
data = ast_read32(ast, 0x12008);
/* delay phase */
data &= 0xfffff8ff;
data |= 0x00000500;
ast_write32(ast, 0x12008, data);
if (ast->chip == AST2300) {
data = ast_read32(ast, 0x12084);
/* multi-pins for DVO single-edge */
data |= 0xfffe0000;
ast_write32(ast, 0x12084, data);
data = ast_read32(ast, 0x12088);
/* multi-pins for DVO single-edge */
data |= 0x000fffff;
ast_write32(ast, 0x12088, data);
data = ast_read32(ast, 0x12090);
/* multi-pins for DVO single-edge */
data &= 0xffffffcf;
data |= 0x00000020;
ast_write32(ast, 0x12090, data);
} else { /* AST2400 */
data = ast_read32(ast, 0x12088);
/* multi-pins for DVO single-edge */
data |= 0x30000000;
ast_write32(ast, 0x12088, data);
data = ast_read32(ast, 0x1208c);
/* multi-pins for DVO single-edge */
data |= 0x000000cf;
ast_write32(ast, 0x1208c, data);
data = ast_read32(ast, 0x120a4);
/* multi-pins for DVO single-edge */
data |= 0xffff0000;
ast_write32(ast, 0x120a4, data);
data = ast_read32(ast, 0x120a8);
/* multi-pins for DVO single-edge */
data |= 0x0000000f;
ast_write32(ast, 0x120a8, data);
data = ast_read32(ast, 0x12094);
/* multi-pins for DVO single-edge */
data |= 0x00000002;
ast_write32(ast, 0x12094, data);
}
}
/* Force to DVO */
data = ast_read32(ast, 0x1202c);
data &= 0xfffbffff;
ast_write32(ast, 0x1202c, data);
/* Init VGA DVO Settings */
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80);
return true;
}
static void ast_init_analog(struct drm_device *dev)
{
struct ast_private *ast = to_ast_private(dev);
u32 data;
/*
* Set DAC source to VGA mode in SCU2C via the P2A
* bridge. First configure the P2U to target the SCU
* in case it isn't at this stage.
*/
ast_write32(ast, 0xf004, 0x1e6e0000);
ast_write32(ast, 0xf000, 0x1);
/* Then unlock the SCU with the magic password */
ast_write32(ast, 0x12000, 0x1688a8a8);
ast_write32(ast, 0x12000, 0x1688a8a8);
ast_write32(ast, 0x12000, 0x1688a8a8);
/* Finally, clear bits [17:16] of SCU2c */
data = ast_read32(ast, 0x1202c);
data &= 0xfffcffff;
ast_write32(ast, 0x1202c, data);
/* Disable DVO */
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x00);
}
void ast_init_3rdtx(struct drm_device *dev)
{
struct ast_private *ast = to_ast_private(dev);
u8 jreg;
if (ast->chip == AST2300 || ast->chip == AST2400) {
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1,
0xff);
switch (jreg & 0x0e) {
case 0x04:
ast_init_dvo(dev);
break;
case 0x08:
ast_launch_m68k(dev);
break;
case 0x0c:
ast_init_dvo(dev);
break;
default:
if (ast->tx_chip_types & BIT(AST_TX_SIL164))
ast_init_dvo(dev);
else
ast_init_analog(dev);
}
}
}

View File

@ -0,0 +1,125 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef AST_DRAM_TABLES_H
#define AST_DRAM_TABLES_H
/* DRAM timing tables */
struct ast_dramstruct {
u16 index;
u32 data;
};
static const struct ast_dramstruct ast2000_dram_table_data[] = {
{ 0x0108, 0x00000000 }, { 0x0120, 0x00004a21 }, { 0xFF00, 0x00000043 },
{ 0x0000, 0xFFFFFFFF }, { 0x0004, 0x00000089 }, { 0x0008, 0x22331353 },
{ 0x000C, 0x0d07000b }, { 0x0010, 0x11113333 }, { 0x0020, 0x00110350 },
{ 0x0028, 0x1e0828f0 }, { 0x0024, 0x00000001 }, { 0x001C, 0x00000000 },
{ 0x0014, 0x00000003 }, { 0xFF00, 0x00000043 }, { 0x0018, 0x00000131 },
{ 0x0014, 0x00000001 }, { 0xFF00, 0x00000043 }, { 0x0018, 0x00000031 },
{ 0x0014, 0x00000001 }, { 0xFF00, 0x00000043 }, { 0x0028, 0x1e0828f1 },
{ 0x0024, 0x00000003 }, { 0x002C, 0x1f0f28fb }, { 0x0030, 0xFFFFFE01 },
{ 0xFFFF, 0xFFFFFFFF }
};
static const struct ast_dramstruct ast1100_dram_table_data[] = {
{ 0x2000, 0x1688a8a8 }, { 0x2020, 0x000041f0 }, { 0xFF00, 0x00000043 },
{ 0x0000, 0xfc600309 }, { 0x006C, 0x00909090 }, { 0x0064, 0x00050000 },
{ 0x0004, 0x00000585 }, { 0x0008, 0x0011030f }, { 0x0010, 0x22201724 },
{ 0x0018, 0x1e29011a }, { 0x0020, 0x00c82222 }, { 0x0014, 0x01001523 },
{ 0x001C, 0x1024010d }, { 0x0024, 0x00cb2522 }, { 0x0038, 0xffffff82 },
{ 0x003C, 0x00000000 }, { 0x0040, 0x00000000 }, { 0x0044, 0x00000000 },
{ 0x0048, 0x00000000 }, { 0x004C, 0x00000000 }, { 0x0050, 0x00000000 },
{ 0x0054, 0x00000000 }, { 0x0058, 0x00000000 }, { 0x005C, 0x00000000 },
{ 0x0060, 0x032aa02a }, { 0x0064, 0x002d3000 }, { 0x0068, 0x00000000 },
{ 0x0070, 0x00000000 }, { 0x0074, 0x00000000 }, { 0x0078, 0x00000000 },
{ 0x007C, 0x00000000 }, { 0x0034, 0x00000001 }, { 0xFF00, 0x00000043 },
{ 0x002C, 0x00000732 }, { 0x0030, 0x00000040 }, { 0x0028, 0x00000005 },
{ 0x0028, 0x00000007 }, { 0x0028, 0x00000003 }, { 0x0028, 0x00000001 },
{ 0x000C, 0x00005a08 }, { 0x002C, 0x00000632 }, { 0x0028, 0x00000001 },
{ 0x0030, 0x000003c0 }, { 0x0028, 0x00000003 }, { 0x0030, 0x00000040 },
{ 0x0028, 0x00000003 }, { 0x000C, 0x00005a21 }, { 0x0034, 0x00007c03 },
{ 0x0120, 0x00004c41 }, { 0xffff, 0xffffffff },
};
static const struct ast_dramstruct ast2100_dram_table_data[] = {
{ 0x2000, 0x1688a8a8 }, { 0x2020, 0x00004120 }, { 0xFF00, 0x00000043 },
{ 0x0000, 0xfc600309 }, { 0x006C, 0x00909090 }, { 0x0064, 0x00070000 },
{ 0x0004, 0x00000489 }, { 0x0008, 0x0011030f }, { 0x0010, 0x32302926 },
{ 0x0018, 0x274c0122 }, { 0x0020, 0x00ce2222 }, { 0x0014, 0x01001523 },
{ 0x001C, 0x1024010d }, { 0x0024, 0x00cb2522 }, { 0x0038, 0xffffff82 },
{ 0x003C, 0x00000000 }, { 0x0040, 0x00000000 }, { 0x0044, 0x00000000 },
{ 0x0048, 0x00000000 }, { 0x004C, 0x00000000 }, { 0x0050, 0x00000000 },
{ 0x0054, 0x00000000 }, { 0x0058, 0x00000000 }, { 0x005C, 0x00000000 },
{ 0x0060, 0x0f2aa02a }, { 0x0064, 0x003f3005 }, { 0x0068, 0x02020202 },
{ 0x0070, 0x00000000 }, { 0x0074, 0x00000000 }, { 0x0078, 0x00000000 },
{ 0x007C, 0x00000000 }, { 0x0034, 0x00000001 }, { 0xFF00, 0x00000043 },
{ 0x002C, 0x00000942 }, { 0x0030, 0x00000040 }, { 0x0028, 0x00000005 },
{ 0x0028, 0x00000007 }, { 0x0028, 0x00000003 }, { 0x0028, 0x00000001 },
{ 0x000C, 0x00005a08 }, { 0x002C, 0x00000842 }, { 0x0028, 0x00000001 },
{ 0x0030, 0x000003c0 }, { 0x0028, 0x00000003 }, { 0x0030, 0x00000040 },
{ 0x0028, 0x00000003 }, { 0x000C, 0x00005a21 }, { 0x0034, 0x00007c03 },
{ 0x0120, 0x00005061 }, { 0xffff, 0xffffffff },
};
/*
* AST2500 DRAM settings modules
*/
#define REGTBL_NUM 17
#define REGIDX_010 0
#define REGIDX_014 1
#define REGIDX_018 2
#define REGIDX_020 3
#define REGIDX_024 4
#define REGIDX_02C 5
#define REGIDX_030 6
#define REGIDX_214 7
#define REGIDX_2E0 8
#define REGIDX_2E4 9
#define REGIDX_2E8 10
#define REGIDX_2EC 11
#define REGIDX_2F0 12
#define REGIDX_2F4 13
#define REGIDX_2F8 14
#define REGIDX_RFC 15
#define REGIDX_PLL 16
static const u32 ast2500_ddr3_1600_timing_table[REGTBL_NUM] = {
0x64604D38, /* 0x010 */
0x29690599, /* 0x014 */
0x00000300, /* 0x018 */
0x00000000, /* 0x020 */
0x00000000, /* 0x024 */
0x02181E70, /* 0x02C */
0x00000040, /* 0x030 */
0x00000024, /* 0x214 */
0x02001300, /* 0x2E0 */
0x0E0000A0, /* 0x2E4 */
0x000E001B, /* 0x2E8 */
0x35B8C105, /* 0x2EC */
0x08090408, /* 0x2F0 */
0x9B000800, /* 0x2F4 */
0x0E400A00, /* 0x2F8 */
0x9971452F, /* tRFC */
0x000071C1 /* PLL */
};
static const u32 ast2500_ddr4_1600_timing_table[REGTBL_NUM] = {
0x63604E37, /* 0x010 */
0xE97AFA99, /* 0x014 */
0x00019000, /* 0x018 */
0x08000000, /* 0x020 */
0x00000400, /* 0x024 */
0x00000410, /* 0x02C */
0x00000101, /* 0x030 */
0x00000024, /* 0x214 */
0x03002900, /* 0x2E0 */
0x0E0000A0, /* 0x2E4 */
0x000E001C, /* 0x2E8 */
0x35B8C106, /* 0x2EC */
0x08080607, /* 0x2F0 */
0x9B000900, /* 0x2F4 */
0x0E400A00, /* 0x2F8 */
0x99714545, /* tRFC */
0x000071C1 /* PLL */
};
#endif

View File

@ -0,0 +1,231 @@
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*/
/*
* Authors: Dave Airlie <airlied@redhat.com>
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_fbdev_generic.h>
#include "ast_drv.h"
static int ast_modeset = -1;
MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
module_param_named(modeset, ast_modeset, int, 0400);
/*
* DRM driver
*/
DEFINE_DRM_GEM_FOPS(ast_fops);
static const struct drm_driver ast_driver = {
	.driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
	.fops = &ast_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
	DRM_GEM_VRAM_DRIVER
};
/*
* PCI driver
*/
#define PCI_VENDOR_ASPEED 0x1a03
#define AST_VGA_DEVICE(id, info) \
{ .class = PCI_BASE_CLASS_DISPLAY << 16, \
.class_mask = 0xff0000, \
.vendor = PCI_VENDOR_ASPEED, \
.device = id, \
.subvendor = PCI_ANY_ID, \
.subdevice = PCI_ANY_ID, \
.driver_data = (unsigned long)info }
static const struct pci_device_id ast_pciidlist[] = {
AST_VGA_DEVICE(PCI_CHIP_AST2000, NULL),
AST_VGA_DEVICE(PCI_CHIP_AST2100, NULL),
{ 0, 0, 0 },
};
MODULE_DEVICE_TABLE(pci, ast_pciidlist);
static int ast_remove_conflicting_framebuffers(struct pci_dev *pdev)
{
resource_size_t base, size;
base = pci_resource_start(pdev, 0);
size = pci_resource_len(pdev, 0);
return drm_aperture_remove_conflicting_framebuffers(base, size,
&ast_driver);
}
static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct ast_private *ast;
struct drm_device *dev;
int ret;
ret = ast_remove_conflicting_framebuffers(pdev);
if (ret)
return ret;
ret = pcim_enable_device(pdev);
if (ret)
return ret;
ast = ast_device_create(&ast_driver, pdev, ent->driver_data);
if (IS_ERR(ast))
return PTR_ERR(ast);
dev = &ast->base;
ret = drm_dev_register(dev, ent->driver_data);
if (ret)
return ret;
drm_fbdev_generic_setup(dev, 32);
return 0;
}
static void ast_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
drm_dev_unregister(dev);
drm_atomic_helper_shutdown(dev);
}
static int ast_drm_freeze(struct drm_device *dev)
{
int error;
error = drm_mode_config_helper_suspend(dev);
if (error)
return error;
pci_save_state(to_pci_dev(dev->dev));
return 0;
}
static int ast_drm_thaw(struct drm_device *dev)
{
ast_post_gpu(dev);
return drm_mode_config_helper_resume(dev);
}
static int ast_drm_resume(struct drm_device *dev)
{
if (pci_enable_device(to_pci_dev(dev->dev)))
return -EIO;
return ast_drm_thaw(dev);
}
static int ast_pm_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *ddev = pci_get_drvdata(pdev);
int error;
error = ast_drm_freeze(ddev);
if (error)
return error;
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
static int ast_pm_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *ddev = pci_get_drvdata(pdev);
return ast_drm_resume(ddev);
}
static int ast_pm_freeze(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *ddev = pci_get_drvdata(pdev);
return ast_drm_freeze(ddev);
}
static int ast_pm_thaw(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *ddev = pci_get_drvdata(pdev);
return ast_drm_thaw(ddev);
}
static int ast_pm_poweroff(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *ddev = pci_get_drvdata(pdev);
return ast_drm_freeze(ddev);
}
static const struct dev_pm_ops ast_pm_ops = {
.suspend = ast_pm_suspend,
.resume = ast_pm_resume,
.freeze = ast_pm_freeze,
.thaw = ast_pm_thaw,
.poweroff = ast_pm_poweroff,
.restore = ast_pm_resume,
};
static struct pci_driver ast_pci_driver = {
.name = DRIVER_NAME,
.id_table = ast_pciidlist,
.probe = ast_pci_probe,
.remove = ast_pci_remove,
.driver.pm = &ast_pm_ops,
};
drm_module_pci_driver_if_modeset(ast_pci_driver, ast_modeset);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");

View File

@ -0,0 +1,528 @@
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*/
/*
* Authors: Dave Airlie <airlied@redhat.com>
*/
#ifndef __AST_DRV_H__
#define __AST_DRV_H__
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/io.h>
#include <linux/types.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
#include <drm/drm_mode.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_fb_helper.h>
#define DRIVER_AUTHOR "Dave Airlie"
#define DRIVER_NAME "ast"
#define DRIVER_DESC "AST"
#define DRIVER_DATE "20120228"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 1
#define DRIVER_PATCHLEVEL 0
#define PCI_CHIP_AST2000 0x2000
#define PCI_CHIP_AST2100 0x2010
enum ast_chip {
AST2000,
AST2100,
AST1100,
AST2200,
AST2150,
AST2300,
AST2400,
AST2500,
AST2600,
};
enum ast_tx_chip {
AST_TX_NONE,
AST_TX_SIL164,
AST_TX_DP501,
AST_TX_ASTDP,
};
#define AST_TX_NONE_BIT BIT(AST_TX_NONE)
#define AST_TX_SIL164_BIT BIT(AST_TX_SIL164)
#define AST_TX_DP501_BIT BIT(AST_TX_DP501)
#define AST_TX_ASTDP_BIT BIT(AST_TX_ASTDP)
#define AST_DRAM_512Mx16 0
#define AST_DRAM_1Gx16 1
#define AST_DRAM_512Mx32 2
#define AST_DRAM_1Gx32 3
#define AST_DRAM_2Gx16 6
#define AST_DRAM_4Gx16 7
#define AST_DRAM_8Gx16 8
/*
* Hardware cursor
*/
#define AST_MAX_HWC_WIDTH 64
#define AST_MAX_HWC_HEIGHT 64
#define AST_HWC_SIZE (AST_MAX_HWC_WIDTH * AST_MAX_HWC_HEIGHT * 2)
#define AST_HWC_SIGNATURE_SIZE 32
/* define for signature structure */
#define AST_HWC_SIGNATURE_CHECKSUM 0x00
#define AST_HWC_SIGNATURE_SizeX 0x04
#define AST_HWC_SIGNATURE_SizeY 0x08
#define AST_HWC_SIGNATURE_X 0x0C
#define AST_HWC_SIGNATURE_Y 0x10
#define AST_HWC_SIGNATURE_HOTSPOTX 0x14
#define AST_HWC_SIGNATURE_HOTSPOTY 0x18
/*
* Planes
*/
struct ast_plane {
struct drm_plane base;
struct drm_gem_vram_object *gbo;
struct iosys_map map;
u64 off;
};
static inline struct ast_plane *to_ast_plane(struct drm_plane *plane)
{
return container_of(plane, struct ast_plane, base);
}
/*
* Connector with i2c channel
*/
struct ast_i2c_chan {
struct i2c_adapter adapter;
struct drm_device *dev;
struct i2c_algo_bit_data bit;
};
struct ast_vga_connector {
struct drm_connector base;
struct ast_i2c_chan *i2c;
};
static inline struct ast_vga_connector *
to_ast_vga_connector(struct drm_connector *connector)
{
return container_of(connector, struct ast_vga_connector, base);
}
struct ast_sil164_connector {
struct drm_connector base;
struct ast_i2c_chan *i2c;
};
static inline struct ast_sil164_connector *
to_ast_sil164_connector(struct drm_connector *connector)
{
return container_of(connector, struct ast_sil164_connector, base);
}
/*
* Device
*/
struct ast_private {
struct drm_device base;
struct mutex ioregs_lock; /* Protects access to I/O registers in ioregs */
void __iomem *regs;
void __iomem *ioregs;
void __iomem *dp501_fw_buf;
enum ast_chip chip;
bool vga2_clone;
uint32_t dram_bus_width;
uint32_t dram_type;
uint32_t mclk;
struct drm_plane primary_plane;
struct ast_plane cursor_plane;
struct drm_crtc crtc;
struct {
struct {
struct drm_encoder encoder;
struct ast_vga_connector vga_connector;
} vga;
struct {
struct drm_encoder encoder;
struct ast_sil164_connector sil164_connector;
} sil164;
struct {
struct drm_encoder encoder;
struct drm_connector connector;
} dp501;
struct {
struct drm_encoder encoder;
struct drm_connector connector;
} astdp;
} output;
bool support_wide_screen;
enum { ast_use_p2a, ast_use_dt, ast_use_defaults } config_mode;
unsigned long tx_chip_types; /* bitfield of enum ast_tx_chip */
u8 *dp501_fw_addr;
const struct firmware *dp501_fw; /* dp501 fw */
};
static inline struct ast_private *to_ast_private(struct drm_device *dev)
{
return container_of(dev, struct ast_private, base);
}
struct ast_private *ast_device_create(const struct drm_driver *drv,
struct pci_dev *pdev,
unsigned long flags);
#define AST_IO_AR_PORT_WRITE (0x40)
#define AST_IO_MISC_PORT_WRITE (0x42)
#define AST_IO_VGA_ENABLE_PORT (0x43)
#define AST_IO_SEQ_PORT (0x44)
#define AST_IO_DAC_INDEX_READ (0x47)
#define AST_IO_DAC_INDEX_WRITE (0x48)
#define AST_IO_DAC_DATA (0x49)
#define AST_IO_GR_PORT (0x4E)
#define AST_IO_CRTC_PORT (0x54)
#define AST_IO_INPUT_STATUS1_READ (0x5A)
#define AST_IO_MISC_PORT_READ (0x4C)
#define AST_IO_MM_OFFSET (0x380)
#define AST_IO_VGAIR1_VREFRESH BIT(3)
#define AST_IO_VGACRCB_HWC_ENABLED BIT(1)
#define AST_IO_VGACRCB_HWC_16BPP \
BIT(0) /* set: ARGB4444, cleared: 2bpp palette */
static inline u8 ast_read8(struct ast_private *ast, u32 reg)
{
u8 val = 0;
val = ioread8(ast->regs + reg);
return val;
}
static inline u16 ast_read16(struct ast_private *ast, u32 reg)
{
u16 val = 0;
val = ioread16(ast->regs + reg);
return val;
}
static inline u32 ast_read32(struct ast_private *ast, u32 reg)
{
u32 val = 0;
val = ioread32(ast->regs + reg);
return val;
}
static inline u8 ast_io_read8(struct ast_private *ast, u32 reg)
{
u8 val = 0;
val = ioread8(ast->ioregs + reg);
return val;
}
static inline u16 ast_io_read16(struct ast_private *ast, u32 reg)
{
u16 val = 0;
val = ioread16(ast->ioregs + reg);
return val;
}
static inline u32 ast_io_read32(struct ast_private *ast, u32 reg)
{
u32 val = 0;
val = ioread32(ast->ioregs + reg);
return val;
}
#define __ast_write(x) \
static inline void ast_write##x(struct ast_private *ast, u32 reg, \
u##x val) \
{ \
iowrite##x(val, ast->regs + reg); \
}
__ast_write(8);
__ast_write(16);
__ast_write(32);
#define __ast_io_write(x) \
static inline void ast_io_write##x(struct ast_private *ast, u32 reg, \
u##x val) \
{ \
iowrite##x(val, ast->ioregs + reg); \
}
__ast_io_write(8);
__ast_io_write(16);
#undef __ast_io_write
static inline void ast_set_index_reg(struct ast_private *ast, uint32_t base,
uint8_t index, uint8_t val)
{
ast_io_write16(ast, base, ((u16)val << 8) | index);
}
void ast_set_index_reg_mask(struct ast_private *ast, uint32_t base,
uint8_t index, uint8_t mask, uint8_t val);
uint8_t ast_get_index_reg(struct ast_private *ast, uint32_t base,
uint8_t index);
uint8_t ast_get_index_reg_mask(struct ast_private *ast, uint32_t base,
uint8_t index, uint8_t mask);
static inline void ast_open_key(struct ast_private *ast)
{
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x80, 0xA8);
}
#define AST_VIDMEM_SIZE_8M 0x00800000
#define AST_VIDMEM_SIZE_16M 0x01000000
#define AST_VIDMEM_SIZE_32M 0x02000000
#define AST_VIDMEM_SIZE_64M 0x04000000
#define AST_VIDMEM_SIZE_128M 0x08000000
#define AST_VIDMEM_DEFAULT_SIZE AST_VIDMEM_SIZE_8M
struct ast_vbios_stdtable {
u8 misc;
u8 seq[4];
u8 crtc[25];
u8 ar[20];
u8 gr[9];
};
struct ast_vbios_enhtable {
u32 ht;
u32 hde;
u32 hfp;
u32 hsync;
u32 vt;
u32 vde;
u32 vfp;
u32 vsync;
u32 dclk_index;
u32 flags;
u32 refresh_rate;
u32 refresh_rate_index;
u32 mode_id;
};
struct ast_vbios_dclk_info {
u8 param1;
u8 param2;
u8 param3;
};
struct ast_vbios_mode_info {
const struct ast_vbios_stdtable *std_table;
const struct ast_vbios_enhtable *enh_table;
};
struct ast_crtc_state {
struct drm_crtc_state base;
/* Last known format of primary plane */
const struct drm_format_info *format;
struct ast_vbios_mode_info vbios_mode_info;
};
#define to_ast_crtc_state(state) \
container_of(state, struct ast_crtc_state, base)
int ast_mode_config_init(struct ast_private *ast);
#define AST_MM_ALIGN_SHIFT 4
#define AST_MM_ALIGN_MASK ((1 << AST_MM_ALIGN_SHIFT) - 1)
#define AST_DP501_FW_VERSION_MASK GENMASK(7, 4)
#define AST_DP501_FW_VERSION_1 BIT(4)
#define AST_DP501_PNP_CONNECTED BIT(1)
#define AST_DP501_DEFAULT_DCLK 65
#define AST_DP501_GBL_VERSION 0xf000
#define AST_DP501_PNPMONITOR 0xf010
#define AST_DP501_LINKRATE 0xf014
#define AST_DP501_EDID_DATA 0xf020
/* Defines for SoC scratch regs */
#define COPROCESSOR_LAUNCH BIT(5)
/*
* Display Transmitter Type:
*/
#define TX_TYPE_MASK GENMASK(3, 1)
#define NO_TX (0 << 1)
#define ITE66121_VBIOS_TX (1 << 1)
#define SI164_VBIOS_TX (2 << 1)
#define CH7003_VBIOS_TX (3 << 1)
#define DP501_VBIOS_TX (4 << 1)
#define ANX9807_VBIOS_TX (5 << 1)
#define TX_FW_EMBEDDED_FW_TX (6 << 1)
#define ASTDP_DPMCU_TX (7 << 1)
#define AST_VRAM_INIT_STATUS_MASK GENMASK(7, 6)
//#define AST_VRAM_INIT_BY_BMC BIT(7)
//#define AST_VRAM_INIT_READY BIT(6)
/* Defines for SoC scratch regs used on ASTDP */
#define AST_DP_PHY_SLEEP BIT(4)
#define AST_DP_VIDEO_ENABLE BIT(0)
#define AST_DP_POWER_ON true
#define AST_DP_POWER_OFF false
/*
* CRD1[b5]: DP MCU FW is executing
* CRDC[b0]: DP link success
* CRDF[b0]: DP HPD
* CRE5[b0]: Host reading EDID process is done
*/
#define ASTDP_MCU_FW_EXECUTING BIT(5)
#define ASTDP_LINK_SUCCESS BIT(0)
#define ASTDP_HPD BIT(0)
#define ASTDP_HOST_EDID_READ_DONE BIT(0)
#define ASTDP_HOST_EDID_READ_DONE_MASK GENMASK(0, 0)
/*
* CRB8[b1]: Enable VSYNC off
* CRB8[b0]: Enable HSYNC off
*/
#define AST_DPMS_VSYNC_OFF BIT(1)
#define AST_DPMS_HSYNC_OFF BIT(0)
/*
* CRDF[b4]: Mirror of AST_DP_VIDEO_ENABLE
* Precondition: A. ~AST_DP_PHY_SLEEP &&
* B. DP_HPD &&
* C. DP_LINK_SUCCESS
*/
#define ASTDP_MIRROR_VIDEO_ENABLE BIT(4)
#define ASTDP_EDID_READ_POINTER_MASK GENMASK(7, 0)
#define ASTDP_EDID_VALID_FLAG_MASK GENMASK(0, 0)
#define ASTDP_EDID_READ_DATA_MASK GENMASK(7, 0)
/*
* ASTDP setmode registers:
* CRE0[7:0]: MISC0 (0x00: 18-bpp or 0x20: 24-bpp)
* CRE1[7:0]: MISC1 (default: 0x00)
* CRE2[7:0]: video format index (0x00 ~ 0x20 or 0x40 ~ 0x50)
*/
#define ASTDP_MISC0_24bpp BIT(5)
#define ASTDP_MISC1 0
#define ASTDP_AND_CLEAR_MASK 0x00
/*
* ASTDP resolution table:
* EX: ASTDP_A_B_C:
* A: Resolution
* B: Refresh Rate
* C: Misc information, such as CVT, Reduced Blanking
*/
#define ASTDP_640x480_60 0x00
#define ASTDP_640x480_72 0x01
#define ASTDP_640x480_75 0x02
#define ASTDP_640x480_85 0x03
#define ASTDP_800x600_56 0x04
#define ASTDP_800x600_60 0x05
#define ASTDP_800x600_72 0x06
#define ASTDP_800x600_75 0x07
#define ASTDP_800x600_85 0x08
#define ASTDP_1024x768_60 0x09
#define ASTDP_1024x768_70 0x0A
#define ASTDP_1024x768_75 0x0B
#define ASTDP_1024x768_85 0x0C
#define ASTDP_1280x1024_60 0x0D
#define ASTDP_1280x1024_75 0x0E
#define ASTDP_1280x1024_85 0x0F
#define ASTDP_1600x1200_60 0x10
#define ASTDP_320x240_60 0x11
#define ASTDP_400x300_60 0x12
#define ASTDP_512x384_60 0x13
#define ASTDP_1920x1200_60 0x14
#define ASTDP_1920x1080_60 0x15
#define ASTDP_1280x800_60 0x16
#define ASTDP_1280x800_60_RB 0x17
#define ASTDP_1440x900_60 0x18
#define ASTDP_1440x900_60_RB 0x19
#define ASTDP_1680x1050_60 0x1A
#define ASTDP_1680x1050_60_RB 0x1B
#define ASTDP_1600x900_60 0x1C
#define ASTDP_1600x900_60_RB 0x1D
#define ASTDP_1366x768_60 0x1E
#define ASTDP_1152x864_75 0x1F
int ast_mm_init(struct ast_private *ast);
/* ast post */
void ast_enable_vga(struct drm_device *dev);
void ast_enable_mmio(struct drm_device *dev);
bool ast_is_vga_enabled(struct drm_device *dev);
void ast_post_gpu(struct drm_device *dev);
u32 ast_mindwm(struct ast_private *ast, u32 r);
void ast_moutdwm(struct ast_private *ast, u32 r, u32 v);
void ast_patch_ahb_2500(struct ast_private *ast);
/* ast dp501 */
void ast_set_dp501_video_output(struct drm_device *dev, u8 mode);
bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size);
bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata);
u8 ast_get_dp501_max_clk(struct drm_device *dev);
void ast_init_3rdtx(struct drm_device *dev);
/* ast_i2c.c */
struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev);
/* aspeed DP */
int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata);
void ast_dp_launch(struct drm_device *dev, u8 bPower);
void ast_dp_power_on_off(struct drm_device *dev, bool on);
void ast_dp_set_on_off(struct drm_device *dev, bool on);
void ast_dp_set_mode(struct drm_crtc *crtc,
struct ast_vbios_mode_info *vbios_mode);
#endif

View File

@ -0,0 +1,170 @@
// SPDX-License-Identifier: MIT
/*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include "ast_drv.h"
static void ast_i2c_setsda(void *i2c_priv, int data)
{
struct ast_i2c_chan *i2c = i2c_priv;
struct ast_private *ast = to_ast_private(i2c->dev);
int i;
u8 ujcrb7, jtemp;
for (i = 0; i < 0x10000; i++) {
ujcrb7 = ((data & 0x01) ? 0 : 1) << 2;
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf1,
ujcrb7);
jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7,
0x04);
if (ujcrb7 == jtemp)
break;
}
}
static void ast_i2c_setscl(void *i2c_priv, int clock)
{
struct ast_i2c_chan *i2c = i2c_priv;
struct ast_private *ast = to_ast_private(i2c->dev);
int i;
u8 ujcrb7, jtemp;
for (i = 0; i < 0x10000; i++) {
ujcrb7 = ((clock & 0x01) ? 0 : 1);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf4,
ujcrb7);
jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7,
0x01);
if (ujcrb7 == jtemp)
break;
}
}
static int ast_i2c_getsda(void *i2c_priv)
{
struct ast_i2c_chan *i2c = i2c_priv;
struct ast_private *ast = to_ast_private(i2c->dev);
uint32_t val, val2, count, pass;
count = 0;
pass = 0;
val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) &
0x01;
do {
val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7,
0x20) >>
5) &
0x01;
if (val == val2) {
pass++;
} else {
pass = 0;
val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT,
0xb7, 0x20) >>
5) &
0x01;
}
} while ((pass < 5) && (count++ < 0x10000));
return val & 1 ? 1 : 0;
}
static int ast_i2c_getscl(void *i2c_priv)
{
struct ast_i2c_chan *i2c = i2c_priv;
struct ast_private *ast = to_ast_private(i2c->dev);
uint32_t val, val2, count, pass;
count = 0;
pass = 0;
val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) &
0x01;
do {
val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7,
0x10) >>
4) &
0x01;
if (val == val2) {
pass++;
} else {
pass = 0;
val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT,
0xb7, 0x10) >>
4) &
0x01;
}
} while ((pass < 5) && (count++ < 0x10000));
return val & 1 ? 1 : 0;
}
static void ast_i2c_release(struct drm_device *dev, void *res)
{
struct ast_i2c_chan *i2c = res;
i2c_del_adapter(&i2c->adapter);
kfree(i2c);
}
struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev)
{
struct ast_i2c_chan *i2c;
int ret;
i2c = kzalloc(sizeof(struct ast_i2c_chan), GFP_KERNEL);
if (!i2c)
return NULL;
i2c->adapter.owner = THIS_MODULE;
i2c->adapter.class = I2C_CLASS_DDC;
i2c->adapter.dev.parent = dev->dev;
i2c->dev = dev;
i2c_set_adapdata(&i2c->adapter, i2c);
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
"AST i2c bit bus");
i2c->adapter.algo_data = &i2c->bit;
i2c->bit.udelay = 20;
i2c->bit.timeout = 2;
i2c->bit.data = i2c;
i2c->bit.setsda = ast_i2c_setsda;
i2c->bit.setscl = ast_i2c_setscl;
i2c->bit.getsda = ast_i2c_getsda;
i2c->bit.getscl = ast_i2c_getscl;
ret = i2c_bit_add_bus(&i2c->adapter);
if (ret) {
drm_err(dev, "Failed to register bit i2c\n");
goto out_kfree;
}
ret = drmm_add_action_or_reset(dev, ast_i2c_release, i2c);
if (ret)
return NULL;
return i2c;
out_kfree:
kfree(i2c);
return NULL;
}

View File

@ -0,0 +1,486 @@
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*/
/*
* Authors: Dave Airlie <airlied@redhat.com>
*/
#include <linux/pci.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_managed.h>
#include "ast_drv.h"
void ast_set_index_reg_mask(struct ast_private *ast, uint32_t base,
uint8_t index, uint8_t mask, uint8_t val)
{
u8 tmp;
ast_io_write8(ast, base, index);
tmp = (ast_io_read8(ast, base + 1) & mask) | val;
ast_set_index_reg(ast, base, index, tmp);
}
uint8_t ast_get_index_reg(struct ast_private *ast, uint32_t base, uint8_t index)
{
uint8_t ret;
ast_io_write8(ast, base, index);
ret = ast_io_read8(ast, base + 1);
return ret;
}
uint8_t ast_get_index_reg_mask(struct ast_private *ast, uint32_t base,
uint8_t index, uint8_t mask)
{
uint8_t ret;
ast_io_write8(ast, base, index);
ret = ast_io_read8(ast, base + 1) & mask;
return ret;
}
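/*
 * Usage sketch (illustrative, not part of this patch): the helpers above
 * implement the classic VGA indexed-register protocol: write the index to
 * the base port, then access the data at base + 1. A typical
 * read-modify-write, e.g. enabling the HW cursor bit in VGACRCB:
 */
static void ast_example_enable_hwc(struct ast_private *ast)
{
	/* Keep the bits selected by the mask, then OR in the new value. */
	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb,
			       (u8)~AST_IO_VGACRCB_HWC_ENABLED,
			       AST_IO_VGACRCB_HWC_ENABLED);
}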
static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
{
struct device_node *np = dev->dev->of_node;
struct ast_private *ast = to_ast_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
uint32_t data, jregd0, jregd1;
/* Defaults */
ast->config_mode = ast_use_defaults;
*scu_rev = 0xffffffff;
/* Check if we have device-tree properties */
if (np &&
!of_property_read_u32(np, "aspeed,scu-revision-id", scu_rev)) {
/* We do, disable P2A access */
ast->config_mode = ast_use_dt;
drm_info(dev, "Using device-tree for configuration\n");
return;
}
/* Not all families have a P2A bridge */
if (pdev->device != PCI_CHIP_AST2000)
return;
/*
* The BMC will set SCU 0x40 D[12] to 1 if the P2A bridge
* is disabled. We force using P2A if the VGA-only mode
* bit D[7] is set.
*/
jregd0 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
jregd1 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) {
/* Patch AST2500 */
if (((pdev->revision & 0xF0) == 0x40) &&
((jregd0 & AST_VRAM_INIT_STATUS_MASK) == 0))
ast_patch_ahb_2500(ast);
/* Double check it's actually working */
data = ast_read32(ast, 0xf004);
if ((data != 0xFFFFFFFF) && (data != 0x00)) {
/* P2A works, grab silicon revision */
ast->config_mode = ast_use_p2a;
drm_info(dev, "Using P2A bridge for configuration\n");
/* Read SCU7c (silicon revision register) */
ast_write32(ast, 0xf004, 0x1e6e0000);
ast_write32(ast, 0xf000, 0x1);
*scu_rev = ast_read32(ast, 0x1207c);
return;
}
}
/* We have a P2A bridge but it's disabled */
drm_info(dev, "P2A bridge disabled, using default configuration\n");
}
static int ast_detect_chip(struct drm_device *dev, bool *need_post)
{
struct ast_private *ast = to_ast_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
uint32_t jreg, scu_rev;
/*
* If VGA isn't enabled, we need to enable it now or subsequent
* access to the scratch registers will fail. We also inform
* our caller that it needs to POST the chip
* (assumption: VGA not enabled -> need to POST).
*/
if (!ast_is_vga_enabled(dev)) {
ast_enable_vga(dev);
drm_info(dev,
"VGA not enabled on entry, requesting chip POST\n");
*need_post = true;
} else
*need_post = false;
/* Enable extended register access */
ast_open_key(ast);
ast_enable_mmio(dev);
/* Find out whether P2A works or whether to use device-tree */
ast_detect_config_mode(dev, &scu_rev);
/* Identify chipset */
if (pdev->revision >= 0x50) {
ast->chip = AST2600;
drm_info(dev, "AST 2600 detected\n");
} else if (pdev->revision >= 0x40) {
ast->chip = AST2500;
drm_info(dev, "AST 2500 detected\n");
} else if (pdev->revision >= 0x30) {
ast->chip = AST2400;
drm_info(dev, "AST 2400 detected\n");
} else if (pdev->revision >= 0x20) {
ast->chip = AST2300;
drm_info(dev, "AST 2300 detected\n");
} else if (pdev->revision >= 0x10) {
switch (scu_rev & 0x0300) {
case 0x0200:
ast->chip = AST1100;
drm_info(dev, "AST 1100 detected\n");
break;
case 0x0100:
ast->chip = AST2200;
drm_info(dev, "AST 2200 detected\n");
break;
case 0x0000:
ast->chip = AST2150;
drm_info(dev, "AST 2150 detected\n");
break;
default:
ast->chip = AST2100;
drm_info(dev, "AST 2100 detected\n");
break;
}
ast->vga2_clone = false;
} else {
ast->chip = AST2000;
drm_info(dev, "AST 2000 detected\n");
}
/* Check if we support wide screen */
switch (ast->chip) {
case AST2000:
ast->support_wide_screen = false;
break;
default:
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0,
0xff);
if (!(jreg & 0x80))
ast->support_wide_screen = true;
else if (jreg & 0x01)
ast->support_wide_screen = true;
else {
ast->support_wide_screen = false;
if (ast->chip == AST2300 &&
(scu_rev & 0x300) == 0x0) /* ast1300 */
ast->support_wide_screen = true;
if (ast->chip == AST2400 &&
(scu_rev & 0x300) == 0x100) /* ast1400 */
ast->support_wide_screen = true;
if (ast->chip == AST2500 &&
scu_rev == 0x100) /* ast2510 */
ast->support_wide_screen = true;
if (ast->chip == AST2600) /* ast2600 */
ast->support_wide_screen = true;
}
break;
}
/* Check 3rd Tx option (digital output afaik) */
ast->tx_chip_types |= AST_TX_NONE_BIT;
/*
* VGACRA3 Enhanced Color Mode Register, check if DVO is already
* enabled, in that case, assume we have a SIL164 TMDS transmitter
*
* Don't make that assumption if the chip wasn't enabled and
* is at power-on reset, otherwise we'll incorrectly "detect" a
* SIL164 when there is none.
*/
if (!*need_post) {
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3,
0xff);
if (jreg & 0x80)
ast->tx_chip_types = AST_TX_SIL164_BIT;
}
if ((ast->chip == AST2300) || (ast->chip == AST2400) ||
(ast->chip == AST2500)) {
/*
* On AST2300 and 2400, look at the configuration set by the SoC in
* the SOC scratch register #1 bits 11:8 (interestingly marked
* as "reserved" in the spec)
*/
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1,
0xff);
switch (jreg) {
case 0x04:
ast->tx_chip_types = AST_TX_SIL164_BIT;
break;
case 0x08:
ast->dp501_fw_addr =
drmm_kzalloc(dev, 32 * 1024, GFP_KERNEL);
if (ast->dp501_fw_addr) {
/* backup firmware */
if (ast_backup_fw(dev, ast->dp501_fw_addr,
32 * 1024)) {
drmm_kfree(dev, ast->dp501_fw_addr);
ast->dp501_fw_addr = NULL;
}
}
fallthrough;
case 0x0c:
ast->tx_chip_types = AST_TX_DP501_BIT;
}
} else if (ast->chip == AST2600)
ast_dp_launch(&ast->base, 0);
/* Print stuff for diagnostic purposes */
if (ast->tx_chip_types & AST_TX_NONE_BIT)
drm_info(dev, "Using analog VGA\n");
if (ast->tx_chip_types & AST_TX_SIL164_BIT)
drm_info(dev, "Using Sil164 TMDS transmitter\n");
if (ast->tx_chip_types & AST_TX_DP501_BIT)
drm_info(dev, "Using DP501 DisplayPort transmitter\n");
return 0;
}
static int ast_get_dram_info(struct drm_device *dev)
{
struct device_node *np = dev->dev->of_node;
struct ast_private *ast = to_ast_private(dev);
uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap;
uint32_t denum, num, div, ref_pll, dsel;
switch (ast->config_mode) {
case ast_use_dt:
/*
* If some properties are missing, use reasonable
* defaults for AST2400
*/
if (of_property_read_u32(np, "aspeed,mcr-configuration",
&mcr_cfg))
mcr_cfg = 0x00000577;
if (of_property_read_u32(np, "aspeed,mcr-scu-mpll",
&mcr_scu_mpll))
mcr_scu_mpll = 0x000050C0;
if (of_property_read_u32(np, "aspeed,mcr-scu-strap",
&mcr_scu_strap))
mcr_scu_strap = 0;
break;
case ast_use_p2a:
ast_write32(ast, 0xf004, 0x1e6e0000);
ast_write32(ast, 0xf000, 0x1);
mcr_cfg = ast_read32(ast, 0x10004);
mcr_scu_mpll = ast_read32(ast, 0x10120);
mcr_scu_strap = ast_read32(ast, 0x10170);
break;
case ast_use_defaults:
default:
ast->dram_bus_width = 16;
ast->dram_type = AST_DRAM_1Gx16;
if (ast->chip == AST2500)
ast->mclk = 800;
else
ast->mclk = 396;
return 0;
}
if (mcr_cfg & 0x40)
ast->dram_bus_width = 16;
else
ast->dram_bus_width = 32;
if (ast->chip == AST2500) {
switch (mcr_cfg & 0x03) {
case 0:
ast->dram_type = AST_DRAM_1Gx16;
break;
default:
case 1:
ast->dram_type = AST_DRAM_2Gx16;
break;
case 2:
ast->dram_type = AST_DRAM_4Gx16;
break;
case 3:
ast->dram_type = AST_DRAM_8Gx16;
break;
}
} else if (ast->chip == AST2300 || ast->chip == AST2400) {
switch (mcr_cfg & 0x03) {
case 0:
ast->dram_type = AST_DRAM_512Mx16;
break;
default:
case 1:
ast->dram_type = AST_DRAM_1Gx16;
break;
case 2:
ast->dram_type = AST_DRAM_2Gx16;
break;
case 3:
ast->dram_type = AST_DRAM_4Gx16;
break;
}
} else {
switch (mcr_cfg & 0x0c) {
case 0:
case 4:
ast->dram_type = AST_DRAM_512Mx16;
break;
case 8:
if (mcr_cfg & 0x40)
ast->dram_type = AST_DRAM_1Gx16;
else
ast->dram_type = AST_DRAM_512Mx32;
break;
case 0xc:
ast->dram_type = AST_DRAM_1Gx32;
break;
}
}
if (mcr_scu_strap & 0x2000)
ref_pll = 14318;
else
ref_pll = 12000;
denum = mcr_scu_mpll & 0x1f;
num = (mcr_scu_mpll & 0x3fe0) >> 5;
dsel = (mcr_scu_mpll & 0xc000) >> 14;
switch (dsel) {
case 3:
div = 0x4;
break;
case 2:
case 1:
div = 0x2;
break;
default:
div = 0x1;
break;
}
ast->mclk = ref_pll * (num + 2) / ((denum + 2) * (div * 1000));
return 0;
}
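/*
 * Illustrative worked example (not part of the driver): with the
 * AST2400 device-tree defaults above (mcr_scu_mpll = 0x000050C0,
 * mcr_scu_strap = 0), the formula at the end of ast_get_dram_info()
 * evaluates as:
 *
 *   denum = 0x50C0 & 0x1f           = 0
 *   num   = (0x50C0 & 0x3fe0) >> 5  = 134
 *   dsel  = (0x50C0 & 0xc000) >> 14 = 1  ->  div = 2
 *   ref_pll = 12000 (strap bit 13 clear)
 *
 *   mclk = 12000 * (134 + 2) / ((0 + 2) * (2 * 1000)) = 408 (MHz)
 */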
/*
* Run this function as part of the HW device cleanup; not
* when the DRM device gets released.
*/
static void ast_device_release(void *data)
{
struct ast_private *ast = data;
/* enable standard VGA decode */
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x04);
}
struct ast_private *ast_device_create(const struct drm_driver *drv,
struct pci_dev *pdev, unsigned long flags)
{
struct drm_device *dev;
struct ast_private *ast;
bool need_post;
int ret = 0;
ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_private, base);
if (IS_ERR(ast))
return ast;
dev = &ast->base;
pci_set_drvdata(pdev, dev);
ret = drmm_mutex_init(dev, &ast->ioregs_lock);
if (ret)
return ERR_PTR(ret);
ast->regs = pcim_iomap(pdev, 1, 0);
if (!ast->regs)
return ERR_PTR(-EIO);
/*
* If we don't have IO space at all, use MMIO now and
* assume the chip has MMIO enabled by default (rev 0x20
* and higher).
*/
if (!(pci_resource_flags(pdev, 2) & IORESOURCE_IO)) {
drm_info(dev, "platform has no IO space, trying MMIO\n");
ast->ioregs = ast->regs + AST_IO_MM_OFFSET;
}
/* "map" IO regs if the above hasn't done so already */
if (!ast->ioregs) {
ast->ioregs = pcim_iomap(pdev, 2, 0);
if (!ast->ioregs)
return ERR_PTR(-EIO);
}
ast_detect_chip(dev, &need_post);
ret = ast_get_dram_info(dev);
if (ret)
return ERR_PTR(ret);
drm_info(dev, "dram MCLK=%u Mhz type=%d bus_width=%d\n", ast->mclk,
ast->dram_type, ast->dram_bus_width);
if (need_post)
ast_post_gpu(dev);
ret = ast_mm_init(ast);
if (ret)
return ERR_PTR(ret);
/* map reserved buffer */
ast->dp501_fw_buf = NULL;
if (dev->vram_mm->vram_size < pci_resource_len(pdev, 0)) {
ast->dp501_fw_buf =
pci_iomap_range(pdev, 0, dev->vram_mm->vram_size, 0);
if (!ast->dp501_fw_buf)
drm_info(dev, "failed to map reserved buffer!\n");
}
ret = ast_mode_config_init(ast);
if (ret)
return ERR_PTR(ret);
ret = devm_add_action_or_reset(dev->dev, ast_device_release, ast);
if (ret)
return ERR_PTR(ret);
return ast;
}
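/*
 * A minimal sketch (not part of this patch) of how a PCI probe path
 * would typically consume ast_device_create(); the ast_driver symbol
 * and the surrounding error handling are assumptions, not code from
 * this diff.
 */
#if 0	/* illustrative only */
static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct ast_private *ast;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	ast = ast_device_create(&ast_driver, pdev, ent->driver_data);
	if (IS_ERR(ast))
		return PTR_ERR(ast);

	/* Register with the DRM core once the hardware is initialized. */
	return drm_dev_register(&ast->base, ent->driver_data);
}
#endif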


@@ -0,0 +1,101 @@
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*/
/*
* Authors: Dave Airlie <airlied@redhat.com>
*/
#include <linux/pci.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include "ast_drv.h"
static u32 ast_get_vram_size(struct ast_private *ast)
{
u8 jreg;
u32 vram_size;
ast_open_key(ast);
vram_size = AST_VIDMEM_DEFAULT_SIZE;
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xaa, 0xff);
switch (jreg & 3) {
case 0:
vram_size = AST_VIDMEM_SIZE_8M;
break;
case 1:
vram_size = AST_VIDMEM_SIZE_16M;
break;
case 2:
vram_size = AST_VIDMEM_SIZE_32M;
break;
case 3:
vram_size = AST_VIDMEM_SIZE_64M;
break;
}
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x99, 0xff);
switch (jreg & 0x03) {
case 1:
vram_size -= 0x100000;
break;
case 2:
vram_size -= 0x200000;
break;
case 3:
vram_size -= 0x400000;
break;
}
return vram_size;
}
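/*
 * Worked example (illustrative): if scratch register 0xaa reads 3 and
 * register 0x99 reads 1, the decode above selects AST_VIDMEM_SIZE_64M
 * and then subtracts the 1 MiB reserved area, so ast_mm_init() sees
 * vram_size = AST_VIDMEM_SIZE_64M - 0x100000.
 */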
int ast_mm_init(struct ast_private *ast)
{
struct drm_device *dev = &ast->base;
struct pci_dev *pdev = to_pci_dev(dev->dev);
resource_size_t base, size;
u32 vram_size;
int ret;
base = pci_resource_start(pdev, 0);
size = pci_resource_len(pdev, 0);
/* Don't fail on errors, but performance might be reduced. */
devm_arch_io_reserve_memtype_wc(dev->dev, base, size);
devm_arch_phys_wc_add(dev->dev, base, size);
vram_size = ast_get_vram_size(ast);
ret = drmm_vram_helper_init(dev, base, vram_size);
if (ret) {
drm_err(dev, "Error initializing VRAM MM; %d\n", ret);
return ret;
}
return 0;
}

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,342 @@
/*
* Copyright (c) 2005 ASPEED Technology Inc.
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of the authors not be used in
* advertising or publicity pertaining to distribution of the software without
* specific, written prior permission. The authors makes no representations
* about the suitability of this software for any purpose. It is provided
* "as is" without express or implied warranty.
*
* THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/* Ported from xf86-video-ast driver */
#ifndef AST_TABLES_H
#define AST_TABLES_H
/* Std. Table Index Definition */
#define TextModeIndex 0
#define EGAModeIndex 1
#define VGAModeIndex 2
#define HiCModeIndex 3
#define TrueCModeIndex 4
#define Charx8Dot 0x00000001
#define HalfDCLK 0x00000002
#define DoubleScanMode 0x00000004
#define LineCompareOff 0x00000008
#define HBorder 0x00000020
#define VBorder 0x00000010
#define WideScreenMode 0x00000100
#define NewModeInfo 0x00000200
#define NHSync 0x00000400
#define PHSync 0x00000800
#define NVSync 0x00001000
#define PVSync 0x00002000
#define SyncPP (PVSync | PHSync)
#define SyncPN (PVSync | NHSync)
#define SyncNP (NVSync | PHSync)
#define SyncNN (NVSync | NHSync)
#define AST2500PreCatchCRT 0x00004000
/* DCLK Index */
#define VCLK25_175 0x00
#define VCLK28_322 0x01
#define VCLK31_5 0x02
#define VCLK36 0x03
#define VCLK40 0x04
#define VCLK49_5 0x05
#define VCLK50 0x06
#define VCLK56_25 0x07
#define VCLK65 0x08
#define VCLK75 0x09
#define VCLK78_75 0x0A
#define VCLK94_5 0x0B
#define VCLK108 0x0C
#define VCLK135 0x0D
#define VCLK157_5 0x0E
#define VCLK162 0x0F
#define VCLK154 0x10
#define VCLK83_5 0x11
#define VCLK106_5 0x12
#define VCLK146_25 0x13
#define VCLK148_5 0x14
#define VCLK71 0x15
#define VCLK88_75 0x16
#define VCLK119 0x17
#define VCLK85_5 0x18
#define VCLK97_75 0x19
#define VCLK118_25 0x1A
static const struct ast_vbios_dclk_info dclk_table[] = {
{ 0x2C, 0xE7, 0x03 }, /* 00: VCLK25_175 */
{ 0x95, 0x62, 0x03 }, /* 01: VCLK28_322 */
{ 0x67, 0x63, 0x01 }, /* 02: VCLK31_5 */
{ 0x76, 0x63, 0x01 }, /* 03: VCLK36 */
{ 0xEE, 0x67, 0x01 }, /* 04: VCLK40 */
{ 0x82, 0x62, 0x01 }, /* 05: VCLK49_5 */
{ 0xC6, 0x64, 0x01 }, /* 06: VCLK50 */
{ 0x94, 0x62, 0x01 }, /* 07: VCLK56_25 */
{ 0x80, 0x64, 0x00 }, /* 08: VCLK65 */
{ 0x7B, 0x63, 0x00 }, /* 09: VCLK75 */
{ 0x67, 0x62, 0x00 }, /* 0A: VCLK78_75 */
{ 0x7C, 0x62, 0x00 }, /* 0B: VCLK94_5 */
{ 0x8E, 0x62, 0x00 }, /* 0C: VCLK108 */
{ 0x85, 0x24, 0x00 }, /* 0D: VCLK135 */
{ 0x67, 0x22, 0x00 }, /* 0E: VCLK157_5 */
{ 0x6A, 0x22, 0x00 }, /* 0F: VCLK162 */
{ 0x4d, 0x4c, 0x80 }, /* 10: VCLK154 */
{ 0x68, 0x6f, 0x80 }, /* 11: VCLK83.5 */
{ 0x28, 0x49, 0x80 }, /* 12: VCLK106.5 */
{ 0x37, 0x49, 0x80 }, /* 13: VCLK146.25 */
{ 0x1f, 0x45, 0x80 }, /* 14: VCLK148.5 */
{ 0x47, 0x6c, 0x80 }, /* 15: VCLK71 */
{ 0x25, 0x65, 0x80 }, /* 16: VCLK88.75 */
{ 0x77, 0x58, 0x80 }, /* 17: VCLK119 */
{ 0x32, 0x67, 0x80 }, /* 18: VCLK85_5 */
{ 0x6a, 0x6d, 0x80 }, /* 19: VCLK97_75 */
{ 0x3b, 0x2c, 0x81 }, /* 1A: VCLK118_25 */
};
static const struct ast_vbios_dclk_info dclk_table_ast2500[] = {
{ 0x2C, 0xE7, 0x03 }, /* 00: VCLK25_175 */
{ 0x95, 0x62, 0x03 }, /* 01: VCLK28_322 */
{ 0x67, 0x63, 0x01 }, /* 02: VCLK31_5 */
{ 0x76, 0x63, 0x01 }, /* 03: VCLK36 */
{ 0xEE, 0x67, 0x01 }, /* 04: VCLK40 */
{ 0x82, 0x62, 0x01 }, /* 05: VCLK49_5 */
{ 0xC6, 0x64, 0x01 }, /* 06: VCLK50 */
{ 0x94, 0x62, 0x01 }, /* 07: VCLK56_25 */
{ 0x80, 0x64, 0x00 }, /* 08: VCLK65 */
{ 0x7B, 0x63, 0x00 }, /* 09: VCLK75 */
{ 0x67, 0x62, 0x00 }, /* 0A: VCLK78_75 */
{ 0x7C, 0x62, 0x00 }, /* 0B: VCLK94_5 */
{ 0x8E, 0x62, 0x00 }, /* 0C: VCLK108 */
{ 0x85, 0x24, 0x00 }, /* 0D: VCLK135 */
{ 0x67, 0x22, 0x00 }, /* 0E: VCLK157_5 */
{ 0x6A, 0x22, 0x00 }, /* 0F: VCLK162 */
{ 0x4d, 0x4c, 0x80 }, /* 10: VCLK154 */
{ 0x68, 0x6f, 0x80 }, /* 11: VCLK83.5 */
{ 0x28, 0x49, 0x80 }, /* 12: VCLK106.5 */
{ 0x37, 0x49, 0x80 }, /* 13: VCLK146.25 */
{ 0x1f, 0x45, 0x80 }, /* 14: VCLK148.5 */
{ 0x47, 0x6c, 0x80 }, /* 15: VCLK71 */
{ 0x25, 0x65, 0x80 }, /* 16: VCLK88.75 */
{ 0x58, 0x01, 0x42 }, /* 17: VCLK119 */
{ 0x32, 0x67, 0x80 }, /* 18: VCLK85_5 */
{ 0x6a, 0x6d, 0x80 }, /* 19: VCLK97_75 */
{ 0x44, 0x20, 0x43 }, /* 1A: VCLK118_25 */
};
static const struct ast_vbios_stdtable vbios_stdtable[] = {
/* MD_2_3_400 */
{ 0x67,
{ 0x00, 0x03, 0x00, 0x02 },
{ 0x5f, 0x4f, 0x50, 0x82, 0x55, 0x81, 0xbf, 0x1f, 0x00,
0x4f, 0x0d, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x9c, 0x8e,
0x8f, 0x28, 0x1f, 0x96, 0xb9, 0xa3, 0xff },
{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x14, 0x07, 0x38, 0x39,
0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x0c, 0x00, 0x0f, 0x08 },
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x0e, 0x00, 0xff } },
/* Mode12/ExtEGATable */
{ 0xe3,
{ 0x01, 0x0f, 0x00, 0x06 },
{ 0x5f, 0x4f, 0x50, 0x82, 0x55, 0x81, 0x0b, 0x3e, 0x00,
0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe9, 0x8b,
0xdf, 0x28, 0x00, 0xe7, 0x04, 0xe3, 0xff },
{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x14, 0x07, 0x38, 0x39,
0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x01, 0x00, 0x0f, 0x00 },
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x0f, 0xff } },
/* ExtVGATable */
{ 0x2f,
{ 0x01, 0x0f, 0x00, 0x0e },
{ 0x5f, 0x4f, 0x50, 0x82, 0x54, 0x80, 0x0b, 0x3e, 0x00,
0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xea, 0x8c,
0xdf, 0x28, 0x40, 0xe7, 0x04, 0xa3, 0xff },
{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09,
0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x01, 0x00, 0x00, 0x00 },
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0f, 0xff } },
/* ExtHiCTable */
{ 0x2f,
{ 0x01, 0x0f, 0x00, 0x0e },
{ 0x5f, 0x4f, 0x50, 0x82, 0x54, 0x80, 0x0b, 0x3e, 0x00,
0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xea, 0x8c,
0xdf, 0x28, 0x40, 0xe7, 0x04, 0xa3, 0xff },
{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09,
0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x01, 0x00, 0x00, 0x00 },
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x0f, 0xff } },
/* ExtTrueCTable */
{ 0x2f,
{ 0x01, 0x0f, 0x00, 0x0e },
{ 0x5f, 0x4f, 0x50, 0x82, 0x54, 0x80, 0x0b, 0x3e, 0x00,
0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xea, 0x8c,
0xdf, 0x28, 0x40, 0xe7, 0x04, 0xa3, 0xff },
{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09,
0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x01, 0x00, 0x00, 0x00 },
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x0f, 0xff } },
};
static const struct ast_vbios_enhtable res_640x480[] = {
{ 800, 640, 8, 96, 525, 480, 2, 2, VCLK25_175, /* 60Hz */
(SyncNN | HBorder | VBorder | Charx8Dot), 60, 1, 0x2E },
{ 832, 640, 16, 40, 520, 480, 1, 3, VCLK31_5, /* 72Hz */
(SyncNN | HBorder | VBorder | Charx8Dot), 72, 2, 0x2E },
{ 840, 640, 16, 64, 500, 480, 1, 3, VCLK31_5, /* 75Hz */
(SyncNN | Charx8Dot), 75, 3, 0x2E },
{ 832, 640, 56, 56, 509, 480, 1, 3, VCLK36, /* 85Hz */
(SyncNN | Charx8Dot), 85, 4, 0x2E },
{ 832, 640, 56, 56, 509, 480, 1, 3, VCLK36, /* end */
(SyncNN | Charx8Dot), 0xFF, 4, 0x2E },
};
static const struct ast_vbios_enhtable res_800x600[] = {
{ 1024, 800, 24, 72, 625, 600, 1, 2, VCLK36, /* 56Hz */
(SyncPP | Charx8Dot), 56, 1, 0x30 },
{ 1056, 800, 40, 128, 628, 600, 1, 4, VCLK40, /* 60Hz */
(SyncPP | Charx8Dot), 60, 2, 0x30 },
{ 1040, 800, 56, 120, 666, 600, 37, 6, VCLK50, /* 72Hz */
(SyncPP | Charx8Dot), 72, 3, 0x30 },
{ 1056, 800, 16, 80, 625, 600, 1, 3, VCLK49_5, /* 75Hz */
(SyncPP | Charx8Dot), 75, 4, 0x30 },
{ 1048, 800, 32, 64, 631, 600, 1, 3, VCLK56_25, /* 85Hz */
(SyncPP | Charx8Dot), 84, 5, 0x30 },
{ 1048, 800, 32, 64, 631, 600, 1, 3, VCLK56_25, /* end */
(SyncPP | Charx8Dot), 0xFF, 5, 0x30 },
};
static const struct ast_vbios_enhtable res_1024x768[] = {
{ 1344, 1024, 24, 136, 806, 768, 3, 6, VCLK65, /* 60Hz */
(SyncNN | Charx8Dot), 60, 1, 0x31 },
{ 1328, 1024, 24, 136, 806, 768, 3, 6, VCLK75, /* 70Hz */
(SyncNN | Charx8Dot), 70, 2, 0x31 },
{ 1312, 1024, 16, 96, 800, 768, 1, 3, VCLK78_75, /* 75Hz */
(SyncPP | Charx8Dot), 75, 3, 0x31 },
{ 1376, 1024, 48, 96, 808, 768, 1, 3, VCLK94_5, /* 85Hz */
(SyncPP | Charx8Dot), 84, 4, 0x31 },
{ 1376, 1024, 48, 96, 808, 768, 1, 3, VCLK94_5, /* end */
(SyncPP | Charx8Dot), 0xFF, 4, 0x31 },
};
static const struct ast_vbios_enhtable res_1280x1024[] = {
{ 1688, 1280, 48, 112, 1066, 1024, 1, 3, VCLK108, /* 60Hz */
(SyncPP | Charx8Dot), 60, 1, 0x32 },
{ 1688, 1280, 16, 144, 1066, 1024, 1, 3, VCLK135, /* 75Hz */
(SyncPP | Charx8Dot), 75, 2, 0x32 },
{ 1728, 1280, 64, 160, 1072, 1024, 1, 3, VCLK157_5, /* 85Hz */
(SyncPP | Charx8Dot), 85, 3, 0x32 },
{ 1728, 1280, 64, 160, 1072, 1024, 1, 3, VCLK157_5, /* end */
(SyncPP | Charx8Dot), 0xFF, 3, 0x32 },
};
static const struct ast_vbios_enhtable res_1600x1200[] = {
{ 2160, 1600, 64, 192, 1250, 1200, 1, 3, VCLK162, /* 60Hz */
(SyncPP | Charx8Dot), 60, 1, 0x33 },
{ 2160, 1600, 64, 192, 1250, 1200, 1, 3, VCLK162, /* end */
(SyncPP | Charx8Dot), 0xFF, 1, 0x33 },
};
static const struct ast_vbios_enhtable res_1152x864[] = {
{ 1600, 1152, 64, 128, 900, 864, 1, 3, VCLK108, /* 75Hz */
(SyncPP | Charx8Dot | NewModeInfo), 75, 1, 0x3B },
{ 1600, 1152, 64, 128, 900, 864, 1, 3, VCLK108, /* end */
(SyncPP | Charx8Dot | NewModeInfo), 0xFF, 1, 0x3B },
};
/* 16:9 */
static const struct ast_vbios_enhtable res_1360x768[] = {
{ 1792, 1360, 64, 112, 795, 768, 3, 6, VCLK85_5, /* 60Hz */
(SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo),
60, 1, 0x39 },
{ 1792, 1360, 64, 112, 795, 768, 3, 6, VCLK85_5, /* end */
(SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
AST2500PreCatchCRT),
0xFF, 1, 0x39 },
};
static const struct ast_vbios_enhtable res_1600x900[] = {
{ 1760, 1600, 48, 32, 926, 900, 3, 5, VCLK97_75, /* 60Hz CVT RB */
(SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
AST2500PreCatchCRT),
60, 1, 0x3A },
{ 2112, 1600, 88, 168, 934, 900, 3, 5, VCLK118_25, /* 60Hz CVT */
(SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo),
60, 2, 0x3A },
{ 2112, 1600, 88, 168, 934, 900, 3, 5, VCLK118_25, /* 60Hz CVT */
(SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo),
0xFF, 2, 0x3A },
};
static const struct ast_vbios_enhtable res_1920x1080[] = {
{ 2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */
(SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
AST2500PreCatchCRT),
60, 1, 0x38 },
{ 2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */
(SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
AST2500PreCatchCRT),
0xFF, 1, 0x38 },
};
/* 16:10 */
static const struct ast_vbios_enhtable res_1280x800[] = {
{ 1440, 1280, 48, 32, 823, 800, 3, 6, VCLK71, /* 60Hz RB */
(SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
AST2500PreCatchCRT),
60, 1, 0x35 },
{ 1680, 1280, 72, 128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */
(SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo),
60, 2, 0x35 },
{ 1680, 1280, 72, 128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */
(SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo),
0xFF, 2, 0x35 },
};
static const struct ast_vbios_enhtable res_1440x900[] = {
{ 1600, 1440, 48, 32, 926, 900, 3, 6, VCLK88_75, /* 60Hz RB */
(SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
AST2500PreCatchCRT),
60, 1, 0x36 },
{ 1904, 1440, 80, 152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */
(SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo),
60, 2, 0x36 },
{ 1904, 1440, 80, 152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */
(SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo),
0xFF, 2, 0x36 },
};
static const struct ast_vbios_enhtable res_1680x1050[] = {
{ 1840, 1680, 48, 32, 1080, 1050, 3, 6, VCLK119, /* 60Hz RB */
(SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
AST2500PreCatchCRT),
60, 1, 0x37 },
{ 2240, 1680, 104, 176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */
(SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo),
60, 2, 0x37 },
{ 2240, 1680, 104, 176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */
(SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo),
0xFF, 2, 0x37 },
};
static const struct ast_vbios_enhtable res_1920x1200[] = {
{ 2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz RB*/
(SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
AST2500PreCatchCRT),
60, 1, 0x34 },
{ 2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz RB */
(SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
AST2500PreCatchCRT),
0xFF, 1, 0x34 },
};
#endif


@@ -120,6 +120,12 @@ static unsigned int mwait_substates __initdata;
*/
#define CPUIDLE_FLAG_INIT_XSTATE BIT(17)
/*
* Ignore the sub-state when matching mwait hints between the ACPI _CST and
* custom tables.
*/
#define CPUIDLE_FLAG_PARTIAL_HINT_MATCH BIT(18)
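/*
 * Illustrative sketch, assuming MWAIT_SUBSTATE_MASK == 0xf from
 * <asm/mwait.h>: with CPUIDLE_FLAG_PARTIAL_HINT_MATCH set, a custom
 * table hint of 0x21 (C6P) matches an ACPI _CST hint of 0x20, since
 * both reduce to 0x20 once the sub-state nibble is masked off:
 *
 *   (0x21 & ~MWAIT_SUBSTATE_MASK) == (0x20 & ~MWAIT_SUBSTATE_MASK)
 */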
/*
* MWAIT takes an 8-bit "hint" in EAX "suggesting"
* the C-state (top nibble) and sub-state (bottom nibble)
@@ -993,6 +999,47 @@ static struct cpuidle_state spr_cstates[] __initdata = {
.enter = NULL }
};
static struct cpuidle_state gnr_cstates[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 1,
.enter = &intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
.desc = "MWAIT 0x01",
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 4,
.target_residency = 4,
.enter = &intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
.desc = "MWAIT 0x20",
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED |
CPUIDLE_FLAG_INIT_XSTATE |
CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
.exit_latency = 170,
.target_residency = 650,
.enter = &intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6P",
.desc = "MWAIT 0x21",
.flags = MWAIT2flg(0x21) | CPUIDLE_FLAG_TLB_FLUSHED |
CPUIDLE_FLAG_INIT_XSTATE |
CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
.exit_latency = 210,
.target_residency = 1000,
.enter = &intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
};
static struct cpuidle_state atom_cstates[] __initdata = {
{
.name = "C1E",
@@ -1237,6 +1284,45 @@ static struct cpuidle_state snr_cstates[] __initdata = {
.enter = NULL }
};
static struct cpuidle_state srf_cstates[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 1,
.target_residency = 1,
.enter = &intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
.desc = "MWAIT 0x01",
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 2,
.target_residency = 10,
.enter = &intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6S",
.desc = "MWAIT 0x22",
.flags = MWAIT2flg(0x22) | CPUIDLE_FLAG_TLB_FLUSHED |
CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
.exit_latency = 270,
.target_residency = 700,
.enter = &intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6SP",
.desc = "MWAIT 0x23",
.flags = MWAIT2flg(0x23) | CPUIDLE_FLAG_TLB_FLUSHED |
CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
.exit_latency = 310,
.target_residency = 900,
.enter = &intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
};
static const struct idle_cpu idle_cpu_nehalem __initconst = {
.state_table = nehalem_cstates,
.auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
@@ -1354,6 +1440,12 @@ static const struct idle_cpu idle_cpu_spr __initconst = {
.use_acpi = true,
};
static const struct idle_cpu idle_cpu_gnr __initconst = {
.state_table = gnr_cstates,
.disable_promotion_to_c1e = true,
.use_acpi = true,
};
static const struct idle_cpu idle_cpu_avn __initconst = {
.state_table = avn_cstates,
.disable_promotion_to_c1e = true,
@@ -1382,6 +1474,12 @@ static const struct idle_cpu idle_cpu_snr __initconst = {
.use_acpi = true,
};
static const struct idle_cpu idle_cpu_srf __initconst = {
.state_table = srf_cstates,
.disable_promotion_to_c1e = true,
.use_acpi = true,
};
static const struct x86_cpu_id intel_idle_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP, &idle_cpu_nhx),
X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &idle_cpu_nehalem),
@@ -1421,12 +1519,14 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &idle_cpu_gmt),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &idle_cpu_spr),
X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &idle_cpu_spr),
X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, &idle_cpu_gnr),
X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &idle_cpu_knl),
X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &idle_cpu_knl),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &idle_cpu_bxt),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &idle_cpu_bxt),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &idle_cpu_dnv),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &idle_cpu_snr),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, &idle_cpu_srf),
{}
};
@@ -1578,7 +1678,7 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
}
}
static bool __init intel_idle_off_by_default(u32 mwait_hint)
static bool __init intel_idle_off_by_default(unsigned int flags, u32 mwait_hint)
{
int cstate, limit;
@@ -1595,7 +1695,15 @@ static bool __init intel_idle_off_by_default(u32 mwait_hint)
* the interesting states are ACPI_CSTATE_FFH.
*/
for (cstate = 1; cstate < limit; cstate++) {
if (acpi_state_table.states[cstate].address == mwait_hint)
u32 acpi_hint = acpi_state_table.states[cstate].address;
u32 table_hint = mwait_hint;
if (flags & CPUIDLE_FLAG_PARTIAL_HINT_MATCH) {
acpi_hint &= ~MWAIT_SUBSTATE_MASK;
table_hint &= ~MWAIT_SUBSTATE_MASK;
}
if (acpi_hint == table_hint)
return false;
}
return true;
@@ -1605,7 +1713,10 @@ static bool __init intel_idle_off_by_default(u32 mwait_hint)
static inline bool intel_idle_acpi_cst_extract(void) { return false; }
static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { }
static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; }
static inline bool intel_idle_off_by_default(unsigned int flags, u32 mwait_hint)
{
return false;
}
#endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */
/**
@@ -1929,7 +2040,7 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
if ((disabled_states_mask & BIT(drv->state_count)) ||
((icpu->use_acpi || force_use_acpi) &&
intel_idle_off_by_default(mwait_hint) &&
intel_idle_off_by_default(state->flags, mwait_hint) &&
!(state->flags & CPUIDLE_FLAG_ALWAYS_ENABLE)))
state->flags |= CPUIDLE_FLAG_OFF;


@@ -150,7 +150,7 @@ config OF_IOMMU
# IOMMU-agnostic DMA-mapping layer
config IOMMU_DMA
def_bool ARM64 || IA64 || X86
def_bool ARM64 || IA64 || X86 || LOONGARCH
select DMA_OPS
select IOMMU_API
select IOMMU_IOVA
@@ -498,4 +498,17 @@ config SPRD_IOMMU
Say Y here if you want to use the multimedia devices listed above.
# LOONGARCH IOMMU support
config LOONGARCH_IOMMU
tristate "LOONGARCH IOMMU support"
select IOMMU_API
select IOMMU_DEFAULT_PASSTHROUGH
depends on LOONGARCH
help
With this option you can enable support for LOONGARCH IOMMU hardware in
your system. An IOMMU is a hardware component which provides
remapping of DMA memory accesses from devices. With a LOONGARCH IOMMU you
can isolate the DMA memory of different devices and protect the
system from misbehaving device drivers or hardware.
endif # IOMMU_SUPPORT


@@ -30,3 +30,4 @@ obj-$(CONFIG_VIRTIO_IOMMU) += virtio-iommu.o
obj-$(CONFIG_IOMMU_SVA) += iommu-sva.o io-pgfault.o
obj-$(CONFIG_SPRD_IOMMU) += sprd-iommu.o
obj-$(CONFIG_APPLE_DART) += apple-dart.o
obj-$(CONFIG_LOONGARCH_IOMMU) += loongarch_iommu.o


@@ -1743,7 +1743,7 @@ static size_t iommu_dma_max_mapping_size(struct device *dev)
return SIZE_MAX;
}
static const struct dma_map_ops iommu_dma_ops = {
static const struct dma_map_ops iommu_dmafops = {
.flags = DMA_F_PCI_P2PDMA_SUPPORTED,
.alloc = iommu_dma_alloc,
.free = iommu_dma_free,
@@ -1786,7 +1786,7 @@ void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit)
if (iommu_is_dma_domain(domain)) {
if (iommu_dma_init_domain(domain, dma_base, dma_limit, dev))
goto out_err;
dev->dma_ops = &iommu_dma_ops;
dev->dma_ops = &iommu_dmafops;
}
return;

File diff suppressed because it is too large


@@ -0,0 +1,184 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Loongson IOMMU Driver
*
* Copyright (C) 2020-2021 Loongson Technology Ltd.
* Author: Lv Chen <lvchen@loongson.cn>
* Wang Yang <wangyang@loongson.cn>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef LOONGARCH_IOMMU_H
#define LOONGARCH_IOMMU_H
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/iommu.h>
#include <linux/sizes.h>
#include <asm/addrspace.h>
#include <linux/io.h>
#define IOVA_WIDTH 47
/* Bit value definition for I/O PTE fields */
#define IOMMU_PTE_PR (1ULL << 0) /* Present */
#define IOMMU_PTE_HP (1ULL << 1) /* HugePage */
#define IOMMU_PTE_IR (1ULL << 2) /* Readable */
#define IOMMU_PTE_IW (1ULL << 3) /* Writeable */
#define IOMMU_PTE_RW (IOMMU_PTE_PR | IOMMU_PTE_IR | IOMMU_PTE_IW)
#define iommu_pte_present(ptep) ((*ptep != 0))
#define iommu_pte_huge(ptep) ((*ptep) & IOMMU_PTE_HP)
#define LA_IOMMU_PGSIZE (SZ_16K | SZ_32M)
#define IOMMU_PT_LEVEL0 0x00
#define IOMMU_PT_LEVEL1 0x01
/* IOMMU page table */
#define IOMMU_PAGE_SHIFT PAGE_SHIFT
#define IOMMU_PAGE_SIZE (_AC(1, UL) << IOMMU_PAGE_SHIFT)
#define IOMMU_LEVEL_STRIDE (IOMMU_PAGE_SHIFT - 3)
#define IOMMU_PTRS_PER_LEVEL (IOMMU_PAGE_SIZE >> 3)
#define IOMMU_LEVEL_SHIFT(n) (((n) * IOMMU_LEVEL_STRIDE) + IOMMU_PAGE_SHIFT)
#define IOMMU_LEVEL_SIZE(n) (_AC(1, UL) << (((n) * IOMMU_LEVEL_STRIDE) + IOMMU_PAGE_SHIFT))
#define IOMMU_LEVEL_MASK(n) (~(IOMMU_LEVEL_SIZE(n) - 1))
#define IOMMU_LEVEL_MAX DIV_ROUND_UP((IOVA_WIDTH - IOMMU_PAGE_SHIFT), IOMMU_LEVEL_STRIDE)
#define IOMMU_PAGE_MASK (~(IOMMU_PAGE_SIZE - 1))
#define IOMMU_HPAGE_SIZE (1UL << IOMMU_LEVEL_SHIFT(IOMMU_PT_LEVEL1))
#define IOMMU_HPAGE_MASK (~(IOMMU_HPAGE_SIZE - 1))
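/*
 * Worked example (illustrative, assuming the default LoongArch 16 KiB
 * page size, i.e. PAGE_SHIFT == 14): IOMMU_LEVEL_STRIDE is 11, so each
 * level holds 2048 entries; IOMMU_LEVEL_MAX = DIV_ROUND_UP(47 - 14, 11)
 * gives a 3-level table; and IOMMU_HPAGE_SIZE = 1UL << (11 + 14) is
 * 32 MiB, matching the SZ_16K | SZ_32M pair in LA_IOMMU_PGSIZE.
 */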
/* wired | index | domain | shift */
#define LA_IOMMU_WIDS 0x10
/* valid | busy | tlbar/aw | cmd */
#define LA_IOMMU_VBTC 0x14
#define IOMMU_PGTABLE_BUSY (1 << 16)
/* enable |index | valid | domain | bdf */
#define LA_IOMMU_EIVDB 0x18
/* enable | valid | cmd */
#define LA_IOMMU_CMD 0x1C
#define LA_IOMMU_PGD0_LO 0x20
#define LA_IOMMU_PGD0_HI 0x24
#define STEP_PGD 0x8
#define STEP_PGD_SHIFT 3
#define LA_IOMMU_PGD_LO(domain_id) \
(LA_IOMMU_PGD0_LO + ((domain_id) << STEP_PGD_SHIFT))
#define LA_IOMMU_PGD_HI(domain_id) \
(LA_IOMMU_PGD0_HI + ((domain_id) << STEP_PGD_SHIFT))
#define LA_IOMMU_DIR_CTRL0 0xA0
#define LA_IOMMU_DIR_CTRL1 0xA4
#define LA_IOMMU_DIR_CTRL(x) (LA_IOMMU_DIR_CTRL0 + ((x) << 2))
#define LA_IOMMU_SAFE_BASE_HI 0xE0
#define LA_IOMMU_SAFE_BASE_LO 0xE4
#define LA_IOMMU_EX_ADDR_LO 0xE8
#define LA_IOMMU_EX_ADDR_HI 0xEC
#define LA_IOMMU_PFM_CNT_EN 0x100
#define LA_IOMMU_RD_HIT_CNT_0 0x110
#define LA_IOMMU_RD_MISS_CNT_0 0x114
#define LA_IOMMU_WR_HIT_CNT_0 0x118
#define LA_IOMMU_WR_MISS_CNT_0 0x11C
#define LA_IOMMU_RD_HIT_CNT_1 0x120
#define LA_IOMMU_RD_MISS_CNT_1 0x124
#define LA_IOMMU_WR_HIT_CNT_1 0x128
#define LA_IOMMU_WR_MISS_CNT_1 0x12C
#define LA_IOMMU_RD_HIT_CNT_2 0x130
#define LA_IOMMU_RD_MISS_CNT_2 0x134
#define LA_IOMMU_WR_HIT_CNT_2 0x138
#define LA_IOMMU_WR_MISS_CNT_2 0x13C
#define MAX_DOMAIN_ID 16
#define MAX_ATTACHED_DEV_ID 16
#define iommu_ptable_end(addr, end, level) \
({ unsigned long __boundary = ((addr) + IOMMU_LEVEL_SIZE(level)) & \
IOMMU_LEVEL_MASK(level); \
(__boundary - 1 < (end) - 1) ? __boundary : (end); \
})
/* To find an entry in an iommu page table directory */
#define iommu_page_index(addr, level) \
(((addr) >> ((level * IOMMU_LEVEL_STRIDE) + IOMMU_PAGE_SHIFT)) \
& (IOMMU_PTRS_PER_LEVEL - 1))
struct loongarch_iommu {
struct list_head list; /* for la_iommu_list */
spinlock_t domain_bitmap_lock; /* Lock for domain allocation */
spinlock_t dom_info_lock; /* Lock for dom_list */
void *domain_bitmap; /* Bitmap of global domains */
void *devtable_bitmap; /* Bitmap of devtable */
struct list_head dom_list; /* List of all domain privates */
/* PCI device id of the IOMMU device */
u16 devid;
int segment; /* PCI segment# */
/* iommu configures the register space base address */
void *confbase;
/* iommu configures the register space physical base address */
resource_size_t confbase_phy;
/* iommu configures the register space size */
resource_size_t conf_size;
struct pci_dev *pdev;
/* Handle for IOMMU core code */
struct iommu_device iommu_dev;
} loongarch_iommu;
struct iommu_rlookup_entry {
struct list_head list;
struct loongarch_iommu **rlookup_table;
int pcisegment;
};
struct iommu_info {
struct list_head list; /* for dom_info->iommu_devlist */
struct loongarch_iommu *iommu;
spinlock_t devlock; /* priv dev list lock */
struct list_head dev_list; /* List of all devices in this domain iommu */
unsigned int dev_cnt; /* devices assigned to this domain iommu */
short id;
} iommu_info;
/* One VM maps to one domain, and each domain has its own dom_info */
struct dom_info {
struct list_head iommu_devlist;
struct iommu_domain domain;
struct mutex ptl_lock; /* Lock for page table */
void *pgd;
spinlock_t lock; /* Lock for dom_info->iommu_devlist */
} dom_info;
struct dom_entry {
struct list_head list; /* for loongarch_iommu->dom_list */
struct dom_info *domain_info;
} dom_entry;
/* A device for passthrough */
struct la_iommu_dev_data {
struct list_head list; /* for iommu_entry->dev_list */
struct loongarch_iommu *iommu;
struct iommu_info *iommu_entry;
struct iommu_domain *domain;
struct device *dev;
unsigned short bdf;
int count;
int index; /* index in device table */
};
static inline unsigned long *iommu_pte_offset(unsigned long *ptep, unsigned long addr, int level)
{
return ptep + iommu_page_index(addr, level);
}
#endif /* LOONGARCH_IOMMU_H */


@@ -4457,6 +4457,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9000,
quirk_bridge_cavm_thrx2_pcie_root);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9084,
quirk_bridge_cavm_thrx2_pcie_root);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LOONGSON, 0x3c09,
quirk_bridge_cavm_thrx2_pcie_root);
/*
* Intersil/Techwell TW686[4589]-based video capture cards have an empty (zero)
@@ -5177,6 +5179,8 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs },
/* Wangxun nics */
{ PCI_VENDOR_ID_WANGXUN, PCI_ANY_ID, pci_quirk_wangxun_nic_acs },
{ PCI_VENDOR_ID_LOONGSON, 0x3c09, pci_quirk_xgene_acs},
{ PCI_VENDOR_ID_LOONGSON, 0x3c19, pci_quirk_xgene_acs},
{ 0 }
};


@@ -39,7 +39,7 @@ config VFIO_GROUP
config VFIO_CONTAINER
bool "Support for the VFIO container /dev/vfio/vfio"
select VFIO_IOMMU_TYPE1 if MMU && (X86 || S390 || ARM || ARM64)
select VFIO_IOMMU_TYPE1 if MMU && (X86 || S390 || ARM || ARM64 || LOONGARCH)
depends on VFIO_GROUP
default y
help


@@ -216,6 +216,7 @@ enum kvm_bus {
KVM_PIO_BUS,
KVM_VIRTIO_CCW_NOTIFY_BUS,
KVM_FAST_MMIO_BUS,
KVM_IOCSR_BUS,
KVM_NR_BUSES
};


@@ -1475,7 +1475,15 @@ enum kvm_device_type {
#define KVM_DEV_TYPE_ARM_PV_TIME KVM_DEV_TYPE_ARM_PV_TIME
KVM_DEV_TYPE_RISCV_AIA,
#define KVM_DEV_TYPE_RISCV_AIA KVM_DEV_TYPE_RISCV_AIA
KVM_DEV_TYPE_LA_IOAPIC = 0x100,
#define KVM_DEV_TYPE_LA_IOAPIC KVM_DEV_TYPE_LA_IOAPIC
KVM_DEV_TYPE_LA_IPI,
#define KVM_DEV_TYPE_LA_IPI KVM_DEV_TYPE_LA_IPI
KVM_DEV_TYPE_LA_EXTIOI,
#define KVM_DEV_TYPE_LA_EXTIOI KVM_DEV_TYPE_LA_EXTIOI
KVM_DEV_TYPE_MAX,
};
struct kvm_vfio_spapr_tce {


@@ -4,6 +4,7 @@ PERF_HAVE_DWARF_REGS := 1
endif
PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
PERF_HAVE_JITDUMP := 1
HAVE_KVM_STAT_SUPPORT := 1
#
# Syscall table generation for perf


@@ -1,5 +1,7 @@
perf-y += header.o
perf-y += perf_regs.o
perf-$(CONFIG_DWARF) += dwarf-regs.o
perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
perf-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o


@@ -0,0 +1,96 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Implementation of get_cpuid().
*
* Author: Nikita Shubin <n.shubin@yadro.com>
* Bibo Mao <maobibo@loongson.cn>
* Huacai Chen <chenhuacai@loongson.cn>
*/
#include <stdio.h>
#include <stdlib.h>
#include <api/fs/fs.h>
#include <errno.h>
#include "util/debug.h"
#include "util/header.h"
/*
* Output example from /proc/cpuinfo
* CPU Family : Loongson-64bit
* Model Name : Loongson-3C5000
* CPU Revision : 0x10
* FPU Revision : 0x01
*/
#define CPUINFO_MODEL "Model Name"
#define CPUINFO "/proc/cpuinfo"
static char *_get_field(const char *line)
{
char *line2, *nl;
line2 = strrchr(line, ' ');
if (!line2)
return NULL;
line2++;
nl = strrchr(line, '\n');
if (!nl)
return NULL;
return strndup(line2, nl - line2);
}
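/*
 * Illustrative example: for the cpuinfo line
 * "Model Name           : Loongson-3C5000\n", strrchr(line, ' ')
 * finds the last space (just before the model string), so _get_field()
 * returns a newly allocated "Loongson-3C5000" with the newline dropped.
 */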
static char *_get_cpuid(void)
{
unsigned long line_sz;
char *line, *model, *cpuid;
FILE *file;
file = fopen(CPUINFO, "r");
if (file == NULL)
return NULL;
line = model = cpuid = NULL;
while (getline(&line, &line_sz, file) != -1) {
if (strncmp(line, CPUINFO_MODEL, strlen(CPUINFO_MODEL)))
continue;
model = _get_field(line);
if (!model)
goto out_free;
break;
}
if (model && (asprintf(&cpuid, "%s", model) < 0))
cpuid = NULL;
out_free:
fclose(file);
free(model);
return cpuid;
}
int get_cpuid(char *buffer, size_t sz)
{
int ret = 0;
char *cpuid = _get_cpuid();
if (!cpuid)
return EINVAL;
if (sz < strlen(cpuid)) {
ret = ENOBUFS;
goto out_free;
}
scnprintf(buffer, sz, "%s", cpuid);
out_free:
free(cpuid);
return ret;
}
char *get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
{
return _get_cpuid();
}


@@ -0,0 +1,139 @@
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <memory.h>
#include "util/kvm-stat.h"
#include "util/parse-events.h"
#include "util/debug.h"
#include "util/evsel.h"
#include "util/evlist.h"
#include "util/pmus.h"
#define LOONGARCH_EXCEPTION_INT 0
#define LOONGARCH_EXCEPTION_PIL 1
#define LOONGARCH_EXCEPTION_PIS 2
#define LOONGARCH_EXCEPTION_PIF 3
#define LOONGARCH_EXCEPTION_PME 4
#define LOONGARCH_EXCEPTION_FPD 15
#define LOONGARCH_EXCEPTION_SXD 16
#define LOONGARCH_EXCEPTION_ASXD 17
#define LOONGARCH_EXCEPTION_GSPR 22
#define LOONGARCH_EXCEPTION_CPUCFG 100
#define LOONGARCH_EXCEPTION_CSR 101
#define LOONGARCH_EXCEPTION_IOCSR 102
#define LOONGARCH_EXCEPTION_IDLE 103
#define LOONGARCH_EXCEPTION_OTHERS 104
#define LOONGARCH_EXCEPTION_HVC 23
#define loongarch_exception_type \
{LOONGARCH_EXCEPTION_INT, "Interrupt" }, \
{LOONGARCH_EXCEPTION_PIL, "Mem Read" }, \
{LOONGARCH_EXCEPTION_PIS, "Mem Store" }, \
{LOONGARCH_EXCEPTION_PIF, "Inst Fetch" }, \
{LOONGARCH_EXCEPTION_PME, "Mem Modify" }, \
{LOONGARCH_EXCEPTION_FPD, "FPU" }, \
{LOONGARCH_EXCEPTION_SXD, "LSX" }, \
{LOONGARCH_EXCEPTION_ASXD, "LASX" }, \
{LOONGARCH_EXCEPTION_GSPR, "Privilege Error" }, \
{LOONGARCH_EXCEPTION_HVC, "Hypercall" }, \
{LOONGARCH_EXCEPTION_CPUCFG, "CPUCFG" }, \
{LOONGARCH_EXCEPTION_CSR, "CSR" }, \
{LOONGARCH_EXCEPTION_IOCSR, "IOCSR" }, \
{LOONGARCH_EXCEPTION_IDLE, "Idle" }, \
{LOONGARCH_EXCEPTION_OTHERS, "Others" }
define_exit_reasons_table(loongarch_exit_reasons, loongarch_exception_type);
const char *vcpu_id_str = "vcpu_id";
const char *kvm_exit_reason = "reason";
const char *kvm_entry_trace = "kvm:kvm_enter";
const char *kvm_reenter_trace = "kvm:kvm_reenter";
const char *kvm_exit_trace = "kvm:kvm_exit";
const char *kvm_events_tp[] = {
"kvm:kvm_enter",
"kvm:kvm_reenter",
"kvm:kvm_exit",
"kvm:kvm_exit_gspr",
NULL,
};
static bool event_begin(struct evsel *evsel,
struct perf_sample *sample, struct event_key *key)
{
return exit_event_begin(evsel, sample, key);
}
static bool event_end(struct evsel *evsel,
struct perf_sample *sample __maybe_unused,
struct event_key *key __maybe_unused)
{
/*
 * LoongArch KVM differs from other architectures:
 *
 * a kvm:kvm_enter or kvm:kvm_reenter event is adjacent to each
 * kvm:kvm_exit event.
 * kvm:kvm_enter means returning to the VMM and then to the guest;
 * kvm:kvm_reenter means returning to the guest immediately.
 */
return evsel__name_is(evsel, kvm_entry_trace) || evsel__name_is(evsel, kvm_reenter_trace);
}
static void event_gspr_get_key(struct evsel *evsel,
struct perf_sample *sample, struct event_key *key)
{
unsigned int insn;
key->key = LOONGARCH_EXCEPTION_OTHERS;
insn = evsel__intval(evsel, sample, "inst_word");
switch (insn >> 24) {
case 0:
/* CPUCFG inst trap */
if ((insn >> 10) == 0x1b)
key->key = LOONGARCH_EXCEPTION_CPUCFG;
break;
case 4:
/* CSR inst trap */
key->key = LOONGARCH_EXCEPTION_CSR;
break;
case 6:
/* IOCSR inst trap */
if ((insn >> 15) == 0xc90)
key->key = LOONGARCH_EXCEPTION_IOCSR;
else if ((insn >> 15) == 0xc91)
/* Idle inst trap */
key->key = LOONGARCH_EXCEPTION_IDLE;
break;
default:
key->key = LOONGARCH_EXCEPTION_OTHERS;
break;
}
}
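/*
 * Worked example (illustrative): for insn = 0x06488000, insn >> 24 == 6
 * and insn >> 15 == 0xc91, so the decoder above classifies the trap as
 * LOONGARCH_EXCEPTION_IDLE; with insn >> 15 == 0xc90 (e.g. 0x06480000)
 * it would be counted as an IOCSR access instead.
 */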
static struct child_event_ops child_events[] = {
{ .name = "kvm:kvm_exit_gspr", .get_key = event_gspr_get_key },
{ NULL, NULL },
};
static struct kvm_events_ops exit_events = {
.is_begin_event = event_begin,
.is_end_event = event_end,
.child_ops = child_events,
.decode_key = exit_event_decode_key,
.name = "VM-EXIT"
};
struct kvm_reg_events_ops kvm_reg_events_ops[] = {
{ .name = "vmexit", .ops = &exit_events, },
{ NULL, NULL },
};
const char * const kvm_skip_events[] = {
NULL,
};
int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid __maybe_unused)
{
kvm->exit_reasons_isa = "loongarch64";
kvm->exit_reasons = loongarch_exit_reasons;
return 0;
}


@@ -55,6 +55,9 @@ static void kvm_reset_dirty_gfn(struct kvm *kvm, u32 slot, u64 offset, u64 mask)
struct kvm_memory_slot *memslot;
int as_id, id;
if (!mask)
return;
as_id = slot >> 16;
id = (u16)slot;