Merge OCK next branch to TK5 master branch

Jianping Liu 2024-08-23 19:52:09 +08:00
commit d6563b9042
39 changed files with 3174 additions and 828 deletions


@ -2325,6 +2325,11 @@
isapnp= [ISAPNP]
Format: <RDP>,<reset>,<pci_scan>,<verbosity>
zhaoxin_patch_bitmask=
[X86] Bitmask selecting Zhaoxin platform patches.
bit 0: enable the KH-40000 DMA patch's node-check function (see the usage sketch after this hunk)
isolcpus= [KNL,SMP,ISOL] Isolate a given set of CPUs from disturbance.
[Deprecated - use cpusets instead]
Format: [flag-list,]<cpu-list>
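
A minimal usage sketch for the zhaoxin_patch_bitmask entry above (the bit layout and the default of 1 come from that entry; everything else here is illustrative):

    zhaoxin_patch_bitmask=1        keep the KH-40000 node check enabled (the default)
    zhaoxin_patch_bitmask=0        disable the node check

Values above the highest defined bit are reported as invalid by the __setup() handler in the new zhaoxin_kh40000.c file added later in this commit.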


@ -259,7 +259,10 @@ static void zhaoxin_pmu_disable_all(void)
static void zhaoxin_pmu_enable_all(int added)
{
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
}
static inline u64 zhaoxin_pmu_get_status(void)
@ -286,13 +289,31 @@ static inline void zxc_pmu_ack_status(u64 ack)
zhaoxin_pmu_disable_all();
}
static void zhaoxin_pmu_disable_fixed(struct hw_perf_event *hwc)
static inline void zhaoxin_set_masks(struct perf_event *event, int idx)
{
int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
if (event->attr.exclude_host)
__set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
if (event->attr.exclude_guest)
__set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
}
static inline void zhaoxin_clear_masks(struct perf_event *event, int idx)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
__clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
__clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
}
static void zhaoxin_pmu_disable_fixed(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
u64 ctrl_val, mask;
int idx = hwc->idx;
mask = 0xfULL << (idx * 4);
mask = 0xfULL << ((idx - INTEL_PMC_IDX_FIXED) * 4);
rdmsrl(hwc->config_base, ctrl_val);
ctrl_val &= ~mask;
wrmsrl(hwc->config_base, ctrl_val);
@ -301,19 +322,23 @@ static void zhaoxin_pmu_disable_fixed(struct hw_perf_event *hwc)
static void zhaoxin_pmu_disable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
zhaoxin_clear_masks(event, idx);
if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
zhaoxin_pmu_disable_fixed(hwc);
zhaoxin_pmu_disable_fixed(event);
return;
}
x86_pmu_disable_event(event);
}
static void zhaoxin_pmu_enable_fixed(struct hw_perf_event *hwc)
static void zhaoxin_pmu_enable_fixed(struct perf_event *event)
{
int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
u64 ctrl_val, bits, mask;
struct hw_perf_event *hwc = &event->hw;
u64 ctrl_val, mask, bits = 0;
int idx = hwc->idx;
/*
* Enable IRQ generation (0x8),
@ -326,6 +351,7 @@ static void zhaoxin_pmu_enable_fixed(struct hw_perf_event *hwc)
if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
bits |= 0x1;
idx -= INTEL_PMC_IDX_FIXED;
bits <<= (idx * 4);
mask = 0xfULL << (idx * 4);
@ -338,9 +364,12 @@ static void zhaoxin_pmu_enable_fixed(struct hw_perf_event *hwc)
static void zhaoxin_pmu_enable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
zhaoxin_set_masks(event, idx);
if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
zhaoxin_pmu_enable_fixed(hwc);
zhaoxin_pmu_enable_fixed(event);
return;
}
@ -456,6 +485,19 @@ static ssize_t zhaoxin_event_sysfs_show(char *page, u64 config)
return x86_event_sysfs_show(page, config, event);
}
static struct perf_guest_switch_msr *zhaoxin_guest_get_msrs(int *nr, void *data)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
*nr = 1;
return arr;
}
static const struct x86_pmu zhaoxin_pmu __initconst = {
.name = "zhaoxin",
.handle_irq = zhaoxin_pmu_handle_irq,
@ -478,6 +520,8 @@ static const struct x86_pmu zhaoxin_pmu __initconst = {
.format_attrs = zx_arch_formats_attr,
.events_sysfs_show = zhaoxin_event_sysfs_show,
.guest_get_msrs = zhaoxin_guest_get_msrs,
};
static const struct { int id; char *name; } zx_arch_events_map[] __initconst = {
@ -617,6 +661,9 @@ __init int zhaoxin_pmu_init(void)
if (boot_cpu_data.x86_model == 0x5b)
pr_cont("Yongfeng events, ");
if (boot_cpu_data.x86_model == 0x6b)
pr_cont("Shijidadao events, ");
break;
default:
return -ENODEV;
@ -639,4 +686,3 @@ __init int zhaoxin_pmu_init(void)
return 0;
}

File diff suppressed because it is too large


@ -10,9 +10,7 @@
#define ZHAOXIN_FAM7_KX5000 0x1b
#define ZHAOXIN_FAM7_KX6000 0x3b
#define ZHAOXIN_FAM7_KH40000 0x5b
#define ZHAOXIN_FAM7_KX8000 0x6b
#define ZHAOXIN_FAM7_KX7000 0x6b
#define UNCORE_PMU_NAME_LEN 32
#define UNCORE_PMU_HRTIMER_INTERVAL (60LL * NSEC_PER_SEC)
@ -135,8 +133,7 @@ struct hw_info {
u64 active_state;
};
ssize_t zx_uncore_event_show(struct device *dev,
struct device_attribute *attr, char *buf);
ssize_t zx_uncore_event_show(struct device *dev, struct device_attribute *attr, char *buf);
#define ZHAOXIN_UNCORE_EVENT_DESC(_name, _config) \
{ \
@ -160,8 +157,7 @@ static inline bool uncore_pmc_fixed(int idx)
return idx == UNCORE_PMC_IDX_FIXED;
}
static inline
unsigned int uncore_mmio_box_ctl(struct zhaoxin_uncore_box *box)
static inline unsigned int uncore_mmio_box_ctl(struct zhaoxin_uncore_box *box)
{
return box->pmu->type->box_ctl +
box->pmu->type->mmio_offset * box->pmu->pmu_idx;
@ -182,14 +178,12 @@ static inline unsigned int uncore_pci_fixed_ctr(struct zhaoxin_uncore_box *box)
return box->pmu->type->fixed_ctr;
}
static inline
unsigned int uncore_pci_event_ctl(struct zhaoxin_uncore_box *box, int idx)
static inline unsigned int uncore_pci_event_ctl(struct zhaoxin_uncore_box *box, int idx)
{
return idx * 4 + box->pmu->type->event_ctl;
}
static inline
unsigned int uncore_pci_perf_ctr(struct zhaoxin_uncore_box *box, int idx)
static inline unsigned int uncore_pci_perf_ctr(struct zhaoxin_uncore_box *box, int idx)
{
if (!strncmp(box->pmu->type->name, "mc_", 3))
return idx * 2 + box->pmu->type->perf_ctr;
@ -225,24 +219,21 @@ static inline unsigned int uncore_msr_fixed_ctr(struct zhaoxin_uncore_box *box)
return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box);
}
static inline
unsigned int uncore_msr_event_ctl(struct zhaoxin_uncore_box *box, int idx)
static inline unsigned int uncore_msr_event_ctl(struct zhaoxin_uncore_box *box, int idx)
{
return box->pmu->type->event_ctl +
(box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
uncore_msr_box_offset(box);
}
static inline
unsigned int uncore_msr_perf_ctr(struct zhaoxin_uncore_box *box, int idx)
static inline unsigned int uncore_msr_perf_ctr(struct zhaoxin_uncore_box *box, int idx)
{
return box->pmu->type->perf_ctr +
(box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
uncore_msr_box_offset(box);
}
static inline
unsigned int uncore_fixed_ctl(struct zhaoxin_uncore_box *box)
static inline unsigned int uncore_fixed_ctl(struct zhaoxin_uncore_box *box)
{
if (box->pci_dev)
return uncore_pci_fixed_ctl(box);
@ -250,8 +241,7 @@ unsigned int uncore_fixed_ctl(struct zhaoxin_uncore_box *box)
return uncore_msr_fixed_ctl(box);
}
static inline
unsigned int uncore_fixed_ctr(struct zhaoxin_uncore_box *box)
static inline unsigned int uncore_fixed_ctr(struct zhaoxin_uncore_box *box)
{
if (box->pci_dev)
return uncore_pci_fixed_ctr(box);
@ -259,17 +249,17 @@ unsigned int uncore_fixed_ctr(struct zhaoxin_uncore_box *box)
return uncore_msr_fixed_ctr(box);
}
static inline
unsigned int uncore_event_ctl(struct zhaoxin_uncore_box *box, int idx)
{ if (box->pci_dev || box->io_addr)
static inline unsigned int uncore_event_ctl(struct zhaoxin_uncore_box *box, int idx)
{
if (box->pci_dev || box->io_addr)
return uncore_pci_event_ctl(box, idx);
else
return uncore_msr_event_ctl(box, idx);
}
static inline
unsigned int uncore_perf_ctr(struct zhaoxin_uncore_box *box, int idx)
{ if (box->pci_dev || box->io_addr)
static inline unsigned int uncore_perf_ctr(struct zhaoxin_uncore_box *box, int idx)
{
if (box->pci_dev || box->io_addr)
return uncore_pci_perf_ctr(box, idx);
else
return uncore_msr_perf_ctr(box, idx);
@ -302,20 +292,17 @@ static inline void uncore_enable_box(struct zhaoxin_uncore_box *box)
box->pmu->type->ops->enable_box(box);
}
static inline void uncore_disable_event(struct zhaoxin_uncore_box *box,
struct perf_event *event)
static inline void uncore_disable_event(struct zhaoxin_uncore_box *box, struct perf_event *event)
{
box->pmu->type->ops->disable_event(box, event);
}
static inline void uncore_enable_event(struct zhaoxin_uncore_box *box,
struct perf_event *event)
static inline void uncore_enable_event(struct zhaoxin_uncore_box *box, struct perf_event *event)
{
box->pmu->type->ops->enable_event(box, event);
}
static inline u64 uncore_read_counter(struct zhaoxin_uncore_box *box,
struct perf_event *event)
static inline u64 uncore_read_counter(struct zhaoxin_uncore_box *box, struct perf_event *event)
{
return box->pmu->type->ops->read_counter(box, event);
}
@ -351,12 +338,10 @@ static inline struct zhaoxin_uncore_box *uncore_event_to_box(struct perf_event *
return event->pmu_private;
}
static struct zhaoxin_uncore_box *uncore_pmu_to_box(struct zhaoxin_uncore_pmu *pmu, int cpu);
static u64 uncore_msr_read_counter(struct zhaoxin_uncore_box *box, struct perf_event *event);
static void uncore_mmio_exit_box(struct zhaoxin_uncore_box *box);
static u64 uncore_mmio_read_counter(struct zhaoxin_uncore_box *box,
struct perf_event *event);
static u64 uncore_mmio_read_counter(struct zhaoxin_uncore_box *box, struct perf_event *event);
static void uncore_pmu_start_hrtimer(struct zhaoxin_uncore_box *box);
static void uncore_pmu_cancel_hrtimer(struct zhaoxin_uncore_box *box);
static void uncore_pmu_event_start(struct perf_event *event, int flags);
@ -365,7 +350,7 @@ static int uncore_pmu_event_add(struct perf_event *event, int flags);
static void uncore_pmu_event_del(struct perf_event *event, int flags);
static void uncore_pmu_event_read(struct perf_event *event);
static void uncore_perf_event_update(struct zhaoxin_uncore_box *box, struct perf_event *event);
struct event_constraint *
uncore_get_constraint(struct zhaoxin_uncore_box *box, struct perf_event *event);
struct event_constraint *uncore_get_constraint(struct zhaoxin_uncore_box *box,
struct perf_event *event);
void uncore_put_constraint(struct zhaoxin_uncore_box *box, struct perf_event *event);
u64 uncore_shared_reg_config(struct zhaoxin_uncore_box *box, int idx);


@ -289,6 +289,12 @@ struct cper_sec_mem_err;
extern void apei_mce_report_mem_error(int corrected,
struct cper_sec_mem_err *mem_err);
extern void zx_apei_mce_report_mem_error(struct cper_sec_mem_err *mem_err);
struct cper_sec_pcie;
extern void zx_apei_mce_report_pcie_error(int corrected, struct cper_sec_pcie *pcie_err);
struct cper_sec_proc_generic;
extern void zx_apei_mce_report_zdi_error(struct cper_sec_proc_generic *zdi_err);
/*
* Enumerate new IP types and HWID values in AMD processors which support
* Scalable MCA.


@ -159,6 +159,7 @@ ifeq ($(CONFIG_X86_64),y)
obj-$(CONFIG_MMCONF_FAM10H) += mmconf-fam10h_64.o
obj-y += vsmp_64.o
obj-$(CONFIG_PCI) += zhaoxin_kh40000.o
endif
obj-$(CONFIG_HYGON_CSV) += csv.o


@ -40,10 +40,36 @@ int arch_apei_enable_cmcff(struct acpi_hest_header *hest_hdr, void *data)
void arch_apei_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
{
#ifdef CONFIG_X86_MCE
if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN ||
boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR)
zx_apei_mce_report_mem_error(mem_err);
else
apei_mce_report_mem_error(sev, mem_err);
#endif
}
void arch_apei_report_pcie_error(int sev, struct cper_sec_pcie *pcie_err)
{
#ifdef CONFIG_X86_MCE
if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN ||
boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR)
zx_apei_mce_report_pcie_error(sev, pcie_err);
#endif
}
bool arch_apei_report_zdi_error(guid_t *sec_type, struct cper_sec_proc_generic *zdi_err)
{
#ifdef CONFIG_X86_MCE
if ((boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR ||
boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) &&
(guid_equal(sec_type, &CPER_SEC_PROC_GENERIC))) {
zx_apei_mce_report_zdi_error(zdi_err);
return true;
}
#endif
return false;
}
int arch_apei_report_x86_error(struct cper_ia_proc_ctx *ctx_info, u64 lapic_id)
{
return apei_smca_report_x86_error(ctx_info, lapic_id);


@ -398,19 +398,6 @@ static void early_init_intel(struct cpuinfo_x86 *c)
setup_clear_cpu_cap(X86_FEATURE_PGE);
}
if (c->cpuid_level >= 0x00000001) {
u32 eax, ebx, ecx, edx;
cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
/*
* If HTT (EDX[28]) is set EBX[16:23] contain the number of
* apicids which are reserved per package. Store the resulting
* shift value for the package management code.
*/
if (edx & (1U << 28))
c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff);
}
check_memory_type_self_snoop_errata(c);
/*


@ -63,6 +63,173 @@ void apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err)
}
EXPORT_SYMBOL_GPL(apei_mce_report_mem_error);
void zx_apei_mce_report_mem_error(struct cper_sec_mem_err *mem_err)
{
struct mce m;
int apei_error = 0;
if (boot_cpu_data.x86 != 7 || boot_cpu_data.x86_model != 91)
return;
if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
return;
mce_setup(&m);
m.misc = 0;
m.misc = mem_err->module;
m.addr = mem_err->physical_addr;
if (mem_err->card == 0)
m.bank = 9;
else
m.bank = 10;
switch (mem_err->error_type) {
case 2:
m.status = 0x9c20004000010080;
break;
case 3:
m.status = 0xbe40000000020090;
apei_error = apei_write_mce(&m);
break;
case 8:
if (mem_err->requestor_id == 2) {
m.status = 0x98200040000400b0;
} else if (mem_err->requestor_id == 3) {
m.status = 0xba400000000600a0;
apei_error = apei_write_mce(&m);
} else if (mem_err->requestor_id == 4) {
m.status = 0x98200100000300b0;
} else if (mem_err->requestor_id == 5) {
m.status = 0xba000000000500b0;
apei_error = apei_write_mce(&m);
} else {
pr_info("Undefined Parity error\n");
}
break;
case 10:
if (mem_err->requestor_id == 6) {
m.status = 0xba400000000700a0;
apei_error = apei_write_mce(&m);
} else if (mem_err->requestor_id == 7) {
m.status = 0xba000000000800b0;
apei_error = apei_write_mce(&m);
} else {
pr_info("Undefined dvad error\n");
}
break;
case 13:
m.status = 0x9c200040000100c0;
break;
case 14:
m.status = 0xbd000000000200c0;
apei_error = apei_write_mce(&m);
break;
}
mce_log(&m);
}
EXPORT_SYMBOL_GPL(zx_apei_mce_report_mem_error);
void zx_apei_mce_report_pcie_error(int severity, struct cper_sec_pcie *pcie_err)
{
struct mce m;
int apei_error = 0;
if (boot_cpu_data.x86 != 7 || boot_cpu_data.x86_model != 91)
return;
mce_setup(&m);
m.addr = 0;
m.misc = 0;
m.misc |= (u64)pcie_err->device_id.segment << 32;
m.misc |= pcie_err->device_id.bus << 24;
m.misc |= pcie_err->device_id.device << 19;
m.misc |= pcie_err->device_id.function << 16;
m.bank = 6;
switch (severity) {
case 1:
m.status = 0x9820004000020e0b;
break;
case 2:
m.status = 0xba20000000010e0b;
break;
case 3:
m.status = 0xbd20000000000e0b;
apei_error = apei_write_mce(&m);
break;
default:
pr_info("Undefine pcie error\n");
break;
}
mce_log(&m);
}
EXPORT_SYMBOL_GPL(zx_apei_mce_report_pcie_error);
void zx_apei_mce_report_zdi_error(struct cper_sec_proc_generic *zdi_err)
{
struct mce m;
int apei_error = 0;
if (boot_cpu_data.x86 != 7 || boot_cpu_data.x86_model != 91)
return;
mce_setup(&m);
m.misc = 0;
m.misc |= (zdi_err->requestor_id & 0xff) << 19;
m.misc |= ((zdi_err->requestor_id & 0xff00) >> 8) << 24;
m.bank = 5;
switch (zdi_err->responder_id) {
case 2:
m.status = 0xba00000000040e0f;
apei_error = apei_write_mce(&m);
break;
case 3:
m.status = 0xba00000000030e0f;
apei_error = apei_write_mce(&m);
break;
case 4:
m.status = 0xba00000000020e0f;
apei_error = apei_write_mce(&m);
break;
case 5:
m.status = 0xba00000000010e0f;
apei_error = apei_write_mce(&m);
break;
case 6:
m.status = 0x9820004000090e0f;
break;
case 7:
m.status = 0x9820004000080e0f;
break;
case 8:
m.status = 0x9820004000070e0f;
break;
case 9:
m.status = 0x9820004000060e0f;
break;
case 10:
m.status = 0x9820004000050e0f;
break;
case 11:
case 12:
case 13:
case 14:
case 15:
m.status = 0x98200040000b0e0f;
break;
case 16:
case 17:
case 18:
m.status = 0x98200040000c0e0f;
break;
default:
pr_info("Undefined ZDI Error\n");
break;
}
mce_log(&m);
}
EXPORT_SYMBOL_GPL(zx_apei_mce_report_zdi_error);
int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info, u64 lapic_id)
{
const u64 *i_mce = ((const u64 *) (ctx_info + 1));


@ -1946,6 +1946,7 @@ static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
if (cfg->monarch_timeout < 0)
cfg->monarch_timeout = USEC_PER_SEC;
}
mca_cfg.bios_cmci_threshold = 1;
}
if (cfg->monarch_timeout < 0)
@ -2121,11 +2122,17 @@ static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
static __always_inline void exc_machine_check_user(struct pt_regs *regs)
{
irqentry_state_t irq_state;
irq_state = irqentry_nmi_enter(regs);
irqentry_enter_from_user_mode(regs);
do_machine_check(regs);
irqentry_exit_to_user_mode(regs);
irqentry_nmi_exit(regs, irq_state);
}
#ifdef CONFIG_X86_64


@ -66,19 +66,6 @@ static void early_init_zhaoxin(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
}
if (c->cpuid_level >= 0x00000001) {
u32 eax, ebx, ecx, edx;
cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
/*
* If HTT (EDX[28]) is set EBX[16:23] contain the number of
* apicids which are reserved per package. Store the resulting
* shift value for the package management code.
*/
if (edx & (1U << 28))
c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff);
}
/*
* These CPUs declare support SSE4.2 instruction sets but
* having low performance CRC32C instruction implementation.


@ -17,6 +17,7 @@
#include <linux/bcma/bcma.h>
#include <linux/bcma/bcma_regs.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/dma-map-ops.h>
#include <drm/i915_drm.h>
#include <drm/i915_pciids.h>
#include <asm/pci-direct.h>
@ -28,6 +29,7 @@
#include <asm/gart.h>
#include <asm/irq_remapping.h>
#include <asm/early_ioremap.h>
#include <asm/dma-mapping.h>
static void __init fix_hypertransport_config(int num, int slot, int func)
{
@ -685,6 +687,20 @@ static void __init apple_airport_reset(int bus, int slot, int func)
early_iounmap(mmio, BCM4331_MMIO_SIZE);
}
bool is_zhaoxin_kh40000;
static void quirk_zhaoxin_dma_patch(int num, int slot, int func)
{
u8 revision;
revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID);
if (revision == 0x10) {
is_zhaoxin_kh40000 = true;
dma_ops = &kh40000_dma_direct_ops;
pr_info("zhaoxin direct dma patch enabled\n");
}
}
#define QFLAG_APPLY_ONCE 0x1
#define QFLAG_APPLIED 0x2
#define QFLAG_DONE (QFLAG_APPLY_ONCE|QFLAG_APPLIED)
@ -728,6 +744,10 @@ static struct chipset early_qrk[] __initdata = {
PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
{ PCI_VENDOR_ID_BROADCOM, 0x4331,
PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset},
{ PCI_VENDOR_ID_ZHAOXIN, 0x1001, PCI_CLASS_BRIDGE_HOST,
PCI_BASE_CLASS_BRIDGE, QFLAG_APPLY_ONCE, quirk_zhaoxin_dma_patch },
{ PCI_VENDOR_ID_ZHAOXIN, 0x345B, PCI_CLASS_BRIDGE_HOST,
PCI_BASE_CLASS_BRIDGE, QFLAG_APPLY_ONCE, quirk_zhaoxin_dma_patch },
{}
};


@ -804,6 +804,12 @@ static u64 read_hpet(struct clocksource *cs)
if (in_nmi())
return (u64)hpet_readl(HPET_COUNTER);
/*
* Read HPET directly if panic in progress.
*/
if (unlikely(atomic_read(&panic_cpu) != PANIC_CPU_INVALID))
return (u64)hpet_readl(HPET_COUNTER);
/*
* Read the current state of the lock and HPET value atomically.
*/


@ -122,6 +122,7 @@ int sched_set_itmt_support(void)
return 0;
}
EXPORT_SYMBOL_GPL(sched_set_itmt_support);
/**
* sched_clear_itmt_support() - Revoke platform's support of ITMT
@ -181,3 +182,4 @@ void sched_set_itmt_core_prio(int prio, int cpu)
{
per_cpu(sched_core_priority, cpu) = prio;
}
EXPORT_SYMBOL_GPL(sched_set_itmt_core_prio);


@ -0,0 +1,351 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/device.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/iommu.h>
#include <linux/kstrtox.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/printk.h>
#include <linux/scatterlist.h>
#include <linux/types.h>
#include <asm/dma-mapping.h>
#include "../../../kernel/dma/direct.h"
/***
* usage:
* set "zhaoxin_patch_bitmask=<value>" in cmdline
* value description:
* bit 0: 1 = enable the node check, 0 = disable it. default 1
*/
enum {
ZHAOXIN_P2CW_NODE_CHECK = BIT(0),
ZHAOXIN_PATCH_CODE_MAX = ZHAOXIN_P2CW_NODE_CHECK,
};
#define ZHAOXIN_PATCH_CODE_DEFAULT ZHAOXIN_P2CW_NODE_CHECK
unsigned long zhaoxin_patch_code = ZHAOXIN_PATCH_CODE_DEFAULT;
static int __init zhaoxin_patch_code_setup(char *str)
{
int err = kstrtoul(str, 0, &zhaoxin_patch_code);
if (err || (zhaoxin_patch_code > ZHAOXIN_PATCH_CODE_MAX)) {
pr_err("cmdline 'zhaoxin_patch_bitmask=%s' inappropriate\n",
str);
return err;
}
if (zhaoxin_patch_code & ZHAOXIN_P2CW_NODE_CHECK)
pr_info("zhaoxin dma patch node check is enabled\n");
return 0;
}
__setup("zhaoxin_patch_bitmask=", zhaoxin_patch_code_setup);
static struct pci_dev *kh40000_get_pci_dev(struct device *dev)
{
if (dev_is_pci(dev))
return to_pci_dev(dev);
if (dev->parent)
return kh40000_get_pci_dev(dev->parent);
return NULL;
}
static void kh40000_sync_single_dma_for_cpu(struct device *dev, dma_addr_t paddr,
enum dma_data_direction dir, bool is_iommu)
{
u8 vid;
struct pci_dev *pci;
u64 dma_mask = *dev->dma_mask;
/* check direction */
if ((dir != DMA_FROM_DEVICE) && (dir != DMA_BIDIRECTIONAL))
return;
/* check dma capability */
if (dma_mask <= DMA_BIT_MASK(32))
return;
/* check device type */
pci = kh40000_get_pci_dev(dev);
if (pci == NULL)
return;
/* get real physical address */
if (is_iommu) {
struct iommu_domain *domain = iommu_get_dma_domain(dev);
paddr = iommu_iova_to_phys(domain, paddr);
if (!paddr)
return;
}
/* check node or not */
if ((zhaoxin_patch_code & ZHAOXIN_P2CW_NODE_CHECK)
&& pfn_to_nid(PFN_DOWN(paddr)) == dev_to_node(dev))
return;
/* flush data by one pci read cycle */
pci_read_config_byte(pci, PCI_VENDOR_ID, &vid);
}
/* zhaoxin kh-40000 direct dma ops */
static void *kh40000_dma_direct_alloc(struct device *dev, size_t size,
dma_addr_t *addr, gfp_t gfp, unsigned long attrs)
{
if (dev->coherent_dma_mask > DMA_BIT_MASK(32))
gfp |= __GFP_THISNODE;
return dma_direct_alloc(dev, size, addr, gfp, attrs);
}
static void kh40000_dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
kh40000_sync_single_dma_for_cpu(dev, addr, dir, 0);
dma_direct_unmap_page(dev, addr, size, dir, attrs);
}
static void kh40000_dma_direct_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
struct scatterlist *sg;
int i;
for_each_sg(sgl, sg, nents, i)
kh40000_sync_single_dma_for_cpu(dev, sg_dma_address(sg), dir, 0);
dma_direct_sync_sg_for_cpu(dev, sgl, nents, dir);
}
static void kh40000_dma_direct_sync_single_for_cpu(struct device *dev,
dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
kh40000_sync_single_dma_for_cpu(dev, addr, dir, 0);
dma_direct_sync_single_for_cpu(dev, addr, size, dir);
}
static void kh40000_dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction dir, unsigned long attrs)
{
struct scatterlist *sg;
int i;
for_each_sg(sgl, sg, nents, i)
kh40000_sync_single_dma_for_cpu(dev, sg_dma_address(sg), dir, 0);
dma_direct_unmap_sg(dev, sgl, nents, dir, attrs);
}
static void kh40000_dma_direct_unmap_resource(struct device *dev, dma_addr_t addr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
kh40000_sync_single_dma_for_cpu(dev, addr, dir, 0);
}
const struct dma_map_ops kh40000_dma_direct_ops = {
.flags = DMA_F_PCI_P2PDMA_SUPPORTED,
.alloc = kh40000_dma_direct_alloc,
.sync_sg_for_cpu = kh40000_dma_direct_sync_sg_for_cpu,
.unmap_page = kh40000_dma_direct_unmap_page,
.sync_single_for_cpu = kh40000_dma_direct_sync_single_for_cpu,
.unmap_sg = kh40000_dma_direct_unmap_sg,
.unmap_resource = kh40000_dma_direct_unmap_resource,
.dma_supported = dma_direct_supported,
.free = dma_direct_free,
.alloc_pages = dma_direct_alloc_pages,
.free_pages = dma_direct_free_pages,
.sync_single_for_device = dma_direct_sync_single_for_device,
.sync_sg_for_device = dma_direct_sync_sg_for_device,
.get_required_mask = dma_direct_get_required_mask,
.max_mapping_size = dma_direct_max_mapping_size,
.mmap = dma_direct_mmap,
.get_sgtable = dma_direct_get_sgtable,
.map_page = dma_direct_map_page,
.map_sg = dma_direct_map_sg,
.map_resource = dma_direct_map_resource,
};
/* zhaoxin kh-40000 iommu dma ops */
static const struct dma_map_ops *iommu_dma_ops;
static void *kh40000_iommu_dma_alloc(struct device *dev, size_t size,
dma_addr_t *addr, gfp_t gfp, unsigned long attrs)
{
gfp |= __GFP_THISNODE;
return iommu_dma_ops->alloc(dev, size, addr, gfp, attrs);
}
static void kh40000_iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t handle, unsigned long attrs)
{
iommu_dma_ops->free(dev, size, cpu_addr, handle, attrs);
}
static struct page *kh40000_dma_common_alloc_pages(struct device *dev, size_t size,
dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
return iommu_dma_ops->alloc_pages(dev, size, dma_handle, dir, gfp);
}
static void kh40000_dma_common_free_pages(struct device *dev, size_t size, struct page *page,
dma_addr_t dma_handle, enum dma_data_direction dir)
{
iommu_dma_ops->free_pages(dev, size, page, dma_handle, dir);
}
static struct sg_table *kh40000_iommu_dma_alloc_noncontiguous(struct device *dev,
size_t size, enum dma_data_direction dir, gfp_t gfp,
unsigned long attrs)
{
return iommu_dma_ops->alloc_noncontiguous(dev, size, dir, gfp, attrs);
}
static void kh40000_iommu_dma_free_noncontiguous(struct device *dev, size_t size,
struct sg_table *sgt, enum dma_data_direction dir)
{
return iommu_dma_ops->free_noncontiguous(dev, size, sgt, dir);
}
static int kh40000_iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
{
return iommu_dma_ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
static void kh40000_iommu_dma_unmap_page(struct device *dev, dma_addr_t addr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
kh40000_sync_single_dma_for_cpu(dev, addr, dir, 1);
iommu_dma_ops->unmap_page(dev, addr, size, dir, attrs);
}
static int kh40000_iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
{
return iommu_dma_ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
}
static dma_addr_t kh40000_iommu_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
return iommu_dma_ops->map_page(dev, page, offset, size, dir, attrs);
}
static int kh40000_iommu_dma_map_sg(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction dir, unsigned long attrs)
{
return iommu_dma_ops->map_sg(dev, sgl, nents, dir, attrs);
}
static void kh40000_iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir, unsigned long attrs)
{
struct scatterlist *sg;
int i;
for_each_sg(sgl, sg, nelems, i)
kh40000_sync_single_dma_for_cpu(dev, sg_dma_address(sg), dir, 1);
iommu_dma_ops->unmap_sg(dev, sgl, nelems, dir, attrs);
}
static void kh40000_iommu_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
kh40000_sync_single_dma_for_cpu(dev, addr, dir, 1);
iommu_dma_ops->sync_single_for_cpu(dev, addr, size, dir);
}
static void kh40000_iommu_dma_sync_single_for_device(struct device *dev,
dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
iommu_dma_ops->sync_single_for_device(dev, addr, size, dir);
}
static void kh40000_iommu_dma_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sgl, int nelems,
enum dma_data_direction dir)
{
struct scatterlist *sg;
int i;
for_each_sg(sgl, sg, nelems, i)
kh40000_sync_single_dma_for_cpu(dev, sg_dma_address(sg), dir, 1);
iommu_dma_ops->sync_sg_for_cpu(dev, sgl, nelems, dir);
}
static void kh40000_iommu_dma_sync_sg_for_device(struct device *dev,
struct scatterlist *sgl, int nelems,
enum dma_data_direction dir)
{
iommu_dma_ops->sync_sg_for_device(dev, sgl, nelems, dir);
}
static dma_addr_t kh40000_iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
return iommu_dma_ops->map_resource(dev, phys, size, dir, attrs);
}
static void kh40000_iommu_dma_unmap_resource(struct device *dev, dma_addr_t addr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
kh40000_sync_single_dma_for_cpu(dev, addr, dir, 1);
iommu_dma_ops->unmap_resource(dev, addr, size, dir, attrs);
}
static unsigned long kh40000_iommu_dma_get_merge_boundary(struct device *dev)
{
return iommu_dma_ops->get_merge_boundary(dev);
}
static size_t kh40000_iommu_dma_opt_mapping_size(void)
{
return iommu_dma_ops->opt_mapping_size();
}
const struct dma_map_ops kh40000_dma_iommu_ops = {
.flags = DMA_F_PCI_P2PDMA_SUPPORTED,
.alloc = kh40000_iommu_dma_alloc,
.free = kh40000_iommu_dma_free,
.unmap_page = kh40000_iommu_dma_unmap_page,
.alloc_pages = kh40000_dma_common_alloc_pages,
.free_pages = kh40000_dma_common_free_pages,
.alloc_noncontiguous = kh40000_iommu_dma_alloc_noncontiguous,
.free_noncontiguous = kh40000_iommu_dma_free_noncontiguous,
.mmap = kh40000_iommu_dma_mmap,
.get_sgtable = kh40000_iommu_dma_get_sgtable,
.map_page = kh40000_iommu_dma_map_page,
.map_sg = kh40000_iommu_dma_map_sg,
.unmap_sg = kh40000_iommu_dma_unmap_sg,
.sync_single_for_cpu = kh40000_iommu_dma_sync_single_for_cpu,
.sync_single_for_device = kh40000_iommu_dma_sync_single_for_device,
.sync_sg_for_cpu = kh40000_iommu_dma_sync_sg_for_cpu,
.sync_sg_for_device = kh40000_iommu_dma_sync_sg_for_device,
.map_resource = kh40000_iommu_dma_map_resource,
.unmap_resource = kh40000_iommu_dma_unmap_resource,
.get_merge_boundary = kh40000_iommu_dma_get_merge_boundary,
.opt_mapping_size = kh40000_iommu_dma_opt_mapping_size,
};
void kh40000_set_iommu_dma_ops(struct device *dev)
{
if (dev->dma_ops) {
iommu_dma_ops = dev->dma_ops;
set_dma_ops(dev, &kh40000_dma_iommu_ops);
pr_info_once("zhaoxin iommu dma patch enabled\n");
}
}
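
The code that actually installs these ops on a device is part of the IOMMU diff suppressed earlier in this commit, so it is not visible here. As a rough, hypothetical sketch (the helper name, the extern declaration, and the call site are assumptions, not part of this commit) of how a probe path could pick between the two ops tables defined in this file:

/* Hypothetical sketch - the real hook lives in the suppressed IOMMU diff. */
extern bool is_zhaoxin_kh40000;    /* set by quirk_zhaoxin_dma_patch() */

static void example_setup_kh40000_dma_ops(struct device *dev, bool behind_iommu)
{
    if (!is_zhaoxin_kh40000)
        return;    /* the quirk only triggers on KH-40000 (revision 0x10) */

    if (behind_iommu)
        kh40000_set_iommu_dma_ops(dev);    /* wrap the device's IOMMU dma_map_ops */
    /*
     * Otherwise there is nothing to do here: the early quirk already
     * installed kh40000_dma_direct_ops as the global dma_ops.
     */
}

Both ops tables funnel the CPU-side sync paths through kh40000_sync_single_dma_for_cpu(), which forces outstanding DMA writes to complete with a single PCI config read when the buffer sits on a different node than the device.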


@ -773,6 +773,17 @@ void __weak arch_apei_report_mem_error(int sev,
}
EXPORT_SYMBOL_GPL(arch_apei_report_mem_error);
void __weak arch_apei_report_pcie_error(int sev, struct cper_sec_pcie *pcie_err)
{
}
EXPORT_SYMBOL_GPL(arch_apei_report_pcie_error);
bool __weak arch_apei_report_zdi_error(guid_t *sec_type, struct cper_sec_proc_generic *zdi_err)
{
return false;
}
EXPORT_SYMBOL_GPL(arch_apei_report_zdi_error);
int apei_osc_setup(void)
{
static u8 whea_uuid_str[] = "ed855e0c-6c90-47bf-a62a-26de0fc5ad5c";


@ -703,6 +703,9 @@ static bool ghes_do_proc(struct ghes *ghes,
queued = ghes_handle_memory_failure(gdata, sev, sync);
}
else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
struct cper_sec_pcie *pcie_err = acpi_hest_get_payload(gdata);
arch_apei_report_pcie_error(sec_sev, pcie_err);
ghes_handle_aer(gdata);
}
else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
@ -710,12 +713,15 @@ static bool ghes_do_proc(struct ghes *ghes,
} else {
void *err = acpi_hest_get_payload(gdata);
if (!arch_apei_report_zdi_error(sec_type,
(struct cper_sec_proc_generic *)err)) {
ghes_defer_non_standard_event(gdata, sev);
log_non_standard_event(sec_type, fru_id, fru_text,
sec_sev, err,
gdata->error_data_length);
}
}
}
return queued;
}
@ -1091,6 +1097,8 @@ static int ghes_in_nmi_queue_one_entry(struct ghes *ghes,
u32 len, node_len;
u64 buf_paddr;
int sev, rc;
struct acpi_hest_generic_data *gdata;
guid_t *sec_type;
if (!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG))
return -EOPNOTSUPP;
@ -1126,6 +1134,23 @@ static int ghes_in_nmi_queue_one_entry(struct ghes *ghes,
sev = ghes_severity(estatus->error_severity);
if (sev >= GHES_SEV_PANIC) {
apei_estatus_for_each_section(estatus, gdata) {
sec_type = (guid_t *)gdata->section_type;
if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
arch_apei_report_mem_error(sev, mem_err);
} else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
struct cper_sec_pcie *pcie_err = acpi_hest_get_payload(gdata);
arch_apei_report_pcie_error(sev, pcie_err);
} else if (guid_equal(sec_type, &CPER_SEC_PROC_GENERIC)) {
struct cper_sec_proc_generic *zdi_err =
acpi_hest_get_payload(gdata);
arch_apei_report_zdi_error(sec_type, zdi_err);
}
}
ghes_print_queued_estatus();
__ghes_panic(ghes, estatus, buf_paddr, fixmap_idx);
}


@ -115,6 +115,16 @@ config SATA_AHCI
If unsure, say N.
config AHCI_ZHAOXIN_SGPIO
tristate "zhaoxin AHCI SGPIO support"
depends on SATA_AHCI
default y
help
This option enables support for Zhaoxin AHCI SGPIO,
adding both SGPIO mode and SGPIO GP mode.
If unsure, say N.
config SATA_MOBILE_LPM_POLICY
int "Default SATA Link Power Management policy for low power chipsets"
range 0 4


@ -27,6 +27,7 @@ obj-$(CONFIG_AHCI_ST) += ahci_st.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_TEGRA) += ahci_tegra.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_XGENE) += ahci_xgene.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_QORIQ) += ahci_qoriq.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_ZHAOXIN_SGPIO) += ahci_zhaoxin_sgpio.o
# SFF w/ custom DMA
obj-$(CONFIG_PDC_ADMA) += pdc_adma.o


@ -0,0 +1,706 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* ahci_zhaoxin_sgpio.c - Driver for Zhaoxin sgpio
*/
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#include <linux/pci.h>
#include "ahci.h"
#include "libata.h"
#include "ahci_zhaoxin_sgpio.h"
static LIST_HEAD(sgpio_zhaoxin_list);
static unsigned int zhaoxin_em_type __read_mostly = AHCI_EM_MSG_LED_MODE; /*LED protocol*/
module_param(zhaoxin_em_type, int, 0644);
MODULE_PARM_DESC(zhaoxin_em_type,
"AHCI Enclosure Management Message type control (1 = led on, 2 = sgpio on,3 = sgpio gp on)");
int ahci_wait_em_reset(struct sgpio_zhaoxin *sgpio_zhaoxin, u32 retry)
{
void __iomem *mmio;
u32 em_ctl;
if (!sgpio_zhaoxin || retry == 0) {
pr_err("ahci_wait_em_reset: invalid parameter\n");
return -EINVAL;
}
mmio = sgpio_zhaoxin->mmio;
while (retry--) { /*EM_CTL needs reset at least 64ms*/
em_ctl = readl(mmio + HOST_EM_CTL);
if (em_ctl & EM_CTL_RST)
usleep_range(10000, 20000); /*EM_CTL still in reset, usleep 10ms*/
else
break;
if (!retry)
pr_err("Wait for EM_CTL reset, time out\n");
}
return 0;
}
void ahci_zhaoxin_set_em_sgpio(struct sgpio_zhaoxin *sgpio_zhaoxin)
{
void __iomem *mmio = sgpio_zhaoxin->mmio;
void __iomem *em_mmio = mmio + SGPIO_OFFSET;
u32 read;
sgpio_zhaoxin->sgpio_reg.cfg_0.enable = 1;
sgpio_zhaoxin->sgpio_reg.cfg_1.blink_gen_a = 0x7;
sgpio_zhaoxin->sgpio_reg.cfg_1.blink_gen_b = 0x3;
sgpio_zhaoxin->sgpio_reg.cfg_1.blink_gen_c = 0x0;
sgpio_zhaoxin->sgpio_reg.cfg_1.stretch_act_on = 0;
sgpio_zhaoxin->sgpio_reg.cfg_1.stretch_act_off = 0;
sgpio_zhaoxin->sgpio_reg.cfg_1.max_act_on = 2;
sgpio_zhaoxin->sgpio_reg.cfg_1.force_act_off = 1;
sgpio_zhaoxin->sgpio_reg.gp_transmit_cfg.sload = 0xf;
sgpio_zhaoxin->sgpio_reg.gp_transmit_cfg.count = 0x0;
sgpio_zhaoxin->sgpio_reg.transmit_0.sgpio_tx_0 = 0;
sgpio_zhaoxin->sgpio_reg.transmit_1.sgpio_tx_1 = 0;
sgpio_zhaoxin->sgpio_reg.gp_transmit_reg.sgpio_tx_gp = 0;
sgpio_zhaoxin->sgpio_reg.receive_reg.sgpio_rx = 0x07070707;
sgpio_zhaoxin->sgpio_reg.gp_receive_reg.sgpio_rx_gp = 0;
/*Setup SGPIO type*/
read = readl(mmio + sgpio_zhaoxin->em_loc);
read = read | SGPIO_MESSAGE_HEAD; /*LED register MSG_HEAD, select SGPIO*/
writel(read, mmio + sgpio_zhaoxin->em_loc);
/*Setup gp mode*/
writel(sgpio_zhaoxin->sgpio_reg.gp_transmit_cfg.sgpio_tx_gp_cfg, em_mmio + 0x38);
/*Initial SGPIO CFG1*/
writel(sgpio_zhaoxin->sgpio_reg.cfg_1.sgpio_cfg_1, em_mmio + 0x4);
/*Initial SGPIO CFG0*/
read = readl(em_mmio);
read |= sgpio_zhaoxin->sgpio_reg.cfg_0.sgpio_cfg_0;
writel(read, em_mmio);
}
void ahci_zhaoxin_set_em_sgpio_gpmode(struct sgpio_zhaoxin *sgpio_zhaoxin)
{
void __iomem *mmio = sgpio_zhaoxin->mmio;
void __iomem *em_mmio = mmio + SGPIO_OFFSET;
u32 read;
sgpio_zhaoxin->sgpio_reg.cfg_0.enable = 1;
sgpio_zhaoxin->sgpio_reg.gp_transmit_cfg.sload = 0xf;
sgpio_zhaoxin->sgpio_reg.gp_transmit_cfg.count = 0xff;
sgpio_zhaoxin->sgpio_reg.transmit_0.sgpio_tx_0 = 0;
sgpio_zhaoxin->sgpio_reg.transmit_1.sgpio_tx_1 = 0;
sgpio_zhaoxin->sgpio_reg.gp_transmit_reg.sgpio_tx_gp = 0;
sgpio_zhaoxin->sgpio_reg.receive_reg.sgpio_rx = 0;
sgpio_zhaoxin->sgpio_reg.gp_receive_reg.sgpio_rx_gp = 0xff0f0000;
/*Setup SGPIO type*/
read = readl(mmio + sgpio_zhaoxin->em_loc);
read |= SGPIO_MESSAGE_HEAD;
writel(read, mmio + sgpio_zhaoxin->em_loc);
/*Setup gp mode*/
writel(sgpio_zhaoxin->sgpio_reg.gp_transmit_cfg.sgpio_tx_gp_cfg, em_mmio + 0x38);
/*Enable SGPIO*/
writel(sgpio_zhaoxin->sgpio_reg.cfg_0.sgpio_cfg_0, em_mmio);
}
static ssize_t ahci_em_type_sys_show(struct sgpio_zhaoxin *sgpio_zhaoxin, char *buf)
{
return sprintf(buf, "0x%x\n", zhaoxin_em_type);
}
static ssize_t ahci_em_type_sys_store(struct sgpio_zhaoxin *sgpio_zhaoxin, const char *buf,
size_t count)
{
int code = 0;
int rc = 0;
if (kstrtouint(buf, 0, &code))
return count;
if (code == AHCI_EM_MSG_LED_MODE) {
zhaoxin_em_type = code;
} else if (code == AHCI_EM_MSG_SGPIO_MODE) {
rc = ahci_wait_em_reset(sgpio_zhaoxin, 7); /*wait at least 64ms*/
if (rc < 0) {
pr_err("ahci wait em reset failed!\n");
return rc;
}
zhaoxin_em_type = code;
ahci_zhaoxin_set_em_sgpio(sgpio_zhaoxin);
} else if (code == AHCI_EM_MSG_SGPIO_GP_MODE) {
rc = ahci_wait_em_reset(sgpio_zhaoxin, 7); /*wait at least 64ms*/
if (rc < 0) {
pr_err("ahci wait em reset failed!\n");
return rc;
}
zhaoxin_em_type = code;
ahci_zhaoxin_set_em_sgpio_gpmode(sgpio_zhaoxin);
} else
pr_err("Incorrect value:1 = LED on, 2 = SGPIO normal on, 3 = SGPIO GP on)\n");
return count;
}
static ssize_t ahci_transmit_sgpio_message(unsigned long port_num,
struct sgpio_zhaoxin *sgpio_zhaoxin, u16 state,
ssize_t size)
{
void __iomem *mmio = sgpio_zhaoxin->mmio;
void __iomem *em_mmio = mmio + SGPIO_OFFSET;
unsigned long flags;
if (!(sgpio_zhaoxin->em_msg_type & EM_MSG_TYPE_SGPIO))
return -EINVAL;
spin_lock_irqsave(&sgpio_zhaoxin->wr_lock, flags);
switch (port_num) {
case 0:
writel(SGPIO_MESSAGE_HEAD, mmio + sgpio_zhaoxin->em_loc);
writew(state, em_mmio + 0x22);
sgpio_zhaoxin->sgpio_reg.transmit_0.sgpio_tx_0 &= 0x0000ffff;
sgpio_zhaoxin->sgpio_reg.transmit_0.drive_0_active = (state & 0x3c0) >> 6;
sgpio_zhaoxin->sgpio_reg.transmit_0.drive_0_locate = (state & 0x38) >> 3;
sgpio_zhaoxin->sgpio_reg.transmit_0.drive_0_error = state & 0x7;
break;
case 1:
writel(SGPIO_MESSAGE_HEAD, mmio + sgpio_zhaoxin->em_loc);
writew(state, em_mmio + 0x20);
sgpio_zhaoxin->sgpio_reg.transmit_0.sgpio_tx_0 &= 0xffff0000;
sgpio_zhaoxin->sgpio_reg.transmit_0.drive_1_active = (state & 0x3c0) >> 6;
sgpio_zhaoxin->sgpio_reg.transmit_0.drive_1_locate = (state & 0x38) >> 3;
sgpio_zhaoxin->sgpio_reg.transmit_0.drive_1_error = state & 0x7;
break;
case 2:
writel(SGPIO_MESSAGE_HEAD, mmio + sgpio_zhaoxin->em_loc);
writew(state, em_mmio + 0x26);
sgpio_zhaoxin->sgpio_reg.transmit_1.sgpio_tx_1 &= 0x0000ffff;
sgpio_zhaoxin->sgpio_reg.transmit_1.drive_2_active = (state & 0x3c0) >> 6;
sgpio_zhaoxin->sgpio_reg.transmit_1.drive_2_locate = (state & 0x38) >> 3;
sgpio_zhaoxin->sgpio_reg.transmit_1.drive_2_error = state & 0x7;
break;
case 3:
writel(SGPIO_MESSAGE_HEAD, mmio + sgpio_zhaoxin->em_loc);
writew(state, em_mmio + 0x24);
sgpio_zhaoxin->sgpio_reg.transmit_1.sgpio_tx_1 &= 0xffff0000;
sgpio_zhaoxin->sgpio_reg.transmit_1.drive_3_active = (state & 0x3c0) >> 6;
sgpio_zhaoxin->sgpio_reg.transmit_1.drive_3_locate = (state & 0x38) >> 3;
sgpio_zhaoxin->sgpio_reg.transmit_1.drive_3_error = state & 0x7;
break;
default:
pr_err("Unsupported port number in this controller\n");
break;
}
spin_unlock_irqrestore(&sgpio_zhaoxin->wr_lock, flags);
return size;
}
static ssize_t ahci_transmit_sgpio_indicator(unsigned long port_num,
struct sgpio_zhaoxin *sgpio_zhaoxin,
u8 indicator_code, enum SGPIO_INDICATOR type,
ssize_t size)
{
void __iomem *mmio = sgpio_zhaoxin->mmio;
void __iomem *em_mmio = mmio + SGPIO_OFFSET;
u16 state;
if (!(sgpio_zhaoxin->em_msg_type & EM_MSG_TYPE_SGPIO))
return -EINVAL;
if (get_ahci_em_messages() && (zhaoxin_em_type != AHCI_EM_MSG_SGPIO_MODE)) {
pr_err("Current setting not SGPIO normal mode, quit\n");
return -EINVAL;
}
switch (port_num) {
case 0:
state = readw(em_mmio + 0x22);
break;
case 1:
state = readw(em_mmio + 0x20);
break;
case 2:
state = readw(em_mmio + 0x26);
break;
case 3:
state = readw(em_mmio + 0x24);
break;
default:
return -EINVAL;
}
if (type == SGPIO_ACTIVITY) {
state &= 0xfc3f;
state |= (indicator_code&0xf) << 6;
} else if (type == SGPIO_LOCATE) {
state &= 0xffc7;
state |= (indicator_code&0x7) << 3;
} else if (type == SGPIO_ERROR) {
state &= 0xfff8;
state |= indicator_code & 0x7;
} else {
return -EINVAL;
}
return ahci_transmit_sgpio_message(port_num, sgpio_zhaoxin, state, size);
}
static ssize_t ahci_transmit_sgpio_indicator_gp(unsigned long port_num,
struct sgpio_zhaoxin *sgpio_zhaoxin,
u8 indicator_code, enum SGPIO_INDICATOR type,
ssize_t size)
{
void __iomem *mmio = sgpio_zhaoxin->mmio;
void __iomem *em_mmio = mmio + SGPIO_OFFSET;
union SGPIO_TX_GP state;
unsigned long flags;
if (!(sgpio_zhaoxin->em_msg_type & EM_MSG_TYPE_SGPIO))
return -EINVAL;
if (get_ahci_em_messages() && (zhaoxin_em_type != AHCI_EM_MSG_SGPIO_GP_MODE)) {
pr_err("Current setting not SGPIO_GP mode, quit\n");
return -EINVAL;
}
spin_lock_irqsave(&sgpio_zhaoxin->wr_lock, flags);
state.sgpio_tx_gp = readl(em_mmio + 0x3c);
switch (port_num) {
case 0:
if (type == SGPIO_ACTIVITY)
state.D00 = indicator_code & 0x1;
else if (type == SGPIO_LOCATE)
state.D01 = indicator_code & 0x1;
else if (type == SGPIO_ERROR)
state.D02 = indicator_code & 0x1;
break;
case 1:
if (type == SGPIO_ACTIVITY)
state.D10 = indicator_code & 0x1;
else if (type == SGPIO_LOCATE)
state.D11 = indicator_code & 0x1;
else if (type == SGPIO_ERROR)
state.D12 = indicator_code & 0x1;
break;
case 2:
if (type == SGPIO_ACTIVITY)
state.D20 = indicator_code & 0x1;
else if (type == SGPIO_LOCATE)
state.D21 = indicator_code & 0x1;
else if (type == SGPIO_ERROR)
state.D22 = indicator_code & 0x1;
break;
case 3:
if (type == SGPIO_ACTIVITY)
state.D30 = indicator_code & 0x1;
else if (type == SGPIO_LOCATE)
state.D31 = indicator_code & 0x1;
else if (type == SGPIO_ERROR)
state.D32 = indicator_code & 0x1;
break;
default:
return -EINVAL;
}
writel(SGPIO_MESSAGE_HEAD, mmio + sgpio_zhaoxin->em_loc);
writel(state.sgpio_tx_gp, em_mmio + 0x3c);
sgpio_zhaoxin->sgpio_reg.gp_transmit_reg.sgpio_tx_gp = state.sgpio_tx_gp;
spin_unlock_irqrestore(&sgpio_zhaoxin->wr_lock, flags);
return size;
}
static ssize_t sgpio_activity_store(struct sgpio_zhaoxin *sgpio_zhaoxin, const char *buf,
size_t count)
{
unsigned long val = 0;
unsigned long port_num = 0;
unsigned long code = 0;
if (kstrtoul(buf, 0, &val))
return count;
port_num = val & 0xf;
code = val >> 4;
if (sgpio_zhaoxin->em_msg_type & EM_MSG_TYPE_SGPIO) {
switch (code) {
case 0x0:
ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin,
ACTIVITY_DISABLE, SGPIO_ACTIVITY, 1);
break;
case 0x1:
ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin,
ACTIVITY_ENABLE, SGPIO_ACTIVITY, 1);
break;
case 0x2:
ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin,
ACTIVITY_GA_FON, SGPIO_ACTIVITY, 1);
break;
case 0x3:
ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin,
ACTIVITY_GA_FOFF, SGPIO_ACTIVITY, 1);
break;
case 0x4:
ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin,
ACTIVITY_BRIEF_EN_EOF, SGPIO_ACTIVITY, 1);
break;
case 0x5:
ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin,
ACTIVITY_BRIEF_EN_SOF, SGPIO_ACTIVITY, 1);
break;
case 0x6:
ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin,
ACTIVITY_GB_FON, SGPIO_ACTIVITY, 1);
break;
case 0x7:
ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin,
ACTIVITY_GB_FOFF, SGPIO_ACTIVITY, 1);
break;
case 0x8:
ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin,
ACTIVITY_GC_FON, SGPIO_ACTIVITY, 1);
break;
case 0x9:
ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin,
ACTIVITY_GC_FOFF, SGPIO_ACTIVITY, 1);
break;
case 0x10:
ahci_transmit_sgpio_indicator_gp(port_num, sgpio_zhaoxin,
GP_OFF, SGPIO_ACTIVITY, 1);
break;
case 0x11:
ahci_transmit_sgpio_indicator_gp(port_num, sgpio_zhaoxin,
GP_ON, SGPIO_ACTIVITY, 1);
break;
default:
pr_err("Unsupported command for activity indicator, cmd:0x%lx\n", val);
break;
}
return count;
}
return -EINVAL;
}
static ssize_t sgpio_locate_store(struct sgpio_zhaoxin *sgpio_zhaoxin, const char *buf,
size_t count)
{
unsigned long val = 0;
unsigned long port_num = 0;
unsigned long code = 0;
if (kstrtoul(buf, 0, &val))
return count;
port_num = val & 0xf;
code = val >> 4;
if (sgpio_zhaoxin->em_msg_type & EM_MSG_TYPE_SGPIO) {
switch (code) {
case 0x0:
ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin,
LOCATE_ERROR_DISABLE, SGPIO_LOCATE, 1);
break;
case 0x1:
ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin,
LOCATE_ERROR_ENABLE, SGPIO_LOCATE, 1);
break;
case 0x2:
ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin,
LOCATE_ERROR_GA_FON, SGPIO_LOCATE, 1);
break;
case 0x3:
ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin,
LOCATE_ERROR_GA_FOFF, SGPIO_LOCATE, 1);
break;
case 0x4:
ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin,
LOCATE_ERROR_GB_FON, SGPIO_LOCATE, 1);
break;
case 0x5:
ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin,
LOCATE_ERROR_GB_FOFF, SGPIO_LOCATE, 1);
break;
case 0x6:
ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin,
LOCATE_ERROR_GC_FON, SGPIO_LOCATE, 1);
break;
case 0x7:
ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin,
LOCATE_ERROR_GC_FOFF, SGPIO_LOCATE, 1);
break;
case 0x10:
ahci_transmit_sgpio_indicator_gp(port_num, sgpio_zhaoxin,
GP_OFF, SGPIO_LOCATE, 1);
break;
case 0x11:
ahci_transmit_sgpio_indicator_gp(port_num, sgpio_zhaoxin, GP_ON,
SGPIO_LOCATE, 1);
break;
default:
pr_err("Unsupported command for locate indicator, cmd:0x%lx\n", val);
break;
}
return count;
}
return -EINVAL;
}
static ssize_t sgpio_error_store(struct sgpio_zhaoxin *sgpio_zhaoxin, const char *buf, size_t count)
{
unsigned long val = 0;
unsigned long port_num = 0;
unsigned long code = 0;
if (kstrtoul(buf, 0, &val))
return count;
port_num = val & 0xf;
code = val >> 4;
if (sgpio_zhaoxin->em_msg_type & EM_MSG_TYPE_SGPIO) {
switch (code) {
case 0x0:
ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin,
LOCATE_ERROR_DISABLE, SGPIO_ERROR, 1);
break;
case 0x1:
ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin,
LOCATE_ERROR_ENABLE, SGPIO_ERROR, 1);
break;
case 0x2:
ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin,
LOCATE_ERROR_GA_FON, SGPIO_ERROR, 1);
break;
case 0x3:
ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin,
LOCATE_ERROR_GA_FOFF, SGPIO_ERROR, 1);
break;
case 0x4:
ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin,
LOCATE_ERROR_GB_FON, SGPIO_ERROR, 1);
break;
case 0x5:
ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin,
LOCATE_ERROR_GB_FOFF, SGPIO_ERROR, 1);
break;
case 0x6:
ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin,
LOCATE_ERROR_GC_FON, SGPIO_ERROR, 1);
break;
case 0x7:
ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin,
LOCATE_ERROR_GC_FOFF, SGPIO_ERROR, 1);
break;
case 0x10:
ahci_transmit_sgpio_indicator_gp(port_num, sgpio_zhaoxin,
GP_OFF, SGPIO_ERROR, 1);
break;
case 0x11:
ahci_transmit_sgpio_indicator_gp(port_num, sgpio_zhaoxin,
GP_ON, SGPIO_ERROR, 1);
break;
default:
pr_err("Unsupport command for error indicator, cmd:0x%lx\n", val);
break;
}
return count;
}
return -EINVAL;
}
static struct sgpio_zhaoxin_sysfs_attr dev_attr_ahci_em_type_sys =
__ATTR(ahci_em_type_sys, 0644, ahci_em_type_sys_show,
ahci_em_type_sys_store);
static struct sgpio_zhaoxin_sysfs_attr dev_attr_sgpio_activity =
__ATTR(sgpio_activity, 0200, NULL, sgpio_activity_store);
static struct sgpio_zhaoxin_sysfs_attr dev_attr_sgpio_locate =
__ATTR(sgpio_locate, 0200, NULL, sgpio_locate_store);
static struct sgpio_zhaoxin_sysfs_attr dev_attr_sgpio_error =
__ATTR(sgpio_error, 0200, NULL, sgpio_error_store);
struct attribute *sgpio_attrs[] = {
&dev_attr_ahci_em_type_sys.attr,
&dev_attr_sgpio_activity.attr,
&dev_attr_sgpio_locate.attr,
&dev_attr_sgpio_error.attr,
NULL
};
static const struct attribute_group sgpio_attrs_group = {
.attrs = sgpio_attrs
};
const struct attribute_group *sgpio_groups[] = {
&sgpio_attrs_group,
NULL
};
static ssize_t sgpio_zhaoxin_attr_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
struct sgpio_zhaoxin_sysfs_attr *sgpio_zhaoxin_sysfs_attr = to_sgpio_attr(attr);
struct sgpio_zhaoxin *sgpio_zhaoxin = to_sgpio_obj(kobj);
if (!sgpio_zhaoxin_sysfs_attr->show)
return -EIO;
return sgpio_zhaoxin_sysfs_attr->show(sgpio_zhaoxin, buf);
}
static ssize_t sgpio_zhaoxin_attr_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t len)
{
struct sgpio_zhaoxin_sysfs_attr *sgpio_zhaoxin_sysfs_attr = to_sgpio_attr(attr);
struct sgpio_zhaoxin *sgpio_zhaoxin = to_sgpio_obj(kobj);
if (!sgpio_zhaoxin_sysfs_attr->store)
return -EIO;
return sgpio_zhaoxin_sysfs_attr->store(sgpio_zhaoxin, buf, len);
}
const struct sysfs_ops sgpio_zhaoxin_sysfs_ops = {
.show = sgpio_zhaoxin_attr_show,
.store = sgpio_zhaoxin_attr_store,
};
const struct kobj_type sgpio_zhaoxin_ktype = {
.sysfs_ops = &sgpio_zhaoxin_sysfs_ops,
.default_groups = sgpio_groups,
};
void set_em_messages(struct sgpio_zhaoxin *sgpio_zhaoxin)
{
void __iomem *mmio = sgpio_zhaoxin->mmio;
u32 em_loc = readl(mmio + HOST_EM_LOC);
u32 em_ctl = readl(mmio + HOST_EM_CTL);
u8 messages;
if (!get_ahci_em_messages())
return;
messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
if (messages) {
/* store em_loc */
sgpio_zhaoxin->em_loc = ((em_loc >> 16) * 4);
sgpio_zhaoxin->em_buf_sz = ((em_loc & 0xff) * 4);
sgpio_zhaoxin->em_msg_type = messages;
}
}
int add_sgpio_zhaoxin(void)
{
struct pci_dev *pdev_cur = pci_get_device(PCI_VENDOR_ID_ZHAOXIN, 0x9083, NULL);
struct pci_dev *pdev_next = pdev_cur;
struct sgpio_zhaoxin *sgpio_zhaoxin;
int ret = 0;
if (!get_ahci_em_messages())
return 0;
while (pdev_next) {
pdev_next = pci_get_device(PCI_VENDOR_ID_ZHAOXIN, 0x9083, pdev_cur);
WARN_ON(MAX_TEST_RESULT_LEN <= 0);
sgpio_zhaoxin = (struct sgpio_zhaoxin *)get_zeroed_page(GFP_KERNEL);
if (!sgpio_zhaoxin)
return -ENOMEM;
list_add(&sgpio_zhaoxin->list, &sgpio_zhaoxin_list);
ret = kobject_init_and_add(&sgpio_zhaoxin->kobj, &sgpio_zhaoxin_ktype,
&(&pdev_cur->dev)->kobj, "zx_sgpio");
if (ret) {
kobject_put(&sgpio_zhaoxin->kobj);
return -1;
}
kobject_uevent(&sgpio_zhaoxin->kobj, KOBJ_ADD);
spin_lock_init(&sgpio_zhaoxin->wr_lock);
sgpio_zhaoxin->kobj_valid = 1;
sgpio_zhaoxin->mmio = pcim_iomap_table(pdev_cur)[5];
set_em_messages(sgpio_zhaoxin);
ret = ahci_wait_em_reset(sgpio_zhaoxin, 7); /*wait at least 64ms*/
if (ret < 0) {
pr_err("ahci wait em reset failed!\n");
return ret;
}
sgpio_zhaoxin->kobj_valid = 1;
if (zhaoxin_em_type == AHCI_EM_MSG_SGPIO_GP_MODE)
ahci_zhaoxin_set_em_sgpio_gpmode(sgpio_zhaoxin);
else if (zhaoxin_em_type == AHCI_EM_MSG_SGPIO_MODE)
ahci_zhaoxin_set_em_sgpio(sgpio_zhaoxin);
pdev_cur = pdev_next;
}
return 0;
}
void remove_sgpio_zhaoxin(void)
{
struct sgpio_zhaoxin *cur = NULL, *next = NULL;
if (!get_ahci_em_messages())
return;
list_for_each_entry_safe(cur, next, &sgpio_zhaoxin_list, list) {
list_del(&cur->list);
if (cur->kobj_valid)
kobject_put(&cur->kobj);
free_page((unsigned long)cur);
if (!next)
break;
}
}
static int __init zhaoxin_sgpio_init(void)
{
return add_sgpio_zhaoxin();
}
static void __exit zhaoxin_sgpio_exit(void)
{
remove_sgpio_zhaoxin();
}
late_initcall(zhaoxin_sgpio_init);
module_exit(zhaoxin_sgpio_exit);
MODULE_DESCRIPTION("Zhaoxin SGPIO driver");
MODULE_AUTHOR("XanderChen");
MODULE_LICENSE("GPL");


@ -0,0 +1,221 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ACHI_ZHAOXIN_SGPIO_H
#define _ACHI_ZHAOXIN_SGPIO_H
#define SGPIO_OFFSET 0x580
#define SGPIO_MESSAGE_HEAD 0x3000000
#define ACTIVITY_DISABLE 0x0
#define ACTIVITY_ENABLE 0x1
#define ACTIVITY_GA_FON 0x2
#define ACTIVITY_GA_FOFF 0x3
#define ACTIVITY_BRIEF_EN_EOF 0x4
#define ACTIVITY_BRIEF_EN_SOF 0x5
#define ACTIVITY_GB_FON 0x6
#define ACTIVITY_GB_FOFF 0x7
#define ACTIVITY_GC_FON 0x8
#define ACTIVITY_GC_FOFF 0x9
#define LOCATE_ERROR_DISABLE 0x0
#define LOCATE_ERROR_ENABLE 0x1
#define LOCATE_ERROR_GA_FON 0x2
#define LOCATE_ERROR_GA_FOFF 0x3
#define LOCATE_ERROR_GB_FON 0x4
#define LOCATE_ERROR_GB_FOFF 0x5
#define LOCATE_ERROR_GC_FON 0x6
#define LOCATE_ERROR_GC_FOFF 0x7
#define GP_OFF 0x10
#define GP_ON 0x11
#define to_sgpio_attr(x) container_of(x, struct sgpio_zhaoxin_sysfs_attr, attr)
#define to_sgpio_obj(x) container_of(x, struct sgpio_zhaoxin, kobj)
#define MAX_TEST_RESULT_LEN (PAGE_SIZE - sizeof(struct sgpio_zhaoxin) - 8)
/* SGPIO module parameter: 0 = off, 1 = LED, 2 = SGPIO, 3 = SGPIO_GP */
enum ahci_em_msg_modes {
AHCI_EM_MSG_OFF = 0,
AHCI_EM_MSG_LED_MODE,
AHCI_EM_MSG_SGPIO_MODE,
AHCI_EM_MSG_SGPIO_GP_MODE,
AHCI_EM_MSG_NULL,
};
enum SGPIO_INDICATOR {
SGPIO_ACTIVITY,
SGPIO_LOCATE,
SGPIO_ERROR
};
enum SGPIO_CFG1 {
STRETCH_ACTIVITY_OFF,
STRETCH_ACTIVITY_ON,
FORCE_ACTIVITY_OFF,
MAXIMUM_ACTIVITY_ON,
BLINK_GENERATIOR_RATE_B,
BLINK_GENERATIOR_RATE_A,
BLINK_GENERATIOR_RATE_C
};
union SGPIO_CFG_0 {
struct {
u32 reserved0 :8;
u32 version :4;
u32 reserved1 :4;
u32 gp_register_count :4;
u32 cfg_register_count :3;
u32 enable :1;
u32 supported_drive_count :8;
};
u32 sgpio_cfg_0;
};
union SGPIO_CFG_1 {
struct {
u32 reserved0 :4;
u32 blink_gen_c :4;
u32 blink_gen_a :4;
u32 blink_gen_b :4;
u32 max_act_on :4;
u32 force_act_off :4;
u32 stretch_act_on :4;
u32 stretch_act_off :4;
};
u32 sgpio_cfg_1;
};
union SGPIO_RX {
struct {
u32 drive_3_input :3;
u32 reserved3 :5;
u32 drive_2_input :3;
u32 reserved2 :5;
u32 drive_1_input :3;
u32 reserved1 :5;
u32 drive_0_input :3;
u32 reserved0 :5;
};
u32 sgpio_rx;
};
union SGPIO_RX_GP_CFG {
struct {
u32 reserved0 :16;
u32 count :8;
u32 reserved1 :8;
};
u32 sgpio_rx_gp_cfg;
};
union SGPIO_RX_GP {
struct {
u32 reserved0 :16;
u32 D22 :1;
u32 D30 :1;
u32 D31 :1;
u32 D32 :1;
u32 reserved1:4;
u32 D00 :1;
u32 D01 :1;
u32 D02 :1;
u32 D10 :1;
u32 D11 :1;
u32 D12 :1;
u32 D20 :1;
u32 D21 :1;
};
u32 sgpio_rx_gp;
};
union SGPIO_TX_0 {
struct {
u32 drive_1_error :3;
u32 drive_1_locate :3;
u32 drive_1_active :4;
u32 reserved1 :6;
u32 drive_0_error :3;
u32 drive_0_locate :3;
u32 drive_0_active :4;
u32 reserved0 :6;
};
u32 sgpio_tx_0;
};
union SGPIO_TX_1 {
struct {
u32 drive_3_error :3;
u32 drive_3_locate :3;
u32 drive_3_active :4;
u32 reserved3 :6;
u32 drive_2_error :3;
u32 drive_2_locate :3;
u32 drive_2_active :4;
u32 reserved2 :6;
};
u32 sgpio_tx_1;
};
union SGPIO_TX_GP_CFG {
struct {
u32 reserved0 :16;
u32 count :8;
u32 sload :4;
u32 reserved1 :4;
};
u32 sgpio_tx_gp_cfg;
};
union SGPIO_TX_GP {
struct {
u32 reserved0 :16;
u32 D22 :1;
u32 D30 :1;
u32 D31 :1;
u32 D32 :1;
u32 reserved1:4;
u32 D00 :1;
u32 D01 :1;
u32 D02 :1;
u32 D10 :1;
u32 D11 :1;
u32 D12 :1;
u32 D20 :1;
u32 D21 :1;
};
u32 sgpio_tx_gp;
};
struct AHCI_SGPIO_REG {
union SGPIO_CFG_0 cfg_0;
union SGPIO_CFG_1 cfg_1;
union SGPIO_RX receive_reg;
union SGPIO_RX_GP_CFG gp_receive_cfg;
union SGPIO_RX_GP gp_receive_reg;
union SGPIO_TX_0 transmit_0;
union SGPIO_TX_1 transmit_1;
union SGPIO_TX_GP_CFG gp_transmit_cfg;
union SGPIO_TX_GP gp_transmit_reg;
};
struct sgpio_zhaoxin {
struct kobject kobj;
struct list_head list;
unsigned int kobj_valid;
unsigned int index;
u32 em_loc; /* enclosure management location */
u32 em_buf_sz; /* EM buffer size in byte */
u32 em_msg_type; /* EM message type */
void __iomem *mmio;
spinlock_t wr_lock; /* protects sgpio register */
struct AHCI_SGPIO_REG sgpio_reg; /* saved sgpio register */
};
struct sgpio_zhaoxin_sysfs_attr {
struct attribute attr;
ssize_t (*show)(struct sgpio_zhaoxin *sgpio_zhaoxin, char *buf);
ssize_t (*store)(struct sgpio_zhaoxin *sgpio_zhaoxin, const char *buf, size_t count);
};
int get_ahci_em_messages(void);
#endif /* _ACHI_ZHAOXIN_SGPIO_H */


@ -207,6 +207,12 @@ static int devslp_idle_timeout __read_mostly = 1000;
module_param(devslp_idle_timeout, int, 0644);
MODULE_PARM_DESC(devslp_idle_timeout, "device sleep idle timeout");
int get_ahci_em_messages(void)
{
return ahci_em_messages;
}
EXPORT_SYMBOL_GPL(get_ahci_em_messages);
static void ahci_enable_ahci(void __iomem *mmio)
{
int i;


@ -628,28 +628,35 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
#endif
#ifdef CONFIG_ACPI_CPPC_LIB
static u64 get_max_boost_ratio(unsigned int cpu)
static bool cppc_highest_perf_diff;
static struct cpumask core_prior_mask;
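/*
 * Read the highest and nominal performance levels from the CPPC
 * capabilities; on AMD the highest level comes from amd_get_highest_perf().
 */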
static void cppc_get_highest_nominal_perf(int cpu, u64 *highest_perf, u64 *nominal_perf)
{
struct cppc_perf_caps perf_caps;
u64 highest_perf, nominal_perf;
int ret;
ret = cppc_get_perf_caps(cpu, &perf_caps);
if (ret) {
pr_debug("CPU%d: Unable to get performance capabilities (%d)\n", cpu, ret);
return;
}
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
*highest_perf = amd_get_highest_perf();
else
*highest_perf = perf_caps.highest_perf;
*nominal_perf = perf_caps.nominal_perf;
}
static u64 get_max_boost_ratio(unsigned int cpu)
{
u64 highest_perf, nominal_perf;
if (acpi_pstate_strict)
return 0;
ret = cppc_get_perf_caps(cpu, &perf_caps);
if (ret) {
pr_debug("CPU%d: Unable to get performance capabilities (%d)\n",
cpu, ret);
return 0;
}
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
highest_perf = amd_get_highest_perf();
else
highest_perf = perf_caps.highest_perf;
nominal_perf = perf_caps.nominal_perf;
cppc_get_highest_nominal_perf(cpu, &highest_perf, &nominal_perf);
if (!highest_perf || !nominal_perf) {
pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
@@ -663,8 +670,51 @@ static u64 get_max_boost_ratio(unsigned int cpu)
return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
}
/* The work item is needed to avoid CPU hotplug locking issues */
static void cpufreq_sched_itmt_work_fn(struct work_struct *work)
{
sched_set_itmt_support();
}
static DECLARE_WORK(sched_itmt_work, cpufreq_sched_itmt_work_fn);
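/*
 * Use the CPPC highest performance level as the ITMT core priority.
 * Once every online CPU has reported and the levels differ, enable
 * ITMT support via the work item above.
 */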
static void cpufreq_set_itmt_prio(int cpu)
{
u64 highest_perf, nominal_perf;
static u64 max_highest_perf = 0, min_highest_perf = U64_MAX;
cppc_get_highest_nominal_perf(cpu, &highest_perf, &nominal_perf);
sched_set_itmt_core_prio(highest_perf, cpu);
cpumask_set_cpu(cpu, &core_prior_mask);
if (max_highest_perf <= min_highest_perf) {
if (highest_perf > max_highest_perf)
max_highest_perf = highest_perf;
if (highest_perf < min_highest_perf)
min_highest_perf = highest_perf;
if (max_highest_perf > min_highest_perf) {
/*
* This code can be run during CPU online under the
* CPU hotplug locks, so sched_set_itmt_support()
* cannot be called from here. Queue up a work item
* to invoke it.
*/
cppc_highest_perf_diff = true;
}
}
if (cppc_highest_perf_diff && cpumask_equal(&core_prior_mask, cpu_online_mask)) {
pr_debug("queue a work to set itmt enabled\n");
schedule_work(&sched_itmt_work);
}
}
#else
static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; }
static inline void cpufreq_set_itmt_prio(int cpu) { }
#endif
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
@@ -677,7 +727,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
unsigned int valid_states = 0;
unsigned int result = 0;
u64 max_boost_ratio;
unsigned int i;
unsigned int i, j;
#ifdef CONFIG_SMP
static int blacklisted;
#endif
@@ -741,6 +791,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
pr_info_once("overriding BIOS provided _PSD data\n");
}
#endif
if (c->x86_vendor == X86_VENDOR_CENTAUR || c->x86_vendor == X86_VENDOR_ZHAOXIN) {
for_each_cpu(j, policy->cpus) {
cpufreq_set_itmt_prio(j);
}
}
/* capability check */
if (perf->state_count <= 1) {

View File

@@ -141,6 +141,59 @@ static const char * const proc_flag_strs[] = {
"corrected",
};
static const char *const zdi_zpi_err_type_strs[] = {
"No Error",
"Training Error Status (PHY)",
"Data Link Protocol Error Status (DLL)",
"Surprise Down Error Status",
"Flow Control Protocol Error Status (TL)",
"Receiver Overflow Status (TL)",
"Receiver Error Status (PHY)",
"Bad TLP Status (DLL)",
"Bad Data Link Layer Packet (DLLP) Status (DLL)",
"REPLAY_NUM Rollover Status (DLL)",
"Replay Timer Timeout Status (DLL)",
"X16 Link Width Unreliable Status",
"ZPI X8 Link Width Unreliable Status",
"ZPI X4 Link Width Unreliable Status",
"ZPI X2 Link Width Unreliable Status",
"ZPI Gen3 Link Speed Unreliable Status",
"ZPI Gen2 Link Speed Unreliable Status",
"ZDI Gen3 Link Speed Unreliable Status",
"ZDI Gen4 Link Speed Unreliable Status",
};
const char *cper_zdi_zpi_err_type_str(unsigned int etype)
{
return etype < ARRAY_SIZE(zdi_zpi_err_type_strs) ?
zdi_zpi_err_type_strs[etype] : "unknown error";
}
EXPORT_SYMBOL_GPL(cper_zdi_zpi_err_type_str);
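/*
 * Decode Zhaoxin ZDI/ZPI errors from a generic processor error section:
 * the requestor ID encodes the bus/device numbers and the responder ID
 * carries the error type index.
 */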
static void cper_print_proc_generic_zdi_zpi(const char *pfx,
const struct cper_sec_proc_generic *zdi_zpi)
{
#if IS_ENABLED(CONFIG_X86)
u8 etype = zdi_zpi->responder_id;
if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN ||
boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) {
if ((zdi_zpi->requestor_id & 0xff) == 7) {
pr_info("%s general processor error (zpi error)\n", pfx);
} else if ((zdi_zpi->requestor_id & 0xff) == 6) {
pr_info("%s general processor error (zdi error)\n", pfx);
} else {
pr_info("%s general processor error (unknown error)\n", pfx);
return;
}
pr_info("%s bus number %llx device number %llx function number 0\n", pfx,
((zdi_zpi->requestor_id)>>8) & 0xff, zdi_zpi->requestor_id & 0xff);
pr_info("%s apic id %lld error_type: %s\n", pfx, zdi_zpi->proc_id,
cper_zdi_zpi_err_type_str(etype));
}
#endif
}
static void cper_print_proc_generic(const char *pfx,
const struct cper_sec_proc_generic *proc)
{
@@ -184,6 +237,8 @@ static void cper_print_proc_generic(const char *pfx,
pfx, proc->responder_id);
if (proc->validation_bits & CPER_PROC_VALID_IP)
printk("%s""IP: 0x%016llx\n", pfx, proc->ip);
cper_print_proc_generic_zdi_zpi(pfx, proc);
}
static const char * const mem_err_type_strs[] = {

View File

@@ -360,6 +360,17 @@ config I2C_SCMI
To compile this driver as a module, choose M here:
the module will be called i2c-scmi.
config I2C_ZHAOXIN_SMBUS
tristate "Zhaoxin SMBus Interface"
depends on PCI || COMPILE_TEST
default m
help
If you say yes to this option, support will be included for the
Zhaoxin SMBus interface.
This driver can also be built as a module. If so, the module
will be called i2c-zhaoxin-smbus.
endif # ACPI
comment "Mac SMBus host controller drivers"

View File

@@ -139,6 +139,7 @@ obj-$(CONFIG_I2C_ROBOTFUZZ_OSIF) += i2c-robotfuzz-osif.o
obj-$(CONFIG_I2C_TAOS_EVM) += i2c-taos-evm.o
obj-$(CONFIG_I2C_TINY_USB) += i2c-tiny-usb.o
obj-$(CONFIG_I2C_VIPERBOARD) += i2c-viperboard.o
obj-$(CONFIG_I2C_ZHAOXIN_SMBUS) += i2c-zhaoxin-smbus.o
# Other I2C/SMBus bus drivers
obj-$(CONFIG_I2C_ACORN) += i2c-acorn.o

View File

@@ -0,0 +1,385 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Zhaoxin SMBus controller driver
*
* Copyright(c) 2023 Shanghai Zhaoxin Semiconductor Corporation.
* All rights reserved.
*/
#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#define DRIVER_VERSION "3.1.0"
#define ZXSMB_NAME "smbus_zhaoxin"
/*
* registers
*/
/* SMBus MMIO address offsets */
#define ZXSMB_STS 0x00
#define ZXSMB_BUSY BIT(0)
#define ZXSMB_CMD_CMPLET BIT(1)
#define ZXSMB_DEV_ERR BIT(2)
#define ZXSMB_BUS_CLSI BIT(3)
#define ZXSMB_FAIL_TRANS BIT(4)
#define ZXSMB_STS_MASK GENMASK(4, 0)
#define ZXSMB_NSMBSRST BIT(5)
#define ZXSMB_CTL 0x02
#define ZXSMB_CMPLT_EN BIT(0)
#define ZXSMB_KILL_PRG BIT(1)
#define ZXSMB_START BIT(6)
#define ZXSMB_PEC_EN BIT(7)
#define ZXSMB_CMD 0x03
#define ZXSMB_ADD 0x04
#define ZXSMB_DAT0 0x05
#define ZXSMB_DAT1 0x06
#define ZXSMB_BLKDAT 0x07
/*
* platform related information
*/
/* protocol cmd constants */
#define ZXSMB_QUICK 0x00
#define ZXSMB_BYTE 0x04
#define ZXSMB_BYTE_DATA 0x08
#define ZXSMB_WORD_DATA 0x0C
#define ZXSMB_PROC_CALL 0x10
#define ZXSMB_BLOCK_DATA 0x14
#define ZXSMB_I2C_10_BIT_ADDR 0x18
#define ZXSMB_I2C_PROC_CALL 0x30
#define ZXSMB_I2C_BLOCK_DATA 0x34
#define ZXSMB_I2C_7_BIT_ADDR 0x38
#define ZXSMB_UNIVERSAL 0x3C
#define ZXSMB_TIMEOUT 500
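/*
 * Per-adapter state: I/O port base, optional IRQ (0 means polling mode),
 * the protocol code for the current transaction and the latched status.
 */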
struct zxsmb {
struct device *dev;
struct i2c_adapter adap;
struct completion complete;
u16 base;
int irq;
u8 status;
int size;
u8 pec;
};
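/* Latch and write-clear the status register, then wake up the waiter. */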
static irqreturn_t zxsmb_irq_handle(int irq, void *dev_id)
{
struct zxsmb *smb = (struct zxsmb *)dev_id;
smb->status = inb(smb->base + ZXSMB_STS);
if ((smb->status & ZXSMB_STS_MASK) == 0)
return IRQ_NONE;
/* clear status */
outb(smb->status, smb->base + ZXSMB_STS);
complete(&smb->complete);
return IRQ_HANDLED;
}
static int zxsmb_status_check(struct zxsmb *smb)
{
if (smb->status & ZXSMB_CMD_CMPLET)
return 0;
if (smb->status & ZXSMB_BUS_CLSI) {
dev_err(smb->dev, "Lost arbitration\n");
outb(ZXSMB_KILL_PRG, smb->base + ZXSMB_CTL);
return -EAGAIN;
}
dev_dbg(smb->dev, "Trans failed, status = 0x%X\n", smb->status);
return -EIO;
}
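/*
 * Wait for the IRQ handler to signal completion; if the interrupt never
 * arrives but the status register shows a finished transaction, fall back
 * to polling mode permanently and ask the caller to retry.
 */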
static int zxsmb_wait_interrupt_finish(struct zxsmb *smb)
{
int time_left;
time_left = wait_for_completion_timeout(&smb->complete, msecs_to_jiffies(ZXSMB_TIMEOUT));
if (time_left == 0) {
u8 status = inb(smb->base + ZXSMB_STS);
/* some hosts' IRQ configuration does not work well */
if (status & ZXSMB_STS_MASK) {
outb(status, smb->base + ZXSMB_STS);
outb(ZXSMB_KILL_PRG, smb->base + ZXSMB_CTL);
devm_free_irq(smb->dev, smb->irq, smb);
smb->irq = 0;
dev_warn(smb->dev, "change to polling mode\n");
return -EAGAIN;
}
dev_dbg(smb->dev, "interrput timeout\n");
return -EIO;
}
return zxsmb_status_check(smb);
}
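/* Poll the status register until the controller leaves the busy state or the timeout expires. */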
static int zxsmb_wait_polling_finish(struct zxsmb *smb)
{
int status;
int time_left = ZXSMB_TIMEOUT * 10;
do {
usleep_range(100, 200);
status = inb(smb->base + ZXSMB_STS);
} while ((status & ZXSMB_BUSY) && (--time_left));
if (time_left == 0) {
dev_dbg(smb->dev, "polling timeout\n");
return -EIO;
}
/* clear status */
outb(status, smb->base + ZXSMB_STS);
smb->status = status;
return zxsmb_status_check(smb);
}
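/*
 * Make sure the host is idle, then program the control register with the
 * protocol code, PEC and interrupt-enable bits and kick off the transfer.
 */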
static int zxsmb_trans_start(struct zxsmb *smb)
{
u16 base = smb->base;
int tmp;
/* Make sure the SMBus host is ready to start transmitting */
tmp = inb(base + ZXSMB_STS);
if (tmp & ZXSMB_BUSY) {
outb(tmp, base + ZXSMB_STS);
usleep_range(1000, 5000);
tmp = inb(base + ZXSMB_STS);
if (tmp & ZXSMB_BUSY) {
dev_err(smb->dev, "SMBus reset failed! (0x%02x)\n", tmp);
return -EIO;
}
}
tmp = ZXSMB_START | smb->size;
if (smb->pec)
tmp |= ZXSMB_PEC_EN;
else
tmp &= (~ZXSMB_PEC_EN);
if (smb->irq)
tmp |= ZXSMB_CMPLT_EN;
reinit_completion(&smb->complete);
smb->status = 0;
outb(tmp, base + ZXSMB_CTL);
return 0;
}
static int zxsmb_transaction(struct zxsmb *smb)
{
int err;
err = zxsmb_trans_start(smb);
if (err)
return err;
if (smb->irq)
err = zxsmb_wait_interrupt_finish(smb);
else
err = zxsmb_wait_polling_finish(smb);
outb(0, smb->base + ZXSMB_CTL);
return err;
}
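/*
 * Translate an SMBus request into the controller's protocol codes, load the
 * command/data registers, run the transaction and read back any result.
 */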
static int zxsmb_smbus_xfer(struct i2c_adapter *adap, u16 addr, u16 flags, char read, u8 command,
int size, union i2c_smbus_data *data)
{
int i;
int err;
u8 len;
struct zxsmb *smb = (struct zxsmb *)i2c_get_adapdata(adap);
u16 base = smb->base;
switch (size) {
case I2C_SMBUS_QUICK:
size = ZXSMB_QUICK;
break;
case I2C_SMBUS_BYTE:
size = ZXSMB_BYTE;
if (!read)
outb(command, base + ZXSMB_CMD);
break;
case I2C_SMBUS_BYTE_DATA:
outb(command, base + ZXSMB_CMD);
if (!read)
outb(data->byte, base + ZXSMB_DAT0);
size = ZXSMB_BYTE_DATA;
break;
case I2C_SMBUS_PROC_CALL:
case I2C_SMBUS_WORD_DATA:
if (read && size == I2C_SMBUS_PROC_CALL)
goto exit_unsupported;
outb(command, base + ZXSMB_CMD);
if (!read) {
outb(data->word & 0xff, base + ZXSMB_DAT0);
outb((data->word & 0xff00) >> 8, base + ZXSMB_DAT1);
}
size = (size == I2C_SMBUS_PROC_CALL) ?
ZXSMB_PROC_CALL : ZXSMB_WORD_DATA;
break;
case I2C_SMBUS_I2C_BLOCK_DATA:
case I2C_SMBUS_BLOCK_DATA:
len = data->block[0];
if (read && size == I2C_SMBUS_I2C_BLOCK_DATA)
outb(len, base + ZXSMB_DAT1);
outb(command, base + ZXSMB_CMD);
/* Reset ZXSMB_BLKDAT */
inb(base + ZXSMB_CTL);
if (!read) {
outb(len, base + ZXSMB_DAT0);
outb(0, base + ZXSMB_DAT1);
for (i = 1; i <= len; i++)
outb(data->block[i], base + ZXSMB_BLKDAT);
}
size = (size == I2C_SMBUS_I2C_BLOCK_DATA) ?
ZXSMB_I2C_BLOCK_DATA : ZXSMB_BLOCK_DATA;
break;
default:
goto exit_unsupported;
}
outb(((addr & 0x7f) << 1) | read, base + ZXSMB_ADD);
smb->size = size;
smb->pec = flags & I2C_CLIENT_PEC;
err = zxsmb_transaction(smb);
if (err)
return err;
if ((read == I2C_SMBUS_WRITE) || (size == ZXSMB_QUICK)) {
if (unlikely(size == ZXSMB_PROC_CALL))
goto prepare_read;
return 0;
}
prepare_read:
switch (size) {
case ZXSMB_BYTE:
case ZXSMB_BYTE_DATA:
data->byte = inb(base + ZXSMB_DAT0);
break;
case ZXSMB_PROC_CALL:
case ZXSMB_WORD_DATA:
data->word = inb(base + ZXSMB_DAT0) + (inb(base + ZXSMB_DAT1) << 8);
break;
case ZXSMB_I2C_BLOCK_DATA:
case ZXSMB_BLOCK_DATA:
data->block[0] = inb(base + ZXSMB_DAT0);
if (data->block[0] > I2C_SMBUS_BLOCK_MAX)
data->block[0] = I2C_SMBUS_BLOCK_MAX;
/* Reset ZXSMB_BLKDAT */
inb(base + ZXSMB_CTL);
for (i = 1; i <= data->block[0]; i++)
data->block[i] = inb(base + ZXSMB_BLKDAT);
break;
}
return 0;
exit_unsupported:
dev_err(smb->dev, "unsupported access, size:%x, dir:%s", size, read ? "read" : "write");
return -EOPNOTSUPP;
}
static u32 zxsmb_func(struct i2c_adapter *adapter)
{
return I2C_FUNC_SMBUS_EMUL;
}
static const struct i2c_algorithm smbus_algorithm = {
.smbus_xfer = zxsmb_smbus_xfer,
.functionality = zxsmb_func,
};
static int zxsmb_probe(struct platform_device *pdev)
{
struct zxsmb *smb;
struct resource *res;
struct i2c_adapter *adap;
smb = devm_kzalloc(&pdev->dev, sizeof(*smb), GFP_KERNEL);
if (!smb)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!res)
return -ENODEV;
smb->base = res->start;
if (!devm_request_region(&pdev->dev, res->start, resource_size(res), pdev->name)) {
dev_err(&pdev->dev, "Can't get I/O resource\n");
return -EBUSY;
}
smb->irq = platform_get_irq(pdev, 0);
if (smb->irq < 0 || devm_request_irq(&pdev->dev, smb->irq, zxsmb_irq_handle, IRQF_SHARED,
pdev->name, smb)) {
dev_warn(&pdev->dev, "failed to request irq %d\n", smb->irq);
smb->irq = 0;
} else {
init_completion(&smb->complete);
}
smb->dev = &pdev->dev;
platform_set_drvdata(pdev, (void *)smb);
adap = &smb->adap;
adap->algo = &smbus_algorithm;
adap->retries = 2;
adap->owner = THIS_MODULE;
adap->dev.parent = &pdev->dev;
ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev));
snprintf(adap->name, sizeof(adap->name), "zhaoxin-%s-%s", dev_name(pdev->dev.parent),
dev_name(smb->dev));
i2c_set_adapdata(&smb->adap, smb);
return i2c_add_adapter(&smb->adap);
}
static int zxsmb_remove(struct platform_device *pdev)
{
struct zxsmb *smb = platform_get_drvdata(pdev);
i2c_del_adapter(&(smb->adap));
platform_set_drvdata(pdev, NULL);
devm_kfree(&pdev->dev, smb);
return 0;
}
static const struct acpi_device_id zxsmb_acpi_match[] = {
{"SMB3324", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, zxsmb_acpi_match);
static struct platform_driver zxsmb_driver = {
.probe = zxsmb_probe,
.remove = zxsmb_remove,
.driver = {
.name = ZXSMB_NAME,
.acpi_match_table = ACPI_PTR(zxsmb_acpi_match),
},
};
module_platform_driver(zxsmb_driver);
MODULE_AUTHOR("hanshu@zhaoxin.com");
MODULE_DESCRIPTION("Zhaoxin SMBus driver");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");

View File

@@ -1,10 +1,10 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright(c) 2023 Shanghai Zhaoxin Semiconductor Corporation.
* Copyright(c) 2024 Shanghai Zhaoxin Semiconductor Corporation.
* All rights reserved.
*/
#define DRIVER_VERSION "1.5.2"
#define DRIVER_VERSION "1.6.0"
#include <linux/acpi.h>
#include <linux/delay.h>
@@ -32,7 +32,7 @@
/* REG_TCR Bit fields */
#define ZXI2C_REG_TCR 0x02
#define ZXI2C_TCR_HS_MODE BIT(13)
#define ZXI2C_TCR_MASTER_READ BIT(14)
#define ZXI2C_TCR_READ BIT(14)
#define ZXI2C_TCR_FAST BIT(15)
/* REG_CSR Bit fields */
@@ -48,9 +48,7 @@
#define ZXI2C_ISR_MASK_ALL GENMASK(2, 0)
#define ZXI2C_IRQ_FIFOEND BIT(3)
#define ZXI2C_IRQ_FIFONACK BIT(4)
#define ZXI2C_IRQ_MASK (ZXI2C_ISR_MASK_ALL \
| ZXI2C_IRQ_FIFOEND \
| ZXI2C_IRQ_FIFONACK)
#define ZXI2C_IRQ_MASK (ZXI2C_ISR_MASK_ALL | ZXI2C_IRQ_FIFOEND | ZXI2C_IRQ_FIFONACK)
/* REG_IMR Bit fields */
#define ZXI2C_REG_IMR 0x08
@@ -75,22 +73,30 @@
#define ZXI2C_REG_TR 0x0C
#define ZXI2C_REG_MCR 0x0E
enum {
ZXI2C_BYTE_MODE,
ZXI2C_FIFO_MODE
};
struct zxi2c {
struct i2c_adapter adapter;
struct completion complete;
struct device *dev;
void __iomem *base;
struct clk *clk;
u16 tcr;
struct i2c_msg *msg;
int irq;
u16 cmd_status;
int ret;
u16 tcr;
u16 tr;
u16 mcr;
u16 csr;
u8 fstp;
u8 hrv;
ktime_t ti;
ktime_t to;
bool last;
u16 xfer_len;
u16 xfered_len;
unsigned int mode;
};
/* parameters Constants */
@@ -115,8 +121,8 @@ static int zxi2c_wait_bus_ready(struct zxi2c *i2c)
dev_warn(i2c->dev, "timeout waiting for bus ready\n");
return -EBUSY;
}
tmp = ioread16(i2c->base + ZXI2C_REG_CR);
iowrite16(tmp | ZXI2C_CR_END_MASK, i2c->base + ZXI2C_REG_CR);
tmp = ioread16(base + ZXI2C_REG_CR);
iowrite16(tmp | ZXI2C_CR_END_MASK, base + ZXI2C_REG_CR);
msleep(20);
}
@@ -124,229 +130,205 @@ static int zxi2c_wait_bus_ready(struct zxi2c *i2c)
return 0;
}
static int zxi2c_wait_status(struct zxi2c *i2c, u8 status)
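/*
 * Byte-mode transfer step, called from the interrupt handler: move one byte
 * and return nonzero once the whole message has been transferred.
 */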
static int zxi2c_irq_xfer(struct zxi2c *i2c)
{
unsigned long time_left;
time_left = wait_for_completion_timeout(&i2c->complete, msecs_to_jiffies(ZXI2C_TIMEOUT));
if (!time_left) {
dev_err(i2c->dev, "bus transfer timeout\n");
return -EIO;
}
/*
* During each byte access, the host performs clock stretching.
* In this case, the thread may be interrupted by preemption,
* resulting in a long stretching time.
* However, some touchpad can only tolerate host clock stretching
* of no more than 200 ms. We reduce the impact of this through
* a retransmission mechanism.
*/
local_irq_disable();
i2c->to = ktime_get();
if (ktime_to_ms(ktime_sub(i2c->to, i2c->ti)) > ZXI2C_TIMEOUT) {
local_irq_enable();
dev_warn(i2c->dev, "thread has been blocked for a while\n");
return -EAGAIN;
}
i2c->ti = i2c->to;
local_irq_enable();
if (i2c->cmd_status & status)
return 0;
return -EIO;
}
static irqreturn_t zxi2c_isr(int irq, void *data)
{
struct zxi2c *i2c = data;
/* save the status and write-clear it */
i2c->cmd_status = readw(i2c->base + ZXI2C_REG_ISR);
if (!i2c->cmd_status)
return IRQ_NONE;
writew(i2c->cmd_status, i2c->base + ZXI2C_REG_ISR);
complete(&i2c->complete);
return IRQ_HANDLED;
}
static int zxi2c_write(struct zxi2c *i2c, struct i2c_msg *msg, bool last)
{
u16 val, tcr_val = i2c->tcr;
int xfer_len = 0;
u16 val;
struct i2c_msg *msg = i2c->msg;
u8 read = msg->flags & I2C_M_RD;
void __iomem *base = i2c->base;
writew(msg->buf[0] & 0xFF, base + ZXI2C_REG_CDR);
reinit_completion(&i2c->complete);
writew(tcr_val | msg->addr, base + ZXI2C_REG_TCR);
if (read) {
msg->buf[i2c->xfered_len] = readw(base + ZXI2C_REG_CDR) >> 8;
while (xfer_len < msg->len) {
int err;
err = zxi2c_wait_status(i2c, ZXI2C_ISR_BYTE_END);
if (err)
return err;
xfer_len++;
val = readw(base + ZXI2C_REG_CR) | ZXI2C_CR_CPU_RDY;
if (i2c->xfered_len == msg->len - 2)
val |= ZXI2C_CR_RX_END;
writew(val, base + ZXI2C_REG_CR);
} else {
val = readw(base + ZXI2C_REG_CSR);
if (val & ZXI2C_CSR_RCV_NOT_ACK) {
dev_dbg(i2c->dev, "write RCV NACK error\n");
dev_dbg_ratelimited(i2c->dev, "write RCV NACK error\n");
return -EIO;
}
if (msg->len == 0) {
val = ZXI2C_CR_TX_END | ZXI2C_CR_CPU_RDY | ZXI2C_CR_ENABLE;
writew(val, base + ZXI2C_REG_CR);
break;
return 0;
}
if (xfer_len == msg->len) {
if (last)
if ((i2c->xfered_len + 1) == msg->len) {
if (i2c->last)
writeb(ZXI2C_CR_TX_END, base + ZXI2C_REG_CR);
} else {
writew(msg->buf[xfer_len] & 0xFF, base + ZXI2C_REG_CDR);
writew(msg->buf[i2c->xfered_len + 1] & 0xFF, base + ZXI2C_REG_CDR);
writew(ZXI2C_CR_CPU_RDY | ZXI2C_CR_ENABLE, base + ZXI2C_REG_CR);
}
}
return 0;
i2c->xfered_len++;
return i2c->xfered_len == msg->len;
}
static int zxi2c_read(struct zxi2c *i2c, struct i2c_msg *msg, bool first)
/* 'irq == true' means in interrupt context */
int zxi2c_fifo_irq_xfer(struct zxi2c *i2c, bool irq)
{
u16 val, tcr_val = i2c->tcr;
u32 xfer_len = 0;
void __iomem *base = i2c->base;
val = readw(base + ZXI2C_REG_CR);
val &= ~(ZXI2C_CR_TX_END | ZXI2C_CR_RX_END);
if (msg->len == 1)
val |= ZXI2C_CR_RX_END;
writew(val, base + ZXI2C_REG_CR);
reinit_completion(&i2c->complete);
tcr_val |= ZXI2C_TCR_MASTER_READ | msg->addr;
writew(tcr_val, base + ZXI2C_REG_TCR);
if (!first) {
val = readw(base + ZXI2C_REG_CR);
val |= ZXI2C_CR_CPU_RDY;
writew(val, base + ZXI2C_REG_CR);
}
while (xfer_len < msg->len) {
int err;
err = zxi2c_wait_status(i2c, ZXI2C_ISR_BYTE_END);
if (err)
return err;
msg->buf[xfer_len] = readw(base + ZXI2C_REG_CDR) >> 8;
xfer_len++;
val = readw(base + ZXI2C_REG_CR) | ZXI2C_CR_CPU_RDY;
if (xfer_len == msg->len - 1)
val |= ZXI2C_CR_RX_END;
writew(val, base + ZXI2C_REG_CR);
}
return 0;
}
static int zxi2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
struct i2c_msg *msg;
int i;
int ret = 0;
struct zxi2c *i2c = i2c_get_adapdata(adap);
for (i = 0; ret >= 0 && i < num; i++) {
msg = &msgs[i];
if (msg->len == 0) {
dev_dbg(i2c->dev, "zero len unsupported\n");
return -ENODEV;
}
if (msg->flags & I2C_M_RD)
ret = zxi2c_read(i2c, msg, i == 0);
else
ret = zxi2c_write(i2c, msg, i == (num - 1));
}
return (ret < 0) ? ret : i;
}
static int zxi2c_fifo_xfer(struct zxi2c *i2c, struct i2c_msg *msg)
{
u16 xfered_len = 0;
u16 byte_left = msg->len;
u16 tcr_val = i2c->tcr;
u16 i;
u8 tmp;
struct i2c_msg *msg = i2c->msg;
void __iomem *base = i2c->base;
bool read = !!(msg->flags & I2C_M_RD);
while (byte_left) {
u16 i;
u8 tmp;
int error;
u16 xfer_len = min_t(u16, byte_left, ZXI2C_FIFO_SIZE);
if (irq) {
/* get the received data */
if (read)
for (i = 0; i < i2c->xfer_len; i++)
msg->buf[i2c->xfered_len + i] = ioread8(base + ZXI2C_REG_HRDR);
byte_left -= xfer_len;
i2c->xfered_len += i2c->xfer_len;
if (i2c->xfered_len == msg->len)
return 1;
}
/* reset fifo buffer */
tmp = ioread8(base + ZXI2C_REG_HCR);
iowrite8(tmp | ZXI2C_HCR_RST_FIFO, base + ZXI2C_REG_HCR);
/* set xfer len */
if (read)
iowrite8(xfer_len - 1, base + ZXI2C_REG_HRLR);
else {
iowrite8(xfer_len - 1, base + ZXI2C_REG_HTLR);
i2c->xfer_len = min_t(u16, msg->len - i2c->xfered_len, ZXI2C_FIFO_SIZE);
if (read) {
iowrite8(i2c->xfer_len - 1, base + ZXI2C_REG_HRLR);
} else {
iowrite8(i2c->xfer_len - 1, base + ZXI2C_REG_HTLR);
/* set write data */
for (i = 0; i < xfer_len; i++)
iowrite8(msg->buf[xfered_len + i], base + ZXI2C_REG_HTDR);
for (i = 0; i < i2c->xfer_len; i++)
iowrite8(msg->buf[i2c->xfered_len + i], base + ZXI2C_REG_HTDR);
}
/* prepare to stop transmission */
if (i2c->hrv && !byte_left) {
tmp = ioread8(i2c->base + ZXI2C_REG_CR);
if (i2c->hrv && msg->len == (i2c->xfered_len + i2c->xfer_len)) {
tmp = ioread8(base + ZXI2C_REG_CR);
tmp |= read ? ZXI2C_CR_RX_END : ZXI2C_CR_TX_END;
iowrite8(tmp, base + ZXI2C_REG_CR);
}
reinit_completion(&i2c->complete);
if (xfered_len) {
if (irq) {
/* continue transmission */
tmp = ioread8(i2c->base + ZXI2C_REG_CR);
iowrite8(tmp |= ZXI2C_CR_CPU_RDY, i2c->base + ZXI2C_REG_CR);
tmp = ioread8(base + ZXI2C_REG_CR);
iowrite8(tmp |= ZXI2C_CR_CPU_RDY, base + ZXI2C_REG_CR);
} else {
u16 tcr_val = i2c->tcr;
/* start transmission */
tcr_val |= (read ? ZXI2C_TCR_MASTER_READ : 0);
tcr_val |= read ? ZXI2C_TCR_READ : 0;
writew(tcr_val | msg->addr, base + ZXI2C_REG_TCR);
}
error = zxi2c_wait_status(i2c, ZXI2C_IRQ_FIFOEND);
if (error)
return error;
return 0;
}
/* get the received data */
if (read)
for (i = 0; i < xfer_len; i++)
msg->buf[xfered_len + i] =
ioread8(base + ZXI2C_REG_HRDR);
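/*
 * Interrupt handler: write-clear the status, record NACKs, advance the
 * byte- or FIFO-mode state machine and complete the transfer when done.
 */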
static irqreturn_t zxi2c_isr(int irq, void *data)
{
struct zxi2c *i2c = data;
u8 status;
xfered_len += xfer_len;
/* save the status and write-clear it */
status = readw(i2c->base + ZXI2C_REG_ISR);
if (!status)
return IRQ_NONE;
writew(status, i2c->base + ZXI2C_REG_ISR);
i2c->ret = 0;
if (status & ZXI2C_ISR_NACK_ADDR)
i2c->ret = -EIO;
if (!i2c->ret) {
if (i2c->mode == ZXI2C_BYTE_MODE)
i2c->ret = zxi2c_irq_xfer(i2c);
else
i2c->ret = zxi2c_fifo_irq_xfer(i2c, true);
}
return 1;
if (i2c->ret)
complete(&i2c->complete);
return IRQ_HANDLED;
}
static int zxi2c_write(struct zxi2c *i2c, struct i2c_msg *msg, int last)
{
u16 tcr_val = i2c->tcr;
i2c->last = last;
writew(msg->buf[0] & 0xFF, i2c->base + ZXI2C_REG_CDR);
reinit_completion(&i2c->complete);
tcr_val |= msg->addr & 0x7f;
writew(tcr_val, i2c->base + ZXI2C_REG_TCR);
if (!wait_for_completion_timeout(&i2c->complete, msecs_to_jiffies(ZXI2C_TIMEOUT)))
return -ETIMEDOUT;
return i2c->ret;
}
static int zxi2c_read(struct zxi2c *i2c, struct i2c_msg *msg, bool first)
{
u16 val, tcr_val = i2c->tcr;
val = readw(i2c->base + ZXI2C_REG_CR);
val &= ~(ZXI2C_CR_TX_END | ZXI2C_CR_RX_END);
if (msg->len == 1)
val |= ZXI2C_CR_RX_END;
writew(val, i2c->base + ZXI2C_REG_CR);
reinit_completion(&i2c->complete);
tcr_val |= ZXI2C_TCR_READ | (msg->addr & 0x7f);
writew(tcr_val, i2c->base + ZXI2C_REG_TCR);
if (!first) {
val = readw(i2c->base + ZXI2C_REG_CR);
val |= ZXI2C_CR_CPU_RDY;
writew(val, i2c->base + ZXI2C_REG_CR);
}
if (!wait_for_completion_timeout(&i2c->complete, msecs_to_jiffies(ZXI2C_TIMEOUT)))
return -ETIMEDOUT;
return i2c->ret;
}
int zxi2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
struct i2c_msg *msg;
int i;
int ret = 0;
struct zxi2c *i2c = i2c_get_adapdata(adap);
i2c->mode = ZXI2C_BYTE_MODE;
for (i = 0; ret >= 0 && i < num; i++) {
i2c->msg = msg = &msgs[i];
i2c->xfered_len = 0;
if (msg->len == 0)
return -EIO;
if (msg->flags & I2C_M_RD)
ret = zxi2c_read(i2c, msg, i == 0);
else
ret = zxi2c_write(i2c, msg, (i + 1) == num);
}
return (ret < 0) ? ret : i;
}
static int zxi2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
@@ -354,40 +336,47 @@ static int zxi2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int
u8 tmp;
int ret;
struct zxi2c *i2c = (struct zxi2c *)i2c_get_adapdata(adap);
void __iomem *base = i2c->base;
ret = zxi2c_wait_bus_ready(i2c);
if (ret)
return ret;
tmp = ioread8(i2c->base + ZXI2C_REG_CR);
tmp = ioread8(base + ZXI2C_REG_CR);
tmp &= ~(ZXI2C_CR_RX_END | ZXI2C_CR_TX_END);
i2c->ti = ktime_get();
if (num == 1 && msgs->len >= 2 && (i2c->hrv || msgs->len <= ZXI2C_FIFO_SIZE)) {
/* enable fifo mode */
iowrite16(ZXI2C_CR_FIFO_MODE | tmp, i2c->base + ZXI2C_REG_CR);
iowrite16(ZXI2C_CR_FIFO_MODE | tmp, base + ZXI2C_REG_CR);
/* clear irq status */
iowrite8(ZXI2C_IRQ_MASK, i2c->base + ZXI2C_REG_ISR);
iowrite8(ZXI2C_IRQ_MASK, base + ZXI2C_REG_ISR);
/* enable fifo irq */
iowrite8(ZXI2C_ISR_NACK_ADDR | ZXI2C_IRQ_FIFOEND, i2c->base + ZXI2C_REG_IMR);
iowrite8(ZXI2C_ISR_NACK_ADDR | ZXI2C_IRQ_FIFOEND, base + ZXI2C_REG_IMR);
ret = zxi2c_fifo_xfer(i2c, msgs);
i2c->msg = msgs;
i2c->mode = ZXI2C_FIFO_MODE;
i2c->xfer_len = i2c->xfered_len = 0;
zxi2c_fifo_irq_xfer(i2c, 0);
if (!wait_for_completion_timeout(&i2c->complete, msecs_to_jiffies(ZXI2C_TIMEOUT)))
return -ETIMEDOUT;
ret = i2c->ret;
} else {
/* enable byte mode */
iowrite16(tmp, i2c->base + ZXI2C_REG_CR);
iowrite16(tmp, base + ZXI2C_REG_CR);
/* clear irq status */
iowrite8(ZXI2C_IRQ_MASK, i2c->base + ZXI2C_REG_ISR);
iowrite8(ZXI2C_IRQ_MASK, base + ZXI2C_REG_ISR);
/* enable byte irq */
iowrite8(ZXI2C_ISR_NACK_ADDR | ZXI2C_IMR_BYTE, i2c->base + ZXI2C_REG_IMR);
iowrite8(ZXI2C_ISR_NACK_ADDR | ZXI2C_IMR_BYTE, base + ZXI2C_REG_IMR);
ret = zxi2c_xfer(adap, msgs, num);
if (ret < 0)
iowrite16(tmp | ZXI2C_CR_END_MASK, i2c->base + ZXI2C_REG_CR);
/* make sure the state machine is stopped */
usleep_range(1, 2);
if (ret == -ETIMEDOUT)
iowrite16(tmp | ZXI2C_CR_END_MASK, base + ZXI2C_REG_CR);
}
/* disable interrupts */
iowrite8(0, i2c->base + ZXI2C_REG_IMR);
iowrite8(0, base + ZXI2C_REG_IMR);
return ret;
}
@@ -451,8 +440,9 @@ static void zxi2c_get_bus_speed(struct zxi2c *i2c)
dev_warn(i2c->dev, "speed:%d, fstp:0x%x, golden:0x%x\n", params[0], fstp,
params[2]);
i2c->tr = params[2] | 0xff00;
} else
} else {
i2c->tr = fstp | 0xff00;
}
i2c->tcr = params[1];
i2c->mcr = ioread16(i2c->base + ZXI2C_REG_MCR);
@@ -497,6 +487,7 @@ static int zxi2c_init(struct platform_device *pdev, struct zxi2c **pi2c)
platform_set_drvdata(pdev, i2c);
*pi2c = i2c;
return 0;
}
@@ -517,8 +508,9 @@ static int zxi2c_probe(struct platform_device *pdev)
adap = &i2c->adapter;
adap->owner = THIS_MODULE;
adap->algo = &zxi2c_algorithm;
adap->retries = 2;
adap->quirks = &zxi2c_quirks;
adap->dev.parent = &pdev->dev;
ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev));
snprintf(adap->name, sizeof(adap->name), "zhaoxin-%s-%s", dev_name(pdev->dev.parent),

View File

@@ -562,6 +562,37 @@ resv_iova:
return 0;
}
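/*
 * Reserve [start, end] in the domain's IOVA space so the DMA-IOMMU
 * allocator never hands out addresses from that range.
 */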
int iova_reserve_domain_addr(struct iommu_domain *domain, dma_addr_t start, dma_addr_t end)
{
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad;
unsigned long lo, hi;
if (!cookie)
return -EINVAL;
iovad = &cookie->iovad;
lo = iova_pfn(iovad, start);
hi = iova_pfn(iovad, end);
reserve_iova(iovad, lo, hi);
return 0;
}
EXPORT_SYMBOL_GPL(iova_reserve_domain_addr);
static int iova_reserve_pci_regions(struct device *dev, struct iommu_domain *domain)
{
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
int ret = 0;
if (dev_is_pci(dev))
ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
return ret;
}
static int iova_reserve_iommu_regions(struct device *dev,
struct iommu_domain *domain)
{
@@ -571,12 +602,6 @@ static int iova_reserve_iommu_regions(struct device *dev,
LIST_HEAD(resv_regions);
int ret = 0;
if (dev_is_pci(dev)) {
ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
if (ret)
return ret;
}
iommu_get_resv_regions(dev, &resv_regions);
list_for_each_entry(region, &resv_regions, list) {
unsigned long lo, hi;
@@ -708,7 +733,7 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
}
ret = 0;
goto done_unlock;
goto iova_reserve_iommu;
}
init_iova_domain(iovad, 1UL << order, base_pfn);
@@ -723,6 +748,11 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
(!device_iommu_capable(dev, IOMMU_CAP_DEFERRED_FLUSH) || iommu_dma_init_fq(domain)))
domain->type = IOMMU_DOMAIN_DMA;
ret = iova_reserve_pci_regions(dev, domain);
if (ret)
goto done_unlock;
iova_reserve_iommu:
ret = iova_reserve_iommu_regions(dev, domain);
done_unlock:

View File

@@ -767,6 +767,59 @@ static void __init dmar_acpi_insert_dev_scope(u8 device_number,
device_number, dev_name(&adev->dev));
}
/* Return: true if a matching device scope was found, false otherwise */
bool dmar_rmrr_acpi_insert_dev_scope(u8 device_number,
struct acpi_device *adev,
void *start, void *end,
struct dmar_dev_scope *devices,
int devices_cnt)
{
struct acpi_dmar_device_scope *scope;
struct device *tmp;
int i;
struct acpi_dmar_pci_path *path;
for (; start < end; start += scope->length) {
scope = start;
if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
continue;
if (scope->enumeration_id != device_number)
continue;
path = (void *)(scope + 1);
pr_info("ACPI device \"%s\" under DMAR as %02x:%02x.%d\n", dev_name(&adev->dev),
scope->bus, path->device, path->function);
for_each_dev_scope(devices, devices_cnt, i, tmp)
if (tmp == NULL) {
devices[i].bus = scope->bus;
devices[i].devfn = PCI_DEVFN(path->device, path->function);
rcu_assign_pointer(devices[i].dev, get_device(&adev->dev));
return true;
}
WARN_ON(i >= devices_cnt);
}
return false;
}
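/*
 * Insert the ACPI namespace device into the scope of the matching DRHD
 * unit, then register it with the RMRR units as well.
 */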
static int dmar_acpi_bus_add_dev(u8 device_number, struct acpi_device *adev)
{
struct dmar_drhd_unit *dmaru;
struct acpi_dmar_hardware_unit *drhd;
int ret = 0;
for_each_drhd_unit(dmaru) {
drhd = container_of(dmaru->hdr, struct acpi_dmar_hardware_unit, header);
ret = dmar_rmrr_acpi_insert_dev_scope(device_number, adev, (void *)(drhd+1),
((void *)drhd)+drhd->header.length,
dmaru->devices, dmaru->devices_cnt);
if (ret)
break;
}
if (ret > 0)
ret = dmar_rmrr_add_acpi_dev(device_number, adev);
return ret;
}
static int __init dmar_acpi_dev_scope_init(void)
{
struct acpi_dmar_andd *andd;
@@ -794,6 +847,10 @@ static int __init dmar_acpi_dev_scope_init(void)
andd->device_name);
continue;
}
if (apply_zhaoxin_dmar_acpi_a_behavior())
dmar_acpi_bus_add_dev(andd->device_number, adev);
else
dmar_acpi_insert_dev_scope(andd->device_number, adev);
}
}

View File

@@ -3468,6 +3468,24 @@ out:
return ret;
}
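/* Bind the ACPI namespace device into the device scope of a matching RMRR unit. */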
int dmar_rmrr_add_acpi_dev(u8 device_number, struct acpi_device *adev)
{
int ret;
struct dmar_rmrr_unit *rmrru;
struct acpi_dmar_reserved_memory *rmrr;
list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
rmrr = container_of(rmrru->hdr, struct acpi_dmar_reserved_memory, header);
ret = dmar_rmrr_acpi_insert_dev_scope(device_number, adev, (void *)(rmrr + 1),
((void *)rmrr) + rmrr->header.length,
rmrru->devices, rmrru->devices_cnt);
if (ret)
break;
}
return 0;
}
int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
int ret;
@@ -3726,6 +3744,43 @@ static int __init platform_optin_force_iommu(void)
return 1;
}
static inline int acpi_rmrr_device_create_direct_mappings(struct iommu_domain *domain,
struct device *dev)
{
int ret;
pr_info("rmrr andd dev:%s enter to %s\n", dev_name(dev), __func__);
ret = __acpi_rmrr_device_create_direct_mappings(domain, dev);
return ret;
}
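/*
 * Probe the ACPI namespace device, look up the IOMMU and PCI device it is
 * enumerated under, and replay the RMRR direct mappings on that device's
 * domain.
 */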
static inline int acpi_rmrr_andd_probe(struct device *dev)
{
struct intel_iommu *iommu = NULL;
struct pci_dev *pci_device = NULL;
u8 bus, devfn;
int ret = 0;
ret = iommu_probe_device(dev);
iommu = device_to_iommu(dev, &bus, &devfn);
if (!iommu) {
pr_info("dpoint-- cannot get acpi device corresponding iommu\n");
return -EINVAL;
}
pci_device = pci_get_domain_bus_and_slot(iommu->segment, bus, devfn);
if (!pci_device) {
pr_info("dpoint-- cannot get acpi devie corresponding pci_device\n");
return -EINVAL;
}
ret = acpi_rmrr_device_create_direct_mappings(iommu_get_domain_for_dev(&pci_device->dev),
dev);
return ret;
}
static int __init probe_acpi_namespace_devices(void)
{
struct dmar_drhd_unit *drhd;
@@ -3748,6 +3803,10 @@ static int __init probe_acpi_namespace_devices(void)
list_for_each_entry(pn,
&adev->physical_node_list, node) {
ret = iommu_probe_device(pn->dev);
if (apply_zhaoxin_dmar_acpi_a_behavior())
ret = acpi_rmrr_andd_probe(dev);
if (ret)
break;
}
@@ -4479,6 +4538,9 @@ static void intel_iommu_probe_finalize(struct device *dev)
{
set_dma_ops(dev, NULL);
iommu_setup_dma_ops(dev, 0, U64_MAX);
if (is_zhaoxin_kh40000)
kh40000_set_iommu_dma_ops(dev);
}
static void intel_iommu_get_resv_regions(struct device *device,

View File

@@ -1103,7 +1103,8 @@ map_end:
map_size = 0;
}
}
if (apply_zhaoxin_dmar_acpi_a_behavior())
iova_reserve_domain_addr(domain, start, end);
}
if (!list_empty(&mappings) && iommu_is_dma_domain(domain))
@@ -1171,6 +1172,16 @@ err_free_device:
return ERR_PTR(ret);
}
int __acpi_rmrr_device_create_direct_mappings(struct iommu_domain *domain, struct device *dev)
{
int ret;
ret = iommu_create_device_direct_mappings(domain, dev);
return ret;
}
EXPORT_SYMBOL_GPL(__acpi_rmrr_device_create_direct_mappings);
/**
* iommu_group_add_device - add a device to an iommu group
* @group: the group into which to add the device (reference should be held)

View File

@@ -52,6 +52,8 @@ int erst_clear(u64 record_id);
int arch_apei_enable_cmcff(struct acpi_hest_header *hest_hdr, void *data);
void arch_apei_report_mem_error(int sev, struct cper_sec_mem_err *mem_err);
void arch_apei_report_pcie_error(int sev, struct cper_sec_pcie *pcie_err);
bool arch_apei_report_zdi_error(guid_t *sec_type, struct cper_sec_proc_generic *zdi_err);
#endif
#endif

View File

@@ -578,4 +578,5 @@ void cper_estatus_print(const char *pfx,
int cper_estatus_check_header(const struct acpi_hest_generic_status *estatus);
int cper_estatus_check(const struct acpi_hest_generic_status *estatus);
const char *cper_zdi_zpi_err_type_str(unsigned int etype);
#endif

View File

@@ -516,4 +516,21 @@ pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
}
#endif /* CONFIG_PCI_P2PDMA */
#if defined CONFIG_PCI && defined CONFIG_X86
extern bool is_zhaoxin_kh40000;
extern const struct dma_map_ops kh40000_dma_direct_ops;
void kh40000_set_iommu_dma_ops(struct device *dev);
#else
bool __weak is_zhaoxin_kh40000;
static inline void kh40000_set_iommu_dma_ops(struct device *dev)
{
}
#endif
#endif /* _LINUX_DMA_MAP_OPS_H */

View File

@@ -112,6 +112,9 @@ extern int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
void *start, void*end, u16 segment,
struct dmar_dev_scope *devices,
int devices_cnt);
extern bool dmar_rmrr_acpi_insert_dev_scope(u8 device_number,
struct acpi_device *adev, void *start, void *end,
struct dmar_dev_scope *devices, int devices_cnt);
extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info,
u16 segment, struct dmar_dev_scope *devices,
int count);
@@ -144,6 +147,7 @@ extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg);
extern int dmar_parse_one_satc(struct acpi_dmar_header *hdr, void *arg);
extern int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg);
extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
extern int dmar_rmrr_add_acpi_dev(u8 device_number, struct acpi_device *adev);
extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info);
#else /* !CONFIG_INTEL_IOMMU: */
static inline int intel_iommu_init(void) { return -ENODEV; }
@@ -155,6 +159,11 @@ static inline void intel_iommu_shutdown(void) { }
#define dmar_release_one_atsr dmar_res_noop
#define dmar_parse_one_satc dmar_res_noop
static inline int dmar_rmrr_add_acpi_dev(u8 device_number, struct acpi_device *adev)
{
return 0;
}
static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
return 0;

View File

@@ -586,6 +586,21 @@ void iommu_set_dma_strict(void);
extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags);
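/*
 * True on Zhaoxin/Centaur family 7 model 0x3b parts, which need the
 * alternate handling of ACPI namespace devices in the DMAR code.
 */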
static inline bool apply_zhaoxin_dmar_acpi_a_behavior(void)
{
#if defined(CONFIG_CPU_SUP_ZHAOXIN) || defined(CONFIG_CPU_SUP_CENTAUR)
if (((boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) ||
(boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN)) &&
((boot_cpu_data.x86 == 7) && (boot_cpu_data.x86_model == 0x3b)))
return true;
#endif
return false;
}
extern int iova_reserve_domain_addr(struct iommu_domain *domain, dma_addr_t start, dma_addr_t end);
int __acpi_rmrr_device_create_direct_mappings(struct iommu_domain *domain, struct device *dev);
static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
if (domain->ops->flush_iotlb_all)

View File

@@ -224,6 +224,9 @@ void __init dma_contiguous_reserve(phys_addr_t limit)
dma_numa_cma_reserve();
if (is_zhaoxin_kh40000)
return;
pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
if (size_cmdline != -1) {

View File

@@ -2485,6 +2485,17 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
}
}
#if IS_ENABLED(CONFIG_X86)
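/*
 * Zhaoxin/Centaur family 7 model 0x5b: set SD_ASYM_PACKING on all sched
 * domains so the scheduler prefers the higher-priority (ITMT) cores.
 */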
if ((boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR ||
boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) &&
(boot_cpu_data.x86 == 7 && boot_cpu_data.x86_model == 0x5b)) {
for_each_cpu(i, cpu_map) {
for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent)
sd->flags |= SD_ASYM_PACKING;
}
}
#endif
/* Calculate CPU capacity for physical packages and nodes */
for (i = nr_cpumask_bits-1; i >= 0; i--) {
if (!cpumask_test_cpu(i, cpu_map))