Merge tag 'misc-habanalabs-next-2019-04-19' of git://people.freedesktop.org/~gabbayo/linux into char-misc-next
Oded writes:

This tag contains many changes for kernel 5.2. The major changes are:

- Add a new IOCTL for debug, profiling and trace operations on the device.
  This allows the user to profile and debug the deep-learning topologies
  that execute on the ASIC.

- Add a shadow table for the ASIC's MMU page tables, to avoid doing page
  table walks on the device's DRAM during map/unmap operations.

- Re-factor ASIC-dependent code into common code for all ASICs.

In addition, there are many small fixes and changes. The notable ones are:

- Allow accessing the DRAM using virtual addresses through the debugfs
  interface. Until now, only physical addresses were valid, which is
  useless for debugging when working with the MMU.

- Allow the user to modify the TPC clock relaxation value to better control
  TPC power consumption during topology execution.

- Allow the user to inquire about the device's status
  (operational/malfunction/in-reset) in the INFO IOCTL.

- Improvements to the device's removal function, to prevent a crash in case
  of forced removal by the OS.

- Prevent PTE read/write during hard-reset. This improves the stability of
  the device during hard-reset.

* tag 'misc-habanalabs-next-2019-04-19' of git://people.freedesktop.org/~gabbayo/linux: (31 commits)
  habanalabs: prevent device PTE read/write during hard-reset
  habanalabs: improve IOCTLs behavior when disabled or reset
  habanalabs: all FD must be closed before removing device
  habanalabs: split mmu/no-mmu code paths in memory ioctl
  habanalabs: ASIC_AUTO_DETECT enum value is redundant
  habanalabs: refactoring in goya.c
  uapi/habanalabs: fix some comments in uapi file
  habanalabs: add goya implementation for debug configuration
  habanalabs: add new IOCTL for debug, tracing and profiling
  habanalabs: remove extra semicolon
  habanalabs: prevent CPU soft lockup on Palladium
  habanalabs: remove trailing blank line from EOF
  habanalabs: improve error messages
  habanalabs: add device status option to INFO IOCTL
  habanalabs: allow user to modify TPC clock relaxation value
  habanalabs: set new golden value to tpc clock relaxation
  habanalabs: never fail hard reset of device
  habanalabs: keep track of the device's dma mask
  habanalabs: add MMU shadow mapping
  habanalabs: Allow accessing DRAM virtual addresses via debugfs
  ...
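For context, here is a minimal sketch (not the driver's actual layout) of the idea behind the MMU shadow table: each page-table hop is tracked by a pgt_info that records both its location in device DRAM and a host-resident shadow copy, so the hop-by-hop walks done during map/unmap read host memory and only the final PTE updates touch the device. The field names follow the pgt_info change in habanalabs.h further down in this diff.

struct pgt_info_sketch {
	u64 phys_addr;    /* hop page location in device DRAM */
	u64 shadow_addr;  /* host copy that map/unmap walks read */
	int num_of_ptes;  /* hop is freed when no PTEs remain in use */
};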
@@ -6,7 +6,7 @@ obj-m := habanalabs.o
 
 habanalabs-y := habanalabs_drv.o device.o context.o asid.o habanalabs_ioctl.o \
 		command_buffer.o hw_queue.o irq.o sysfs.o hwmon.o memory.o \
-		command_submission.o mmu.o
+		command_submission.o mmu.o firmware_if.o pci.o
 
 habanalabs-$(CONFIG_DEBUG_FS) += debugfs.o
 
@@ -214,6 +214,13 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
 	u64 handle;
 	int rc;
 
+	if (hl_device_disabled_or_in_reset(hdev)) {
+		dev_warn_ratelimited(hdev->dev,
+			"Device is %s. Can't execute CB IOCTL\n",
+			atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
+		return -EBUSY;
+	}
+
 	switch (args->in.op) {
 	case HL_CB_OP_CREATE:
 		rc = hl_cb_create(hdev, &hpriv->cb_mgr, args->in.cb_size,
@@ -261,7 +261,8 @@ static void cs_timedout(struct work_struct *work)
 	ctx_asid = cs->ctx->asid;
 
 	/* TODO: add information about last signaled seq and last emitted seq */
-	dev_err(hdev->dev, "CS %d.%llu got stuck!\n", ctx_asid, cs->sequence);
+	dev_err(hdev->dev, "User %d command submission %llu got stuck!\n",
+		ctx_asid, cs->sequence);
 
 	cs_put(cs);
 
@@ -604,7 +605,7 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
 	bool need_soft_reset = false;
 
 	if (hl_device_disabled_or_in_reset(hdev)) {
-		dev_warn(hdev->dev,
+		dev_warn_ratelimited(hdev->dev,
 			"Device is %s. Can't submit new CS\n",
 			atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
 		rc = -EBUSY;
@@ -505,22 +505,97 @@ err:
 	return -EINVAL;
 }
 
+static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr,
+				u64 *phys_addr)
+{
+	struct hl_ctx *ctx = hdev->user_ctx;
+	u64 hop_addr, hop_pte_addr, hop_pte;
+	int rc = 0;
+
+	if (!ctx) {
+		dev_err(hdev->dev, "no ctx available\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->mmu_lock);
+
+	/* hop 0 */
+	hop_addr = get_hop0_addr(ctx);
+	hop_pte_addr = get_hop0_pte_addr(ctx, hop_addr, virt_addr);
+	hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
+
+	/* hop 1 */
+	hop_addr = get_next_hop_addr(hop_pte);
+	if (hop_addr == ULLONG_MAX)
+		goto not_mapped;
+	hop_pte_addr = get_hop1_pte_addr(ctx, hop_addr, virt_addr);
+	hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
+
+	/* hop 2 */
+	hop_addr = get_next_hop_addr(hop_pte);
+	if (hop_addr == ULLONG_MAX)
+		goto not_mapped;
+	hop_pte_addr = get_hop2_pte_addr(ctx, hop_addr, virt_addr);
+	hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
+
+	/* hop 3 */
+	hop_addr = get_next_hop_addr(hop_pte);
+	if (hop_addr == ULLONG_MAX)
+		goto not_mapped;
+	hop_pte_addr = get_hop3_pte_addr(ctx, hop_addr, virt_addr);
+	hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
+
+	if (!(hop_pte & LAST_MASK)) {
+		/* hop 4 */
+		hop_addr = get_next_hop_addr(hop_pte);
+		if (hop_addr == ULLONG_MAX)
+			goto not_mapped;
+		hop_pte_addr = get_hop4_pte_addr(ctx, hop_addr, virt_addr);
+		hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
+	}
+
+	if (!(hop_pte & PAGE_PRESENT_MASK))
+		goto not_mapped;
+
+	*phys_addr = (hop_pte & PTE_PHYS_ADDR_MASK) | (virt_addr & OFFSET_MASK);
+
+	goto out;
+
+not_mapped:
+	dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
+			virt_addr);
+	rc = -EINVAL;
+out:
+	mutex_unlock(&ctx->mmu_lock);
+	return rc;
+}
+
 static ssize_t hl_data_read32(struct file *f, char __user *buf,
 			size_t count, loff_t *ppos)
 {
 	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
 	struct hl_device *hdev = entry->hdev;
+	struct asic_fixed_properties *prop = &hdev->asic_prop;
 	char tmp_buf[32];
+	u64 addr = entry->addr;
 	u32 val;
 	ssize_t rc;
 
 	if (*ppos)
 		return 0;
 
-	rc = hdev->asic_funcs->debugfs_read32(hdev, entry->addr, &val);
+	if (addr >= prop->va_space_dram_start_address &&
+			addr < prop->va_space_dram_end_address &&
+			hdev->mmu_enable &&
+			hdev->dram_supports_virtual_memory) {
+		rc = device_va_to_pa(hdev, entry->addr, &addr);
+		if (rc)
+			return rc;
+	}
+
+	rc = hdev->asic_funcs->debugfs_read32(hdev, addr, &val);
 	if (rc) {
-		dev_err(hdev->dev, "Failed to read from 0x%010llx\n",
-			entry->addr);
+		dev_err(hdev->dev, "Failed to read from 0x%010llx\n", addr);
 		return rc;
 	}
 
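With device_va_to_pa() wired into the debugfs read path above, a device virtual DRAM address can be inspected from user space. A hedged usage sketch follows; the debugfs node names ("addr", "data32") and their location under /sys/kernel/debug are assumptions inferred from the handlers shown here, not taken from this diff.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Read one 32-bit word at a device (virtual) DRAM address via debugfs. */
static int hl_dbg_read32(const char *dbg_dir, unsigned long long va,
			 char *out, size_t out_len)
{
	char path[256];
	ssize_t n;
	int fd;

	/* program the target address */
	snprintf(path, sizeof(path), "%s/addr", dbg_dir);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	dprintf(fd, "0x%llx\n", va);
	close(fd);

	/* read the value back as text */
	snprintf(path, sizeof(path), "%s/data32", dbg_dir);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;
	n = read(fd, out, out_len - 1);
	close(fd);
	if (n < 0)
		return -1;
	out[n] = '\0';
	return 0;
}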
@@ -536,6 +611,8 @@ static ssize_t hl_data_write32(struct file *f, const char __user *buf,
 {
 	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
 	struct hl_device *hdev = entry->hdev;
+	struct asic_fixed_properties *prop = &hdev->asic_prop;
+	u64 addr = entry->addr;
 	u32 value;
 	ssize_t rc;
 
@@ -543,10 +620,19 @@ static ssize_t hl_data_write32(struct file *f, const char __user *buf,
 	if (rc)
 		return rc;
 
-	rc = hdev->asic_funcs->debugfs_write32(hdev, entry->addr, value);
+	if (addr >= prop->va_space_dram_start_address &&
+			addr < prop->va_space_dram_end_address &&
+			hdev->mmu_enable &&
+			hdev->dram_supports_virtual_memory) {
+		rc = device_va_to_pa(hdev, entry->addr, &addr);
+		if (rc)
+			return rc;
+	}
+
+	rc = hdev->asic_funcs->debugfs_write32(hdev, addr, value);
 	if (rc) {
 		dev_err(hdev->dev, "Failed to write 0x%08x to 0x%010llx\n",
-			value, entry->addr);
+			value, addr);
 		return rc;
 	}
 
@@ -10,6 +10,7 @@
 #include <linux/pci.h>
 #include <linux/sched/signal.h>
 #include <linux/hwmon.h>
+#include <uapi/misc/habanalabs.h>
 
 #define HL_PLDM_PENDING_RESET_PER_SEC	(HL_PENDING_RESET_PER_SEC * 10)
 
@@ -21,6 +22,20 @@ bool hl_device_disabled_or_in_reset(struct hl_device *hdev)
 	return false;
 }
 
+enum hl_device_status hl_device_status(struct hl_device *hdev)
+{
+	enum hl_device_status status;
+
+	if (hdev->disabled)
+		status = HL_DEVICE_STATUS_MALFUNCTION;
+	else if (atomic_read(&hdev->in_reset))
+		status = HL_DEVICE_STATUS_IN_RESET;
+	else
+		status = HL_DEVICE_STATUS_OPERATIONAL;
+
+	return status;
+};
+
 static void hpriv_release(struct kref *ref)
 {
 	struct hl_fpriv *hpriv;
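hl_device_status() above is what the INFO IOCTL relies on to report the device state. A hedged sketch of a consumer is shown below; the string mapping is illustrative and not taken from the driver.

static const char *hl_device_status_str(enum hl_device_status status)
{
	switch (status) {
	case HL_DEVICE_STATUS_OPERATIONAL:
		return "operational";
	case HL_DEVICE_STATUS_IN_RESET:
		return "in-reset";
	case HL_DEVICE_STATUS_MALFUNCTION:
	default:
		return "malfunction";
	}
}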
@ -498,11 +513,8 @@ disable_device:
|
|||
return rc;
|
||||
}
|
||||
|
||||
static void hl_device_hard_reset_pending(struct work_struct *work)
|
||||
static void device_kill_open_processes(struct hl_device *hdev)
|
||||
{
|
||||
struct hl_device_reset_work *device_reset_work =
|
||||
container_of(work, struct hl_device_reset_work, reset_work);
|
||||
struct hl_device *hdev = device_reset_work->hdev;
|
||||
u16 pending_total, pending_cnt;
|
||||
struct task_struct *task = NULL;
|
||||
|
||||
|
@ -537,6 +549,12 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
|
|||
}
|
||||
}
|
||||
|
||||
/* We killed the open users, but because the driver cleans up after the
|
||||
* user contexts are closed (e.g. mmu mappings), we need to wait again
|
||||
* to make sure the cleaning phase is finished before continuing with
|
||||
* the reset
|
||||
*/
|
||||
|
||||
pending_cnt = pending_total;
|
||||
|
||||
while ((atomic_read(&hdev->fd_open_cnt)) && (pending_cnt)) {
|
||||
|
@ -552,6 +570,16 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
|
|||
|
||||
mutex_unlock(&hdev->fd_open_cnt_lock);
|
||||
|
||||
}
|
||||
|
||||
static void device_hard_reset_pending(struct work_struct *work)
|
||||
{
|
||||
struct hl_device_reset_work *device_reset_work =
|
||||
container_of(work, struct hl_device_reset_work, reset_work);
|
||||
struct hl_device *hdev = device_reset_work->hdev;
|
||||
|
||||
device_kill_open_processes(hdev);
|
||||
|
||||
hl_device_reset(hdev, true, true);
|
||||
|
||||
kfree(device_reset_work);
|
||||
|
@ -613,6 +641,8 @@ again:
|
|||
if ((hard_reset) && (!from_hard_reset_thread)) {
|
||||
struct hl_device_reset_work *device_reset_work;
|
||||
|
||||
hdev->hard_reset_pending = true;
|
||||
|
||||
if (!hdev->pdev) {
|
||||
dev_err(hdev->dev,
|
||||
"Reset action is NOT supported in simulator\n");
|
||||
|
@ -620,8 +650,6 @@ again:
|
|||
goto out_err;
|
||||
}
|
||||
|
||||
hdev->hard_reset_pending = true;
|
||||
|
||||
device_reset_work = kzalloc(sizeof(*device_reset_work),
|
||||
GFP_ATOMIC);
|
||||
if (!device_reset_work) {
|
||||
|
@ -635,7 +663,7 @@ again:
|
|||
* from a dedicated work
|
||||
*/
|
||||
INIT_WORK(&device_reset_work->reset_work,
|
||||
hl_device_hard_reset_pending);
|
||||
device_hard_reset_pending);
|
||||
device_reset_work->hdev = hdev;
|
||||
schedule_work(&device_reset_work->reset_work);
|
||||
|
||||
|
@ -663,17 +691,9 @@ again:
|
|||
/* Go over all the queues, release all CS and their jobs */
|
||||
hl_cs_rollback_all(hdev);
|
||||
|
||||
if (hard_reset) {
|
||||
/* Release kernel context */
|
||||
if (hl_ctx_put(hdev->kernel_ctx) != 1) {
|
||||
dev_err(hdev->dev,
|
||||
"kernel ctx is alive during hard reset\n");
|
||||
rc = -EBUSY;
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
/* Release kernel context */
|
||||
if ((hard_reset) && (hl_ctx_put(hdev->kernel_ctx) == 1))
|
||||
hdev->kernel_ctx = NULL;
|
||||
}
|
||||
|
||||
/* Reset the H/W. It will be in idle state after this returns */
|
||||
hdev->asic_funcs->hw_fini(hdev, hard_reset);
|
||||
|
@ -698,6 +718,14 @@ again:
|
|||
|
||||
if (hard_reset) {
|
||||
hdev->device_cpu_disabled = false;
|
||||
hdev->hard_reset_pending = false;
|
||||
|
||||
if (hdev->kernel_ctx) {
|
||||
dev_crit(hdev->dev,
|
||||
"kernel ctx was alive during hard reset, something is terribly wrong\n");
|
||||
rc = -EBUSY;
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
/* Allocate the kernel context */
|
||||
hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx),
|
||||
|
@ -752,8 +780,6 @@ again:
|
|||
}
|
||||
|
||||
hl_set_max_power(hdev, hdev->max_power);
|
||||
|
||||
hdev->hard_reset_pending = false;
|
||||
} else {
|
||||
rc = hdev->asic_funcs->soft_reset_late_init(hdev);
|
||||
if (rc) {
|
||||
|
@ -1030,11 +1056,22 @@ void hl_device_fini(struct hl_device *hdev)
|
|||
WARN(1, "Failed to remove device because reset function did not finish\n");
|
||||
return;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/* Mark device as disabled */
|
||||
hdev->disabled = true;
|
||||
|
||||
/*
|
||||
* Flush anyone that is inside the critical section of enqueue
|
||||
* jobs to the H/W
|
||||
*/
|
||||
hdev->asic_funcs->hw_queues_lock(hdev);
|
||||
hdev->asic_funcs->hw_queues_unlock(hdev);
|
||||
|
||||
hdev->hard_reset_pending = true;
|
||||
|
||||
device_kill_open_processes(hdev);
|
||||
|
||||
hl_hwmon_fini(hdev);
|
||||
|
||||
device_late_fini(hdev);
|
||||
|
|
|
@ -0,0 +1,325 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
/*
|
||||
* Copyright 2016-2019 HabanaLabs, Ltd.
|
||||
* All Rights Reserved.
|
||||
*/
|
||||
|
||||
#include "habanalabs.h"
|
||||
|
||||
#include <linux/firmware.h>
|
||||
#include <linux/genalloc.h>
|
||||
#include <linux/io-64-nonatomic-lo-hi.h>
|
||||
|
||||
/**
|
||||
* hl_fw_push_fw_to_device() - Push FW code to device.
|
||||
* @hdev: pointer to hl_device structure.
|
||||
*
|
||||
* Copy fw code from firmware file to device memory.
|
||||
*
|
||||
* Return: 0 on success, non-zero for failure.
|
||||
*/
|
||||
int hl_fw_push_fw_to_device(struct hl_device *hdev, const char *fw_name,
|
||||
void __iomem *dst)
|
||||
{
|
||||
const struct firmware *fw;
|
||||
const u64 *fw_data;
|
||||
size_t fw_size, i;
|
||||
int rc;
|
||||
|
||||
rc = request_firmware(&fw, fw_name, hdev->dev);
|
||||
if (rc) {
|
||||
dev_err(hdev->dev, "Failed to request %s\n", fw_name);
|
||||
goto out;
|
||||
}
|
||||
|
||||
fw_size = fw->size;
|
||||
if ((fw_size % 4) != 0) {
|
||||
dev_err(hdev->dev, "illegal %s firmware size %zu\n",
|
||||
fw_name, fw_size);
|
||||
rc = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
dev_dbg(hdev->dev, "%s firmware size == %zu\n", fw_name, fw_size);
|
||||
|
||||
fw_data = (const u64 *) fw->data;
|
||||
|
||||
if ((fw->size % 8) != 0)
|
||||
fw_size -= 8;
|
||||
|
||||
for (i = 0 ; i < fw_size ; i += 8, fw_data++, dst += 8) {
|
||||
if (!(i & (0x80000 - 1))) {
|
||||
dev_dbg(hdev->dev,
|
||||
"copied so far %zu out of %zu for %s firmware",
|
||||
i, fw_size, fw_name);
|
||||
usleep_range(20, 100);
|
||||
}
|
||||
|
||||
writeq(*fw_data, dst);
|
||||
}
|
||||
|
||||
if ((fw->size % 8) != 0)
|
||||
writel(*(const u32 *) fw_data, dst);
|
||||
|
||||
out:
|
||||
release_firmware(fw);
|
||||
return rc;
|
||||
}
|
||||
|
||||
int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode)
|
||||
{
|
||||
struct armcp_packet pkt = {};
|
||||
|
||||
pkt.ctl = cpu_to_le32(opcode << ARMCP_PKT_CTL_OPCODE_SHIFT);
|
||||
|
||||
return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt,
|
||||
sizeof(pkt), HL_DEVICE_TIMEOUT_USEC, NULL);
|
||||
}
|
||||
|
||||
int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
|
||||
u16 len, u32 timeout, long *result)
|
||||
{
|
||||
struct armcp_packet *pkt;
|
||||
dma_addr_t pkt_dma_addr;
|
||||
u32 tmp;
|
||||
int rc = 0;
|
||||
|
||||
if (len > HL_CPU_CB_SIZE) {
|
||||
dev_err(hdev->dev, "Invalid CPU message size of %d bytes\n",
|
||||
len);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
pkt = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, len,
|
||||
&pkt_dma_addr);
|
||||
if (!pkt) {
|
||||
dev_err(hdev->dev,
|
||||
"Failed to allocate DMA memory for packet to CPU\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
memcpy(pkt, msg, len);
|
||||
|
||||
mutex_lock(&hdev->send_cpu_message_lock);
|
||||
|
||||
if (hdev->disabled)
|
||||
goto out;
|
||||
|
||||
if (hdev->device_cpu_disabled) {
|
||||
rc = -EIO;
|
||||
goto out;
|
||||
}
|
||||
|
||||
rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, len, pkt_dma_addr);
|
||||
if (rc) {
|
||||
dev_err(hdev->dev, "Failed to send CB on CPU PQ (%d)\n", rc);
|
||||
goto out;
|
||||
}
|
||||
|
||||
rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) &pkt->fence,
|
||||
timeout, &tmp);
|
||||
|
||||
hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
|
||||
|
||||
if (rc == -ETIMEDOUT) {
|
||||
dev_err(hdev->dev, "Timeout while waiting for device CPU\n");
|
||||
hdev->device_cpu_disabled = true;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (tmp == ARMCP_PACKET_FENCE_VAL) {
|
||||
u32 ctl = le32_to_cpu(pkt->ctl);
|
||||
|
||||
rc = (ctl & ARMCP_PKT_CTL_RC_MASK) >> ARMCP_PKT_CTL_RC_SHIFT;
|
||||
if (rc) {
|
||||
dev_err(hdev->dev,
|
||||
"F/W ERROR %d for CPU packet %d\n",
|
||||
rc, (ctl & ARMCP_PKT_CTL_OPCODE_MASK)
|
||||
>> ARMCP_PKT_CTL_OPCODE_SHIFT);
|
||||
rc = -EINVAL;
|
||||
} else if (result) {
|
||||
*result = (long) le64_to_cpu(pkt->result);
|
||||
}
|
||||
} else {
|
||||
dev_err(hdev->dev, "CPU packet wrong fence value\n");
|
||||
rc = -EINVAL;
|
||||
}
|
||||
|
||||
out:
|
||||
mutex_unlock(&hdev->send_cpu_message_lock);
|
||||
|
||||
hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, len, pkt);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
int hl_fw_test_cpu_queue(struct hl_device *hdev)
|
||||
{
|
||||
struct armcp_packet test_pkt = {};
|
||||
long result;
|
||||
int rc;
|
||||
|
||||
test_pkt.ctl = cpu_to_le32(ARMCP_PACKET_TEST <<
|
||||
ARMCP_PKT_CTL_OPCODE_SHIFT);
|
||||
test_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL);
|
||||
|
||||
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &test_pkt,
|
||||
sizeof(test_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
|
||||
|
||||
if (!rc) {
|
||||
if (result == ARMCP_PACKET_FENCE_VAL)
|
||||
dev_info(hdev->dev,
|
||||
"queue test on CPU queue succeeded\n");
|
||||
else
|
||||
dev_err(hdev->dev,
|
||||
"CPU queue test failed (0x%08lX)\n", result);
|
||||
} else {
|
||||
dev_err(hdev->dev, "CPU queue test failed, error %d\n", rc);
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
void *hl_fw_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
|
||||
dma_addr_t *dma_handle)
|
||||
{
|
||||
u64 kernel_addr;
|
||||
|
||||
/* roundup to HL_CPU_PKT_SIZE */
|
||||
size = (size + (HL_CPU_PKT_SIZE - 1)) & HL_CPU_PKT_MASK;
|
||||
|
||||
kernel_addr = gen_pool_alloc(hdev->cpu_accessible_dma_pool, size);
|
||||
|
||||
*dma_handle = hdev->cpu_accessible_dma_address +
|
||||
(kernel_addr - (u64) (uintptr_t) hdev->cpu_accessible_dma_mem);
|
||||
|
||||
return (void *) (uintptr_t) kernel_addr;
|
||||
}
|
||||
|
||||
void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
|
||||
void *vaddr)
|
||||
{
|
||||
/* roundup to HL_CPU_PKT_SIZE */
|
||||
size = (size + (HL_CPU_PKT_SIZE - 1)) & HL_CPU_PKT_MASK;
|
||||
|
||||
gen_pool_free(hdev->cpu_accessible_dma_pool, (u64) (uintptr_t) vaddr,
|
||||
size);
|
||||
}
|
||||
|
||||
int hl_fw_send_heartbeat(struct hl_device *hdev)
|
||||
{
|
||||
struct armcp_packet hb_pkt = {};
|
||||
long result;
|
||||
int rc;
|
||||
|
||||
hb_pkt.ctl = cpu_to_le32(ARMCP_PACKET_TEST <<
|
||||
ARMCP_PKT_CTL_OPCODE_SHIFT);
|
||||
hb_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL);
|
||||
|
||||
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt,
|
||||
sizeof(hb_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
|
||||
|
||||
if ((rc) || (result != ARMCP_PACKET_FENCE_VAL))
|
||||
rc = -EIO;
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
int hl_fw_armcp_info_get(struct hl_device *hdev)
|
||||
{
|
||||
struct asic_fixed_properties *prop = &hdev->asic_prop;
|
||||
struct armcp_packet pkt = {};
|
||||
void *armcp_info_cpu_addr;
|
||||
dma_addr_t armcp_info_dma_addr;
|
||||
long result;
|
||||
int rc;
|
||||
|
||||
armcp_info_cpu_addr =
|
||||
hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
|
||||
sizeof(struct armcp_info),
|
||||
&armcp_info_dma_addr);
|
||||
if (!armcp_info_cpu_addr) {
|
||||
dev_err(hdev->dev,
|
||||
"Failed to allocate DMA memory for ArmCP info packet\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
memset(armcp_info_cpu_addr, 0, sizeof(struct armcp_info));
|
||||
|
||||
pkt.ctl = cpu_to_le32(ARMCP_PACKET_INFO_GET <<
|
||||
ARMCP_PKT_CTL_OPCODE_SHIFT);
|
||||
pkt.addr = cpu_to_le64(armcp_info_dma_addr +
|
||||
prop->host_phys_base_address);
|
||||
pkt.data_max_size = cpu_to_le32(sizeof(struct armcp_info));
|
||||
|
||||
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
|
||||
HL_ARMCP_INFO_TIMEOUT_USEC, &result);
|
||||
if (rc) {
|
||||
dev_err(hdev->dev,
|
||||
"Failed to send armcp info pkt, error %d\n", rc);
|
||||
goto out;
|
||||
}
|
||||
|
||||
memcpy(&prop->armcp_info, armcp_info_cpu_addr,
|
||||
sizeof(prop->armcp_info));
|
||||
|
||||
rc = hl_build_hwmon_channel_info(hdev, prop->armcp_info.sensors);
|
||||
if (rc) {
|
||||
dev_err(hdev->dev,
|
||||
"Failed to build hwmon channel info, error %d\n", rc);
|
||||
rc = -EFAULT;
|
||||
goto out;
|
||||
}
|
||||
|
||||
out:
|
||||
hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
|
||||
sizeof(struct armcp_info), armcp_info_cpu_addr);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size)
|
||||
{
|
||||
struct asic_fixed_properties *prop = &hdev->asic_prop;
|
||||
struct armcp_packet pkt = {};
|
||||
void *eeprom_info_cpu_addr;
|
||||
dma_addr_t eeprom_info_dma_addr;
|
||||
long result;
|
||||
int rc;
|
||||
|
||||
eeprom_info_cpu_addr =
|
||||
hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
|
||||
max_size, &eeprom_info_dma_addr);
|
||||
if (!eeprom_info_cpu_addr) {
|
||||
dev_err(hdev->dev,
|
||||
"Failed to allocate DMA memory for EEPROM info packet\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
memset(eeprom_info_cpu_addr, 0, max_size);
|
||||
|
||||
pkt.ctl = cpu_to_le32(ARMCP_PACKET_EEPROM_DATA_GET <<
|
||||
ARMCP_PKT_CTL_OPCODE_SHIFT);
|
||||
pkt.addr = cpu_to_le64(eeprom_info_dma_addr +
|
||||
prop->host_phys_base_address);
|
||||
pkt.data_max_size = cpu_to_le32(max_size);
|
||||
|
||||
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
|
||||
HL_ARMCP_EEPROM_TIMEOUT_USEC, &result);
|
||||
|
||||
if (rc) {
|
||||
dev_err(hdev->dev,
|
||||
"Failed to send armcp EEPROM pkt, error %d\n", rc);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* result contains the actual size */
|
||||
memcpy(data, eeprom_info_cpu_addr, min((size_t)result, max_size));
|
||||
|
||||
out:
|
||||
hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, max_size,
|
||||
eeprom_info_cpu_addr);
|
||||
|
||||
return rc;
|
||||
}
|
|
@@ -1,3 +1,4 @@
 subdir-ccflags-y += -I$(src)
 
-HL_GOYA_FILES := goya/goya.o goya/goya_security.o goya/goya_hwmgr.o
+HL_GOYA_FILES := goya/goya.o goya/goya_security.o goya/goya_hwmgr.o \
+		goya/goya_coresight.o
File diff suppressed because it is too large
@ -39,9 +39,13 @@
|
|||
#error "Number of MSIX interrupts must be smaller or equal to GOYA_MSIX_ENTRIES"
|
||||
#endif
|
||||
|
||||
#define QMAN_FENCE_TIMEOUT_USEC 10000 /* 10 ms */
|
||||
#define QMAN_FENCE_TIMEOUT_USEC 10000 /* 10 ms */
|
||||
|
||||
#define QMAN_STOP_TIMEOUT_USEC 100000 /* 100 ms */
|
||||
#define QMAN_STOP_TIMEOUT_USEC 100000 /* 100 ms */
|
||||
|
||||
#define CORESIGHT_TIMEOUT_USEC 100000 /* 100 ms */
|
||||
|
||||
#define GOYA_CPU_TIMEOUT_USEC 10000000 /* 10s */
|
||||
|
||||
#define TPC_ENABLED_MASK 0xFF
|
||||
|
||||
|
@ -49,19 +53,14 @@
|
|||
|
||||
#define MAX_POWER_DEFAULT 200000 /* 200W */
|
||||
|
||||
#define GOYA_ARMCP_INFO_TIMEOUT 10000000 /* 10s */
|
||||
#define GOYA_ARMCP_EEPROM_TIMEOUT 10000000 /* 10s */
|
||||
|
||||
#define DRAM_PHYS_DEFAULT_SIZE 0x100000000ull /* 4GB */
|
||||
|
||||
/* DRAM Memory Map */
|
||||
|
||||
#define CPU_FW_IMAGE_SIZE 0x10000000 /* 256MB */
|
||||
#define MMU_PAGE_TABLES_SIZE 0x0DE00000 /* 222MB */
|
||||
#define MMU_PAGE_TABLES_SIZE 0x0FC00000 /* 252MB */
|
||||
#define MMU_DRAM_DEFAULT_PAGE_SIZE 0x00200000 /* 2MB */
|
||||
#define MMU_CACHE_MNG_SIZE 0x00001000 /* 4KB */
|
||||
#define CPU_PQ_PKT_SIZE 0x00001000 /* 4KB */
|
||||
#define CPU_PQ_DATA_SIZE 0x01FFE000 /* 32MB - 8KB */
|
||||
|
||||
#define CPU_FW_IMAGE_ADDR DRAM_PHYS_BASE
|
||||
#define MMU_PAGE_TABLES_ADDR (CPU_FW_IMAGE_ADDR + CPU_FW_IMAGE_SIZE)
|
||||
|
@ -69,13 +68,13 @@
|
|||
MMU_PAGE_TABLES_SIZE)
|
||||
#define MMU_CACHE_MNG_ADDR (MMU_DRAM_DEFAULT_PAGE_ADDR + \
|
||||
MMU_DRAM_DEFAULT_PAGE_SIZE)
|
||||
#define CPU_PQ_PKT_ADDR (MMU_CACHE_MNG_ADDR + \
|
||||
#define DRAM_KMD_END_ADDR (MMU_CACHE_MNG_ADDR + \
|
||||
MMU_CACHE_MNG_SIZE)
|
||||
#define CPU_PQ_DATA_ADDR (CPU_PQ_PKT_ADDR + CPU_PQ_PKT_SIZE)
|
||||
#define DRAM_BASE_ADDR_USER (CPU_PQ_DATA_ADDR + CPU_PQ_DATA_SIZE)
|
||||
|
||||
#if (DRAM_BASE_ADDR_USER != 0x20000000)
|
||||
#error "KMD must reserve 512MB"
|
||||
#define DRAM_BASE_ADDR_USER 0x20000000
|
||||
|
||||
#if (DRAM_KMD_END_ADDR > DRAM_BASE_ADDR_USER)
|
||||
#error "KMD must reserve no more than 512MB"
|
||||
#endif
|
||||
|
||||
/*
|
||||
|
@ -142,21 +141,14 @@
|
|||
#define HW_CAP_GOLDEN 0x00000400
|
||||
#define HW_CAP_TPC 0x00000800
|
||||
|
||||
#define CPU_PKT_SHIFT 5
|
||||
#define CPU_PKT_SIZE (1 << CPU_PKT_SHIFT)
|
||||
#define CPU_PKT_MASK (~((1 << CPU_PKT_SHIFT) - 1))
|
||||
#define CPU_MAX_PKTS_IN_CB 32
|
||||
#define CPU_CB_SIZE (CPU_PKT_SIZE * CPU_MAX_PKTS_IN_CB)
|
||||
#define CPU_ACCESSIBLE_MEM_SIZE (HL_QUEUE_LENGTH * CPU_CB_SIZE)
|
||||
|
||||
enum goya_fw_component {
|
||||
FW_COMP_UBOOT,
|
||||
FW_COMP_PREBOOT
|
||||
};
|
||||
|
||||
struct goya_device {
|
||||
int (*test_cpu_queue)(struct hl_device *hdev);
|
||||
int (*armcp_info_get)(struct hl_device *hdev);
|
||||
void (*mmu_prepare_reg)(struct hl_device *hdev, u64 reg, u32 asid);
|
||||
void (*qman0_set_security)(struct hl_device *hdev, bool secure);
|
||||
|
||||
/* TODO: remove hw_queues_lock after moving to scheduler code */
|
||||
spinlock_t hw_queues_lock;
|
||||
|
@ -188,11 +180,16 @@ void goya_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state);
|
|||
void goya_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq);
|
||||
void goya_add_device_attr(struct hl_device *hdev,
|
||||
struct attribute_group *dev_attr_grp);
|
||||
int goya_armcp_info_get(struct hl_device *hdev);
|
||||
void goya_init_security(struct hl_device *hdev);
|
||||
int goya_debug_coresight(struct hl_device *hdev, void *data);
|
||||
u64 goya_get_max_power(struct hl_device *hdev);
|
||||
void goya_set_max_power(struct hl_device *hdev, u64 value);
|
||||
int goya_test_queues(struct hl_device *hdev);
|
||||
void goya_mmu_prepare(struct hl_device *hdev, u32 asid);
|
||||
int goya_mmu_clear_pgt_range(struct hl_device *hdev);
|
||||
int goya_mmu_set_dram_default_page(struct hl_device *hdev);
|
||||
|
||||
int goya_send_pci_access_msg(struct hl_device *hdev, u32 opcode);
|
||||
void goya_late_fini(struct hl_device *hdev);
|
||||
int goya_suspend(struct hl_device *hdev);
|
||||
int goya_resume(struct hl_device *hdev);
|
||||
|
@ -207,5 +204,9 @@ void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
|
|||
u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt);
|
||||
int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id);
|
||||
int goya_send_heartbeat(struct hl_device *hdev);
|
||||
void *goya_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
|
||||
dma_addr_t *dma_handle);
|
||||
void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
|
||||
void *vaddr);
|
||||
|
||||
#endif /* GOYAP_H_ */
|
||||
|
|
|
@ -0,0 +1,620 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
/*
|
||||
* Copyright 2016-2019 HabanaLabs, Ltd.
|
||||
* All Rights Reserved.
|
||||
*/
|
||||
|
||||
#include "goyaP.h"
|
||||
#include "include/goya/goya_coresight.h"
|
||||
#include "include/goya/asic_reg/goya_regs.h"
|
||||
|
||||
#include <uapi/misc/habanalabs.h>
|
||||
|
||||
#include <linux/coresight.h>
|
||||
|
||||
#define GOYA_PLDM_CORESIGHT_TIMEOUT_USEC (CORESIGHT_TIMEOUT_USEC * 100)
|
||||
|
||||
static u64 debug_stm_regs[GOYA_STM_LAST + 1] = {
|
||||
[GOYA_STM_CPU] = mmCPU_STM_BASE,
|
||||
[GOYA_STM_DMA_CH_0_CS] = mmDMA_CH_0_CS_STM_BASE,
|
||||
[GOYA_STM_DMA_CH_1_CS] = mmDMA_CH_1_CS_STM_BASE,
|
||||
[GOYA_STM_DMA_CH_2_CS] = mmDMA_CH_2_CS_STM_BASE,
|
||||
[GOYA_STM_DMA_CH_3_CS] = mmDMA_CH_3_CS_STM_BASE,
|
||||
[GOYA_STM_DMA_CH_4_CS] = mmDMA_CH_4_CS_STM_BASE,
|
||||
[GOYA_STM_DMA_MACRO_CS] = mmDMA_MACRO_CS_STM_BASE,
|
||||
[GOYA_STM_MME1_SBA] = mmMME1_SBA_STM_BASE,
|
||||
[GOYA_STM_MME3_SBB] = mmMME3_SBB_STM_BASE,
|
||||
[GOYA_STM_MME4_WACS2] = mmMME4_WACS2_STM_BASE,
|
||||
[GOYA_STM_MME4_WACS] = mmMME4_WACS_STM_BASE,
|
||||
[GOYA_STM_MMU_CS] = mmMMU_CS_STM_BASE,
|
||||
[GOYA_STM_PCIE] = mmPCIE_STM_BASE,
|
||||
[GOYA_STM_PSOC] = mmPSOC_STM_BASE,
|
||||
[GOYA_STM_TPC0_EML] = mmTPC0_EML_STM_BASE,
|
||||
[GOYA_STM_TPC1_EML] = mmTPC1_EML_STM_BASE,
|
||||
[GOYA_STM_TPC2_EML] = mmTPC2_EML_STM_BASE,
|
||||
[GOYA_STM_TPC3_EML] = mmTPC3_EML_STM_BASE,
|
||||
[GOYA_STM_TPC4_EML] = mmTPC4_EML_STM_BASE,
|
||||
[GOYA_STM_TPC5_EML] = mmTPC5_EML_STM_BASE,
|
||||
[GOYA_STM_TPC6_EML] = mmTPC6_EML_STM_BASE,
|
||||
[GOYA_STM_TPC7_EML] = mmTPC7_EML_STM_BASE
|
||||
};
|
||||
|
||||
static u64 debug_etf_regs[GOYA_ETF_LAST + 1] = {
|
||||
[GOYA_ETF_CPU_0] = mmCPU_ETF_0_BASE,
|
||||
[GOYA_ETF_CPU_1] = mmCPU_ETF_1_BASE,
|
||||
[GOYA_ETF_CPU_TRACE] = mmCPU_ETF_TRACE_BASE,
|
||||
[GOYA_ETF_DMA_CH_0_CS] = mmDMA_CH_0_CS_ETF_BASE,
|
||||
[GOYA_ETF_DMA_CH_1_CS] = mmDMA_CH_1_CS_ETF_BASE,
|
||||
[GOYA_ETF_DMA_CH_2_CS] = mmDMA_CH_2_CS_ETF_BASE,
|
||||
[GOYA_ETF_DMA_CH_3_CS] = mmDMA_CH_3_CS_ETF_BASE,
|
||||
[GOYA_ETF_DMA_CH_4_CS] = mmDMA_CH_4_CS_ETF_BASE,
|
||||
[GOYA_ETF_DMA_MACRO_CS] = mmDMA_MACRO_CS_ETF_BASE,
|
||||
[GOYA_ETF_MME1_SBA] = mmMME1_SBA_ETF_BASE,
|
||||
[GOYA_ETF_MME3_SBB] = mmMME3_SBB_ETF_BASE,
|
||||
[GOYA_ETF_MME4_WACS2] = mmMME4_WACS2_ETF_BASE,
|
||||
[GOYA_ETF_MME4_WACS] = mmMME4_WACS_ETF_BASE,
|
||||
[GOYA_ETF_MMU_CS] = mmMMU_CS_ETF_BASE,
|
||||
[GOYA_ETF_PCIE] = mmPCIE_ETF_BASE,
|
||||
[GOYA_ETF_PSOC] = mmPSOC_ETF_BASE,
|
||||
[GOYA_ETF_TPC0_EML] = mmTPC0_EML_ETF_BASE,
|
||||
[GOYA_ETF_TPC1_EML] = mmTPC1_EML_ETF_BASE,
|
||||
[GOYA_ETF_TPC2_EML] = mmTPC2_EML_ETF_BASE,
|
||||
[GOYA_ETF_TPC3_EML] = mmTPC3_EML_ETF_BASE,
|
||||
[GOYA_ETF_TPC4_EML] = mmTPC4_EML_ETF_BASE,
|
||||
[GOYA_ETF_TPC5_EML] = mmTPC5_EML_ETF_BASE,
|
||||
[GOYA_ETF_TPC6_EML] = mmTPC6_EML_ETF_BASE,
|
||||
[GOYA_ETF_TPC7_EML] = mmTPC7_EML_ETF_BASE
|
||||
};
|
||||
|
||||
static u64 debug_funnel_regs[GOYA_FUNNEL_LAST + 1] = {
|
||||
[GOYA_FUNNEL_CPU] = mmCPU_FUNNEL_BASE,
|
||||
[GOYA_FUNNEL_DMA_CH_6_1] = mmDMA_CH_FUNNEL_6_1_BASE,
|
||||
[GOYA_FUNNEL_DMA_MACRO_3_1] = mmDMA_MACRO_FUNNEL_3_1_BASE,
|
||||
[GOYA_FUNNEL_MME0_RTR] = mmMME0_RTR_FUNNEL_BASE,
|
||||
[GOYA_FUNNEL_MME1_RTR] = mmMME1_RTR_FUNNEL_BASE,
|
||||
[GOYA_FUNNEL_MME2_RTR] = mmMME2_RTR_FUNNEL_BASE,
|
||||
[GOYA_FUNNEL_MME3_RTR] = mmMME3_RTR_FUNNEL_BASE,
|
||||
[GOYA_FUNNEL_MME4_RTR] = mmMME4_RTR_FUNNEL_BASE,
|
||||
[GOYA_FUNNEL_MME5_RTR] = mmMME5_RTR_FUNNEL_BASE,
|
||||
[GOYA_FUNNEL_PCIE] = mmPCIE_FUNNEL_BASE,
|
||||
[GOYA_FUNNEL_PSOC] = mmPSOC_FUNNEL_BASE,
|
||||
[GOYA_FUNNEL_TPC0_EML] = mmTPC0_EML_FUNNEL_BASE,
|
||||
[GOYA_FUNNEL_TPC1_EML] = mmTPC1_EML_FUNNEL_BASE,
|
||||
[GOYA_FUNNEL_TPC1_RTR] = mmTPC1_RTR_FUNNEL_BASE,
|
||||
[GOYA_FUNNEL_TPC2_EML] = mmTPC2_EML_FUNNEL_BASE,
|
||||
[GOYA_FUNNEL_TPC2_RTR] = mmTPC2_RTR_FUNNEL_BASE,
|
||||
[GOYA_FUNNEL_TPC3_EML] = mmTPC3_EML_FUNNEL_BASE,
|
||||
[GOYA_FUNNEL_TPC3_RTR] = mmTPC3_RTR_FUNNEL_BASE,
|
||||
[GOYA_FUNNEL_TPC4_EML] = mmTPC4_EML_FUNNEL_BASE,
|
||||
[GOYA_FUNNEL_TPC4_RTR] = mmTPC4_RTR_FUNNEL_BASE,
|
||||
[GOYA_FUNNEL_TPC5_EML] = mmTPC5_EML_FUNNEL_BASE,
|
||||
[GOYA_FUNNEL_TPC5_RTR] = mmTPC5_RTR_FUNNEL_BASE,
|
||||
[GOYA_FUNNEL_TPC6_EML] = mmTPC6_EML_FUNNEL_BASE,
|
||||
[GOYA_FUNNEL_TPC6_RTR] = mmTPC6_RTR_FUNNEL_BASE,
|
||||
[GOYA_FUNNEL_TPC7_EML] = mmTPC7_EML_FUNNEL_BASE
|
||||
};
|
||||
|
||||
static u64 debug_bmon_regs[GOYA_BMON_LAST + 1] = {
|
||||
[GOYA_BMON_CPU_RD] = mmCPU_RD_BMON_BASE,
|
||||
[GOYA_BMON_CPU_WR] = mmCPU_WR_BMON_BASE,
|
||||
[GOYA_BMON_DMA_CH_0_0] = mmDMA_CH_0_BMON_0_BASE,
|
||||
[GOYA_BMON_DMA_CH_0_1] = mmDMA_CH_0_BMON_1_BASE,
|
||||
[GOYA_BMON_DMA_CH_1_0] = mmDMA_CH_1_BMON_0_BASE,
|
||||
[GOYA_BMON_DMA_CH_1_1] = mmDMA_CH_1_BMON_1_BASE,
|
||||
[GOYA_BMON_DMA_CH_2_0] = mmDMA_CH_2_BMON_0_BASE,
|
||||
[GOYA_BMON_DMA_CH_2_1] = mmDMA_CH_2_BMON_1_BASE,
|
||||
[GOYA_BMON_DMA_CH_3_0] = mmDMA_CH_3_BMON_0_BASE,
|
||||
[GOYA_BMON_DMA_CH_3_1] = mmDMA_CH_3_BMON_1_BASE,
|
||||
[GOYA_BMON_DMA_CH_4_0] = mmDMA_CH_4_BMON_0_BASE,
|
||||
[GOYA_BMON_DMA_CH_4_1] = mmDMA_CH_4_BMON_1_BASE,
|
||||
[GOYA_BMON_DMA_MACRO_0] = mmDMA_MACRO_BMON_0_BASE,
|
||||
[GOYA_BMON_DMA_MACRO_1] = mmDMA_MACRO_BMON_1_BASE,
|
||||
[GOYA_BMON_DMA_MACRO_2] = mmDMA_MACRO_BMON_2_BASE,
|
||||
[GOYA_BMON_DMA_MACRO_3] = mmDMA_MACRO_BMON_3_BASE,
|
||||
[GOYA_BMON_DMA_MACRO_4] = mmDMA_MACRO_BMON_4_BASE,
|
||||
[GOYA_BMON_DMA_MACRO_5] = mmDMA_MACRO_BMON_5_BASE,
|
||||
[GOYA_BMON_DMA_MACRO_6] = mmDMA_MACRO_BMON_6_BASE,
|
||||
[GOYA_BMON_DMA_MACRO_7] = mmDMA_MACRO_BMON_7_BASE,
|
||||
[GOYA_BMON_MME1_SBA_0] = mmMME1_SBA_BMON0_BASE,
|
||||
[GOYA_BMON_MME1_SBA_1] = mmMME1_SBA_BMON1_BASE,
|
||||
[GOYA_BMON_MME3_SBB_0] = mmMME3_SBB_BMON0_BASE,
|
||||
[GOYA_BMON_MME3_SBB_1] = mmMME3_SBB_BMON1_BASE,
|
||||
[GOYA_BMON_MME4_WACS2_0] = mmMME4_WACS2_BMON0_BASE,
|
||||
[GOYA_BMON_MME4_WACS2_1] = mmMME4_WACS2_BMON1_BASE,
|
||||
[GOYA_BMON_MME4_WACS2_2] = mmMME4_WACS2_BMON2_BASE,
|
||||
[GOYA_BMON_MME4_WACS_0] = mmMME4_WACS_BMON0_BASE,
|
||||
[GOYA_BMON_MME4_WACS_1] = mmMME4_WACS_BMON1_BASE,
|
||||
[GOYA_BMON_MME4_WACS_2] = mmMME4_WACS_BMON2_BASE,
|
||||
[GOYA_BMON_MME4_WACS_3] = mmMME4_WACS_BMON3_BASE,
|
||||
[GOYA_BMON_MME4_WACS_4] = mmMME4_WACS_BMON4_BASE,
|
||||
[GOYA_BMON_MME4_WACS_5] = mmMME4_WACS_BMON5_BASE,
|
||||
[GOYA_BMON_MME4_WACS_6] = mmMME4_WACS_BMON6_BASE,
|
||||
[GOYA_BMON_MMU_0] = mmMMU_BMON_0_BASE,
|
||||
[GOYA_BMON_MMU_1] = mmMMU_BMON_1_BASE,
|
||||
[GOYA_BMON_PCIE_MSTR_RD] = mmPCIE_BMON_MSTR_RD_BASE,
|
||||
[GOYA_BMON_PCIE_MSTR_WR] = mmPCIE_BMON_MSTR_WR_BASE,
|
||||
[GOYA_BMON_PCIE_SLV_RD] = mmPCIE_BMON_SLV_RD_BASE,
|
||||
[GOYA_BMON_PCIE_SLV_WR] = mmPCIE_BMON_SLV_WR_BASE,
|
||||
[GOYA_BMON_TPC0_EML_0] = mmTPC0_EML_BUSMON_0_BASE,
|
||||
[GOYA_BMON_TPC0_EML_1] = mmTPC0_EML_BUSMON_1_BASE,
|
||||
[GOYA_BMON_TPC0_EML_2] = mmTPC0_EML_BUSMON_2_BASE,
|
||||
[GOYA_BMON_TPC0_EML_3] = mmTPC0_EML_BUSMON_3_BASE,
|
||||
[GOYA_BMON_TPC1_EML_0] = mmTPC1_EML_BUSMON_0_BASE,
|
||||
[GOYA_BMON_TPC1_EML_1] = mmTPC1_EML_BUSMON_1_BASE,
|
||||
[GOYA_BMON_TPC1_EML_2] = mmTPC1_EML_BUSMON_2_BASE,
|
||||
[GOYA_BMON_TPC1_EML_3] = mmTPC1_EML_BUSMON_3_BASE,
|
||||
[GOYA_BMON_TPC2_EML_0] = mmTPC2_EML_BUSMON_0_BASE,
|
||||
[GOYA_BMON_TPC2_EML_1] = mmTPC2_EML_BUSMON_1_BASE,
|
||||
[GOYA_BMON_TPC2_EML_2] = mmTPC2_EML_BUSMON_2_BASE,
|
||||
[GOYA_BMON_TPC2_EML_3] = mmTPC2_EML_BUSMON_3_BASE,
|
||||
[GOYA_BMON_TPC3_EML_0] = mmTPC3_EML_BUSMON_0_BASE,
|
||||
[GOYA_BMON_TPC3_EML_1] = mmTPC3_EML_BUSMON_1_BASE,
|
||||
[GOYA_BMON_TPC3_EML_2] = mmTPC3_EML_BUSMON_2_BASE,
|
||||
[GOYA_BMON_TPC3_EML_3] = mmTPC3_EML_BUSMON_3_BASE,
|
||||
[GOYA_BMON_TPC4_EML_0] = mmTPC4_EML_BUSMON_0_BASE,
|
||||
[GOYA_BMON_TPC4_EML_1] = mmTPC4_EML_BUSMON_1_BASE,
|
||||
[GOYA_BMON_TPC4_EML_2] = mmTPC4_EML_BUSMON_2_BASE,
|
||||
[GOYA_BMON_TPC4_EML_3] = mmTPC4_EML_BUSMON_3_BASE,
|
||||
[GOYA_BMON_TPC5_EML_0] = mmTPC5_EML_BUSMON_0_BASE,
|
||||
[GOYA_BMON_TPC5_EML_1] = mmTPC5_EML_BUSMON_1_BASE,
|
||||
[GOYA_BMON_TPC5_EML_2] = mmTPC5_EML_BUSMON_2_BASE,
|
||||
[GOYA_BMON_TPC5_EML_3] = mmTPC5_EML_BUSMON_3_BASE,
|
||||
[GOYA_BMON_TPC6_EML_0] = mmTPC6_EML_BUSMON_0_BASE,
|
||||
[GOYA_BMON_TPC6_EML_1] = mmTPC6_EML_BUSMON_1_BASE,
|
||||
[GOYA_BMON_TPC6_EML_2] = mmTPC6_EML_BUSMON_2_BASE,
|
||||
[GOYA_BMON_TPC6_EML_3] = mmTPC6_EML_BUSMON_3_BASE,
|
||||
[GOYA_BMON_TPC7_EML_0] = mmTPC7_EML_BUSMON_0_BASE,
|
||||
[GOYA_BMON_TPC7_EML_1] = mmTPC7_EML_BUSMON_1_BASE,
|
||||
[GOYA_BMON_TPC7_EML_2] = mmTPC7_EML_BUSMON_2_BASE,
|
||||
[GOYA_BMON_TPC7_EML_3] = mmTPC7_EML_BUSMON_3_BASE
|
||||
};
|
||||
|
||||
static u64 debug_spmu_regs[GOYA_SPMU_LAST + 1] = {
|
||||
[GOYA_SPMU_DMA_CH_0_CS] = mmDMA_CH_0_CS_SPMU_BASE,
|
||||
[GOYA_SPMU_DMA_CH_1_CS] = mmDMA_CH_1_CS_SPMU_BASE,
|
||||
[GOYA_SPMU_DMA_CH_2_CS] = mmDMA_CH_2_CS_SPMU_BASE,
|
||||
[GOYA_SPMU_DMA_CH_3_CS] = mmDMA_CH_3_CS_SPMU_BASE,
|
||||
[GOYA_SPMU_DMA_CH_4_CS] = mmDMA_CH_4_CS_SPMU_BASE,
|
||||
[GOYA_SPMU_DMA_MACRO_CS] = mmDMA_MACRO_CS_SPMU_BASE,
|
||||
[GOYA_SPMU_MME1_SBA] = mmMME1_SBA_SPMU_BASE,
|
||||
[GOYA_SPMU_MME3_SBB] = mmMME3_SBB_SPMU_BASE,
|
||||
[GOYA_SPMU_MME4_WACS2] = mmMME4_WACS2_SPMU_BASE,
|
||||
[GOYA_SPMU_MME4_WACS] = mmMME4_WACS_SPMU_BASE,
|
||||
[GOYA_SPMU_MMU_CS] = mmMMU_CS_SPMU_BASE,
|
||||
[GOYA_SPMU_PCIE] = mmPCIE_SPMU_BASE,
|
||||
[GOYA_SPMU_TPC0_EML] = mmTPC0_EML_SPMU_BASE,
|
||||
[GOYA_SPMU_TPC1_EML] = mmTPC1_EML_SPMU_BASE,
|
||||
[GOYA_SPMU_TPC2_EML] = mmTPC2_EML_SPMU_BASE,
|
||||
[GOYA_SPMU_TPC3_EML] = mmTPC3_EML_SPMU_BASE,
|
||||
[GOYA_SPMU_TPC4_EML] = mmTPC4_EML_SPMU_BASE,
|
||||
[GOYA_SPMU_TPC5_EML] = mmTPC5_EML_SPMU_BASE,
|
||||
[GOYA_SPMU_TPC6_EML] = mmTPC6_EML_SPMU_BASE,
|
||||
[GOYA_SPMU_TPC7_EML] = mmTPC7_EML_SPMU_BASE
|
||||
};
|
||||
|
||||
static int goya_coresight_timeout(struct hl_device *hdev, u64 addr,
|
||||
int position, bool up)
|
||||
{
|
||||
int rc;
|
||||
u32 val, timeout_usec;
|
||||
|
||||
if (hdev->pldm)
|
||||
timeout_usec = GOYA_PLDM_CORESIGHT_TIMEOUT_USEC;
|
||||
else
|
||||
timeout_usec = CORESIGHT_TIMEOUT_USEC;
|
||||
|
||||
rc = hl_poll_timeout(
|
||||
hdev,
|
||||
addr,
|
||||
val,
|
||||
up ? val & BIT(position) : !(val & BIT(position)),
|
||||
1000,
|
||||
timeout_usec);
|
||||
|
||||
if (rc) {
|
||||
dev_err(hdev->dev,
|
||||
"Timeout while waiting for coresight, addr: 0x%llx, position: %d, up: %d\n",
|
||||
addr, position, up);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int goya_config_stm(struct hl_device *hdev,
|
||||
struct hl_debug_params *params)
|
||||
{
|
||||
struct hl_debug_params_stm *input;
|
||||
u64 base_reg = debug_stm_regs[params->reg_idx] - CFG_BASE;
|
||||
int rc;
|
||||
|
||||
WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK);
|
||||
|
||||
if (params->enable) {
|
||||
input = params->input;
|
||||
|
||||
if (!input)
|
||||
return -EINVAL;
|
||||
|
||||
WREG32(base_reg + 0xE80, 0x80004);
|
||||
WREG32(base_reg + 0xD64, 7);
|
||||
WREG32(base_reg + 0xD60, 0);
|
||||
WREG32(base_reg + 0xD00, lower_32_bits(input->he_mask));
|
||||
WREG32(base_reg + 0xD20, lower_32_bits(input->sp_mask));
|
||||
WREG32(base_reg + 0xD60, 1);
|
||||
WREG32(base_reg + 0xD00, upper_32_bits(input->he_mask));
|
||||
WREG32(base_reg + 0xD20, upper_32_bits(input->sp_mask));
|
||||
WREG32(base_reg + 0xE70, 0x10);
|
||||
WREG32(base_reg + 0xE60, 0);
|
||||
WREG32(base_reg + 0xE64, 0x420000);
|
||||
WREG32(base_reg + 0xE00, 0xFFFFFFFF);
|
||||
WREG32(base_reg + 0xE20, 0xFFFFFFFF);
|
||||
WREG32(base_reg + 0xEF4, input->id);
|
||||
WREG32(base_reg + 0xDF4, 0x80);
|
||||
WREG32(base_reg + 0xE8C, input->frequency);
|
||||
WREG32(base_reg + 0xE90, 0x7FF);
|
||||
WREG32(base_reg + 0xE80, 0x7 | (input->id << 16));
|
||||
} else {
|
||||
WREG32(base_reg + 0xE80, 4);
|
||||
WREG32(base_reg + 0xD64, 0);
|
||||
WREG32(base_reg + 0xD60, 1);
|
||||
WREG32(base_reg + 0xD00, 0);
|
||||
WREG32(base_reg + 0xD20, 0);
|
||||
WREG32(base_reg + 0xD60, 0);
|
||||
WREG32(base_reg + 0xE20, 0);
|
||||
WREG32(base_reg + 0xE00, 0);
|
||||
WREG32(base_reg + 0xDF4, 0x80);
|
||||
WREG32(base_reg + 0xE70, 0);
|
||||
WREG32(base_reg + 0xE60, 0);
|
||||
WREG32(base_reg + 0xE64, 0);
|
||||
WREG32(base_reg + 0xE8C, 0);
|
||||
|
||||
rc = goya_coresight_timeout(hdev, base_reg + 0xE80, 23, false);
|
||||
if (rc) {
|
||||
dev_err(hdev->dev,
|
||||
"Failed to disable STM on timeout, error %d\n",
|
||||
rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
WREG32(base_reg + 0xE80, 4);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int goya_config_etf(struct hl_device *hdev,
|
||||
struct hl_debug_params *params)
|
||||
{
|
||||
struct hl_debug_params_etf *input;
|
||||
u64 base_reg = debug_etf_regs[params->reg_idx] - CFG_BASE;
|
||||
u32 val;
|
||||
int rc;
|
||||
|
||||
WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK);
|
||||
|
||||
val = RREG32(base_reg + 0x304);
|
||||
val |= 0x1000;
|
||||
WREG32(base_reg + 0x304, val);
|
||||
val |= 0x40;
|
||||
WREG32(base_reg + 0x304, val);
|
||||
|
||||
rc = goya_coresight_timeout(hdev, base_reg + 0x304, 6, false);
|
||||
if (rc) {
|
||||
dev_err(hdev->dev,
|
||||
"Failed to %s ETF on timeout, error %d\n",
|
||||
params->enable ? "enable" : "disable", rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
rc = goya_coresight_timeout(hdev, base_reg + 0xC, 2, true);
|
||||
if (rc) {
|
||||
dev_err(hdev->dev,
|
||||
"Failed to %s ETF on timeout, error %d\n",
|
||||
params->enable ? "enable" : "disable", rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
WREG32(base_reg + 0x20, 0);
|
||||
|
||||
if (params->enable) {
|
||||
input = params->input;
|
||||
|
||||
if (!input)
|
||||
return -EINVAL;
|
||||
|
||||
WREG32(base_reg + 0x34, 0x3FFC);
|
||||
WREG32(base_reg + 0x28, input->sink_mode);
|
||||
WREG32(base_reg + 0x304, 0x4001);
|
||||
WREG32(base_reg + 0x308, 0xA);
|
||||
WREG32(base_reg + 0x20, 1);
|
||||
} else {
|
||||
WREG32(base_reg + 0x34, 0);
|
||||
WREG32(base_reg + 0x28, 0);
|
||||
WREG32(base_reg + 0x304, 0);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int goya_etr_validate_address(struct hl_device *hdev, u64 addr,
|
||||
u32 size)
|
||||
{
|
||||
struct asic_fixed_properties *prop = &hdev->asic_prop;
|
||||
u64 range_start, range_end;
|
||||
|
||||
if (hdev->mmu_enable) {
|
||||
range_start = prop->va_space_dram_start_address;
|
||||
range_end = prop->va_space_dram_end_address;
|
||||
} else {
|
||||
range_start = prop->dram_user_base_address;
|
||||
range_end = prop->dram_end_address;
|
||||
}
|
||||
|
||||
return hl_mem_area_inside_range(addr, size, range_start, range_end);
|
||||
}
|
||||
|
||||
static int goya_config_etr(struct hl_device *hdev,
|
||||
struct hl_debug_params *params)
|
||||
{
|
||||
struct hl_debug_params_etr *input;
|
||||
u64 base_reg = mmPSOC_ETR_BASE - CFG_BASE;
|
||||
u32 val;
|
||||
int rc;
|
||||
|
||||
WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK);
|
||||
|
||||
val = RREG32(base_reg + 0x304);
|
||||
val |= 0x1000;
|
||||
WREG32(base_reg + 0x304, val);
|
||||
val |= 0x40;
|
||||
WREG32(base_reg + 0x304, val);
|
||||
|
||||
rc = goya_coresight_timeout(hdev, base_reg + 0x304, 6, false);
|
||||
if (rc) {
|
||||
dev_err(hdev->dev, "Failed to %s ETR on timeout, error %d\n",
|
||||
params->enable ? "enable" : "disable", rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
rc = goya_coresight_timeout(hdev, base_reg + 0xC, 2, true);
|
||||
if (rc) {
|
||||
dev_err(hdev->dev, "Failed to %s ETR on timeout, error %d\n",
|
||||
params->enable ? "enable" : "disable", rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
WREG32(base_reg + 0x20, 0);
|
||||
|
||||
if (params->enable) {
|
||||
input = params->input;
|
||||
|
||||
if (!input)
|
||||
return -EINVAL;
|
||||
|
||||
if (input->buffer_size == 0) {
|
||||
dev_err(hdev->dev,
|
||||
"ETR buffer size should be bigger than 0\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!goya_etr_validate_address(hdev,
|
||||
input->buffer_address, input->buffer_size)) {
|
||||
dev_err(hdev->dev, "buffer address is not valid\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
WREG32(base_reg + 0x34, 0x3FFC);
|
||||
WREG32(base_reg + 0x4, input->buffer_size);
|
||||
WREG32(base_reg + 0x28, input->sink_mode);
|
||||
WREG32(base_reg + 0x110, 0x700);
|
||||
WREG32(base_reg + 0x118,
|
||||
lower_32_bits(input->buffer_address));
|
||||
WREG32(base_reg + 0x11C,
|
||||
upper_32_bits(input->buffer_address));
|
||||
WREG32(base_reg + 0x304, 3);
|
||||
WREG32(base_reg + 0x308, 0xA);
|
||||
WREG32(base_reg + 0x20, 1);
|
||||
} else {
|
||||
WREG32(base_reg + 0x34, 0);
|
||||
WREG32(base_reg + 0x4, 0x400);
|
||||
WREG32(base_reg + 0x118, 0);
|
||||
WREG32(base_reg + 0x11C, 0);
|
||||
WREG32(base_reg + 0x308, 0);
|
||||
WREG32(base_reg + 0x28, 0);
|
||||
WREG32(base_reg + 0x304, 0);
|
||||
|
||||
if (params->output_size >= sizeof(u32))
|
||||
*(u32 *) params->output = RREG32(base_reg + 0x18);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int goya_config_funnel(struct hl_device *hdev,
|
||||
struct hl_debug_params *params)
|
||||
{
|
||||
WREG32(debug_funnel_regs[params->reg_idx] - CFG_BASE + 0xFB0,
|
||||
CORESIGHT_UNLOCK);
|
||||
|
||||
WREG32(debug_funnel_regs[params->reg_idx] - CFG_BASE,
|
||||
params->enable ? 0x33F : 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int goya_config_bmon(struct hl_device *hdev,
|
||||
struct hl_debug_params *params)
|
||||
{
|
||||
struct hl_debug_params_bmon *input;
|
||||
u64 base_reg = debug_bmon_regs[params->reg_idx] - CFG_BASE;
|
||||
u32 pcie_base = 0;
|
||||
|
||||
WREG32(base_reg + 0x104, 1);
|
||||
|
||||
if (params->enable) {
|
||||
input = params->input;
|
||||
|
||||
if (!input)
|
||||
return -EINVAL;
|
||||
|
||||
WREG32(base_reg + 0x208, lower_32_bits(input->addr_range0));
|
||||
WREG32(base_reg + 0x20C, upper_32_bits(input->addr_range0));
|
||||
WREG32(base_reg + 0x248, lower_32_bits(input->addr_range1));
|
||||
WREG32(base_reg + 0x24C, upper_32_bits(input->addr_range1));
|
||||
WREG32(base_reg + 0x224, 0);
|
||||
WREG32(base_reg + 0x234, 0);
|
||||
WREG32(base_reg + 0x30C, input->bw_win);
|
||||
WREG32(base_reg + 0x308, input->win_capture);
|
||||
|
||||
/* PCIE IF BMON bug WA */
|
||||
if (params->reg_idx != GOYA_BMON_PCIE_MSTR_RD &&
|
||||
params->reg_idx != GOYA_BMON_PCIE_MSTR_WR &&
|
||||
params->reg_idx != GOYA_BMON_PCIE_SLV_RD &&
|
||||
params->reg_idx != GOYA_BMON_PCIE_SLV_WR)
|
||||
pcie_base = 0xA000000;
|
||||
|
||||
WREG32(base_reg + 0x700, pcie_base | 0xB00 | (input->id << 12));
|
||||
WREG32(base_reg + 0x708, pcie_base | 0xA00 | (input->id << 12));
|
||||
WREG32(base_reg + 0x70C, pcie_base | 0xC00 | (input->id << 12));
|
||||
|
||||
WREG32(base_reg + 0x100, 0x11);
|
||||
WREG32(base_reg + 0x304, 0x1);
|
||||
} else {
|
||||
WREG32(base_reg + 0x208, 0xFFFFFFFF);
|
||||
WREG32(base_reg + 0x20C, 0xFFFFFFFF);
|
||||
WREG32(base_reg + 0x248, 0xFFFFFFFF);
|
||||
WREG32(base_reg + 0x24C, 0xFFFFFFFF);
|
||||
WREG32(base_reg + 0x224, 0xFFFFFFFF);
|
||||
WREG32(base_reg + 0x234, 0x1070F);
|
||||
WREG32(base_reg + 0x30C, 0);
|
||||
WREG32(base_reg + 0x308, 0xFFFF);
|
||||
WREG32(base_reg + 0x700, 0xA000B00);
|
||||
WREG32(base_reg + 0x708, 0xA000A00);
|
||||
WREG32(base_reg + 0x70C, 0xA000C00);
|
||||
WREG32(base_reg + 0x100, 1);
|
||||
WREG32(base_reg + 0x304, 0);
|
||||
WREG32(base_reg + 0x104, 0);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int goya_config_spmu(struct hl_device *hdev,
|
||||
struct hl_debug_params *params)
|
||||
{
|
||||
u64 base_reg = debug_spmu_regs[params->reg_idx] - CFG_BASE;
|
||||
struct hl_debug_params_spmu *input = params->input;
|
||||
u64 *output;
|
||||
u32 output_arr_len;
|
||||
u32 events_num;
|
||||
u32 overflow_idx;
|
||||
u32 cycle_cnt_idx;
|
||||
int i;
|
||||
|
||||
if (params->enable) {
|
||||
input = params->input;
|
||||
|
||||
if (!input)
|
||||
return -EINVAL;
|
||||
|
||||
if (input->event_types_num < 3) {
|
||||
dev_err(hdev->dev,
|
||||
"not enough values for SPMU enable\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
WREG32(base_reg + 0xE04, 0x41013046);
|
||||
WREG32(base_reg + 0xE04, 0x41013040);
|
||||
|
||||
for (i = 0 ; i < input->event_types_num ; i++)
|
||||
WREG32(base_reg + 0x400 + i * 4, input->event_types[i]);
|
||||
|
||||
WREG32(base_reg + 0xE04, 0x41013041);
|
||||
WREG32(base_reg + 0xC00, 0x8000003F);
|
||||
} else {
|
||||
output = params->output;
|
||||
output_arr_len = params->output_size / 8;
|
||||
events_num = output_arr_len - 2;
|
||||
overflow_idx = output_arr_len - 2;
|
||||
cycle_cnt_idx = output_arr_len - 1;
|
||||
|
||||
if (!output)
|
||||
return -EINVAL;
|
||||
|
||||
if (output_arr_len < 3) {
|
||||
dev_err(hdev->dev,
|
||||
"not enough values for SPMU disable\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
WREG32(base_reg + 0xE04, 0x41013040);
|
||||
|
||||
for (i = 0 ; i < events_num ; i++)
|
||||
output[i] = RREG32(base_reg + i * 8);
|
||||
|
||||
output[overflow_idx] = RREG32(base_reg + 0xCC0);
|
||||
|
||||
output[cycle_cnt_idx] = RREG32(base_reg + 0xFC);
|
||||
output[cycle_cnt_idx] <<= 32;
|
||||
output[cycle_cnt_idx] |= RREG32(base_reg + 0xF8);
|
||||
|
||||
WREG32(base_reg + 0xCC0, 0);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int goya_config_timestamp(struct hl_device *hdev,
|
||||
struct hl_debug_params *params)
|
||||
{
|
||||
WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
|
||||
if (params->enable) {
|
||||
WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0xC, 0);
|
||||
WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0x8, 0);
|
||||
WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 1);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int goya_debug_coresight(struct hl_device *hdev, void *data)
|
||||
{
|
||||
struct hl_debug_params *params = data;
|
||||
u32 val;
|
||||
int rc;
|
||||
|
||||
switch (params->op) {
|
||||
case HL_DEBUG_OP_STM:
|
||||
rc = goya_config_stm(hdev, params);
|
||||
break;
|
||||
case HL_DEBUG_OP_ETF:
|
||||
rc = goya_config_etf(hdev, params);
|
||||
break;
|
||||
case HL_DEBUG_OP_ETR:
|
||||
rc = goya_config_etr(hdev, params);
|
||||
break;
|
||||
case HL_DEBUG_OP_FUNNEL:
|
||||
rc = goya_config_funnel(hdev, params);
|
||||
break;
|
||||
case HL_DEBUG_OP_BMON:
|
||||
rc = goya_config_bmon(hdev, params);
|
||||
break;
|
||||
case HL_DEBUG_OP_SPMU:
|
||||
rc = goya_config_spmu(hdev, params);
|
||||
break;
|
||||
case HL_DEBUG_OP_TIMESTAMP:
|
||||
rc = goya_config_timestamp(hdev, params);
|
||||
break;
|
||||
|
||||
default:
|
||||
dev_err(hdev->dev, "Unknown coresight id %d\n", params->op);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Perform read from the device to flush all configuration */
|
||||
val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
|
||||
|
||||
return rc;
|
||||
}
|
|
@ -6,6 +6,7 @@
|
|||
*/
|
||||
|
||||
#include "goyaP.h"
|
||||
#include "include/goya/asic_reg/goya_regs.h"
|
||||
|
||||
/*
|
||||
* goya_set_block_as_protected - set the given block as protected
|
||||
|
@ -2159,6 +2160,8 @@ static void goya_init_protection_bits(struct hl_device *hdev)
|
|||
* Bits 7-11 represents the word offset inside the 128 bytes.
|
||||
* Bits 2-6 represents the bit location inside the word.
|
||||
*/
|
||||
u32 pb_addr, mask;
|
||||
u8 word_offset;
|
||||
|
||||
goya_pb_set_block(hdev, mmPCI_NRTR_BASE);
|
||||
goya_pb_set_block(hdev, mmPCI_RD_REGULATOR_BASE);
|
||||
|
@ -2237,6 +2240,14 @@ static void goya_init_protection_bits(struct hl_device *hdev)
|
|||
goya_pb_set_block(hdev, mmPCIE_AUX_BASE);
|
||||
goya_pb_set_block(hdev, mmPCIE_DB_RSV_BASE);
|
||||
goya_pb_set_block(hdev, mmPCIE_PHY_BASE);
|
||||
goya_pb_set_block(hdev, mmTPC0_NRTR_BASE);
|
||||
goya_pb_set_block(hdev, mmTPC_PLL_BASE);
|
||||
|
||||
pb_addr = (mmTPC_PLL_CLK_RLX_0 & ~0xFFF) + PROT_BITS_OFFS;
|
||||
word_offset = ((mmTPC_PLL_CLK_RLX_0 & PROT_BITS_OFFS) >> 7) << 2;
|
||||
mask = 1 << ((mmTPC_PLL_CLK_RLX_0 & 0x7C) >> 2);
|
||||
|
||||
WREG32(pb_addr + word_offset, mask);
|
||||
|
||||
goya_init_mme_protection_bits(hdev);
|
||||
|
||||
|
@ -2294,8 +2305,8 @@ void goya_init_security(struct hl_device *hdev)
|
|||
u32 lbw_rng10_base = 0xFCC60000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
|
||||
u32 lbw_rng10_mask = 0xFFFE0000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
|
||||
|
||||
u32 lbw_rng11_base = 0xFCE00000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
|
||||
u32 lbw_rng11_mask = 0xFFFFC000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
|
||||
u32 lbw_rng11_base = 0xFCE02000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
|
||||
u32 lbw_rng11_mask = 0xFFFFE000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
|
||||
|
||||
u32 lbw_rng12_base = 0xFE484000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
|
||||
u32 lbw_rng12_mask = 0xFFFFF000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
|
||||
|
|
|
@ -33,6 +33,9 @@
|
|||
|
||||
#define HL_PLL_LOW_JOB_FREQ_USEC 5000000 /* 5 s */
|
||||
|
||||
#define HL_ARMCP_INFO_TIMEOUT_USEC 10000000 /* 10s */
|
||||
#define HL_ARMCP_EEPROM_TIMEOUT_USEC 10000000 /* 10s */
|
||||
|
||||
#define HL_MAX_QUEUES 128
|
||||
|
||||
#define HL_MAX_JOBS_PER_CS 64
|
||||
|
@ -48,8 +51,9 @@
|
|||
|
||||
/**
|
||||
* struct pgt_info - MMU hop page info.
|
||||
* @node: hash linked-list node for the pgts hash of pgts.
|
||||
* @addr: physical address of the pgt.
|
||||
* @node: hash linked-list node for the pgts shadow hash of pgts.
|
||||
* @phys_addr: physical address of the pgt.
|
||||
* @shadow_addr: shadow hop in the host.
|
||||
* @ctx: pointer to the owner ctx.
|
||||
* @num_of_ptes: indicates how many ptes are used in the pgt.
|
||||
*
|
||||
|
@ -59,10 +63,11 @@
|
|||
* page, it is freed with its pgt_info structure.
|
||||
*/
|
||||
struct pgt_info {
|
||||
struct hlist_node node;
|
||||
u64 addr;
|
||||
struct hl_ctx *ctx;
|
||||
int num_of_ptes;
|
||||
struct hlist_node node;
|
||||
u64 phys_addr;
|
||||
u64 shadow_addr;
|
||||
struct hl_ctx *ctx;
|
||||
int num_of_ptes;
|
||||
};
|
||||
|
||||
struct hl_device;
|
||||
|
@ -145,6 +150,8 @@ enum hl_device_hw_state {
|
|||
* mapping DRAM memory.
|
||||
* @dram_size_for_default_page_mapping: DRAM size needed to map to avoid page
|
||||
* fault.
|
||||
* @pcie_dbi_base_address: Base address of the PCIE_DBI block.
|
||||
* @pcie_aux_dbi_reg_addr: Address of the PCIE_AUX DBI register.
|
||||
* @mmu_pgt_addr: base physical address in DRAM of MMU page tables.
|
||||
* @mmu_dram_default_page_addr: DRAM default page physical address.
|
||||
* @mmu_pgt_size: MMU page tables total size.
|
||||
|
@ -186,6 +193,8 @@ struct asic_fixed_properties {
|
|||
u64 va_space_dram_start_address;
|
||||
u64 va_space_dram_end_address;
|
||||
u64 dram_size_for_default_page_mapping;
|
||||
u64 pcie_dbi_base_address;
|
||||
u64 pcie_aux_dbi_reg_addr;
|
||||
u64 mmu_pgt_addr;
|
||||
u64 mmu_dram_default_page_addr;
|
||||
u32 mmu_pgt_size;
|
||||
|
@ -381,14 +390,12 @@ struct hl_eq {
|
|||
|
||||
/**
|
||||
* enum hl_asic_type - supported ASIC types.
|
||||
* @ASIC_AUTO_DETECT: ASIC type will be automatically set.
|
||||
* @ASIC_GOYA: Goya device.
|
||||
* @ASIC_INVALID: Invalid ASIC type.
|
||||
* @ASIC_GOYA: Goya device.
|
||||
*/
|
||||
enum hl_asic_type {
|
||||
ASIC_AUTO_DETECT,
|
||||
ASIC_GOYA,
|
||||
ASIC_INVALID
|
||||
ASIC_INVALID,
|
||||
ASIC_GOYA
|
||||
};
|
||||
|
||||
struct hl_cs_parser;
|
||||
|
@@ -472,8 +479,7 @@ enum hl_pll_frequency {
  * @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with
  *                              ASID-VA-size mask.
  * @send_heartbeat: send is-alive packet to ArmCP and verify response.
- * @enable_clock_gating: enable clock gating for reducing power consumption.
- * @disable_clock_gating: disable clock for accessing registers on HBW.
+ * @debug_coresight: perform certain actions on Coresight for debugging.
  * @is_device_idle: return true if device is idle, false otherwise.
  * @soft_reset_late_init: perform certain actions needed after soft reset.
  * @hw_queues_lock: acquire H/W queues lock.
@@ -482,6 +488,9 @@ enum hl_pll_frequency {
  * @get_eeprom_data: retrieve EEPROM data from F/W.
  * @send_cpu_message: send buffer to ArmCP.
  * @get_hw_state: retrieve the H/W state
+ * @pci_bars_map: Map PCI BARs.
+ * @set_dram_bar_base: Set DRAM BAR to map specific device address.
+ * @init_iatu: Initialize the iATU unit inside the PCI controller.
  */
 struct hl_asic_funcs {
 	int (*early_init)(struct hl_device *hdev);
@@ -543,9 +552,8 @@ struct hl_asic_funcs {
 	void (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard,
 			u32 asid, u64 va, u64 size);
 	int (*send_heartbeat)(struct hl_device *hdev);
-	void (*enable_clock_gating)(struct hl_device *hdev);
-	void (*disable_clock_gating)(struct hl_device *hdev);
-	bool (*is_device_idle)(struct hl_device *hdev);
+	int (*debug_coresight)(struct hl_device *hdev, void *data);
+	bool (*is_device_idle)(struct hl_device *hdev, char *buf, size_t size);
 	int (*soft_reset_late_init)(struct hl_device *hdev);
 	void (*hw_queues_lock)(struct hl_device *hdev);
 	void (*hw_queues_unlock)(struct hl_device *hdev);
@@ -555,6 +563,9 @@ struct hl_asic_funcs {
 	int (*send_cpu_message)(struct hl_device *hdev, u32 *msg,
 				u16 len, u32 timeout, long *result);
 	enum hl_device_hw_state (*get_hw_state)(struct hl_device *hdev);
+	int (*pci_bars_map)(struct hl_device *hdev);
+	int (*set_dram_bar_base)(struct hl_device *hdev, u64 addr);
+	int (*init_iatu)(struct hl_device *hdev);
 };

@@ -582,7 +593,8 @@ struct hl_va_range {
  * struct hl_ctx - user/kernel context.
  * @mem_hash: holds mapping from virtual address to virtual memory area
  *		descriptor (hl_vm_phys_pg_list or hl_userptr).
- * @mmu_hash: holds a mapping from virtual address to pgt_info structure.
+ * @mmu_phys_hash: holds a mapping from physical address to pgt_info structure.
+ * @mmu_shadow_hash: holds a mapping from shadow address to pgt_info structure.
  * @hpriv: pointer to the private (KMD) data of the process (fd).
  * @hdev: pointer to the device structure.
  * @refcount: reference counter for the context. Context is released only when
@@ -611,7 +623,8 @@ struct hl_va_range {
  */
 struct hl_ctx {
 	DECLARE_HASHTABLE(mem_hash, MEM_HASH_TABLE_BITS);
-	DECLARE_HASHTABLE(mmu_hash, MMU_HASH_TABLE_BITS);
+	DECLARE_HASHTABLE(mmu_phys_hash, MMU_HASH_TABLE_BITS);
+	DECLARE_HASHTABLE(mmu_shadow_hash, MMU_HASH_TABLE_BITS);
 	struct hl_fpriv *hpriv;
 	struct hl_device *hdev;
 	struct kref refcount;
@@ -850,6 +863,29 @@ struct hl_vm {
 	u8  init_done;
 };

+
+/*
+ * DEBUG, PROFILING STRUCTURE
+ */
+
+/**
+ * struct hl_debug_params - Coresight debug parameters.
+ * @input: pointer to component specific input parameters.
+ * @output: pointer to component specific output parameters.
+ * @output_size: size of output buffer.
+ * @reg_idx: relevant register ID.
+ * @op: component operation to execute.
+ * @enable: true if to enable component debugging, false otherwise.
+ */
+struct hl_debug_params {
+	void *input;
+	void *output;
+	u32 output_size;
+	u32 reg_idx;
+	u32 op;
+	bool enable;
+};
+
 /*
  * FILE PRIVATE STRUCTURE
  */
@@ -997,6 +1033,12 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
 		WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | \
 				(val) << REG_FIELD_SHIFT(reg, field))

+#define HL_ENG_BUSY(buf, size, fmt, ...) ({ \
+		if (buf) \
+			snprintf(buf, size, fmt, ##__VA_ARGS__); \
+		false; \
+	})
+
 struct hwmon_chip_info;

 /**
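HL_ENG_BUSY() records a human-readable reason (when a buffer is supplied) and evaluates to false, matching the new is_device_idle(hdev, buf, size) signature. A hedged sketch of how an ASIC callback might use it; the register and mask names below are illustrative only:

static bool example_is_device_idle(struct hl_device *hdev, char *buf, size_t size)
{
	u32 sts = RREG32(mmSOME_ENGINE_STS);	/* illustrative register name */

	if (sts & SOME_ENGINE_IDLE_MASK)	/* illustrative mask name */
		return true;

	/* snprintf()s the reason into buf (if given) and yields false */
	return HL_ENG_BUSY(buf, size, "SOME_ENGINE is not idle\n");
}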
@@ -1047,7 +1089,8 @@ struct hl_device_reset_work {
  * @asic_specific: ASIC specific information to use only from ASIC files.
  * @mmu_pgt_pool: pool of available MMU hops.
  * @vm: virtual memory manager for MMU.
- * @mmu_cache_lock: protects MMU cache invalidation as it can serve one context
+ * @mmu_cache_lock: protects MMU cache invalidation as it can serve one context.
+ * @mmu_shadow_hop0: shadow mapping of the MMU hop 0 zone.
  * @hwmon_dev: H/W monitor device.
  * @pm_mng_profile: current power management profile.
  * @hl_chip_info: ASIC's sensors information.
@@ -1082,6 +1125,7 @@ struct hl_device_reset_work {
  * @init_done: is the initialization of the device done.
  * @mmu_enable: is MMU enabled.
  * @device_cpu_disabled: is the device CPU disabled (due to timeouts)
+ * @dma_mask: the dma mask that was set for this device
  */
 struct hl_device {
 	struct pci_dev			*pdev;
@@ -1117,6 +1161,7 @@ struct hl_device {
 	struct gen_pool			*mmu_pgt_pool;
 	struct hl_vm			vm;
 	struct mutex			mmu_cache_lock;
+	void				*mmu_shadow_hop0;
 	struct device			*hwmon_dev;
 	enum hl_pm_mng_profile		pm_mng_profile;
 	struct hwmon_chip_info		*hl_chip_info;
@@ -1151,6 +1196,7 @@ struct hl_device {
 	u8				dram_default_page_mapping;
 	u8				init_done;
 	u8				device_cpu_disabled;
+	u8				dma_mask;

 	/* Parameters for bring-up */
 	u8				mmu_enable;
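mmu_shadow_hop0 holds the host-side copy of the MMU hop-0 zone; together with the per-hop shadow entries it keeps page-table walks in host memory, complementing the separate change in this series that prevents device PTE read/write during hard-reset.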
@@ -1245,6 +1291,7 @@ static inline bool hl_mem_area_crosses_range(u64 address, u32 size,

 int hl_device_open(struct inode *inode, struct file *filp);
 bool hl_device_disabled_or_in_reset(struct hl_device *hdev);
+enum hl_device_status hl_device_status(struct hl_device *hdev);
 int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
 		enum hl_asic_type asic_type, int minor);
 void destroy_hdev(struct hl_device *hdev);
@@ -1351,6 +1398,31 @@ int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size);
 void hl_mmu_swap_out(struct hl_ctx *ctx);
 void hl_mmu_swap_in(struct hl_ctx *ctx);

+int hl_fw_push_fw_to_device(struct hl_device *hdev, const char *fw_name,
+				void __iomem *dst);
+int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode);
+int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
+				u16 len, u32 timeout, long *result);
+int hl_fw_test_cpu_queue(struct hl_device *hdev);
+void *hl_fw_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
+					dma_addr_t *dma_handle);
+void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
+					void *vaddr);
+int hl_fw_send_heartbeat(struct hl_device *hdev);
+int hl_fw_armcp_info_get(struct hl_device *hdev);
+int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size);
+
+int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3],
+			bool is_wc[3]);
+int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data);
+int hl_pci_set_dram_bar_base(struct hl_device *hdev, u8 inbound_region, u8 bar,
+				u64 addr);
+int hl_pci_init_iatu(struct hl_device *hdev, u64 sram_base_address,
+			u64 dram_base_address, u64 host_phys_size);
+int hl_pci_init(struct hl_device *hdev, u8 dma_mask);
+void hl_pci_fini(struct hl_device *hdev);
+int hl_pci_set_dma_mask(struct hl_device *hdev, u8 dma_mask);
+
 long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr);
 void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq);
 long hl_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr);

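These hl_fw_*() and hl_pci_*() prototypes are the ASIC-independent helpers split out of the Goya code; an ASIC back-end can implement the new pci_bars_map/set_dram_bar_base/init_iatu callbacks as thin wrappers around them. A hedged sketch (the BAR names and write-combining flags are made up for the example):

static int example_pci_bars_map(struct hl_device *hdev)
{
	static const char * const name[] = {"CFG", "MSIX", "DDR"};	/* illustrative */
	bool is_wc[3] = {false, false, true};				/* illustrative */

	return hl_pci_bars_map(hdev, name, is_wc);
}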
@@ -218,7 +218,7 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
 	hdev->disabled = true;
 	hdev->pdev = pdev; /* can be NULL in case of simulator device */

-	if (asic_type == ASIC_AUTO_DETECT) {
+	if (pdev) {
 		hdev->asic_type = get_asic_type(pdev->device);
 		if (hdev->asic_type == ASIC_INVALID) {
 			dev_err(&pdev->dev, "Unsupported ASIC\n");
@@ -229,6 +229,9 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
 		hdev->asic_type = asic_type;
 	}

+	/* Set default DMA mask to 32 bits */
+	hdev->dma_mask = 32;
+
 	mutex_lock(&hl_devs_idr_lock);

 	if (minor == -1) {
@@ -334,7 +337,7 @@ static int hl_pci_probe(struct pci_dev *pdev,
 		 " device found [%04x:%04x] (rev %x)\n",
 		 (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);

-	rc = create_hdev(&hdev, pdev, ASIC_AUTO_DETECT, -1);
+	rc = create_hdev(&hdev, pdev, ASIC_INVALID, -1);
 	if (rc)
 		return rc;

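create_hdev() now defaults dma_mask to 32 bits; an ASIC that supports wider DMA addressing is expected to raise it through the new hl_pci_set_dma_mask() helper once the capability is known. Illustrative call only (the 48-bit value is a placeholder, not necessarily what Goya uses):

	rc = hl_pci_set_dma_mask(hdev, 48);	/* 48 is a placeholder value */
	if (rc)
		return rc;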
@@ -12,6 +12,32 @@
 #include <linux/uaccess.h>
 #include <linux/slab.h>

+static u32 hl_debug_struct_size[HL_DEBUG_OP_TIMESTAMP + 1] = {
+	[HL_DEBUG_OP_ETR] = sizeof(struct hl_debug_params_etr),
+	[HL_DEBUG_OP_ETF] = sizeof(struct hl_debug_params_etf),
+	[HL_DEBUG_OP_STM] = sizeof(struct hl_debug_params_stm),
+	[HL_DEBUG_OP_FUNNEL] = 0,
+	[HL_DEBUG_OP_BMON] = sizeof(struct hl_debug_params_bmon),
+	[HL_DEBUG_OP_SPMU] = sizeof(struct hl_debug_params_spmu),
+	[HL_DEBUG_OP_TIMESTAMP] = 0
+
+};
+
+static int device_status_info(struct hl_device *hdev, struct hl_info_args *args)
+{
+	struct hl_info_device_status dev_stat = {0};
+	u32 size = args->return_size;
+	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+
+	if ((!size) || (!out))
+		return -EINVAL;
+
+	dev_stat.status = hl_device_status(hdev);
+
+	return copy_to_user(out, &dev_stat,
+			min((size_t)size, sizeof(dev_stat))) ? -EFAULT : 0;
+}
+
 static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
 {
 	struct hl_info_hw_ip_info hw_ip = {0};
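device_status_info() stays reachable even when the device is disabled or in reset (see hl_info_ioctl() below), so user space can always poll the state. A hedged user-space sketch of querying it; the uapi header install path may differ on a given system:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>	/* uapi header; install path may differ */

int query_device_status(int fd, struct hl_info_device_status *status)
{
	struct hl_info_args args;

	memset(&args, 0, sizeof(args));
	args.op = HL_INFO_DEVICE_STATUS;
	args.return_pointer = (__u64) (uintptr_t) status;
	args.return_size = sizeof(*status);

	return ioctl(fd, HL_IOCTL_INFO, &args);
}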
@@ -93,21 +119,91 @@ static int hw_idle(struct hl_device *hdev, struct hl_info_args *args)
 	if ((!max_size) || (!out))
 		return -EINVAL;

-	hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev);
+	hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev, NULL, 0);

 	return copy_to_user(out, &hw_idle,
 		min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0;
 }

+static int debug_coresight(struct hl_device *hdev, struct hl_debug_args *args)
+{
+	struct hl_debug_params *params;
+	void *input = NULL, *output = NULL;
+	int rc;
+
+	params = kzalloc(sizeof(*params), GFP_KERNEL);
+	if (!params)
+		return -ENOMEM;
+
+	params->reg_idx = args->reg_idx;
+	params->enable = args->enable;
+	params->op = args->op;
+
+	if (args->input_ptr && args->input_size) {
+		input = memdup_user((const void __user *) args->input_ptr,
+					args->input_size);
+		if (IS_ERR(input)) {
+			rc = PTR_ERR(input);
+			input = NULL;
+			dev_err(hdev->dev,
+				"error %d when copying input debug data\n", rc);
+			goto out;
+		}
+
+		params->input = input;
+	}
+
+	if (args->output_ptr && args->output_size) {
+		output = kzalloc(args->output_size, GFP_KERNEL);
+		if (!output) {
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		params->output = output;
+		params->output_size = args->output_size;
+	}
+
+	rc = hdev->asic_funcs->debug_coresight(hdev, params);
+	if (rc) {
+		dev_err(hdev->dev,
+			"debug coresight operation failed %d\n", rc);
+		goto out;
+	}
+
+	if (output) {
+		if (copy_to_user((void __user *) (uintptr_t) args->output_ptr,
+					output,
+					args->output_size)) {
+			dev_err(hdev->dev,
+				"copy to user failed in debug ioctl\n");
+			rc = -EFAULT;
+			goto out;
+		}
+	}
+
+out:
+	kfree(params);
+	kfree(output);
+	kfree(input);
+
+	return rc;
+}
+
 static int hl_info_ioctl(struct hl_fpriv *hpriv, void *data)
 {
 	struct hl_info_args *args = data;
 	struct hl_device *hdev = hpriv->hdev;
 	int rc;

+	/* We want to return device status even if it disabled or in reset */
+	if (args->op == HL_INFO_DEVICE_STATUS)
+		return device_status_info(hdev, args);
+
 	if (hl_device_disabled_or_in_reset(hdev)) {
-		dev_err(hdev->dev,
-			"Device is disabled or in reset. Can't execute INFO IOCTL\n");
+		dev_warn_ratelimited(hdev->dev,
+			"Device is %s. Can't execute INFO IOCTL\n",
+			atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
 		return -EBUSY;
 	}

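Note that debug_coresight() funnels every exit path through the single out: label; because kfree(NULL) is a no-op, params, output and input can all be freed there unconditionally.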
@@ -137,6 +233,40 @@ static int hl_info_ioctl(struct hl_fpriv *hpriv, void *data)
 	return rc;
 }

+static int hl_debug_ioctl(struct hl_fpriv *hpriv, void *data)
+{
+	struct hl_debug_args *args = data;
+	struct hl_device *hdev = hpriv->hdev;
+	int rc = 0;
+
+	if (hl_device_disabled_or_in_reset(hdev)) {
+		dev_warn_ratelimited(hdev->dev,
+			"Device is %s. Can't execute DEBUG IOCTL\n",
+			atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
+		return -EBUSY;
+	}
+
+	switch (args->op) {
+	case HL_DEBUG_OP_ETR:
+	case HL_DEBUG_OP_ETF:
+	case HL_DEBUG_OP_STM:
+	case HL_DEBUG_OP_FUNNEL:
+	case HL_DEBUG_OP_BMON:
+	case HL_DEBUG_OP_SPMU:
+	case HL_DEBUG_OP_TIMESTAMP:
+		args->input_size =
+			min(args->input_size, hl_debug_struct_size[args->op]);
+		rc = debug_coresight(hdev, args);
+		break;
+	default:
+		dev_err(hdev->dev, "Invalid request %d\n", args->op);
+		rc = -ENOTTY;
+		break;
+	}
+
+	return rc;
+}
+
 #define HL_IOCTL_DEF(ioctl, _func) \
 	[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func}

|
@ -145,7 +275,8 @@ static const struct hl_ioctl_desc hl_ioctls[] = {
|
|||
HL_IOCTL_DEF(HL_IOCTL_CB, hl_cb_ioctl),
|
||||
HL_IOCTL_DEF(HL_IOCTL_CS, hl_cs_ioctl),
|
||||
HL_IOCTL_DEF(HL_IOCTL_WAIT_CS, hl_cs_wait_ioctl),
|
||||
HL_IOCTL_DEF(HL_IOCTL_MEMORY, hl_mem_ioctl)
|
||||
HL_IOCTL_DEF(HL_IOCTL_MEMORY, hl_mem_ioctl),
|
||||
HL_IOCTL_DEF(HL_IOCTL_DEBUG, hl_debug_ioctl)
|
||||
};
|
||||
|
||||
#define HL_CORE_IOCTL_COUNT ARRAY_SIZE(hl_ioctls)
|
||||
|
|
|
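A hedged user-space sketch of driving the new DEBUG IOCTL; HL_DEBUG_OP_TIMESTAMP takes no input or output buffer, so only op and enable need to be filled in (uapi header install path may differ):

#include <string.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>	/* uapi header; install path may differ */

int set_device_timestamp(int fd, int enable)
{
	struct hl_debug_args args;

	memset(&args, 0, sizeof(args));
	args.op = HL_DEBUG_OP_TIMESTAMP;
	args.enable = enable ? 1 : 0;

	return ioctl(fd, HL_IOCTL_DEBUG, &args);
}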
@@ -32,8 +32,6 @@ struct hl_eq_entry {
 #define EQ_CTL_EVENT_TYPE_SHIFT		16
 #define EQ_CTL_EVENT_TYPE_MASK		0x03FF0000

-#define EVENT_QUEUE_MSIX_IDX		5
-
 enum pq_init_status {
 	PQ_INIT_STATUS_NA = 0,
 	PQ_INIT_STATUS_READY_FOR_CP,
@@ -302,6 +300,14 @@ enum armcp_pwm_attributes {
 	armcp_pwm_enable
 };

+#define HL_CPU_PKT_SHIFT		5
+#define HL_CPU_PKT_SIZE			(1 << HL_CPU_PKT_SHIFT)
+#define HL_CPU_PKT_MASK			(~((1 << HL_CPU_PKT_SHIFT) - 1))
+#define HL_CPU_MAX_PKTS_IN_CB		32
+#define HL_CPU_CB_SIZE			(HL_CPU_PKT_SIZE * \
+					 HL_CPU_MAX_PKTS_IN_CB)
+#define HL_CPU_ACCESSIBLE_MEM_SIZE	(HL_QUEUE_LENGTH * HL_CPU_CB_SIZE)
+
 /* Event Queue Packets */

 struct eq_generic_event {

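For scale: HL_CPU_PKT_SIZE is 1 << 5 = 32 bytes, so a CPU command buffer is 32 * 32 = 1024 bytes, and HL_CPU_ACCESSIBLE_MEM_SIZE is HL_QUEUE_LENGTH such buffers; assuming a queue length of 256, that comes to 256 KB of CPU-accessible memory.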
@ -188,4 +188,3 @@
|
|||
#define CPU_CA53_CFG_ARM_PMU_EVENT_MASK 0x3FFFFFFF
|
||||
|
||||
#endif /* ASIC_REG_CPU_CA53_CFG_MASKS_H_ */
|
||||
|
||||
|
|
|
@ -58,4 +58,3 @@
|
|||
#define mmCPU_CA53_CFG_ARM_PMU_1 0x441214
|
||||
|
||||
#endif /* ASIC_REG_CPU_CA53_CFG_REGS_H_ */
|
||||
|
||||
|
|
|
@ -46,4 +46,3 @@
|
|||
#define mmCPU_IF_AXI_SPLIT_INTR 0x442130
|
||||
|
||||
#endif /* ASIC_REG_CPU_IF_REGS_H_ */
|
||||
|
||||
|
|
|
@ -102,4 +102,3 @@
|
|||
#define mmCPU_PLL_FREQ_CALC_EN 0x4A2440
|
||||
|
||||
#endif /* ASIC_REG_CPU_PLL_REGS_H_ */
|
||||
|
||||
|
|
|
@ -206,4 +206,3 @@
|
|||
#define mmDMA_CH_0_MEM_INIT_BUSY 0x4011FC
|
||||
|
||||
#endif /* ASIC_REG_DMA_CH_0_REGS_H_ */
|
||||
|
||||
|
|
|
@ -206,4 +206,3 @@
|
|||
#define mmDMA_CH_1_MEM_INIT_BUSY 0x4091FC
|
||||
|
||||
#endif /* ASIC_REG_DMA_CH_1_REGS_H_ */
|
||||
|
||||
|
|
|
@ -206,4 +206,3 @@
|
|||
#define mmDMA_CH_2_MEM_INIT_BUSY 0x4111FC
|
||||
|
||||
#endif /* ASIC_REG_DMA_CH_2_REGS_H_ */
|
||||
|
||||
|
|
|
@ -206,4 +206,3 @@
|
|||
#define mmDMA_CH_3_MEM_INIT_BUSY 0x4191FC
|
||||
|
||||
#endif /* ASIC_REG_DMA_CH_3_REGS_H_ */
|
||||
|
||||
|
|
|
@ -206,4 +206,3 @@
|
|||
#define mmDMA_CH_4_MEM_INIT_BUSY 0x4211FC
|
||||
|
||||
#endif /* ASIC_REG_DMA_CH_4_REGS_H_ */
|
||||
|
||||
|
|
|
@ -102,4 +102,3 @@
|
|||
#define DMA_MACRO_RAZWI_HBW_RD_ID_R_MASK 0x1FFFFFFF
|
||||
|
||||
#endif /* ASIC_REG_DMA_MACRO_MASKS_H_ */
|
||||
|
||||
|
|
|
@ -178,4 +178,3 @@
|
|||
#define mmDMA_MACRO_RAZWI_HBW_RD_ID 0x4B0158
|
||||
|
||||
#endif /* ASIC_REG_DMA_MACRO_REGS_H_ */
|
||||
|
||||
|
|
|
@ -206,4 +206,3 @@
|
|||
#define DMA_NRTR_NON_LIN_SCRAMB_EN_MASK 0x1
|
||||
|
||||
#endif /* ASIC_REG_DMA_NRTR_MASKS_H_ */
|
||||
|
||||
|
|
|
@ -224,4 +224,3 @@
|
|||
#define mmDMA_NRTR_NON_LIN_SCRAMB 0x1C0604
|
||||
|
||||
#endif /* ASIC_REG_DMA_NRTR_REGS_H_ */
|
||||
|
||||
|
|
|
@ -462,4 +462,3 @@
|
|||
#define DMA_QM_0_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF
|
||||
|
||||
#endif /* ASIC_REG_DMA_QM_0_MASKS_H_ */
|
||||
|
||||
|
|
|
@ -176,4 +176,3 @@
|
|||
#define mmDMA_QM_0_CQ_BUF_RDATA 0x40030C
|
||||
|
||||
#endif /* ASIC_REG_DMA_QM_0_REGS_H_ */
|
||||
|
||||
|
|
|
@ -176,4 +176,3 @@
|
|||
#define mmDMA_QM_1_CQ_BUF_RDATA 0x40830C
|
||||
|
||||
#endif /* ASIC_REG_DMA_QM_1_REGS_H_ */
|
||||
|
||||
|
|
|
@ -176,4 +176,3 @@
|
|||
#define mmDMA_QM_2_CQ_BUF_RDATA 0x41030C
|
||||
|
||||
#endif /* ASIC_REG_DMA_QM_2_REGS_H_ */
|
||||
|
||||
|
|
|
@ -176,4 +176,3 @@
|
|||
#define mmDMA_QM_3_CQ_BUF_RDATA 0x41830C
|
||||
|
||||
#endif /* ASIC_REG_DMA_QM_3_REGS_H_ */
|
||||
|
||||
|
|
|
@ -176,4 +176,3 @@
|
|||
#define mmDMA_QM_4_CQ_BUF_RDATA 0x42030C
|
||||
|
||||
#endif /* ASIC_REG_DMA_QM_4_REGS_H_ */
|
||||
|
||||
|
|
|
@@ -189,18 +189,6 @@
 	1 << CPU_CA53_CFG_ARM_RST_CONTROL_NL2RESET_SHIFT |\
 	1 << CPU_CA53_CFG_ARM_RST_CONTROL_NMBISTRESET_SHIFT)

-/* PCI CONFIGURATION SPACE */
-#define mmPCI_CONFIG_ELBI_ADDR		0xFF0
-#define mmPCI_CONFIG_ELBI_DATA		0xFF4
-#define mmPCI_CONFIG_ELBI_CTRL		0xFF8
-#define PCI_CONFIG_ELBI_CTRL_WRITE	(1 << 31)
-
-#define mmPCI_CONFIG_ELBI_STS		0xFFC
-#define PCI_CONFIG_ELBI_STS_ERR		(1 << 30)
-#define PCI_CONFIG_ELBI_STS_DONE	(1 << 31)
-#define PCI_CONFIG_ELBI_STS_MASK	(PCI_CONFIG_ELBI_STS_ERR | \
-					 PCI_CONFIG_ELBI_STS_DONE)
-
 #define GOYA_IRQ_HBW_ID_MASK			0x1FFF
 #define GOYA_IRQ_HBW_ID_SHIFT			0
 #define GOYA_IRQ_HBW_INTERNAL_ID_MASK		0xE000

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0
  *
- * Copyright 2016-2018 HabanaLabs, Ltd.
+ * Copyright 2016-2019 HabanaLabs, Ltd.
  * All Rights Reserved.
  *
  */
@@ -12,6 +12,7 @@
 #include "stlb_regs.h"
 #include "mmu_regs.h"
 #include "pcie_aux_regs.h"
+#include "pcie_wrap_regs.h"
 #include "psoc_global_conf_regs.h"
 #include "psoc_spi_regs.h"
 #include "psoc_mme_pll_regs.h"

|
|||
#define mmIC_PLL_FREQ_CALC_EN 0x4A3440
|
||||
|
||||
#endif /* ASIC_REG_IC_PLL_REGS_H_ */
|
||||
|
||||
|
|
|
@ -102,4 +102,3 @@
|
|||
#define mmMC_PLL_FREQ_CALC_EN 0x4A1440
|
||||
|
||||
#endif /* ASIC_REG_MC_PLL_REGS_H_ */
|
||||
|
||||
|
|
|
@ -650,4 +650,3 @@
|
|||
#define MME1_RTR_NON_LIN_SCRAMB_EN_MASK 0x1
|
||||
|
||||
#endif /* ASIC_REG_MME1_RTR_MASKS_H_ */
|
||||
|
||||
|
|
|
@ -328,4 +328,3 @@
|
|||
#define mmMME1_RTR_NON_LIN_SCRAMB 0x40604
|
||||
|
||||
#endif /* ASIC_REG_MME1_RTR_REGS_H_ */
|
||||
|
||||
|
|
|
@ -328,4 +328,3 @@
|
|||
#define mmMME2_RTR_NON_LIN_SCRAMB 0x80604
|
||||
|
||||
#endif /* ASIC_REG_MME2_RTR_REGS_H_ */
|
||||
|
||||
|
|
|
@ -328,4 +328,3 @@
|
|||
#define mmMME3_RTR_NON_LIN_SCRAMB 0xC0604
|
||||
|
||||
#endif /* ASIC_REG_MME3_RTR_REGS_H_ */
|
||||
|
||||
|
|
|
@ -328,4 +328,3 @@
|
|||
#define mmMME4_RTR_NON_LIN_SCRAMB 0x100604
|
||||
|
||||
#endif /* ASIC_REG_MME4_RTR_REGS_H_ */
|
||||
|
||||
|
|
|
@ -328,4 +328,3 @@
|
|||
#define mmMME5_RTR_NON_LIN_SCRAMB 0x140604
|
||||
|
||||
#endif /* ASIC_REG_MME5_RTR_REGS_H_ */
|
||||
|
||||
|
|
|
@ -328,4 +328,3 @@
|
|||
#define mmMME6_RTR_NON_LIN_SCRAMB 0x180604
|
||||
|
||||
#endif /* ASIC_REG_MME6_RTR_REGS_H_ */
|
||||
|
||||
|
|
|
@ -370,4 +370,3 @@
|
|||
#define MME_CMDQ_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF
|
||||
|
||||
#endif /* ASIC_REG_MME_CMDQ_MASKS_H_ */
|
||||
|
||||
|
|
|
@ -136,4 +136,3 @@
|
|||
#define mmMME_CMDQ_CQ_BUF_RDATA 0xD930C
|
||||
|
||||
#endif /* ASIC_REG_MME_CMDQ_REGS_H_ */
|
||||
|
||||
|
|
|
@ -1534,4 +1534,3 @@
|
|||
#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_ID_MASK 0xFF000000
|
||||
|
||||
#endif /* ASIC_REG_MME_MASKS_H_ */
|
||||
|
||||
|
|
|
@ -462,4 +462,3 @@
|
|||
#define MME_QM_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF
|
||||
|
||||
#endif /* ASIC_REG_MME_QM_MASKS_H_ */
|
||||
|
||||
|
|
|
@ -176,4 +176,3 @@
|
|||
#define mmMME_QM_CQ_BUF_RDATA 0xD830C
|
||||
|
||||
#endif /* ASIC_REG_MME_QM_REGS_H_ */
|
||||
|
||||
|
|
|
@ -1150,4 +1150,3 @@
|
|||
#define mmMME_SHADOW_3_E_BUBBLES_PER_SPLIT 0xD0BAC
|
||||
|
||||
#endif /* ASIC_REG_MME_REGS_H_ */
|
||||
|
||||
|
|
|
@ -140,4 +140,3 @@
|
|||
#define MMU_ACCESS_ERROR_CAPTURE_VA_VA_31_0_MASK 0xFFFFFFFF
|
||||
|
||||
#endif /* ASIC_REG_MMU_MASKS_H_ */
|
||||
|
||||
|
|
|
@ -50,4 +50,3 @@
|
|||
#define mmMMU_ACCESS_ERROR_CAPTURE_VA 0x480040
|
||||
|
||||
#endif /* ASIC_REG_MMU_REGS_H_ */
|
||||
|
||||
|
|
|
@ -206,4 +206,3 @@
|
|||
#define PCI_NRTR_NON_LIN_SCRAMB_EN_MASK 0x1
|
||||
|
||||
#endif /* ASIC_REG_PCI_NRTR_MASKS_H_ */
|
||||
|
||||
|
|
|
@ -224,4 +224,3 @@
|
|||
#define mmPCI_NRTR_NON_LIN_SCRAMB 0x604
|
||||
|
||||
#endif /* ASIC_REG_PCI_NRTR_REGS_H_ */
|
||||
|
||||
|
|
|
@ -240,4 +240,3 @@
|
|||
#define mmPCIE_AUX_PERST 0xC079B8
|
||||
|
||||
#endif /* ASIC_REG_PCIE_AUX_REGS_H_ */
|
||||
|
||||
|
|
|
@ -0,0 +1,306 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0
|
||||
*
|
||||
* Copyright 2016-2018 HabanaLabs, Ltd.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
*/
|
||||
|
||||
/************************************
|
||||
** This is an auto-generated file **
|
||||
** DO NOT EDIT BELOW **
|
||||
************************************/
|
||||
|
||||
#ifndef ASIC_REG_PCIE_WRAP_REGS_H_
|
||||
#define ASIC_REG_PCIE_WRAP_REGS_H_
|
||||
|
||||
/*
|
||||
*****************************************
|
||||
* PCIE_WRAP (Prototype: PCIE_WRAP)
|
||||
*****************************************
|
||||
*/
|
||||
|
||||
#define mmPCIE_WRAP_PHY_RST_N 0xC01300
|
||||
|
||||
#define mmPCIE_WRAP_OUTSTAND_TRANS 0xC01400
|
||||
|
||||
#define mmPCIE_WRAP_MASK_REQ 0xC01404
|
||||
|
||||
#define mmPCIE_WRAP_IND_AWADDR_L 0xC01500
|
||||
|
||||
#define mmPCIE_WRAP_IND_AWADDR_H 0xC01504
|
||||
|
||||
#define mmPCIE_WRAP_IND_AWLEN 0xC01508
|
||||
|
||||
#define mmPCIE_WRAP_IND_AWSIZE 0xC0150C
|
||||
|
||||
#define mmPCIE_WRAP_IND_AWBURST 0xC01510
|
||||
|
||||
#define mmPCIE_WRAP_IND_AWLOCK 0xC01514
|
||||
|
||||
#define mmPCIE_WRAP_IND_AWCACHE 0xC01518
|
||||
|
||||
#define mmPCIE_WRAP_IND_AWPROT 0xC0151C
|
||||
|
||||
#define mmPCIE_WRAP_IND_AWVALID 0xC01520
|
||||
|
||||
#define mmPCIE_WRAP_IND_WDATA_0 0xC01524
|
||||
|
||||
#define mmPCIE_WRAP_IND_WDATA_1 0xC01528
|
||||
|
||||
#define mmPCIE_WRAP_IND_WDATA_2 0xC0152C
|
||||
|
||||
#define mmPCIE_WRAP_IND_WDATA_3 0xC01530
|
||||
|
||||
#define mmPCIE_WRAP_IND_WSTRB 0xC01544
|
||||
|
||||
#define mmPCIE_WRAP_IND_WLAST 0xC01548
|
||||
|
||||
#define mmPCIE_WRAP_IND_WVALID 0xC0154C
|
||||
|
||||
#define mmPCIE_WRAP_IND_BRESP 0xC01550
|
||||
|
||||
#define mmPCIE_WRAP_IND_BVALID 0xC01554
|
||||
|
||||
#define mmPCIE_WRAP_IND_ARADDR_0 0xC01558
|
||||
|
||||
#define mmPCIE_WRAP_IND_ARADDR_1 0xC0155C
|
||||
|
||||
#define mmPCIE_WRAP_IND_ARLEN 0xC01560
|
||||
|
||||
#define mmPCIE_WRAP_IND_ARSIZE 0xC01564
|
||||
|
||||
#define mmPCIE_WRAP_IND_ARBURST 0xC01568
|
||||
|
||||
#define mmPCIE_WRAP_IND_ARLOCK 0xC0156C
|
||||
|
||||
#define mmPCIE_WRAP_IND_ARCACHE 0xC01570
|
||||
|
||||
#define mmPCIE_WRAP_IND_ARPROT 0xC01574
|
||||
|
||||
#define mmPCIE_WRAP_IND_ARVALID 0xC01578
|
||||
|
||||
#define mmPCIE_WRAP_IND_RDATA_0 0xC0157C
|
||||
|
||||
#define mmPCIE_WRAP_IND_RDATA_1 0xC01580
|
||||
|
||||
#define mmPCIE_WRAP_IND_RDATA_2 0xC01584
|
||||
|
||||
#define mmPCIE_WRAP_IND_RDATA_3 0xC01588
|
||||
|
||||
#define mmPCIE_WRAP_IND_RLAST 0xC0159C
|
||||
|
||||
#define mmPCIE_WRAP_IND_RRESP 0xC015A0
|
||||
|
||||
#define mmPCIE_WRAP_IND_RVALID 0xC015A4
|
||||
|
||||
#define mmPCIE_WRAP_IND_AWMISC_INFO 0xC015A8
|
||||
|
||||
#define mmPCIE_WRAP_IND_AWMISC_INFO_HDR_34DW_0 0xC015AC
|
||||
|
||||
#define mmPCIE_WRAP_IND_AWMISC_INFO_HDR_34DW_1 0xC015B0
|
||||
|
||||
#define mmPCIE_WRAP_IND_AWMISC_INFO_P_TAG 0xC015B4
|
||||
|
||||
#define mmPCIE_WRAP_IND_AWMISC_INFO_ATU_BYPAS 0xC015B8
|
||||
|
||||
#define mmPCIE_WRAP_IND_AWMISC_INFO_FUNC_NUM 0xC015BC
|
||||
|
||||
#define mmPCIE_WRAP_IND_AWMISC_INFO_VFUNC_ACT 0xC015C0
|
||||
|
||||
#define mmPCIE_WRAP_IND_AWMISC_INFO_VFUNC_NUM 0xC015C4
|
||||
|
||||
#define mmPCIE_WRAP_IND_AWMISC_INFO_TLPPRFX 0xC015C8
|
||||
|
||||
#define mmPCIE_WRAP_IND_ARMISC_INFO 0xC015CC
|
||||
|
||||
#define mmPCIE_WRAP_IND_ARMISC_INFO_TLPPRFX 0xC015D0
|
||||
|
||||
#define mmPCIE_WRAP_IND_ARMISC_INFO_ATU_BYP 0xC015D4
|
||||
|
||||
#define mmPCIE_WRAP_IND_ARMISC_INFO_FUNC_NUM 0xC015D8
|
||||
|
||||
#define mmPCIE_WRAP_IND_ARMISC_INFO_VFUNC_ACT 0xC015DC
|
||||
|
||||
#define mmPCIE_WRAP_IND_ARMISC_INFO_VFUNC_NUM 0xC015E0
|
||||
|
||||
#define mmPCIE_WRAP_SLV_AWMISC_INFO 0xC01800
|
||||
|
||||
#define mmPCIE_WRAP_SLV_AWMISC_INFO_HDR_34DW_0 0xC01804
|
||||
|
||||
#define mmPCIE_WRAP_SLV_AWMISC_INFO_HDR_34DW_1 0xC01808
|
||||
|
||||
#define mmPCIE_WRAP_SLV_AWMISC_INFO_P_TAG 0xC0180C
|
||||
|
||||
#define mmPCIE_WRAP_SLV_AWMISC_INFO_ATU_BYPAS 0xC01810
|
||||
|
||||
#define mmPCIE_WRAP_SLV_AWMISC_INFO_FUNC_NUM 0xC01814
|
||||
|
||||
#define mmPCIE_WRAP_SLV_AWMISC_INFO_VFUNC_ACT 0xC01818
|
||||
|
||||
#define mmPCIE_WRAP_SLV_AWMISC_INFO_VFUNC_NUM 0xC0181C
|
||||
|
||||
#define mmPCIE_WRAP_SLV_AWMISC_INFO_TLPPRFX 0xC01820
|
||||
|
||||
#define mmPCIE_WRAP_SLV_ARMISC_INFO 0xC01824
|
||||
|
||||
#define mmPCIE_WRAP_SLV_ARMISC_INFO_TLPPRFX 0xC01828
|
||||
|
||||
#define mmPCIE_WRAP_SLV_ARMISC_INFO_ATU_BYP 0xC0182C
|
||||
|
||||
#define mmPCIE_WRAP_SLV_ARMISC_INFO_FUNC_NUM 0xC01830
|
||||
|
||||
#define mmPCIE_WRAP_SLV_ARMISC_INFO_VFUNC_ACT 0xC01834
|
||||
|
||||
#define mmPCIE_WRAP_SLV_ARMISC_INFO_VFUNC_NUM 0xC01838
|
||||
|
||||
#define mmPCIE_WRAP_MAX_QID 0xC01900
|
||||
|
||||
#define mmPCIE_WRAP_DB_BASE_ADDR_L_0 0xC01910
|
||||
|
||||
#define mmPCIE_WRAP_DB_BASE_ADDR_L_1 0xC01914
|
||||
|
||||
#define mmPCIE_WRAP_DB_BASE_ADDR_L_2 0xC01918
|
||||
|
||||
#define mmPCIE_WRAP_DB_BASE_ADDR_L_3 0xC0191C
|
||||
|
||||
#define mmPCIE_WRAP_DB_BASE_ADDR_H_0 0xC01920
|
||||
|
||||
#define mmPCIE_WRAP_DB_BASE_ADDR_H_1 0xC01924
|
||||
|
||||
#define mmPCIE_WRAP_DB_BASE_ADDR_H_2 0xC01928
|
||||
|
||||
#define mmPCIE_WRAP_DB_BASE_ADDR_H_3 0xC0192C
|
||||
|
||||
#define mmPCIE_WRAP_DB_MASK 0xC01940
|
||||
|
||||
#define mmPCIE_WRAP_SQ_BASE_ADDR_H 0xC01A00
|
||||
|
||||
#define mmPCIE_WRAP_SQ_BASE_ADDR_L 0xC01A04
|
||||
|
||||
#define mmPCIE_WRAP_SQ_STRIDE_ACCRESS 0xC01A08
|
||||
|
||||
#define mmPCIE_WRAP_SQ_POP_CMD 0xC01A10
|
||||
|
||||
#define mmPCIE_WRAP_SQ_POP_DATA 0xC01A14
|
||||
|
||||
#define mmPCIE_WRAP_DB_INTR_0 0xC01A20
|
||||
|
||||
#define mmPCIE_WRAP_DB_INTR_1 0xC01A24
|
||||
|
||||
#define mmPCIE_WRAP_DB_INTR_2 0xC01A28
|
||||
|
||||
#define mmPCIE_WRAP_DB_INTR_3 0xC01A2C
|
||||
|
||||
#define mmPCIE_WRAP_DB_INTR_4 0xC01A30
|
||||
|
||||
#define mmPCIE_WRAP_DB_INTR_5 0xC01A34
|
||||
|
||||
#define mmPCIE_WRAP_DB_INTR_6 0xC01A38
|
||||
|
||||
#define mmPCIE_WRAP_DB_INTR_7 0xC01A3C
|
||||
|
||||
#define mmPCIE_WRAP_MMU_BYPASS_DMA 0xC01A80
|
||||
|
||||
#define mmPCIE_WRAP_MMU_BYPASS_NON_DMA 0xC01A84
|
||||
|
||||
#define mmPCIE_WRAP_ASID_NON_DMA 0xC01A90
|
||||
|
||||
#define mmPCIE_WRAP_ASID_DMA_0 0xC01AA0
|
||||
|
||||
#define mmPCIE_WRAP_ASID_DMA_1 0xC01AA4
|
||||
|
||||
#define mmPCIE_WRAP_ASID_DMA_2 0xC01AA8
|
||||
|
||||
#define mmPCIE_WRAP_ASID_DMA_3 0xC01AAC
|
||||
|
||||
#define mmPCIE_WRAP_ASID_DMA_4 0xC01AB0
|
||||
|
||||
#define mmPCIE_WRAP_ASID_DMA_5 0xC01AB4
|
||||
|
||||
#define mmPCIE_WRAP_ASID_DMA_6 0xC01AB8
|
||||
|
||||
#define mmPCIE_WRAP_ASID_DMA_7 0xC01ABC
|
||||
|
||||
#define mmPCIE_WRAP_CPU_HOT_RST 0xC01AE0
|
||||
|
||||
#define mmPCIE_WRAP_AXI_PROT_OVR 0xC01AE4
|
||||
|
||||
#define mmPCIE_WRAP_CACHE_OVR 0xC01B00
|
||||
|
||||
#define mmPCIE_WRAP_LOCK_OVR 0xC01B04
|
||||
|
||||
#define mmPCIE_WRAP_PROT_OVR 0xC01B08
|
||||
|
||||
#define mmPCIE_WRAP_ARUSER_OVR 0xC01B0C
|
||||
|
||||
#define mmPCIE_WRAP_AWUSER_OVR 0xC01B10
|
||||
|
||||
#define mmPCIE_WRAP_ARUSER_OVR_EN 0xC01B14
|
||||
|
||||
#define mmPCIE_WRAP_AWUSER_OVR_EN 0xC01B18
|
||||
|
||||
#define mmPCIE_WRAP_MAX_OUTSTAND 0xC01B20
|
||||
|
||||
#define mmPCIE_WRAP_MST_IN 0xC01B24
|
||||
|
||||
#define mmPCIE_WRAP_RSP_OK 0xC01B28
|
||||
|
||||
#define mmPCIE_WRAP_LBW_CACHE_OVR 0xC01B40
|
||||
|
||||
#define mmPCIE_WRAP_LBW_LOCK_OVR 0xC01B44
|
||||
|
||||
#define mmPCIE_WRAP_LBW_PROT_OVR 0xC01B48
|
||||
|
||||
#define mmPCIE_WRAP_LBW_ARUSER_OVR 0xC01B4C
|
||||
|
||||
#define mmPCIE_WRAP_LBW_AWUSER_OVR 0xC01B50
|
||||
|
||||
#define mmPCIE_WRAP_LBW_ARUSER_OVR_EN 0xC01B58
|
||||
|
||||
#define mmPCIE_WRAP_LBW_AWUSER_OVR_EN 0xC01B5C
|
||||
|
||||
#define mmPCIE_WRAP_LBW_MAX_OUTSTAND 0xC01B60
|
||||
|
||||
#define mmPCIE_WRAP_LBW_MST_IN 0xC01B64
|
||||
|
||||
#define mmPCIE_WRAP_LBW_RSP_OK 0xC01B68
|
||||
|
||||
#define mmPCIE_WRAP_QUEUE_INIT 0xC01C00
|
||||
|
||||
#define mmPCIE_WRAP_AXI_SPLIT_INTR_0 0xC01C10
|
||||
|
||||
#define mmPCIE_WRAP_AXI_SPLIT_INTR_1 0xC01C14
|
||||
|
||||
#define mmPCIE_WRAP_DB_AWUSER 0xC01D00
|
||||
|
||||
#define mmPCIE_WRAP_DB_ARUSER 0xC01D04
|
||||
|
||||
#define mmPCIE_WRAP_PCIE_AWUSER 0xC01D08
|
||||
|
||||
#define mmPCIE_WRAP_PCIE_ARUSER 0xC01D0C
|
||||
|
||||
#define mmPCIE_WRAP_PSOC_AWUSER 0xC01D10
|
||||
|
||||
#define mmPCIE_WRAP_PSOC_ARUSER 0xC01D14
|
||||
|
||||
#define mmPCIE_WRAP_SCH_Q_AWUSER 0xC01D18
|
||||
|
||||
#define mmPCIE_WRAP_SCH_Q_ARUSER 0xC01D1C
|
||||
|
||||
#define mmPCIE_WRAP_PSOC2PCI_AWUSER 0xC01D40
|
||||
|
||||
#define mmPCIE_WRAP_PSOC2PCI_ARUSER 0xC01D44
|
||||
|
||||
#define mmPCIE_WRAP_DRAIN_TIMEOUT 0xC01D50
|
||||
|
||||
#define mmPCIE_WRAP_DRAIN_CFG 0xC01D54
|
||||
|
||||
#define mmPCIE_WRAP_DB_AXI_ERR 0xC01DE0
|
||||
|
||||
#define mmPCIE_WRAP_SPMU_INTR 0xC01DE4
|
||||
|
||||
#define mmPCIE_WRAP_AXI_INTR 0xC01DE8
|
||||
|
||||
#define mmPCIE_WRAP_E2E_CTRL 0xC01DF0
|
||||
|
||||
#endif /* ASIC_REG_PCIE_WRAP_REGS_H_ */
|
|
@ -102,4 +102,3 @@
|
|||
#define mmPSOC_EMMC_PLL_FREQ_CALC_EN 0xC70440
|
||||
|
||||
#endif /* ASIC_REG_PSOC_EMMC_PLL_REGS_H_ */
|
||||
|
||||
|
|
|
@ -444,4 +444,3 @@
|
|||
#define PSOC_GLOBAL_CONF_PAD_SEL_VAL_MASK 0x3
|
||||
|
||||
#endif /* ASIC_REG_PSOC_GLOBAL_CONF_MASKS_H_ */
|
||||
|
||||
|
|
|
@ -742,4 +742,3 @@
|
|||
#define mmPSOC_GLOBAL_CONF_PAD_SEL_81 0xC4BA44
|
||||
|
||||
#endif /* ASIC_REG_PSOC_GLOBAL_CONF_REGS_H_ */
|
||||
|
||||
|
|
|
@ -102,4 +102,3 @@
|
|||
#define mmPSOC_MME_PLL_FREQ_CALC_EN 0xC71440
|
||||
|
||||
#endif /* ASIC_REG_PSOC_MME_PLL_REGS_H_ */
|
||||
|
||||
|
|
|
@ -102,4 +102,3 @@
|
|||
#define mmPSOC_PCI_PLL_FREQ_CALC_EN 0xC72440
|
||||
|
||||
#endif /* ASIC_REG_PSOC_PCI_PLL_REGS_H_ */
|
||||
|
||||
|
|
|
@ -140,4 +140,3 @@
|
|||
#define mmPSOC_SPI_RSVD_2 0xC430FC
|
||||
|
||||
#endif /* ASIC_REG_PSOC_SPI_REGS_H_ */
|
||||
|
||||
|
|
|
@ -80,4 +80,3 @@
|
|||
#define mmSRAM_Y0_X0_RTR_DBG_L_ARB_MAX 0x201330
|
||||
|
||||
#endif /* ASIC_REG_SRAM_Y0_X0_RTR_REGS_H_ */
|
||||
|
||||
|
|
|
@ -80,4 +80,3 @@
|
|||
#define mmSRAM_Y0_X1_RTR_DBG_L_ARB_MAX 0x205330
|
||||
|
||||
#endif /* ASIC_REG_SRAM_Y0_X1_RTR_REGS_H_ */
|
||||
|
||||
|
|
|
@ -80,4 +80,3 @@
|
|||
#define mmSRAM_Y0_X2_RTR_DBG_L_ARB_MAX 0x209330
|
||||
|
||||
#endif /* ASIC_REG_SRAM_Y0_X2_RTR_REGS_H_ */
|
||||
|
||||
|
|
|
@ -80,4 +80,3 @@
|
|||
#define mmSRAM_Y0_X3_RTR_DBG_L_ARB_MAX 0x20D330
|
||||
|
||||
#endif /* ASIC_REG_SRAM_Y0_X3_RTR_REGS_H_ */
|
||||
|
||||
|
|
|
@ -80,4 +80,3 @@
|
|||
#define mmSRAM_Y0_X4_RTR_DBG_L_ARB_MAX 0x211330
|
||||
|
||||
#endif /* ASIC_REG_SRAM_Y0_X4_RTR_REGS_H_ */
|
||||
|
||||
|
|
|
@ -114,4 +114,3 @@
|
|||
#define STLB_SRAM_INIT_BUSY_DATA_MASK 0x10
|
||||
|
||||
#endif /* ASIC_REG_STLB_MASKS_H_ */
|
||||
|
||||
|
|
|
@ -52,4 +52,3 @@
|
|||
#define mmSTLB_SRAM_INIT 0x49004C
|
||||
|
||||
#endif /* ASIC_REG_STLB_REGS_H_ */
|
||||
|
||||
|
|
|
@ -1604,4 +1604,3 @@
|
|||
#define TPC0_CFG_FUNC_MBIST_MEM_LAST_FAILED_PATTERN_MASK 0x70000000
|
||||
|
||||
#endif /* ASIC_REG_TPC0_CFG_MASKS_H_ */
|
||||
|
||||
|
|
|
@ -884,4 +884,3 @@
|
|||
#define mmTPC0_CFG_FUNC_MBIST_MEM_9 0xE06E2C
|
||||
|
||||
#endif /* ASIC_REG_TPC0_CFG_REGS_H_ */
|
||||
|
||||
|
|
|
@ -370,4 +370,3 @@
|
|||
#define TPC0_CMDQ_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF
|
||||
|
||||
#endif /* ASIC_REG_TPC0_CMDQ_MASKS_H_ */
|
||||
|
||||
|
|
|
@ -136,4 +136,3 @@
|
|||
#define mmTPC0_CMDQ_CQ_BUF_RDATA 0xE0930C
|
||||
|
||||
#endif /* ASIC_REG_TPC0_CMDQ_REGS_H_ */
|
||||
|
||||
|
|
|
@ -344,4 +344,3 @@
|
|||
#define TPC0_EML_CFG_DBG_INST_INSERT_CTL_INSERT_MASK 0x1
|
||||
|
||||
#endif /* ASIC_REG_TPC0_EML_CFG_MASKS_H_ */
|
||||
|
||||
|
|
|
@ -310,4 +310,3 @@
|
|||
#define mmTPC0_EML_CFG_DBG_INST_INSERT_CTL 0x3040334
|
||||
|
||||
#endif /* ASIC_REG_TPC0_EML_CFG_REGS_H_ */
|
||||
|
||||
|
|
|
@ -206,4 +206,3 @@
|
|||
#define TPC0_NRTR_NON_LIN_SCRAMB_EN_MASK 0x1
|
||||
|
||||
#endif /* ASIC_REG_TPC0_NRTR_MASKS_H_ */
|
||||
|
||||
|
|
|
@ -224,4 +224,3 @@
|
|||
#define mmTPC0_NRTR_NON_LIN_SCRAMB 0xE00604
|
||||
|
||||
#endif /* ASIC_REG_TPC0_NRTR_REGS_H_ */
|
||||
|
||||
|
|
|
@ -462,4 +462,3 @@
|
|||
#define TPC0_QM_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF
|
||||
|
||||
#endif /* ASIC_REG_TPC0_QM_MASKS_H_ */
|
||||
|
||||
|
|
|
@ -176,4 +176,3 @@
|
|||
#define mmTPC0_QM_CQ_BUF_RDATA 0xE0830C
|
||||
|
||||
#endif /* ASIC_REG_TPC0_QM_REGS_H_ */
|
||||
|
||||
|
|
|
@ -884,4 +884,3 @@
|
|||
#define mmTPC1_CFG_FUNC_MBIST_MEM_9 0xE46E2C
|
||||
|
||||
#endif /* ASIC_REG_TPC1_CFG_REGS_H_ */
|
||||
|
||||
|
|
|
@ -136,4 +136,3 @@
|
|||
#define mmTPC1_CMDQ_CQ_BUF_RDATA 0xE4930C
|
||||
|
||||
#endif /* ASIC_REG_TPC1_CMDQ_REGS_H_ */
|
||||
|
||||
|
|
|
@ -176,4 +176,3 @@
|
|||
#define mmTPC1_QM_CQ_BUF_RDATA 0xE4830C
|
||||
|
||||
#endif /* ASIC_REG_TPC1_QM_REGS_H_ */
|
||||
|
||||
|
|
|
@ -320,4 +320,3 @@
|
|||
#define mmTPC1_RTR_NON_LIN_SCRAMB 0xE40604
|
||||
|
||||
#endif /* ASIC_REG_TPC1_RTR_REGS_H_ */
|
||||
|
||||
|
|
|
@ -884,4 +884,3 @@
|
|||
#define mmTPC2_CFG_FUNC_MBIST_MEM_9 0xE86E2C
|
||||
|
||||
#endif /* ASIC_REG_TPC2_CFG_REGS_H_ */
|
||||
|
||||
|
|
|
@ -136,4 +136,3 @@
|
|||
#define mmTPC2_CMDQ_CQ_BUF_RDATA 0xE8930C
|
||||
|
||||
#endif /* ASIC_REG_TPC2_CMDQ_REGS_H_ */
|
||||
|
||||
|
|
|
@ -176,4 +176,3 @@
|
|||
#define mmTPC2_QM_CQ_BUF_RDATA 0xE8830C
|
||||
|
||||
#endif /* ASIC_REG_TPC2_QM_REGS_H_ */
|
||||
|
||||
|
|
|
@ -320,4 +320,3 @@
|
|||
#define mmTPC2_RTR_NON_LIN_SCRAMB 0xE80604
|
||||
|
||||
#endif /* ASIC_REG_TPC2_RTR_REGS_H_ */
|
||||
|
||||
|
|
|
@ -884,4 +884,3 @@
|
|||
#define mmTPC3_CFG_FUNC_MBIST_MEM_9 0xEC6E2C
|
||||
|
||||
#endif /* ASIC_REG_TPC3_CFG_REGS_H_ */
|
||||
|
||||
|
|
|
@ -136,4 +136,3 @@
|
|||
#define mmTPC3_CMDQ_CQ_BUF_RDATA 0xEC930C
|
||||
|
||||
#endif /* ASIC_REG_TPC3_CMDQ_REGS_H_ */
|
||||
|
||||
|
|
|
@ -176,4 +176,3 @@
|
|||
#define mmTPC3_QM_CQ_BUF_RDATA 0xEC830C
|
||||
|
||||
#endif /* ASIC_REG_TPC3_QM_REGS_H_ */
|
||||
|
||||
|
|
|
@ -320,4 +320,3 @@
|
|||
#define mmTPC3_RTR_NON_LIN_SCRAMB 0xEC0604
|
||||
|
||||
#endif /* ASIC_REG_TPC3_RTR_REGS_H_ */
|
||||
|
||||
|
|
|
@ -884,4 +884,3 @@
|
|||
#define mmTPC4_CFG_FUNC_MBIST_MEM_9 0xF06E2C
|
||||
|
||||
#endif /* ASIC_REG_TPC4_CFG_REGS_H_ */
|
||||
|
||||
|
|
|
@ -136,4 +136,3 @@
|
|||
#define mmTPC4_CMDQ_CQ_BUF_RDATA 0xF0930C
|
||||
|
||||
#endif /* ASIC_REG_TPC4_CMDQ_REGS_H_ */
|
||||
|
||||
|
|
|
@ -176,4 +176,3 @@
|
|||
#define mmTPC4_QM_CQ_BUF_RDATA 0xF0830C
|
||||
|
||||
#endif /* ASIC_REG_TPC4_QM_REGS_H_ */
|
||||
|
||||
|
|
|
@ -320,4 +320,3 @@
|
|||
#define mmTPC4_RTR_NON_LIN_SCRAMB 0xF00604
|
||||
|
||||
#endif /* ASIC_REG_TPC4_RTR_REGS_H_ */
|
||||
|
||||
|
|
|
@ -884,4 +884,3 @@
|
|||
#define mmTPC5_CFG_FUNC_MBIST_MEM_9 0xF46E2C
|
||||
|
||||
#endif /* ASIC_REG_TPC5_CFG_REGS_H_ */
|
||||
|
||||
|
|
|
@ -136,4 +136,3 @@
|
|||
#define mmTPC5_CMDQ_CQ_BUF_RDATA 0xF4930C
|
||||
|
||||
#endif /* ASIC_REG_TPC5_CMDQ_REGS_H_ */
|
||||
|
||||
|
|
|
@ -176,4 +176,3 @@
|
|||
#define mmTPC5_QM_CQ_BUF_RDATA 0xF4830C
|
||||
|
||||
#endif /* ASIC_REG_TPC5_QM_REGS_H_ */
|
||||
|
||||
|
|
|
@ -320,4 +320,3 @@
|
|||
#define mmTPC5_RTR_NON_LIN_SCRAMB 0xF40604
|
||||
|
||||
#endif /* ASIC_REG_TPC5_RTR_REGS_H_ */
|
||||
|
||||
|
|
Some files were not shown because too many files have changed in this diff.