dmaengine: idxd: add per file user counters for completion record faults

Add counters per opened file for the char device in order to keep track of
how many completion record faults occurred and how many of those faults
failed the writeback by the driver after attempting to fault in the page.
The counters are managed by the xarray that associates a PASID with its
struct idxd_user_context.

Tested-by: Tony Zhu <tony.zhu@intel.com>
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Co-developed-by: Fenghua Yu <fenghua.yu@intel.com>
Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
Link: https://lore.kernel.org/r/20230407203143.2189681-13-fenghua.yu@intel.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>

commit fecae134ee (parent 2442b7473a)
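Note (not part of the patch): below is a minimal, self-contained sketch of the pattern the message describes, under the assumption of a trimmed stand-in for the wq state. A per-wq xarray keyed by PASID resolves to the opener's idxd_user_context; the open/release paths insert and erase that mapping, and the event log fault path looks the context up to bump its counters. The idxd_wq_counters wrapper and the track/untrack/count helpers are illustrative names only, not driver code.

/* Illustrative sketch only; the wrapper struct and helpers are stand-ins. */
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/mutex.h>
#include <linux/xarray.h>

enum {
	COUNTER_FAULTS = 0,	/* completion record faults observed */
	COUNTER_FAULT_FAILS,	/* writeback still failed after faulting in the page */
	COUNTER_MAX
};

struct idxd_user_context {
	u64 counters[COUNTER_MAX];
};

struct idxd_wq_counters {
	struct mutex uc_lock;		/* serializes updates to upasid_xa */
	struct xarray upasid_xa;	/* PASID -> struct idxd_user_context */
};

static void counters_init(struct idxd_wq_counters *wq)
{
	mutex_init(&wq->uc_lock);
	xa_init(&wq->upasid_xa);
}

/* open(): associate the per-file context with its PASID. */
static int track_user_context(struct idxd_wq_counters *wq, u32 pasid,
			      struct idxd_user_context *ctx)
{
	int ret;

	mutex_lock(&wq->uc_lock);
	ret = xa_insert(&wq->upasid_xa, pasid, ctx, GFP_KERNEL);
	mutex_unlock(&wq->uc_lock);
	return ret;
}

/* release(): drop the PASID association. */
static void untrack_user_context(struct idxd_wq_counters *wq, u32 pasid)
{
	mutex_lock(&wq->uc_lock);
	xa_erase(&wq->upasid_xa, pasid);
	mutex_unlock(&wq->uc_lock);
}

/* fault path: look up the context by PASID and bump one of its counters. */
static void count_fault(struct idxd_wq_counters *wq, u32 pasid, int index)
{
	struct idxd_user_context *ctx;

	if (index >= COUNTER_MAX)
		return;

	mutex_lock(&wq->uc_lock);
	ctx = xa_load(&wq->upasid_xa, pasid);
	if (ctx)
		ctx->counters[index]++;
	mutex_unlock(&wq->uc_lock);
}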
@@ -39,6 +39,7 @@ struct idxd_user_context {
 	struct mm_struct *mm;
 	unsigned int flags;
 	struct iommu_sva *sva;
+	u64 counters[COUNTER_MAX];
 };
 
 static void idxd_cdev_dev_release(struct device *dev)
@@ -84,6 +85,23 @@ static void idxd_xa_pasid_remove(struct idxd_user_context *ctx)
 	mutex_unlock(&wq->uc_lock);
 }
 
+void idxd_user_counter_increment(struct idxd_wq *wq, u32 pasid, int index)
+{
+	struct idxd_user_context *ctx;
+
+	if (index >= COUNTER_MAX)
+		return;
+
+	mutex_lock(&wq->uc_lock);
+	ctx = xa_load(&wq->upasid_xa, pasid);
+	if (!ctx) {
+		mutex_unlock(&wq->uc_lock);
+		return;
+	}
+	ctx->counters[index]++;
+	mutex_unlock(&wq->uc_lock);
+}
+
 static int idxd_cdev_open(struct inode *inode, struct file *filp)
 {
 	struct idxd_user_context *ctx;
@@ -127,6 +127,12 @@ struct idxd_pmu {
 
 #define IDXD_MAX_PRIORITY	0xf
 
+enum {
+	COUNTER_FAULTS = 0,
+	COUNTER_FAULT_FAILS,
+	COUNTER_MAX
+};
+
 enum idxd_wq_state {
 	IDXD_WQ_DISABLED = 0,
 	IDXD_WQ_ENABLED,
@@ -713,6 +719,7 @@ int idxd_wq_add_cdev(struct idxd_wq *wq);
 void idxd_wq_del_cdev(struct idxd_wq *wq);
 int idxd_copy_cr(struct idxd_wq *wq, ioasid_t pasid, unsigned long addr,
 		 void *buf, int len);
+void idxd_user_counter_increment(struct idxd_wq *wq, u32 pasid, int index);
 
 /* perfmon */
 #if IS_ENABLED(CONFIG_INTEL_IDXD_PERFMON)
@@ -240,6 +240,7 @@ static void idxd_evl_fault_work(struct work_struct *work)
 			evl->batch_fail[entry_head->batch_id] = false;
 
 		copy_size = cr_size;
+		idxd_user_counter_increment(wq, entry_head->pasid, COUNTER_FAULTS);
 		break;
 	case DSA_COMP_BATCH_EVL_ERR:
 		bf = &evl->batch_fail[entry_head->batch_id];
@@ -251,6 +252,7 @@ static void idxd_evl_fault_work(struct work_struct *work)
 			*result = 1;
 			*bf = false;
 		}
+		idxd_user_counter_increment(wq, entry_head->pasid, COUNTER_FAULTS);
 		break;
 	case DSA_COMP_DRAIN_EVL:
 		copy_size = cr_size;
@@ -282,6 +284,7 @@ static void idxd_evl_fault_work(struct work_struct *work)
 		switch (fault->status) {
 		case DSA_COMP_CRA_XLAT:
 			if (copied != copy_size) {
+				idxd_user_counter_increment(wq, entry_head->pasid, COUNTER_FAULT_FAILS);
 				dev_dbg_ratelimited(dev, "Failed to write to completion record: (%d:%d)\n",
 						    copy_size, copied);
 				if (entry_head->batch)
@@ -290,6 +293,7 @@ static void idxd_evl_fault_work(struct work_struct *work)
 			break;
 		case DSA_COMP_BATCH_EVL_ERR:
 			if (copied != copy_size) {
+				idxd_user_counter_increment(wq, entry_head->pasid, COUNTER_FAULT_FAILS);
 				dev_dbg_ratelimited(dev, "Failed to write to batch completion record: (%d:%d)\n",
 						    copy_size, copied);
 			}