// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SN Platform GRU Driver
 *
 * Dump GRU State
 *
 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <asm/uv/uv_hub.h>

#include <linux/nospec.h>

#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"
#include "grulib.h"

#define CCH_LOCK_ATTEMPTS	10

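/*
 * Copy a single GRU handle (GRU_HANDLE_BYTES) to user space and advance
 * the user destination pointer past it. Returns 0 on success, -1 if the
 * copy faults.
 */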
static int gru_user_copy_handle(void __user **dp, void *s)
{
	if (copy_to_user(*dp, s, GRU_HANDLE_BYTES))
		return -1;
	*dp += GRU_HANDLE_BYTES;
	return 0;
}

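/*
 * Copy the CBRs, TFHs and CBEs of a context (and optionally its DSR lines)
 * to the user buffer. Only resources present in the CCH allocation map are
 * dumped; the CBRs may be flushed from the cache first.
 */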
static int gru_dump_context_data(void *grubase,
			struct gru_context_configuration_handle *cch,
			void __user *ubuf, int ctxnum, int dsrcnt,
			int flush_cbrs)
{
	void *cb, *cbe, *tfh, *gseg;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	tfh = grubase + GRU_TFH_BASE;

	for_each_cbr_in_allocation_map(i, &cch->cbr_allocation_map, scr) {
		if (flush_cbrs)
			gru_flush_cache(cb);
		if (gru_user_copy_handle(&ubuf, cb))
			goto fail;
		if (gru_user_copy_handle(&ubuf, tfh + i * GRU_HANDLE_STRIDE))
			goto fail;
		if (gru_user_copy_handle(&ubuf, cbe + i * GRU_HANDLE_STRIDE))
			goto fail;
		cb += GRU_HANDLE_STRIDE;
	}
	/* ubuf is a user pointer; copy_to_user, not memcpy, must be used */
	if (dsrcnt && copy_to_user(ubuf, gseg + GRU_DS_BASE,
				   dsrcnt * GRU_HANDLE_STRIDE))
		goto fail;
	return 0;

fail:
	return -EFAULT;
}

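/*
 * Dump all TLB fault map (TFM) handles of a chiplet to the user buffer.
 * Returns the number of bytes written, or -EFBIG if the buffer is too small.
 */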
static int gru_dump_tfm(struct gru_state *gru,
		void __user *ubuf, void __user *ubufend)
{
	struct gru_tlb_fault_map *tfm;
	int i;

	if (GRU_NUM_TFM * GRU_CACHE_LINE_BYTES > ubufend - ubuf)
		return -EFBIG;

	for (i = 0; i < GRU_NUM_TFM; i++) {
		tfm = get_tfm(gru->gs_gru_base_vaddr, i);
		if (gru_user_copy_handle(&ubuf, tfm))
			goto fail;
	}
	return GRU_NUM_TFM * GRU_CACHE_LINE_BYTES;

fail:
	return -EFAULT;
}

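/*
 * Dump all TLB global handles (TGH) of a chiplet to the user buffer.
 * Returns the number of bytes written, or -EFBIG if the buffer is too small.
 */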
static int gru_dump_tgh(struct gru_state *gru,
		void __user *ubuf, void __user *ubufend)
{
	struct gru_tlb_global_handle *tgh;
	int i;

	if (GRU_NUM_TGH * GRU_CACHE_LINE_BYTES > ubufend - ubuf)
		return -EFBIG;

	for (i = 0; i < GRU_NUM_TGH; i++) {
		tgh = get_tgh(gru->gs_gru_base_vaddr, i);
		if (gru_user_copy_handle(&ubuf, tgh))
			goto fail;
	}
	return GRU_NUM_TGH * GRU_CACHE_LINE_BYTES;

fail:
	return -EFAULT;
}

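/*
 * Dump the state of a single context: a header, the CCH, and (if the CCH
 * could be locked or locking was not requested) its CBR/TFH/CBE handles and
 * optional DSR data. Returns the number of bytes written to the user buffer
 * or a negative errno.
 */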
static int gru_dump_context(struct gru_state *gru, int ctxnum,
		void __user *ubuf, void __user *ubufend, char data_opt,
		char lock_cch, char flush_cbrs)
{
	struct gru_dump_context_header hdr;
	struct gru_dump_context_header __user *uhdr = ubuf;
	struct gru_context_configuration_handle *cch, *ubufcch;
	struct gru_thread_state *gts;
	int try, cch_locked, cbrcnt = 0, dsrcnt = 0, bytes = 0, ret = 0;
	void *grubase;

	memset(&hdr, 0, sizeof(hdr));
	grubase = gru->gs_gru_base_vaddr;
	cch = get_cch(grubase, ctxnum);
	for (try = 0; try < CCH_LOCK_ATTEMPTS; try++) {
		cch_locked = trylock_cch_handle(cch);
		if (cch_locked)
			break;
		msleep(1);
	}

	ubuf += sizeof(hdr);
	ubufcch = ubuf;
	if (gru_user_copy_handle(&ubuf, cch)) {
		if (cch_locked)
			unlock_cch_handle(cch);
		return -EFAULT;
	}
	if (cch_locked)
		ubufcch->delresp = 0;
	bytes = sizeof(hdr) + GRU_CACHE_LINE_BYTES;

	if (cch_locked || !lock_cch) {
		gts = gru->gs_gts[ctxnum];
		if (gts && gts->ts_vma) {
			hdr.pid = gts->ts_tgid_owner;
			hdr.vaddr = gts->ts_vma->vm_start;
		}
		if (cch->state != CCHSTATE_INACTIVE) {
			cbrcnt = hweight64(cch->cbr_allocation_map) *
						GRU_CBR_AU_SIZE;
			dsrcnt = data_opt ? hweight32(cch->dsr_allocation_map) *
					GRU_DSR_AU_CL : 0;
		}
		bytes += (3 * cbrcnt + dsrcnt) * GRU_CACHE_LINE_BYTES;
		if (bytes > ubufend - ubuf)
			ret = -EFBIG;
		else
			ret = gru_dump_context_data(grubase, cch, ubuf, ctxnum,
							dsrcnt, flush_cbrs);
	}
	if (cch_locked)
		unlock_cch_handle(cch);
	if (ret)
		return ret;

	hdr.magic = GRU_DUMP_MAGIC;
	hdr.gid = gru->gs_gid;
	hdr.ctxnum = ctxnum;
	hdr.cbrcnt = cbrcnt;
	hdr.dsrcnt = dsrcnt;
	hdr.cch_locked = cch_locked;
	if (copy_to_user(uhdr, &hdr, sizeof(hdr)))
		return -EFAULT;

	return bytes;
}

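/*
 * Entry point for a chiplet state dump request from user space (reached
 * through the driver's request path): dump the TFMs, the TGHs and the
 * selected context(s) of one chiplet into the caller's buffer. Returns the
 * number of contexts dumped or a negative errno.
 */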
int gru_dump_chiplet_request(unsigned long arg)
{
	struct gru_state *gru;
	struct gru_dump_chiplet_state_req req;
	void __user *ubuf;
	void __user *ubufend;
	int ctxnum, ret, cnt = 0;

	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	/* Currently, only dump by gid is implemented */
	if (req.gid >= gru_max_gids)
		return -EINVAL;
	req.gid = array_index_nospec(req.gid, gru_max_gids);

	gru = GID_TO_GRU(req.gid);
	ubuf = req.buf;
	ubufend = req.buf + req.buflen;

	ret = gru_dump_tfm(gru, ubuf, ubufend);
	if (ret < 0)
		goto fail;
	ubuf += ret;

	ret = gru_dump_tgh(gru, ubuf, ubufend);
	if (ret < 0)
		goto fail;
	ubuf += ret;

	for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
		if (req.ctxnum == ctxnum || req.ctxnum < 0) {
			ret = gru_dump_context(gru, ctxnum, ubuf, ubufend,
						req.data_opt, req.lock_cch,
						req.flush_cbrs);
			if (ret < 0)
				goto fail;
			ubuf += ret;
			cnt++;
		}
	}

	if (copy_to_user((void __user *)arg, &req, sizeof(req)))
		return -EFAULT;
	return cnt;

fail:
	return ret;
}