staging: lustre: lnet: add offset for selftest brw
In the current LNet selftest, neither the client-side nor the server-side bulk uses an offset, so only page-aligned I/O can be tested. This patch changes that:

- the user can set a brw offset with: lst add_test ... brw off=OFFSET ...
- the offset is only effective on the client side so far
- to simplify the implementation, the offset must be eight-byte aligned

Signed-off-by: Liang Zhen <liang.zhen@intel.com>
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-5718
Reviewed-on: http://review.whamcloud.com/12496
Reviewed-by: Doug Oucharek <doug.s.oucharek@intel.com>
Reviewed-by: James Simmons <uja.ornl@yahoo.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
Signed-off-by: James Simmons <jsimmons@infradead.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
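For example, with client and server groups named "clients" and "servers" and a batch named "bulk_rw" (illustrative names, not mandated by the patch), an unaligned bulk test could be requested with something like:

    lst add_test --batch bulk_rw --from clients --to servers brw write size=4K off=64

where off=64 satisfies the eight-byte alignment restriction above.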
commit b73d803b65 (parent 482b27d897)
@@ -490,6 +490,8 @@ typedef struct {
         int blk_size;           /* size (bytes) */
         int blk_time;           /* time of running the test*/
         int blk_flags;          /* reserved flags */
+        int blk_cli_off;        /* bulk offset on client */
+        int blk_srv_off;        /* reserved: bulk offset on server */
 } lst_test_bulk_param_t;
 
 typedef struct {
@@ -44,6 +44,10 @@ static int brw_inject_errors;
 module_param(brw_inject_errors, int, 0644);
 MODULE_PARM_DESC(brw_inject_errors, "# data errors to inject randomly, zero by default");
 
+#define BRW_POISON      0xbeefbeefbeefbeefULL
+#define BRW_MAGIC       0xeeb0eeb1eeb2eeb3ULL
+#define BRW_MSIZE       sizeof(u64)
+
 static void
 brw_client_fini(struct sfw_test_instance *tsi)
 {
@@ -67,6 +71,7 @@ brw_client_init(struct sfw_test_instance *tsi)
 {
         struct sfw_session *sn = tsi->tsi_batch->bat_session;
         int flags;
+        int off;
         int npg;
         int len;
         int opc;
@@ -87,6 +92,7 @@ brw_client_init(struct sfw_test_instance *tsi)
                  * but we have to keep it for compatibility
                  */
                 len = npg * PAGE_SIZE;
+                off = 0;
         } else {
                 struct test_bulk_req_v1 *breq = &tsi->tsi_u.bulk_v1;
 
@@ -99,9 +105,13 @@ brw_client_init(struct sfw_test_instance *tsi)
                 opc = breq->blk_opc;
                 flags = breq->blk_flags;
                 len = breq->blk_len;
-                npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+                off = breq->blk_offset & ~PAGE_MASK;
+                npg = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
         }
 
+        if (off % BRW_MSIZE)
+                return -EINVAL;
+
         if (npg > LNET_MAX_IOV || npg <= 0)
                 return -EINVAL;
 
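The arithmetic above is the core of the change: only the in-page part of blk_offset is kept (off = blk_offset & ~PAGE_MASK), and the page count covers the whole span [off, off + len), so an unaligned transfer may need one more page than a page-aligned one of the same length. A rough user-space sketch of the same math (illustrative only; PAGE_SIZE/PAGE_SHIFT/PAGE_MASK hard-coded to the common 4 KiB values and BRW_MSIZE taken as sizeof(u64), as in the patch):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PAGE_MASK       (~(PAGE_SIZE - 1))
#define BRW_MSIZE       sizeof(uint64_t)

int main(void)
{
        unsigned long blk_offset = 4104;        /* example value: one page + 8 bytes */
        unsigned long len = 8192;
        unsigned long off = blk_offset & ~PAGE_MASK;    /* in-page part: 8 */
        unsigned long npg = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;

        if (off % BRW_MSIZE)    /* the patch rejects offsets that are not 8-byte aligned */
                return 1;

        /* 8192 bytes starting 8 bytes into a page touch 3 pages, not 2 */
        printf("off=%lu npg=%lu\n", off, npg);
        return 0;
}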
@@ -114,7 +124,7 @@ brw_client_init(struct sfw_test_instance *tsi)
 
         list_for_each_entry(tsu, &tsi->tsi_units, tsu_list) {
                 bulk = srpc_alloc_bulk(lnet_cpt_of_nid(tsu->tsu_dest.nid),
-                                       npg, len, opc == LST_BRW_READ);
+                                       off, npg, len, opc == LST_BRW_READ);
                 if (!bulk) {
                         brw_client_fini(tsi);
                         return -ENOMEM;
@@ -126,12 +136,7 @@ brw_client_init(struct sfw_test_instance *tsi)
         return 0;
 }
 
-#define BRW_POISON      0xbeefbeefbeefbeefULL
-#define BRW_MAGIC       0xeeb0eeb1eeb2eeb3ULL
-#define BRW_MSIZE       sizeof(__u64)
-
-static int
-brw_inject_one_error(void)
+static int brw_inject_one_error(void)
 {
         struct timespec64 ts;
 
@@ -147,12 +152,13 @@ brw_inject_one_error(void)
 }
 
 static void
-brw_fill_page(struct page *pg, int pattern, __u64 magic)
+brw_fill_page(struct page *pg, int off, int len, int pattern, __u64 magic)
 {
-        char *addr = page_address(pg);
+        char *addr = page_address(pg) + off;
         int i;
 
         LASSERT(addr);
+        LASSERT(!(off % BRW_MSIZE) && !(len % BRW_MSIZE));
 
         if (pattern == LST_BRW_CHECK_NONE)
                 return;
@@ -162,14 +168,16 @@ brw_fill_page(struct page *pg, int pattern, __u64 magic)
 
         if (pattern == LST_BRW_CHECK_SIMPLE) {
                 memcpy(addr, &magic, BRW_MSIZE);
-                addr += PAGE_SIZE - BRW_MSIZE;
-                memcpy(addr, &magic, BRW_MSIZE);
+                if (len > BRW_MSIZE) {
+                        addr += PAGE_SIZE - BRW_MSIZE;
+                        memcpy(addr, &magic, BRW_MSIZE);
+                }
                 return;
         }
 
         if (pattern == LST_BRW_CHECK_FULL) {
-                for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++)
-                        memcpy(addr + i * BRW_MSIZE, &magic, BRW_MSIZE);
+                for (i = 0; i < len; i += BRW_MSIZE)
+                        memcpy(addr + i, &magic, BRW_MSIZE);
                 return;
         }
 
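With the extra off/len arguments, the SIMPLE and FULL patterns now stamp and verify only the [off, off + len) slice of each page instead of the whole page. A self-contained user-space sketch of the FULL pattern (plain memory standing in for a kernel page, the patch's BRW_MAGIC as the stamp; names mirror the patch but this is illustrative, not the driver code):

/* Sketch of the "check full" pattern restricted to a slice of a
 * page-sized buffer. memcpy() is used for reads to sidestep the
 * alignment/aliasing concerns the kernel code handles differently. */
#include <stdint.h>
#include <string.h>
#include <assert.h>

#define BRW_MSIZE sizeof(uint64_t)

static void fill_full(char *page, int off, int len, uint64_t magic)
{
        char *addr = page + off;
        int i;

        assert(!(off % BRW_MSIZE) && !(len % BRW_MSIZE));
        for (i = 0; i < len; i += BRW_MSIZE)    /* only [off, off + len) is touched */
                memcpy(addr + i, &magic, BRW_MSIZE);
}

static int check_full(const char *page, int off, int len, uint64_t magic)
{
        const char *addr = page + off;
        uint64_t data;
        int i;

        for (i = 0; i < len; i += BRW_MSIZE) {
                memcpy(&data, addr + i, BRW_MSIZE);
                if (data != magic)
                        return 1;       /* corrupted */
        }
        return 0;
}

int main(void)
{
        char page[4096] = { 0 };

        fill_full(page, 8, 64, 0xeeb0eeb1eeb2eeb3ULL);
        return check_full(page, 8, 64, 0xeeb0eeb1eeb2eeb3ULL);
}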
@@ -177,13 +185,14 @@ brw_fill_page(struct page *pg, int pattern, __u64 magic)
 }
 
 static int
-brw_check_page(struct page *pg, int pattern, __u64 magic)
+brw_check_page(struct page *pg, int off, int len, int pattern, __u64 magic)
 {
-        char *addr = page_address(pg);
+        char *addr = page_address(pg) + off;
         __u64 data = 0; /* make compiler happy */
         int i;
 
         LASSERT(addr);
+        LASSERT(!(off % BRW_MSIZE) && !(len % BRW_MSIZE));
 
         if (pattern == LST_BRW_CHECK_NONE)
                 return 0;
@@ -193,21 +202,21 @@ brw_check_page(struct page *pg, int pattern, __u64 magic)
                 if (data != magic)
                         goto bad_data;
 
-                addr += PAGE_SIZE - BRW_MSIZE;
-                data = *((__u64 *)addr);
-                if (data != magic)
-                        goto bad_data;
-
+                if (len > BRW_MSIZE) {
+                        addr += PAGE_SIZE - BRW_MSIZE;
+                        data = *((__u64 *)addr);
+                        if (data != magic)
+                                goto bad_data;
+                }
                 return 0;
         }
 
         if (pattern == LST_BRW_CHECK_FULL) {
-                for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++) {
-                        data = *(((__u64 *)addr) + i);
+                for (i = 0; i < len; i += BRW_MSIZE) {
+                        data = *(u64 *)(addr + i);
                         if (data != magic)
                                 goto bad_data;
                 }
-
                 return 0;
         }
 
@@ -226,8 +235,12 @@ brw_fill_bulk(struct srpc_bulk *bk, int pattern, __u64 magic)
         struct page *pg;
 
         for (i = 0; i < bk->bk_niov; i++) {
+                int off, len;
+
                 pg = bk->bk_iovs[i].bv_page;
-                brw_fill_page(pg, pattern, magic);
+                off = bk->bk_iovs[i].bv_offset;
+                len = bk->bk_iovs[i].bv_len;
+                brw_fill_page(pg, off, len, pattern, magic);
         }
 }
 
@@ -238,8 +251,12 @@ brw_check_bulk(struct srpc_bulk *bk, int pattern, __u64 magic)
         struct page *pg;
 
         for (i = 0; i < bk->bk_niov; i++) {
+                int off, len;
+
                 pg = bk->bk_iovs[i].bv_page;
-                if (brw_check_page(pg, pattern, magic)) {
+                off = bk->bk_iovs[i].bv_offset;
+                len = bk->bk_iovs[i].bv_len;
+                if (brw_check_page(pg, off, len, pattern, magic)) {
                         CERROR("Bulk page %p (%d/%d) is corrupted!\n",
                                pg, i, bk->bk_niov);
                         return 1;
@@ -276,6 +293,7 @@ brw_client_prep_rpc(struct sfw_test_unit *tsu,
                 len = npg * PAGE_SIZE;
         } else {
                 struct test_bulk_req_v1 *breq = &tsi->tsi_u.bulk_v1;
+                int off;
 
                 /*
                  * I should never get this step if it's unknown feature
@@ -286,7 +304,8 @@ brw_client_prep_rpc(struct sfw_test_unit *tsu,
                 opc = breq->blk_opc;
                 flags = breq->blk_flags;
                 len = breq->blk_len;
-                npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+                off = breq->blk_offset;
+                npg = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
         }
 
         rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, npg, len, &rpc);
@@ -789,14 +789,15 @@ lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, struct srpc_test_reqst *req)
 }
 
 static int
-lstcon_bulkrpc_v1_prep(lst_test_bulk_param_t *param, struct srpc_test_reqst *req)
+lstcon_bulkrpc_v1_prep(lst_test_bulk_param_t *param, bool is_client,
+                       struct srpc_test_reqst *req)
 {
         struct test_bulk_req_v1 *brq = &req->tsr_u.bulk_v1;
 
         brq->blk_opc = param->blk_opc;
         brq->blk_flags = param->blk_flags;
         brq->blk_len = param->blk_size;
-        brq->blk_offset = 0; /* reserved */
+        brq->blk_offset = is_client ? param->blk_cli_off : param->blk_srv_off;
 
         return 0;
 }
@@ -897,7 +898,8 @@ lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned feats,
                                             &test->tes_param[0], trq);
                 } else {
                         rc = lstcon_bulkrpc_v1_prep((lst_test_bulk_param_t *)
-                                                    &test->tes_param[0], trq);
+                                                    &test->tes_param[0],
+                                                    trq->tsr_is_client, trq);
                 }
 
                 break;
@@ -1101,7 +1101,7 @@ sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len,
         LASSERT(!rpc->srpc_bulk);
         LASSERT(npages > 0 && npages <= LNET_MAX_IOV);
 
-        rpc->srpc_bulk = srpc_alloc_bulk(cpt, npages, len, sink);
+        rpc->srpc_bulk = srpc_alloc_bulk(cpt, 0, npages, len, sink);
         if (!rpc->srpc_bulk)
                 return -ENOMEM;
 
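Note that sfw_alloc_pages(), which allocates the server-side bulk, passes 0 for the new offset argument of srpc_alloc_bulk(); together with blk_srv_off staying reserved, this is why the offset is only effective on the client side for now.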
@@ -84,14 +84,13 @@ void srpc_set_counters(const srpc_counters_t *cnt)
 }
 
 static int
-srpc_add_bulk_page(struct srpc_bulk *bk, struct page *pg, int i, int nob)
+srpc_add_bulk_page(struct srpc_bulk *bk, struct page *pg, int i, int off,
+                   int nob)
 {
-        nob = min_t(int, nob, PAGE_SIZE);
-
-        LASSERT(nob > 0);
-        LASSERT(i >= 0 && i < bk->bk_niov);
+        LASSERT(off < PAGE_SIZE);
+        LASSERT(nob > 0 && nob <= PAGE_SIZE);
 
-        bk->bk_iovs[i].bv_offset = 0;
+        bk->bk_iovs[i].bv_offset = off;
         bk->bk_iovs[i].bv_page = pg;
         bk->bk_iovs[i].bv_len = nob;
         return nob;
@@ -117,7 +116,8 @@ srpc_free_bulk(struct srpc_bulk *bk)
 }
 
 struct srpc_bulk *
-srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink)
+srpc_alloc_bulk(int cpt, unsigned int bulk_off, unsigned int bulk_npg,
+                unsigned int bulk_len, int sink)
 {
         struct srpc_bulk *bk;
         int i;
@@ -148,8 +148,11 @@ srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink)
                         return NULL;
                 }
 
-                nob = srpc_add_bulk_page(bk, pg, i, bulk_len);
+                nob = min_t(unsigned int, bulk_off + bulk_len, PAGE_SIZE) -
+                      bulk_off;
+                srpc_add_bulk_page(bk, pg, i, bulk_off, nob);
                 bulk_len -= nob;
+                bulk_off = 0;
         }
 
         return bk;
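The loop above is where the offset actually lands in the bulk descriptor: only the first page's bio_vec gets bv_offset = bulk_off, its length is capped so offset plus length never crosses the page boundary, and every later page starts at offset 0. A user-space sketch of that split (illustrative only; fixed 4 KiB pages, printf standing in for filling bio_vecs):

#include <stdio.h>

#define PAGE_SIZE 4096U

int main(void)
{
        unsigned int bulk_off = 8, bulk_len = 8192 + 8;
        unsigned int i = 0;

        while (bulk_len) {
                unsigned int nob = (bulk_off + bulk_len < PAGE_SIZE ?
                                    bulk_off + bulk_len : PAGE_SIZE) - bulk_off;

                /* page i maps [bulk_off, bulk_off + nob) */
                printf("page %u: offset %u, length %u\n", i++, bulk_off, nob);
                bulk_len -= nob;
                bulk_off = 0;   /* every page after the first starts at 0 */
        }
        return 0;
}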
@@ -175,7 +175,7 @@ struct test_bulk_req_v1 {
         __u16 blk_opc;          /* bulk operation code */
         __u16 blk_flags;        /* data check flags */
         __u32 blk_len;          /* data length */
-        __u32 blk_offset;       /* reserved: offset */
+        __u32 blk_offset;       /* offset */
 } WIRE_ATTR;
 
 struct test_ping_req {
@@ -434,8 +434,9 @@ srpc_create_client_rpc(lnet_process_id_t peer, int service,
 void srpc_post_rpc(struct srpc_client_rpc *rpc);
 void srpc_abort_rpc(struct srpc_client_rpc *rpc, int why);
 void srpc_free_bulk(struct srpc_bulk *bk);
-struct srpc_bulk *srpc_alloc_bulk(int cpt, unsigned bulk_npg,
-                                  unsigned bulk_len, int sink);
+struct srpc_bulk *srpc_alloc_bulk(int cpt, unsigned int off,
+                                  unsigned int bulk_npg, unsigned int bulk_len,
+                                  int sink);
 int srpc_send_rpc(struct swi_workitem *wi);
 int srpc_send_reply(struct srpc_server_rpc *rpc);
 int srpc_add_service(struct srpc_service *sv);