!11912 update submodule akg, close graph kernel ascend ci testcases
From: @looop5
Reviewed-by:
Signed-off-by:
Commit df265b6d6b

akg (submodule):
@@ -1 +1 @@
-Subproject commit c63b2e6f7e7704f18b217e42c8c5c0b95e04b9fb
+Subproject commit 60841fc11dcdc4ae31e669f3d9cd9f2fd7af59cd
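The hunk above only moves the akg gitlink to a newer revision; existing checkouts pick it up with a plain git submodule update --init akg after fetching.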
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2021 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -103,8 +103,22 @@ def _auto_enable_graph_kernel(device_target, graph_kernel_mode):
         cfg.optimizer == 'AdamWeightDecay'


-def run_pretrain():
-    """pre-train bert_clue"""
+def _set_graph_kernel_context(device_target, enable_graph_kernel, is_auto_enable_graph_kernel):
+    if enable_graph_kernel == "true" or is_auto_enable_graph_kernel:
+        if device_target == 'GPU':
+            context.set_context(enable_graph_kernel=True)
+        else:
+            logger.warning('Graph kernel only supports GPU back-end now, run with graph kernel off.')
+
+
+def _check_compute_type(device_target, is_auto_enable_graph_kernel):
+    if device_target == 'GPU' and bert_net_cfg.compute_type != mstype.float32 and not is_auto_enable_graph_kernel:
+        logger.warning('Gpu only support fp32 temporarily, run with fp32.')
+        bert_net_cfg.compute_type = mstype.float32
+
+
+def argparse_init():
+    """Argparse init."""
     parser = argparse.ArgumentParser(description='bert pre_training')
     parser.add_argument('--device_target', type=str, default='Ascend', choices=['Ascend', 'GPU'],
                         help='device where the code will be implemented. (Default: Ascend)')
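For readers skimming the diff, a minimal runnable sketch of the gating behavior the new helper encodes, with the MindSpore context and logger objects stubbed out (the stubs are assumptions for illustration; the helper body mirrors the diff):

# Sketch: the graph kernel gate extracted above, with stand-ins for the
# MindSpore `context` and `logger` objects (the stand-ins are illustrative only).
import logging

logging.basicConfig()
logger = logging.getLogger("run_pretrain")

class _FakeContext:
    def set_context(self, **kwargs):
        print("set_context:", kwargs)

context = _FakeContext()

def _set_graph_kernel_context(device_target, enable_graph_kernel, is_auto_enable_graph_kernel):
    # Same logic as the diff: graph kernel is only ever enabled on GPU.
    if enable_graph_kernel == "true" or is_auto_enable_graph_kernel:
        if device_target == 'GPU':
            context.set_context(enable_graph_kernel=True)
        else:
            logger.warning('Graph kernel only supports GPU back-end now, run with graph kernel off.')

_set_graph_kernel_context('GPU', 'auto', True)      # prints set_context: {'enable_graph_kernel': True}
_set_graph_kernel_context('Ascend', 'true', False)  # warns and leaves graph kernel off

This is the behavioral change that pairs with the test edits below: requesting graph kernel on Ascend now degrades to a warning instead of enabling the feature.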
@@ -137,7 +151,12 @@ def run_pretrain():
     parser.add_argument("--schema_dir", type=str, default="", help="Schema path, it is better to use absolute path")
     parser.add_argument("--enable_graph_kernel", type=str, default="auto", choices=["auto", "true", "false"],
                         help="Accelerate by graph kernel, default is auto.")
+    return parser
+
+
+def run_pretrain():
+    """pre-train bert_clue"""
+    parser = argparse_init()
     args_opt = parser.parse_args()
     context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.device_target, device_id=args_opt.device_id)
     context.set_context(reserve_class_name_in_scope=False)
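A quick usage sketch of the extracted parser (trimmed to two of the flags; the argument values are arbitrary examples):

# Sketch: argparse_init() as refactored above, trimmed to two flags.
import argparse

def argparse_init():
    """Argparse init."""
    parser = argparse.ArgumentParser(description='bert pre_training')
    parser.add_argument('--device_target', type=str, default='Ascend', choices=['Ascend', 'GPU'])
    parser.add_argument('--enable_graph_kernel', type=str, default='auto',
                        choices=['auto', 'true', 'false'])
    return parser

args_opt = argparse_init().parse_args(['--device_target', 'GPU'])
print(args_opt.device_target, args_opt.enable_graph_kernel)  # -> GPU auto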
@@ -163,15 +182,8 @@ def run_pretrain():
         device_num = 1

     is_auto_enable_graph_kernel = _auto_enable_graph_kernel(args_opt.device_target, args_opt.enable_graph_kernel)

-    if args_opt.enable_graph_kernel == "true" or is_auto_enable_graph_kernel:
-        context.set_context(enable_graph_kernel=True)
-
-    if args_opt.device_target == 'GPU' and bert_net_cfg.compute_type != mstype.float32 and \
-            not is_auto_enable_graph_kernel:
-        logger.warning('Gpu only support fp32 temporarily, run with fp32.')
-        bert_net_cfg.compute_type = mstype.float32
-
+    _set_graph_kernel_context(args_opt.device_target, args_opt.enable_graph_kernel, is_auto_enable_graph_kernel)
+    _check_compute_type(args_opt.device_target, is_auto_enable_graph_kernel)
     if args_opt.accumulation_steps > 1:
         logger.info("accumulation steps: {}".format(args_opt.accumulation_steps))
@@ -174,8 +174,8 @@ class TimeMonitor(Callback):
         self.per_step_mseconds_list.append(epoch_mseconds / self.data_size)


-def test_bert_percision(enable_graph_kernel=False):
-    """test bert percision"""
+def test_bert_precision(enable_graph_kernel=False):
+    """test bert precision"""
     context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", reserve_class_name_in_scope=False)
     if enable_graph_kernel:
         context.set_context(enable_graph_kernel=True)
@@ -249,18 +249,13 @@ def test_bert_percision(enable_graph_kernel=False):
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
-def test_bert_percision_graph_kernel_off():
-    test_bert_percision(enable_graph_kernel=False)
+def test_bert_precision_graph_kernel_off():
+    test_bert_precision(enable_graph_kernel=False)


-@pytest.mark.level0
-@pytest.mark.platform_arm_ascend_training
-@pytest.mark.platform_x86_ascend_training
-@pytest.mark.env_onecard
-def test_bert_percision_graph_kernel_on():
-    test_bert_percision(enable_graph_kernel=True)
+def test_bert_precision_graph_kernel_on():
+    test_bert_precision(enable_graph_kernel=True)


 if __name__ == '__main__':
-    test_bert_percision(enable_graph_kernel=False)
-    test_bert_percision(enable_graph_kernel=True)
+    test_bert_precision(enable_graph_kernel=False)
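The remaining hunks all follow the pattern visible here: the Ascend CI markers are deleted while the test bodies stay in place. A minimal illustration of why that closes the cases (the test names below are made up): MindSpore CI selects cases by custom pytest markers, typically via a marker expression such as pytest -m platform_x86_ascend_training.

# Sketch: marker-based collection. With the decorators, a marker-filtered CI
# run (e.g. pytest -m env_onecard) collects the case; without them it does not.
import pytest

@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_collected_by_ascend_ci():   # made-up name
    assert True

def test_no_longer_collected():      # made-up name; body kept, CI gate removed
    assert True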
@@ -13,7 +13,6 @@
 # limitations under the License.
 # ============================================================================
 import numpy as np
-import pytest
 import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
@@ -60,10 +59,6 @@ def test_clip_by_norm_no_div_sum(shape0, shape1, shape2, shape3, dtype):
     assert np.allclose(expect_np, output_np, 0.0001, 0.0001)


-@pytest.mark.level0
-@pytest.mark.platform_arm_ascend_training
-@pytest.mark.platform_x86_ascend_training
-@pytest.mark.env_onecard
 def test_clip_by_norm_no_div_sum_ascend():
     context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
     test_clip_by_norm_no_div_sum((1, 1), (1,), (1, 1), (1,), np.float32)
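These op-level tests all assert numeric agreement via np.allclose with explicit tolerances, as in the context line above; a toy stand-in (the array values are made up):

# Sketch: the comparison pattern used by these graph kernel tests.
import numpy as np

expect_np = np.array([1.0, 2.0, 3.0], np.float32)        # baseline result
output_np = expect_np + 5e-5                              # output with small numeric drift
assert np.allclose(expect_np, output_np, 0.0001, 0.0001)  # rtol=1e-4, atol=1e-4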
@@ -54,10 +54,6 @@ def test_basic_gpu():
     test_basic()


-@pytest.mark.level0
-@pytest.mark.platform_arm_ascend_training
-@pytest.mark.platform_x86_ascend_training
-@pytest.mark.env_onecard
 def test_basic_ascend():
     context.set_context(mode=context.GRAPH_MODE, enable_graph_kernel=True, device_target="Ascend")
     test_basic()
@@ -70,10 +70,6 @@ def test_basic_gpu():
     test_basic()


-@pytest.mark.level0
-@pytest.mark.platform_arm_ascend_training
-@pytest.mark.platform_x86_ascend_training
-@pytest.mark.env_onecard
 def test_basic_ascend():
     context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
     test_basic()
@@ -135,10 +135,6 @@ def test_adam_gpu():
     test_adam()


-@pytest.mark.level0
-@pytest.mark.platform_arm_ascend_training
-@pytest.mark.platform_x86_ascend_training
-@pytest.mark.env_onecard
 def test_adam_ascend():
     context.set_context(mode=context.GRAPH_MODE, enable_graph_kernel=True, device_target="Ascend")
     test_adam()
@@ -152,10 +148,6 @@ def test_adam_weight_decay_gpu():
     test_adam_weight_decay()


-@pytest.mark.level0
-@pytest.mark.platform_arm_ascend_training
-@pytest.mark.platform_x86_ascend_training
-@pytest.mark.env_onecard
 def test_adam_weight_decay_ascend():
     context.set_context(mode=context.GRAPH_MODE, enable_graph_kernel=True, device_target="Ascend")
     test_adam_weight_decay()
@@ -85,10 +85,6 @@ def test_gelu_gpu():
     test_gelu()


-@pytest.mark.level0
-@pytest.mark.platform_arm_ascend_training
-@pytest.mark.platform_x86_ascend_training
-@pytest.mark.env_onecard
 def test_gelu_ascend():
     context.set_context(mode=context.GRAPH_MODE, enable_graph_kernel=True, device_target="Ascend")
     test_gelu()
@@ -102,10 +98,6 @@ def test_gelu_grad_gpu():
     test_gelu_grad()


-@pytest.mark.level0
-@pytest.mark.platform_arm_ascend_training
-@pytest.mark.platform_x86_ascend_training
-@pytest.mark.env_onecard
 def test_gelu_grad_ascend():
     context.set_context(mode=context.GRAPH_MODE, enable_graph_kernel=True, device_target="Ascend")
     test_gelu_grad()
@@ -13,7 +13,6 @@
 # limitations under the License.
 # ============================================================================
-
 import pytest
 import numpy as np
 import mindspore.context as context
 from mindspore import Tensor, Parameter
@@ -135,10 +134,6 @@ def test_graph_kernel_lamb_gpu():
     test_graph_kernel_lamb()


-@pytest.mark.level0
-@pytest.mark.platform_arm_ascend_training
-@pytest.mark.platform_x86_ascend_training
-@pytest.mark.env_onecard
 def test_graph_kernel_lamb_ascend():
     context.set_context(mode=context.GRAPH_MODE, enable_graph_kernel=True, device_target="Ascend")
     test_graph_kernel_lamb()
@@ -144,10 +144,6 @@ def test_basic_gpu():
     test_basic()


-@pytest.mark.level0
-@pytest.mark.platform_arm_ascend_training
-@pytest.mark.platform_x86_ascend_training
-@pytest.mark.env_onecard
 def test_basic_ascend():
     context.set_context(mode=context.GRAPH_MODE, enable_graph_kernel=True, device_target="Ascend")
     test_basic()
@@ -161,10 +157,6 @@ def test_layernormgrad_gpu():
     test_layernormgrad()


-@pytest.mark.level0
-@pytest.mark.platform_arm_ascend_training
-@pytest.mark.platform_x86_ascend_training
-@pytest.mark.env_onecard
 def test_layernormgrad_ascend():
     context.set_context(mode=context.GRAPH_MODE, enable_graph_kernel=True, device_target="Ascend")
     test_layernormgrad()
@@ -107,19 +107,11 @@ def test_logsoftmaxgrad_gpu():
     test_logsoftmaxgrad()


-@pytest.mark.level0
-@pytest.mark.platform_arm_ascend_training
-@pytest.mark.platform_x86_ascend_training
-@pytest.mark.env_onecard
 def test_logsoftmax_asend():
     context.set_context(mode=context.GRAPH_MODE, enable_graph_kernel=True, device_target="Ascend")
     test_logsoftmax()


-@pytest.mark.level0
-@pytest.mark.platform_arm_ascend_training
-@pytest.mark.platform_x86_ascend_training
-@pytest.mark.env_onecard
 def test_logsoftmaxgrad_asend():
     context.set_context(mode=context.GRAPH_MODE, enable_graph_kernel=True, device_target="Ascend")
     test_logsoftmaxgrad()
@@ -51,10 +51,6 @@ def test_maximum_grad_gpu():
     test_maximum_grad()


-@pytest.mark.level0
-@pytest.mark.platform_arm_ascend_training
-@pytest.mark.platform_x86_ascend_training
-@pytest.mark.env_onecard
 def test_maximum_grad_ascend():
     context.set_context(mode=context.GRAPH_MODE, enable_graph_kernel=True, device_target="Ascend")
     test_maximum_grad()
@@ -51,10 +51,6 @@ def test_basic_gpu():
     test_minimum_grad()


-@pytest.mark.level0
-@pytest.mark.platform_arm_ascend_training
-@pytest.mark.platform_x86_ascend_training
-@pytest.mark.env_onecard
 def test_basic_ascend():
     context.set_context(mode=context.GRAPH_MODE, enable_graph_kernel=True, device_target="Ascend")
     test_minimum_grad()
@@ -48,10 +48,6 @@ def test_reduce_mean_gpu():
     test_reduce_mean()


-@pytest.mark.level0
-@pytest.mark.platform_arm_ascend_training
-@pytest.mark.platform_x86_ascend_training
-@pytest.mark.env_onecard
 def test_reduce_mean_ascend():
     context.set_context(mode=context.GRAPH_MODE, enable_graph_kernel=True, device_target="Ascend")
     test_reduce_mean()
@@ -76,10 +76,6 @@ def test_basic_gpu():
     test_basic()


-@pytest.mark.level0
-@pytest.mark.platform_arm_ascend_training
-@pytest.mark.platform_x86_ascend_training
-@pytest.mark.env_onecard
 def test_basic_ascend():
     context.set_context(mode=context.GRAPH_MODE, enable_graph_kernel=True, device_target="Ascend")
     test_basic()
@@ -13,7 +13,6 @@
 # limitations under the License.
 # ============================================================================
 import numpy as np
-import pytest
 import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
@@ -56,10 +55,6 @@ def test_sqrt_grad(shape_x, shape_dout, dtype):
     assert np.allclose(expect_np, output_np, rtol, atol)


-@pytest.mark.level0
-@pytest.mark.platform_arm_ascend_training
-@pytest.mark.platform_x86_ascend_training
-@pytest.mark.env_onecard
 def test_sqrt_grad_ascend():
     context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
     test_sqrt_grad((16, 16), (16, 16), np.float16)
@@ -49,10 +49,6 @@ def test_tanh_grad_gpu():
     test_tanh_grad()


-@pytest.mark.level0
-@pytest.mark.platform_arm_ascend_training
-@pytest.mark.platform_x86_ascend_training
-@pytest.mark.env_onecard
 def test_tanh_grad_ascend():
     context.set_context(mode=context.GRAPH_MODE, enable_graph_kernel=True, device_target="Ascend")
     test_tanh_grad()
@@ -13,7 +13,6 @@
 # limitations under the License.
 # ============================================================================
 import numpy as np
-import pytest
 import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
@@ -49,10 +48,6 @@ def test_tile(shape, dtype, multiples):
     assert np.allclose(expect_np, output_np, 0.0001, 0.0001)


-@pytest.mark.level0
-@pytest.mark.platform_arm_ascend_training
-@pytest.mark.platform_x86_ascend_training
-@pytest.mark.env_onecard
 def test_tile_ascend():
     context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
     test_tile((24, 1), np.float16, (2, 2, 2))