fix test cases for models: update model_zoo references to tests/models

chenhaozhe 2021-09-19 16:05:20 +08:00
parent 60b33924dc
commit 12fab451e8
29 changed files with 50 additions and 494 deletions
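The change itself is mechanical: every reference to the old model_zoo location is redirected to tests/models, in Python import statements as well as in shell-script paths, and three stale test files are removed outright, which accounts for most of the 494 deleted lines. As a rough illustration only, not necessarily how this commit was produced, a rewrite of that kind can be scripted along the following lines; the file extensions handled and the root directory passed in are assumptions.

# Illustrative sketch: bulk-rewrite model_zoo references to tests/models.
# Deliberately simplified; it does not cover every spelling seen in the hunks
# below (for example a bare "../../../model_zoo" without a trailing slash).
import re
from pathlib import Path

PATTERNS = [
    (re.compile(r"\bmodel_zoo\."), "tests.models."),   # dotted module paths in imports
    (re.compile(r"\bmodel_zoo/"), "tests/models/"),    # filesystem paths in strings and scripts
]

def rewrite(root):
    for path in Path(root).rglob("*"):
        if path.suffix not in (".py", ".sh"):
            continue
        text = path.read_text(encoding="utf-8")
        new_text = text
        for pattern, replacement in PATTERNS:
            new_text = pattern.sub(replacement, new_text)
        if new_text != text:
            path.write_text(new_text, encoding="utf-8")

rewrite("tests")  # hypothetical invocation over the test tree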

View File

@@ -25,7 +25,7 @@ import mindspore.ops.functional as F
 from mindspore import nn
 from mindspore.common.initializer import TruncatedNormal
 from mindspore.common.tensor import Tensor
-from mindspore.model_zoo.Bert_NEZHA.bert_model import SaturateCast, RelaPosEmbeddingsGenerator
+from mindspore.tests.models.Bert_NEZHA.bert_model import SaturateCast, RelaPosEmbeddingsGenerator
 from mindspore.ops import operations as P

View File

@@ -19,8 +19,8 @@ import numpy as np
 import mindspore.common.dtype as mstype
 from mindspore import context, nn
-from mindspore.model_zoo.Bert_NEZHA import GetNextSentenceOutput, BertNetworkWithLoss
-from mindspore.model_zoo.Bert_NEZHA.bert_model import BertConfig, \
+from mindspore.tests.models.Bert_NEZHA import GetNextSentenceOutput, BertNetworkWithLoss
+from mindspore.tests.models.Bert_NEZHA.bert_model import BertConfig, \
     EmbeddingLookup, EmbeddingPostprocessor, BertOutput, \
     BertAttention, BertSelfAttention, SaturateCast, TruncatedNormal, \
     BertEncoderCell, BertTransformer, CreateAttentionMaskFromInputMask, BertModel

View File

@@ -19,7 +19,7 @@ import numpy as np
 import mindspore.common.dtype as mstype
 from mindspore import context
-from mindspore.model_zoo.Bert_NEZHA.bert_model import BertAttention, SaturateCast, \
+from mindspore.tests.models.Bert_NEZHA.bert_model import BertAttention, SaturateCast, \
     EmbeddingLookup, BertModel, \
     BertConfig, EmbeddingPostprocessor, \
     BertTransformer, BertEncoderCell, \

View File

@@ -18,9 +18,9 @@
 import os
 import numpy as np
-from mindspore.model_zoo.Bert_NEZHA import EmbeddingLookup, GetMaskedLMOutput, \
+from mindspore.tests.models.Bert_NEZHA import EmbeddingLookup, GetMaskedLMOutput, \
     BertConfig, BertPreTraining, BertNetworkWithLoss
-from mindspore.model_zoo.Bert_NEZHA.bert_model import BertModel
+from mindspore.tests.models.Bert_NEZHA.bert_model import BertModel
 from mindspore import Tensor
 from mindspore import nn, context

View File

@@ -26,7 +26,7 @@ from mindspore.ops import operations as P
 from mindspore.nn.optim import AdamWeightDecay
 from mindspore.train.loss_scale_manager import DynamicLossScaleManager
 from mindspore.nn import learning_rate_schedule as lr_schedules
-from model_zoo.official.nlp.bert.src import BertConfig, BertNetworkWithLoss, BertTrainOneStepCell, BertTrainOneStepWithLossScaleCell
+from tests.models.official.nlp.bert.src import BertConfig, BertNetworkWithLoss, BertTrainOneStepCell, BertTrainOneStepWithLossScaleCell
 from ...dataset_mock import MindData
 from ...ops_common import nn, np, batch_tuple_tensor, build_construct_graph

View File

@@ -23,8 +23,8 @@ from mindspore import context
 from mindspore.nn.metrics import Accuracy
 from mindspore.train import Model
 from mindspore.train.callback import LossMonitor, TimeMonitor
-from model_zoo.official.cv.lenet.src.dataset import create_dataset
-from model_zoo.official.cv.lenet.src.lenet import LeNet5
+from tests.models.official.cv.lenet.src.dataset import create_dataset
+from tests.models.official.cv.lenet.src.lenet import LeNet5
 np.set_printoptions(threshold=np.inf)
 device_num = 2
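For context, the imports in this hunk are the standard MindSpore training wiring for the LeNet case. A minimal sketch of how they usually fit together is below; the dataset path, the hyper-parameters, and the exact signatures of create_dataset and LeNet5 are assumptions, not taken from this test.

# Hedged sketch of the usual LeNet training loop around these imports.
# The MNIST location, batch settings, and constructor arguments are assumptions.
import mindspore.nn as nn

ds_train = create_dataset("/path/to/MNIST/train")                      # hypothetical location
network = LeNet5(num_class=10)
loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
opt = nn.Momentum(network.trainable_params(), learning_rate=0.01, momentum=0.9)
model = Model(network, loss, opt, metrics={"Accuracy": Accuracy()})
model.train(1, ds_train, callbacks=[LossMonitor(), TimeMonitor()])     # one epoch, for illustration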

View File

@@ -23,7 +23,7 @@ import pytest
 from mindspore import Tensor
 from mindspore.train.serialization import export, load_checkpoint
 from mindspore import context
-from model_zoo.official.cv.mobilenetv2.src.mobilenetV2 import MobileNetV2Backbone, MobileNetV2Head, mobilenet_v2
+from tests.models.official.cv.mobilenetv2.src.mobilenetV2 import MobileNetV2Backbone, MobileNetV2Head, mobilenet_v2
 context.set_context(mode=context.GRAPH_MODE, device_target="GPU")

View File

@@ -18,10 +18,10 @@ import pytest
 import numpy as np
 from mindspore import context
 from mindspore import Tensor
-from model_zoo.official.gnn.gcn.src.gcn import GCN
-from model_zoo.official.gnn.gcn.src.metrics import LossAccuracyWrapper, TrainNetWrapper
-from model_zoo.official.gnn.gcn.src.config import ConfigGCN
-from model_zoo.official.gnn.gcn.src.dataset import get_adj_features_labels, get_mask
+from tests.models.official.gnn.gcn.src.gcn import GCN
+from tests.models.official.gnn.gcn.src.metrics import LossAccuracyWrapper, TrainNetWrapper
+from tests.models.official.gnn.gcn.src.config import ConfigGCN
+from tests.models.official.gnn.gcn.src.dataset import get_adj_features_labels, get_mask
 DATA_DIR = '/home/workspace/mindspore_dataset/cora/cora_mr/cora_mr'

View File

@@ -24,7 +24,7 @@ from tests.st.model_zoo_tests import utils
 @pytest.mark.env_onecard
 def test_BGCF_amazon_beauty():
     cur_path = os.path.dirname(os.path.abspath(__file__))
-    model_path = "{}/../../../../model_zoo/official/gnn".format(cur_path)
+    model_path = "{}/../../../../tests/models/official/gnn".format(cur_path)
     model_name = "bgcf"
     utils.copy_files(model_path, cur_path, model_name)
     cur_model_path = os.path.join(cur_path, model_name)
@@ -61,7 +61,7 @@ def test_BGCF_amazon_beauty():
 def test_bgcf_export_mindir():
     cur_path = os.getcwd()
-    model_path = "{}/../../../../model_zoo/official/gnn".format(cur_path)
+    model_path = "{}/../../../../tests/models/official/gnn".format(cur_path)
     model_name = "bgcf"
     utils.copy_files(model_path, cur_path, model_name)
     cur_model_path = os.path.join(cur_path, model_name)
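The model_path computation in these hunks recurs across the ST tests that follow: the test resolves its own directory, climbs four levels (which, going by the ../../../.. in the string, lands at the repository root), and then descends into tests/models. A pathlib equivalent under that assumed layout:

# Pathlib equivalent of: "{}/../../../../tests/models/official/gnn".format(cur_path)
# The directory depth is inferred from the four ".." segments; it is an assumption,
# not something stated in the diff.
from pathlib import Path

cur_path = Path(__file__).resolve().parent      # os.path.dirname(os.path.abspath(__file__))
repo_root = cur_path.parents[3]                 # climbs the four ".." of the original string
model_path = repo_root / "tests" / "models" / "official" / "gnn"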

View File

@@ -24,7 +24,7 @@ from tests.st.model_zoo_tests import utils
 @pytest.mark.env_single
 def test_center_net():
     cur_path = os.path.dirname(os.path.abspath(__file__))
-    model_path = "{}/../../../../model_zoo/research/cv".format(cur_path)
+    model_path = "{}/../../../../tests/models/research/cv".format(cur_path)
     model_name = "centernet"
     utils.copy_files(model_path, cur_path, model_name)
     cur_model_path = os.path.join(cur_path, model_name)

View File

@@ -14,7 +14,7 @@
 # limitations under the License.
 # ============================================================================
 echo "=============================================================================================================="
-echo "Please run the scipt as: "
+echo "Please run the script as: "
 echo "for example: bash run_deeplabv3_ci.sh DEVICE_ID DATA_PATH PRETRAINED_CKPT_PATH"
 echo "=============================================================================================================="
 DEVICE_ID=$1
@@ -23,10 +23,10 @@ PATH_CHECKPOINT=$3
 BASE_PATH=$(cd "$(dirname $0)"; pwd)
 unset SLOG_PRINT_TO_STDOUT
 CODE_DIR="./"
-if [ -d ${BASE_PATH}/../../../../model_zoo/deeplabv3 ]; then
-    CODE_DIR=${BASE_PATH}/../../../../model_zoo/deeplabv3
-elif [ -d ${BASE_PATH}/../../model_zoo/deeplabv3 ]; then
-    CODE_DIR=${BASE_PATH}/../../model_zoo/deeplabv3
+if [ -d ${BASE_PATH}/../../../../tests/models/deeplabv3 ]; then
+    CODE_DIR=${BASE_PATH}/../../../../tests/models/deeplabv3
+elif [ -d ${BASE_PATH}/../../tests/models/deeplabv3 ]; then
+    CODE_DIR=${BASE_PATH}/../../tests/models/deeplabv3
 else
     echo "[ERROR] code dir is not found"
 fi

View File

@@ -24,7 +24,7 @@ from tests.st.model_zoo_tests import utils
 @pytest.mark.env_single
 def test_DeeplabV3_voc2007():
     cur_path = os.path.dirname(os.path.abspath(__file__))
-    model_path = "{}/../../../../model_zoo/official/cv".format(cur_path)
+    model_path = "{}/../../../../tests/models/official/cv".format(cur_path)
     model_name = "deeplabv3"
     utils.copy_files(model_path, cur_path, model_name)
     cur_model_path = os.path.join(cur_path, model_name)

View File

@@ -24,7 +24,7 @@ from tests.st.model_zoo_tests import utils
 @pytest.mark.env_onecard
 def test_FaceDetection_WIDER():
     cur_path = os.path.dirname(os.path.abspath(__file__))
-    model_path = "{}/../../../../model_zoo/research/cv".format(cur_path)
+    model_path = "{}/../../../../tests/models/research/cv".format(cur_path)
     model_name = "FaceDetection"
     utils.copy_files(model_path, cur_path, model_name)
     cur_model_path = os.path.join(cur_path, model_name)

View File

@@ -24,7 +24,7 @@ from tests.st.model_zoo_tests import utils
 @pytest.mark.env_onecard
 def test_lenet_MNIST():
     cur_path = os.path.dirname(os.path.abspath(__file__))
-    model_path = "{}/../../../../model_zoo/official/cv".format(cur_path)
+    model_path = "{}/../../../../tests/models/official/cv".format(cur_path)
     model_name = "lenet"
     utils.copy_files(model_path, cur_path, model_name)
     cur_model_path = os.path.join(cur_path, model_name)

View File

@@ -31,7 +31,7 @@ def test_maskrcnn_export():
     new_list = ["(config=config)\\n '''", "(net, param_dict_new)\\n '''"]
     cur_path = os.getcwd()
-    model_path = "{}/../../../../model_zoo/official/cv".format(cur_path)
+    model_path = "{}/../../../../tests/models/official/cv".format(cur_path)
     model_name = "maskrcnn"
     utils.copy_files(model_path, cur_path, model_name)
     cur_model_path = os.path.join(cur_path, model_name)

View File

@@ -25,7 +25,7 @@ from tests.st.model_zoo_tests import utils
 @pytest.mark.env_single
 def test_resnet50_cifar10_ascend():
     cur_path = os.path.dirname(os.path.abspath(__file__))
-    model_path = "{}/../../../../model_zoo/official/cv".format(cur_path)
+    model_path = "{}/../../../../tests/models/official/cv".format(cur_path)
     model_name = "resnet"
     utils.copy_files(model_path, cur_path, model_name)
     cur_model_path = os.path.join(cur_path, "resnet")
@@ -56,7 +56,7 @@ def test_resnet50_cifar10_ascend():
 @pytest.mark.env_single
 def test_resnet50_cifar10_gpu():
     cur_path = os.getcwd()
-    model_path = "{}/../../../../model_zoo/official/cv".format(cur_path)
+    model_path = "{}/../../../../tests/models/official/cv".format(cur_path)
     model_name = "resnet"
     utils.copy_files(model_path, cur_path, model_name)
     cur_model_path = os.path.join(cur_path, "resnet")

View File

@@ -23,7 +23,7 @@ from tests.st.model_zoo_tests import utils
 @pytest.mark.env_single
 def test_retinaface_resnet50():
     cur_path = os.path.dirname(os.path.abspath(__file__))
-    model_path = "{}/../../../../model_zoo/official/cv".format(cur_path)
+    model_path = "{}/../../../../tests/models/official/cv".format(cur_path)
     model_name = "retinaface_resnet50"
     utils.copy_files(model_path, cur_path, model_name)
     cur_model_path = os.path.join(cur_path, model_name)

View File

@@ -25,7 +25,7 @@ from tests.st.model_zoo_tests import utils
 @pytest.mark.env_single
 def test_SSD_mobilenet_v1_fpn_coco2017():
     cur_path = os.path.dirname(os.path.abspath(__file__))
-    model_path = "{}/../../../../model_zoo/official/cv".format(cur_path)
+    model_path = "{}/../../../../tests/models/official/cv".format(cur_path)
     model_name = "ssd"
     utils.copy_files(model_path, cur_path, model_name)
     cur_model_path = os.path.join(cur_path, model_name)

View File

@@ -28,9 +28,9 @@ import mindspore.dataset as ds
 import mindspore.dataset.transforms.c_transforms as deC
 from mindspore import context
 from easydict import EasyDict as edict
-from model_zoo.official.nlp.transformer.src.transformer_model import TransformerConfig
-from model_zoo.official.nlp.transformer.src.transformer_for_train import TransformerNetworkWithLoss, TransformerTrainOneStepWithLossScaleCell
-from model_zoo.official.nlp.transformer.src.lr_schedule import create_dynamic_lr
+from tests.models.official.nlp.transformer.src.transformer_model import TransformerConfig
+from tests.models.official.nlp.transformer.src.transformer_for_train import TransformerNetworkWithLoss, TransformerTrainOneStepWithLossScaleCell
+from tests.models.official.nlp.transformer.src.lr_schedule import create_dynamic_lr
 from tests.st.model_zoo_tests import utils
@@ -223,7 +223,7 @@ def test_transformer():
 @pytest.mark.env_onecard
 def test_transformer_export_mindir():
     cur_path = os.path.dirname(os.path.abspath(__file__))
-    model_path = "{}/../../../../model_zoo/official/nlp".format(cur_path)
+    model_path = "{}/../../../../tests/models/official/nlp".format(cur_path)
     model_name = "transformer"
     utils.copy_files(model_path, cur_path, model_name)
     cur_model_path = os.path.join(cur_path, model_name)

View File

@@ -18,7 +18,7 @@ ckpt_root = "/home/workspace/mindspore_dataset/checkpoint"
 cur_path = os.path.split(os.path.realpath(__file__))[0]
 geir_root = os.path.join(cur_path, "mindspore_geir")
 arm_main_path = os.path.join(cur_path, "mindir_310infer_exe")
-model_zoo_path = os.path.join(cur_path, "../../../model_zoo")
+model_zoo_path = os.path.join(cur_path, "../../../tests/models")
 def copy_files(from_, to_, model_name):
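copy_files is the helper the ST tests above call to stage a model directory next to the running test; its body is outside this diff. A minimal stand-in that is consistent with those call sites might look as follows. This is an assumption about its behaviour, not the actual implementation in the utils module.

# Hypothetical minimal copy_files; the real helper may do more
# (cleanup of stale copies, permission handling, and so on).
import os
import shutil

def copy_files(from_, to_, model_name):
    src = os.path.join(from_, model_name)   # e.g. <repo>/tests/models/official/gnn/bgcf
    dst = os.path.join(to_, model_name)     # staged copy beside the test
    if os.path.exists(dst):
        shutil.rmtree(dst)
    shutil.copytree(src, dst)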

View File

@@ -21,10 +21,10 @@ export RANK_SIZE=$DEVICE_NUM
 unset SLOG_PRINT_TO_STDOUT
 export MINDSPORE_HCCL_CONFIG_PATH=$CONFIG_PATH/hccl/rank_table_${DEVICE_NUM}p.json
 CODE_DIR="./"
-if [ -d ${BASE_PATH}/../../../../model_zoo/official/recommend/wide_and_deep ]; then
-    CODE_DIR=${BASE_PATH}/../../../../model_zoo/official/recommend/wide_and_deep
-elif [ -d ${BASE_PATH}/../../model_zoo/official/recommend/wide_and_deep ]; then
-    CODE_DIR=${BASE_PATH}/../../model_zoo/official/recommend/wide_and_deep
+if [ -d ${BASE_PATH}/../../../../tests/models/official/recommend/wide_and_deep ]; then
+    CODE_DIR=${BASE_PATH}/../../../../tests/models/official/recommend/wide_and_deep
+elif [ -d ${BASE_PATH}/../../tests/models/official/recommend/wide_and_deep ]; then
+    CODE_DIR=${BASE_PATH}/../../tests/models/official/recommend/wide_and_deep
 else
     echo "[ERROR] code dir is not found"
 fi

View File

@@ -218,7 +218,7 @@ def test_yolov3_darknet53():
 @pytest.mark.env_single
 def test_yolov3_darknet_8p():
     cur_path = os.path.dirname(os.path.abspath(__file__))
-    model_path = "{}/../../../../model_zoo/official/cv".format(cur_path)
+    model_path = "{}/../../../../tests/models/official/cv".format(cur_path)
     model_name = "yolov3_darknet53"
     dataset_path = os.path.join(utils.data_root, "coco/coco2014/")
     ckpt_path = os.path.join(utils.ckpt_root, "yolov3_darknet/yolov3_darknet53_pretrain.ckpt")

View File

@@ -31,9 +31,9 @@ from mindspore.train.callback import Callback
 from mindspore.train.loss_scale_manager import DynamicLossScaleManager
 from mindspore.train.model import Model
 import mindspore.nn.learning_rate_schedule as lr_schedules
-from model_zoo.official.nlp.bert.src.bert_for_pre_training import BertNetworkWithLoss
-from model_zoo.official.nlp.bert.src.bert_for_pre_training import BertTrainOneStepWithLossScaleCell
-from model_zoo.official.nlp.bert.src.bert_model import BertConfig
+from tests.models.official.nlp.bert.src.bert_for_pre_training import BertNetworkWithLoss
+from tests.models.official.nlp.bert.src.bert_for_pre_training import BertTrainOneStepWithLossScaleCell
+from tests.models.official.nlp.bert.src.bert_model import BertConfig
 _current_dir = os.path.dirname(os.path.realpath(__file__))
 DATA_DIR = ["/home/workspace/mindspore_dataset/bert/example/examples.tfrecord"]
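DATA_DIR above points at a TFRecord shard; a pretraining test typically turns it into a dataset with mindspore.dataset before handing it to BertNetworkWithLoss. A hedged sketch of that step, where the column list is the conventional BERT pretraining schema rather than something taken from this diff:

# Hedged sketch: build the pretraining dataset from DATA_DIR.
# Column names and batch size are assumptions.
import mindspore.dataset as ds

columns = ["input_ids", "input_mask", "segment_ids",
           "next_sentence_labels", "masked_lm_positions",
           "masked_lm_ids", "masked_lm_weights"]
dataset = ds.TFRecordDataset(DATA_DIR, columns_list=columns, shuffle=False)
dataset = dataset.batch(32, drop_remainder=True)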

View File

@@ -33,9 +33,9 @@ from mindspore.train.model import Model
 from mindspore.train.train_thor import ConvertModelUtils
 import mindspore.dataset.transforms.c_transforms as C
-from model_zoo.official.nlp.bert.src.bert_for_pre_training import BertNetworkWithLoss, BertTrainOneStepCell
-from model_zoo.official.nlp.bert.src.utils import get_bert_thor_lr, get_bert_thor_damping
-from model_zoo.official.nlp.bert.src.bert_model import BertConfig
+from tests.models.official.nlp.bert.src.bert_for_pre_training import BertNetworkWithLoss, BertTrainOneStepCell
+from tests.models.official.nlp.bert.src.utils import get_bert_thor_lr, get_bert_thor_damping
+from tests.models.official.nlp.bert.src.bert_model import BertConfig
 MINDSPORE_HCCL_CONFIG_PATH = "/home/workspace/mindspore_config/hccl/rank_table_8p.json"
 DATASET_PATH = "/home/workspace/mindspore_dataset/bert/thor/en-wiki-512_test_first1wan"

View File

@@ -31,9 +31,9 @@ from mindspore.train.callback import Callback
 from mindspore.train.loss_scale_manager import DynamicLossScaleManager
 from mindspore.train.model import Model
 import mindspore.nn.learning_rate_schedule as lr_schedules
-from model_zoo.official.nlp.bert.src.bert_for_pre_training import BertNetworkWithLoss
-from model_zoo.official.nlp.bert.src.bert_for_pre_training import BertTrainOneStepWithLossScaleCell
-from model_zoo.official.nlp.bert.src.bert_model import BertConfig
+from tests.models.official.nlp.bert.src.bert_for_pre_training import BertNetworkWithLoss
+from tests.models.official.nlp.bert.src.bert_for_pre_training import BertTrainOneStepWithLossScaleCell
+from tests.models.official.nlp.bert.src.bert_model import BertConfig
 _current_dir = os.path.dirname(os.path.realpath(__file__))
 DATA_DIR = ["/home/workspace/mindspore_dataset/bert/example/examples.tfrecord"]

View File

@@ -1,258 +0,0 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
'''
Functional Cells used in Bert finetune and evaluation.
'''
import mindspore.nn as nn
from mindspore.common.initializer import TruncatedNormal
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.ops import composite as C
from mindspore.common.tensor import Tensor
from mindspore.common.parameter import Parameter, ParameterTuple
from mindspore.common import dtype as mstype
from mindspore.nn.wrap.grad_reducer import DistributedGradReducer
from mindspore.context import ParallelMode
from mindspore.communication.management import get_group_size
from mindspore import context
from mindspore.model_zoo.Bert_NEZHA.bert_model import BertModel
from .bert_for_pre_training import clip_grad
from .CRF import CRF
GRADIENT_CLIP_TYPE = 1
GRADIENT_CLIP_VALUE = 1.0
grad_scale = C.MultitypeFuncGraph("grad_scale")
reciprocal = P.Reciprocal()
@grad_scale.register("Tensor", "Tensor")
def tensor_grad_scale(scale, grad):
return grad * reciprocal(scale)
class BertFinetuneCell(nn.Cell):
"""
Specifically defined for finetuning where only four inputs tensor are needed.
"""
def __init__(self, network, optimizer, scale_update_cell=None):
super(BertFinetuneCell, self).__init__(auto_prefix=False)
self.network = network
self.weights = ParameterTuple(network.trainable_params())
self.optimizer = optimizer
self.grad = C.GradOperation(get_by_list=True,
sens_param=True)
self.reducer_flag = False
self.allreduce = P.AllReduce()
self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]:
self.reducer_flag = True
self.grad_reducer = None
if self.reducer_flag:
mean = context.get_auto_parallel_context("gradients_mean")
degree = get_group_size()
self.grad_reducer = DistributedGradReducer(optimizer.parameters, mean, degree)
self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE)
self.cast = P.Cast()
self.alloc_status = P.NPUAllocFloatStatus()
self.get_status = P.NPUGetFloatStatus()
self.clear_status = P.NPUClearFloatStatus()
self.reduce_sum = P.ReduceSum(keep_dims=False)
self.base = Tensor(1, mstype.float32)
self.less_equal = P.LessEqual()
self.hyper_map = C.HyperMap()
self.loss_scale = None
self.loss_scaling_manager = scale_update_cell
if scale_update_cell:
self.loss_scale = Parameter(Tensor(scale_update_cell.get_loss_scale(), dtype=mstype.float32),
name="loss_scale")
def construct(self,
input_ids,
input_mask,
token_type_id,
label_ids,
sens=None):
weights = self.weights
init = self.alloc_status()
loss = self.network(input_ids,
input_mask,
token_type_id,
label_ids)
if sens is None:
scaling_sens = self.loss_scale
else:
scaling_sens = sens
init = F.depend(init, loss)
clear_status = self.clear_status(init)
scaling_sens = F.depend(scaling_sens, clear_status)
grads = self.grad(self.network, weights)(input_ids,
input_mask,
token_type_id,
label_ids,
self.cast(scaling_sens,
mstype.float32))
grads = self.hyper_map(F.partial(grad_scale, scaling_sens), grads)
grads = self.hyper_map(F.partial(clip_grad, GRADIENT_CLIP_TYPE, GRADIENT_CLIP_VALUE), grads)
if self.reducer_flag:
grads = self.grad_reducer(grads)
init = F.depend(init, grads)
get_status = self.get_status(init)
init = F.depend(init, get_status)
flag_sum = self.reduce_sum(init, (0,))
if self.is_distributed:
flag_reduce = self.allreduce(flag_sum)
cond = self.less_equal(self.base, flag_reduce)
else:
cond = self.less_equal(self.base, flag_sum)
overflow = cond
if sens is None:
overflow = self.loss_scaling_manager(self.loss_scale, cond)
if not overflow:
self.optimizer(grads)
return (loss, cond)
class BertCLSModel(nn.Cell):
"""
This class is responsible for classification task evaluation, i.e. XNLI(num_labels=3),
LCQMC(num_labels=2), Chnsenti(num_labels=2). The returned output represents the final
logits as the results of log_softmax is proportional to that of softmax.
"""
def __init__(self, config, is_training, num_labels=2, dropout_prob=0.0, use_one_hot_embeddings=False):
super(BertCLSModel, self).__init__()
self.bert = BertModel(config, is_training, use_one_hot_embeddings)
self.cast = P.Cast()
self.weight_init = TruncatedNormal(config.initializer_range)
self.log_softmax = P.LogSoftmax(axis=-1)
self.dtype = config.dtype
self.num_labels = num_labels
self.dense_1 = nn.Dense(config.hidden_size, self.num_labels, weight_init=self.weight_init,
has_bias=True).to_float(config.compute_type)
self.dropout = nn.Dropout(1 - dropout_prob)
def construct(self, input_ids, input_mask, token_type_id):
_, pooled_output, _ = \
self.bert(input_ids, token_type_id, input_mask)
cls = self.cast(pooled_output, self.dtype)
cls = self.dropout(cls)
logits = self.dense_1(cls)
logits = self.cast(logits, self.dtype)
log_probs = self.log_softmax(logits)
return log_probs
class BertNERModel(nn.Cell):
"""
This class is responsible for sequence labeling task evaluation, i.e. NER(num_labels=11).
The returned output represents the final logits as the results of log_softmax is proportional to that of softmax.
"""
def __init__(self, config, is_training, num_labels=11, use_crf=False, dropout_prob=0.0,
use_one_hot_embeddings=False):
super(BertNERModel, self).__init__()
self.bert = BertModel(config, is_training, use_one_hot_embeddings)
self.cast = P.Cast()
self.weight_init = TruncatedNormal(config.initializer_range)
self.log_softmax = P.LogSoftmax(axis=-1)
self.dtype = config.dtype
self.num_labels = num_labels
self.dense_1 = nn.Dense(config.hidden_size, self.num_labels, weight_init=self.weight_init,
has_bias=True).to_float(config.compute_type)
self.dropout = nn.Dropout(1 - dropout_prob)
self.reshape = P.Reshape()
self.shape = (-1, config.hidden_size)
self.use_crf = use_crf
self.origin_shape = (config.batch_size, config.seq_length, self.num_labels)
def construct(self, input_ids, input_mask, token_type_id):
sequence_output, _, _ = \
self.bert(input_ids, token_type_id, input_mask)
seq = self.dropout(sequence_output)
seq = self.reshape(seq, self.shape)
logits = self.dense_1(seq)
logits = self.cast(logits, self.dtype)
if self.use_crf:
return_value = self.reshape(logits, self.origin_shape)
else:
return_value = self.log_softmax(logits)
return return_value
class CrossEntropyCalculation(nn.Cell):
"""
Cross Entropy loss
"""
def __init__(self, is_training=True):
super(CrossEntropyCalculation, self).__init__()
self.onehot = P.OneHot()
self.on_value = Tensor(1.0, mstype.float32)
self.off_value = Tensor(0.0, mstype.float32)
self.reduce_sum = P.ReduceSum()
self.reduce_mean = P.ReduceMean()
self.reshape = P.Reshape()
self.last_idx = (-1,)
self.neg = P.Neg()
self.cast = P.Cast()
self.is_training = is_training
def construct(self, logits, label_ids, num_labels):
if self.is_training:
label_ids = self.reshape(label_ids, self.last_idx)
one_hot_labels = self.onehot(label_ids, num_labels, self.on_value, self.off_value)
per_example_loss = self.neg(self.reduce_sum(one_hot_labels * logits, self.last_idx))
loss = self.reduce_mean(per_example_loss, self.last_idx)
return_value = self.cast(loss, mstype.float32)
else:
return_value = logits * 1.0
return return_value
class BertCLS(nn.Cell):
"""
Train interface for classification finetuning task.
"""
def __init__(self, config, is_training, num_labels=2, dropout_prob=0.0, use_one_hot_embeddings=False):
super(BertCLS, self).__init__()
self.bert = BertCLSModel(config, is_training, num_labels, dropout_prob, use_one_hot_embeddings)
self.loss = CrossEntropyCalculation(is_training)
self.num_labels = num_labels
def construct(self, input_ids, input_mask, token_type_id, label_ids):
log_probs = self.bert(input_ids, input_mask, token_type_id)
loss = self.loss(log_probs, label_ids, self.num_labels)
return loss
class BertNER(nn.Cell):
"""
Train interface for sequence labeling finetuning task.
"""
def __init__(self, config, is_training, num_labels=11, use_crf=False, tag_to_index=None, dropout_prob=0.0,
use_one_hot_embeddings=False):
super(BertNER, self).__init__()
self.bert = BertNERModel(config, is_training, num_labels, use_crf, dropout_prob, use_one_hot_embeddings)
if use_crf:
if not tag_to_index:
raise Exception("The dict for tag-index mapping should be provided for CRF.")
self.loss = CRF(tag_to_index, config.batch_size, config.seq_length, is_training)
else:
self.loss = CrossEntropyCalculation(is_training)
self.num_labels = num_labels
self.use_crf = use_crf
def construct(self, input_ids, input_mask, token_type_id, label_ids):
logits = self.bert(input_ids, input_mask, token_type_id)
if self.use_crf:
loss = self.loss(logits, label_ids)
else:
loss = self.loss(logits, label_ids, self.num_labels)
return loss

View File

@@ -22,7 +22,7 @@ from mindspore import Tensor
 from mindspore.nn import Dense
 from mindspore.nn import TrainOneStepCell, WithLossCell
 from mindspore.ops import operations as P
-from model_zoo.official.nlp.pangu_alpha.src.adam import AdamWeightDecayOp
+from tests.models.official.nlp.pangu_alpha.src.adam import AdamWeightDecayOp
 context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
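AdamWeightDecayOp is the PanGu-Alpha model's own optimizer implementation, and the surrounding imports (Dense, WithLossCell, TrainOneStepCell) suggest the usual single-step training wiring. A hedged sketch of that wiring follows; the toy network and the optimizer's keyword arguments are assumptions.

# Hedged sketch; AdamWeightDecayOp's exact keyword arguments are assumed.
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor

net = Dense(16, 4)                                         # toy network for illustration
loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
optimizer = AdamWeightDecayOp(net.trainable_params(), learning_rate=1e-3)
train_net = TrainOneStepCell(WithLossCell(net, loss_fn), optimizer)
data = Tensor(np.random.rand(8, 16).astype(np.float32))
label = Tensor(np.random.randint(0, 4, (8,)).astype(np.int32))
loss = train_net(data, label)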

View File

@@ -1,31 +0,0 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""test_vgg"""
import numpy as np
import pytest
from mindspore import Tensor
from model_zoo.official.cv.vgg16.src.vgg import vgg16
from model_zoo.official.cv.vgg16.model_utils.config import get_config_static
from ..ut_filter import non_graph_engine
cfg = get_config_static()
@non_graph_engine
def test_vgg16():
inputs = Tensor(np.random.rand(1, 3, 112, 112).astype(np.float32))
net = vgg16(args=cfg)
with pytest.raises(ValueError):
print(net.construct(inputs))

View File

@@ -1,155 +0,0 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" tests for quant """
import numpy as np
import pytest
import mindspore.context as context
from mindspore import Tensor
from mindspore import nn
from mindspore.compression.quant import QuantizationAwareTraining
from mindspore.compression.export import quant_export
from mindspore.compression.quant.quantizer import OptimizeOption
from model_zoo.official.cv.mobilenetv2_quant.src.mobilenetV2 import mobilenetV2
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
class LeNet5(nn.Cell):
"""
Lenet network
Args:
num_class (int): Num classes. Default: 10.
Returns:
Tensor, output tensor
Examples:
>>> LeNet(num_class=10)
"""
def __init__(self, num_class=10):
super(LeNet5, self).__init__()
self.num_class = num_class
self.conv1 = nn.Conv2dBnAct(1, 6, kernel_size=5, has_bn=True, activation='relu', pad_mode="valid")
self.conv2 = nn.Conv2dBnAct(6, 16, kernel_size=5, activation='relu', pad_mode="valid")
self.fc1 = nn.DenseBnAct(16 * 5 * 5, 120, activation='relu')
self.fc2 = nn.DenseBnAct(120, 84, activation='relu')
self.fc3 = nn.DenseBnAct(84, self.num_class)
self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
self.flatten = nn.Flatten()
def construct(self, x):
x = self.conv1(x)
x = self.max_pool2d(x)
x = self.conv2(x)
x = self.max_pool2d(x)
x = self.flatten(x)
x = self.fc1(x)
x = self.fc2(x)
x = self.fc3(x)
return x
@pytest.mark.skip(reason="no `te.lang.cce` in ut env")
def test_qat_lenet():
img = Tensor(np.ones((32, 1, 32, 32)).astype(np.float32))
net = LeNet5()
quantizer = QuantizationAwareTraining(bn_fold=True,
per_channel=[True, False],
symmetric=[True, False])
net = quantizer.quantize(net)
# should load the checkpoint. mock here
net.init_parameters_data()
quant_export.export(net, img, file_name="quant.pb")
@pytest.mark.skip(reason="no `te.lang.cce` in ut env")
def test_qat_mobile_per_channel_tf():
network = mobilenetV2(num_classes=1000)
img = Tensor(np.ones((1, 3, 224, 224)).astype(np.float32))
quantizer = QuantizationAwareTraining(bn_fold=True,
per_channel=[True, False],
symmetric=[True, False])
network = quantizer.quantize(network)
# should load the checkpoint. mock here
network.init_parameters_data()
quant_export.export(network, img, file_name="quant.pb")
@pytest.mark.skip(reason="no `te.lang.cce` in ut env")
def test_qat_mobile_per_channel_ff():
network = mobilenetV2(num_classes=1000)
img = Tensor(np.ones((1, 3, 224, 224)).astype(np.float32))
quantizer = QuantizationAwareTraining(bn_fold=True,
per_channel=[False, False],
symmetric=[True, False])
network = quantizer.quantize(network)
# should load the checkpoint. mock here
network.init_parameters_data()
quant_export.export(network, img, file_name="quant.pb")
@pytest.mark.skip(reason="no `te.lang.cce` in ut env")
def test_lsq_lenet():
img = Tensor(np.ones((32, 1, 32, 32)).astype(np.float32))
net = LeNet5()
quantizer = QuantizationAwareTraining(bn_fold=True,
per_channel=[True, False],
symmetric=[True, True],
narrow_range=[True, True],
freeze_bn=0,
quant_delay=0,
one_conv_fold=True,
optimize_option=OptimizeOption.LEARNED_SCALE)
net = quantizer.quantize(net)
# should load the checkpoint. mock here
net.init_parameters_data()
quant_export.export(net, img, file_name="quant.pb")
@pytest.mark.skip(reason="no `te.lang.cce` in ut env")
def test_lsq_mobile_per_channel_tf():
network = mobilenetV2(num_classes=1000)
img = Tensor(np.ones((1, 3, 224, 224)).astype(np.float32))
quantizer = QuantizationAwareTraining(bn_fold=True,
per_channel=[True, False],
symmetric=[True, True],
narrow_range=[True, True],
freeze_bn=0,
quant_delay=0,
one_conv_fold=True,
optimize_option=OptimizeOption.LEARNED_SCALE)
network = quantizer.quantize(network)
# should load the checkpoint. mock here
network.init_parameters_data()
quant_export.export(network, img, file_name="quant.pb")
@pytest.mark.skip(reason="no `te.lang.cce` in ut env")
def test_lsq_mobile_per_channel_ff():
network = mobilenetV2(num_classes=1000)
img = Tensor(np.ones((1, 3, 224, 224)).astype(np.float32))
quantizer = QuantizationAwareTraining(bn_fold=True,
per_channel=[False, False],
symmetric=[True, True],
narrow_range=[True, True],
freeze_bn=0,
quant_delay=0,
one_conv_fold=True,
optimize_option=OptimizeOption.LEARNED_SCALE)
network = quantizer.quantize(network)
# should load the checkpoint. mock here
network.init_parameters_data()
quant_export.export(network, img, file_name="quant.pb")