forked from mindspore-Ecosystem/mindspore
!15382 clean pylint & shell check
From: @zhao_ting_v Reviewed-by: @wuxuejian,@c_34 Signed-off-by: @wuxuejian
commit 879f5e1f4e
@@ -17,7 +17,7 @@
 if [ ! -d out ]; then
     mkdir out
 fi
-cd out
+cd out || exit
 cmake .. \
     -DMINDSPORE_PATH="`pip show mindspore-ascend | grep Location | awk '{print $2"/mindspore"}' | xargs realpath`"
 make
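Note on the change above (and the similar ones below): replacing a bare "cd" with "cd <dir> || exit" follows the usual ShellCheck guidance (rule SC2164, as far as I can tell) that a failed cd otherwise goes unnoticed and every later command runs in the wrong directory. A minimal sketch of the guarded pattern, with a placeholder directory name:

    #!/bin/bash
    # Stop the script if the directory cannot be entered; a plain "exit"
    # propagates the non-zero status of the failed cd, so cmake/make
    # never run from whatever directory we happened to start in.
    cd out || exit
    cmake ..
    make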
@@ -63,7 +63,7 @@ fi
 
 function compile_app()
 {
-    cd ../ascend310_infer
+    cd ../ascend310_infer || exit
     if [ -f "Makefile" ]; then
        make clean
     fi
@@ -73,7 +73,7 @@ function compile_app()
         echo "compile app code failed"
         exit 1
     fi
-    cd -
+    cd - || exit
 }
 
 function infer()
@@ -73,7 +73,7 @@ function compile_app()
         echo "compile app code failed"
         exit 1
     fi
-    cd -
+    cd - || exit
 }
 
 function infer()
@@ -17,7 +17,7 @@
 if [ ! -d out ]; then
     mkdir out
 fi
-cd out
+cd out || exit
 cmake .. \
     -DMINDSPORE_PATH="`pip show mindspore-ascend | grep Location | awk '{print $2"/mindspore"}' | xargs realpath`"
 make
@@ -16,12 +16,11 @@
 """Evaluation for Deeptext"""
 import argparse
 import os
 
+from PIL import Image
 import numpy as np
+import mmcv
 from src.config import config
 from src.utils import metrics
-from PIL import Image
-import mmcv
-
 parser = argparse.ArgumentParser(description="Deeptext evaluation")
 parser.add_argument("--result_path", type=str, required=True, help="result file path")
@@ -62,7 +62,7 @@ fi
 
 function compile_app()
 {
-    cd ../ascend310_infer
+    cd ../ascend310_infer || exit
     if [ -f "Makefile" ]; then
        make clean
     fi
@@ -72,7 +72,7 @@ function compile_app()
         echo "compile app code failed"
         exit 1
     fi
-    cd -
+    cd - || exit
 }
 
 function infer()
@@ -19,7 +19,7 @@ build_type="Release"
 function preparePath() {
     rm -rf $1
     mkdir -p $1
-    cd $1
+    cd $1 || exit
 }
 
 function buildA300() {
@@ -66,9 +66,9 @@ function air_to_om()
 
 function compile_app()
 {
-    cd ../ascend310_infer/src
+    cd ../ascend310_infer/src || exit
     sh build.sh &> build.log
-    cd -
+    cd - || exit
 }
 
 function infer()
@@ -57,7 +57,7 @@ mkdir ./eval
 cp ../*.py ./eval
 cp *.sh ./eval
 cp -r ../src ./eval
-cd ./eval
+cd ./eval || exit
 env > env.log
 echo "start evaluation for device $DEVICE_ID"
 python eval.py --data_url=$PATH1 --checkpoint_path=$PATH2 &> eval.log &
@@ -70,7 +70,7 @@ function preprocess_data()
 
 function compile_app()
 {
-    cd ../ascend310_infer/src
+    cd ../ascend310_infer/ || exit
     if [ -f "Makefile" ]; then
        make clean
     fi
@@ -62,7 +62,8 @@ do
     end=`expr $start \+ $gap`
     cmdopt=$start"-"$end
 
-    export DEVICE_ID=`expr $i \+ $start_idx`
+    device_id=`expr $i \+ $start_idx`
+    export DEVICE_ID=$device_id
     export RANK_ID=$i
     rm -rf ./train_parallel$DEVICE_ID
     mkdir ./train_parallel$DEVICE_ID
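The DEVICE_ID edit above splits the command substitution from the export, which looks like the standard ShellCheck advice (rule SC2155, to my reading): in "export VAR=$(command)" the export builtin masks the command's exit status, so assigning first keeps a failure visible. A small sketch with placeholder values:

    #!/bin/bash
    i=0
    start_idx=1
    # assign first, then export, so the exit status of expr is not
    # swallowed by the export builtin
    device_id=`expr $i \+ $start_idx`
    export DEVICE_ID=$device_id
    echo "running on device $DEVICE_ID"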
@@ -60,7 +60,7 @@ fi
 
 function compile_app()
 {
-    cd ../ascend310_infer/src
+    cd ../ascend310_infer/src || exit
     if [ -f "Makefile" ]; then
        make clean
     fi
@@ -21,8 +21,8 @@ import pprint
 import ast
 import html
 import numpy as np
+import spacy
 from sklearn.feature_extraction import FeatureHasher
-import spacy
 from mindspore.mindrecord import FileWriter
 
 
@@ -61,7 +61,7 @@ export RANK_ID=0
 rm -rf ${current_exec_path}/device$USE_DEVICE_ID
 echo 'start device '$USE_DEVICE_ID
 mkdir ${current_exec_path}/device$USE_DEVICE_ID
-cd ${current_exec_path}/device$USE_DEVICE_ID
+cd ${current_exec_path}/device$USE_DEVICE_ID || exit
 dev=`expr $USE_DEVICE_ID + 0`
 export DEVICE_ID=$dev
 python ${dirname_path}/${SCRIPT_NAME} \
@@ -102,7 +102,7 @@ def main(args):
         else:
             param_dict_new[key] = values
     load_param_into_net(network, param_dict_new)
-    cfg.logger.info('load model %s success.' % cfg.pretrained)
+    cfg.logger.info('load model %s success.', cfg.pretrained)
 
     # optimizer and lr scheduler
     lr = warmup_step(cfg, gamma=0.9)
@@ -328,6 +328,6 @@ if __name__ == '__main__':
     log_path = os.path.join(arg.ckpt_path, 'logs')
     arg.logger = get_logger(log_path, arg.local_rank)
 
-    arg.logger.info('Config\n\n{}\n'.format(pformat(arg)))
+    arg.logger.info('Config: {}'.format(pformat(arg)))
 
     main(arg)
@@ -42,7 +42,7 @@ mkdir ${EXECUTE_PATH}/log_standalone_graph
 
 rm -rf ${EXECUTE_PATH}/data_standalone_log_$USE_DEVICE_ID
 mkdir -p ${EXECUTE_PATH}/data_standalone_log_$USE_DEVICE_ID
-cd ${EXECUTE_PATH}/data_standalone_log_$USE_DEVICE_ID
+cd ${EXECUTE_PATH}/data_standalone_log_$USE_DEVICE_ID || exit
 echo "start training for rank $RANK_ID, device $USE_DEVICE_ID"
 env > ${EXECUTE_PATH}/log_standalone_graph/face_recognition_$USE_DEVICE_ID.log
 python ${EXECUTE_PATH}/../train.py \
@@ -42,7 +42,7 @@ mkdir ${EXECUTE_PATH}/log_standalone_graph
 
 rm -rf ${EXECUTE_PATH}/data_standalone_log_$USE_DEVICE_ID
 mkdir -p ${EXECUTE_PATH}/data_standalone_log_$USE_DEVICE_ID
-cd ${EXECUTE_PATH}/data_standalone_log_$USE_DEVICE_ID
+cd ${EXECUTE_PATH}/data_standalone_log_$USE_DEVICE_ID || exit
 echo "start training for rank $RANK_ID, device $USE_DEVICE_ID"
 env > ${EXECUTE_PATH}/log_standalone_graph/face_recognition_$USE_DEVICE_ID.log
 python ${EXECUTE_PATH}/../train.py \
@@ -90,13 +90,13 @@ def main():
     cfg.logger.info('start create dataloader')
     de_dataset, steps_per_epoch, class_num = get_de_dataset(cfg)
     cfg.steps_per_epoch = steps_per_epoch
-    cfg.logger.info('step per epoch: %d' % cfg.steps_per_epoch)
+    cfg.logger.info('step per epoch: {}'.format(cfg.steps_per_epoch))
     de_dataloader = de_dataset.create_tuple_iterator()
-    cfg.logger.info('class num original: %d' % class_num)
+    cfg.logger.info('class num original: {}'.format(class_num))
     if class_num % 16 != 0:
         class_num = (class_num // 16 + 1) * 16
     cfg.class_num = class_num
-    cfg.logger.info('change the class num to: %d' % cfg.class_num)
+    cfg.logger.info('change the class num to: {}'.format(cfg.class_num))
     cfg.logger.info('end create dataloader')
 
     # backbone and loss
@@ -119,7 +119,7 @@ def main():
         else:
             param_dict_new[key] = values
     load_param_into_net(network, param_dict_new)
-    cfg.logger.info('load model %s success' % cfg.pretrained)
+    cfg.logger.info('load model %s success', cfg.pretrained)
 
     # mixed precision training
     network.add_flags_recursive(fp16=True)
@@ -15,7 +15,7 @@
 # ============================================================================
 
 mkdir -p ms_log
-PROJECT_DIR=$(cd "$(dirname "$0")"; pwd)
+PROJECT_DIR=$(cd "$(dirname "$0")" || exit; pwd)
 CUR_DIR=`pwd`
 export GLOG_log_dir=${CUR_DIR}/ms_log
 export GLOG_logtostderr=0
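The PROJECT_DIR edit above applies the same guarded-cd idea inside the common "resolve the script's own directory" idiom. Because the cd runs in the $( ... ) subshell, a failure only terminates that subshell: pwd is never reached and the variable ends up empty rather than silently pointing at the wrong place. A short sketch, assuming a script that wants its own location:

    #!/bin/bash
    # cd into the script's directory inside a subshell, bail out of the
    # subshell if that fails, otherwise capture the absolute path
    PROJECT_DIR=$(cd "$(dirname "$0")" || exit; pwd)
    echo "project dir: ${PROJECT_DIR}"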