forked from mindspore-Ecosystem/mindspore
commit 3d288a5732
@@ -33,7 +33,7 @@ global_step: 0
 # MindSpore LSTM Example - train.py
 preprocess: 'false'
-aclimdb_path: "/cache/data/aclImdb"
+aclimdb_path: "./aclImdb"
 glove_path: "/cache/data"
 preprocess_path: "/cache/train/preprocess"
 ckpt_path: './ckpt_lstm/'
@@ -34,7 +34,7 @@ global_step: 0
 # MindSpore LSTM Example - train.py
 preprocess: 'false'
-aclimdb_path: "/cache/data/aclImdb"
+aclimdb_path: "./aclImdb"
 glove_path: "/cache/data"
 preprocess_path: "/cache/train/preprocess"
 ckpt_path: './ckpt_lstm/'
@@ -28,7 +28,7 @@ keep_checkpoint_max: 10
 # MindSpore LSTM Example - train.py
 preprocess: 'false'
-aclimdb_path: "/cache/data/aclImdb"
+aclimdb_path: "./aclImdb"
 glove_path: "/cache/data"
 preprocess_path: "/cache/train/preprocess"
 ckpt_path: './ckpt_lstm/'
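
Note: the three YAML hunks above change only the default aclimdb_path, from the ModelArts cache mount /cache/data/aclImdb to the local relative path ./aclImdb, while glove_path, preprocess_path and ckpt_path keep their existing defaults. As a rough sketch of why such a YAML default is still overridable at launch time, the snippet below loads a YAML file and re-exposes every key as a command-line flag. It is an illustration only, not the repository's model_utils config helper, and it treats every value as a string for simplicity.

    # Illustrative sketch: YAML defaults that CLI flags can override.
    # The file name "config_ascend.yaml" and the key names come from the diff;
    # the loading logic itself is an assumption, not the repo's actual helper.
    import argparse
    import yaml

    def load_config(config_path):
        """Read YAML defaults, then expose each key as an overridable CLI flag."""
        with open(config_path, "r") as f:
            defaults = yaml.safe_load(f)  # e.g. {"aclimdb_path": "./aclImdb", ...}
        parser = argparse.ArgumentParser()
        for key, value in defaults.items():
            # everything handled as a string here, purely to keep the sketch short
            parser.add_argument(f"--{key}", type=str, default=value)
        return parser.parse_args()

    if __name__ == "__main__":
        cfg = load_config("config_ascend.yaml")
        print(cfg.aclimdb_path)  # "./aclImdb" unless --aclimdb_path overrides it
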
@@ -39,6 +39,6 @@ CONFIG_FILE="${BASE_PATH}/../../config_ascend.yaml"
 python ../../eval.py \
 --config_path=$CONFIG_FILE \
 --device_target="Ascend" \
---preprocess=true \
---glove_path=$PREPROCESS_DIR \
+--preprocess=false \
+--preprocess_path=$PREPROCESS_DIR \
 --ckpt_file=$CKPT_FILE > log.txt 2>&1 &
@@ -37,6 +37,6 @@ python ../eval.py \
 --device_target="CPU" \
 --aclimdb_path=$ACLIMDB_DIR \
 --glove_path=$GLOVE_DIR \
---preprocess=true \
+--preprocess=false \
 --preprocess_path=./preprocess \
 --ckpt_file=$CKPT_FILE > log.txt 2>&1 &
@@ -40,6 +40,6 @@ python ../eval.py \
 --device_target="GPU" \
 --aclimdb_path=$ACLIMDB_DIR \
 --glove_path=$GLOVE_DIR \
---preprocess=true \
+--preprocess=false \
 --preprocess_path=./preprocess \
 --ckpt_file=$CKPT_FILE > log.txt 2>&1 &
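
Note: in all three eval launch scripts the --preprocess flag flips from true to false, so evaluation reuses the MindRecord data already produced under --preprocess_path instead of re-converting the raw aclImdb reviews and GloVe vectors on every run; the Ascend script additionally passes $PREPROCESS_DIR through the correct --preprocess_path flag rather than --glove_path. A small pre-flight check along these lines can confirm the directory is actually populated before launching with --preprocess=false; this script and its file-name pattern are assumptions for illustration, not part of the repository.

    # Hypothetical pre-flight check (not part of the repo): verify that the
    # preprocess directory already holds MindRecord files before eval reuses it.
    import os
    import sys

    def has_mindrecord(preprocess_path):
        """True if the directory already holds converted MindRecord files."""
        if not os.path.isdir(preprocess_path):
            return False
        # file-name pattern is an assumption; MindRecord outputs typically
        # carry "mindrecord" in their names
        return any("mindrecord" in name for name in os.listdir(preprocess_path))

    if __name__ == "__main__":
        path = sys.argv[1] if len(sys.argv) > 1 else "./preprocess"
        if has_mindrecord(path):
            print(f"OK: {path} is populated, --preprocess=false can reuse it")
        else:
            print(f"{path} looks empty; run once with --preprocess=true to build it")
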
@@ -104,7 +104,7 @@ def test_train_eval(config):


 def modelarts_pre_process():
-    config.ckpt_path = config.output_path
+    cfg.ckpt_path = cfg.output_path

 @moxing_wrapper(pre_process=modelarts_pre_process)
 def train_wide_and_deep():
@@ -144,7 +144,7 @@ def train_and_eval(config):


 def modelarts_pre_process():
-    config.ckpt_path = config.output_path
+    cfg.ckpt_path = cfg.output_path

 @moxing_wrapper(pre_process=modelarts_pre_process)
 def train_wide_and_deep():
@@ -118,7 +118,7 @@ def train_and_eval(config):


 def modelarts_pre_process():
-    config.ckpt_path = config.output_path
+    cfg.ckpt_path = cfg.output_path

 @moxing_wrapper(pre_process=modelarts_pre_process)
 def train_wide_and_deep():
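
Note: the three Python hunks above apply the same one-line fix: inside modelarts_pre_process() the checkpoint directory is redirected to the job's output path, and the object being mutated becomes cfg, presumably the alias these training scripts define for the parsed config, instead of the unaliased config name. The hook itself is invoked by the @moxing_wrapper(pre_process=...) decorator before the wrapped training function runs. The sketch below shows that hook pattern with a stand-in decorator and a stand-in cfg object; MindSpore's real moxing_wrapper (model_utils/moxing_adapter.py) also handles ModelArts data transfer, which is omitted here.

    # Generic sketch of the pre_process-hook pattern, with stand-in pieces.
    import functools
    from types import SimpleNamespace

    cfg = SimpleNamespace(ckpt_path="./ckpt/", output_path="/cache/train")  # stand-in config

    def moxing_wrapper(pre_process=None):
        """Stand-in for the repo's decorator: run pre_process, then the training func."""
        def decorator(run_func):
            @functools.wraps(run_func)
            def wrapper(*args, **kwargs):
                if pre_process:
                    pre_process()  # runs before training, e.g. repointing ckpt_path
                return run_func(*args, **kwargs)
            return wrapper
        return decorator

    def modelarts_pre_process():
        cfg.ckpt_path = cfg.output_path  # checkpoints land in the job's output directory

    @moxing_wrapper(pre_process=modelarts_pre_process)
    def train_wide_and_deep():
        print("saving checkpoints to", cfg.ckpt_path)

    train_wide_and_deep()  # prints: saving checkpoints to /cache/train
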
@@ -145,13 +145,14 @@ def train_and_eval(config):

 def modelarts_pre_process():
-    config.ckpt_path = config.output_path
+    cfg.ckpt_path = cfg.output_path

+context.set_context(mode=context.GRAPH_MODE, device_target=cfg.device_target, save_graphs=True)
+cache_enable = cfg.vocab_cache_size > 0
+

 @moxing_wrapper(pre_process=modelarts_pre_process)
 def train_wide_and_deep():
     """ train_wide_and_deep """
-    context.set_context(mode=context.GRAPH_MODE, device_target=cfg.device_target, save_graphs=True)
-    cache_enable = cfg.vocab_cache_size > 0
     if cache_enable and cfg.device_target != "GPU":
         context.set_context(variable_memory_max_size="24GB")
         context.set_ps_context(enable_ps=True)
@@ -117,13 +117,14 @@ def train_and_eval(config):

 def modelarts_pre_process():
-    config.ckpt_path = config.output_path
+    cfg.ckpt_path = cfg.output_path

+context.set_context(mode=context.GRAPH_MODE, device_target=cfg.device_target, save_graphs=True)
+cache_enable = cfg.vocab_cache_size > 0
+

 @moxing_wrapper(pre_process=modelarts_pre_process)
 def train_wide_and_deep():
     """ train_wide_and_deep """
-    context.set_context(mode=context.GRAPH_MODE, device_target=cfg.device_target, save_graphs=True)
-    cache_enable = cfg.vocab_cache_size > 0
     if not cache_enable:
         cfg.sparse = True
     if cfg.sparse:
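
Note: the last two hunks also hoist context.set_context(...) and the cache_enable = cfg.vocab_cache_size > 0 computation out of train_wide_and_deep() to module level, so the graph-mode context and the cache decision are established once, before the moxing-wrapped function is entered, and cache_enable becomes a module-level name that the branches left inside the function (the parameter-server cache setup in one file, the sparse fallback in the other) can still read. The stand-alone example below only illustrates that scoping point, uses made-up numbers, and has no MindSpore dependency.

    # Scoping illustration only; vocab_cache_size and device_target are made up.
    from types import SimpleNamespace

    cfg = SimpleNamespace(vocab_cache_size=300000, device_target="Ascend")

    # Module level: evaluated once at import time, visible to every function below.
    cache_enable = cfg.vocab_cache_size > 0

    def train():
        # Reads the module-level flag; had cache_enable been assigned only inside
        # another function, this lookup would raise NameError.
        if cache_enable and cfg.device_target != "GPU":
            print("parameter-server cache path")
        else:
            print("plain training path")

    train()  # -> parameter-server cache path
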