!10118 raise textrcnn precision when using DynamicRNN

From: @chenmai1102
Reviewed-by: @guoqi1024, @oacjiewen
Signed-off-by: @guoqi1024
Commit: 4ce11a930b
@@ -114,13 +114,16 @@ Parameters for both training and evaluation can be set in config.py
 ```python
 'num_epochs': 10, # total training epochs
 'batch_size': 64, # training batch size
-'cell': 'lstm', # the RNN architecture, can be 'vanilla', 'gru' and 'lstm'.
+'cell': 'gru', # the RNN architecture, can be 'vanilla', 'gru' and 'lstm'.
 'opt': 'adam', # the optimizer strategy, can be 'adam' or 'momentum'
 'ckpt_folder_path': './ckpt', # the path to save the checkpoints
 'preprocess_path': './preprocess', # the directory to save the processed data
 'preprocess': 'false', # whether to preprocess the data
 'data_path': './data/', # the path to store the split data
 'lr': 1e-3, # the training learning rate
+'lstm_base_lr': 3e-3, # the training learning rate when using lstm as the RNN cell
+'lstm_decay_rate': 0.9, # lr decay rate when using lstm as the RNN cell
+'lstm_decay_epoch': 1, # lr decay epoch when using lstm as the RNN cell
 'emb_path': './word2vec', # the directory to save the embedding file
 'embed_size': 300, # the dimension of the word embedding
 'save_checkpoint_steps': 149, # steps between checkpoint saves
@@ -137,7 +140,7 @@ Parameters for both training and evaluation can be set in config.py
 | Dataset | Sentence polarity dataset v1.0 | Sentence polarity dataset v1.0 |
 | batch_size | 64 | 64 |
 | Accuracy | 0.78 | 0.78 |
-| Speed | 78ms/step | 89ms/step |
+| Speed | 25ms/step | 77ms/step |

 ## [ModelZoo Homepage](#contents)

@@ -23,13 +23,16 @@ textrcnn_cfg = edict({
     'neg_dir': 'data/rt-polaritydata/rt-polarity.neg',
     'num_epochs': 10,
     'batch_size': 64,
-    'cell': 'lstm',
+    'cell': 'gru',
     'opt': 'adam',
     'ckpt_folder_path': './ckpt',
     'preprocess_path': './preprocess',
     'preprocess': 'false',
     'data_path': './data/',
     'lr': 1e-3,
+    'lstm_base_lr': 3e-3,
+    'lstm_decay_rate': 0.9,
+    'lstm_decay_epoch': 1,
     'emb_path': './word2vec',
     'embed_size': 300,
     'save_checkpoint_steps': 149,
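The new `lstm_*` entries land in `textrcnn_cfg`, which is an `easydict` dictionary, so downstream code can read them with attribute access. A minimal standalone sketch, trimmed to the keys this commit touches:

```python
# Sketch only: easydict allows both attribute and key access to config entries.
from easydict import EasyDict as edict

textrcnn_cfg = edict({
    'cell': 'lstm',
    'lstm_base_lr': 3e-3,
    'lstm_decay_rate': 0.9,
    'lstm_decay_epoch': 1,
})
assert textrcnn_cfg.lstm_base_lr == textrcnn_cfg['lstm_base_lr'] == 3e-3
```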
@@ -45,16 +45,16 @@ class textrcnn(nn.Cell):
             self.lstm = P.DynamicRNN(forget_bias=0.0)
             self.w1_fw = Parameter(
                 np.random.uniform(-k, k, (self.embed_size + self.num_hiddens, 4 * self.num_hiddens)).astype(
-                    np.float16), name="w1_fw")
+                    np.float32), name="w1_fw")
-            self.b1_fw = Parameter(np.random.uniform(-k, k, (4 * self.num_hiddens)).astype(np.float16),
+            self.b1_fw = Parameter(np.random.uniform(-k, k, (4 * self.num_hiddens)).astype(np.float32),
                                    name="b1_fw")
             self.w1_bw = Parameter(
                 np.random.uniform(-k, k, (self.embed_size + self.num_hiddens, 4 * self.num_hiddens)).astype(
-                    np.float16), name="w1_bw")
+                    np.float32), name="w1_bw")
-            self.b1_bw = Parameter(np.random.uniform(-k, k, (4 * self.num_hiddens)).astype(np.float16),
+            self.b1_bw = Parameter(np.random.uniform(-k, k, (4 * self.num_hiddens)).astype(np.float32),
                                    name="b1_bw")
-            self.h1 = Tensor(np.zeros(shape=(1, self.batch_size, self.num_hiddens)).astype(np.float16))
+            self.h1 = Tensor(np.zeros(shape=(1, self.batch_size, self.num_hiddens)).astype(np.float32))
-            self.c1 = Tensor(np.zeros(shape=(1, self.batch_size, self.num_hiddens)).astype(np.float16))
+            self.c1 = Tensor(np.zeros(shape=(1, self.batch_size, self.num_hiddens)).astype(np.float32))

         if cell == "vanilla":
             self.rnnW_fw = nn.Dense(self.num_hiddens, self.num_hiddens)
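The precision fix above keeps the DynamicRNN weights, biases, and initial states in float32; with `amp_level="O3"` (see train.py below) the forward pass still runs in float16, while the master parameter copies stay float32. A standalone numpy sketch of the packed 4-gate weight layout these shapes encode; `embed_size=300` matches the config, while `num_hiddens=512` and the init bound `k` are assumed illustration values:

```python
# Sketch of the DynamicRNN parameter shapes used above (numpy only).
# num_hiddens=512 and k are assumptions for illustration, not repo values.
import numpy as np

embed_size, num_hiddens = 300, 512
k = np.sqrt(1.0 / num_hiddens)  # assumed uniform-init bound

# DynamicRNN packs the four LSTM gates into one matrix: input and previous
# hidden state are concatenated, hence (embed + hidden) rows, 4*hidden columns.
w = np.random.uniform(-k, k, (embed_size + num_hiddens, 4 * num_hiddens)).astype(np.float32)
b = np.random.uniform(-k, k, (4 * num_hiddens,)).astype(np.float32)
print(w.shape, b.shape)  # (812, 2048) (2048,)
```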
src/utils.py (new file; the path follows from the `from src.utils import get_lr` import below):

@@ -0,0 +1,29 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""training utils"""
+from mindspore import dtype as mstype
+from mindspore.nn.dynamic_lr import exponential_decay_lr
+from mindspore import Tensor
+
+
+def get_lr(cfg, dataset_size):
+    if cfg.cell == "lstm":
+        lr = exponential_decay_lr(cfg.lstm_base_lr, cfg.lstm_decay_rate, dataset_size * cfg.num_epochs,
+                                  dataset_size,
+                                  cfg.lstm_decay_epoch)
+        lr_ret = Tensor(lr, mstype.float32)
+    else:
+        lr_ret = cfg.lr
+    return lr_ret
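To see what `get_lr` produces for the lstm cell: `exponential_decay_lr` returns one learning-rate value per training step, decaying by `lstm_decay_rate` every `lstm_decay_epoch` epochs. A pure-Python sketch of that schedule, reimplementing the decay formula for illustration rather than calling MindSpore; 149 steps/epoch and 10 epochs follow the config above:

```python
# Sketch of the decay schedule get_lr builds; the real helper is
# mindspore.nn.dynamic_lr.exponential_decay_lr.
def sketch_schedule(base_lr, decay_rate, total_step, step_per_epoch, decay_epoch):
    # lr at step i decays with the epoch index, one value per step
    return [base_lr * decay_rate ** ((i // step_per_epoch) / decay_epoch)
            for i in range(total_step)]

lrs = sketch_schedule(3e-3, 0.9, 149 * 10, 149, 1)
print(lrs[0], lrs[149], lrs[-1])  # 0.003, 0.0027, ~0.001162 (= 0.003 * 0.9**9)
```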
@@ -29,6 +29,7 @@ from src.config import textrcnn_cfg as cfg
 from src.dataset import create_dataset
 from src.dataset import convert_to_mindrecord
 from src.textrcnn import textrcnn
+from src.utils import get_lr


 set_seed(1)
@@ -50,25 +51,31 @@ if __name__ == '__main__':
         os.mkdir(cfg.preprocess_path)
         convert_to_mindrecord(cfg.embed_size, cfg.data_path, cfg.preprocess_path, cfg.emb_path)

+    if cfg.cell == "vanilla":
+        print("============ Precision is lower than expected when using vanilla RNN architecture ===========")
+
     embedding_table = np.loadtxt(os.path.join(cfg.preprocess_path, "weight.txt")).astype(np.float32)

     network = textrcnn(weight=Tensor(embedding_table), vocab_size=embedding_table.shape[0], \
                        cell=cfg.cell, batch_size=cfg.batch_size)

+    ds_train = create_dataset(cfg.preprocess_path, cfg.batch_size, cfg.num_epochs, True)
+    step_size = ds_train.get_dataset_size()
+
     loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
+    lr = get_lr(cfg, step_size)

     if cfg.opt == "adam":
-        opt = nn.Adam(params=network.trainable_params(), learning_rate=cfg.lr)
+        opt = nn.Adam(params=network.trainable_params(), learning_rate=lr)
     elif cfg.opt == "momentum":
-        opt = nn.Momentum(network.trainable_params(), cfg.lr, cfg.momentum)
+        opt = nn.Momentum(network.trainable_params(), lr, cfg.momentum)

     loss_cb = LossMonitor()
     model = Model(network, loss, opt, {'acc': Accuracy()}, amp_level="O3")

     print("============== Starting Training ==============")
-    ds_train = create_dataset(cfg.preprocess_path, cfg.batch_size, cfg.num_epochs, True)
     config_ck = CheckpointConfig(save_checkpoint_steps=cfg.save_checkpoint_steps, \
                                  keep_checkpoint_max=cfg.keep_checkpoint_max)
     ckpoint_cb = ModelCheckpoint(prefix=cfg.cell, directory=cfg.ckpt_folder_path, config=config_ck)
     model.train(cfg.num_epochs, ds_train, callbacks=[ckpoint_cb, loss_cb])
     print("train success")
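Usage note: `get_lr` returns a per-step 1-D float32 Tensor for the lstm cell and a plain scalar otherwise, and MindSpore optimizers accept either as `learning_rate`, which is why the Adam/Momentum lines above need no extra branching. A hedged sketch with a stub config, assuming the textrcnn repo layout so `src.utils` is importable:

```python
# Hedged usage sketch of the new helper, mirroring what train.py does above.
from easydict import EasyDict as edict
from src.utils import get_lr  # assumes the repo's src package is on the path

cfg = edict({'cell': 'lstm', 'num_epochs': 10, 'lr': 1e-3,
             'lstm_base_lr': 3e-3, 'lstm_decay_rate': 0.9, 'lstm_decay_epoch': 1})
lr = get_lr(cfg, dataset_size=149)  # 149 steps per epoch, as in the config
# opt = nn.Adam(params=network.trainable_params(), learning_rate=lr)
```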