diff --git a/model_zoo/official/nlp/textrcnn/readme.md b/model_zoo/official/nlp/textrcnn/readme.md
index ba5d0b6c624..89fe67644b4 100644
--- a/model_zoo/official/nlp/textrcnn/readme.md
+++ b/model_zoo/official/nlp/textrcnn/readme.md
@@ -114,13 +114,16 @@ Parameters for both training and evaluation can be set in config.py
 ```python
 'num_epochs': 10, # total training epochs
 'batch_size': 64, # training batch size
-'cell': 'lstm', # the RNN architecture, can be 'vanilla', 'gru' and 'lstm'.
+'cell': 'gru', # the RNN architecture, can be 'vanilla', 'gru' and 'lstm'.
 'opt': 'adam', # the optimizer strategy, can be 'adam' or 'momentum'
 'ckpt_folder_path': './ckpt', # the path to save the checkpoints
 'preprocess_path': './preprocess', # the directory to save the processed data
 'preprocess': 'false', # whether to preprocess the data
 'data_path': './data/', # the path to store the split data
 'lr': 1e-3, # the training learning rate
+'lstm_base_lr': 3e-3, # the base learning rate when using lstm as the RNN cell
+'lstm_decay_rate': 0.9, # the lr decay rate when using lstm as the RNN cell
+'lstm_decay_epoch': 1, # the lr decay epoch when using lstm as the RNN cell
 'emb_path': './word2vec', # the directory to save the embedding file
 'embed_size': 300, # the dimension of the word embedding
 'save_checkpoint_steps': 149, # the step interval for saving checkpoints
@@ -137,7 +140,7 @@ Parameters for both training and evaluation can be set in config.py
 | Dataset | Sentence polarity dataset v1.0 | Sentence polarity dataset v1.0 |
 | batch_size | 64 | 64 |
 | Accuracy | 0.78 | 0.78 |
-| Speed | 78ms/step | 89ms/step |
+| Speed | 25ms/step | 77ms/step |
 
 ## [ModelZoo Homepage](#contents)
 
diff --git a/model_zoo/official/nlp/textrcnn/src/config.py b/model_zoo/official/nlp/textrcnn/src/config.py
index eacda81a19e..0782b709a24 100644
--- a/model_zoo/official/nlp/textrcnn/src/config.py
+++ b/model_zoo/official/nlp/textrcnn/src/config.py
@@ -23,13 +23,16 @@ textrcnn_cfg = edict({
     'neg_dir': 'data/rt-polaritydata/rt-polarity.neg',
     'num_epochs': 10,
     'batch_size': 64,
-    'cell': 'lstm',
+    'cell': 'gru',
     'opt': 'adam',
     'ckpt_folder_path': './ckpt',
     'preprocess_path': './preprocess',
     'preprocess': 'false',
     'data_path': './data/',
     'lr': 1e-3,
+    'lstm_base_lr': 3e-3,
+    'lstm_decay_rate': 0.9,
+    'lstm_decay_epoch': 1,
     'emb_path': './word2vec',
     'embed_size': 300,
     'save_checkpoint_steps': 149,
diff --git a/model_zoo/official/nlp/textrcnn/src/textrcnn.py b/model_zoo/official/nlp/textrcnn/src/textrcnn.py
index 6b3fb00e928..bd3175ce471 100644
--- a/model_zoo/official/nlp/textrcnn/src/textrcnn.py
+++ b/model_zoo/official/nlp/textrcnn/src/textrcnn.py
@@ -45,16 +45,16 @@ class textrcnn(nn.Cell):
         self.lstm = P.DynamicRNN(forget_bias=0.0)
         self.w1_fw = Parameter(
             np.random.uniform(-k, k, (self.embed_size + self.num_hiddens, 4 * self.num_hiddens)).astype(
-                np.float16), name="w1_fw")
-        self.b1_fw = Parameter(np.random.uniform(-k, k, (4 * self.num_hiddens)).astype(np.float16),
+                np.float32), name="w1_fw")
+        self.b1_fw = Parameter(np.random.uniform(-k, k, (4 * self.num_hiddens)).astype(np.float32),
                                name="b1_fw")
         self.w1_bw = Parameter(
             np.random.uniform(-k, k, (self.embed_size + self.num_hiddens, 4 * self.num_hiddens)).astype(
-                np.float16), name="w1_bw")
-        self.b1_bw = Parameter(np.random.uniform(-k, k, (4 * self.num_hiddens)).astype(np.float16),
+                np.float32), name="w1_bw")
+        self.b1_bw = Parameter(np.random.uniform(-k, k, (4 * self.num_hiddens)).astype(np.float32),
                                name="b1_bw")
-        self.h1 = Tensor(np.zeros(shape=(1, self.batch_size, self.num_hiddens)).astype(np.float16))
-        self.c1 = Tensor(np.zeros(shape=(1, self.batch_size, self.num_hiddens)).astype(np.float16))
+        self.h1 = Tensor(np.zeros(shape=(1, self.batch_size, self.num_hiddens)).astype(np.float32))
+        self.c1 = Tensor(np.zeros(shape=(1, self.batch_size, self.num_hiddens)).astype(np.float32))
 
         if cell == "vanilla":
             self.rnnW_fw = nn.Dense(self.num_hiddens, self.num_hiddens)
diff --git a/model_zoo/official/nlp/textrcnn/src/utils.py b/model_zoo/official/nlp/textrcnn/src/utils.py
new file mode 100644
index 00000000000..f68401fa63f
--- /dev/null
+++ b/model_zoo/official/nlp/textrcnn/src/utils.py
@@ -0,0 +1,29 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""training utils"""
+from mindspore import dtype as mstype
+from mindspore.nn.dynamic_lr import exponential_decay_lr
+from mindspore import Tensor
+
+
+def get_lr(cfg, dataset_size):
+    """Return an exponentially decaying lr schedule for the lstm cell, or the constant cfg.lr otherwise."""
+    if cfg.cell == "lstm":
+        lr = exponential_decay_lr(cfg.lstm_base_lr, cfg.lstm_decay_rate, dataset_size * cfg.num_epochs,
+                                  dataset_size, cfg.lstm_decay_epoch)
+        lr_ret = Tensor(lr, mstype.float32)
+    else:
+        lr_ret = cfg.lr
+    return lr_ret
diff --git a/model_zoo/official/nlp/textrcnn/train.py b/model_zoo/official/nlp/textrcnn/train.py
index ec74ef4a9f8..0b67cf9a312 100644
--- a/model_zoo/official/nlp/textrcnn/train.py
+++ b/model_zoo/official/nlp/textrcnn/train.py
@@ -29,6 +29,7 @@
 from src.config import textrcnn_cfg as cfg
 from src.dataset import create_dataset
 from src.dataset import convert_to_mindrecord
 from src.textrcnn import textrcnn
+from src.utils import get_lr
 
 set_seed(1)
@@ -50,25 +51,31 @@ if __name__ == '__main__':
         os.mkdir(cfg.preprocess_path)
         convert_to_mindrecord(cfg.embed_size, cfg.data_path, cfg.preprocess_path, cfg.emb_path)
 
+    if cfg.cell == "vanilla":
+        print("============ Precision is lower than expected when using the vanilla RNN architecture ============")
+
     embedding_table = np.loadtxt(os.path.join(cfg.preprocess_path, "weight.txt")).astype(np.float32)
     network = textrcnn(weight=Tensor(embedding_table), vocab_size=embedding_table.shape[0], \
-                       cell=cfg.cell, batch_size=cfg.batch_size)
+                       cell=cfg.cell, batch_size=cfg.batch_size)
+
+    ds_train = create_dataset(cfg.preprocess_path, cfg.batch_size, cfg.num_epochs, True)
+    step_size = ds_train.get_dataset_size()
 
     loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
+    lr = get_lr(cfg, step_size)
+
     if cfg.opt == "adam":
-        opt = nn.Adam(params=network.trainable_params(), learning_rate=cfg.lr)
+        opt = nn.Adam(params=network.trainable_params(), learning_rate=lr)
     elif cfg.opt == "momentum":
-        opt = nn.Momentum(network.trainable_params(), cfg.lr, cfg.momentum)
+        opt = nn.Momentum(network.trainable_params(), lr, cfg.momentum)
 
     loss_cb = LossMonitor()
     model = Model(network, loss, opt, {'acc': Accuracy()}, amp_level="O3")
     print("============== Starting Training ==============")
-    ds_train = create_dataset(cfg.preprocess_path, cfg.batch_size, cfg.num_epochs, True)
     config_ck = CheckpointConfig(save_checkpoint_steps=cfg.save_checkpoint_steps, \
-        keep_checkpoint_max=cfg.keep_checkpoint_max)
+        keep_checkpoint_max=cfg.keep_checkpoint_max)
     ckpoint_cb = ModelCheckpoint(prefix=cfg.cell, directory=cfg.ckpt_folder_path, config=config_ck)
     model.train(cfg.num_epochs, ds_train, callbacks=[ckpoint_cb, loss_cb])
     print("train success")
-
\ No newline at end of file
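
Note on the new learning-rate schedule: for `cell == "lstm"`, `get_lr` builds a per-step schedule with `mindspore.nn.dynamic_lr.exponential_decay_lr`, starting at `lstm_base_lr` and decaying by `lstm_decay_rate` every `lstm_decay_epoch` epochs; every other cell keeps the constant `cfg.lr`. The sketch below is illustrative only and not part of the patch: it assumes the per-epoch staircase form of the decay and hard-codes the config values, taking 149 steps per epoch from `save_checkpoint_steps`.

```python
# Standalone sketch of the schedule get_lr builds for the lstm cell.
# Assumption: the decay steps once per lstm_decay_epoch epochs; the real
# schedule comes from mindspore.nn.dynamic_lr.exponential_decay_lr.
base_lr, decay_rate, decay_epoch = 3e-3, 0.9, 1  # lstm_base_lr, lstm_decay_rate, lstm_decay_epoch
num_epochs, steps_per_epoch = 10, 149            # steps/epoch as in save_checkpoint_steps

lr_per_step = [
    base_lr * decay_rate ** ((step // steps_per_epoch) // decay_epoch)
    for step in range(num_epochs * steps_per_epoch)
]

print(lr_per_step[0])   # 0.003 during epoch 0
print(lr_per_step[-1])  # ~0.00116 in epoch 9, i.e. 0.003 * 0.9**9
```

With `lstm_decay_epoch` set to 1, the rate simply shrinks by 10% after each epoch, which is why the lstm cell can start from the higher 3e-3 base rate instead of the flat 1e-3 used by the other cells.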