!18970 fix ctpn readme and simplify batchnorm using type

Merge pull request !18970 from qujianwei/master
i-robot 2021-06-28 07:56:38 +00:00 committed by Gitee
commit 19e2e063c9
2 changed files with 5 additions and 15 deletions


@@ -221,9 +221,9 @@ ICDAR2013, SCUT-FORU to improve precision and recall, and when doing Finetune, w
Training result will be stored in the example path. Checkpoints will be stored at `ckpt_path` by default, the training log will be redirected to `./log`, and the loss will be redirected to `./loss_0.log` as follows.
```python
-377 epoch: 1 step: 229 ,rpn_loss: 0.00355, rpn_cls_loss: 0.00047, rpn_reg_loss: 0.00103,
-399 epoch: 2 step: 229 ,rpn_loss: 0.00327,rpn_cls_loss: 0.00047, rpn_reg_loss: 0.00093,
-424 epoch: 3 step: 229 ,rpn_loss: 0.00910, rpn_cls_loss: 0.00385, rpn_reg_loss: 0.00175,
+377 epoch: 1 step: 229 ,rpn_loss: 0.00355
+399 epoch: 2 step: 229 ,rpn_loss: 0.00327
+424 epoch: 3 step: 229 ,rpn_loss: 0.00910
```
- running on ModelArts
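To make the loss log format shown above concrete, here is a small Python sketch (not part of this commit) that pulls the per-step `rpn_loss` values out of the loss file; the path `./loss_0.log` and the line format are taken from the README excerpt, everything else is illustrative:

```python
import re

# Illustrative helper (not part of the CTPN code): collect rpn_loss values
# from a loss log whose lines look like
#   "377 epoch: 1 step: 229 ,rpn_loss: 0.00355"
def read_rpn_loss(path="./loss_0.log"):
    pattern = re.compile(r"rpn_loss:\s*([0-9.]+)")
    losses = []
    with open(path) as f:
        for line in f:
            match = pattern.search(line)
            if match:
                losses.append(float(match.group(1)))
    return losses

# Example usage: print the most recent loss value.
# print(read_rpn_loss()[-1])
```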


@@ -23,17 +23,6 @@ def _weight_variable(shape, factor=0.01):
    init_value = np.random.randn(*shape).astype(np.float32) * factor
    return Tensor(init_value)
-def _BatchNorm2dInit(out_chls, momentum=0.1, affine=True, use_batch_statistics=False):
-    """Batchnorm2D wrapper."""
-    gamma_init = Tensor(np.array(np.ones(out_chls)).astype(np.float32))
-    beta_init = Tensor(np.array(np.ones(out_chls) * 0).astype(np.float32))
-    moving_mean_init = Tensor(np.array(np.ones(out_chls) * 0).astype(np.float32))
-    moving_var_init = Tensor(np.array(np.ones(out_chls)).astype(np.float32))
-    return nn.BatchNorm2d(out_chls, momentum=momentum, affine=affine, gamma_init=gamma_init,
-                          beta_init=beta_init, moving_mean_init=moving_mean_init,
-                          moving_var_init=moving_var_init, use_batch_statistics=use_batch_statistics)
def _conv(in_channels, out_channels, kernel_size=3, stride=1, padding=0, pad_mode='pad', weights_update=True):
    """Conv2D wrapper."""
    layers = []
@@ -43,7 +32,8 @@ def _conv(in_channels, out_channels, kernel_size=3, stride=1, padding=0, pad_mod
    if not weights_update:
        conv.weight.requires_grad = False
    layers += [conv]
-    layers += [_BatchNorm2dInit(out_channels)]
+    layers += [nn.BatchNorm2d(out_channels, momentum=0.1, beta_init="ones", \
+                              moving_mean_init="ones", use_batch_statistics=False)]
    return nn.SequentialCell(layers)
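For context on the simplification above: MindSpore's `nn.BatchNorm2d` accepts both explicit `Tensor` initializers (which the removed `_BatchNorm2dInit` wrapper built by hand) and string initializer names such as `"ones"` and `"zeros"`, which is what the new one-line call relies on. A minimal sketch of the two styles follows; the channel count is hypothetical and the snippet is illustrative rather than the model's actual code. Note that the old wrapper initialized `beta` and the moving mean to zeros, while the simplified call passes `"ones"` for both.

```python
# Minimal sketch (assumes MindSpore is installed); out_chls is a hypothetical value.
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor

out_chls = 64  # hypothetical channel count

# Style 1: explicit Tensor initializers, as the removed _BatchNorm2dInit wrapper did.
bn_explicit = nn.BatchNorm2d(
    out_chls,
    momentum=0.1,
    gamma_init=Tensor(np.ones(out_chls).astype(np.float32)),
    beta_init=Tensor(np.zeros(out_chls).astype(np.float32)),
    moving_mean_init=Tensor(np.zeros(out_chls).astype(np.float32)),
    moving_var_init=Tensor(np.ones(out_chls).astype(np.float32)),
    use_batch_statistics=False,
)

# Style 2: string initializer names, mirroring the simplified call in the diff;
# unspecified initializers fall back to the layer defaults.
bn_by_type = nn.BatchNorm2d(
    out_chls,
    momentum=0.1,
    beta_init="ones",
    moving_mean_init="ones",
    use_batch_statistics=False,
)
```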