!12364 [MSLITE][TOD] Integration with Minddata

From: @yonibaehr_admin
Reviewed-by: @HilbertDavid,@ddwsky
Signed-off-by: @HilbertDavid
commit 563df484ad by mindspore-ci-bot, 2021-02-18 09:09:05 +08:00 (committed by Gitee)
100 changed files with 2958 additions and 747 deletions

View File

@@ -113,6 +113,7 @@ include_directories(${CMAKE_CURRENT_SOURCE_DIR})
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/src/runtime/kernel/arm)
include_directories(${TOP_DIR}/third_party)
include_directories(${CMAKE_BINARY_DIR})
include_directories(${CCSRC_DIR}/minddata/dataset/liteapi)
include(${TOP_DIR}/cmake/utils.cmake)
include(${TOP_DIR}/cmake/dependency_utils.cmake)

View File

@@ -0,0 +1,7 @@
*.mindir
*.ms
*.bin
*.ckpt
export_result.txt
mindir
train_io

View File

@@ -0,0 +1,81 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""NetworkInNetwork."""
import numpy as np
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor
# NiN block
class NiN(nn.Cell):
"""class NiN"""
def __init__(self, num_classes=10, num_channel=3):
super().__init__()
self.size = ops.Size()
self.block0 = nn.SequentialCell(
# block 0
nn.Conv2d(in_channels=num_channel, out_channels=192, kernel_size=5, stride=1, has_bias=False),
nn.ReLU(),
nn.Conv2d(in_channels=192, out_channels=160, kernel_size=1, stride=1, has_bias=True),
nn.ReLU(),
nn.Conv2d(in_channels=160, out_channels=96, kernel_size=1, stride=1, has_bias=True),
nn.ReLU(),
nn.MaxPool2d(kernel_size=3, stride=2, pad_mode='same'),
nn.Dropout(1.0)
)
self.block1 = nn.SequentialCell(
# block 1
nn.Conv2d(in_channels=96, out_channels=192, kernel_size=5, stride=1, has_bias=False),
nn.ReLU(),
nn.Conv2d(in_channels=192, out_channels=192, kernel_size=1, stride=1, has_bias=True),
nn.ReLU(),
nn.Conv2d(in_channels=192, out_channels=192, kernel_size=1, stride=1, has_bias=True),
nn.ReLU(),
nn.MaxPool2d(kernel_size=3, stride=2, pad_mode='same'),
nn.Dropout(1.0)
)
self.block2 = nn.SequentialCell(
# block 2
nn.Conv2d(in_channels=192, out_channels=192, kernel_size=3, stride=1, has_bias=False),
nn.ReLU(),
nn.Conv2d(in_channels=192, out_channels=192, kernel_size=1, stride=1, has_bias=True),
nn.ReLU(),
nn.Conv2d(in_channels=192, out_channels=num_classes, kernel_size=1, stride=1, has_bias=True),
nn.ReLU(),
nn.AvgPool2d(kernel_size=8, stride=1, pad_mode='valid')
)
# flatten
self.flatten = nn.Flatten()
self._initialize_weights()
def _initialize_weights(self):
self.init_parameters_data()
for _, m in self.cells_and_names():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.set_data(Tensor(np.random.normal(0, np.sqrt(2. / n),
m.weight.data.shape).astype("float32")))
if m.bias is not None:
m.bias.set_data(
Tensor(np.zeros(m.bias.data.shape, dtype="float32")))
def construct(self, x):
out = self.block0(x)
out = self.block1(out)
out = self.block2(out)
out = self.flatten(out)
return out

View File

@@ -0,0 +1,39 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""densenet_train_export."""
import sys
import numpy as np
from train_utils import SaveInOut, TrainWrap
from official.cv.densenet121.src.network.densenet import DenseNet121
import mindspore.common.dtype as mstype
from mindspore import context, Tensor, nn
from mindspore.train.serialization import export
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU", save_graphs=False)
n = DenseNet121(num_classes=10)
loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False)
optimizer = nn.SGD(n.trainable_params(), learning_rate=0.001, momentum=0.9, dampening=0.0, weight_decay=0.0,
nesterov=True, loss_scale=0.9)
net = TrainWrap(n, loss_fn, optimizer)
batch = 2
x = Tensor(np.random.randn(batch, 3, 224, 224), mstype.float32)
label = Tensor(np.zeros([batch, 10]).astype(np.float32))
export(net, x, label, file_name="mindir/densenet_train", file_format='MINDIR')
if len(sys.argv) > 1:
SaveInOut(sys.argv[1] + "densenet", x, label, n, net)

View File

@@ -0,0 +1,315 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""effnet."""
import numpy as np
import mindspore.nn as nn
from mindspore.ops import operations as P
from mindspore.common.initializer import TruncatedNormal
from mindspore import Tensor
def weight_variable():
"""weight initial"""
return TruncatedNormal(0.02)
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
iparam min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
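# Worked examples (illustrative values): _make_divisible(25.5, 8) gives
# int(29.5) // 8 * 8 = 24, and 24 >= 0.9 * 25.5, so 24 is returned;
# _make_divisible(35.9, 8) gives 32, but 32 < 0.9 * 35.9 = 32.31, so the
# divisor is added back and 40 is returned.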
class Swish(nn.Cell):
def __init__(self):
super().__init__()
self.sigmoid = nn.Sigmoid()
def construct(self, x):
s = self.sigmoid(x)
m = x*s
return m
#return x * (1/(1+self.exp(-x)))
class AdaptiveAvgPool(nn.Cell):
def __init__(self, output_size=None):
super().__init__()
self.mean = P.ReduceMean(keep_dims=True)
self.output_size = output_size
def construct(self, x):
return self.mean(x, (2, 3))  # NOTE: global average pooling over H and W only; output_size is ignored, so this is not a general adaptive pool
class SELayer(nn.Cell):
"""SELayer"""
def __init__(self, channel, reduction=4):
super().__init__()
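# _make_divisible with divisor=1 effectively rounds channel/reduction to the nearest whole number (minimum 1)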
reduced_chs = _make_divisible(channel/reduction, 1)
self.avg_pool = AdaptiveAvgPool(output_size=(1, 1))
weight = weight_variable()
self.conv_reduce = nn.Conv2d(in_channels=channel, out_channels=reduced_chs, kernel_size=1, has_bias=True,
weight_init=weight)
self.act1 = Swish()
self.conv_expand = nn.Conv2d(in_channels=reduced_chs, out_channels=channel, kernel_size=1, has_bias=True)
self.act2 = nn.Sigmoid()
def construct(self, x):
#b, c, _, _ = x.shape()
o = self.avg_pool(x) #.view(b,c)
o = self.conv_reduce(o)
o = self.act1(o)
o = self.conv_expand(o)
o = self.act2(o) #.view(b, c, 1,1)
return x * o
class DepthwiseSeparableConv(nn.Cell):
"""DepthwiseSeparableConv"""
def __init__(self, in_chs, out_chs, dw_kernel_size=3, stride=1, noskip=False, se_ratio=0.0, drop_connect_rate=0.0):
super().__init__()
assert stride in [1, 2]
self.has_residual = (stride == 1 and in_chs == out_chs) and not noskip
self.drop_connect_rate = drop_connect_rate
self.conv_dw = nn.Conv2d(in_channels=in_chs, out_channels=in_chs, kernel_size=dw_kernel_size, stride=stride,
pad_mode="pad", padding=1, has_bias=False, group=in_chs)
self.bn1 = nn.BatchNorm2d(in_chs, eps=0.001) #,momentum=0.1)
self.act1 = Swish()
# Squeeze-and-excitation
if se_ratio is not None and se_ratio > 0.:
self.se = SELayer(in_chs, reduction=se_ratio)
else:
print("ERRRRRORRRR -- not prepared for this one\n")
self.conv_pw = nn.Conv2d(in_channels=in_chs, out_channels=out_chs, kernel_size=1, stride=stride, has_bias=False)
self.bn2 = nn.BatchNorm2d(out_chs, eps=0.001) #,momentum=0.1)
def construct(self, x):
"""construct"""
residual = x
x = self.conv_dw(x)
x = self.bn1(x)
x = self.act1(x)
x = self.se(x)
x = self.conv_pw(x)
x = self.bn2(x)
if self.has_residual:
# if self.drop_connect_rate > 0.:
# x = x
# x = drop_connect(x, self.training, self.drop_connect_rate)
x += residual
return x
def conv_3x3_bn(inp, oup, stride):
weight = weight_variable()
return nn.SequentialCell([
nn.Conv2d(in_channels=inp, out_channels=oup, kernel_size=3, stride=stride, padding=1, weight_init=weight,
has_bias=False, pad_mode='pad'),
nn.BatchNorm2d(oup, eps=0.001), #, momentum=0.1),
nn.HSwish()])
def conv_1x1_bn(inp, oup):
weight = weight_variable()
return nn.SequentialCell([
nn.Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, stride=1, padding=0, weight_init=weight,
has_bias=False),
nn.BatchNorm2d(oup, eps=0.001),
nn.HSwish()])
class InvertedResidual(nn.Cell):
"""InvertedResidual"""
def __init__(self, in_chs, out_chs, kernel_size, stride, padding, expansion, se_ratio):
super().__init__()
assert stride in [1, 2]
mid_chs: int = _make_divisible(in_chs * expansion, 1)
self.has_residual = (in_chs == out_chs and stride == 1)
self.drop_connect_rate = 0
# Point-wise expansion
self.conv_pw = nn.Conv2d(in_channels=in_chs, out_channels=mid_chs, kernel_size=1, stride=1, has_bias=False)
self.bn1 = nn.BatchNorm2d(mid_chs, eps=0.001)
self.act1 = Swish()
# Depth-wise convolution
if stride > 1:
self.conv_dw = nn.Conv2d(in_channels=mid_chs, out_channels=mid_chs, kernel_size=kernel_size, stride=stride,
padding=padding, has_bias=False, group=mid_chs, pad_mode='same')
else:
self.conv_dw = nn.Conv2d(in_channels=mid_chs, out_channels=mid_chs, kernel_size=kernel_size, stride=stride,
padding=padding, has_bias=False, group=mid_chs, pad_mode='pad')
self.bn2 = nn.BatchNorm2d(mid_chs, eps=0.001)
self.act2 = Swish()
# Squeeze-and-excitation
if se_ratio is not None and se_ratio > 0.:
self.se = SELayer(mid_chs, reduction=se_ratio)
else:
print("ERRRRRORRRR -- not prepared for this one\n")
# Point-wise linear projection
self.conv_pwl = nn.Conv2d(in_channels=mid_chs, out_channels=out_chs, kernel_size=1, stride=1, has_bias=False)
self.bn3 = nn.BatchNorm2d(out_chs, eps=0.001)
def construct(self, x):
"""construct"""
residual = x
# Point-wise expansion
x = self.conv_pw(x)
x = self.bn1(x)
x = self.act1(x)
# Depth-wise convolution
x = self.conv_dw(x)
x = self.bn2(x)
x = self.act2(x)
# Squeeze-and-excitation
x = self.se(x)
# Point-wise linear projection
x = self.conv_pwl(x)
x = self.bn3(x)
if self.has_residual:
# if self.drop_connect_rate > 0.:
# x = x
x += residual
return x
class EfficientNet(nn.Cell):
"""EfficientNet"""
def __init__(self, cfgs, num_classes=1000):
super().__init__()
# setting of inverted residual blocks
self.cfgs = cfgs
stem_size = 32
self.num_classes_ = num_classes
self.num_features_ = 1280
self.conv_stem = nn.Conv2d(in_channels=3, out_channels=stem_size, kernel_size=3, stride=2, has_bias=False)
self.bn1 = nn.BatchNorm2d(stem_size, eps=0.001) #momentum=0.1)
self.act1 = Swish()
in_chs = stem_size
layers = [nn.SequentialCell([DepthwiseSeparableConv(in_chs, 16, 3, 1, se_ratio=4)]),
nn.SequentialCell([InvertedResidual(16, 24, 3, 2, 0, 6, se_ratio=24),
InvertedResidual(24, 24, 3, 1, 1, 6, se_ratio=24)]),
nn.SequentialCell([InvertedResidual(24, 40, 5, 2, 0, 6, se_ratio=24),
InvertedResidual(40, 40, 5, 1, 2, 6, se_ratio=24)]),
nn.SequentialCell([InvertedResidual(40, 80, 3, 2, 0, 6, se_ratio=24),
InvertedResidual(80, 80, 3, 1, 1, 6, se_ratio=24),
InvertedResidual(80, 80, 3, 1, 1, 6, se_ratio=24)]),
nn.SequentialCell([InvertedResidual(80, 112, 5, 1, 2, 6, se_ratio=24),
InvertedResidual(112, 112, 5, 1, 2, 6, se_ratio=24),
InvertedResidual(112, 112, 5, 1, 2, 6, se_ratio=24)]),
nn.SequentialCell([InvertedResidual(112, 192, 5, 2, 0, 6, se_ratio=24),
InvertedResidual(192, 192, 5, 1, 2, 6, se_ratio=24),
InvertedResidual(192, 192, 5, 1, 2, 6, se_ratio=24),
InvertedResidual(192, 192, 5, 1, 2, 6, se_ratio=24)]),
nn.SequentialCell([InvertedResidual(192, 320, 3, 1, 1, 6, se_ratio=24)])
]
self.blocks = nn.SequentialCell(layers)
self.conv_head = nn.Conv2d(in_channels=320, out_channels=self.num_features_, kernel_size=1)
self.bn2 = nn.BatchNorm2d(self.num_features_, eps=0.001) #,momentum=0.1)
self.act2 = Swish()
self.global_pool = AdaptiveAvgPool(output_size=(1, 1))
self.classifier = nn.Dense(self.num_features_, num_classes)
self._initialize_weights()
def construct(self, x):
"""construct"""
x = self.conv_stem(x)
x = self.bn1(x)
x = self.act1(x)
x = self.blocks(x)
x = self.conv_head(x)
x = self.bn2(x)
x = self.act2(x)
x = self.global_pool(x)
x = P.Reshape()(x, (-1, self.num_features_))
x = self.classifier(x)
return x
def _initialize_weights(self):
"""_initialize_weights"""
def init_linear_weight(m):
m.weight.set_data(Tensor(np.random.normal(0, 0.01, m.weight.data.shape).astype("float32")))
if m.bias is not None:
m.bias.set_data(Tensor(np.zeros(m.bias.data.shape, dtype="float32")))
for m in self.cells():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.set_data(Tensor(np.random.normal(0, np.sqrt(2. / n), m.weight.data.shape).astype("float32")))
if m.bias is not None:
m.bias.data.zero_()
m.weight.requires_grad = True
elif isinstance(m, nn.BatchNorm2d):
m.gamma.set_data(Tensor(np.ones(m.gamma.data.shape, dtype="float32")))
m.beta.set_data(Tensor(np.zeros(m.beta.data.shape, dtype="float32")))
elif isinstance(m, nn.Dense):
init_linear_weight(m)
def effnet(**kwargs):
"""
Constructs an EfficientNet model
"""
cfgs = [
# k, t, c, SE, HS, s
[3, 1, 16, 1, 0, 2],
[3, 4.5, 24, 0, 0, 2],
[3, 3.67, 24, 0, 0, 1],
[5, 4, 40, 1, 1, 2],
[5, 6, 40, 1, 1, 1],
[5, 6, 40, 1, 1, 1],
[5, 3, 48, 1, 1, 1],
[5, 3, 48, 1, 1, 1],
[5, 6, 96, 1, 1, 2],
[5, 6, 96, 1, 1, 1],
[5, 6, 96, 1, 1, 1],
]
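# NOTE: cfgs is stored on the model for reference only; the block layout is hard-coded in EfficientNet.__init__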
return EfficientNet(cfgs, **kwargs)

View File

@@ -0,0 +1,38 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""effnet_train_export."""
import sys
import numpy as np
from train_utils import SaveInOut, TrainWrap
from effnet import effnet
import mindspore.common.dtype as mstype
from mindspore import context, Tensor, nn
from mindspore.train.serialization import export
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU", save_graphs=False)
n = effnet(num_classes=10)
loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False)
optimizer = nn.SGD(n.trainable_params(), learning_rate=0.01, momentum=0.9, dampening=0.0, weight_decay=0.0,
nesterov=True, loss_scale=1.0)
net = TrainWrap(n, loss_fn, optimizer)
x = Tensor(np.random.randn(2, 3, 224, 224), mstype.float32)
label = Tensor(np.zeros([2, 10]).astype(np.float32))
export(net, x, label, file_name="mindir/effnet_train", file_format='MINDIR')
if len(sys.argv) > 1:
SaveInOut(sys.argv[1] + "effnet", x, label, n, net)

View File

@@ -0,0 +1,83 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""effnet_tune_train_export."""
import sys
from os import path
import numpy as np
from train_utils import TrainWrap, SaveT
from effnet import effnet
import mindspore.common.dtype as mstype
from mindspore import context, Tensor, nn
from mindspore.train.serialization import export, load_checkpoint
from mindspore.common.parameter import ParameterTuple
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU", save_graphs=False)
class TransferNet(nn.Cell):
def __init__(self, backbone, head):
super().__init__()
self.backbone = backbone
self.head = head
def construct(self, x):
x = self.backbone(x)
x = self.head(x)
return x
CHECKPOINT_WEIGHT_FILE = "efficient_net_b0.ckpt"
if not path.exists(CHECKPOINT_WEIGHT_FILE):
import subprocess
print("weight file is missing, downloading from hub")
url = "https://download.mindspore.cn/model_zoo/official/lite/efficient_net/" + CHECKPOINT_WEIGHT_FILE
subprocess.run(["wget", url], check=True)
BACKBONE = effnet(num_classes=1000)
load_checkpoint(CHECKPOINT_WEIGHT_FILE, BACKBONE)
HEAD = nn.Dense(1000, 10)
HEAD.weight.set_data(Tensor(np.random.normal(
0, 0.1, HEAD.weight.data.shape).astype("float32")))
HEAD.bias.set_data(Tensor(np.zeros(HEAD.bias.data.shape, dtype="float32")))
n = TransferNet(BACKBONE, HEAD)
trainable_weights_list = []
trainable_weights_list.extend(n.head.trainable_params())
trainable_weights = ParameterTuple(trainable_weights_list)
sgd = nn.SGD(trainable_weights, learning_rate=0.01, momentum=0.9,
dampening=0.01, weight_decay=0.0, nesterov=False, loss_scale=1.0)
net = TrainWrap(n, optimizer=sgd, weights=trainable_weights)
BATCH_SIZE = 8
X = Tensor(np.random.randn(BATCH_SIZE, 3, 224, 224), mstype.float32)
label = Tensor(np.zeros([BATCH_SIZE, 10]).astype(np.float32))
export(net, X, label, file_name="mindir/effnet_tune_train", file_format='MINDIR')
if len(sys.argv) > 1:
name_prefix = sys.argv[1] + "effnet_tune"
x_name = name_prefix + "_input1.bin"
SaveT(Tensor(X.asnumpy().transpose(0, 2, 3, 1)), x_name)
l_name = name_prefix + "_input2.bin"
SaveT(label, l_name)
# train network: only the head's parameters are passed to the optimizer, so the backbone stays frozen
n.head.set_train(True)
n.backbone.set_train(False)
net(X, label)
#save Y after training
n.set_train(False)
y = n(X)
y_name = name_prefix + "_output1.bin"
SaveT(y, y_name)

View File

@@ -0,0 +1,39 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""googlenet_train_export."""
import sys
import numpy as np
from train_utils import SaveInOut, TrainWrap
from official.cv.googlenet.src.googlenet import GoogleNet
import mindspore.common.dtype as mstype
from mindspore import context, Tensor, nn
from mindspore.train.serialization import export
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU", save_graphs=False)
n = GoogleNet(num_classes=10)
loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False)
optimizer = nn.SGD(n.trainable_params(), learning_rate=0.01, momentum=0.9, dampening=0.0, weight_decay=5e-4,
nesterov=True, loss_scale=0.9)
net = TrainWrap(n, loss_fn, optimizer)
batch = 2
x = Tensor(np.random.randn(batch, 3, 224, 224), mstype.float32)
label = Tensor(np.zeros([batch, 10]).astype(np.float32))
export(net, x, label, file_name="mindir/googlenet_train", file_format='MINDIR')
if len(sys.argv) > 1:
SaveInOut(sys.argv[1] + "googlenet", x, label, n, net)

View File

@@ -0,0 +1,39 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""lenet_train_export."""
import sys
import numpy as np
from train_utils import SaveInOut, TrainWrap
from official.cv.lenet.src.lenet import LeNet5
import mindspore.common.dtype as mstype
from mindspore import context, Tensor, nn
from mindspore.train.serialization import export
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU", save_graphs=False)
n = LeNet5()
loss_fn = nn.MSELoss()
optimizer = nn.Adam(n.trainable_params(), learning_rate=1e-2, beta1=0.5, beta2=0.7, eps=1e-2, use_locking=True,
use_nesterov=False, weight_decay=0.0, loss_scale=0.3)
net = TrainWrap(n, loss_fn, optimizer)
x = Tensor(np.random.randn(32, 1, 32, 32), mstype.float32)
label = Tensor(np.zeros([32, 10]).astype(np.float32))
export(net, x, label, file_name="mindir/lenet_train", file_format='MINDIR')
if len(sys.argv) > 1:
SaveInOut(sys.argv[1] + "lenet", x, label, n, net, sparse=False)

View File

@@ -0,0 +1,66 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""mini_alexnet."""
import mindspore.nn as nn
from mindspore.ops import operations as P
def conv(in_channels, out_channels, kernel_size, stride=1, padding=0, pad_mode="valid", has_bias=True):
return nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding,
has_bias=has_bias, pad_mode=pad_mode)
def fc_with_initialize(input_channels, out_channels, has_bias=True):
return nn.Dense(input_channels, out_channels, has_bias=has_bias)
class AlexNet(nn.Cell):
"""
Alexnet
"""
def __init__(self, num_classes=10, channel=1, phase='train', include_top=True):
super().__init__()
self.conv1 = conv(channel, 12, 11, stride=2, pad_mode="same", has_bias=True)
self.conv2 = conv(12, 20, 3, pad_mode="same", has_bias=True)
self.relu = P.ReLU()
self.max_pool2d = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode='valid')
self.include_top = include_top
if self.include_top:
dropout_ratio = 0.65
if phase == 'test':
dropout_ratio = 1.0
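# nn.Dropout takes keep_prob in this MindSpore version, so 1.0 disables dropout at test time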
self.flatten = nn.Flatten()
self.fc1 = fc_with_initialize(20*3*3, 1024)
self.fc2 = fc_with_initialize(1024, 1024)
self.fc3 = fc_with_initialize(1024, num_classes)
self.dropout = nn.Dropout(dropout_ratio)
def construct(self, x):
"""define network"""
x = self.conv1(x)
x = self.relu(x)
x = self.max_pool2d(x)
x = self.conv2(x)
x = self.relu(x)
x = self.max_pool2d(x)
if not self.include_top:
return x
x = self.flatten(x)
x = self.fc1(x)
x = self.relu(x)
x = self.dropout(x)
x = self.fc2(x)
x = self.relu(x)
x = self.dropout(x)
x = self.fc3(x)
return x

View File

@@ -0,0 +1,41 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""mini_alexnet_train_export."""
import sys
import numpy as np
from train_utils import SaveInOut, TrainWrap
from mini_alexnet import AlexNet
from mindspore import context, Tensor, nn
from mindspore.train.serialization import export
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU", save_graphs=False)
# Mini alexnet is designed for MNIST data
batch = 2
number_of_classes = 10
n = AlexNet(phase='test')
loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False)
optimizer = nn.Adam(n.trainable_params(), learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-8, use_locking=False,
use_nesterov=False, weight_decay=0.0, loss_scale=1.0)
net = TrainWrap(n, loss_fn, optimizer)
x = Tensor(np.ones([batch, 1, 32, 32]).astype(np.float32) * 0.01)
label = Tensor(np.zeros([batch, number_of_classes]).astype(np.float32))
export(net, x, label, file_name="mindir/mini_alexnet_train", file_format='MINDIR')
if len(sys.argv) > 1:
SaveInOut(sys.argv[1] + "mini_alexnet", x, label, n, net, sparse=False)

View File

@@ -0,0 +1,40 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""mobilenetv1_train_export."""
import sys
import numpy as np
from train_utils import SaveInOut, TrainWrap
from official.cv.mobilenetv1.src.mobilenet_v1 import MobileNetV1
import mindspore.common.dtype as mstype
from mindspore import context, Tensor, nn
from mindspore.train.serialization import export
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU", save_graphs=False)
n = MobileNetV1(10)
loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False)
optimizer = nn.SGD(n.trainable_params(), learning_rate=1e-2, momentum=0.9, dampening=0.1, weight_decay=0.0,
nesterov=False, loss_scale=1.0)
net = TrainWrap(n, loss_fn, optimizer)
batch = 2
x = Tensor(np.random.randn(batch, 3, 224, 224), mstype.float32)
label = Tensor(np.zeros([batch, 10]).astype(np.float32))
export(net, x, label, file_name="mindir/mobilenetv1_train", file_format='MINDIR')
if len(sys.argv) > 1:
SaveInOut(sys.argv[1] + "mobilenetv1", x, label, n, net)

View File

@@ -0,0 +1,42 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""mobilenetv2_train_export."""
import sys
import numpy as np
from train_utils import SaveInOut, TrainWrap
from official.cv.mobilenetv2.src.mobilenetV2 import MobileNetV2Backbone, MobileNetV2Head, mobilenet_v2
import mindspore.common.dtype as mstype
from mindspore import context, Tensor, nn
from mindspore.train.serialization import export
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU", save_graphs=False)
batch = 16
#n = MobileNetV2()
backbone_net = MobileNetV2Backbone()
head_net = MobileNetV2Head(input_channel=backbone_net.out_channels, num_classes=10)
n = mobilenet_v2(backbone_net, head_net)
loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False)
optimizer = nn.Momentum(n.trainable_params(), 0.01, 0.9, use_nesterov=False)
net = TrainWrap(n, loss_fn, optimizer)
x = Tensor(np.random.randn(batch, 3, 224, 224), mstype.float32)
label = Tensor(np.zeros([batch, 10]).astype(np.float32))
export(net, x, label, file_name="mindir/mobilenetv2_train", file_format='MINDIR')
if len(sys.argv) > 1:
SaveInOut(sys.argv[1] + "mobilenetv2", x, label, n, net, sparse=False)

View File

@@ -0,0 +1,39 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""mobilenetv3_train_export."""
import sys
import numpy as np
from train_utils import SaveInOut, TrainWrap
from official.cv.mobilenetv3.src.mobilenetV3 import mobilenet_v3_small
import mindspore.common.dtype as mstype
from mindspore import context, Tensor, nn
from mindspore.train.serialization import export
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU", save_graphs=False)
n = mobilenet_v3_small(num_classes=10)
loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False, reduction='mean')
optimizer = nn.Adam(n.trainable_params(), learning_rate=1e-2, beta1=0.5, beta2=0.7, eps=1e-2, use_locking=True,
use_nesterov=False, weight_decay=0.1, loss_scale=0.3)
net = TrainWrap(n, loss_fn, optimizer)
batch = 2
x = Tensor(np.random.randn(batch, 3, 224, 224), mstype.float32)
label = Tensor(np.zeros([batch, 10]).astype(np.float32))
export(net, x, label, file_name="mindir/mobilenetv3_train", file_format='MINDIR')
if len(sys.argv) > 1:
SaveInOut(sys.argv[1] + "mobilenetv3", x, label, n, net, sparse=False)

View File

@@ -0,0 +1,39 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""nin_train_export."""
import sys
import numpy as np
from train_utils import SaveInOut, TrainWrap
from NetworkInNetwork import NiN
import mindspore.common.dtype as mstype
from mindspore import context, Tensor, nn
from mindspore.train.serialization import export
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU", save_graphs=False)
n = NiN(num_classes=10)
loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False)
optimizer = nn.SGD(n.trainable_params(), learning_rate=0.01, momentum=0.9, dampening=0.0, weight_decay=5e-4,
nesterov=True, loss_scale=0.9)
net = TrainWrap(n, loss_fn, optimizer)
batch = 2
x = Tensor(np.random.randn(batch, 3, 32, 32), mstype.float32)
label = Tensor(np.zeros([batch, 10]).astype(np.float32))
export(net, x, label, file_name="mindir/nin_train", file_format='MINDIR')
if len(sys.argv) > 1:
SaveInOut(sys.argv[1] + "nin", x, label, n, net)

View File

@@ -0,0 +1,39 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""mobilenetv2_train_export."""
import sys
import numpy as np
from train_utils import SaveInOut, TrainWrap
from official.cv.resnet.src.resnet import resnet50
import mindspore.common.dtype as mstype
from mindspore import context, Tensor, nn
from mindspore.train.serialization import export
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU", save_graphs=False)
batch = 4
n = resnet50(class_num=10)
loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False)
optimizer = nn.SGD(n.trainable_params(), learning_rate=0.01, momentum=0.9, dampening=0.0, weight_decay=0.0,
nesterov=True, loss_scale=1.0)
net = TrainWrap(n, loss_fn, optimizer)
x = Tensor(np.random.randn(batch, 3, 224, 224), mstype.float32)
label = Tensor(np.zeros([batch, 10]).astype(np.float32))
export(net, x, label, file_name="mindir/resnet_train", file_format='MINDIR')
if len(sys.argv) > 1:
SaveInOut(sys.argv[1] + "resnet", x, label, n, net)

View File

@@ -0,0 +1,39 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""shufflenetv2_train_export."""
import sys
import numpy as np
from train_utils import SaveInOut, TrainWrap
from official.cv.shufflenetv2.src.shufflenetv2 import ShuffleNetV2
import mindspore.common.dtype as mstype
from mindspore import context, Tensor, nn
from mindspore.train.serialization import export
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU", save_graphs=False)
n = ShuffleNetV2(n_class=10)
loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False)
optimizer = nn.Momentum(n.trainable_params(), 0.01, 0.9, use_nesterov=False)
net = TrainWrap(n, loss_fn, optimizer)
batch = 2
x = Tensor(np.random.randn(batch, 3, 224, 224), mstype.float32)
label = Tensor(np.zeros([batch, 10]).astype(np.float32))
export(net, x, label, file_name="mindir/shufflenetv2_train", file_format='MINDIR')
if len(sys.argv) > 1:
SaveInOut(sys.argv[1] + "shufflenetv2", x, label, n, net)

View File

@@ -0,0 +1,71 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""train_utils."""
from mindspore import nn, Tensor
from mindspore.common.parameter import ParameterTuple
def TrainWrap(net, loss_fn=None, optimizer=None, weights=None):
"""TrainWrap"""
if loss_fn is None:
loss_fn = nn.SoftmaxCrossEntropyWithLogits()
loss_net = nn.WithLossCell(net, loss_fn)
loss_net.set_train()
if weights is None:
weights = ParameterTuple(net.trainable_params())
if optimizer is None:
optimizer = nn.Adam(weights, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-8, use_locking=False,
use_nesterov=False, weight_decay=0.0, loss_scale=1.0)
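# TrainOneStepCell wraps the loss cell with gradient computation and the optimizer update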
train_net = nn.TrainOneStepCell(loss_net, optimizer)
return train_net
def SaveT(t, file):
x = t.asnumpy()
x.tofile(file)
def SaveInOut(name, x, l, net, net_train, sparse=False, epoch=1):
"""SaveInOut"""
x_name = name + "_input1.bin"
if sparse:
x_name = name + "_input2.bin"
SaveT(Tensor(x.asnumpy().transpose(0, 2, 3, 1)), x_name)
l_name = name + "_input2.bin"
if sparse:
l_name = name + "_input1.bin"
SaveT(l, l_name)
net.set_train(False)
y = net(x)
#train network
net.set_train(True)
for i in range(epoch):
net_train(x, l)
net.set_train(False)
y = net(x)
if isinstance(y, tuple):
i = 1
for t in y:
with open(name + "_output" + str(i) + ".bin", 'w') as f:
for j in t.asnumpy().flatten():
f.write(str(j)+' ')
i = i + 1
else:
y_name = name + "_output1.bin"
SaveT(y, y_name)

View File

@@ -0,0 +1,12 @@
mini_alexnet
mobilenetv1
mobilenetv2
mobilenetv3
lenet
effnet
effnet_tune
resnet
googlenet
nin
#shufflenetv2
#densenet

View File

@@ -0,0 +1,97 @@
#!/bin/bash
display_usage()
{
echo "Usage: prepare.sh [-d mindspore_docker] [-r release.tar.gz] [-i]"
echo "Options:"
echo " -d docker where mindspore is installed. If no docker is provided script will use local python"
echo " -r release tarball"
echo " -i create input and output files"
}
checkopts()
{
DOCKER=""
TRAIN_IO=""
while getopts 'd:r:i' opt
do
case "${opt}" in
d)
DOCKER=$OPTARG
;;
r)
TARBALL=$OPTARG
;;
i)
TRAIN_IO="train_io/"
;;
*)
echo "Unknown option ${opt}!"
display_usage
exit 1
esac
done
}
function MS_PRINT_TESTCASE_END_MSG() {
echo -e "-----------------------------------------------------------------------------------------------------------------------------------"
}
function Print_Result() {
MS_PRINT_TESTCASE_END_MSG
while read line; do
arr=("${line}")
if [ ! -z "${arr[0]}" ]; then
printf "%-8s %-10s %-40s %-7s\n" ${arr[0]} ${arr[1]} ${arr[2]} ${arr[3]}
fi
done < $1
MS_PRINT_TESTCASE_END_MSG
}
export_result_file=export_result.txt
echo ' ' > ${export_result_file}
CLOUD_MODEL_ZOO=../../../../model_zoo/
checkopts "$@"
if [ "$TARBALL" == "" ]; then
file=$(ls ../../../../output/mindspore-lite-*-train-linux-x64.tar.gz)
if [ -f ${file} ]; then
TARBALL=${file}
else
echo "release.tar.gz was not found"
display_usage
exit 1
fi
fi
if [ -z "${DOCKER}" ]; then
echo "MindSpore docker was not provided, attempting to run locally"
fi
mkdir -p mindir
if [ ! -z "${TRAIN_IO}" ]; then
mkdir -p ${TRAIN_IO}
fi
while read line; do
model_name=${line}
if [[ $model_name == \#* ]]; then
continue
fi
echo 'exporting' ${model_name}
if [ ! -z "${DOCKER}" ]; then
docker run -w $PWD --runtime=nvidia -v /home/$USER:/home/$USER --privileged=true ${DOCKER} /bin/bash -c "PYTHONPATH=${CLOUD_MODEL_ZOO} python models/${model_name}_train_export.py ${TRAIN_IO} && chmod 444 mindir/${model_name}_train.mindir"
else
PYTHONPATH=${CLOUD_MODEL_ZOO} python models/${model_name}_train_export.py ${TRAIN_IO}
fi
if [ $? = 0 ]; then
export_result='export mindspore '${model_name}'_train_export pass';echo ${export_result} >> ${export_result_file}
else
export_result='export mindspore '${model_name}'_train_export failed';echo ${export_result} >> ${export_result_file}
fi
done < models_train.cfg
Print_Result ${export_result_file}
rm ${export_result_file}
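For reference, a typical invocation of this script (the docker tag and tarball version below are hypothetical) is:

./prepare.sh -d mindspore/mindspore-gpu:1.1.0 -r ../../../../output/mindspore-lite-1.1.0-train-linux-x64.tar.gz -i

Omitting -d runs the exports with the local python; omitting -i skips generation of the train_io input/output binaries.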

View File

@@ -0,0 +1,4 @@
*.mindir
*.ms
msl
package-*

View File

@@ -1,14 +1,17 @@
BASE_DIR=$(realpath ../../../../)
APP:=bin/net_runner
MSLIB:=mindspore-lite
LMDLIB:=-lminddata-lite -ljpeg
LHIAILIB:=-lhiai_ir_build -lhiai_ir -lhiai
MSDIR:=$(realpath package-$(TARGET)/lib)
SRC:=src/net_runner.cc src/dataset.cc src/data_callbacks.cc
SRC:=src/net_runner.cc
OBJ:=$(SRC:.cc=.o)
CFLAGS := -Ofast -std=c++17 \
-I . \
-I ./msl \
-I ./msl/minddata \
-I ./msl/third_party/flatbuffers/include
@@ -16,10 +19,10 @@ ifeq ($(TARGET),arm64)
CXX := ${ANDROID_NDK}/toolchains/llvm/prebuilt/linux-x86_64/bin/clang++
CFLAGS += --target=aarch64-none-linux-android21 --gcc-toolchain=${ANDROID_NDK}/toolchains/llvm/prebuilt/linux-x86_64 --sysroot=${ANDROID_NDK}/toolchains/llvm/prebuilt/linux-x86_64/sysroot -fdata-sections -ffunction-sections
LDFLAGS := --target=aarch64-none-linux-android21 --gcc-toolchain=${ANDROID_NDK}/toolchains/llvm/prebuilt/linux-x86_64 --sysroot=${ANDROID_NDK}/toolchains/llvm/prebuilt/linux-x86_64/sysroot -Wl,--gc-sections
LDFLAGS += -L$(MSDIR) -l$(MSLIB) -pthread -llog -latomic -lm
LDFLAGS += -L$(MSDIR) -l$(MSLIB) $(LMDLIB) -pthread -llog -latomic -lm $(LHIAILIB)
else
CFLAGS += -g
LDFLAGS := -L$(MSDIR) -l$(MSLIB) -lpthread -Wl,-rpath,$(MSDIR)
LDFLAGS := -L$(MSDIR) -l$(MSLIB) $(LMDLIB) -lpthread -Wl,-rpath,$(MSDIR)
endif
LD := ${CXX}

View File

@@ -21,14 +21,13 @@ from mindspore.train.serialization import export
from lenet import LeNet5
from train_utils import TrainWrap
n = LeNet5()
n.set_train()
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU", save_graphs=False)
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU", save_graphs=False)
BATCH_SIZE = 32
x = Tensor(np.ones((BATCH_SIZE, 1, 32, 32)), mstype.float32)
label = Tensor(np.zeros([BATCH_SIZE, 10]).astype(np.float32))
label = Tensor(np.zeros([BATCH_SIZE]).astype(np.int32))
net = TrainWrap(n)
export(net, x, label, file_name="lenet_tod", file_format='MINDIR')

View File

@@ -22,7 +22,7 @@ def TrainWrap(net, loss_fn=None, optimizer=None, weights=None):
TrainWrap
"""
if loss_fn is None:
loss_fn = nn.SoftmaxCrossEntropyWithLogits(reduction='mean')
loss_fn = nn.SoftmaxCrossEntropyWithLogits(reduction='mean', sparse=True)
loss_net = nn.WithLossCell(net, loss_fn)
loss_net.set_train()
if weights is None:
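These two hunks work together: with sparse=True, SoftmaxCrossEntropyWithLogits consumes int32 class-index labels, which is why the exported LeNet's label input changed above from a one-hot float32 tensor to an int32 vector. A minimal sketch of the two label layouts (placeholder values):

import numpy as np
from mindspore import Tensor

BATCH_SIZE = 32
# sparse=False expects one-hot labels: shape (batch, num_classes), float32
onehot_label = Tensor(np.zeros([BATCH_SIZE, 10]).astype(np.float32))
# sparse=True expects class indices: shape (batch,), int32
index_label = Tensor(np.zeros([BATCH_SIZE]).astype(np.int32))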

View File

@@ -79,9 +79,15 @@ cp model/*.ms ${PACKAGE}/model || exit 1
cp scripts/*.sh ${PACKAGE}/
# Copy the shared MindSpore ToD library
tar -xzf ${TARBALL} --wildcards --no-anchored libmindspore-lite.so
tar -xzf ${TARBALL} --wildcards --no-anchored include
tar -xzf ${TARBALL}
mv mindspore-*/lib ${PACKAGE}/
mv mindspore-*/minddata/lib/* ${PACKAGE}/lib/
mv mindspore-*/minddata/third_party/libjpeg-turbo/lib/* ${PACKAGE}/lib/
if [ "${TARGET}" == "arm64" ]; then
tar -xzf ${TARBALL} --wildcards --no-anchored hiai_ddk
mv mindspore-*/third_party/hiai_ddk/lib/* ${PACKAGE}/lib/
fi
rm -rf msl
mkdir msl
mv mindspore-*/* msl/

View File

@@ -15,4 +15,4 @@
# ============================================================================
# a simple tutorial follows; more parameters can be set
LD_LIBRARY_PATH=./lib/ bin/net_runner -f model/lenet_tod_trained_3000.ms -e 0 -d dataset
LD_LIBRARY_PATH=./lib/ bin/net_runner -f model/lenet_tod_trained.ms -e 0 -d dataset

View File

@@ -15,4 +15,4 @@
# ============================================================================
# a simple tutorial follows; more parameters can be set
LD_LIBRARY_PATH=./lib/ bin/net_runner -f model/lenet_tod.ms -e 3000 -d dataset
LD_LIBRARY_PATH=./lib/ bin/net_runner -f model/lenet_tod.ms -e 3 -d dataset

View File

@@ -1,103 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <math.h>
#include <getopt.h>
#include <cstring>
#include <iostream>
#include <fstream>
#include <utility>
#include "src/net_runner.h"
#include "include/context.h"
#include "src/utils.h"
#include "src/data_loader.h"
#include "src/accuracy_monitor.h"
static unsigned int seed = time(NULL);
std::vector<int> FillInputDataUtil(const mindspore::session::TrainLoopCallBackData &cb_data,
const std::vector<DataLabelTuple> &dataset, bool serially) {
static unsigned int idx = 1;
int total_size = dataset.size();
std::vector<int> labels_vec;
auto inputs = cb_data.session_->GetInputs();
char *input_data = reinterpret_cast<char *>(inputs.at(0)->MutableData());
auto labels = reinterpret_cast<float *>(inputs.at(1)->MutableData());
int batch_size = inputs.at(0)->shape()[0];
int num_of_classes = inputs.at(1)->shape()[1];
int data_size = inputs.at(0)->Size() / batch_size;
MS_ASSERT(total_size > 0);
MS_ASSERT(input_data != nullptr);
std::fill(labels, labels + inputs.at(1)->ElementsNum(), 0.f);
for (int i = 0; i < batch_size; i++) {
if (serially) {
idx = ++idx % total_size;
} else {
idx = rand_r(&seed) % total_size;
}
int label = 0;
char *data = nullptr;
std::tie(data, label) = dataset[idx];
std::copy(data, data + data_size, input_data + i * data_size);
labels[i * num_of_classes + label] = 1.0; // Model expects labels in onehot representation
labels_vec.push_back(label);
}
return labels_vec;
}
void DataLoader::StepBegin(const mindspore::session::TrainLoopCallBackData &cb_data) {
FillInputDataUtil(cb_data, ds_->train_data(), false);
}
int AccuracyMonitor::EpochEnd(const mindspore::session::TrainLoopCallBackData &cb_data) {
if ((cb_data.epoch_ + 1) % check_every_n_ != 0) return mindspore::session::RET_CONTINUE;
float accuracy = 0.0;
auto inputs = cb_data.session_->GetInputs();
int batch_size = inputs.at(0)->shape()[0];
int num_of_classes = ds_->num_of_classes();
int tests = ds_->test_data().size() / batch_size;
if (max_steps_ != -1 && tests > max_steps_) tests = max_steps_;
cb_data.session_->Eval();
for (int i = 0; i < tests; i++) {
auto labels = FillInputDataUtil(cb_data, ds_->test_data(), false);
cb_data.session_->RunGraph();
auto outputs = cb_data.session_->GetPredictions();
for (auto it = outputs.begin(); it != outputs.end(); ++it) {
if (it->second->ElementsNum() == batch_size * num_of_classes) {
auto scores = reinterpret_cast<float *>(it->second->MutableData());
for (int b = 0; b < batch_size; b++) {
int max_idx = 0;
float max_score = scores[num_of_classes * b];
for (int c = 1; c < num_of_classes; c++) {
if (scores[num_of_classes * b + c] > max_score) {
max_score = scores[num_of_classes * b + c];
max_idx = c;
}
}
if (labels[b] == max_idx) accuracy += 1.0;
}
break;
}
}
}
accuracy /= static_cast<float>(batch_size * tests);
accuracies_.push_back(std::make_pair(cb_data.epoch_, accuracy));
std::cout << cb_data.epoch_ + 1 << ":\tAccuracy is " << accuracy << std::endl;
cb_data.session_->Train();
return mindspore::session::RET_CONTINUE;
}

View File

@@ -1,150 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/dataset.h"
#include <arpa/inet.h>
#include <map>
#include <iostream>
#include <fstream>
#include <memory>
#include "src/utils.h"
using LabelId = std::map<std::string, int>;
char *ReadFile(const std::string &file, size_t *size) {
MS_ASSERT(size != nullptr);
std::string realPath(file);
std::ifstream ifs(realPath);
if (!ifs.good()) {
std::cerr << "file: " << realPath << " does not exist";
return nullptr;
}
if (!ifs.is_open()) {
std::cerr << "file: " << realPath << " open failed";
return nullptr;
}
ifs.seekg(0, std::ios::end);
*size = ifs.tellg();
std::unique_ptr<char[]> buf(new (std::nothrow) char[*size]);
if (buf == nullptr) {
std::cerr << "malloc buf failed, file: " << realPath;
ifs.close();
return nullptr;
}
ifs.seekg(0, std::ios::beg);
ifs.read(buf.get(), *size);
ifs.close();
return buf.release();
}
DataSet::~DataSet() {
for (auto itr = train_data_.begin(); itr != train_data_.end(); ++itr) {
auto ptr = std::get<0>(*itr);
delete[] ptr;
}
for (auto itr = test_data_.begin(); itr != test_data_.end(); ++itr) {
auto ptr = std::get<0>(*itr);
delete[] ptr;
}
}
int DataSet::Init(const std::string &data_base_directory, database_type type) {
InitializeMNISTDatabase(data_base_directory);
return 0;
}
void DataSet::InitializeMNISTDatabase(std::string dpath) {
num_of_classes_ = 10;
ReadMNISTFile(dpath + "/train/train-images-idx3-ubyte", dpath + "/train/train-labels-idx1-ubyte", &train_data_);
ReadMNISTFile(dpath + "/test/t10k-images-idx3-ubyte", dpath + "/test/t10k-labels-idx1-ubyte", &test_data_);
}
int DataSet::ReadMNISTFile(const std::string &ifile_name, const std::string &lfile_name,
std::vector<DataLabelTuple> *dataset) {
std::ifstream lfile(lfile_name, std::ios::binary);
if (!lfile.is_open()) {
std::cerr << "Cannot open label file " << lfile_name << std::endl;
return 0;
}
std::ifstream ifile(ifile_name, std::ios::binary);
if (!ifile.is_open()) {
std::cerr << "Cannot open data file " << ifile_name << std::endl;
return 0;
}
int magic_number = 0;
lfile.read(reinterpret_cast<char *>(&magic_number), sizeof(magic_number));
magic_number = ntohl(magic_number);
if (magic_number != 2049) {
std::cout << "Invalid MNIST label file!" << std::endl;
return 0;
}
int number_of_labels = 0;
lfile.read(reinterpret_cast<char *>(&number_of_labels), sizeof(number_of_labels));
number_of_labels = ntohl(number_of_labels);
ifile.read(reinterpret_cast<char *>(&magic_number), sizeof(magic_number));
magic_number = ntohl(magic_number);
if (magic_number != 2051) {
std::cout << "Invalid MNIST image file!" << std::endl;
return 0;
}
int number_of_images = 0;
ifile.read(reinterpret_cast<char *>(&number_of_images), sizeof(number_of_images));
number_of_images = ntohl(number_of_images);
int n_rows = 0;
ifile.read(reinterpret_cast<char *>(&n_rows), sizeof(n_rows));
n_rows = ntohl(n_rows);
int n_cols = 0;
ifile.read(reinterpret_cast<char *>(&n_cols), sizeof(n_cols));
n_cols = ntohl(n_cols);
if (number_of_labels != number_of_images) {
std::cout << "number of records in labels and images files does not match" << std::endl;
return 0;
}
int image_size = n_rows * n_cols;
unsigned char labels[number_of_labels];
unsigned char data[image_size];
lfile.read(reinterpret_cast<char *>(labels), number_of_labels);
for (int i = 0; i < number_of_labels; ++i) {
std::unique_ptr<float[]> hwc_bin_image(new (std::nothrow) float[32 * 32]);
ifile.read(reinterpret_cast<char *>(data), image_size);
for (size_t r = 0; r < 32; r++) {
for (size_t c = 0; c < 32; c++) {
if (r < 2 || r > 29 || c < 2 || c > 29)
hwc_bin_image[r * 32 + c] = 0.0;
else
hwc_bin_image[r * 32 + c] = (static_cast<float>(data[(r - 2) * 28 + (c - 2)])) / 255.0;
}
}
DataLabelTuple data_entry = std::make_tuple(reinterpret_cast<char *>(hwc_bin_image.release()), labels[i]);
dataset->push_back(data_entry);
}
return number_of_labels;
}

View File

@@ -1,54 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_EXAMPLES_TRAIN_LENET_SRC_DATASET_H_
#define MINDSPORE_LITE_EXAMPLES_TRAIN_LENET_SRC_DATASET_H_
#include <tuple>
#include <string>
#include <vector>
using DataLabelTuple = std::tuple<char *, int>;
using FileTuple = std::tuple<int, std::string>;
enum database_type { DS_CIFAR10_BINARY = 0, DS_MNIST_BINARY, DS_OTHER };
char *ReadFile(const std::string &file, size_t *size); // utility function
class DataSet {
public:
DataSet() {}
~DataSet();
int Init(const std::string &data_base_directory, database_type type = DS_OTHER);
const std::vector<DataLabelTuple> &train_data() const { return train_data_; }
const std::vector<DataLabelTuple> &test_data() const { return test_data_; }
unsigned int num_of_classes() { return num_of_classes_; }
void set_expected_data_size(unsigned int expected_data_size) { expected_data_size_ = expected_data_size; }
unsigned int expected_data_size() { return expected_data_size_; }
private:
int ReadMNISTFile(const std::string &ifile, const std::string &lfile, std::vector<DataLabelTuple> *dataset);
void InitializeMNISTDatabase(std::string dpath);
std::vector<DataLabelTuple> train_data_;
std::vector<DataLabelTuple> test_data_;
unsigned int num_of_classes_ = 0;
unsigned int expected_data_size_ = 0;
};
#endif // MINDSPORE_LITE_EXAMPLES_TRAIN_LENET_SRC_DATASET_H_

View File

@@ -25,17 +25,37 @@
#include "include/train/loss_monitor.h"
#include "include/train/ckpt_saver.h"
#include "include/train/lr_scheduler.h"
#include "include/train/accuracy_metrics.h"
#include "include/train/classification_train_accuracy_monitor.h"
#include "src/utils.h"
#include "src/data_loader.h"
#include "src/accuracy_monitor.h"
#include "include/datasets.h"
#include "include/vision_lite.h"
#include "include/transforms.h"
using mindspore::dataset::Dataset;
using mindspore::dataset::Mnist;
using mindspore::dataset::TensorOperation;
using mindspore::dataset::vision::Normalize;
using mindspore::lite::AccuracyMetrics;
using mindspore::session::TrainLoopCallBack;
using mindspore::session::TrainLoopCallBackData;
static unsigned int seed = time(NULL);
class Rescaler : public mindspore::session::TrainLoopCallBack {
public:
explicit Rescaler(float scale) : scale_(scale) {
if (scale_ == 0) scale_ = 1.0;
}
void StepBegin(const mindspore::session::TrainLoopCallBackData &cb_data) override {
auto inputs = cb_data.session_->GetInputs();
auto *input_data = reinterpret_cast<float *>(inputs.at(0)->MutableData());
for (int k = 0; k < inputs.at(0)->ElementsNum(); k++) input_data[k] /= scale_;
}
// Definition of callback function after forwarding operator.
private:
float scale_ = 1.0;
};
// Definition of verbose callback function after forwarding operator.
bool after_callback(const std::vector<mindspore::tensor::MSTensor *> &after_inputs,
const std::vector<mindspore::tensor::MSTensor *> &after_outputs,
const mindspore::CallBackParam &call_param) {
@@ -79,46 +99,68 @@ void NetRunner::InitAndFigureInputs() {
session_ = loop_->train_session();
MS_ASSERT(nullptr != session_);
acc_metrics_ = std::shared_ptr<AccuracyMetrics>(new AccuracyMetrics);
loop_->Init({acc_metrics_.get()});
auto inputs = session_->GetInputs();
MS_ASSERT(inputs.size() > 1);
data_index_ = 0;
label_index_ = 1;
batch_size_ = inputs[data_index_]->shape()[0];
data_size_ = inputs[data_index_]->Size() / batch_size_; // in bytes
if (verbose_) {
std::cout << "data size: " << data_size_ << std::endl << "batch size: " << batch_size_ << std::endl;
}
}
float NetRunner::CalculateAccuracy(int max_tests) {
AccuracyMonitor test_am(&ds_, 1, max_tests);
test_am.EpochEnd(TrainLoopCallBackData(true, 0, session_, loop_));
test_ds_ = Mnist(data_dir_ + "/test", "all");
std::shared_ptr<TensorOperation> typecast_f = mindspore::dataset::transforms::TypeCast("float32");
std::shared_ptr<TensorOperation> resize = mindspore::dataset::vision::Resize({32, 32});
test_ds_ = test_ds_->Map({resize, typecast_f}, {"image"});
std::shared_ptr<TensorOperation> typecast = mindspore::dataset::transforms::TypeCast("int32");
test_ds_ = test_ds_->Map({typecast}, {"label"});
test_ds_ = test_ds_->Batch(32, true);
Rescaler rescale(255.0);
loop_->Eval(test_ds_.get(), std::vector<TrainLoopCallBack *>{&rescale});
std::cout << "Eval Accuracy is " << acc_metrics_->Eval() << std::endl;
return 0.0;
}
int NetRunner::InitDB() {
if (data_size_ != 0) ds_.set_expected_data_size(data_size_);
int ret = ds_.Init(data_dir_, DS_MNIST_BINARY);
num_of_classes_ = ds_.num_of_classes();
if (ds_.test_data().size() == 0) {
train_ds_ = Mnist(data_dir_ + "/train", "all");
std::shared_ptr<TensorOperation> typecast_f = mindspore::dataset::transforms::TypeCast("float32");
std::shared_ptr<TensorOperation> resize = mindspore::dataset::vision::Resize({32, 32});
// std::shared_ptr<TensorOperation> rescale_op = Normalize({0.0, 0.0, 0.0}, {255.0, 255.0, 255.0});
// std::shared_ptr<TensorOperation> rescale_op = mindspore::dataset::vision::Rescale(255.0, 0.0);
train_ds_ = train_ds_->Map({resize, typecast_f}, {"image"});
std::shared_ptr<TensorOperation> typecast = mindspore::dataset::transforms::TypeCast("int32");
train_ds_ = train_ds_->Map({typecast}, {"label"});
train_ds_ = train_ds_->Shuffle(2);
train_ds_ = train_ds_->Batch(32, true);
if (verbose_) {
std::cout << "DatasetSize is " << train_ds_->GetDatasetSize() << std::endl;
}
if (train_ds_->GetDatasetSize() == 0) {
std::cout << "No relevant data was found in " << data_dir_ << std::endl;
MS_ASSERT(ds_.test_data().size() != 0);
MS_ASSERT(train_ds_->GetDatasetSize() != 0);
}
return ret;
return 0;
}
int NetRunner::TrainLoop() {
struct mindspore::lite::StepLRLambda step_lr_lambda(100, 0.9);
struct mindspore::lite::StepLRLambda step_lr_lambda(1, 0.9);
mindspore::lite::LRScheduler step_lr_sched(mindspore::lite::StepLRLambda, static_cast<void *>(&step_lr_lambda), 100);
mindspore::lite::LossMonitor lm(100);
// mindspore::lite::ClassificationTrainAccuracyMonitor am(10);
mindspore::lite::ClassificationTrainAccuracyMonitor am(1);
mindspore::lite::CkptSaver cs(1000, std::string("lenet"));
AccuracyMonitor test_am(&ds_, 500, 10);
DataLoader dl(&ds_);
Rescaler rescale(255.0);
loop_->Train(cycles_, std::vector<TrainLoopCallBack *>{&dl, &lm, &test_am, &cs, &step_lr_sched});
loop_->Train(epochs_, train_ds_.get(), std::vector<TrainLoopCallBack *>{&rescale, &lm, &cs, &am, &step_lr_sched});
return 0;
}
@ -131,15 +173,15 @@ int NetRunner::Main() {
CalculateAccuracy();
if (cycles_ > 0) {
auto trained_fn = ms_file_.substr(0, ms_file_.find_last_of('.')) + "_trained_" + std::to_string(cycles_) + ".ms";
if (epochs_ > 0) {
auto trained_fn = ms_file_.substr(0, ms_file_.find_last_of('.')) + "_trained.ms";
session_->SaveToFile(trained_fn);
}
return 0;
}
void NetRunner::Usage() {
std::cout << "Usage: net_runner -f <.ms model file> -d <data_dir> [-c <num of training cycles>] "
std::cout << "Usage: net_runner -f <.ms model file> -d <data_dir> [-e <num of training epochs>] "
<< "[-v (verbose mode)] [-s <save checkpoint every X iterations>]" << std::endl;
}
@ -151,7 +193,7 @@ bool NetRunner::ReadArgs(int argc, char *argv[]) {
ms_file_ = std::string(optarg);
break;
case 'e':
cycles_ = atoi(optarg);
epochs_ = atoi(optarg);
break;
case 'd':
data_dir_ = std::string(optarg);

View File

@ -21,11 +21,16 @@
#include <iomanip>
#include <map>
#include <vector>
#include <memory>
#include <string>
#include "include/train_session.h"
#include "include/train/train_loop.h"
#include "include/train/accuracy_metrics.h"
#include "include/ms_tensor.h"
#include "src/dataset.h"
#include "include/datasets.h"
using mindspore::dataset::Dataset;
using mindspore::lite::AccuracyMetrics;
class NetRunner {
public:
@ -38,26 +43,22 @@ class NetRunner {
void InitAndFigureInputs();
int InitDB();
int TrainLoop();
std::vector<int> FillInputData(const std::vector<DataLabelTuple> &dataset, bool is_train_set = false) const;
float CalculateAccuracy(int max_tests = -1);
float CalculateAccuracy(int max_tests = 0);
float GetLoss() const;
mindspore::tensor::MSTensor *SearchOutputsForSize(size_t size) const;
DataSet ds_;
mindspore::session::TrainSession *session_ = nullptr;
mindspore::session::TrainLoop *loop_ = nullptr;
std::shared_ptr<Dataset> train_ds_;
std::shared_ptr<Dataset> test_ds_;
std::shared_ptr<AccuracyMetrics> acc_metrics_;
std::string ms_file_ = "";
std::string data_dir_ = "";
size_t data_size_ = 0;
size_t batch_size_ = 0;
unsigned int cycles_ = 100;
int data_index_ = 0;
int label_index_ = -1;
int num_of_classes_ = 0;
unsigned int epochs_ = 10;
bool verbose_ = false;
int save_checkpoint_ = 0;
static unsigned int seed_;
};
#endif // MINDSPORE_LITE_EXAMPLES_TRAIN_LENET_SRC_NET_RUNNER_H_

View File

@ -8,7 +8,7 @@ fi
echo "============Exporting=========="
if [ -n "$1" ]; then
DOCKER_IMG=$1
docker run -w $PWD --runtime=nvidia -v /home/$USER:/home/$USER --privileged=true ${DOCKER_IMG} /bin/bash -c "python transfer_learning_export.py; chmod 444 transfer_learning_tod.mindir; rm -rf __pycache__"
docker run -w $PWD --runtime=nvidia -v /home/$USER:/home/$USER --privileged=true ${DOCKER_IMG} /bin/bash -c "python transfer_learning_export.py; chmod 444 transfer_learning_tod*.mindir; rm -rf __pycache__"
else
echo "MindSpore docker was not provided, attempting to run locally"
python transfer_learning_export.py
@ -32,4 +32,6 @@ if [ ! -f "$CONVERTER" ]; then
fi
echo "============Converting========="
LD_LIBRARY_PATH=./ $CONVERTER --fmk=MINDIR --trainModel=true --modelFile=transfer_learning_tod.mindir --outputFile=transfer_learning_tod
pwd
LD_LIBRARY_PATH=./ $CONVERTER --fmk=MINDIR --trainModel=false --modelFile=transfer_learning_tod_backbone.mindir --outputFile=transfer_learning_tod_backbone
LD_LIBRARY_PATH=./ $CONVERTER --fmk=MINDIR --trainModel=true --modelFile=transfer_learning_tod_head.mindir --outputFile=transfer_learning_tod_head

View File

@ -17,9 +17,7 @@
import numpy as np
import mindspore as M
from mindspore.nn import Cell
from mindspore.train.serialization import load_checkpoint
from mindspore.common.parameter import ParameterTuple
from mindspore.train.serialization import export
from mindspore.train.serialization import load_checkpoint, export
from effnet import effnet
from train_utils import TrainWrap
@ -38,26 +36,23 @@ class TransferNet(Cell):
BACKBONE = effnet(num_classes=1000)
load_checkpoint("efficient_net_b0.ckpt", BACKBONE)
HEAD = M.nn.Dense(1000, 10)
HEAD.weight.set_data(M.Tensor(np.random.normal(
0, 0.1, HEAD.weight.data.shape).astype("float32")))
HEAD.bias.set_data(M.Tensor(np.zeros(HEAD.bias.data.shape, dtype="float32")))
n = TransferNet(BACKBONE, HEAD)
trainable_weights_list = []
trainable_weights_list.extend(n.head.trainable_params())
trainable_weights = ParameterTuple(trainable_weights_list)
M.context.set_context(mode=M.context.PYNATIVE_MODE,
device_target="GPU", save_graphs=False)
BATCH_SIZE = 16
X = M.Tensor(np.ones((BATCH_SIZE, 3, 224, 224)), M.float32)
label = M.Tensor(np.zeros([BATCH_SIZE, 10]).astype(np.float32))
export(BACKBONE, X, file_name="transfer_learning_tod_backbone", file_format='MINDIR')
sgd = M.nn.SGD(trainable_weights, learning_rate=0.01, momentum=0.9,
label = M.Tensor(np.zeros([BATCH_SIZE, 10]).astype(np.float32))
HEAD = M.nn.Dense(1000, 10)
HEAD.weight.set_data(M.Tensor(np.random.normal(
0, 0.1, HEAD.weight.data.shape).astype("float32")))
HEAD.bias.set_data(M.Tensor(np.zeros(HEAD.bias.data.shape, dtype="float32")))
sgd = M.nn.SGD(HEAD.trainable_params(), learning_rate=0.01, momentum=0.9,
dampening=0.01, weight_decay=0.0, nesterov=False, loss_scale=1.0)
net = TrainWrap(n, optimizer=sgd, weights=trainable_weights)
export(net, X, label, file_name="transfer_learning_tod", file_format='MINDIR')
net = TrainWrap(HEAD, optimizer=sgd)
backbone_out = M.Tensor(np.zeros([BATCH_SIZE, 1000]).astype(np.float32))
export(net, backbone_out, label, file_name="transfer_learning_tod_head", file_format='MINDIR')
print("Exported")

View File

@ -14,4 +14,4 @@
# limitations under the License.
# ============================================================================
LD_LIBRARY_PATH=./lib/ bin/net_runner -f model/transfer_learning_tod_trained.ms -e 0 -d dataset
LD_LIBRARY_PATH=./lib/ bin/net_runner -b model/transfer_learning_tod_backbone.ms -f model/transfer_learning_tod_head_trained.ms -e 0 -d dataset

View File

@ -14,4 +14,4 @@
# limitations under the License.
# ============================================================================
LD_LIBRARY_PATH=./lib/ bin/net_runner -f model/transfer_learning_tod.ms -e 0 -d dataset
LD_LIBRARY_PATH=./lib/ bin/net_runner -b model/transfer_learning_tod_backbone.ms -f model/transfer_learning_tod_head.ms -e 0 -d dataset

View File

@ -14,4 +14,4 @@
# limitations under the License.
# ============================================================================
LD_LIBRARY_PATH=./lib/ bin/net_runner -f model/transfer_learning_tod.ms -e 100 -d dataset -s 20
LD_LIBRARY_PATH=./lib/ bin/net_runner -b model/transfer_learning_tod_backbone.ms -f model/transfer_learning_tod_head.ms -e 100 -d dataset -s 20

View File

@ -70,7 +70,7 @@ void NetRunner::InitAndFigureInputs() {
context.device_list_[0].device_info_.cpu_device_info_.cpu_bind_mode_ = mindspore::lite::NO_BIND;
context.thread_num_ = 1;
session_ = mindspore::session::TrainSession::CreateSession(ms_file_, &context);
session_ = mindspore::session::TrainSession::CreateTransferSession(ms_backbone_file_, ms_head_file_, &context);
MS_ASSERT(nullptr != session_);
auto inputs = session_->GetInputs();
@ -185,7 +185,8 @@ int NetRunner::TrainLoop() {
if (min_loss > loss) min_loss = loss;
if (save_checkpoint_ != 0 && (i + 1) % save_checkpoint_ == 0) {
auto cpkt_fn = ms_file_.substr(0, ms_file_.find_last_of('.')) + "_trained_" + std::to_string(i + 1) + ".ms";
auto cpkt_fn =
ms_head_file_.substr(0, ms_head_file_.find_last_of('.')) + "_trained_" + std::to_string(i + 1) + ".ms";
session_->SaveToFile(cpkt_fn);
}
@ -211,23 +212,27 @@ int NetRunner::Main() {
std::cout << "accuracy on validation data = " << acc << std::endl;
if (cycles_ > 0) {
auto trained_fn = ms_file_.substr(0, ms_file_.find_last_of('.')) + "_trained.ms";
auto trained_fn = ms_head_file_.substr(0, ms_head_file_.find_last_of('.')) + "_trained.ms";
session_->SaveToFile(trained_fn);
}
return 0;
}
void NetRunner::Usage() {
std::cout << "Usage: net_runner -f <.ms model file> -d <data_dir> [-c <num of training cycles>] "
<< "[-v (verbose mode)] [-s <save checkpoint every X iterations>]" << std::endl;
std::cout << "Usage: net_runner -f <.ms head model file> -b <.ms backbone model file> -d <data_dir> "
<< "[-c <num of training cycles>] [-v (verbose mode)] "
<< "[-s <save checkpoint every X iterations>]" << std::endl;
}
bool NetRunner::ReadArgs(int argc, char *argv[]) {
int opt;
while ((opt = getopt(argc, argv, "f:e:d:s:ihc:v")) != -1) {
while ((opt = getopt(argc, argv, "b:f:e:d:s:ihc:v")) != -1) {
switch (opt) {
case 'b':
ms_backbone_file_ = std::string(optarg);
break;
case 'f':
ms_file_ = std::string(optarg);
ms_head_file_ = std::string(optarg);
break;
case 'e':
cycles_ = atoi(optarg);

View File

@ -45,7 +45,8 @@ class NetRunner {
DataSet ds_;
mindspore::session::TrainSession *session_ = nullptr;
std::string ms_file_ = "";
std::string ms_backbone_file_ = "";
std::string ms_head_file_ = "";
std::string data_dir_ = "";
size_t data_size_ = 0;
size_t batch_size_ = 0;

View File

@ -0,0 +1,48 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_INCLUDE_TRAIN_ACCURACY_METRICS_H_
#define MINDSPORE_LITE_INCLUDE_TRAIN_ACCURACY_METRICS_H_
#include <vector>
#include "include/train/metrics.h"
using mindspore::session::Metrics;
namespace mindspore {
namespace lite {
constexpr int METRICS_CLASSIFICATION = 0;
constexpr int METRICS_MULTILABLE = 1;
class AccuracyMetrics : public Metrics {
public:
explicit AccuracyMetrics(int accuracy_metrics = METRICS_CLASSIFICATION, const std::vector<int> &input_indexes = {1},
const std::vector<int> &output_indexes = {0});
virtual ~AccuracyMetrics() = default;
void Clear() override { total_accuracy_ = total_steps_ = 0.0; }
float Eval() override;
void Update(std::vector<tensor::MSTensor *> inputs, std::vector<tensor::MSTensor *> outputs) override;
protected:
int accuracy_metrics_ = METRICS_CLASSIFICATION;
std::vector<int> input_indexes_ = {1};
std::vector<int> output_indexes_ = {0};
float total_accuracy_ = 0.0;
float total_steps_ = 0.0;
};
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_LITE_INCLUDE_TRAIN_ACCURACY_METRICS_H_
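A minimal usage sketch for AccuracyMetrics, assuming a configured TrainLoop pointer loop_ and an eval dataset test_ds_ as in the lenet example above:

  auto acc = std::make_shared<mindspore::lite::AccuracyMetrics>();
  loop_->Init({acc.get()});  // register the metric with the loop
  loop_->Eval(test_ds_.get(), std::vector<mindspore::session::TrainLoopCallBack *>{});
  std::cout << "accuracy: " << acc->Eval() << std::endl;  // mean accuracy over all eval steps

The default indexes compare model output 0 against input 1, matching the data/label layout used by net_runner.cc.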

View File

@ -13,31 +13,35 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_EXAMPLES_TRAIN_LENET_SRC_ACCURACY_MONITOR_H_
#define MINDSPORE_LITE_EXAMPLES_TRAIN_LENET_SRC_ACCURACY_MONITOR_H_
#ifndef MINDSPORE_LITE_INCLUDE_TRAIN_ACCURACY_MONITOR_H_
#define MINDSPORE_LITE_INCLUDE_TRAIN_ACCURACY_MONITOR_H_
#include <vector>
#include <string>
#include <utility>
#include <unordered_map>
#include "include/train/train_loop.h"
#include "src/dataset.h"
using GraphPoint = std::pair<int, float>;
class AccuracyMonitor : public mindspore::session::TrainLoopCallBack {
namespace mindspore {
namespace lite {
class AccuracyMonitor : public session::TrainLoopCallBack {
public:
explicit AccuracyMonitor(DataSet *dataset, int check_every_n, int max_steps = -1)
explicit AccuracyMonitor(mindspore::dataset::Dataset *dataset, int check_every_n, int max_steps = -1)
: ds_(dataset), check_every_n_(check_every_n), max_steps_(max_steps) {}
void Begin(const session::TrainLoopCallBackData &cb_data) override;
int EpochEnd(const mindspore::session::TrainLoopCallBackData &cb_data) override;
const std::vector<GraphPoint> &GetAccuracyPoints() const { return accuracies_; }
private:
DataSet *ds_;
mindspore::dataset::Dataset *ds_;
std::vector<GraphPoint> accuracies_;
int check_every_n_;
int max_steps_;
};
#endif // MINDSPORE_LITE_EXAMPLES_TRAIN_LENET_SRC_ACCURACY_MONITOR_H_
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_LITE_INCLUDE_TRAIN_ACCURACY_MONITOR_H_

View File

@ -21,6 +21,7 @@
#include <climits>
#include <unordered_map>
#include "include/train/train_loop.h"
#include "include/train/accuracy_metrics.h"
using GraphPoint = std::pair<int, float>;
@ -29,9 +30,12 @@ namespace lite {
class ClassificationTrainAccuracyMonitor : public session::TrainLoopCallBack {
public:
explicit ClassificationTrainAccuracyMonitor(int print_every_n = INT_MAX) : print_every_n_(print_every_n) {}
explicit ClassificationTrainAccuracyMonitor(int print_every_n = INT_MAX,
int accuracy_metrics = METRICS_CLASSIFICATION,
const std::vector<int> &input_indexes = {1},
const std::vector<int> &output_indexes = {0});
virtual ~ClassificationTrainAccuracyMonitor() = default;
void Begin(const session::TrainLoopCallBackData &cb_data) override;
void EpochBegin(const session::TrainLoopCallBackData &cb_data) override;
int EpochEnd(const session::TrainLoopCallBackData &cb_data) override;
@ -40,6 +44,9 @@ class ClassificationTrainAccuracyMonitor : public session::TrainLoopCallBack {
private:
std::vector<GraphPoint> accuracies_;
int accuracy_metrics_ = METRICS_CLASSIFICATION;
std::vector<int> input_indexes_ = {1};
std::vector<int> output_indexes_ = {0};
int print_every_n_ = 0;
};

View File

@ -29,7 +29,16 @@ namespace lite {
class LossMonitor : public session::TrainLoopCallBack {
public:
explicit LossMonitor(int print_every_n = INT_MAX) : print_every_n_(print_every_n) {}
/// \brief Constructor
///
/// \param[in] print_every_n_steps prints the loss to stdout every n steps;
///            0 means never print, INT_MAX prints once per epoch
explicit LossMonitor(int print_every_n_steps = INT_MAX) : print_every_n_(print_every_n_steps) {}
virtual ~LossMonitor() = default;
void Begin(const session::TrainLoopCallBackData &cb_data) override;
void EpochBegin(const session::TrainLoopCallBackData &cb_data) override;

View File

@ -13,22 +13,25 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_EXAMPLES_TRAIN_LENET_SRC_DATA_LOADER_H_
#define MINDSPORE_LITE_EXAMPLES_TRAIN_LENET_SRC_DATA_LOADER_H_
#ifndef MINDSPORE_LITE_INCLUDE_TRAIN_METRICS_H_
#define MINDSPORE_LITE_INCLUDE_TRAIN_METRICS_H_
#include <vector>
#include <string>
#include <utility>
#include <tuple>
#include <unordered_map>
#include "include/train/train_loop.h"
#include "src/dataset.h"
#include "include/ms_tensor.h"
class DataLoader : public mindspore::session::TrainLoopCallBack {
namespace mindspore {
namespace session {
class Metrics {
public:
explicit DataLoader(DataSet *dataset) : ds_(dataset) {}
void StepBegin(const mindspore::session::TrainLoopCallBackData &cb_data) override;
private:
DataSet *ds_;
virtual ~Metrics() = default;
virtual void Clear() {}
virtual float Eval() { return 0.0; }
virtual void Update(std::vector<tensor::MSTensor *> inputs, std::vector<tensor::MSTensor *> outputs) = 0;
};
#endif // MINDSPORE_LITE_EXAMPLES_TRAIN_LENET_SRC_DATA_LOADER_H_
} // namespace session
} // namespace mindspore
#endif // MINDSPORE_LITE_INCLUDE_TRAIN_METRICS_H_
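Metrics is deliberately small: a custom metric only overrides Clear/Eval/Update. An illustrative (hypothetical, not part of this commit) mean-absolute-error metric:

  #include <cmath>
  class MAEMetrics : public mindspore::session::Metrics {
   public:
    void Clear() override { sum_ = steps_ = 0.0f; }
    float Eval() override { return (steps_ > 0.0f) ? sum_ / steps_ : 0.0f; }
    void Update(std::vector<mindspore::tensor::MSTensor *> inputs,
                std::vector<mindspore::tensor::MSTensor *> outputs) override {
      auto *label = reinterpret_cast<float *>(inputs.at(1)->MutableData());
      auto *pred = reinterpret_cast<float *>(outputs.at(0)->MutableData());
      float err = 0.0f;
      for (int i = 0; i < outputs.at(0)->ElementsNum(); i++) err += std::fabs(pred[i] - label[i]);
      sum_ += err / outputs.at(0)->ElementsNum();  // per-step mean absolute error
      steps_ += 1.0f;
    }
   private:
    float sum_ = 0.0f;
    float steps_ = 0.0f;
  };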

View File

@ -18,11 +18,22 @@
#include <vector>
#include <string>
#include <tuple>
#include <climits>
#include <unordered_map>
#include "include/train/train_loop_callback.h"
#include "include/train/metrics.h"
#include "include/train_session.h"
namespace mindspore {
class MSTensor;
namespace dataset {
class Dataset;
using MSTensorVec = std::vector<mindspore::MSTensor>;
} // namespace dataset
using LoadDataFunc = std::function<int(std::vector<tensor::MSTensor *> inputs, dataset::MSTensorVec *dataset_vec)>;
namespace session {
class TrainLoop {
@ -48,6 +59,18 @@ class TrainLoop {
/// \return pointer of the train_session
virtual session::TrainSession *train_session() = 0;
/// \brief Initialize object with metrics
///
/// \param[in] metrics Vector of metric objects
///
/// \return 0 on success or -1 in case of error
virtual int Init(std::vector<mindspore::session::Metrics *> metrics) = 0;
/// \brief Accessor to TrainLoop metric objects
///
/// \return vector of metrics
virtual std::vector<mindspore::session::Metrics *> GetMetrics() = 0;
/// \brief Accessor to the Session KernelCallbacks
///
/// \param[in] before Define a call_back_function to be called before running each node.
@ -59,10 +82,24 @@ class TrainLoop {
/// \brief Performs the training loop
///
/// \param[in] epochs The number of epochs to run
/// \param[in] dataset Pointer to MindData Dataset object
/// \param[in] cbs A vector of TrainLoopCallBack objects
/// \param[in] load_func A function that loads (and may transform) a row from the MindData Dataset into the model inputs
///
/// \return 0 on success or -1 in case of error
virtual int Train(int epochs, std::vector<TrainLoopCallBack *> cbs) = 0;
virtual int Train(int epochs, mindspore::dataset::Dataset *dataset, std::vector<TrainLoopCallBack *> cbs,
LoadDataFunc load_func = nullptr) = 0;
/// \brief Performs a loop over all data in Eval mode
///
/// \param[in] dataset Pointer to MindData Dataset object
/// \param[in] cbs A vector of TrainLoopCallBack objects
/// \param[in] load_func A function that loads (and may transform) a row from the MindData Dataset into the model inputs
/// \param[in] max_steps Maximum number of steps; the default INT_MAX iterates over the entire dataset
///
/// \return 0 on success or -1 in case of error
virtual int Eval(mindspore::dataset::Dataset *dataset, std::vector<TrainLoopCallBack *> cbs,
LoadDataFunc load_func = nullptr, int max_steps = INT_MAX) = 0;
};
} // namespace session
} // namespace mindspore
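Putting the extended interface together, a hedged sketch of one run (mirrors net_runner.cc above; omitting load_func lets the loop perform its default copy of dataset rows into the model inputs):

  mindspore::lite::LossMonitor lm(100);
  Rescaler rescale(255.0);
  loop_->Init({acc_metrics_.get()});
  loop_->Train(epochs_, train_ds_.get(),
               std::vector<mindspore::session::TrainLoopCallBack *>{&rescale, &lm});
  loop_->Eval(test_ds_.get(), std::vector<mindspore::session::TrainLoopCallBack *>{&rescale});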

View File

@ -18,7 +18,6 @@
#include <vector>
#include <string>
#include <tuple>
#include <unordered_map>
#include "include/lite_session.h"
namespace mindspore {
@ -50,6 +49,31 @@ class TrainSession : public session::LiteSession {
/// \return Pointer of MindSpore Lite TrainSession
static TrainSession *CreateSession(const std::string &filename, lite::Context *context, bool train_mode = false);
/// \brief Static method to create a TrainSession object with transfer learning support
///
/// \param[in] model_buf_backbone A buffer that was read from a backbone MS model file
/// \param[in] size_backbone Length of the backbone net buffer
/// \param[in] model_buf_head A buffer that was read from a head MS model file
/// \param[in] size_head Length of the head net buffer
/// \param[in] context Defines the context of the session to be created
/// \param[in] train_mode training mode to initialize Session with
///
/// \return Pointer of MindSpore Lite TrainSession
static TrainSession *CreateTransferSession(const char *model_buf_backbone, size_t size_backbone,
const char *model_buf_head, size_t size_head, lite::Context *context,
bool train_mode = false);
/// \brief Static method to create a transfer learning TrainSession object from files
///
/// \param[in] filename_backbone Filename to read backbone net flatbuffer from
/// \param[in] filename_head Filename to read head net flatbuffer from
/// \param[in] context Defines the context of the session to be created
/// \param[in] train_mode training mode to initialize Session with
///
/// \return Pointer of MindSpore Lite TrainSession
static TrainSession *CreateTransferSession(const std::string &filename_backbone, const std::string &filename_head,
lite::Context *context, bool train_mode = false);
/// \brief Export the trained model into a buffer
///
/// \param[in] buf The buffer to Export into. If equal to nullptr, buf will be allocated
@ -95,13 +119,30 @@ class TrainSession : public session::LiteSession {
/// \return learning rate. 0.0 if no optimizer was found
virtual float GetLearningRate() = 0;
/// \brief Setup training with virtual batches
///
/// \param[in] virtual_batch_multiplier - virtual batch multiplier, use any number < 1 to disable
/// \param[in] lr - learning rate to use for virtual batch, -1 for internal configuration
/// \param[in] momentum - batch norm momentum to use for virtual batch, -1 for internal configuration
/// \return STATUS as an error code of the set operation, STATUS is defined in errorcode.h
virtual int SetupVirtualBatch(int virtual_batch_multiplier, float lr = -1.0f, float momentum = -1.0f) = 0;
/// \brief Get output MindSpore Lite MSTensors of Training model prediction
///
/// \return The map of output tensor name and MindSpore Lite MSTensor.
virtual std::unordered_map<std::string, mindspore::tensor::MSTensor *> GetPredictions() const = 0;
/// \return a vector of output tensors (MindSpore Lite MSTensor).
virtual std::vector<tensor::MSTensor *> GetPredictions() const = 0;
/// \brief Set the substring used to identify loss kernels by name
/// \param[in] loss_name Identification string for loss kernels
void SetLossName(std::string loss_name) { loss_name_ = loss_name; }
protected:
bool train_mode_ = false;
std::string get_loss_name() const { return loss_name_; }
private:
std::string loss_name_ = "_loss_fn";
};
} // namespace session
} // namespace mindspore
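A short usage sketch of the new factory plus the training knobs, with file names taken from the transfer-learning scripts above:

  mindspore::lite::Context context;
  context.thread_num_ = 1;
  auto *session = mindspore::session::TrainSession::CreateTransferSession(
      "model/transfer_learning_tod_backbone.ms", "model/transfer_learning_tod_head.ms", &context);
  if (session != nullptr) {
    session->SetupVirtualBatch(4);  // accumulate gradients over 4 steps; any value < 1 disables
    session->Train();               // head trains; backbone stays inference-only
  }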

View File

@ -17,6 +17,7 @@
#include "nnacl/fp32_grad/arithmetic_grad.h"
#include <string.h>
#include "nnacl/fp32_grad/utils.h"
#include "nnacl/errorcode.h"
void ElementDivNegSquare(const float *nom, const float *denom, float *output, int element_size) {
for (int i = 0; i < element_size; i++) {
@ -30,6 +31,13 @@ void ElementMulAndDivNegSquare(const float *a, const float *b, const float *deno
}
}
int ElementAbsGrad(const float *in1, const float *in2, float *out, int element_size) {
for (int i = 0; i < element_size; i++) {
out[i] = (in1[i] < 0.f) ? -in2[i] : ((in1[i] > 0.f) ? in2[i] : 0);
}
return NNACL_OK;
}
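ElementAbsGrad propagates the upstream gradient through |x| via its subgradient (zero taken at the kink):

  $$\frac{\partial \lvert x \rvert}{\partial x}\,g = \begin{cases} g, & x > 0 \\ -g, & x < 0 \\ 0, & x = 0 \end{cases}$$

so out[i] is in2[i] carrying the sign of in1[i].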
void MaximumByAxes(const float *input0, const float *input1, const float *dy, const int *input0_dims,
const int *input1_dims, const int *dy_dims, float *output0, float *output1, int num_dims) {
int num_output0 = 1;

View File

@ -23,6 +23,7 @@ extern "C" {
#endif
void ElementDivNegSquare(const float *nom, const float *denom, float *output, int element_size);
void ElementMulAndDivNegSquare(const float *a, const float *b, const float *denom, float *output, int element_size);
int ElementAbsGrad(const float *in1, const float *in2, float *out, int element_size);
void MaximumByAxes(const float *input0, const float *input1, const float *dy, const int *input0_dims,
const int *input1_dims, const int *dy_dims, float *output0, float *output1, int num_dims);
void MinimumByAxes(const float *input0, const float *input1, const float *dy, const int *input0_dims,

View File

@ -277,7 +277,8 @@ union PrimitiveType {
IsFinite,
BatchMatMul,
LinSpace,
UniformReal
UniformReal,
AbsGrad
}
enum QuantType: int {

View File

@ -1281,14 +1281,18 @@ table IsFinite {
}
table BatchMatMul {
transpose_a :bool;
transpose_b :bool;
transpose_a :bool;
transpose_b :bool;
}
table LinSpace {
}
table UniformReal {
seed : int;
seed2 : int;
}
table AbsGrad {
transpose_a :bool;
}

View File

@ -94,10 +94,14 @@ if(SUPPORT_TRAIN)
${ANF_SRC}
${CMAKE_CURRENT_SOURCE_DIR}/train/train_populate_parameter.cc
${CMAKE_CURRENT_SOURCE_DIR}/train/train_session.cc
${CMAKE_CURRENT_SOURCE_DIR}/train/transfer_session.cc
${CMAKE_CURRENT_SOURCE_DIR}/train/train_model.cc
${CMAKE_CURRENT_SOURCE_DIR}/train/train_loop.cc
${CMAKE_CURRENT_SOURCE_DIR}/train/train_utils.cc
${CMAKE_CURRENT_SOURCE_DIR}/train/loss_monitor.cc
${CMAKE_CURRENT_SOURCE_DIR}/train/lr_scheduler.cc
${CMAKE_CURRENT_SOURCE_DIR}/train/accuracy_metrics.cc
${CMAKE_CURRENT_SOURCE_DIR}/train/accuracy_monitor.cc
${CMAKE_CURRENT_SOURCE_DIR}/train/classification_train_accuracy_monitor.cc
)
endif()
@ -141,6 +145,10 @@ if(BUILD_MINDDATA STREQUAL "lite")
target_link_libraries(mindspore-lite minddata_eager_mid minddata-lite)
target_link_libraries(mindspore-lite_static minddata_eager_mid)
endif()
if(SUPPORT_TRAIN)
target_link_libraries(mindspore-lite minddata-lite)
endif()
if(PLATFORM_ARM)
set(NDK_STRIP

View File

@ -0,0 +1,69 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/ops/abs_grad.h"
#ifndef PRIMITIVE_WRITEABLE
#include "src/ops/ops_register.h"
#endif
#include "src/ops/arithmetic_self.h"
namespace mindspore {
namespace lite {
#ifdef PRIMITIVE_WRITEABLE
int AbsGrad::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) {
if (this->primitive_ == nullptr) {
this->primitive_ = new (std::nothrow) schema::PrimitiveT;
if (this->primitive_ == nullptr) {
MS_LOG(ERROR) << "new primitiveT failed";
return RET_ERROR;
}
this->primitive_->value.type = schema::PrimitiveType_AbsGrad;
}
if (this->primitive_->value.type != schema::PrimitiveType_AbsGrad) {
MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type;
return RET_ERROR;
}
if (this->primitive_->value.value == nullptr) {
this->primitive_->value.value = new (std::nothrow) schema::AbsGradT();
if (this->primitive_->value.value == nullptr) {
MS_LOG(ERROR) << "new primitiveT value failed";
return RET_ERROR;
}
}
return RET_OK;
}
#else
int AbsGrad::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
MS_ASSERT(primitive != nullptr);
MS_ASSERT(fbb != nullptr);
auto attr = primitive->value_as_AbsGrad();
if (attr == nullptr) {
MS_LOG(ERROR) << "value_as_AbsGrad return nullptr";
return RET_ERROR;
}
auto val_offset = schema::CreateAbsGrad(*fbb);
auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_AbsGrad, val_offset.o);
fbb->Finish(prim_offset);
return RET_OK;
}
PrimitiveC *AbsGradCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<AbsGrad>(primitive); }
Registry AbsGradRegistry(schema::PrimitiveType_AbsGrad, AbsGradCreator);
#endif
} // namespace lite
} // namespace mindspore

View File

@ -0,0 +1,42 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_OPS_ABS_GRAD_H_
#define MINDSPORE_LITE_SRC_OPS_ABS_GRAD_H_
#include <vector>
#include <set>
#include <cmath>
#include "src/ops/primitive_c.h"
namespace mindspore {
namespace lite {
class AbsGrad : public PrimitiveC {
public:
AbsGrad() = default;
~AbsGrad() = default;
#ifdef PRIMITIVE_WRITEABLE
MS_DECLARE_PARENT(AbsGrad, PrimitiveC);
explicit AbsGrad(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override;
#else
int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
#endif
};
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_LITE_SRC_OPS_ABS_GRAD_H_

View File

@ -22,7 +22,30 @@
namespace mindspore {
namespace lite {
#ifndef PRIMITIVE_WRITEABLE
#ifdef PRIMITIVE_WRITEABLE
int FloorDiv::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) {
if (this->primitive_ == nullptr) {
this->primitive_ = new (std::nothrow) schema::PrimitiveT;
if (this->primitive_ == nullptr) {
MS_LOG(ERROR) << "new primitiveT failed";
return RET_ERROR;
}
this->primitive_->value.type = schema::PrimitiveType_FloorDiv;
}
if (this->primitive_->value.type != schema::PrimitiveType_FloorDiv) {
MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type;
return RET_ERROR;
}
if (this->primitive_->value.value == nullptr) {
this->primitive_->value.value = new (std::nothrow) schema::FloorDivT();
if (this->primitive_->value.value == nullptr) {
MS_LOG(ERROR) << "new primitiveT value failed";
return RET_ERROR;
}
}
return RET_OK;
}
#else
int FloorDiv::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
MS_ASSERT(nullptr != primitive);

View File

@ -14,8 +14,8 @@
* limitations under the License.
*/
#ifndef LITE_MINDSPORE_LITE_C_OPS_FLOOR_DIV_H_
#define LITE_MINDSPORE_LITE_C_OPS_FLOOR_DIV_H_
#ifndef MINDSPORE_LITE_SRC_OPS_FLOOR_DIV_H_
#define MINDSPORE_LITE_SRC_OPS_FLOOR_DIV_H_
#include <vector>
#include <set>
@ -31,6 +31,7 @@ class FloorDiv : public Arithmetic {
#ifdef PRIMITIVE_WRITEABLE
MS_DECLARE_PARENT(FloorDiv, Arithmetic);
explicit FloorDiv(schema::PrimitiveT *primitive) : Arithmetic(primitive) {}
int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override;
#else
int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
#endif
@ -38,4 +39,4 @@ class FloorDiv : public Arithmetic {
} // namespace lite
} // namespace mindspore
#endif // LITE_MINDSPORE_LITE_C_OPS_FLOOR_DIV_H_
#endif // MINDSPORE_LITE_SRC_OPS_FLOOR_DIV_H_

View File

@ -40,6 +40,7 @@ Registry LogParameterRegistry(schema::PrimitiveType_Log, PopulateArithmeticSelf)
Registry NegParameterRegistry(schema::PrimitiveType_Neg, PopulateArithmeticSelf);
Registry NegGradParameterRegistry(schema::PrimitiveType_NegGrad, PopulateArithmeticSelf);
Registry LogGradParameterRegistry(schema::PrimitiveType_LogGrad, PopulateArithmeticSelf);
Registry AbsGradParameterRegistry(schema::PrimitiveType_AbsGrad, PopulateArithmeticSelf);
Registry SqrtParameterRegistry(schema::PrimitiveType_Sqrt, PopulateArithmeticSelf);
Registry SquareParameterRegistry(schema::PrimitiveType_Square, PopulateArithmeticSelf);
Registry RsqrtParameterRegistry(schema::PrimitiveType_Rsqrt, PopulateArithmeticSelf);

View File

@ -193,6 +193,7 @@
#include "src/ops/depend.h"
#include "src/ops/flatten_grad.h"
#include "src/ops/log_grad.h"
#include "src/ops/abs_grad.h"
#include "src/ops/sgd.h"
#include "src/ops/adam.h"
#include "src/ops/assign.h"
@ -552,9 +553,11 @@ std::shared_ptr<PrimitiveC> PrimitiveC::Create(const Primitive &prim, const std:
return NewPrimitiveC<Dequant>(prim, inputs, quantType);
} else if (op_type == "Flatten") {
return NewPrimitiveC<Flatten>(prim, inputs, quantType);
} else if (op_type == "FloorDiv") {
return NewPrimitiveC<FloorDiv>(prim, inputs, quantType);
} else if ((op_type == "FusedBatchNorm") || (op_type == "FusedBatchNormEx")) {
return NewPrimitiveC<FusedBatchNorm>(prim, inputs, quantType);
} else if (op_type == "make_tuple") {
} else if ((op_type == "make_tuple") || (op_type == "MakeTuple")) {
return NewPrimitiveC<MakeTuple>(prim, inputs, quantType);
} else if (op_type == "MatMul" || op_type == "BatchMatMul") {
return NewPrimitiveC<MatMul>(prim, inputs, quantType);
@ -582,6 +585,8 @@ std::shared_ptr<PrimitiveC> PrimitiveC::Create(const Primitive &prim, const std:
return NewPrimitiveC<Reduce>(prim, inputs, quantType);
} else if (op_type == "Reshape") {
return NewPrimitiveC<Reshape>(prim, inputs, quantType);
} else if (op_type == "Rsqrt") {
return NewPrimitiveC<Rsqrt>(prim, inputs, quantType);
} else if (op_type == "Sin") {
return NewPrimitiveC<Sin>(prim, inputs, quantType);
} else if (op_type == "Slice") {
@ -739,6 +744,8 @@ std::shared_ptr<PrimitiveC> PrimitiveC::Create(const Primitive &prim, const std:
return NewPrimitiveC<Pad>(prim, inputs, quantType);
} else if (op_type == "StridedSliceGrad") {
return NewPrimitiveC<StridedSliceGrad>(prim, inputs, quantType);
} else if (op_type == "AbsGrad") {
return NewPrimitiveC<AbsGrad>(prim, inputs, quantType);
#else
} else if (op_type == "Conv2DBackpropInput") {
return NewPrimitiveC<DeConv2D>(prim, inputs, quantType);
@ -1097,6 +1104,8 @@ PrimitiveC *PrimitiveC::Create(mindspore::schema::PrimitiveT *primitive) {
return new (std::nothrow) NegGrad(primitive);
case schema::PrimitiveType_LogGrad:
return new (std::nothrow) LogGrad(primitive);
case schema::PrimitiveType_AbsGrad:
return new (std::nothrow) AbsGrad(primitive);
case schema::PrimitiveType_Sgd:
return new (std::nothrow) Sgd(primitive);
case schema::PrimitiveType_Adam:

View File

@ -23,6 +23,28 @@
namespace mindspore {
namespace lite {
#ifdef PRIMITIVE_WRITEABLE
int Rsqrt::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) {
if (this->primitive_ == nullptr) {
this->primitive_ = new (std::nothrow) schema::PrimitiveT;
if (this->primitive_ == nullptr) {
MS_LOG(ERROR) << "new primitiveT failed";
return RET_ERROR;
}
this->primitive_->value.type = schema::PrimitiveType_Rsqrt;
}
if (this->primitive_->value.type != schema::PrimitiveType_Rsqrt) {
MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type;
return RET_ERROR;
}
if (this->primitive_->value.value == nullptr) {
this->primitive_->value.value = new (std::nothrow) schema::RsqrtT();
if (this->primitive_->value.value == nullptr) {
MS_LOG(ERROR) << "new primitiveT value failed";
return RET_ERROR;
}
}
return RET_OK;
}
#else
int Rsqrt::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
MS_ASSERT(nullptr != primitive);

View File

@ -14,8 +14,8 @@
* limitations under the License.
*/
#ifndef LITE_MINDSPORE_LITE_C_OPS_RSQRT_H_
#define LITE_MINDSPORE_LITE_C_OPS_RSQRT_H_
#ifndef MINDSPORE_LITE_SRC_OPS_RSQRT_H_
#define MINDSPORE_LITE_SRC_OPS_RSQRT_H_
#include <vector>
#include <set>
@ -32,6 +32,7 @@ class Rsqrt : public ArithmeticSelf {
#ifdef PRIMITIVE_WRITEABLE
MS_DECLARE_PARENT(Rsqrt, ArithmeticSelf);
explicit Rsqrt(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {}
int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override;
#else
int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
#endif
@ -39,4 +40,4 @@ class Rsqrt : public ArithmeticSelf {
} // namespace lite
} // namespace mindspore
#endif // LITE_MINDSPORE_LITE_C_OPS_RSQRT_H_
#endif // MINDSPORE_LITE_SRC_OPS_RSQRT_H_

View File

@ -236,11 +236,11 @@ int StridedSliceGrad::InferShape(std::vector<lite::Tensor *> inputs, std::vector
shrink_axis_mask_.resize(ndim_);
for (size_t i = 0; i < ndim_; i++) {
begins_mask_.at(i) = static_cast<uint32_t>(GetBeginMask()) & (1 << i);
ends_mask_.at(i) = static_cast<uint32_t>(GetEndMask()) & (1 << i);
ellipsis_mask_.at(i) = static_cast<uint32_t>(GetEllipsisMask()) & (1 << i);
new_axis_mask_.at(i) = static_cast<uint32_t>(GetNewAxisMask()) & (1 << i);
shrink_axis_mask_.at(i) = static_cast<uint32_t>(GetShrinkAxisMask()) & (1 << i);
begins_mask_.at(i) = static_cast<bool>(GetBeginMask() & (1 << i));
ends_mask_.at(i) = static_cast<bool>(GetEndMask() & (1 << i));
ellipsis_mask_.at(i) = static_cast<bool>(GetEllipsisMask() & (1 << i));
new_axis_mask_.at(i) = static_cast<bool>(GetNewAxisMask() & (1 << i));
shrink_axis_mask_.at(i) = static_cast<bool>(GetShrinkAxisMask() & (1 << i));
}
ApplyNewAxisMask();

View File

@ -56,6 +56,9 @@ void BatchnormCPUKernel::FillParam() {
for (size_t i = 0; i < n_dim - 1; i++) {
param->unit_ *= input_shapes[i];
}
if (default_momentum_ < 0.0f) {
default_momentum_ = param->momentum_;
}
}
int BatchnormCPUKernel::InitConstTensor() {
@ -94,5 +97,22 @@ int BatchNormRun(void *cdata, int task_id) {
return ret;
}
int BatchnormCPUKernel::set_momentum(float momentum) {
auto param = reinterpret_cast<BatchNormParameter *>(op_parameter_);
param->momentum_ = momentum;
return RET_OK;
}
float BatchnormCPUKernel::get_momentum() {
auto param = reinterpret_cast<BatchNormParameter *>(op_parameter_);
return param->momentum_;
}
int BatchnormCPUKernel::RestoreDefaultMomentum() {
set_momentum(default_momentum_);
return RET_OK;
}
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_BatchNorm, LiteKernelCreator<BatchnormCPUKernel>)
} // namespace mindspore::kernel
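default_momentum_ caches the parameter's original momentum the first time FillParam() runs, so callers can lower it temporarily (virtual batching makes the effective batch larger, so running statistics should blend more slowly) and restore it afterwards. A hedged sketch of that round trip; bn_kernel and the value 0.01f are illustrative, the real wiring lives in the train session:

  bn_kernel->set_momentum(0.01f);       // hypothetical value: slow down the running-stats update
  // ... run virtual-batch training steps ...
  bn_kernel->RestoreDefaultMomentum();  // back to the momentum cached by FillParam()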

View File

@ -14,8 +14,8 @@
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_BATCHNORM_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_BATCHNORM_H_
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_BATCHNORM_FP32_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_BATCHNORM_FP32_H_
#include <vector>
#include "src/lite_kernel.h"
@ -40,15 +40,19 @@ class BatchnormCPUKernel : public LiteKernel {
int Run() override;
virtual int InitConstTensor();
virtual int DoExecute(int task_id);
virtual int set_momentum(float momentum);
virtual float get_momentum();
virtual int RestoreDefaultMomentum();
protected:
void FillParam();
void FreeMeanAndVariance();
void *mean_ = nullptr;
void *variance_ = nullptr;
float default_momentum_ = -1.0f;
};
int BatchNormRun(void *cdata, int task_id);
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_BATCHNORM_H_
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_BATCHNORM_FP32_H_

View File

@ -233,7 +233,7 @@ int Convolution1x1CPUKernel::Run() {
MS_LOG(ERROR) << "Conv1x1 Malloc pack_input_ error!";
return RET_MEMORY_FAILED;
}
if (IsTrain()) {
if (IsTrain() && is_trainable()) {
PackWeight();
}
@ -283,7 +283,9 @@ void Convolution1x1CPUKernel::PackWeight() {
int Convolution1x1CPUKernel::Eval() {
LiteKernel::Eval();
PackWeight();
if (is_trainable()) {
PackWeight();
}
return RET_OK;
}

View File

@ -55,6 +55,10 @@ class ConvolutionDelegateCPUKernel : public LiteKernel {
LiteKernel::Train();
return conv_kernel_->Train();
}
void set_trainable(bool trainable) override {
LiteKernel::set_trainable(trainable);
return conv_kernel_->set_trainable(trainable);
}
protected:
bool need_free_weight_ = false;

View File

@ -127,7 +127,7 @@ int ConvolutionDepthwise3x3CPUKernel::Run() {
return ret;
}
if (IsTrain()) {
if (IsTrain() && is_trainable()) {
PackWeight();
}
@ -160,7 +160,9 @@ void ConvolutionDepthwise3x3CPUKernel::PackWeight() {
int ConvolutionDepthwise3x3CPUKernel::Eval() {
LiteKernel::Eval();
PackWeight();
if (is_trainable()) {
PackWeight();
}
return RET_OK;
}

View File

@ -105,7 +105,7 @@ int ConvDwRun(void *cdata, int task_id) {
}
int ConvolutionDepthwiseCPUKernel::Run() {
if (IsTrain()) {
if (IsTrain() && is_trainable()) {
PackWeight();
}
@ -132,7 +132,9 @@ void ConvolutionDepthwiseCPUKernel::PackWeight() {
int ConvolutionDepthwiseCPUKernel::Eval() {
LiteKernel::Eval();
PackWeight();
if (is_trainable()) {
PackWeight();
}
return RET_OK;
}

View File

@ -190,7 +190,7 @@ int ConvolutionDepthwiseIndirectCPUKernel::Run() {
packed_input_ = input_ptr;
}
if (IsTrain()) {
if (IsTrain() && is_trainable()) {
PackWeight();
}
@ -224,7 +224,9 @@ void ConvolutionDepthwiseIndirectCPUKernel::PackWeight() {
int ConvolutionDepthwiseIndirectCPUKernel::Eval() {
LiteKernel::Eval();
PackWeight();
if (is_trainable()) {
PackWeight();
}
return RET_OK;
}

View File

@ -146,7 +146,7 @@ int ConvolutionDepthwiseSWCPUKernel::Run() {
return RET_ERROR;
}
if (IsTrain()) {
if (IsTrain() && is_trainable()) {
PackWeight();
}
@ -198,7 +198,9 @@ void ConvolutionDepthwiseSWCPUKernel::PackWeight() {
int ConvolutionDepthwiseSWCPUKernel::Eval() {
LiteKernel::Eval();
PackWeight();
if (is_trainable()) {
PackWeight();
}
return RET_OK;
}

View File

@ -150,7 +150,7 @@ int ConvolutionCPUKernel::Run() {
FreeTmpBuffer();
return RET_ERROR;
}
if (IsTrain()) {
if (IsTrain() && is_trainable()) {
PackWeight();
}
@ -190,7 +190,9 @@ void ConvolutionCPUKernel::PackWeight() {
int ConvolutionCPUKernel::Eval() {
LiteKernel::Eval();
PackWeight();
if (is_trainable()) {
PackWeight();
}
return RET_OK;
}

View File

@ -221,7 +221,7 @@ int ConvolutionWinogradCPUKernel::Run() {
FreeTmpBuffer();
return RET_ERROR;
}
if (IsTrain()) {
if (IsTrain() && is_trainable()) {
InitWeightBias();
}
@ -236,7 +236,9 @@ int ConvolutionWinogradCPUKernel::Run() {
int ConvolutionWinogradCPUKernel::Eval() {
LiteKernel::Eval();
InitWeightBias();
if (is_trainable()) {
InitWeightBias();
}
return RET_OK;
}

View File

@ -32,25 +32,8 @@ namespace mindspore::kernel {
int AdamCPUKernel::ReSize() { return RET_OK; }
int AdamCPUKernel::Execute(int task_id) {
auto weight = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
auto m = reinterpret_cast<float *>(in_tensors_.at(1)->MutableData());
auto v = reinterpret_cast<float *>(in_tensors_.at(2)->MutableData());
auto beta1_power = reinterpret_cast<float *>(in_tensors_.at(3)->MutableData())[0];
auto beta2_power = reinterpret_cast<float *>(in_tensors_.at(4)->MutableData())[0];
auto learning_rate = reinterpret_cast<float *>(in_tensors_.at(5)->MutableData())[0];
auto beta1 = reinterpret_cast<float *>(in_tensors_.at(6)->MutableData())[0];
auto beta2 = reinterpret_cast<float *>(in_tensors_.at(7)->MutableData())[0];
auto eps = reinterpret_cast<float *>(in_tensors_.at(8)->MutableData())[0];
auto gradient = reinterpret_cast<float *>(in_tensors_.at(9)->MutableData());
size_t length = in_tensors_.at(0)->ElementsNum();
size_t stride = UP_DIV(length, thread_count_);
size_t count = MSMIN(stride, length - stride * task_id);
size_t start = stride * task_id;
size_t end = start + count;
int DoAdam(float *m, float *v, float *gradient, float *weight, float beta1, float beta2, float beta1_power,
float beta2_power, float eps, float learning_rate, bool nesterov, size_t start, size_t end) {
if ((1.f - beta1_power) <= 0.0f) {
MS_LOG(ERROR) << "divisor cannot be 0 or below";
return RET_ERROR;
@ -63,8 +46,7 @@ int AdamCPUKernel::Execute(int task_id) {
auto update_lr = learning_rate * std::sqrt(1.f - beta2_power) / (1.f - beta1_power);
const float one_minus_beta1 = 1.f - beta1;
const float one_minus_beta2 = 1.f - beta2;
if (adam_param_->use_nesterov_) { // Nadam
if (nesterov) { // Nadam
for (size_t i = start; i < end; ++i) {
m[i] += (gradient[i] - m[i]) * one_minus_beta1;
v[i] += (gradient[i] * gradient[i] - v[i]) * one_minus_beta2;
@ -80,10 +62,39 @@ int AdamCPUKernel::Execute(int task_id) {
return RET_OK;
}
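For reference, DoAdam is the standard bias-corrected Adam step (Nadam when nesterov is set), with beta1_power and beta2_power carrying $\beta_1^t$ and $\beta_2^t$ across steps:

  $$m \leftarrow m + (1-\beta_1)(g - m), \qquad v \leftarrow v + (1-\beta_2)(g^2 - v)$$
  $$\hat{\alpha} = \alpha \frac{\sqrt{1-\beta_2^t}}{1-\beta_1^t}, \qquad w \leftarrow w - \hat{\alpha}\,\frac{m}{\sqrt{v}+\epsilon} \quad \left(\text{Nadam: } w \leftarrow w - \hat{\alpha}\,\frac{\beta_1 m + (1-\beta_1)\,g}{\sqrt{v}+\epsilon}\right)$$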
int AdamCPUKernel::Execute(int task_id) {
auto weight = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
auto m = reinterpret_cast<float *>(in_tensors_.at(1)->MutableData());
auto v = reinterpret_cast<float *>(in_tensors_.at(2)->MutableData());
auto beta1_power = reinterpret_cast<float *>(in_tensors_.at(3)->MutableData())[0];
auto beta2_power = reinterpret_cast<float *>(in_tensors_.at(4)->MutableData())[0];
auto learning_rate = lr_;
auto beta1 = reinterpret_cast<float *>(in_tensors_.at(6)->MutableData())[0];
auto beta2 = reinterpret_cast<float *>(in_tensors_.at(7)->MutableData())[0];
auto eps = reinterpret_cast<float *>(in_tensors_.at(8)->MutableData())[0];
auto gradient = reinterpret_cast<float *>(in_tensors_.at(9)->MutableData());
size_t length = in_tensors_.at(0)->ElementsNum();
size_t stride = UP_DIV(length, thread_count_);
size_t count = MSMIN(stride, length - stride * task_id);
size_t start = stride * task_id;
size_t end = start + count;
return DoAdam(m, v, gradient, weight, beta1, beta2, beta1_power, beta2_power, eps, learning_rate,
adam_param_->use_nesterov_, start, end);
}
int AdamRun(void *cdata, int task_id) {
MS_ASSERT(cdata != nullptr);
auto Adam_kernel = reinterpret_cast<AdamCPUKernel *>(cdata);
auto error_code = Adam_kernel->Execute(task_id);
auto adam_kernel = reinterpret_cast<AdamCPUKernel *>(cdata);
auto error_code = RET_OK;
if (adam_kernel->get_optimizer_mode() == OptimizerKernel::WeightUpdateMode::VIRTUAL_BATCH) {
error_code = adam_kernel->ExecuteVirtualBatch(task_id);
} else {
error_code = adam_kernel->Execute(task_id);
}
if (error_code != RET_OK) {
MS_LOG(ERROR) << "Adam run error task_id[" << task_id << "] error_code[" << error_code << "]";
return RET_ERROR;
@ -100,18 +111,38 @@ int AdamCPUKernel::Run() {
return RET_OK;
}
int AdamCPUKernel::SetLearningRate(float lr) {
auto learning_rate_tensor = reinterpret_cast<float *>(in_tensors_.at(5)->MutableData());
learning_rate_tensor[0] = lr;
int AdamCPUKernel::Init() {
auto ret = OptimizerKernel::Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Failed to initialize Adam Kernel";
return RET_ERROR;
}
return RET_OK;
}
float AdamCPUKernel::GetLearningRate() {
auto learning_rate_tensor = reinterpret_cast<float *>(in_tensors_.at(5)->MutableData());
return learning_rate_tensor[0];
}
int AdamCPUKernel::OptimizerStep() {
auto weight = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
auto m = reinterpret_cast<float *>(in_tensors_.at(1)->MutableData());
auto v = reinterpret_cast<float *>(in_tensors_.at(2)->MutableData());
auto beta1_power = reinterpret_cast<float *>(in_tensors_.at(3)->MutableData())[0];
auto beta2_power = reinterpret_cast<float *>(in_tensors_.at(4)->MutableData())[0];
auto learning_rate = lr_;
auto beta1 = reinterpret_cast<float *>(in_tensors_.at(6)->MutableData())[0];
auto beta2 = reinterpret_cast<float *>(in_tensors_.at(7)->MutableData())[0];
auto eps = reinterpret_cast<float *>(in_tensors_.at(8)->MutableData())[0];
size_t length = in_tensors_.at(0)->ElementsNum();
int AdamCPUKernel::Init() { return RET_OK; }
int ret = RET_OK;
if (grad_sum_ != nullptr && valid_grad_sum_) {
size_t start = 0;
size_t end = length;
ret = DoAdam(m, v, grad_sum_, weight, beta1, beta2, beta1_power, beta2_power, eps, learning_rate,
adam_param_->use_nesterov_, start, end);
std::fill(grad_sum_, grad_sum_ + length, 0);
OptimizerKernel::OptimizerStep();
}
return ret;
}
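In VIRTUAL_BATCH mode the per-step Execute path is bypassed: ExecuteVirtualBatch only accumulates the incoming gradient into grad_sum_, and OptimizerStep (above) applies a single Adam update with the summed gradient before zeroing the buffer, i.e. for N accumulated micro-batches:

  $$\tilde{g} = \sum_{k=1}^{N} g_k, \qquad (m, v, w) \leftarrow \mathrm{AdamStep}(m, v, w, \tilde{g})$$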
kernel::LiteKernel *CpuAdamFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,

View File

@ -27,16 +27,20 @@ class AdamCPUKernel : public OptimizerKernel {
explicit AdamCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive)
: OptimizerKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) {
: OptimizerKernel(parameter, inputs, outputs, ctx, primitive, 5, 9), thread_count_(ctx->thread_num_) {
adam_param_ = reinterpret_cast<AdamParameter *>(parameter);
}
~AdamCPUKernel() override {}
~AdamCPUKernel() override {
if (grad_sum_ != nullptr) {
context_->allocator->Free(grad_sum_);
grad_sum_ = nullptr;
}
}
int Init() override;
int ReSize() override;
int Run() override;
int SetLearningRate(float lr) override;
float GetLearningRate() override;
int Execute(int task_id);
int OptimizerStep() override;
private:
int thread_count_;

View File

@ -30,20 +30,9 @@ using mindspore::schema::PrimitiveType_ApplyMomentum;
namespace mindspore::kernel {
int ApplyMomentumCPUKernel::ReSize() { return RET_OK; }
int ApplyMomentumCPUKernel::Execute(int task_id) {
auto weight = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
auto accumulate = reinterpret_cast<float *>(in_tensors_.at(1)->MutableData());
float learning_rate = reinterpret_cast<float *>(in_tensors_.at(2)->MutableData())[0];
auto gradient = reinterpret_cast<float *>(in_tensors_.at(3)->MutableData());
float moment = reinterpret_cast<float *>(in_tensors_.at(4)->MutableData())[0];
size_t length = in_tensors_.at(0)->ElementsNum();
size_t stride = UP_DIV(length, thread_count_);
size_t count = MSMIN(stride, length - stride * task_id);
size_t start = stride * task_id;
size_t end = start + count;
if (apply_momentum_param_->use_nesterov_) {
int DoApplyMomentum(float *weight, float *accumulate, float learning_rate, float *gradient, float moment, bool nesterov,
size_t start, size_t end) {
if (nesterov) {
for (size_t i = start; i < end; i++) {
accumulate[i] = accumulate[i] * moment + gradient[i];
weight[i] -= (accumulate[i] * moment + gradient[i]) * learning_rate;
@ -57,10 +46,33 @@ int ApplyMomentumCPUKernel::Execute(int task_id) {
return RET_OK;
}
int ApplyMomentumCPUKernel::Execute(int task_id) {
auto weight = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
auto accumulate = reinterpret_cast<float *>(in_tensors_.at(1)->MutableData());
float learning_rate = lr_;
auto gradient = reinterpret_cast<float *>(in_tensors_.at(3)->MutableData());
float moment = reinterpret_cast<float *>(in_tensors_.at(4)->MutableData())[0];
size_t length = in_tensors_.at(0)->ElementsNum();
size_t stride = UP_DIV(length, thread_count_);
size_t count = MSMIN(stride, length - stride * task_id);
size_t start = stride * task_id;
size_t end = start + count;
DoApplyMomentum(weight, accumulate, learning_rate, gradient, moment, apply_momentum_param_->use_nesterov_, start,
end);
return RET_OK;
}
int ApplyMomentumRun(void *cdata, int task_id) {
MS_ASSERT(cdata != nullptr);
auto applyMomentum_kernel = reinterpret_cast<ApplyMomentumCPUKernel *>(cdata);
auto error_code = applyMomentum_kernel->Execute(task_id);
auto error_code = RET_OK;
if (applyMomentum_kernel->get_optimizer_mode() == OptimizerKernel::WeightUpdateMode::VIRTUAL_BATCH) {
error_code = applyMomentum_kernel->ExecuteVirtualBatch(task_id);
} else {
error_code = applyMomentum_kernel->Execute(task_id);
}
if (error_code != RET_OK) {
MS_LOG(ERROR) << "apply Momentum run error task_id[" << task_id << "] error_code[" << error_code << "]";
return RET_ERROR;
@ -77,17 +89,31 @@ int ApplyMomentumCPUKernel::Run() {
return RET_OK;
}
int ApplyMomentumCPUKernel::Init() { return RET_OK; }
int ApplyMomentumCPUKernel::SetLearningRate(float lr) {
auto learning_rate_tensor = reinterpret_cast<float *>(in_tensors_.at(2)->MutableData());
learning_rate_tensor[0] = lr;
int ApplyMomentumCPUKernel::Init() {
auto ret = OptimizerKernel::Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Failed to initialize Apply Momentum Kernel";
return RET_ERROR;
}
return RET_OK;
}
float ApplyMomentumCPUKernel::GetLearningRate() {
auto learning_rate_tensor = reinterpret_cast<float *>(in_tensors_.at(2)->MutableData());
return learning_rate_tensor[0];
int ApplyMomentumCPUKernel::OptimizerStep() {
auto weight = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
auto accumulate = reinterpret_cast<float *>(in_tensors_.at(1)->MutableData());
float learning_rate = lr_;
float moment = reinterpret_cast<float *>(in_tensors_.at(4)->MutableData())[0];
size_t length = in_tensors_.at(0)->ElementsNum();
if (grad_sum_ != nullptr && valid_grad_sum_) {
size_t start = 0;
size_t end = length;
DoApplyMomentum(weight, accumulate, learning_rate, grad_sum_, moment, apply_momentum_param_->use_nesterov_, start,
end);
std::fill(grad_sum_, grad_sum_ + length, 0);
OptimizerKernel::OptimizerStep();
}
return RET_OK;
}
kernel::LiteKernel *CpuApplyMomentumFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,

View File

@ -27,23 +27,28 @@ class ApplyMomentumCPUKernel : public OptimizerKernel {
explicit ApplyMomentumCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive)
: OptimizerKernel(parameter, inputs, outputs, ctx, primitive),
: OptimizerKernel(parameter, inputs, outputs, ctx, primitive, 2, 3),
thread_count_(ctx->thread_num_),
apply_momentum_param_(nullptr) {
apply_momentum_param_ = reinterpret_cast<ApplyMomentumParameter *>(parameter);
}
~ApplyMomentumCPUKernel() override {}
~ApplyMomentumCPUKernel() override {
if (grad_sum_ != nullptr) {
context_->allocator->Free(grad_sum_);
grad_sum_ = nullptr;
}
}
int Init() override;
int ReSize() override;
int Run() override;
int Execute(int task_id);
int SetLearningRate(float lr) override;
float GetLearningRate() override;
int Run() override;
int OptimizerStep() override;
private:
int thread_count_;
ApplyMomentumParameter *apply_momentum_param_;
};
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_GRAD_APPLY_MOMENTUM_H_

View File

@ -20,11 +20,13 @@
#include "include/errorcode.h"
#include "src/runtime/runtime_api.h"
#include "nnacl/fp32/arithmetic_fp32.h"
#include "nnacl/fp32_grad/arithmetic_grad.h"
using mindspore::kernel::KERNEL_ARCH::kCPU;
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_AbsGrad;
using mindspore::schema::PrimitiveType_LogGrad;
namespace mindspore::kernel {
@ -42,6 +44,9 @@ int ArithmeticSelfGradCPUKernel::Init() {
case PrimitiveType_LogGrad:
self_grad_operation_ = ElementDiv;
break;
case PrimitiveType_AbsGrad:
self_grad_operation_ = ElementAbsGrad;
break;
default:
MS_LOG(ERROR) << "Unsupported type: " << type;
return RET_ERROR;
@ -102,4 +107,5 @@ kernel::LiteKernel *CpuArithmeticSelfGradFp32KernelCreator(const std::vector<lit
}
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_LogGrad, CpuArithmeticSelfGradFp32KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_AbsGrad, CpuArithmeticSelfGradFp32KernelCreator)
} // namespace mindspore::kernel

View File

@ -32,10 +32,68 @@ namespace mindspore::kernel {
int SgdCPUKernel::ReSize() { return RET_OK; }
int DoSgd(float *weight, float *accumulate, float *gradient, float learning_rate, float dampening, float moment,
bool nesterov, size_t start, size_t end) {
if (moment > 0.f) {
if (nesterov) {
for (size_t i = start; i < end; ++i) {
accumulate[i] = accumulate[i] * moment + gradient[i] * (1.f - dampening);
weight[i] -= (accumulate[i] * moment + gradient[i]) * learning_rate;
}
} else {
for (size_t i = start; i < end; ++i) {
accumulate[i] = accumulate[i] * moment + gradient[i] * (1.f - dampening);
weight[i] -= accumulate[i] * learning_rate;
}
}
} else {
for (size_t i = start; i < end; ++i) {
weight[i] -= gradient[i] * learning_rate;
}
}
return RET_OK;
}
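DoSgd is classic SGD with momentum and dampening: with momentum $\mu$, dampening $d$ and learning rate $\alpha$,

  $$a \leftarrow \mu a + (1-d)\,g, \qquad w \leftarrow w - \alpha a \quad (\text{Nesterov: } w \leftarrow w - \alpha(\mu a + g))$$

falling back to plain $w \leftarrow w - \alpha g$ when $\mu = 0$. DoSgdInit below seeds the accumulator with the raw gradient on the very first step and flips stat to mark initialization done.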
int DoSgdInit(float *weight, float *accumulate, float *gradient, float *stat, float learning_rate, float dampening,
float moment, bool nesterov, size_t start, size_t end) {
std::copy(&(gradient[start]), &(gradient[end]), &(accumulate[start]));
if (nesterov) {
for (size_t i = start; i < end; ++i) {
weight[i] -= (accumulate[i] * moment + gradient[i]) * learning_rate;
}
} else {
for (size_t i = start; i < end; ++i) {
weight[i] -= accumulate[i] * learning_rate;
}
}
*stat = 1.0f;
return RET_OK;
}
int SgdCPUKernel::Execute(int task_id) {
auto weight = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
auto accumulate = reinterpret_cast<float *>(in_tensors_.at(3)->MutableData());
float learning_rate = reinterpret_cast<float *>(in_tensors_.at(2)->MutableData())[0];
float learning_rate = lr_;
auto gradient = reinterpret_cast<float *>(in_tensors_.at(1)->MutableData());
float moment = reinterpret_cast<float *>(in_tensors_.at(4)->MutableData())[0];
size_t length = in_tensors_.at(0)->ElementsNum();
size_t stride = UP_DIV(length, thread_count_);
size_t count = MSMIN(stride, length - stride * task_id);
size_t start = stride * task_id;
size_t end = start + count;
DoSgd(weight, accumulate, gradient, learning_rate, sgd_param_->dampening_, moment, sgd_param_->use_nesterov_, start,
end);
return RET_OK;
}
int SgdCPUKernel::ExecuteInit(int task_id) {
auto weight = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
auto accumulate = reinterpret_cast<float *>(in_tensors_.at(3)->MutableData());
float learning_rate = lr_;
auto gradient = reinterpret_cast<float *>(in_tensors_.at(1)->MutableData());
float moment = reinterpret_cast<float *>(in_tensors_.at(4)->MutableData())[0];
auto stat = reinterpret_cast<float *>(in_tensors_.at(5)->MutableData());
@ -47,43 +105,34 @@ int SgdCPUKernel::Execute(int task_id) {
size_t start = stride * task_id;
size_t end = start + count;
if (stat[task_id] > 0) {
stat[task_id] = 0;  // mark this chunk's accumulator as initialized
std::copy(&(gradient[start]), &(gradient[end]), &(accumulate[start]));
if (sgd_param_->use_nesterov_) {
for (size_t i = start; i < end; ++i) {
weight[i] -= (accumulate[i] * moment + gradient[i]) * learning_rate;
}
} else {
for (size_t i = start; i < end; ++i) {
weight[i] -= accumulate[i] * learning_rate;
}
}
} else {
if (moment > 0.f) {
if (sgd_param_->use_nesterov_) {
for (size_t i = start; i < end; ++i) {
accumulate[i] = accumulate[i] * moment + gradient[i] * (1.f - sgd_param_->dampening_);
weight[i] -= (accumulate[i] * moment + gradient[i]) * learning_rate;
}
} else {
for (size_t i = start; i < end; ++i) {
accumulate[i] = accumulate[i] * moment + gradient[i] * (1.f - sgd_param_->dampening_);
weight[i] -= accumulate[i] * learning_rate;
}
}
} else {
for (size_t i = start; i < end; ++i) {
weight[i] -= gradient[i] * learning_rate;
}
}
}
DoSgdInit(weight, accumulate, gradient, stat, learning_rate, sgd_param_->dampening_, moment,
sgd_param_->use_nesterov_, start, end);
return RET_OK;
}
int SgdRun(void *cdata, int task_id) {
auto Sgd_kernel = reinterpret_cast<SgdCPUKernel *>(cdata);
auto error_code = Sgd_kernel->Execute(task_id);
auto sgd_kernel = reinterpret_cast<SgdCPUKernel *>(cdata);
auto error_code = RET_OK;
if (sgd_kernel->get_optimizer_mode() == OptimizerKernel::WeightUpdateMode::VIRTUAL_BATCH) {
error_code = sgd_kernel->ExecuteVirtualBatch(task_id);
} else {
error_code = sgd_kernel->Execute(task_id);
}
if (error_code != RET_OK) {
MS_LOG(ERROR) << "SGD run error task_id[" << task_id << "] error_code[" << error_code << "]";
return RET_ERROR;
}
return RET_OK;
}
int SgdRunInit(void *cdata, int task_id) {
auto sgd_kernel = reinterpret_cast<SgdCPUKernel *>(cdata);
auto error_code = RET_OK;
if (sgd_kernel->get_optimizer_mode() == OptimizerKernel::WeightUpdateMode::VIRTUAL_BATCH) {
error_code = sgd_kernel->ExecuteVirtualBatch(task_id);
} else {
error_code = sgd_kernel->ExecuteInit(task_id);
}
if (error_code != RET_OK) {
MS_LOG(ERROR) << "SGD run error task_id[" << task_id << "] error_code[" << error_code << "]";
return RET_ERROR;
@ -92,7 +141,13 @@ int SgdRun(void *cdata, int task_id) {
}
int SgdCPUKernel::Run() {
int error_code = ParallelLaunch(this->context_->thread_pool_, SgdRun, this, thread_count_);
auto stat = reinterpret_cast<float *>(in_tensors_.at(5)->MutableData());
auto error_code = RET_OK;
if (*stat > 0.0f) {
error_code = ParallelLaunch(this->context_->thread_pool_, SgdRunInit, this, thread_count_);
} else {
error_code = ParallelLaunch(this->context_->thread_pool_, SgdRun, this, thread_count_);
}
if (error_code != RET_OK) {
MS_LOG(ERROR) << "SGD function error error_code[" << error_code << "]";
return RET_ERROR;
@ -101,13 +156,6 @@ int SgdCPUKernel::Run() {
}
int SgdCPUKernel::Init() {
// Only for test with uninitialized Data
size_t elem_num = in_tensors_.at(0)->ElementsNum();
auto accumulate = reinterpret_cast<float *>(in_tensors_.at(3)->MutableData());
for (size_t i = 0; i < elem_num; i++) {
accumulate[i] = 0.0;
}
if (sgd_param_->dampening_ < 0.0f) {
MS_LOG(ERROR) << "dampening should be at least 0.0";
return RET_ERROR;
@ -117,19 +165,37 @@ int SgdCPUKernel::Init() {
MS_LOG(ERROR) << "If use nesterov, dampening must equal to 0.0";
return RET_ERROR;
}
auto ret = OptimizerKernel::Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Failed to initialize Sgd Kernel";
return RET_ERROR;
}
return RET_OK;
}
int SgdCPUKernel::SetLearningRate(float lr) {
auto learning_rate_tensor = reinterpret_cast<float *>(in_tensors_.at(2)->MutableData());
learning_rate_tensor[0] = lr;
return RET_OK;
}
int SgdCPUKernel::OptimizerStep() {
auto weight = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
auto accumulate = reinterpret_cast<float *>(in_tensors_.at(3)->MutableData());
float learning_rate = lr_;
auto stat = reinterpret_cast<float *>(in_tensors_.at(5)->MutableData());
float moment = reinterpret_cast<float *>(in_tensors_.at(4)->MutableData())[0];
size_t length = in_tensors_.at(0)->ElementsNum();
float SgdCPUKernel::GetLearningRate() {
auto learning_rate_tensor = reinterpret_cast<float *>(in_tensors_.at(2)->MutableData());
return learning_rate_tensor[0];
if (grad_sum_ != nullptr && valid_grad_sum_) {
size_t start = 0;
size_t end = length;
if (*stat > 0) {
DoSgd(weight, accumulate, grad_sum_, learning_rate, sgd_param_->dampening_, moment, sgd_param_->use_nesterov_,
start, end);
} else {
DoSgdInit(weight, accumulate, grad_sum_, stat, learning_rate, sgd_param_->dampening_, moment,
sgd_param_->use_nesterov_, start, end);
}
std::fill(grad_sum_, grad_sum_ + length, 0);
OptimizerKernel::OptimizerStep();
}
return RET_OK;
}
kernel::LiteKernel *CpuSgdFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,

View File

@ -27,18 +27,23 @@ class SgdCPUKernel : public OptimizerKernel {
explicit SgdCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive)
: OptimizerKernel(parameter, inputs, outputs, ctx, primitive),
: OptimizerKernel(parameter, inputs, outputs, ctx, primitive, 2, 1),
thread_count_(ctx->thread_num_),
sgd_param_(nullptr) {
sgd_param_ = reinterpret_cast<SgdParameter *>(parameter);
}
~SgdCPUKernel() override {}
~SgdCPUKernel() override {
if (grad_sum_ != nullptr) {
context_->allocator->Free(grad_sum_);
grad_sum_ = nullptr;
}
}
int Init() override;
int ReSize() override;
int Run() override;
int ExecuteInit(int task_id);
int Execute(int task_id);
int SetLearningRate(float lr) override;
float GetLearningRate() override;
int OptimizerStep() override;
private:
int thread_count_;

View File

@ -136,9 +136,13 @@ int StridedSliceGradCPUKernel::Execute(int task_id) {
auto input = in_tensors_.at(0);
auto output = out_tensors_.at(0);
MS_ASSERT(output);
int *po = output_shape_.data();
auto ret = DoStridedSliceGrad(reinterpret_cast<float *>(input->MutableData()),
reinterpret_cast<float *>(output->MutableData()), po, param_);
auto dx = reinterpret_cast<float *>(output->MutableData());
auto dy = reinterpret_cast<float *>(input->MutableData());
std::fill(dx, dx + output->ElementsNum(), 0.f);
auto ret = DoStridedSliceGrad(dy, dx, po, param_);
if (ret != RET_OK) {
MS_LOG(ERROR) << "StridedSliceGrad error error_code[" << ret << "]";
return RET_ERROR;
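Note: the std::fill of dx before DoStridedSliceGrad is what makes this rewrite correct; the gradient kernel scatters dy into a generally larger output, so elements not covered by the slice must read as zero rather than as stale allocator memory.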

View File

@ -0,0 +1,71 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "include/train/accuracy_metrics.h"
#include "include/errorcode.h"
#include "src/common/utils.h"
#include "src/tensor.h"
#include "src/train/train_utils.h"
namespace mindspore {
namespace lite {
AccuracyMetrics::AccuracyMetrics(int accuracy_metrics, const std::vector<int> &input_indexes,
const std::vector<int> &output_indexes)
: Metrics() {
if (input_indexes.size() == output_indexes.size()) {
input_indexes_ = input_indexes;
output_indexes_ = output_indexes;
} else {
MS_LOG(WARNING) << "input to output mapping vectors sizes do not match";
}
if (accuracy_metrics != METRICS_CLASSIFICATION) {
MS_LOG(WARNING) << "Only classification metrics is supported";
} else {
accuracy_metrics_ = accuracy_metrics;
}
}
void AccuracyMetrics::Update(std::vector<tensor::MSTensor *> inputs, std::vector<tensor::MSTensor *> outputs) {
for (unsigned int i = 0; i < input_indexes_.size(); i++) {
if ((inputs.size() <= static_cast<unsigned int>(input_indexes_[i])) ||
(outputs.size() <= static_cast<unsigned int>(output_indexes_[i]))) {
MS_LOG(WARNING) << "indices " << input_indexes_[i] << "/" << output_indexes_[i]
<< " is outside of input/output range";
return;
}
float accuracy = 0.0;
if (inputs.at(input_indexes_[i])->data_type() == kNumberTypeInt32) {
accuracy = CalculateSparseClassification(inputs.at(input_indexes_[i]), outputs.at(output_indexes_[i]));
} else {
accuracy = CalculateOneHotClassification(inputs.at(input_indexes_[i]), outputs.at(output_indexes_[i]));
}
total_accuracy_ += accuracy;
total_steps_ += 1.0;
}
}
float AccuracyMetrics::Eval() {
if (total_steps_ == 0.0) {
MS_LOG(WARNING) << "Accuary can not be calculated, because the number of samples is 0.";
return 0.0;
}
return (total_accuracy_ / total_steps_);
}
} // namespace lite
} // namespace mindspore
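A minimal usage sketch of the new metric (hypothetical driver code; in practice TrainLoop::Eval calls Update for you, and the index lists {1}/{0} assume labels arrive as model input 1 and logits as prediction 0):
// Sketch only: session setup and batch loading are elided.
mindspore::lite::AccuracyMetrics acc(mindspore::lite::METRICS_CLASSIFICATION, {1}, {0});
int num_eval_batches = 100;                        // assumed
for (int step = 0; step < num_eval_batches; ++step) {
  LoadBatch(session->GetInputs());                 // hypothetical helper
  session->RunGraph();
  acc.Update(session->GetInputs(), session->GetPredictions());
}
float mean_accuracy = acc.Eval();                  // total_accuracy_ / total_steps_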

View File

@ -0,0 +1,45 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "include/train/accuracy_monitor.h"
#include <sys/stat.h>
#include <algorithm>
#include <utility>
#include <vector>
#include <iostream>
#include <fstream>
#include <memory>
#include "include/errorcode.h"
#include "include/train/train_loop.h"
#include "src/common/utils.h"
#include "src/tensor.h"
namespace mindspore {
namespace lite {
void AccuracyMonitor::Begin(const session::TrainLoopCallBackData &cb_data) {
if (cb_data.epoch_ == 0) accuracies_.clear();
}
int AccuracyMonitor::EpochEnd(const session::TrainLoopCallBackData &cb_data) {
if ((cb_data.epoch_ + 1) % check_every_n_ == 0) cb_data.loop_->Eval(ds_, {}, nullptr, max_steps_);
accuracies_.push_back(std::make_pair(cb_data.epoch_, 0.0));
return mindspore::session::RET_CONTINUE;
}
} // namespace lite
} // namespace mindspore

View File

@ -16,28 +16,32 @@
#include "include/train/classification_train_accuracy_monitor.h"
#include <sys/stat.h>
#include <algorithm>
#include <utility>
#include <vector>
#include <iostream>
#include <fstream>
#include <memory>
#include "include/errorcode.h"
#include "include/train_session.h"
#include "src/common/utils.h"
#include "src/tensor.h"
#include "src/train/loss_kernel.h"
#include "src/train/optimizer_kernel.h"
#include "src/sub_graph_kernel.h"
#include "src/train/train_populate_parameter.h"
#include "src/runtime/runtime_api.h"
#include "src/executor.h"
#include "src/kernel_registry.h"
#include "src/runtime/kernel/arm/fp32_grad/convolution.h"
#include "src/train/train_utils.h"
namespace mindspore {
namespace lite {
ClassificationTrainAccuracyMonitor::ClassificationTrainAccuracyMonitor(int print_every_n, int accuracy_metrics,
const std::vector<int> &input_indexes,
const std::vector<int> &output_indexes) {
if (input_indexes.size() == output_indexes.size()) {
input_indexes_ = input_indexes;
output_indexes_ = output_indexes;
} else {
MS_LOG(WARNING) << "input to output mapping vectors sizes do not match";
}
if (accuracy_metrics != METRICS_CLASSIFICATION) {
MS_LOG(WARNING) << "Only classification metrics is supported";
} else {
accuracy_metrics_ = accuracy_metrics;
}
print_every_n_ = print_every_n;
}
void ClassificationTrainAccuracyMonitor::Begin(const session::TrainLoopCallBackData &cb_data) {
if (cb_data.epoch_ == 0) accuracies_.clear();
}
@ -51,9 +55,10 @@ void ClassificationTrainAccuracyMonitor::EpochBegin(const session::TrainLoopCall
}
int ClassificationTrainAccuracyMonitor::EpochEnd(const session::TrainLoopCallBackData &cb_data) {
if (cb_data.step_ > 0) accuracies_.at(cb_data.epoch_).second /= static_cast<float>(cb_data.step_);
if (cb_data.step_ > 0) accuracies_.at(cb_data.epoch_).second /= static_cast<float>(cb_data.step_ + 1);
if ((cb_data.epoch_ + 1) % print_every_n_ == 0) {
std::cout << cb_data.epoch_ + 1 << ":\tTraining Accuracy is " << accuracies_.at(cb_data.epoch_).second << std::endl;
std::cout << "Epoch (" << cb_data.epoch_ + 1 << "):\tTraining Accuracy is " << accuracies_.at(cb_data.epoch_).second
<< std::endl;
}
return mindspore::session::RET_CONTINUE;
}
@ -61,37 +66,22 @@ int ClassificationTrainAccuracyMonitor::EpochEnd(const session::TrainLoopCallBac
void ClassificationTrainAccuracyMonitor::StepEnd(const session::TrainLoopCallBackData &cb_data) {
auto inputs = cb_data.session_->GetInputs();
auto outputs = cb_data.session_->GetPredictions();
auto labels = reinterpret_cast<float *>(inputs.at(1)->MutableData());
for (auto it = outputs.begin(); it != outputs.end(); ++it) {
if (it->second->ElementsNum() == inputs.at(1)->ElementsNum()) {
int batch_size = inputs.at(1)->shape().at(0);
int num_of_classes = inputs.at(1)->shape().at(1);
auto predictions = reinterpret_cast<float *>(it->second->MutableData());
float accuracy = 0.0;
for (int b = 0; b < batch_size; b++) {
int label = 0;
int max_idx = 0;
float max_label_score = labels[num_of_classes * b];
float max_score = predictions[num_of_classes * b];
for (int c = 1; c < num_of_classes; c++) {
if (predictions[num_of_classes * b + c] > max_score) {
max_score = predictions[num_of_classes * b + c];
max_idx = c;
}
if (labels[num_of_classes * b + c] > max_label_score) {
max_label_score = labels[num_of_classes * b + c];
label = c;
}
}
if (label == max_idx) accuracy += 1.0;
}
accuracy /= static_cast<float>(batch_size);
accuracies_.at(cb_data.epoch_).second = accuracy;
float accuracy = 0.0;
for (unsigned int i = 0; i < input_indexes_.size(); i++) {
if ((inputs.size() <= static_cast<unsigned int>(input_indexes_[i])) ||
(outputs.size() <= static_cast<unsigned int>(output_indexes_[i]))) {
MS_LOG(WARNING) << "indices " << input_indexes_[i] << "/" << output_indexes_[i]
<< " is outside of input/output range";
return;
}
if (inputs.at(input_indexes_[i])->data_type() == kNumberTypeInt32) {
accuracy += CalculateSparseClassification(inputs.at(input_indexes_[i]), outputs.at(output_indexes_[i]));
} else {
accuracy += CalculateOneHotClassification(inputs.at(input_indexes_[i]), outputs.at(output_indexes_[i]));
}
}
MS_LOG(WARNING) << "Model does not have a loss output tensor of size 1";
accuracies_.at(cb_data.epoch_).second += accuracy;
}
} // namespace lite

View File

@ -20,9 +20,6 @@
#include <utility>
#include <vector>
#include <iostream>
#include <fstream>
#include <memory>
#include "include/errorcode.h"
#include "include/train_session.h"
#include "src/common/utils.h"
#include "src/tensor.h"
@ -43,9 +40,9 @@ void LossMonitor::EpochBegin(const session::TrainLoopCallBackData &cb_data) {
}
int LossMonitor::EpochEnd(const session::TrainLoopCallBackData &cb_data) {
if (cb_data.step_ > 0) losses_.at(cb_data.epoch_).second /= static_cast<float>(cb_data.step_);
if ((cb_data.epoch_ + 1) % print_every_n_ == 0) {
std::cout << cb_data.epoch_ + 1 << ":\tLoss is " << losses_.at(cb_data.epoch_).second << std::endl;
if (cb_data.step_ > 0) losses_.at(cb_data.epoch_).second /= static_cast<float>(cb_data.step_ + 1);
if (print_every_n_ > 0) {
std::cout << "Epoch (" << cb_data.epoch_ + 1 << "):\tLoss is " << losses_.at(cb_data.epoch_).second << std::endl;
}
return mindspore::session::RET_CONTINUE;
}
@ -56,6 +53,8 @@ void LossMonitor::StepEnd(const session::TrainLoopCallBackData &cb_data) {
if (it->second->ElementsNum() == 1) {
auto loss = reinterpret_cast<float *>(it->second->MutableData());
losses_.at(cb_data.epoch_).second += loss[0];
if ((cb_data.step_ + 1) % print_every_n_ == 0)
std::cout << cb_data.epoch_ + 1 << "." << cb_data.step_ + 1 << ":\tLoss is " << loss[0] << std::endl;
return;
}
}

View File

@ -17,6 +17,10 @@
#define MINDSPORE_LITE_SRC_TRAIN_OPTIMIZER_KERNEL_H_
#include <vector>
#include "src/lite_kernel.h"
#include "include/errorcode.h"
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
namespace mindspore::kernel {
class OptimizerKernel : public LiteKernel {
@ -24,11 +28,88 @@ class OptimizerKernel : public LiteKernel {
OptimizerKernel() = default;
OptimizerKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
const lite::PrimitiveC *primitive)
: LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
const lite::PrimitiveC *primitive, int lr_idx, int grad_idx)
: LiteKernel(parameter, inputs, outputs, ctx, primitive), lr_idx_(lr_idx), grad_idx_(grad_idx) {}
~OptimizerKernel() = default;
virtual int SetLearningRate(float lr) = 0;
virtual float GetLearningRate() = 0;
enum class WeightUpdateMode { NORMAL, VIRTUAL_BATCH };
WeightUpdateMode get_optimizer_mode() { return weightUpdateMod_; }
int Init() override {
default_lr_ = reinterpret_cast<float *>(in_tensors_.at(lr_idx_)->MutableData())[0];
lr_ = default_lr_;
return RET_OK;
}
int SetLearningRate(float lr) {
lr_ = lr;
return RET_OK;
}
float GetLearningRate() { return lr_; }
int RestoreDefaultLearningRate() {
SetLearningRate(default_lr_);
return RET_OK;
}
int SetOptimizerMode(WeightUpdateMode mod) {
if (mod == WeightUpdateMode::VIRTUAL_BATCH) {
if (grad_sum_ != nullptr) {
context_->allocator->Free(grad_sum_);
grad_sum_ = nullptr;
}
size_t size = in_tensors_.at(grad_idx_)->Size();
size_t elem_num = in_tensors_.at(grad_idx_)->ElementsNum();
grad_sum_ = reinterpret_cast<float *>(context_->allocator->Malloc(size));
if (grad_sum_ == nullptr) {
MS_LOG(ERROR) << "failed to malloc grad sum tensor, size=" << size;
return RET_ERROR;
}
valid_grad_sum_ = false;
std::fill(grad_sum_, grad_sum_ + elem_num, 0);
} else {
if (grad_sum_ != nullptr) {
context_->allocator->Free(grad_sum_);
grad_sum_ = nullptr;
}
}
return RET_OK;
}
int ExecuteVirtualBatch(int task_id) {
auto gradient = reinterpret_cast<float *>(in_tensors_.at(grad_idx_)->MutableData());
size_t length = in_tensors_.at(grad_idx_)->ElementsNum();
size_t stride = UP_DIV(length, context_->thread_num_);
size_t count = MSMIN(stride, length - stride * task_id);
size_t start = stride * task_id;
size_t end = start + count;
for (size_t i = start; i < end; ++i) {
grad_sum_[i] += gradient[i];
}
valid_grad_sum_ = true;
return RET_OK;
}
virtual int OptimizerStep() {
valid_grad_sum_ = false;
return RET_OK;
}
int Eval() override { return OptimizerStep(); }
protected:
float default_lr_ = 0.0f;
float lr_ = 0.0f;
int lr_idx_ = 0;
int grad_idx_ = 0;
float *grad_sum_ = nullptr;
bool valid_grad_sum_ = false;
private:
WeightUpdateMode weightUpdateMod_ = WeightUpdateMode::NORMAL;
};
} // namespace mindspore::kernel
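Pulling the base class together, the virtual-batch protocol looks roughly like this (a sketch of the call sequence; in this patch it is driven by TrainSession::RunGraph rather than by user code):
// 1. Switching mode allocates a zeroed grad_sum_ for the gradient tensor.
optimizer->SetOptimizerMode(OptimizerKernel::WeightUpdateMode::VIRTUAL_BATCH);
// 2. On each RunGraph the kernel runner sees VIRTUAL_BATCH and calls
//    ExecuteVirtualBatch(task_id), which only accumulates into grad_sum_.
// 3. Every virtual_batch_multiplier micro-batches, the session applies the
//    summed gradient to the weights and clears it:
optimizer->OptimizerStep();  // overridden per kernel, e.g. SgdCPUKernel above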

View File

@ -16,28 +16,19 @@
#include "src/train/train_loop.h"
#include <sys/stat.h>
#include <algorithm>
#include <utility>
#include <vector>
#include <iostream>
#include <fstream>
#include <memory>
#include <algorithm>
#include "include/errorcode.h"
#include "include/train_session.h"
#include "src/common/utils.h"
#include "src/tensor.h"
#include "src/train/loss_kernel.h"
#include "src/train/optimizer_kernel.h"
#include "src/sub_graph_kernel.h"
#include "src/train/train_populate_parameter.h"
#include "src/runtime/runtime_api.h"
#include "src/executor.h"
#include "src/kernel_registry.h"
#include "src/runtime/kernel/arm/fp32_grad/convolution.h"
#include "include/iterator.h"
namespace mindspore {
namespace lite {
using dataset::Dataset;
using dataset::Iterator;
using dataset::MSTensorVec;
using session::RET_CONTINUE;
using session::RET_EXIT;
using session::RET_STOP_TRAINING;
@ -46,24 +37,35 @@ TrainLoop::~TrainLoop() {
if (train_session_ != nullptr) delete train_session_;
}
int TrainLoop::Train(int epochs, std::vector<session::TrainLoopCallBack *> cbs) {
int TrainLoop::Train(int epochs, Dataset *ds, std::vector<session::TrainLoopCallBack *> cbs, LoadDataFunc load_func) {
train_session_->Train();
session::TrainLoopCallBackData cb_data(true, epoch_, train_session_, this);
if (load_func == nullptr) load_func = TrainLoop::LoadData;
for (auto cb : cbs) cb->Begin(cb_data);
int steps_in_epoch = 1; // should be data_size/batch_size
for (int i = 0; i < epochs; i++) {
cb_data.epoch_ = epoch_++;
for (auto cb : cbs) cb->EpochBegin(cb_data);
for (int s = 0; s < steps_in_epoch; s++) {
cb_data.step_ = s;
std::shared_ptr<Iterator> iter = ds->CreateIterator();
MSTensorVec row_vec;
int s = 0;
iter->GetNextRow(&row_vec);
while (row_vec.size() != 0) {
auto ret = load_func(cb_data.session_->GetInputs(), &row_vec);
if (ret != RET_OK) break;
cb_data.step_ = s++;
for (auto cb : cbs) cb->StepBegin(cb_data);
train_session_->RunGraph(before_cb_, after_cb_);
for (auto cb : cbs) cb->StepEnd(cb_data);
iter->GetNextRow(&row_vec);
}
iter->Stop();
int break_loop = false;
for (auto cb : cbs) {
int ret = cb->EpochEnd(cb_data);
@ -86,6 +88,83 @@ int TrainLoop::Train(int epochs, std::vector<session::TrainLoopCallBack *> cbs)
return RET_OK;
}
int TrainLoop::Eval(Dataset *ds, std::vector<session::TrainLoopCallBack *> cbs, LoadDataFunc load_func, int max_steps) {
train_session_->Eval();
session::TrainLoopCallBackData cb_data(false, epoch_, train_session_, this);
if (load_func == nullptr) load_func = TrainLoop::LoadData;
for (auto metric : metrics_) metric->Clear();
for (auto cb : cbs) cb->Begin(cb_data);
for (auto cb : cbs) cb->EpochBegin(cb_data);
std::shared_ptr<Iterator> iter = ds->CreateIterator();
MSTensorVec row_vec;
int s = 0;
iter->GetNextRow(&row_vec);
while (row_vec.size() != 0) {
if (s >= max_steps) break;
auto ret = load_func(cb_data.session_->GetInputs(), &row_vec);
if (ret != RET_OK) break;
cb_data.step_ = ++s;
for (auto cb : cbs) cb->StepBegin(cb_data);
train_session_->RunGraph(before_cb_, after_cb_);
for (auto cb : cbs) cb->StepEnd(cb_data);
auto outputs = cb_data.session_->GetPredictions();
for (auto metric : metrics_) metric->Update(cb_data.session_->GetInputs(), outputs);
iter->GetNextRow(&row_vec);
}
iter->Stop();
for (auto cb : cbs) cb->EpochEnd(cb_data);
for (auto cb : cbs) cb->End(cb_data);
return RET_OK;
}
int TrainLoop::LoadData(std::vector<tensor::MSTensor *> inputs, dataset::MSTensorVec *row_vec) {
auto num_of_inputs = inputs.size();
if ((num_of_inputs == 0) || (row_vec == nullptr) || (num_of_inputs != row_vec->size())) {
return RET_STOP_TRAINING;
}
for (unsigned int i = 0; i < num_of_inputs; i++) {
unsigned char *input_data = reinterpret_cast<unsigned char *>(inputs.at(i)->MutableData());
const unsigned char *row_data = reinterpret_cast<const unsigned char *>(row_vec->at(i).MutableData());
auto data_size = row_vec->at(i).DataSize();
if (data_size != inputs.at(i)->Size()) {
MS_LOG(WARNING) << "Model Input tensor " << i << " size (" << inputs.at(i)->Size()
<< ") does not match dataset size (" << data_size << ")\n";
return RET_STOP_TRAINING;
}
std::copy(row_data, row_data + data_size, input_data);
}
return RET_OK;
}
int TrainLoop::LoadPartialData(std::vector<tensor::MSTensor *> inputs, dataset::MSTensorVec *row_vec) {
auto num_of_inputs = inputs.size();
if ((num_of_inputs == 0) || (row_vec == nullptr) || (num_of_inputs < row_vec->size())) {
return RET_STOP_TRAINING;
}
for (unsigned int i = 0; i < row_vec->size(); i++) {
unsigned char *input_data = reinterpret_cast<unsigned char *>(inputs.at(i)->MutableData());
const unsigned char *row_data = reinterpret_cast<const unsigned char *>(row_vec->at(i).MutableData());
auto data_size = row_vec->at(i).DataSize();
if (data_size != inputs.at(i)->Size()) {
MS_LOG(WARNING) << "Model Input tensor " << i << " size (" << inputs.at(i)->Size()
<< ") does not match dataset size (" << data_size << ")\n";
return RET_STOP_TRAINING;
}
std::copy(row_data, row_data + data_size, input_data);
}
return RET_OK;
}
} // namespace lite
session::TrainLoop *session::TrainLoop::CreateTrainLoop(const std::string &model_filename, lite::Context *context,
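End to end, the MindData-driven loop is meant to be used roughly as follows (a sketch: the full CreateTrainLoop signature is truncated above, the metric and monitor constructor arguments are assumptions, and train_ds/test_ds stand for dataset::Dataset pointers built elsewhere):
mindspore::lite::Context context;
auto *loop = mindspore::session::TrainLoop::CreateTrainLoop("lenet_train.ms", &context);
mindspore::lite::AccuracyMetrics acc;            // assumed default: input 1 vs output 0
loop->Init({&acc});
mindspore::lite::LossMonitor loss_monitor(100);  // assumed: print every 100 steps
loop->Train(num_epochs, train_ds, {&loss_monitor});
loop->Eval(test_ds, {}, nullptr, /*max_steps=*/10000);
std::cout << "accuracy: " << acc.Eval() << std::endl;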

View File

@ -18,11 +18,16 @@
#include <vector>
#include <string>
#include <tuple>
#include <memory>
#include <unordered_map>
#include "src/ops/primitive_c.h"
#include "include/train/train_loop.h"
#include "include/train/metrics.h"
#include "include/train_session.h"
#include "include/datasets.h"
#include "include/iterator.h"
namespace mindspore {
namespace lite {
@ -39,20 +44,34 @@ class TrainLoop : virtual public session::TrainLoop {
virtual ~TrainLoop();
int Init(std::vector<mindspore::session::Metrics *> metrics) override {
metrics_ = metrics;
return RET_OK;
}
int SetKernelCallBack(const KernelCallBack &before, const KernelCallBack &after) override {
before_cb_ = before;
after_cb_ = after;
return RET_OK;
}
int Train(int epochs, std::vector<session::TrainLoopCallBack *> cbs) override;
int Train(int epochs, dataset::Dataset *dataset, std::vector<session::TrainLoopCallBack *> cbs,
LoadDataFunc load_func = nullptr) override;
int Eval(dataset::Dataset *dataset, std::vector<session::TrainLoopCallBack *> cbs, LoadDataFunc load_func = nullptr,
int max_steps = 0) override;
std::vector<mindspore::session::Metrics *> GetMetrics() override { return metrics_; }
protected:
static int LoadData(std::vector<tensor::MSTensor *> inputs, dataset::MSTensorVec *dataset_vec);
static int LoadPartialData(std::vector<tensor::MSTensor *> inputs, dataset::MSTensorVec *dataset_vec);
session::TrainSession *train_session_ = nullptr;
unsigned int epoch_ = 0;
KernelCallBack before_cb_ = nullptr;
KernelCallBack after_cb_ = nullptr;
int batch_size;
std::vector<mindspore::session::Metrics *> metrics_;
};
} // namespace lite
} // namespace mindspore

View File

@ -75,4 +75,18 @@ char *TrainModel::ExportBuf(char *buffer, size_t *len) const {
*len = buf_size_;
return buffer;
}
char *TrainModel::GetBuffer(size_t *len) const {
if (len == nullptr) {
MS_LOG(ERROR) << "len is nullptr";
return nullptr;
}
if (buf_size_ == 0 || buf == nullptr) {
MS_LOG(ERROR) << "Model::Export is only available for Train Session";
return nullptr;
}
*len = buf_size_;
return buf;
}
} // namespace mindspore::lite

View File

@ -43,6 +43,13 @@ struct TrainModel : public lite::LiteModel {
///
/// \return Pointer to buffer with exported model
char *ExportBuf(char *buf, size_t *len) const;
/// \brief Get Model buffer
///
/// \param[in,out] len Return size of the buffer
///
/// \return Pointer to model buffer
char *GetBuffer(size_t *len) const;
};
} // namespace lite
} // namespace mindspore
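Unlike ExportBuf, GetBuffer hands back the model's internal buffer without copying, so the caller must not free it; the reworked SaveToFile later in this diff relies on this and no longer frees the buffer. A sketch:
size_t len = 0;
char *buf = model->GetBuffer(&len);  // borrowed pointer, owned by TrainModel
if (buf != nullptr) {
  ofs.write(buf, len);               // e.g. persist to disk, as SaveToFile now does
}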

View File

@ -33,10 +33,50 @@
#include "src/executor.h"
#include "src/kernel_registry.h"
#include "src/runtime/kernel/arm/fp32_grad/convolution.h"
#include "src/runtime/kernel/arm/fp32/batchnorm_fp32.h"
namespace mindspore {
namespace lite {
std::unique_ptr<char[]> ReadFileToBuf(const std::string &filename, size_t *size) {
std::ifstream ifs(filename);
if (!ifs.good()) {
MS_LOG(ERROR) << "File: " << filename << " does not exist";
return std::unique_ptr<char[]>(nullptr);
}
if (!ifs.is_open()) {
MS_LOG(ERROR) << "File: " << filename << " open failed";
return std::unique_ptr<char[]>(nullptr);
}
ifs.seekg(0, std::ios::end);
auto tellg_ret = ifs.tellg();
if (tellg_ret <= 0) {
MS_LOG(ERROR) << "Could not read file " << filename;
return std::unique_ptr<char[]>(nullptr);
}
size_t fsize = static_cast<size_t>(tellg_ret);
std::unique_ptr<char[]> buf(new (std::nothrow) char[fsize]);
if (buf == nullptr) {
MS_LOG(ERROR) << "malloc buf failed, file: " << filename;
ifs.close();
return std::unique_ptr<char[]>(nullptr);
}
ifs.seekg(0, std::ios::beg);
ifs.read(buf.get(), fsize);
if (!ifs) {
MS_LOG(ERROR) << "only read " << ifs.gcount() << "bytes in " << filename;
ifs.close();
return std::unique_ptr<char[]>(nullptr);
}
ifs.close();
if (size) *size = fsize;
return buf;
}
static size_t TSFindTensor(const std::vector<lite::Tensor *> &where, const lite::Tensor *searchParameter) {
for (size_t i = 0; i < where.size(); i++) {
if (where[i] == searchParameter) {
@ -46,6 +86,12 @@ static size_t TSFindTensor(const std::vector<lite::Tensor *> &where, const lite:
return where.size();
}
static kernel::LiteKernel *TSFindKernel(const std::vector<kernel::LiteKernel *> &where,
const std::string &searchParameter) {
auto it = std::find_if(where.begin(), where.end(),
[&searchParameter](const kernel::LiteKernel *k) { return (k->name() == searchParameter); });
return *it;
}
TrainSession::TrainSession() { kernel::PopulateTrainParameters(); }
std::vector<CreatorOp> TrainSession::ReplaceOps() {
@ -96,10 +142,10 @@ int TrainSession::CompileTrainGraph(mindspore::lite::TrainModel *model) {
for (auto inTensor : inputs_) inTensor->MutableData();
RestoreOps(restore);
CompileTrainKernels(); // Prepare a list of train kernels
CompileInferenceKernels(); // Prepare a list of eval kernels
CompileOptimizedKernels(); // Prepare a list of kernels which are optimized (weight update step)
CompileTrainOutputs(); // prepare outputs in train mode
CompileEvalOutputs(); // prepare outputs in eval mode
CompileInferenceKernels(); // Prepare a list of eval kernels
AllocWorkSpace();
return RET_OK;
@ -107,7 +153,10 @@ int TrainSession::CompileTrainGraph(mindspore::lite::TrainModel *model) {
TrainSession::~TrainSession() {
mindspore::kernel::LiteKernel::FreeWorkspace();
delete model_;
if (model_ != nullptr) {
delete model_;
model_ = nullptr;
}
}
void *TrainSession::ExportToBuf(char *buf, size_t *len) const { return model_->ExportBuf(buf, len); }
@ -128,16 +177,34 @@ int TrainSession::RunGraph(const KernelCallBack &before, const KernelCallBack &a
}
auto run_kernel = (train_mode_) ? train_kernels_ : inference_kernels_;
lite::CpuExecutor executor;
auto ret = RET_OK;
if (before == nullptr && after == nullptr) {
return executor.Run(this->inputs_, this->outputs_, run_kernel, this->context_->allocator.get());
ret = executor.Run(this->inputs_, this->outputs_, run_kernel, this->context_->allocator.get());
} else {
return executor.Run(this->inputs_, this->outputs_, run_kernel, this->context_->allocator.get(), before, after);
ret = executor.Run(this->inputs_, this->outputs_, run_kernel, this->context_->allocator.get(), before, after);
}
if (ret != RET_OK) {
MS_LOG(ERROR) << "failed to run model";
return ret;
}
if (train_mode_ && virtual_batch_multiplier_) {
virtual_batch_idx_++;
if (virtual_batch_idx_ >= virtual_batch_multiplier_) {
virtual_batch_idx_ = 0;
ret = OptimizerStep();
if (ret != RET_OK) {
MS_LOG(ERROR) << "failed to optimize model weights";
return ret;
}
}
}
return RET_OK;
}
int TrainSession::SaveToFile(const std::string &filename) const {
size_t fb_size = 0;
auto *buf = reinterpret_cast<char *>(ExportToBuf(nullptr, &fb_size));
const auto *buf = reinterpret_cast<char *>(model_->GetBuffer(&fb_size));
if (buf == nullptr) {
MS_LOG(ERROR) << "Could not Export Trained model";
return lite::RET_NULL_PTR;
@ -145,20 +212,19 @@ int TrainSession::SaveToFile(const std::string &filename) const {
std::ofstream ofs(filename);
if ((true != ofs.good()) || (true != ofs.is_open())) {
MS_LOG(ERROR) << "Could not open file \"" << filename << "\" for writing";
free(buf);
return RET_ERROR;
}
ofs.seekp(0, std::ios::beg);
ofs.write(buf, fb_size);
ofs.close();
free(buf);
return chmod(filename.c_str(), S_IRUSR);
}
int TrainSession::Train() {
// shift kernels to train mode
train_mode_ = true;
virtual_batch_idx_ = 0;
for (auto kernel : this->train_kernels_) {
MS_ASSERT(nullptr != kernel);
auto ret = kernel->Train();
@ -177,6 +243,7 @@ int TrainSession::Train() {
int TrainSession::Eval() {
// shift kernels to eval mode
train_mode_ = false;
virtual_batch_idx_ = 0;
for (auto kernel : this->train_kernels_) {
MS_ASSERT(kernel != nullptr);
auto ret = kernel->Eval();
@ -197,6 +264,7 @@ void TrainSession::CompileEvalOutputs() {
for (auto kernel : this->train_kernels_) {
if (IsLossKernel(kernel)) {
for (auto in_kernel : kernel->in_kernels()) {
if (IsLossKernel(in_kernel) || IsGradKernel(in_kernel)) continue;
// insert if not already in
if (eval_output_node_map_.find(in_kernel->name()) == eval_output_node_map_.end()) {
auto *ms_tensor = in_kernel->out_tensors().at(0);
@ -240,10 +308,10 @@ void TrainSession::CompileTrainOutputs() {
void TrainSession::BuildInferenceKernelsRecursive(kernel::LiteKernel *kernel, std::vector<kernel::LiteKernel *> *v) {
if (std::find(v->begin(), v->end(), kernel) == v->end()) { // kernel is not already in vector
if (!IsLossKernel(kernel)) v->push_back(kernel);
for (auto in_node : kernel->in_kernels()) {
BuildInferenceKernelsRecursive(in_node, v);
}
if (!IsLossKernel(kernel)) v->push_back(kernel);
}
}
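Moving the push_back after the recursion appears intentional: the traversal becomes post-order, so each kernel is appended only after all of its producers, and inference_kernels_ comes out already in a valid execution order for the reworked CompileInferenceKernels below.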
@ -262,19 +330,10 @@ void TrainSession::CompileTrainKernels() {
}
void TrainSession::CompileInferenceKernels() {
std::vector<kernel::LiteKernel *> req_kernels;
for (auto kernel : this->train_kernels_) {
if (IsLossKernel(kernel)) { // For each loss in the system add backward tree
for (auto in_node : kernel->in_kernels()) {
BuildInferenceKernelsRecursive(in_node, &req_kernels);
}
}
}
inference_kernels_.clear();
for (auto ori_kernel : this->train_kernels_) {
if (std::find(req_kernels.begin(), req_kernels.end(), ori_kernel) != req_kernels.end()) {
inference_kernels_.push_back(ori_kernel);
}
for (auto item : eval_output_node_map_) {
std::string kernel_name = item.first;
auto kernel = TSFindKernel(train_kernels_, kernel_name);
BuildInferenceKernelsRecursive(kernel, &inference_kernels_);
}
if (inference_kernels_.size() == 0) {
inference_kernels_ = this->train_kernels_;
@ -325,23 +384,92 @@ float TrainSession::GetLearningRate() {
return 0.0;
}
int TrainSession::SetupVirtualBatch(int virtual_batch_multiplier, float lr, float momentum) {
auto mod = (virtual_batch_multiplier <= 1) ? kernel::OptimizerKernel::WeightUpdateMode::NORMAL
: kernel::OptimizerKernel::WeightUpdateMode::VIRTUAL_BATCH;
virtual_batch_multiplier_ = (virtual_batch_multiplier <= 1) ? 0 : virtual_batch_multiplier;
virtual_batch_idx_ = 0;
for (auto kernel : this->train_kernels_) {
if (IsOptimizer(kernel)) {
auto optimizer = reinterpret_cast<kernel::OptimizerKernel *>(kernel);
auto ret = optimizer->SetOptimizerMode(mod);
if (ret != RET_OK) {
MS_LOG(ERROR) << kernel->name() << " failed to set optimizer mode";
return RET_ERROR;
}
if (mod == kernel::OptimizerKernel::WeightUpdateMode::VIRTUAL_BATCH) {
lr = (lr < 0.0f) ? (optimizer->GetLearningRate() / static_cast<float>(virtual_batch_multiplier_)) : lr;
ret = optimizer->SetLearningRate(lr);
} else {
ret = optimizer->RestoreDefaultLearningRate();
}
if (ret != RET_OK) {
MS_LOG(ERROR) << kernel->name() << " failed to set learning rate";
return RET_ERROR;
}
}
if (IsBN(kernel) && kernel->is_trainable()) {
auto batchnorm = reinterpret_cast<kernel::BatchnormCPUKernel *>(kernel);
auto ret = RET_OK;
if (mod == kernel::OptimizerKernel::WeightUpdateMode::VIRTUAL_BATCH) {
momentum = (momentum < 0.0f) ? (batchnorm->get_momentum() / virtual_batch_multiplier_) : momentum;
ret = batchnorm->set_momentum(momentum);
} else {
ret = batchnorm->RestoreDefaultMomentum();
}
if (ret != RET_OK) {
MS_LOG(ERROR) << kernel->name() << " failed to set momentum";
return RET_ERROR;
}
}
}
return RET_OK;
}
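From the user's side, enabling and disabling virtual batching is one call each; a usage sketch (leaving lr and momentum at -1.0f keeps the automatic scaling shown above, and num_steps is a placeholder):
session->Train();
session->SetupVirtualBatch(4);        // effective batch = 4 x physical batch
for (int i = 0; i < num_steps; ++i) {
  session->RunGraph();                // weights update once every 4 runs
}
session->SetupVirtualBatch(1);        // back to NORMAL; default lr/momentum restored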
int TrainSession::OptimizerStep() {
for (auto kernel : this->train_kernels_) {
if (IsOptimizer(kernel)) {
auto optimizer = reinterpret_cast<kernel::OptimizerKernel *>(kernel);
auto ret = optimizer->OptimizerStep();
if (ret != RET_OK) {
MS_LOG(ERROR) << kernel->name() << " failed to do optimize step";
return RET_ERROR;
}
}
}
return RET_OK;
}
bool TrainSession::IsLossKernel(const kernel::LiteKernel *kernel) const {
return (kernel->Type() == schema::PrimitiveType_SoftmaxCrossEntropy ||
kernel->Type() == schema::PrimitiveType_SparseSoftmaxCrossEntropy ||
kernel->Type() == schema::PrimitiveType_SmoothL1Loss ||
kernel->Type() == schema::PrimitiveType_SmoothL1LossGrad ||
kernel->Type() == schema::PrimitiveType_SigmoidCrossEntropyWithLogits ||
kernel->Type() == schema::PrimitiveType_SigmoidCrossEntropyWithLogitsGrad);
kernel->Type() == schema::PrimitiveType_SigmoidCrossEntropyWithLogitsGrad) ||
kernel->name().find(get_loss_name()) != std::string::npos;
}
bool TrainSession::IsGradKernel(const kernel::LiteKernel *kernel) const {
return kernel->name().find("Gradients") != std::string::npos;
}
bool TrainSession::IsOptimizer(kernel::LiteKernel *kernel) const {
return ((kernel->Type() == schema::PrimitiveType_Adam) || (kernel->Type() == schema::PrimitiveType_Sgd) ||
(kernel->Type() == schema::PrimitiveType_ApplyMomentum));
}
bool TrainSession::IsMaskOutput(kernel::LiteKernel *kernel) const {
return (IsOptimizer(kernel) || (kernel->Type() == schema::PrimitiveType_Assign));
}
bool TrainSession::IsBN(kernel::LiteKernel *kernel) const {
return ((kernel->Type() == schema::PrimitiveType_BatchNorm) ||
(kernel->Type() == schema::PrimitiveType_FusedBatchNorm));
}
} // namespace lite
session::TrainSession *session::TrainSession::CreateSession(const char *model_buf, size_t size, lite::Context *context,
@ -362,6 +490,7 @@ session::TrainSession *session::TrainSession::CreateSession(const char *model_bu
if (ret != mindspore::lite::RET_OK) {
MS_LOG(ERROR) << "init session failed";
delete session;
delete model;
return nullptr;
}
@ -388,38 +517,11 @@ session::TrainSession *session::TrainSession::CreateSession(const char *model_bu
session::TrainSession *session::TrainSession::CreateSession(const std::string &filename, lite::Context *context,
bool train_mode) {
std::ifstream ifs(filename);
if (!ifs.good()) {
MS_LOG(ERROR) << "File: " << filename << " does not exist";
return nullptr;
}
if (!ifs.is_open()) {
MS_LOG(ERROR) << "File: " << filename << " open failed";
return nullptr;
}
ifs.seekg(0, std::ios::end);
auto size = ifs.tellg();
if (size <= 0) {
MS_LOG(ERROR) << "Could not read file " << filename;
return nullptr;
}
std::unique_ptr<char[]> buf(new (std::nothrow) char[size]);
size_t size = -1;
auto buf = lite::ReadFileToBuf(filename, &size);
if (buf == nullptr) {
MS_LOG(ERROR) << "malloc buf failed, file: " << filename;
ifs.close();
return nullptr;
}
ifs.seekg(0, std::ios::beg);
ifs.read(buf.get(), size);
if (!ifs) {
MS_LOG(ERROR) << "only read " << ifs.gcount() << "bytes in " << filename;
ifs.close();
return nullptr;
}
ifs.close();
return session::TrainSession::CreateSession(buf.get(), size, context, train_mode);
}
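For completeness, the file-based factory in use (a sketch; thread_num_ is an assumed Context field and other settings are left at defaults):
mindspore::lite::Context context;
context.thread_num_ = 2;
auto *session = mindspore::session::TrainSession::CreateSession("lenet_train.ms", &context, true);
if (session == nullptr) {
  // covers a missing file, a short read, or a failed graph compilation
}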

View File

@ -19,6 +19,7 @@
#include <string>
#include <tuple>
#include <unordered_map>
#include <memory>
#include "src/ops/primitive_c.h"
#include "include/train_session.h"
#include "src/train/train_model.h"
@ -42,6 +43,7 @@
namespace mindspore {
namespace lite {
std::unique_ptr<char[]> ReadFileToBuf(const std::string &filename, size_t *size);
using CreatorOp = std::tuple<mindspore::kernel::KernelKey, mindspore::kernel::KernelCreator>;
class TrainSession : virtual public session::TrainSession, virtual public lite::LiteSession {
public:
@ -60,6 +62,7 @@ class TrainSession : virtual public session::TrainSession, virtual public lite::
int Eval() override;
int SetLearningRate(float learning_rate) override;
float GetLearningRate() override;
int SetupVirtualBatch(int virtual_batch_multiplier, float lr = -1.0f, float momentum = -1.0f) override;
void BindThread(bool if_bind) override { return lite::LiteSession::BindThread(if_bind); }
std::vector<tensor::MSTensor *> GetInputs() const override { return lite::LiteSession::GetInputs(); }
@ -81,15 +84,22 @@ class TrainSession : virtual public session::TrainSession, virtual public lite::
return lite::RET_ERROR;
}
std::unordered_map<std::string, mindspore::tensor::MSTensor *> GetPredictions() const override {
return eval_output_tensor_map_;
std::vector<tensor::MSTensor *> GetPredictions() const override {
std::vector<tensor::MSTensor *> outputs;
for (auto it = eval_output_tensor_map_.begin(); it != eval_output_tensor_map_.end(); ++it) {
outputs.push_back(it->second);
}
return outputs;
}
protected:
void AllocWorkSpace();
bool IsLossKernel(const kernel::LiteKernel *kernel) const;
bool IsGradKernel(const kernel::LiteKernel *kernel) const;
bool IsOptimizer(kernel::LiteKernel *kernel) const;
bool IsMaskOutput(kernel::LiteKernel *kernel) const;
bool IsBN(kernel::LiteKernel *kernel) const;
virtual std::vector<CreatorOp> ReplaceOps();
virtual void RestoreOps(const std::vector<CreatorOp> &restore);
virtual void CompileTrainKernels();
@ -113,7 +123,11 @@ class TrainSession : virtual public session::TrainSession, virtual public lite::
private:
void BuildInferenceKernelsRecursive(kernel::LiteKernel *ker, std::vector<kernel::LiteKernel *> *req_kernels);
int OptimizerStep();
int virtual_batch_idx_ = 0;
int virtual_batch_multiplier_ = 0;
};
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_LITE_SRC_TRAIN_TRAIN_SESSION_H_

View File

@ -0,0 +1,85 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/train/train_utils.h"
#include <vector>
#include "include/errorcode.h"
#include "include/ms_tensor.h"
#include "src/common/utils.h"
namespace mindspore {
namespace lite {
float CalculateSparseClassification(tensor::MSTensor *input, tensor::MSTensor *output) {
if ((input->shape().size() != 1) || (input->data_type() != kNumberTypeInt32) || (output->shape().size() != 2)) {
MS_LOG(WARNING) << "SparceClassification got a " << input->shape() << "-D input tensor, " << output->shape()
<< "-D output tensor";
return 0.0;
}
int batch_size = input->shape().at(0);
int num_of_classes = output->shape().at(1);
auto labels = reinterpret_cast<int *>(input->MutableData());
auto predictions = reinterpret_cast<float *>(output->MutableData());
float accuracy = 0.0;
for (int b = 0; b < batch_size; b++) {
int max_idx = 0;
float max_score = predictions[num_of_classes * b];
for (int c = 1; c < num_of_classes; c++) {
if (predictions[num_of_classes * b + c] > max_score) {
max_score = predictions[num_of_classes * b + c];
max_idx = c;
}
}
if (labels[b] == max_idx) accuracy += 1.0;
}
return accuracy / (static_cast<float>(batch_size));
}
float CalculateOneHotClassification(tensor::MSTensor *input, tensor::MSTensor *output) {
if ((input->shape().size() != 2) || (output->shape().size() != 2)) {
MS_LOG(WARNING) << "OneHotClassification got a " << input->shape() << "-D input tensor, " << output->shape()
<< "-D output tensor";
return 0.0;
}
int batch_size = input->shape().at(0);
int num_of_classes = input->shape().at(1);
auto labels = reinterpret_cast<float *>(input->MutableData());
auto predictions = reinterpret_cast<float *>(output->MutableData());
float accuracy = 0.0;
for (int b = 0; b < batch_size; b++) {
int label = 0;
int max_idx = 0;
float max_label_score = labels[num_of_classes * b];
float max_score = predictions[num_of_classes * b];
for (int c = 1; c < num_of_classes; c++) {
if (predictions[num_of_classes * b + c] > max_score) {
max_score = predictions[num_of_classes * b + c];
max_idx = c;
}
if (labels[num_of_classes * b + c] > max_label_score) {
max_label_score = labels[num_of_classes * b + c];
label = c;
}
}
if (label == max_idx) accuracy += 1.0;
}
return accuracy / (static_cast<float>(batch_size));
}
} // namespace lite
} // namespace mindspore
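As a quick sanity check of CalculateSparseClassification: with batch_size = 2, num_of_classes = 3, labels = {2, 0}, and predictions = {0.1, 0.2, 0.7, 0.5, 0.3, 0.2}, the per-row argmax is {2, 0}; both rows match their labels, so the function returns 2/2 = 1.0.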

View File

@ -0,0 +1,29 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_TRAIN_TRAIN_UTILS_H_
#define MINDSPORE_LITE_SRC_TRAIN_TRAIN_UTILS_H_
#include "include/ms_tensor.h"
namespace mindspore {
namespace lite {
float CalculateSparseClassification(tensor::MSTensor *input, tensor::MSTensor *output);
float CalculateOneHotClassification(tensor::MSTensor *input, tensor::MSTensor *output);
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_LITE_SRC_TRAIN_TRAIN_UTILS_H_

View File

@ -0,0 +1,207 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/train/transfer_session.h"
#include <sys/stat.h>
#include <algorithm>
#include <utility>
#include <vector>
#include <iostream>
#include <fstream>
#include <memory>
#include "include/errorcode.h"
#include "src/common/utils.h"
#include "src/tensor.h"
#include "src/train/loss_kernel.h"
#include "src/train/optimizer_kernel.h"
#include "src/sub_graph_kernel.h"
#include "src/train/train_populate_parameter.h"
#include "src/runtime/runtime_api.h"
#include "src/executor.h"
#include "src/kernel_registry.h"
#include "src/runtime/kernel/arm/fp32_grad/convolution.h"
namespace mindspore {
namespace lite {
TransferSession::TransferSession(const char *model_buf_backbone, size_t size_backbone, lite::Context *context)
: is_valid_(false) {
lite_model_ = reinterpret_cast<char *>(malloc(size_backbone));
if (lite_model_ != nullptr) {
std::copy(model_buf_backbone, model_buf_backbone + size_backbone, lite_model_);
backbone_session_ =
reinterpret_cast<lite::LiteSession *>(session::LiteSession::CreateSession(lite_model_, size_backbone, context));
if (backbone_session_ != nullptr) {
is_valid_ = true;
} else {
MS_LOG(ERROR) << "transfer session: create backbone session failed";
}
}
}
std::vector<tensor::MSTensor *> TransferSession::GetInputs() const { return combined_inputs_; }
int TransferSession::CompileTransferGraph() {
combined_inputs_ = backbone_session_->GetInputs();
auto outputs_backbone = backbone_session_->GetOutputs();
auto inputs_head = lite::TrainSession::GetInputs();
int ret = RET_OK;
for (auto input : inputs_head) {
bool match = false;
mindspore::tensor::MSTensor *output = nullptr;
for (auto it = outputs_backbone.begin(); it != outputs_backbone.end(); ++it) {
output = it->second;
if (output->ElementsNum() == input->ElementsNum() && output->shape().size() == input->shape().size()) {
match = true;
for (std::size_t dim = 0; dim != output->shape().size(); ++dim) {
if (input->shape().at(dim) != output->shape().at(dim)) {
match = false;
break;
}
}
if (true == match) {
break;
}
}
}
if (true == match) {
backbone_head_map_.push_back(std::make_pair(input, output));
} else {
combined_inputs_.push_back(input);
}
}
if (0 == backbone_head_map_.size()) {
ret = RET_ERROR;
}
return ret;
}
mindspore::tensor::MSTensor *TransferSession::GetInputsByTensorName(const std::string &tensor_name) const {
/* First look in the backbone network */
auto ret = backbone_session_->GetInputsByTensorName(tensor_name);
/* If not found look in head network */
if (nullptr == ret) {
ret = TrainSession::GetInputsByTensorName(tensor_name);
}
return ret;
}
TransferSession::~TransferSession() {
if (backbone_session_ != nullptr) {
delete backbone_session_;
backbone_session_ = nullptr;
}
if (lite_model_ != nullptr) {
free(lite_model_);
lite_model_ = nullptr;
}
}
void TransferSession::BindThread(bool if_bind) {
backbone_session_->BindThread(if_bind);
TrainSession::BindThread(if_bind);
}
int TransferSession::RunGraph(const KernelCallBack &before, const KernelCallBack &after) {
auto ret = backbone_session_->RunGraph(before, after);
if (ret != RET_OK) {
return ret;
}
for (auto &backbone_head_pair : backbone_head_map_) {
auto input = backbone_head_pair.first;
auto output = backbone_head_pair.second;
char *input_data = reinterpret_cast<char *>(input->MutableData());
char *output_data = reinterpret_cast<char *>(output->MutableData());
std::copy(output_data, output_data + output->Size(), input_data);
}
ret = lite::TrainSession::RunGraph(before, after);
return ret;
}
} // namespace lite
session::TrainSession *session::TrainSession::CreateTransferSession(const char *model_buf_backbone,
size_t size_backbone, const char *model_buf_head,
size_t size_head, lite::Context *context,
bool train_mode) {
auto session = new (std::nothrow) lite::TransferSession(model_buf_backbone, size_backbone, context);
if (session == nullptr) {
MS_LOG(ERROR) << "create transfer session failed";
return nullptr;
}
if (!session->is_valid()) {
MS_LOG(ERROR) << "create transfer session failed";
delete session;
return nullptr;
}
auto ret = session->Init(context);
if (ret != lite::RET_OK) {
MS_LOG(ERROR) << "init transfer session failed";
delete session;
return nullptr;
}
auto model = lite::TrainModel::Import(model_buf_head, size_head);
if (model == nullptr) {
MS_LOG(ERROR) << "create model for head train session failed";
delete session;
return nullptr;
}
ret = session->CompileTrainGraph(model);
if (ret != lite::RET_OK) {
MS_LOG(ERROR) << "Compiling Train Graph failed";
delete session;
return nullptr;
}
ret = session->CompileTransferGraph();
if (ret != lite::RET_OK) {
MS_LOG(ERROR) << "Compiling Transfer Graph failed";
delete session;
return nullptr;
}
if (train_mode) {
ret = session->Train();
} else {
ret = session->Eval();
}
if (ret != lite::RET_OK) {
MS_LOG(ERROR) << "Could not switch to Train Mode " << train_mode;
delete session;
return nullptr;
}
return session;
}
session::TrainSession *session::TrainSession::CreateTransferSession(const std::string &filename_backbone,
const std::string &filename_head,
lite::Context *context, bool train_mode) {
size_t size_head = -1;
size_t size_backbone = -1;
auto buf_head = lite::ReadFileToBuf(filename_head, &size_head);
if (buf_head == nullptr) {
return nullptr;
}
auto buf_backbone = lite::ReadFileToBuf(filename_backbone, &size_backbone);
if (buf_backbone == nullptr) {
return nullptr;
}
return session::TrainSession::CreateTransferSession(buf_backbone.get(), size_backbone, buf_head.get(), size_head,
context, train_mode);
}
} // namespace mindspore
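Putting the transfer-learning entry points together (a sketch; the model file names are placeholders):
mindspore::lite::Context context;
auto *session = mindspore::session::TrainSession::CreateTransferSession(
    "mobilenetv2_backbone.ms", "classifier_head.ms", &context, /*train_mode=*/true);
// Each RunGraph executes the backbone first, copies every matched backbone
// output into its paired head input (backbone_head_map_), then runs the head.
session->RunGraph();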

View File

@ -19,6 +19,7 @@
#include <string>
#include <tuple>
#include <unordered_map>
#include <utility>
#include "src/ops/primitive_c.h"
#include "include/train_session.h"
#include "src/train/train_model.h"
@ -50,20 +51,26 @@ namespace lite {
class TransferSession : public lite::TrainSession {
public:
TransferSession();
explicit TransferSession(lite::LiteSession *backend_session);
explicit TransferSession(const char *model_buf_backbone, size_t size_backbone, lite::Context *context);
~TransferSession();
bool is_valid() const { return is_valid_; }
int RunGraph(const KernelCallBack &before = nullptr, const KernelCallBack &after = nullptr) override;
void BindThread(bool if_bind) override;
std::vector<tensor::MSTensor *> GetInputs() const override { return lite::LiteSession::GetInputs(); }
mindspore::tensor::MSTensor *GetInputsByTensorName(const std::string &tensor_name) const override {
return lite::LiteSession::GetInputsByTensorName(tensor_name);
}
std::vector<tensor::MSTensor *> GetInputs() const override;
mindspore::tensor::MSTensor *GetInputsByTensorName(const std::string &tensor_name) const override;
int CompileTransferGraph();
protected:
lite::LiteSession *backend_session_;
lite::LiteSession *backbone_session_;
char *lite_model_;
std::vector<mindspore::tensor::MSTensor *> combined_inputs_;
std::vector<std::pair<mindspore::tensor::MSTensor *, mindspore::tensor::MSTensor *>> backbone_head_map_;
bool is_valid_;
private:
};

mindspore/lite/test/.gitignore
View File

@ -0,0 +1,4 @@
benchmark_train_test
logs_train
ms_models_train
net_train_test

View File

@ -244,6 +244,7 @@ if(SUPPORT_TRAIN)
${TEST_LITE_SRC}
${LITE_DIR}/src/train/train_populate_parameter.cc
${LITE_DIR}/src/train/train_session.cc
${LITE_DIR}/src/train/transfer_session.cc
${LITE_DIR}/src/train/train_model.cc
${LITE_DIR}/src/lite_session.cc
)
@ -332,6 +333,10 @@ endif()
add_executable(lite-test ${TEST_SRC})
add_dependencies(lite-test fbs_src)
target_link_libraries(lite-test dl mindspore::gtest)
if(SUPPORT_TRAIN)
target_link_libraries(lite-test minddata-lite)
endif()
if(PLATFORM_ARM64 AND ENABLE_FP16)
target_link_libraries(lite-test nnacl_fp16_mid nnacl_optimize_mid)
endif()

View File

@ -1,7 +1,7 @@
mini_alexnet
mobilenetv1
mobilenetv2
#mobilenetv3
mobilenetv3
lenet
effnet
effnet_tune
@ -9,7 +9,7 @@ resnet
googlenet
# densenet
# shufflenetv2
# nin
#nin
# one_net
# lenetv1
#LAST
#LAST

View File

@ -16,13 +16,19 @@ function Print_Result() {
basepath=$(pwd)
echo ${basepath}
# Set models default config filepath
models_mindspore_train_config=${basepath}/models_ms_train.cfg
# Example:run_net_export.sh -m /home/emir/Work/TestingEnv/train_models
epoch_num=1
while getopts "m:t:" opt; do
while getopts "c:m:t:" opt; do
case ${opt} in
c)
models_mindspore_train_config=${OPTARG}
echo "models_mindspore_train_config is ${models_mindspore_train_config}"
;;
m)
models_path=${OPTARG}"/models_train"
echo "models_path is ${OPTARG}"
;;
@ -37,9 +43,6 @@ while getopts "m:t:" opt; do
done
# Set models config filepath
models_mindspore_train_config=${basepath}/models_ms_train.cfg
logs_path=${basepath}/logs_train
rm -rf ${logs_path}
mkdir -p ${logs_path}

View File

@ -81,9 +81,9 @@ function Run_x86() {
echo ${model_name}'_train' >> "${run_x86_log_file}"
echo 'cd '${x86_path}'/mindspore-lite-'${version}'-train-linux-x64' >> "${run_x86_log_file}"
cd ${x86_path}/mindspore-lite-${version}-train-linux-x64 || return 1
echo 'LD_LIBRARY_PATH='${LD_LIBRARY_PATH}':./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark_train/benchmark_train --epochs='${epoch_num}' --modelFile='${ms_models_path}'/'${model_name}'_train.ms --inDataFile='${train_io_path}/${model_name}_input1.bin,${train_io_path}/${model_name}_input2.bin' --expectedDataFile='${train_io_path}'/'${model_name}'_output --exportFile='${ms_models_path}'/'${model_name}'_train_exported.ms' >> "${run_x86_log_file}"
echo 'LD_LIBRARY_PATH='${LD_LIBRARY_PATH}':./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib ./benchmark_train/benchmark_train --epochs='${epoch_num}' --modelFile='${ms_models_path}'/'${model_name}'_train.ms --inDataFile='${train_io_path}/${model_name}_input1.bin,${train_io_path}/${model_name}_input2.bin' --expectedDataFile='${train_io_path}'/'${model_name}'_output --exportFile='${ms_models_path}'/'${model_name}'_train_exported.ms' >> "${run_x86_log_file}"
echo '-------------------------------------------------------------------------------' >> "${run_x86_log_file}"
LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib \
LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib:./minddata/lib:./minddata/third_party/libjpeg-turbo/lib \
${run_valgrind}./benchmark_train/benchmark_train \
--modelFile=${ms_models_path}/${model_name}_train.ms \
--inDataFile=${train_io_path}/${model_name}_input1.bin,${train_io_path}/${model_name}_input2.bin \
@ -131,13 +131,10 @@ function Run_arm() {
# If build with minddata, copy the minddata related libs
cd ${benchmark_train_test_path} || exit 1
if [ -f ${arm_path}/mindspore-lite-${version_arm}-train-android-${process_unit}/lib/libminddata-lite.so ]; then
cp -a ${arm_path}/mindspore-lite-${version_arm}-train-android-${process_unit}/third_party/libjpeg-turbo/lib/libjpeg.so ${benchmark_train_test_path}/libjpeg.so || exit 1
cp -a ${arm_path}/mindspore-lite-${version_arm}-train-android-${process_unit}/third_party/libjpeg-turbo/lib/libturbojpeg.so ${benchmark_train_test_path}/libturbojpeg.so || exit 1
cp -a ${arm_path}/mindspore-lite-${version_arm}-train-android-${process_unit}/third_party/opencv/lib/libopencv_core.so ${benchmark_train_test_path}/libopencv_core.so || exit 1
cp -a ${arm_path}/mindspore-lite-${version_arm}-train-android-${process_unit}/third_party/opencv/lib/libopencv_imgcodecs.so ${benchmark_train_test_path}/libopencv_imgcodecs.so || exit 1
cp -a ${arm_path}/mindspore-lite-${version_arm}-train-android-${process_unit}/third_party/opencv/lib/libopencv_imgproc.so ${benchmark_train_test_path}/libopencv_imgproc.so || exit 1
cp -a ${arm_path}/mindspore-lite-${version_arm}-train-android-${process_unit}/lib/libminddata-lite.so ${benchmark_train_test_path}/libminddata-lite.so || exit 1
if [ -f ${arm_path}/mindspore-lite-${version_arm}-train-android-${process_unit}/minddata/lib/libminddata-lite.so ]; then
cp -a ${arm_path}/mindspore-lite-${version_arm}-train-android-${process_unit}/minddata/third_party/libjpeg-turbo/lib/libjpeg.so ${benchmark_train_test_path}/libjpeg.so || exit 1
cp -a ${arm_path}/mindspore-lite-${version_arm}-train-android-${process_unit}/minddata/third_party/libjpeg-turbo/lib/libturbojpeg.so ${benchmark_train_test_path}/libturbojpeg.so || exit 1
cp -a ${arm_path}/mindspore-lite-${version_arm}-train-android-${process_unit}/minddata/lib/libminddata-lite.so ${benchmark_train_test_path}/libminddata-lite.so || exit 1
fi
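
Note: the copy block now keys off the relocated minddata/ layout, takes libjpeg-turbo from under minddata/third_party, and drops the OpenCV libraries this path no longer stages. As a quick device-side sanity sketch, the staged libraries can be probed with dlopen (library names are taken from the script above; link with -ldl):

#include <dlfcn.h>
#include <cstdio>

int main() {
  // Same .so names the script copies next to benchmark_train.
  const char *libs[] = {"libjpeg.so", "libturbojpeg.so", "libminddata-lite.so"};
  for (const char *lib : libs) {
    void *handle = dlopen(lib, RTLD_NOW);
    if (handle == nullptr) {
      std::printf("failed to load %s: %s\n", lib, dlerror());
      return 1;
    }
    dlclose(handle);
  }
  std::printf("all minddata libraries resolved\n");
  return 0;
}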
if [ "$1" == arm64 ]; then
cp -a ${arm_path}/mindspore-lite-${version_arm}-train-android-${process_unit}/third_party/hiai_ddk/lib/libhiai.so ${benchmark_train_test_path}/libhiai.so || exit 1
@ -230,12 +227,15 @@ function Print_Result() {
basepath=$(pwd)
echo ${basepath}
# Set default models config filepath
models_mindspore_train_config=${basepath}/models_ms_train.cfg
# Example: run_benchmark_train.sh -r /home/emir/Work/TestingEnv/release -m /home/emir/Work/TestingEnv/train_models -i /home/emir/Work/TestingEnv/train_io -d "8KE5T19620002408"
# For running on arm64, use -t to set platform tools path (for using adb commands)
epoch_num=1
threads=2
train_io_path=""
while getopts "r:m:d:i:e:vt:q:D" opt; do
while getopts "r:M:c:m:d:i:e:vt:q:D" opt; do
case ${opt} in
r)
release_path=${OPTARG}
@ -245,6 +245,14 @@ while getopts "r:m:d:i:e:vt:q:D" opt; do
models_path=${OPTARG}"/models_train"
echo "models_path is ${OPTARG}"
;;
M)
models_path=${OPTARG}
echo "models_path is ${models_path}"
;;
c)
models_mindspore_train_config=${OPTARG}
echo "models_mindspore_train_config is ${models_mindspore_train_config}"
;;
i)
train_io_path=${OPTARG}
echo "train_io_path is ${OPTARG}"
@ -278,8 +286,10 @@ done
if [[ $train_io_path == "" ]]
then
echo "train_io path is empty"
train_io_path=${models_path}/input_output
fi
echo $train_io_path
arm64_path=${release_path}/android_aarch64
file=$(ls ${arm64_path}/*train-android-aarch64.tar.gz)
@ -299,9 +309,6 @@ file_name="${file##*/}"
IFS="-" read -r -a file_name_array <<< "$file_name"
version=${file_name_array[2]}
# Set models config filepath
models_mindspore_train_config=${basepath}/models_ms_train.cfg
ms_models_path=${basepath}/ms_models_train
logs_path=${basepath}/logs_train
@ -387,16 +394,11 @@ Run_x86_PID=$!
sleep 1
# wait ${Run_x86_PID}
cat ${run_benchmark_train_result_file}
wait ${Run_x86_PID}
Run_x86_status=$?
# Run on arm64
echo "start Run arm64 ..."
Run_arm arm64
Run_arm64_status=$?
sleep 3
sleep 1
# Run on arm32
echo "start Run arm32 ..."
@ -404,6 +406,10 @@ Run_arm arm32
Run_arm32_status=$?
sleep 1
wait ${Run_x86_PID}
Run_x86_status=$?
cat ${run_benchmark_train_result_file}
END=$(date +%s.%N)
DIFF=$(echo "$END - $START" | bc)
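
Note: moving `wait ${Run_x86_PID}` to after the ARM passes lets the x86 benchmarks run in the background while arm64 and arm32 proceed, and the exit status is still collected before the results are checked. The same overlap pattern expressed in C++ with fork/waitpid (script names are placeholders, not files from this repo):

#include <sys/wait.h>
#include <unistd.h>
#include <cstdlib>

int main() {
  pid_t x86_pid = fork();
  if (x86_pid == 0) {  // child: the long-running x86 benchmark pass
    execl("/bin/sh", "sh", "-c", "./run_x86_benchmarks.sh", (char *)nullptr);
    _exit(127);  // only reached if exec fails
  }
  std::system("./run_arm_benchmarks.sh arm64");  // runs while the child works
  std::system("./run_arm_benchmarks.sh arm32");
  int status = 0;
  waitpid(x86_pid, &status, 0);  // equivalent of `wait ${Run_x86_PID}`
  return WIFEXITED(status) ? WEXITSTATUS(status) : 1;
}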
@ -415,6 +421,8 @@ function Print_Benchmark_Result() {
done < ${run_benchmark_train_result_file}
MS_PRINT_TESTCASE_END_MSG
}
result=0
# Check benchmark_train result and return value
if [[ ${Run_x86_status} != 0 ]]; then

View File

@ -16,23 +16,15 @@ else()
endif()
if(PLATFORM_ARM32 OR PLATFORM_ARM64)
target_link_libraries(benchmark_train mindspore-lite)
target_link_libraries(benchmark_train mindspore-lite minddata-lite)
else()
if(WIN32)
target_link_libraries(benchmark_train mindspore-lite_static pthread cpu_kernel_mid nnacl_mid)
target_link_libraries(benchmark_train mindspore-lite_static pthread cpu_kernel_mid nnacl_mid minddata-lite)
else()
target_link_libraries(benchmark_train mindspore-lite pthread)
endif()
endif()
if(PLATFORM_ARM32 OR PLATFORM_ARM64)
install(TARGETS benchmark_train
RUNTIME DESTINATION ${MAIN_DIR}-${RUNTIME_COMPONENT_NAME}/benchmark_train COMPONENT ${RUNTIME_COMPONENT_NAME})
else()
if(WIN32)
install(TARGETS benchmark_train
RUNTIME DESTINATION ${MAIN_DIR}-${RUNTIME_COMPONENT_NAME}/benchmark_train COMPONENT ${RUNTIME_COMPONENT_NAME})
else()
install(TARGETS benchmark_train
RUNTIME DESTINATION ${MAIN_DIR}-${RUNTIME_COMPONENT_NAME}/benchmark_train COMPONENT ${RUNTIME_COMPONENT_NAME})
target_link_libraries(benchmark_train mindspore-lite pthread minddata-lite)
endif()
endif()
install(TARGETS benchmark_train
RUNTIME DESTINATION ${MAIN_DIR}-${RUNTIME_COMPONENT_NAME}/benchmark_train
COMPONENT ${RUNTIME_COMPONENT_NAME})

View File

@ -124,7 +124,11 @@ int NetTrain::ReadInputFile() {
MS_LOG(ERROR) << "Not supported image input";
return RET_ERROR;
} else {
for (size_t i = 0; i < flags_->input_data_list_.size(); i++) {
if (ms_inputs_.size() > flags_->input_data_list_.size()) {
MS_LOG(ERROR) << "missing input files";
return RET_ERROR;
}
for (size_t i = 0; i < ms_inputs_.size(); i++) {
auto cur_tensor = ms_inputs_.at(i);
MS_ASSERT(cur_tensor != nullptr);
size_t size;
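
Note: the loop now iterates over the model's own inputs rather than the supplied file list, so the count check above is what prevents indexing past input_data_list_ when input files are missing. A self-contained sketch of the per-input read-and-copy step (simplified types, not the actual lite::Tensor API used by NetTrain):

#include <cstring>
#include <fstream>
#include <string>
#include <vector>

// Read one raw .bin file and copy it into a tensor-sized buffer,
// rejecting size mismatches the way NetTrain::ReadInputFile does.
bool FillInput(void *tensor_data, size_t tensor_bytes, const std::string &path) {
  std::ifstream in(path, std::ios::binary | std::ios::ate);
  if (!in.is_open()) return false;
  const auto file_bytes = static_cast<size_t>(in.tellg());
  if (file_bytes != tensor_bytes) return false;  // wrong file for this input
  std::vector<char> buf(file_bytes);
  in.seekg(0);
  in.read(buf.data(), static_cast<std::streamsize>(file_bytes));
  std::memcpy(tensor_data, buf.data(), file_bytes);
  return true;
}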
@ -163,6 +167,7 @@ int NetTrain::CompareOutput() {
int i = 1;
for (auto it = tensors_list.begin(); it != tensors_list.end(); ++it) {
tensor = session_->GetOutputByTensorName(it->first);
std::cout << "output is tensor " << it->first << "\n";
auto outputs = tensor->MutableData();
size_t size;
std::string output_file = flags_->data_file_ + std::to_string(i) + ".bin";
@ -185,7 +190,7 @@ int NetTrain::CompareOutput() {
break;
}
i++;
delete bin_buf;
delete[] bin_buf;
}
if (!has_error) {
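
Note: bin_buf is allocated with new[] by the file reader, so releasing it with plain delete was undefined behavior; the fix pairs the allocation with delete[]. A sketch of a shape that cannot be mismatched, using std::unique_ptr<char[]>; ReadFile here is an illustrative stand-in, not the tool's actual reader:

#include <fstream>
#include <memory>
#include <string>

// The buffer comes back as unique_ptr<char[]>, so delete[] is applied
// automatically when it goes out of scope.
std::unique_ptr<char[]> ReadFile(const std::string &path, size_t *size) {
  std::ifstream in(path, std::ios::binary | std::ios::ate);
  if (!in.is_open()) return nullptr;
  *size = static_cast<size_t>(in.tellg());
  std::unique_ptr<char[]> buf(new char[*size]);
  in.seekg(0);
  in.read(buf.get(), static_cast<std::streamsize>(*size));
  return buf;
}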
@ -326,7 +331,6 @@ int NetTrain::RunExportedNet() {
std::cout << "CreateSession failed while running ", model_name.c_str();
return RET_ERROR;
}
ms_inputs_ = session_->GetInputs();
auto end_prepare_time = GetTimeUs();
MS_LOG(INFO) << "Exported model PrepareTime = " << (end_prepare_time - start_prepare_time) / 1000 << " ms";
@ -438,7 +442,7 @@ int NetTrain::RunNetTrain() {
std::cout << "Run SaveToFile error";
return RET_ERROR;
}
// delete session_;
delete session_;
status = RunExportedNet();
if (status != RET_OK) {
MS_LOG(ERROR) << "Run Exported model error: " << status;
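
Note: re-enabling `delete session_;` releases the training session before RunExportedNet builds a fresh session from the exported .ms file, instead of holding both in memory at once. A minimal sketch of that lifetime, with TrainSession standing in for the real session type:

#include <memory>

struct TrainSession { /* training state, freed on destruction */ };

int RunExportedNet() { return 0; }  // placeholder: reloads the exported model

int RunNetTrain() {
  {
    auto session = std::make_unique<TrainSession>();
    // ... train, then export the model to file ...
  }  // session destroyed here: the effect of the restored `delete session_;`
  return RunExportedNet();  // creates its own session from the export
}

int main() { return RunNetTrain(); }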