Fix pylint warnings in mindspore st test module

leonwanghui 2020-04-22 16:44:19 +08:00
parent ca3aa6071a
commit ba43dbc148
38 changed files with 324 additions and 259 deletions
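Most of the repetitive changes below are the same few pylint fixes applied across the test suite: spaces after commas and around operators (bad-whitespace, C0326 in the pylint releases of that era), regrouped and reordered module-level imports (wrong-import-order / ungrouped-imports), blank lines added before top-level definitions, removal of unused imports, commented-out code and debug prints, and `from numpy import allclose` replaced by the qualified `np.allclose`. A minimal sketch of the whitespace pattern (hypothetical lines, not taken from this diff):

strategy = ((1,2),(3,4))      # before: flagged as bad-whitespace
strategy = ((1, 2), (3, 4))   # after: one space after each comma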

View File

@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import os
import pytest
@ -26,6 +27,7 @@ device_num = 2
device_id = int(os.getenv('DEVICE_ID'))
rank_id = 0
def setup_module():
global device_num
global rank_id
@ -42,9 +44,11 @@ def setup_module():
context.set_auto_parallel_context(device_num=device_num,
global_rank=rank_id)
def teardown_module():
distributedTool.release()
class Onehot(Cell):
def __init__(self, axis=-1, depth=1, on_value=1.0, off_value=0.0, strategy=None):
super(Onehot, self).__init__()
@ -56,25 +60,26 @@ class Onehot(Cell):
self.on_value = Tensor(on_value, ms.float32)
self.off_value = Tensor(off_value, ms.float32)
self.transpose = P.Transpose().set_strategy(strategy=trans_stra)
- self.sub = P.Sub().set_strategy(strategy=((1,1),(1,1)))
+ self.sub = P.Sub().set_strategy(strategy=((1, 1), (1, 1)))
def construct(self, input, indices):
x = self.onehot(indices, self.depth, self.on_value, self.off_value)
- x = self.transpose(x, (1,0))
+ x = self.transpose(x, (1, 0))
x = self.sub(input, x)
return x
class DataGenerator():
def get_parallel_blocks(self, input_, strategy):
blocks = [input_]
i = 0
for stra in strategy:
temp = []
- while len(blocks)>0:
+ while len(blocks) > 0:
block = blocks.pop(0)
temp.extend(np.split(block, stra, axis=i))
blocks.extend(temp)
- i+=1
+ i += 1
return blocks
def generate_data(self, shape):
@ -93,13 +98,14 @@ class DataGenerator():
stra = [1]*len(shape)
stra[0] = device_num
datas = self.get_parallel_blocks(data, stra)
- return Tensor(data),Tensor(datas[rank_id])
+ return Tensor(data), Tensor(datas[rank_id])
class OneHotFactory:
def __init__(self, batch_size, classes, on_value=1.0, off_value=0.0, axis=None, strategy=None):
dataGen = DataGenerator()
self.input_full, self.input_part = dataGen.input_data((classes, batch_size))
- self.label_full, self.label_part = dataGen.label_data((batch_size,),classes)
+ self.label_full, self.label_part = dataGen.label_data((batch_size,), classes)
self.depth = classes
self.on_value = on_value
self.off_value = off_value
@ -137,7 +143,7 @@ def test_reid_onehot_forward_int32_128_depth1024_model_parallel():
on_value=1.000000,
off_value=0.000000,
axis=-1,
- strategy=((1,device_num),(),()))
+ strategy=((1, device_num), (), ()))
fact.forward_cmp()
@ -147,5 +153,5 @@ def test_reid_onehot_forward_int32_1024_depth128_model_parallel():
on_value=1.000000,
off_value=0.000000,
axis=-1,
- strategy=((1,device_num),(),()))
+ strategy=((1, device_num), (), ()))
fact.forward_cmp()
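For context, several of these tests share the `get_parallel_blocks` helper shown above; a standalone, runnable sketch of what it does (the example data and strategy are illustrative):

import numpy as np

def get_parallel_blocks(input_, strategy):
    # Split along each axis into the slice count given by the strategy,
    # re-collecting the blocks after every axis.
    blocks = [input_]
    for i, stra in enumerate(strategy):
        temp = []
        while len(blocks) > 0:
            temp.extend(np.split(blocks.pop(0), stra, axis=i))
        blocks.extend(temp)
    return blocks

# With device_num = 2 and strategy [2, 1], rank 0 receives the first half
# of axis 0 and rank 1 the second half.
data = np.arange(8).reshape(4, 2)
print([b.shape for b in get_parallel_blocks(data, [2, 1])])  # [(2, 2), (2, 2)]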

View File

@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import os
import pytest
@ -31,7 +32,7 @@ from mindspore.nn.optim.momentum import Momentum
from mindspore.train.callback import Callback
np.set_printoptions(threshold=np.inf)
- device_num=2
+ device_num = 2
device_id = int(os.getenv('DEVICE_ID'))
rank_id = 0
embed = 128
@ -39,6 +40,7 @@ classes = 32
batch_size = 32*2
MatmulParamShape = (classes, embed)
def setup_module():
global device_num
global rank_id
@ -55,26 +57,28 @@ def setup_module():
context.set_auto_parallel_context(device_num=device_num,
global_rank=device_id)
def teardown_module():
distributedTool.release()
class DataGenerator():
def get_parallel_blocks(self, input_, strategy):
blocks = [input_]
i = 0
for stra in strategy:
temp = []
- while len(blocks)>0:
+ while len(blocks) > 0:
block = blocks.pop(0)
temp.extend(np.split(block, stra, axis=i))
blocks.extend(temp)
- i+=1
+ i += 1
return blocks
def generate_data(self, shape):
size = np.cumprod(shape)[-1]
num_range = min(size, 1000)
- data = (np.arange(0, size)%num_range)/num_range
+ data = (np.arange(0, size) % num_range)/num_range
data = np.reshape(data, shape)
return data
@ -90,7 +94,8 @@ class DataGenerator():
stra = [1]*len(shape)
stra[0] = device_num
datas = self.get_parallel_blocks(data, stra)
- return Tensor(data),Tensor(datas[rank_id])
+ return Tensor(data), Tensor(datas[rank_id])
class Dataset():
def __init__(self, predict, label, length=1, input_num=2):
@ -121,15 +126,18 @@ class Dataset():
def get_repeat_count(self):
return self.length
class ModelCallback(Callback):
def __init__(self):
super(ModelCallback, self).__init__()
self.loss_list = []
def epoch_end(self, run_context, *args):
cb_params = run_context.original_args()
result = cb_params.net_outputs
self.loss_list.append(result.asnumpy().mean())
class SoftmaxCrossEntropyExpand(Cell):
def __init__(self, sparse=False, stra_list=[]):
super(SoftmaxCrossEntropyExpand, self).__init__()
@ -164,22 +172,25 @@ class SoftmaxCrossEntropyExpand(Cell):
loss = self.reduce_mean(loss, -1)
return loss
class MatmulNet(Cell):
- def __init__(self, matmul_stra = None, loss_stra_list=[]):
+ def __init__(self, matmul_stra=None, loss_stra_list=[]):
super(MatmulNet, self).__init__()
self.matmul = P.MatMul(transpose_b=True).set_strategy(strategy=matmul_stra)
self.loss = SoftmaxCrossEntropyExpand(sparse=True, stra_list=loss_stra_list)
self.weight = Parameter(Tensor(np.ones(MatmulParamShape), dtype=ms.float32), name="weight")
def construct(self, x, label):
loss_input = self.matmul(x, self.weight)
out = self.loss(loss_input, label)
return out
class LossFactory():
def __init__(self):
dataGen = DataGenerator()
self.input_full, self.input_part = dataGen.input_data((batch_size, embed))
- self.label_full, self.label_part = dataGen.label_data((batch_size,),embed)
+ self.label_full, self.label_part = dataGen.label_data((batch_size,), embed)
def single_matmul_trains(self):
single_callback = ModelCallback()
@ -206,21 +217,22 @@ class LossFactory():
def model_parallel_matmul_trains(self):
parallel_callback = ModelCallback()
- matmul_stra = ((1,1),(device_num,1))
- reduce_max_stra = ((1,device_num),)
- sub_stra = ((1,device_num),(1,1))
- exp_stra = ((1,device_num),)
- reduce_sum_stra = ((1,device_num),)
- div_stra = ((1,device_num),(1,1))
- log_stra = ((1,device_num),)
- mul_stra = ((1,device_num),(1,device_num))
- sum_cross_entropy_stra = ((1,device_num),)
- mul2_stra = ((),(device_num,))
+ matmul_stra = ((1, 1), (device_num, 1))
+ reduce_max_stra = ((1, device_num),)
+ sub_stra = ((1, device_num), (1, 1))
+ exp_stra = ((1, device_num),)
+ reduce_sum_stra = ((1, device_num),)
+ div_stra = ((1, device_num), (1, 1))
+ log_stra = ((1, device_num),)
+ mul_stra = ((1, device_num), (1, device_num))
+ sum_cross_entropy_stra = ((1, device_num),)
+ mul2_stra = ((), (device_num,))
reduce_mean_stra = ((device_num,),)
- onehot_stra = ((1,device_num),(),())
- loss_stra_list = [exp_stra, reduce_sum_stra, onehot_stra, div_stra, log_stra, sum_cross_entropy_stra, mul_stra, mul2_stra, reduce_mean_stra, reduce_max_stra, sub_stra]
+ onehot_stra = ((1, device_num), (), ())
+ loss_stra_list = [exp_stra, reduce_sum_stra, onehot_stra, div_stra, log_stra,
+ sum_cross_entropy_stra, mul_stra, mul2_stra, reduce_mean_stra, reduce_max_stra, sub_stra]
context.set_auto_parallel_context(parallel_mode="auto_parallel")
- net = MatmulNet(matmul_stra = matmul_stra, loss_stra_list = loss_stra_list)
+ net = MatmulNet(matmul_stra=matmul_stra, loss_stra_list=loss_stra_list)
optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
model = Model(net, optimizer=optimizer)
epoch_size = 6
@ -231,21 +243,22 @@ class LossFactory():
def mix_parallel_matmul_trains(self):
parallel_callback = ModelCallback()
- matmul_stra = ((device_num,1),(1,1))
- reduce_max_stra = ((1,device_num),)
- sub_stra = ((device_num,1),(device_num,1))
- exp_stra = ((1,device_num),)
- reduce_sum_stra = ((1,device_num),)
- div_stra = ((1,device_num),(1,1))
- log_stra = ((1,device_num),)
- mul_stra = ((1,device_num),(1,device_num))
- sum_cross_entropy_stra = ((1,device_num),)
- mul2_stra = ((),(device_num,))
+ matmul_stra = ((device_num, 1), (1, 1))
+ reduce_max_stra = ((1, device_num),)
+ sub_stra = ((device_num, 1), (device_num, 1))
+ exp_stra = ((1, device_num),)
+ reduce_sum_stra = ((1, device_num),)
+ div_stra = ((1, device_num), (1, 1))
+ log_stra = ((1, device_num),)
+ mul_stra = ((1, device_num), (1, device_num))
+ sum_cross_entropy_stra = ((1, device_num),)
+ mul2_stra = ((), (device_num,))
reduce_mean_stra = ((device_num,),)
- onehot_stra = ((1,device_num),(),())
- loss_stra_list = [exp_stra, reduce_sum_stra, onehot_stra, div_stra, log_stra, sum_cross_entropy_stra, mul_stra, mul2_stra, reduce_mean_stra, reduce_max_stra, sub_stra]
+ onehot_stra = ((1, device_num), (), ())
+ loss_stra_list = [exp_stra, reduce_sum_stra, onehot_stra, div_stra, log_stra,
+ sum_cross_entropy_stra, mul_stra, mul2_stra, reduce_mean_stra, reduce_max_stra, sub_stra]
context.set_auto_parallel_context(parallel_mode="auto_parallel")
- net = MatmulNet(matmul_stra = matmul_stra, loss_stra_list = loss_stra_list)
+ net = MatmulNet(matmul_stra=matmul_stra, loss_stra_list=loss_stra_list)
optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
model = Model(net, optimizer=optimizer)
epoch_size = 6
@ -254,6 +267,7 @@ class LossFactory():
loss_value = np.array(parallel_callback.loss_list)
return loss_value
def test_all_trains():
loss_factory = LossFactory()
context.reset_auto_parallel_context()
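Reading the strategy tuples above: each inner tuple describes one operator input, and each entry says how many slices that dimension is cut into across devices. A hedged illustration, with device_num = 2 as in these tests:

device_num = 2
# In model_parallel_matmul_trains, ((1, 1), (device_num, 1)) keeps the
# activation whole and splits the weight's first (classes) axis across devices.
matmul_stra = ((1, 1), (device_num, 1))
# In mix_parallel_matmul_trains, ((device_num, 1), (1, 1)) instead splits the
# activation's batch axis and replicates the weight.
mix_matmul_stra = ((device_num, 1), (1, 1))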

View File

@ -12,10 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import os
import pytest
@pytest.mark.level0
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@ -23,4 +23,4 @@ import pytest
def test_expand_loss():
sh_path = os.path.split(os.path.realpath(__file__))[0]
ret = os.system(f"sh {sh_path}/run_auto_parallel_loss_expand.sh")
- assert(ret==0)
+ assert(ret == 0)

View File

@ -16,6 +16,7 @@
import os
import pytest
def test_expand_loss():
ret = os.system("sh run_onehot_model_parallel.sh")
- assert(ret==0)
+ assert(ret == 0)

View File

@ -11,10 +11,12 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import os
import numpy as np
import pytest
- from numpy import allclose
+ import mindspore.context as context
import mindspore.nn as nn
import mindspore.common.dtype as mstype
from mindspore import Tensor
@ -22,21 +24,21 @@ from mindspore.ops import operations as P
from mindspore.nn.optim.momentum import Momentum
from mindspore.common.initializer import One
from mindspore.train.model import Model, ParallelMode
- from mindspore import context
- import os
from mindspore.communication.management import init
import mindspore.ops.functional as F
from mindspore.nn.loss.loss import _Loss
from mindspore.train.callback import Callback
from mindspore.parallel import set_algo_parameters
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(enable_hccl=True)
- context.set_context(enable_task_sink=True,device_id=int(os.getenv('DEVICE_ID')))
+ context.set_context(enable_task_sink=True, device_id=int(os.getenv('DEVICE_ID')))
context.set_context(enable_ir_fusion=True)
context.set_context(enable_loop_sink=False)
init()
context.set_auto_parallel_context(mirror_mean=True, parallel_mode=ParallelMode.AUTO_PARALLEL)
def weight_variable(shape, factor=0.1):
return One()
@ -52,6 +54,7 @@ def _conv1x1(in_channels, out_channels, stride=1, padding=0, pad_mode='same'):
return nn.Conv2d(in_channels, out_channels,
kernel_size=1, stride=stride, padding=padding, pad_mode=pad_mode, weight_init=init_value)
def _conv7x7(in_channels, out_channels, stride=1, padding=0, pad_mode='same'):
init_value = weight_variable((out_channels, in_channels, 7, 7))
return nn.Conv2d(in_channels, out_channels,
@ -63,6 +66,7 @@ def _fused_bn(channels, momentum=0.9):
init_bias = weight_variable((channels,))
return nn.BatchNorm2d(channels, momentum=momentum)
class BasicBlock(nn.Cell):
expansion = 1
@ -172,7 +176,7 @@ class ResNet(nn.Cell):
layer_nums,
in_channels,
out_channels,
- strides=[1,2,2,2],
+ strides=[1, 2, 2, 2],
num_classes=100):
super(ResNet, self).__init__()
@ -292,17 +296,19 @@ class SoftmaxCrossEntropyExpand(_Loss):
rank_id = int(os.environ["RANK_ID"])
device_num = int(os.environ["RANK_SIZE"])
class DataGenerator():
def get_parallel_blocks(self, input_, strategy):
blocks = [input_]
i = 0
for stra in strategy:
temp = []
- while len(blocks)>0:
+ while len(blocks) > 0:
block = blocks.pop(0)
temp.extend(np.split(block, stra, axis=i))
blocks.extend(temp)
- i+=1
+ i += 1
return blocks
def generate_data(self, shape):
@ -321,7 +327,7 @@ class DataGenerator():
stra = [1]*len(shape)
stra[0] = device_num
datas = self.get_parallel_blocks(data, stra)
- return Tensor(data),Tensor(datas[rank_id])
+ return Tensor(data), Tensor(datas[rank_id])
class Dataset():
@ -359,6 +365,7 @@ class ModelCallback(Callback):
def __init__(self):
super(ModelCallback, self).__init__()
self.loss_list = []
def epoch_end(self, run_context, *args):
cb_params = run_context.original_args()
result = cb_params.net_outputs
@ -382,7 +389,7 @@ def test_train_feed(num_classes=8192):
model.train(5, dataset, dataset_sink_mode=False, callbacks=parallel_callback)
loss_value = np.array(parallel_callback.loss_list)
expect_out = [9.010913, 8.855984, 8.56246, 8.146317, 7.624489]
- assert allclose(loss_value, expect_out, 0.0001, 0.0001)
+ assert np.allclose(loss_value, expect_out, 0.0001, 0.0001)
@pytest.mark.level0
@ -402,4 +409,4 @@ def test_train_feed2(num_classes=1001):
model.train(5, dataset, dataset_sink_mode=False, callbacks=parallel_callback)
loss_value = np.array(parallel_callback.loss_list)
expect_out = [6.908755, 6.8358116, 6.6986914, 6.506859, 6.2708097]
- assert allclose(loss_value, expect_out, 0.0001, 0.0001)
+ assert np.allclose(loss_value, expect_out, 0.0001, 0.0001)
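On the `allclose` change: the two positional 0.0001 arguments are numpy's rtol and atol, so the check is elementwise |actual - expected| <= atol + rtol * |expected|. A small sanity check:

import numpy as np
# a 1e-5 deviation passes with rtol = atol = 1e-4
assert np.allclose([9.010923], [9.010913], 0.0001, 0.0001)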

View File

@ -13,12 +13,12 @@
# limitations under the License.
# ============================================================================
import numpy as np
- from mindspore.common.tensor import Tensor
- from mindspore.common import dtype as mstype
import mindspore.context as context
- from mindspore.ops import operations as P
import mindspore.nn as nn
- from mindspore.common import ms_function
+ from mindspore import Tensor, ms_function
+ from mindspore.common import dtype as mstype
+ from mindspore.ops import operations as P
@ms_function
def t1_while(x, y, z):
@ -28,8 +28,9 @@ def t1_while(x, y, z):
x = x + 3
return x
def test_net():
- context.set_context(mode=context.GRAPH_MODE,device_target="Ascend")
+ context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(enable_task_sink=True)
c1 = Tensor([2], mstype.int32)
c2 = Tensor([14], mstype.int32)
@ -38,5 +39,6 @@ def test_net():
ret = t1_while(c1, c2, c3)
assert (ret == expect)
if __name__ == "__main__":
test_net()

View File

@ -12,17 +12,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
- from mindspore import Tensor
- from mindspore.ops import operations as P
- import mindspore.nn as nn
- from mindspore.common.api import ms_function
- import mindspore.common.dtype as mstype
import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
+ import mindspore.nn as nn
+ import mindspore.common.dtype as mstype
+ from mindspore import Tensor, ms_function
+ from mindspore.ops import operations as P
context.set_context(mode=context.GRAPH_MODE, device_id=5, device_target="Ascend")
#context.set_context(enable_task_sink=True)
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
@ -35,17 +34,14 @@ class Net(nn.Cell):
def construct(self, x, y):
x = self.cast(x, mstype.float16)
y = self.cast(y, mstype.float16)
- #x = self.softmax(x)
x = self.add(x, y)
- #x = self.relu(x)
x = self.relu(x)
- #x = self.softmax(x)
x = self.reduce_mean(x)
return x
def test_net():
x = np.random.randn(32, 10).astype(np.float32)
relu = Net()
output = relu(Tensor(x), Tensor(x))
- print(x)
print(output.asnumpy())

View File

@ -13,15 +13,13 @@
# limitations under the License.
# ============================================================================
import numpy as np
+ import mindspore.context as context
+ import mindspore.nn as nn
+ from mindspore import Tensor, Parameter, Model, ms_function
from mindspore.ops import operations as P
from mindspore.common.initializer import initializer
- from mindspore import Tensor, Parameter, Model
from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
from mindspore.nn.optim import Momentum
- from mindspore.common.api import ms_function
- import mindspore.nn as wrap
- import mindspore.context as context
context.set_context(device_target="Ascend", enable_task_sink=True)
@ -35,6 +33,7 @@ class MsWrapper(nn.Cell):
def __init__(self, network):
super(MsWrapper, self).__init__(auto_prefix=False)
self._network = network
@ms_function
def construct(self, *args):
return self._network(*args)
@ -42,16 +41,16 @@ class MsWrapper(nn.Cell):
def me_train_tensor(net, input_np, label_np, epoch_size=2):
loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
- opt = nn.Momentum(Tensor(np.array([0.1])), Tensor(np.array([0.9])), filter(lambda x: x.requires_grad, net.get_parameters()))
+ opt = nn.Momentum(Tensor(np.array([0.1])), Tensor(np.array([0.9])),
+ filter(lambda x: x.requires_grad, net.get_parameters()))
context.set_context(mode=context.GRAPH_MODE)
Model(net, loss, opt)
- _network = wrap.WithLossCell(net, loss)
- _train_net = MsWrapper(wrap.TrainOneStepCell(_network, opt))
+ _network = nn.WithLossCell(net, loss)
+ _train_net = MsWrapper(nn.TrainOneStepCell(_network, opt))
_train_net.set_train()
for epoch in range(0, epoch_size):
print(f"epoch %d"%(epoch))
print(f"epoch %d" % (epoch))
output = _train_net(Tensor(input_np), Tensor(label_np))
print("********output***********")
print(output.asnumpy())
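A side note on the `print(f"epoch %d" % (epoch))` line kept by this change: the f prefix is redundant; since the string has no {} replacement fields, the %-formatting does all the work. Both lines below print the same thing:

epoch = 3
print(f"epoch %d" % (epoch))  # -> epoch 3 (f prefix is a no-op here)
print("epoch %d" % epoch)     # -> epoch 3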

View File

@ -13,16 +13,15 @@
# limitations under the License.
# ============================================================================
import pytest
- from mindspore import Tensor
- from mindspore.ops import operations as P
- import mindspore.nn as nn
- from mindspore.common.api import ms_function
- import mindspore.common.dtype as mstype
import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
+ import mindspore.nn as nn
+ from mindspore import Tensor
+ from mindspore.ops import operations as P
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
@ -35,6 +34,7 @@ class Net(nn.Cell):
x = self.relu(x)
return x
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -43,5 +43,4 @@ def test_net():
x = np.random.randn(32, 10).astype(np.float32)
relu_relu = Net()
output = relu_relu(Tensor(x))
- print(x)
print(output.asnumpy())

View File

@ -13,16 +13,15 @@
# limitations under the License.
# ============================================================================
import pytest
- from mindspore import Tensor
- from mindspore.ops import operations as P
- import mindspore.nn as nn
- from mindspore.common.api import ms_function
- import mindspore.common.dtype as mstype
import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
+ import mindspore.nn as nn
+ from mindspore import Tensor
+ from mindspore.ops import operations as P
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
@ -41,6 +40,7 @@ class Net(nn.Cell):
x = self.relu(x)
return x
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -50,5 +50,4 @@ def test_net():
y = np.random.randn(10).astype(np.float32)
net = Net()
output = net(Tensor(x), Tensor(y))
- print(x)
print(output.asnumpy())

View File

@ -12,15 +12,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
- from mindspore import Tensor
- from mindspore.ops import operations as P
- import mindspore.nn as nn
- import mindspore.common.dtype as mstype
import numpy as np
import mindspore.context as context
from mindspore.common.parameter import Parameter
+ import mindspore.nn as nn
+ from mindspore import Tensor
+ from mindspore.ops import operations as P
context.set_context(mode=context.GRAPH_MODE, device_id=4, device_target="Ascend")
#context.set_context(enable_task_sink=True)
class Net(nn.Cell):
def __init__(self):
@ -39,6 +38,7 @@ class Net(nn.Cell):
z = self.add(z1, z2)
return z
def test_net():
x = np.random.randn(32, 10).astype(np.float32)
y = np.random.randn(32, 10).astype(np.float32)
@ -46,6 +46,4 @@ def test_net():
h = np.random.randn(10).astype(np.float32)
relu_relu = Net()
output = relu_relu(Tensor(x), Tensor(y), Tensor(k), Tensor(h))
- print(x)
print(output.asnumpy())

View File

@ -13,17 +13,16 @@
# limitations under the License.
# ============================================================================
import pytest
+ import numpy as np
+ import mindspore.context as context
+ import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.ops.operations import _grad_ops as G
- import mindspore.nn as nn
- from mindspore.common.api import ms_function
- import mindspore.common.dtype as mstype
- import numpy as np
- import mindspore.context as context
- from mindspore.common.initializer import initializer
- from mindspore.common.parameter import Parameter
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
@ -41,6 +40,7 @@ class Net(nn.Cell):
x = self.relu(x)
return x
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -49,5 +49,4 @@ def test_net():
x = np.random.randn(32, 10).astype(np.float32)
net = Net()
output = net(Tensor(x))
- print(x)
print(output.asnumpy())

View File

@ -14,6 +14,7 @@
# ============================================================================
import os
import filecmp
curr_path = os.path.abspath(os.curdir)
file_memreuse = curr_path + "/mem_reuse_check/memreuse.ir"
file_normal = curr_path + "/mem_reuse_check/normal_mem.ir"
@ -23,5 +24,3 @@ checker = os.path.exists(file_normal)
assert (checker, True)
checker = filecmp.cmp(file_memreuse, file_normal)
assert (checker, True)
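Worth noting (not touched by this commit): `assert (checker, True)` asserts a two-element tuple, which is always truthy, so these checks can never fail. A corrected sketch of the presumed intent:

assert checker                                  # presumed intent: path exists
assert filecmp.cmp(file_memreuse, file_normal)  # presumed intent: files match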

View File

@ -19,6 +19,7 @@ from mindspore.ops import operations as P
from mindspore.common.initializer import initializer
from mindspore.common import dtype as mstype
def weight_variable(shape):
return initializer('XavierUniform', shape=shape, dtype=mstype.float32)
@ -297,4 +298,3 @@ class ResNet(nn.Cell):
def resnet50(batch_size, num_classes):
return ResNet(ResidualBlock, [3, 4, 6, 3], num_classes, batch_size)

View File

@ -12,16 +12,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
- import mindspore.nn as nn
- from mindspore import Tensor
- from mindspore.ops import operations as P
- from mindspore.nn.optim.momentum import Momentum
- from mindspore.train.model import Model, ParallelMode
- from mindspore import context
- import mindspore.common.dtype as mstype
+ import argparse
import os
import numpy as np
- import mindspore.ops.functional as F
+ import mindspore.context as context
+ import mindspore.nn as nn
+ import mindspore.common.dtype as mstype
+ from mindspore import Tensor
+ from mindspore.ops import operations as P
+ from mindspore.ops import functional as F
+ from mindspore.nn.optim.momentum import Momentum
+ from mindspore.train.model import Model, ParallelMode
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor
from mindspore.train.serialization import load_checkpoint, load_param_into_net
import mindspore.dataset as de
@ -30,11 +31,11 @@ import mindspore.dataset.transforms.vision.c_transforms as vision
from mindspore.communication.management import init
from resnet import resnet50
import random
random.seed(1)
np.random.seed(1)
de.config.set_seed(1)
- import argparse
parser = argparse.ArgumentParser(description='Image classification')
parser.add_argument('--run_distribute', type=bool, default=False, help='Run distribute')
parser.add_argument('--device_num', type=int, default=1, help='Device num.')
@ -47,9 +48,9 @@ parser.add_argument('--checkpoint_path', type=str, default=None, help='Checkpoin
parser.add_argument('--dataset_path', type=str, default="/var/log/npu/datasets/cifar", help='Dataset path')
args_opt = parser.parse_args()
- device_id=int(os.getenv('DEVICE_ID'))
+ device_id = int(os.getenv('DEVICE_ID'))
- data_home=args_opt.dataset_path
+ data_home = args_opt.dataset_path
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(enable_task_sink=True, device_id=device_id)
@ -64,8 +65,8 @@ def create_dataset(repeat_num=1, training=True):
ds = de.Cifar10Dataset(data_dir)
if args_opt.run_distribute:
- rank_id=int(os.getenv('RANK_ID'))
- rank_size=int(os.getenv('RANK_SIZE'))
+ rank_id = int(os.getenv('RANK_ID'))
+ rank_size = int(os.getenv('RANK_SIZE'))
ds = de.Cifar10Dataset(data_dir, num_shards=rank_size, shard_id=rank_id)
resize_height = 224

View File

@ -12,16 +12,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
- import mindspore.nn as nn
- from mindspore import Tensor
- from mindspore.ops import operations as P
- from mindspore.nn.optim.momentum import Momentum
- from mindspore.train.model import Model, ParallelMode
- from mindspore import context
- import mindspore.common.dtype as mstype
+ import argparse
import os
import numpy as np
- import mindspore.ops.functional as F
+ import mindspore.context as context
+ import mindspore.nn as nn
+ import mindspore.common.dtype as mstype
+ from mindspore import Tensor
+ from mindspore.ops import operations as P
+ from mindspore.ops import functional as F
+ from mindspore.nn.optim.momentum import Momentum
+ from mindspore.train.model import Model, ParallelMode
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor
from mindspore.train.serialization import load_checkpoint, load_param_into_net
import mindspore.dataset as de
@ -35,7 +36,6 @@ random.seed(1)
np.random.seed(1)
de.config.set_seed(1)
- import argparse
parser = argparse.ArgumentParser(description='Image classification')
parser.add_argument('--run_distribute', type=bool, default=False, help='Run distribute')

View File

@ -15,6 +15,7 @@
import os
import pytest
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_single
@ -22,6 +23,7 @@ def test_nccl_lenet():
return_code = os.system("mpirun -n 8 pytest -s test_nccl_lenet.py")
assert(return_code == 0)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_single
@ -29,6 +31,7 @@ def test_nccl_all_reduce_op():
return_code = os.system("mpirun -n 8 pytest -s test_nccl_all_reduce_op.py")
assert(return_code == 0)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_single
@ -36,6 +39,7 @@ def test_nccl_all_gather_op():
return_code = os.system("mpirun -n 8 pytest -s test_nccl_all_gather_op.py")
assert(return_code == 0)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_single

View File

@ -12,23 +12,25 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
- from mindspore import Tensor
- from mindspore.ops import operations as P
- import mindspore.nn as nn
import numpy as np
import mindspore.context as context
+ import mindspore.nn as nn
+ from mindspore import Tensor
+ from mindspore.ops import operations as P
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.communication.management import init, NCCL_WORLD_COMM_GROUP, get_rank, get_group_size
context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
init('nccl')
rank = get_rank()
size = get_group_size()
- x = np.ones([1,1,3,3]).astype(np.float32) * 0.01 * (rank + 1)
+ x = np.ones([1, 1, 3, 3]).astype(np.float32) * 0.01 * (rank + 1)
class Net(nn.Cell):
- def __init__( self):
+ def __init__(self):
super(Net, self).__init__()
self.all_gather = P.AllGather(group=NCCL_WORLD_COMM_GROUP)
self.x = Parameter(initializer(Tensor(x), x.shape), name='x')
@ -36,6 +38,7 @@ class Net(nn.Cell):
def construct(self):
return self.all_gather(self.x)
def test_AllGather():
all_gather = Net()
output = all_gather()

View File

@ -12,23 +12,25 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
- from mindspore import Tensor
- from mindspore.ops import operations as P
- import mindspore.nn as nn
import numpy as np
import mindspore.context as context
+ import mindspore.nn as nn
+ from mindspore import Tensor
+ from mindspore.ops import operations as P
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.communication.management import init, NCCL_WORLD_COMM_GROUP, get_rank, get_group_size
context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
init('nccl')
rank = get_rank()
size = get_group_size()
- x = np.ones([3,1,3,3]).astype(np.float32) * 0.01 * (rank + 1)
+ x = np.ones([3, 1, 3, 3]).astype(np.float32) * 0.01 * (rank + 1)
class Net(nn.Cell):
- def __init__( self):
+ def __init__(self):
super(Net, self).__init__()
self.x1 = Parameter(initializer(Tensor(x), x.shape), name='x1')
self.x2 = Parameter(initializer(Tensor(x), x.shape), name='x2')
@ -47,6 +49,7 @@ class Net(nn.Cell):
self.all_reduce2(self.x2),
self.all_reduce3(self.x3))
def test_AllReduce():
all_reduce = Net()
output = all_reduce()
@ -58,16 +61,16 @@ def test_AllReduce():
diff0 = output[0].asnumpy() - expect0
error0 = np.ones(shape=expect0.shape) * 1.0e-5
assert np.all(diff0 < error0)
- assert (output[0].shape() == expect0.shape)
+ assert output[0].shape() == expect0.shape
expect1 = expect0
diff1 = output[1].asnumpy() - expect1
error1 = np.ones(shape=expect1.shape) * 1.0e-5
assert np.all(diff1 < error1)
- assert (output[1].shape() == expect1.shape)
+ assert output[1].shape() == expect1.shape
expect2 = expect1
diff2 = output[2].asnumpy() - expect2
error2 = np.ones(shape=expect2.shape) * 1.0e-5
assert np.all(diff2 < error2)
- assert (output[2].shape() == expect2.shape)
+ assert output[2].shape() == expect2.shape
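For reference, the expected values in this test follow from each rank contributing 0.01 * (rank + 1) to a sum AllReduce; a quick check of the arithmetic, assuming two devices:

size = 2
expect = sum(0.01 * (i + 1) for i in range(size))  # 0.03 per element for 2 ranks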

View File

@ -12,16 +12,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
- import numpy as np
- from mindspore.nn import Dense
- import mindspore.nn as nn
import datetime
+ import numpy as np
import mindspore.context as context
- from mindspore.communication.management import init, NCCL_WORLD_COMM_GROUP, get_rank, get_group_size
+ import mindspore.nn as nn
+ from mindspore import Tensor
from mindspore.nn.optim import Momentum
from mindspore.nn import TrainOneStepCell, WithLossCell
from mindspore.ops import operations as P
- from mindspore.common.tensor import Tensor
+ from mindspore.communication.management import init, get_rank, get_group_size
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
init('nccl')
@ -31,6 +30,7 @@ total = 5000
batch_size = 32
mini_batch = total // batch_size
class LeNet(nn.Cell):
def __init__(self):
super(LeNet, self).__init__()
@ -45,13 +45,13 @@ class LeNet(nn.Cell):
self.reshape = P.Reshape()
weight1 = Tensor(np.ones([120, 400]).astype(np.float32) * 0.01)
- self.fc1 = Dense(400, 120, weight_init=weight1)
+ self.fc1 = nn.Dense(400, 120, weight_init=weight1)
weight2 = Tensor(np.ones([84, 120]).astype(np.float32) * 0.01)
- self.fc2 = Dense(120, 84, weight_init=weight2)
+ self.fc2 = nn.Dense(120, 84, weight_init=weight2)
weight3 = Tensor(np.ones([10, 84]).astype(np.float32) * 0.01)
- self.fc3 = Dense(84, 10, weight_init=weight3)
+ self.fc3 = nn.Dense(84, 10, weight_init=weight3)
def construct(self, input_x):
output = self.conv1(input_x)
@ -66,6 +66,7 @@ class LeNet(nn.Cell):
output = self.fc3(output)
return output
def test_lenet_nccl():
net = LeNet()
net.set_train()

View File

@ -12,11 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
- from mindspore import Tensor
- from mindspore.ops import operations as P
- import mindspore.nn as nn
import numpy as np
import mindspore.context as context
+ import mindspore.nn as nn
+ from mindspore import Tensor
+ from mindspore.ops import operations as P
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.communication.management import init, NCCL_WORLD_COMM_GROUP, get_rank, get_group_size
@ -27,8 +27,9 @@ rank = get_rank()
size = get_group_size()
x = np.ones([size, 1, 3, 3]).astype(np.float32) * 0.01 * (rank + 1)
class Net(nn.Cell):
- def __init__( self):
+ def __init__(self):
super(Net, self).__init__()
self.x = Parameter(initializer(Tensor(x), x.shape), name='x')
@ -46,6 +47,7 @@ class Net(nn.Cell):
self.reduce_scatter2(self.x),
self.reduce_scatter3(self.x))
def test_ReduceScatter():
reduce_scatter = Net()
output = reduce_scatter()
@ -53,7 +55,7 @@ def test_ReduceScatter():
sum = np.ones([size, 1, 3, 3]).astype(np.float32) * 0
for i in range(size):
sum += np.ones([size, 1, 3, 3]).astype(np.float32) * 0.01 * (i + 1)
- expect0 = sum[rank : rank + 1]
+ expect0 = sum[rank: rank + 1]
diff0 = output[0].asnumpy() - expect0
error0 = np.ones(shape=expect0.shape) * 1.0e-5
assert np.all(diff0 < error0)
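The expectation above mirrors ReduceScatter semantics: the inputs are first summed across ranks, then each rank keeps only its slice along axis 0, hence the `sum[rank: rank + 1]` indexing. A numpy sketch:

import numpy as np
size, rank = 2, 0
full = sum(np.ones([size, 1, 3, 3], np.float32) * 0.01 * (i + 1) for i in range(size))
expect0 = full[rank: rank + 1]  # this rank's slice of the reduced tensor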

View File

@ -16,6 +16,7 @@ import mindspore.nn as nn
from mindspore.ops import operations as P
from mindspore.nn import Dense
class AlexNet(nn.Cell):
def __init__(self, num_classes=10):
super(AlexNet, self).__init__()

View File

@ -18,21 +18,22 @@
import os
import pytest
import numpy as np
- from numpy import allclose
+ import mindspore.context as context
import mindspore.common.dtype as mstype
import mindspore.dataset.engine.datasets as de
import mindspore.dataset.transforms.c_transforms as C
- from mindspore import context
- from mindspore.common.tensor import Tensor
+ from mindspore import Tensor
from mindspore.train.model import Model
from mindspore.train.callback import Callback
from mindspore.model_zoo.Bert_NEZHA import BertConfig, BertNetworkWithLoss, BertTrainOneStepCell
from mindspore.nn.optim import Momentum
from mindspore import log as logger
_current_dir = os.path.dirname(os.path.realpath(__file__))
DATA_DIR = ["/home/workspace/mindspore_dataset/bert/example/examples.tfrecord"]
SCHEMA_DIR = "/home/workspace/mindspore_dataset/bert/example/datasetSchema.json"
def get_config(version='base', batch_size=1):
"""get config"""
if version == 'base':
@ -99,6 +100,7 @@ def get_config(version='base', batch_size=1):
bert_config = BertConfig(batch_size=batch_size)
return bert_config
def me_de_train_dataset():
"""test me de train dataset"""
# apply repeat operations
@ -137,6 +139,7 @@ class ModelCallback(Callback):
self.loss_list.append(cb_params.net_outputs.asnumpy()[0])
logger.info("epoch: {}, outputs are {}".format(cb_params.cur_epoch_num, str(cb_params.net_outputs)))
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -180,7 +183,8 @@ def test_bert_tdt():
expect_out = [12.19179, 11.965041, 11.969687, 11.97815, 11.969171, 12.603289, 12.165594,
12.824818, 12.38842, 12.604046]
logger.info("expected loss value output: {}".format(expect_out))
- assert allclose(loss_value, expect_out, 0.00001, 0.00001)
+ assert np.allclose(loss_value, expect_out, 0.00001, 0.00001)
if __name__ == '__main__':
test_bert_tdt()

View File

@ -14,9 +14,10 @@
# ============================================================================
import numpy as np
import mindspore.nn as nn
- from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.nn import Dense
+ from mindspore import Tensor
class LeNet(nn.Cell):
def __init__(self):

View File

@ -13,9 +13,10 @@
# limitations under the License.
# ============================================================================
import numpy as np
- from mindspore.common.tensor import Tensor
import mindspore.nn as nn
- import mindspore.ops.operations as P
+ from mindspore import Tensor
+ from mindspore.ops import operations as P
def weight_variable(shape):
ones = np.ones(shape).astype(np.float32)
@ -294,6 +295,6 @@ class ResNet(nn.Cell):
x = self.fc(x)
return x
def resnet50(batch_size, num_classes):
return ResNet(ResidualBlock, [3, 4, 6, 3], num_classes, batch_size)

View File

@ -13,13 +13,15 @@
# limitations under the License.
# ============================================================================
import pytest
- from mindspore.nn import TrainOneStepCell, WithLossCell
- import mindspore.context as context
- from mindspore.nn.optim import Momentum
import numpy as np
+ import mindspore.context as context
import mindspore.nn as nn
- from mindspore.ops import operations as P
from mindspore import Tensor
+ from mindspore.nn import TrainOneStepCell, WithLossCell
+ from mindspore.nn.optim import Momentum
+ from mindspore.ops import operations as P
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
class LeNet(nn.Cell):
@ -52,9 +54,6 @@ class LeNet(nn.Cell):
return output
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
def train(net, data, label):
learning_rate = 0.01
momentum = 0.9

View File

@ -19,15 +19,17 @@ from __future__ import print_function
import pytest
import numpy as np
+ import mindspore.context as context
import mindspore.nn as nn
+ from mindspore import Tensor
from mindspore.nn.optim import Momentum
from mindspore.ops import operations as P
from mindspore.nn import TrainOneStepCell, WithLossCell
- from mindspore import Tensor
from mindspore.common.initializer import initializer
- import mindspore.context as context
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
class AlexNet(nn.Cell):
def __init__(self, num_classes=10):
super(AlexNet, self).__init__()
@ -66,6 +68,7 @@ class AlexNet(nn.Cell):
x = self.fc3(x)
return x
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@ -73,14 +76,14 @@ def test_trainTensor(num_classes=10, epoch=15, batch_size=32):
net = AlexNet(num_classes)
lr = 0.1
momentum = 0.9
- optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr, momentum, weight_decay = 0.0001)
+ optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr, momentum, weight_decay=0.0001)
criterion = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
net_with_criterion = WithLossCell(net, criterion)
train_network = TrainOneStepCell(net_with_criterion, optimizer)
train_network.set_train()
- losses=[]
+ losses = []
for i in range(0, epoch):
- data = Tensor(np.ones([batch_size, 3 ,227, 227]).astype(np.float32) * 0.01)
+ data = Tensor(np.ones([batch_size, 3, 227, 227]).astype(np.float32) * 0.01)
label = Tensor(np.ones([batch_size]).astype(np.int32))
loss = train_network(data, label)
losses.append(loss)

View File

@ -16,16 +16,19 @@
import pytest
import numpy as np
import mindspore.nn as nn
+ import mindspore.context as context
+ from mindspore import Tensor
from mindspore.nn.optim import Momentum
from mindspore.ops import operations as P
from mindspore.nn import TrainOneStepCell, WithLossCell
from mindspore.nn import Dense
- from mindspore import Tensor
from mindspore.common.initializer import initializer
from mindspore.common import dtype as mstype
- import mindspore.context as context
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
class LeNet(nn.Cell):
def __init__(self):
super(LeNet, self).__init__()
@ -65,6 +68,7 @@ def multisteplr(total_steps, gap, base_lr=0.9, gamma=0.1, dtype=mstype.float32):
lr.append(lr_)
return Tensor(np.array(lr), dtype)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@ -81,7 +85,7 @@ def test_train_lenet():
train_network.set_train()
losses = []
for i in range(epoch):
- data = Tensor(np.ones([net.batch_size, 3 ,32, 32]).astype(np.float32) * 0.01)
+ data = Tensor(np.ones([net.batch_size, 3, 32, 32]).astype(np.float32) * 0.01)
label = Tensor(np.ones([net.batch_size]).astype(np.int32))
loss = train_network(data, label)
losses.append(loss)
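The `multisteplr` helper above builds a step-decay schedule: the rate drops by `gamma` every `gap` steps. A standalone sketch of the values it produces (names and defaults taken from the snippet; the helper below is hypothetical):

def multisteplr_values(total_steps, gap, base_lr=0.9, gamma=0.1):
    # lr at step i is base_lr * gamma ** (i // gap)
    return [base_lr * gamma ** (i // gap) for i in range(total_steps)]

print(multisteplr_values(6, 2))  # ~[0.9, 0.9, 0.09, 0.09, 0.009, 0.009]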

View File

@ -15,18 +15,20 @@
import pytest
import numpy as np
+ import mindspore.context as context
+ import mindspore.nn as nn
+ from mindspore import Tensor
from mindspore.nn.optim import Momentum
from mindspore.ops import operations as P
from mindspore.nn import TrainOneStepCell, WithLossCell
from mindspore.nn import Dense
- from mindspore import Tensor
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
- import mindspore.context as context
- import mindspore.nn as nn
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
def InitialLstmWeight(input_size, hidden_size, num_layers, bidirectional, has_bias=False):
num_directions = 1
if bidirectional:
@ -56,6 +58,7 @@ def InitialLstmWeight(input_size, hidden_size, num_layers, bidirectional, has_bi
return h, c, w
class SentimentNet(nn.Cell):
def __init__(self, vocab_size, embed_size, num_hiddens, num_layers,
bidirectional, weight, labels, batch_size):
@ -99,6 +102,7 @@ class SentimentNet(nn.Cell):
outputs = self.decoder(encoding)
return outputs
batch_size = 64
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@ -130,7 +134,7 @@ def test_LSTM():
train_network.set_train()
train_features = Tensor(np.ones([64, max_len]).astype(np.int32))
- train_labels = Tensor(np.ones([64,]).astype(np.int32)[0:64])
+ train_labels = Tensor(np.ones([64, ]).astype(np.int32)[0:64])
losses = []
for epoch in range(num_epochs):
loss = train_network(train_features, train_labels)

View File

@ -19,36 +19,34 @@ from __future__ import print_function
import pytest
import numpy as np
+ import mindspore.context as context
+ import mindspore.nn as nn
+ from mindspore import Tensor
from mindspore.nn.cell import Cell
from mindspore.nn.layer.conv import Conv2d
from mindspore.nn.layer.basic import Flatten
from mindspore.nn.layer.normalization import BatchNorm2d
from mindspore.nn.layer.pooling import MaxPool2d
from mindspore.ops.operations import TensorAdd
- import mindspore.nn as nn
from mindspore.nn.optim import Momentum
from mindspore.ops import operations as P
from mindspore.nn import TrainOneStepCell, WithLossCell
from mindspore.nn import Dense
- from mindspore import Tensor
from mindspore.common.initializer import initializer
- import mindspore.context as context
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
def random_normal_init(shape, mean=0.0, stddev=0.01, seed=None):
init_value = np.ones(shape).astype(np.float32) * 0.01
return Tensor(init_value)
def variance_scaling_raw(shape):
variance_scaling_value = np.ones(shape).astype(np.float32) * 0.01
return Tensor(variance_scaling_value)
def weight_variable_0(shape):
zeros = np.zeros(shape).astype(np.float32)
return Tensor(zeros)
@ -323,6 +321,7 @@ class ResNet(Cell):
def resnet50(num_classes):
return ResNet(ResidualBlock, [3, 4, 6, 3], num_classes)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@ -335,9 +334,9 @@ def test_trainTensor(num_classes=10, epoch=8, batch_size=1):
net_with_criterion = WithLossCell(net, criterion)
train_network = TrainOneStepCell(net_with_criterion, optimizer) # optimizer
train_network.set_train()
- losses=[]
+ losses = []
for i in range(0, epoch):
- data = Tensor(np.ones([batch_size, 3 ,224, 224]).astype(np.float32) * 0.01)
+ data = Tensor(np.ones([batch_size, 3, 224, 224]).astype(np.float32) * 0.01)
label = Tensor(np.ones([batch_size]).astype(np.int32))
loss = train_network(data, label)
losses.append(loss)

View File

@ -22,16 +22,18 @@ import os
import time
import numpy as np
import argparse
- import mindspore.nn as nn
- from mindspore.common.tensor import Tensor
- from mindspore.nn import TrainOneStepCell, WithLossCell
+ import mindspore.context as context
+ import mindspore.nn as nn
+ from mindspore import Tensor
+ from mindspore.nn import TrainOneStepCell, WithLossCell
from mindspore.nn.optim import Momentum
from models.lenet import LeNet
from models.resnetv1_5 import resnet50
from models.alexnet import AlexNet
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
def train(net, data, label):
learning_rate = 0.01
momentum = 0.9
@ -42,29 +44,31 @@ def train(net, data, label):
train_network = TrainOneStepCell(net_with_criterion, optimizer) # optimizer
train_network.set_train()
res = train_network(data, label)
print("+++++++++Loss+++++++++++++")
print(res)
print("+++++++++++++++++++++++++++")
assert res
def test_resnet50():
- data = Tensor(np.ones([32, 3 ,224, 224]).astype(np.float32) * 0.01)
+ data = Tensor(np.ones([32, 3, 224, 224]).astype(np.float32) * 0.01)
label = Tensor(np.ones([32]).astype(np.int32))
net = resnet50(32, 10)
train(net, data, label)
def test_lenet():
- data = Tensor(np.ones([32, 1 ,32, 32]).astype(np.float32) * 0.01)
+ data = Tensor(np.ones([32, 1, 32, 32]).astype(np.float32) * 0.01)
label = Tensor(np.ones([32]).astype(np.int32))
net = LeNet()
train(net, data, label)
def test_alexnet():
- data = Tensor(np.ones([32, 3 ,227, 227]).astype(np.float32) * 0.01)
+ data = Tensor(np.ones([32, 3, 227, 227]).astype(np.float32) * 0.01)
label = Tensor(np.ones([32]).astype(np.int32))
net = AlexNet()
train(net, data, label)
parser = argparse.ArgumentParser(description='MindSpore Testing Network')
parser.add_argument('--net', default='resnet50', type=str, help='net name')
parser.add_argument('--device', default='Ascend', type=str, help='device target')

View File

@ -14,7 +14,8 @@
# ============================================================================
import pytest
import numpy as np
- import time, math
+ import time
+ import math
import mindspore.nn as nn
from mindspore import context, Tensor, ParameterTuple
from mindspore.ops import operations as P
@ -28,6 +29,7 @@ from mindspore.nn.optim import Momentum
np.random.seed(1)
def weight_variable():
"""weight initial"""
return TruncatedNormal(0.02)
@ -58,6 +60,7 @@ class LeNet(nn.Cell):
Examples:
>>> LeNet(num_class=10)
"""
def __init__(self, num_class=10):
super(LeNet, self).__init__()
self.num_class = num_class
@ -91,6 +94,7 @@ class CrossEntropyLoss(nn.Cell):
"""
Define loss for network
"""
def __init__(self):
super(CrossEntropyLoss, self).__init__()
self.cross_entropy = P.SoftmaxCrossEntropyWithLogits()
@ -111,6 +115,7 @@ class GradWrap(nn.Cell):
"""
GradWrap definition
"""
def __init__(self, network):
super(GradWrap, self).__init__()
self.network = network
@ -154,4 +159,3 @@ def test_ascend_pynative_lenet():
print("======epoch: ", epoch, " loss: ", loss_output.asnumpy(), " cost time: ", cost_time)
assert(loss_output.asnumpy() < 0.1)

View File

@ -33,10 +33,12 @@ SUMMARY_DIR = CUR_DIR + "/test_temp_summary_event_file/"
context.set_context(device_target="Ascend")
class MsWrapper(nn.Cell):
def __init__(self, network):
super(MsWrapper, self).__init__(auto_prefix=False)
self._network = network
@ms_function
def construct(self, *args):
return self._network(*args)
@ -45,14 +47,15 @@ class MsWrapper(nn.Cell):
def me_train_tensor(net, input_np, label_np, epoch_size=2):
context.set_context(mode=context.GRAPH_MODE)
loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
- opt = ApplyMomentum(Tensor(np.array([0.1])), Tensor(np.array([0.9])), filter(lambda x: x.requires_grad, net.get_parameters()))
+ opt = ApplyMomentum(Tensor(np.array([0.1])), Tensor(np.array([0.9])),
+ filter(lambda x: x.requires_grad, net.get_parameters()))
Model(net, loss, opt)
_network = wrap.WithLossCell(net, loss)
_train_net = MsWrapper(wrap.TrainOneStepCell(_network, opt))
_train_net.set_train()
summary_writer = SummaryRecord(SUMMARY_DIR, file_suffix="_MS_GRAPH", network=_train_net)
for epoch in range(0, epoch_size):
print(f"epoch %d"%(epoch))
print(f"epoch %d" % (epoch))
output = _train_net(Tensor(input_np), Tensor(label_np))
summary_writer.record(i)
print("********output***********")

View File

@ -108,6 +108,6 @@ def me_scalar_summary(steps, tag=None, value=None):
def test_scalarsummary_scalar1_step10_summaryrecord1():
clean_environment_file(SUMMARY_DIR_ME_TEMP)
output_dict = me_scalar_summary(10)
print("test_scalarsummary_scalar1_step10_summaryrecord1 \n",output_dict)
print("test_scalarsummary_scalar1_step10_summaryrecord1 \n", output_dict)
save_summary_events_file(SUMMARY_DIR_ME_TEMP, SUMMARY_DIR_ME)
clean_environment_file(SUMMARY_DIR_ME)

View File

@ -29,7 +29,8 @@ from mindspore.train.serialization import save, load, save_checkpoint, load_chec
_read_file_last_line, context, export
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend",
- enable_task_sink=True,enable_loop_sink=True,enable_ir_fusion=True)
+ enable_task_sink=True, enable_loop_sink=True, enable_ir_fusion=True)
def test_resnet50_export(batch_size=1, num_classes=5):
context.set_context(enable_ir_fusion=False)

View File

@ -19,6 +19,7 @@ from mindspore.ops import operations as P
from mindspore.common.initializer import initializer
from mindspore.common import dtype as mstype
def weight_variable(shape):
return initializer('XavierUniform', shape=shape, dtype=mstype.float32)
@ -297,4 +298,3 @@ class ResNet(nn.Cell):
def resnet50(batch_size, num_classes):
return ResNet(ResidualBlock, [3, 4, 6, 3], num_classes, batch_size)

View File

@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
+ import argparse
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
@ -35,7 +36,6 @@ random.seed(1)
np.random.seed(1)
ds.config.set_seed(1)
- import argparse
parser = argparse.ArgumentParser(description='Image classification')
parser.add_argument('--run_distribute', type=bool, default=False, help='Run distribute')
parser.add_argument('--device_num', type=int, default=1, help='Device num.')
@ -48,15 +48,16 @@ parser.add_argument('--checkpoint_path', type=str, default=None, help='Checkpoin
parser.add_argument('--dataset_path', type=str, default="/var/log/npu/datasets/cifar", help='Dataset path')
args_opt = parser.parse_args()
- device_id=int(os.getenv('DEVICE_ID'))
+ device_id = int(os.getenv('DEVICE_ID'))
- data_home=args_opt.dataset_path
+ data_home = args_opt.dataset_path
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(enable_task_sink=True, device_id=device_id)
context.set_context(enable_loop_sink=True)
context.set_context(enable_mem_reuse=True)
def create_dataset(repeat_num=1, training=True):
data_dir = data_home + "/cifar-10-batches-bin"
if not training:
@ -64,8 +65,8 @@ def create_dataset(repeat_num=1, training=True):
data_set = ds.Cifar10Dataset(data_dir)
if args_opt.run_distribute:
- rank_id=int(os.getenv('RANK_ID'))
- rank_size=int(os.getenv('RANK_SIZE'))
+ rank_id = int(os.getenv('RANK_ID'))
+ rank_size = int(os.getenv('RANK_SIZE'))
data_set = ds.Cifar10Dataset(data_dir, num_shards=rank_size, shard_id=rank_id)
resize_height = 224
@ -103,6 +104,7 @@ def create_dataset(repeat_num=1, training=True):
return data_set
class CrossEntropyLoss(nn.Cell):
def __init__(self):
super(CrossEntropyLoss, self).__init__()

View File

@ -112,6 +112,7 @@ class CrossEntropyLoss(nn.Cell):
loss = self.mean(loss, (-1,))
return loss
class LossGet(Callback):
def __init__(self, per_print_times=1):
super(LossGet, self).__init__()
@ -143,6 +144,7 @@ class LossGet(Callback):
def get_loss(self):
return self._loss
def train_process(q, device_id, epoch_size, num_classes, device_num, batch_size, enable_hccl):
os.system("mkdir " + str(device_id))
os.chdir(str(device_id))