forked from OSSInnovation/mindspore
Fix some minor Pylint warnings in my code (ops)
parent fb7e4eac76
commit a7ad0d0a49
@@ -27,8 +27,8 @@ class Net(nn.Cell):
         super(Net, self).__init__()
         self.add = P.TensorAdd()
 
-    def construct(self, x, y):
-        return self.add(x, y)
+    def construct(self, x_, y_):
+        return self.add(x_, y_)
 
 
 x = np.ones([1, 3, 3, 4]).astype(np.float32)

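Note: the recurring change in this commit renames `construct` parameters (x to x_, y to y_, and so on) so they no longer shadow the module-level test inputs, which Pylint reports as W0621 (redefined-outer-name). A minimal self-contained sketch of the warning and the fix, in plain Python rather than MindSpore:

    x = 1.0  # module-level name, as in the test scripts in this commit


    def scale(x):  # W0621: parameter "x" shadows the module-level "x"
        return x * 2


    def scale_fixed(x_):  # renamed parameter; the warning goes away
        return x_ * 2


    print(scale(3.0), scale_fixed(3.0))
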
@@ -31,8 +31,8 @@ class Net(nn.Cell):
     # 'normal', [2, 3, 3, 4]), name='dout')
 
     @ms_function
-    def construct(self, dout):
-        return self.bias_add_grad(dout)
+    def construct(self, dout_):
+        return self.bias_add_grad(dout_)
 
 
 dout = np.ones([2, 3, 4, 4]).astype(np.float32)

@@ -34,8 +34,8 @@ class Net(nn.Cell):
         self.get_shape = P.Shape()
 
     @ms_function
-    def construct(self, x, out):
-        return self.conv2d_grad(out, x, self.get_shape(self.y))
+    def construct(self, x_, out_):
+        return self.conv2d_grad(out_, x_, self.get_shape(self.y))
 
 
 x = Tensor(np.array([[[

@@ -29,9 +29,9 @@ class Net(nn.Cell):
         self.mask = P.DropoutGenMask(10, 28)
         self.shape = P.Shape()
 
-    def construct(self, x, y):
-        shape_x = self.shape(x)
-        return self.mask(shape_x, y)
+    def construct(self, x_, y_):
+        shape_x = self.shape(x_)
+        return self.mask(shape_x, y_)
 
 
 x = np.ones([2, 4, 2, 2]).astype(np.int32)

@@ -27,8 +27,8 @@ class Net(nn.Cell):
         super(Net, self).__init__()
         self.equal_count = P.EqualCount()
 
-    def construct(self, x, y):
-        return self.equal_count(x, y)
+    def construct(self, x_, y_):
+        return self.equal_count(x_, y_)
 
 
 x = np.random.randn(32).astype(np.int32)

@@ -29,8 +29,8 @@ class Net(nn.Cell):
         self.matmul = P.MatMul()
 
     @ms_function
-    def construct(self, x1, x2):
-        return self.matmul(x1, x2)
+    def construct(self, x1_, x2_):
+        return self.matmul(x1_, x2_)
 
 
 x1 = np.random.randn(1, 3).astype(np.float32)

@@ -12,8 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
-import numpy as np
-
 import mindspore.context as context
 import mindspore.nn as nn
 from mindspore.common.api import ms_function

@@ -63,7 +63,7 @@ def test_net():
     expect = loss_np
     SparseSoftmaxCrossEntropyWithLogits = Net()
     loss_me = SparseSoftmaxCrossEntropyWithLogits(Tensor(logits), Tensor(labels))
-    '''assert'''
+    # assert
     assert np.allclose(expect.flatten(), loss_me.asnumpy().flatten(), 0.01, 0.01)
     print(loss_me.asnumpy().flatten())
     print("-------------------------")

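Note: a bare string literal in the middle of a function is evaluated and discarded, which Pylint flags as W0105 (pointless-string-statement); this hunk and the expect-output hunks below therefore turn such strings into `#` comments. A small sketch, independent of the test above:

    def check(a, b):
        total = a + b
        '''assert'''  # W0105: this string statement has no effect
        # assert      # an ordinary comment is the idiomatic replacement
        assert total == a + b
        return total


    print(check(1, 2))
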
@@ -25,8 +25,8 @@ class Net(nn.Cell):
         super(Net, self).__init__()
         self.add = P.TensorAdd()
 
-    def construct(self, x, y):
-        return self.add(x, y)
+    def construct(self, x_, y_):
+        return self.add(x_, y_)
 
 
 x = np.random.randn(1, 3, 3, 4).astype(np.float32)

@@ -65,12 +65,10 @@ def test_conv2d_backprop_filter():
     conv2d_filter = Net()
     output = conv2d_filter()
     print("================================")
-    """
-    expect output:
-    [[[[ -60, -142, -265]
-      [-104, -211, -322]
-      [-102, -144, -248]]]]
-    """
+    # expect output:
+    # [[[[ -60, -142, -265]
+    #   [-104, -211, -322]
+    #   [-102, -144, -248]]]]
     expect = np.array([[[[-60, -142, -265],
                         [-104, -211, -322],
                         [-102, -144, -248]]]]).astype(np.float32)

@@ -64,15 +64,13 @@ def test_conv2d_backprop_input():
     conv2d_input = Net()
     output = conv2d_input()
     print("================================")
-    """
-    expect output:
-    [[[[ -5, -4, 5, 12, 0, -8]
-      [-15, -6, 17, 17, -2, -11]
-      [-15, -8, 13, 12, 2, -4]
-      [-13, -6, 8, -14, 5, 20]
-      [ -3, -4, -4, -19, 7, 23]
-      [ -3, -2, 0, -14, 3, 16]]]]
-    """
+    # expect output:
+    # [[[[ -5, -4, 5, 12, 0, -8]
+    #   [-15, -6, 17, 17, -2, -11]
+    #   [-15, -8, 13, 12, 2, -4]
+    #   [-13, -6, 8, -14, 5, 20]
+    #   [ -3, -4, -4, -19, 7, 23]
+    #   [ -3, -2, 0, -14, 3, 16]]]]
     expect = np.array([[[[-5, -4, 5, 12, 0, -8],
                         [-15, -6, 17, 17, -2, -11],
                         [-15, -8, 13, 12, 2, -4],

@@ -59,7 +59,7 @@ def gelu_backward_cmp(input_shape):
 
 class MEGeluLargeIn(Cell):
     def __init__(self):
-        super(GELU, self).__init__()
+        super(MEGeluLargeIn, self).__init__()
         self.matmul = P.MatMul()
         self.gelu = P.Gelu()
 

@@ -79,7 +79,7 @@ class GradLargeIn(Cell):
 
 
 def gelu_backward_me_large_in_impl(x1, x2, output_grad):
-    n = GradLargeIn()
+    n = GELU()
     grad_with_sense = GradLargeIn(n)
     grad_with_sense.set_train()
     input_grad = grad_with_sense(x1, x2, output_grad)

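Note: the two GELU hunks are behavior fixes rather than style fixes: calling `super(GELU, self).__init__()` inside `MEGeluLargeIn` names the wrong class (Pylint E1003, bad-super-call), and `gelu_backward_me_large_in_impl` was wrapping `GradLargeIn` in an instance of itself. A minimal sketch of the super() rule, assuming ordinary Python classes:

    class Base:
        def __init__(self):
            self.ready = True


    class Child(Base):
        def __init__(self):
            # The first argument to super() must be the class being
            # defined; in Python 3 it can simply be omitted.
            super(Child, self).__init__()  # or: super().__init__()


    print(Child().ready)
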
@@ -29,8 +29,8 @@ class Net(nn.Cell):
         self.less = P.Less()
 
     @ms_function
-    def construct(self, x1, x2):
-        return self.less(x1, x2)
+    def construct(self, x1_, x2_):
+        return self.less(x1_, x2_)
 
 
 x1 = np.random.randn(3, 4).astype(np.float16)

@@ -29,8 +29,8 @@ class Net(nn.Cell):
         self.less_equal = P.LessEqual()
 
     @ms_function
-    def construct(self, x1, x2):
-        return self.less_equal(x1, x2)
+    def construct(self, x1_, x2_):
+        return self.less_equal(x1_, x2_)
 
 
 x1 = np.random.randn(3, 4).astype(np.float16)

@@ -28,8 +28,8 @@ class Net(nn.Cell):
         self.logical_and = P.LogicalAnd()
 
     @ms_function
-    def construct(self, x1, x2):
-        return self.logical_and(x1, x2)
+    def construct(self, x1_, x2_):
+        return self.logical_and(x1_, x2_)
 
 
 x1 = [True, True, False, False, True, True, False, False]

@@ -28,8 +28,8 @@ class Net(nn.Cell):
         self.logical_not = P.LogicalNot()
 
     @ms_function
-    def construct(self, x1):
-        return self.logical_not(x1)
+    def construct(self, x):
+        return self.logical_not(x)
 
 
 x1 = [True, True, False, False, True, True, False, False]

@@ -28,8 +28,8 @@ class Net(nn.Cell):
         self.logical_or = P.LogicalOr()
 
     @ms_function
-    def construct(self, x1, x2):
-        return self.logical_or(x1, x2)
+    def construct(self, x1_, x2_):
+        return self.logical_or(x1_, x2_)
 
 
 x1 = [True, True, False, False, True, True, False, False]

@@ -27,8 +27,8 @@ class Net(nn.Cell):
         self.matmul = P.MatMul()
 
     @ms_function
-    def construct(self, x1, x2):
-        return self.matmul(x1, x2)
+    def construct(self, x1_, x2_):
+        return self.matmul(x1_, x2_)
 
 
 x1 = np.random.randn(1, 3).astype(np.float32)

@@ -29,8 +29,8 @@ class Net(nn.Cell):
         self.matmul = P.MatMul(transpose_b=True)
 
     @ms_function
-    def construct(self, x1, x2):
-        return self.matmul(x1, x2)
+    def construct(self, x1_, x2_):
+        return self.matmul(x1_, x2_)
 
 
 x1 = np.random.randn(10, 1).astype(np.float32)

@@ -44,15 +44,15 @@ class GradWrap(Cell):
         return gout
 
 
-def gen_data(inputA_np, inputB_np, grad=None):
+def gen_data(inputA_np, inputB_np, grad_=None):
     inputA_me = inputA_np
     if isinstance(inputA_np, np.ndarray):
         inputA_me = Tensor(inputA_me)
     inputB_me = inputB_np
     if isinstance(inputB_np, np.ndarray):
         inputB_me = Tensor(inputB_np)
-    if grad is None:
-        grad = np.random.randn(2).astype(np.float32)
+    if grad_ is None:
+        grad_ = np.random.randn(2).astype(np.float32)
     print("----inputA---")
     print(inputA_np)
     print("----inputB---")

@@ -60,7 +60,7 @@ def gen_data(inputA_np, inputB_np, grad=None):
 
     net_me = GradWrap(MaxNetMe())
     net_me.set_train()
-    output = net_me(inputA_me, inputB_me, Tensor(grad))
+    output = net_me(inputA_me, inputB_me, Tensor(grad_))
     print("---me---")
     print(output[0].asnumpy())
     print(output[1].asnumpy())

@@ -44,7 +44,7 @@ class GradWrap(Cell):
         return gout
 
 
-def gen_data(inputA_np, inputB_np, grad=None):
+def gen_data(inputA_np, inputB_np, grad_=None):
     inputA_me = inputA_np
     if isinstance(inputA_np, np.ndarray):
         inputA_me = Tensor(inputA_me)

@@ -53,12 +53,12 @@ def gen_data(inputA_np, inputB_np, grad=None):
     if isinstance(inputB_np, np.ndarray):
         inputB_me = Tensor(inputB_np)
 
-    if grad is None:
-        grad = np.random.randn(1, 3, 2, 2).astype(np.float32)
+    if grad_ is None:
+        grad_ = np.random.randn(1, 3, 2, 2).astype(np.float32)
 
     print(inputA_np)
     print(inputB_np)
-    print(grad)
+    print(grad_)
 
     net_me = GradWrap(MinNetMe())
     net_me.set_train()

@@ -31,8 +31,8 @@ class Grad(nn.Cell):
         self.network = network
 
     @ms_function
-    def construct(self, input, output_grad):
-        return self.grad(self.network)(input, output_grad)
+    def construct(self, inputValue, output_grad):
+        return self.grad(self.network)(inputValue, output_grad)
 
 
 class Net(nn.Cell):

@@ -12,8 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
-import numpy as np
 import sys
+import numpy as np
 
 import mindspore.context as context
 import mindspore.dataset as ds

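Note: several hunks, like the one above, reorder imports for Pylint C0411 (wrong-import-order): standard-library modules first, third-party packages second, framework and application imports last, with blank lines between groups. A sketch of the convention (running it assumes NumPy and MindSpore are installed):

    import sys  # standard library first

    import numpy as np  # third-party second

    import mindspore.context as context  # framework/application last

    print(sys.version, np.__version__, context.__name__)
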
@@ -31,8 +31,8 @@ SCHEMA_DIR = "{0}/resnet_all_datasetSchema.json".format(data_path)
 
 def test_me_de_train_dataset():
     data_list = ["{0}/train-00001-of-01024.data".format(data_path)]
-    data_set = ds.TFRecordDataset(data_list, schema=SCHEMA_DIR,
-                                  columns_list=["image/encoded", "image/class/label"])
+    data_set_new = ds.TFRecordDataset(data_list, schema=SCHEMA_DIR,
+                                      columns_list=["image/encoded", "image/class/label"])
 
     resize_height = 224
     resize_width = 224

@@ -42,21 +42,21 @@ def test_me_de_train_dataset():
     # define map operations
 
     decode_op = vision.Decode()
-    resize_op = vision.Resize(resize_height, resize_width,
+    resize_op = vision.Resize((resize_height, resize_width),
                               Inter.LINEAR)  # Bilinear as default
     rescale_op = vision.Rescale(rescale, shift)
 
     # apply map operations on images
-    data_set = data_set.map(input_columns="image/encoded", operations=decode_op)
-    data_set = data_set.map(input_columns="image/encoded", operations=resize_op)
-    data_set = data_set.map(input_columns="image/encoded", operations=rescale_op)
+    data_set_new = data_set_new.map(input_columns="image/encoded", operations=decode_op)
+    data_set_new = data_set_new.map(input_columns="image/encoded", operations=resize_op)
+    data_set_new = data_set_new.map(input_columns="image/encoded", operations=rescale_op)
     hwc2chw_op = vision.HWC2CHW()
-    data_set = data_set.map(input_columns="image/encoded", operations=hwc2chw_op)
-    data_set = data_set.repeat(1)
+    data_set_new = data_set_new.map(input_columns="image/encoded", operations=hwc2chw_op)
+    data_set_new = data_set_new.repeat(1)
     # apply batch operations
-    batch_size = 32
-    data_set = data_set.batch(batch_size, drop_remainder=True)
-    return data_set
+    batch_size_new = 32
+    data_set_new = data_set_new.batch(batch_size_new, drop_remainder=True)
+    return data_set_new
 
 
 def convert_type(shapes, types):

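Note: besides renames, this hunk carries two substantive corrections: `vision.Resize` takes the target size as a single `(height, width)` tuple rather than two positional arguments, and the function previously built `data_set_new` but kept mapping and returning `data_set`, so none of the operations applied to the pipeline it constructed. A hedged sketch of the corrected pipeline shape; the import paths assume the 2020-era MindSpore API this commit targets, and `data_list`/`schema` are placeholders:

    import mindspore.dataset as ds
    import mindspore.dataset.transforms.vision.c_transforms as vision
    from mindspore.dataset.transforms.vision import Inter


    def build_dataset(data_list, schema):
        # build, transform, and return the same pipeline object throughout
        data_set_new = ds.TFRecordDataset(data_list, schema=schema,
                                          columns_list=["image/encoded", "image/class/label"])
        decode_op = vision.Decode()
        resize_op = vision.Resize((224, 224), Inter.LINEAR)  # size is one tuple
        data_set_new = data_set_new.map(input_columns="image/encoded", operations=decode_op)
        data_set_new = data_set_new.map(input_columns="image/encoded", operations=resize_op)
        return data_set_new.batch(32, drop_remainder=True)
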
@@ -14,10 +14,10 @@
 # ============================================================================
 
 import pytest
+import numpy as np
 from mindspore import Tensor
 from mindspore.ops import operations as P
 import mindspore.nn as nn
-import numpy as np
 import mindspore.context as context
 from mindspore.common import dtype as mstype
 

@@ -68,12 +68,10 @@ def test_conv2d_backprop_filter():
     conv2d_filter = Net4()
     output = conv2d_filter()
     print("================================")
-    """
-    expect output:
-    [[[[ -60, -142, -265]
-      [-104, -211, -322]
-      [-102, -144, -248]]]]
-    """
+    # expect output:
+    # [[[[ -60, -142, -265]
+    #   [-104, -211, -322]
+    #   [-102, -144, -248]]]]
     expect = np.array([[[[-60, -142, -265],
                         [-104, -211, -322],
                         [-102, -144, -248]]]]).astype(np.float32)

@@ -66,16 +66,14 @@ class Net5(nn.Cell):
 def test_conv2d_backprop_input():
     conv2d_input = Net5()
     output = conv2d_input()
-    print("================================")
-    """
-    expect output:
-    [[[[ -5, -4, 5, 12, 0, -8]
-      [-15, -6, 17, 17, -2, -11]
-      [-15, -8, 13, 12, 2, -4]
-      [-13, -6, 8, -14, 5, 20]
-      [ -3, -4, -4, -19, 7, 23]
-      [ -3, -2, 0, -14, 3, 16]]]]
-    """
+    print("================================")
+    # expect output:
+    # [[[[ -5, -4, 5, 12, 0, -8]
+    #   [-15, -6, 17, 17, -2, -11]
+    #   [-15, -8, 13, 12, 2, -4]
+    #   [-13, -6, 8, -14, 5, 20]
+    #   [ -3, -4, -4, -19, 7, 23]
+    #   [ -3, -2, 0, -14, 3, 16]]]]
     expect = np.array([[[[-5, -4, 5, 12, 0, -8],
                         [-15, -6, 17, 17, -2, -11],
                         [-15, -8, 13, 12, 2, -4],

@@ -55,16 +55,13 @@ def test_conv2d():
     conv2d = NetConv2d()
     output = conv2d()
     print("================================")
-    """
-    expect output:
-    [[[[ 45. 48. 51.]
-      [ 54. 57. 60.]
-      [ 63. 66. 69.]]
-
-     [[126. 138. 150.]
-      [162. 174. 186.]
-      [198. 210. 222.]]]]
-    """
+    # expect output:
+    # [[[[ 45. 48. 51.]
+    #   [ 54. 57. 60.]
+    #   [ 63. 66. 69.]]
+    #  [[126. 138. 150.]
+    #   [162. 174. 186.]
+    #   [198. 210. 222.]]]]
     expect = np.array([[[[45, 48, 51],
                         [54, 57, 60],
                         [63, 66, 69]],

@@ -14,11 +14,10 @@
 # ============================================================================
 
 import pytest
+import numpy as np
 from mindspore import Tensor
 from mindspore.ops import operations as P
 import mindspore.nn as nn
-from mindspore.common.api import ms_function
-import numpy as np
 import mindspore.context as context
 from mindspore.common import dtype as mstype
 

@@ -96,7 +95,7 @@ def test_gatherv2_axisN1():
     expect = np.array([[[1., 2.],
                         [4., 5.]],
                        [[7., 8.],
-                        [10.,11.]]])
+                        [10., 11.]]])
     error = np.ones(shape=ms_output.asnumpy().shape) * 1.0e-6
     diff = ms_output.asnumpy() - expect
     assert np.all(diff < error)

@@ -65,10 +65,8 @@ def test_momentum():
 
     print("================================")
     print(losses)
-    """
-    expect output:
-    [[0.04132498 0.00874167 0.00874167 0.00874167 0.00874167
-      0.00874167 0.00874167 0.00874167 0.00874167 0.00874167]]
-    """
+    # expect output:
+    # [[0.04132498 0.00874167 0.00874167 0.00874167 0.00874167
+    #   0.00874167 0.00874167 0.00874167 0.00874167 0.00874167]]
 
     return losses

@@ -41,8 +41,8 @@ def test_slice():
     expect = [[[2., -2., 2.]],
               [[4., -4., 4.]]]
 
-    slice = Slice()
-    output = slice(x)
+    slice_op = Slice()
+    output = slice_op(x)
     print("output:\n", output)
     assert (output.asnumpy() == expect).all()
 

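Note: `slice = Slice()` shadows Python's built-in `slice` type, which Pylint reports as W0622 (redefined-builtin), hence the rename to `slice_op`. A standalone sketch with a hypothetical stand-in class:

    class Slice:  # hypothetical stand-in for the MindSpore operator
        def __call__(self, x):
            return x[:1]


    slice_op = Slice()  # avoids W0622; the built-in slice stays usable
    print(slice_op([1, 2, 3]), slice(0, 2))
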
@@ -13,17 +13,17 @@
 # limitations under the License.
 # ============================================================================
 from __future__ import absolute_import
-from te import tvm
-from topi import generic
 import te.lang.cce
-from topi.cce import util
+from te import tvm
 from te.platform.fusion_manager import fusion_manager
+from topi import generic
+from topi.cce import util
 from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
 
 @fusion_manager.register("add3")
 def add3_compute(input1, input2, const_bias):
     sum2 = te.lang.cce.vadd(input1, input2)
-    sum3 = te.lang.cce.vadds(sum2, tvm.const(const_bias, dtype = input1.dtype))
+    sum3 = te.lang.cce.vadds(sum2, tvm.const(const_bias, dtype=input1.dtype))
     return sum3
 
 

@@ -44,7 +44,7 @@ cus_add3_op_info = TBERegOp("CusAdd3") \
 
 
 @op_info_register(cus_add3_op_info)
-def CusAdd3Impl(input1, inptu2, sum, const_bias, kernel_name="CusAdd3Impl"):
+def CusAdd3Impl(input1, inptu2, sum1, const_bias, kernel_name="CusAdd3Impl"):
     shape = input1.get("shape")
     shape = util.shape_refine(shape)
     dtype = input1.get("dtype").lower()

@@ -12,10 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
-import numpy as np
 from mindspore.ops import prim_attr_register, PrimitiveWithInfer
-from mindspore.ops import operations as P
-from mindspore import Tensor
 
 # sum = input1 + input2 + const_bias
 class CusAdd3(PrimitiveWithInfer):

@@ -15,7 +15,6 @@
 import numpy as np
 from mindspore import Tensor
 from mindspore.ops import prim_attr_register, PrimitiveWithInfer
-from mindspore.ops import operations as P
 
 # y = x^2
 class CusSquare(PrimitiveWithInfer):

@@ -36,10 +35,10 @@ class CusSquare(PrimitiveWithInfer):
 
     def infer_dtype(self, data_dtype):
         return data_dtype
 
     def get_bprop(self):
         def bprop(data, out, dout):
             gradient = data * 2
             dx = gradient * dout
-            return (dx, )
+            return (dx,)
         return bprop

@@ -27,8 +27,8 @@ class Net(nn.Cell):
         super(Net, self).__init__()
         self.select = P.Select()
 
-    def construct(self, cond, input_x, input_y):
-        return self.select(cond, input_x, input_y)
+    def construct(self, cond_op, input_x, input_y):
+        return self.select(cond_op, input_x, input_y)
 
 
 cond = np.array([[True, False], [True, False]]).astype(np.bool)

@@ -315,16 +315,16 @@ test_case_array_ops = [
         'desc_inputs': [Tensor(np.array([[1, 2], [3, 4]]).astype(np.float16))]}),
     ('SpaceToDepthNet', {
         'block': SpaceToDepthNet(),
-        'desc_inputs': [Tensor(np.random.rand(1,3,2,2).astype(np.float16))]}),
+        'desc_inputs': [Tensor(np.random.rand(1, 3, 2, 2).astype(np.float16))]}),
     ('DepthToSpaceNet', {
         'block': DepthToSpaceNet(),
-        'desc_inputs': [Tensor(np.random.rand(1,12,1,1).astype(np.float16))]}),
+        'desc_inputs': [Tensor(np.random.rand(1, 12, 1, 1).astype(np.float16))]}),
     ('SpaceToBatchNDNet', {
         'block': SpaceToBatchNDNet(),
-        'desc_inputs': [Tensor(np.random.rand(1,1,2,2).astype(np.float16))]}),
+        'desc_inputs': [Tensor(np.random.rand(1, 1, 2, 2).astype(np.float16))]}),
     ('BatchToSpaceNDNet', {
         'block': BatchToSpaceNDNet(),
-        'desc_inputs': [Tensor(np.random.rand(4,1,1,1).astype(np.float16))]}),
+        'desc_inputs': [Tensor(np.random.rand(4, 1, 1, 1).astype(np.float16))]}),
 ]
 
 test_case_lists = [test_case_array_ops]

@@ -26,7 +26,7 @@ from ....mindspore_test_framework.pipeline.forward.compile_forward \
 
 
 class AssignAddNet(nn.Cell):
-    def __init__(self, ):
+    def __init__(self,):
         super(AssignAddNet, self).__init__()
         self.op = P.AssignAdd()
         self.inputdata = Parameter(Tensor(np.zeros([1]).astype(np.bool_), mstype.bool_), name="assign_add1")

@@ -37,7 +37,7 @@ class AssignAddNet(nn.Cell):
 
 
 class AssignSubNet(nn.Cell):
-    def __init__(self, ):
+    def __init__(self,):
         super(AssignSubNet, self).__init__()
         self.op = P.AssignSub()
         self.inputdata = Parameter(Tensor(np.zeros([1]).astype(np.bool_), mstype.bool_), name="assign_sub1")

@@ -13,8 +13,8 @@
 # limitations under the License.
 # ============================================================================
 """multitype_ops directory test case"""
-import numpy as np
 from functools import partial, reduce
+import numpy as np
 
 import mindspore.nn as nn
 import mindspore.context as context

@@ -231,7 +231,7 @@ class ApplyRMSNet(nn.Cell):
         self.apply_rms = P.ApplyRMSProp()
         self.lr = 0.001
         self.rho = 0.0
-        self.momentum= 0.0
+        self.momentum = 0.0
         self.epsilon = 1e-10
         self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
         self.ms = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="ms")

@@ -574,7 +574,8 @@ test_case_math_ops = [
     ('CumSum', {
         'block': CumSumNet(),
         'desc_inputs': [Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float32))],
-        'desc_bprop': [Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float32))]}),
+        'desc_bprop': [Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7],
+                                        [1, 3, 7, 9]]).astype(np.float32))]}),
     ('ReduceSum_3', {
         'block': P.ReduceSum(),
         'desc_const': [0],

@@ -103,7 +103,7 @@ test_case_reid_ops = [
         'desc_bprop': [[128, 64, 112, 112]]}),
     ('PRelu', {
         'block': P.PReLU(),
-        'desc_inputs': [[128, 64, 112, 112], [64, ]],
+        'desc_inputs': [[128, 64, 112, 112], [64,]],
         'desc_bprop': [[128, 64, 112, 112]]}),
     ('Cos', {
         'block': P.Cos(),

@@ -155,11 +155,11 @@ test_case = functools.reduce(lambda x, y: x + y, test_case_lists)
 
 
 test_exec_case = filter(lambda x: 'skip' not in x[1] or
-                        'exec' not in x[1]['skip'], test_case)
+                                  'exec' not in x[1]['skip'], test_case)
 
 test_backward_exec_case = filter(lambda x: 'skip' not in x[1] or
-                                 'backward' not in x[1]['skip'] and 'backward_exec'
-                                 not in x[1]['skip'], test_case)
+                                           'backward' not in x[1]['skip'] and 'backward_exec'
+                                           not in x[1]['skip'], test_case)
 
 
 @mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config)

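Note: this last hunk re-indents wrapped `filter(...)` arguments so continuation lines align consistently, which Pylint checks as C0330 (bad-continuation). A small sketch of the aligned style:

    cases = [('a', {'skip': ['exec']}), ('b', {})]

    # continuation aligned under the start of the condition it continues
    selected = filter(lambda c: 'skip' not in c[1] or
                                'exec' not in c[1]['skip'], cases)
    print([name for name, _ in selected])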