forked from mindspore-Ecosystem/mindspore
Fix some minor pylint warnings in my code (ops)
This commit is contained in:
parent 9cc955678d
commit f87c70a9ab

@@ -164,7 +164,8 @@ def CusBatchMatMul(input_x1, input_x2, output, transpose_a=False, transpose_b=Tr
     matmul_hybrid_f_t_local_UB = tik_instance.Tensor(dtype, [64],
                                                      name="matmul_hybrid_f_t_local_UB",
                                                      scope=tik.scope_ubuf)
-    matmul_hybrid_f_t_local_UB_dst_tmp = tik_instance.Tensor(dtype, [64],
+    matmul_hybrid_f_t_local_UB_dst_tmp = tik_instance.Tensor(
+        dtype, [64],
+        name="matmul_hybrid_f_t_local_UB_dst_tmp",
+        scope=tik.scope_ubuf)
     tik_instance.vector_dup(64, matmul_hybrid_f_t_local_UB, 0, 1, 1, 8)
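For context, the split call above is the usual fix for pylint's line-too-long check (C0301): when a call overflows the column limit, break after the opening parenthesis and indent the arguments as a block. A minimal sketch of the same pattern, using a hypothetical `allocate` helper rather than the TIK API:

    # Hypothetical helper, for illustration only (not the TIK API).
    def allocate(dtype, shape, name=None, scope=None):
        return (dtype, shape, name, scope)

    # Wrapped the same way as the tik_instance.Tensor call in the diff:
    buf = allocate(
        "float16", [64],
        name="matmul_hybrid_f_t_local_UB_dst_tmp",
        scope="ubuf")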
@@ -127,7 +127,7 @@ def _shape_check(shape_a, shape_b, shape_bias, src_dtype, trans_a, trans_b):
     if n_shape % cce.BLOCK_IN != 0 and n_shape != 1:
         raise RuntimeError("input shape N should be 1 or multiple of %d" % cce.BLOCK_IN)

-    if len(shape_bias) != 0:
+    if shape_bias:
         if len(shape_bias) == 1:
             if is_gevm or is_gemv:
                 if shape_bias[0] != m_shape * n_shape:
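The `len(shape_bias) != 0` → `if shape_bias:` rewrite follows pylint's len-as-condition check (C1801): empty Python sequences are already falsy, so testing the sequence directly is equivalent and reads better. A quick demonstration:

    shape_bias = []
    assert not shape_bias                               # empty list is falsy
    shape_bias = [16]
    assert shape_bias                                   # non-empty list is truthy
    assert bool(shape_bias) == (len(shape_bias) != 0)   # always equivalent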
@@ -189,7 +189,7 @@ def check_supported(input_x1, input_x2, bias=None, output_y={}, trans_a=False, t
     util.check_shape_size(shape_b, SHAPE_SIZE_LIMIT)
     try:
         trans_a_f = bool(1 - trans_a)
-        if src_dtype == "float32" or src_dtype == "int32":
+        if src_dtype in ("float32", "int32"):
             if len(shape_a) != 2 and len(shape_b) != 2:
                 return False
             if trans_b:
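Similarly, the chained equality test is collapsed into a membership test, which pylint suggests via consider-using-in (R1714). The two forms are equivalent here:

    src_dtype = "float32"
    old_style = src_dtype == "float32" or src_dtype == "int32"
    new_style = src_dtype in ("float32", "int32")
    assert old_style == new_style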
@@ -239,6 +239,7 @@ def check_supported(input_x1, input_x2, bias=None, output_y={}, trans_a=False, t
                 return False

     except RuntimeError as e:
         print(e)
         return False

     return True
@@ -385,7 +386,7 @@ def CusMatMulCubeDenseLeft(input_x1, input_x2, bias=None, output_y={}, trans_a=F
     tensor_b = tvm.placeholder(shape_b_temp, name='tensor_b',
                                dtype=src_dtype)

-    if len(shape_bias) > 0:
+    if shape_bias:
         tensor_bias = tvm.placeholder(shape_bias, name='tensor_bias',
                                       dtype=dst_dtype)

@@ -449,7 +450,7 @@ def CusMatMulCubeDenseLeft(input_x1, input_x2, bias=None, output_y={}, trans_a=F
                                 resMatmul_local_UB, 0, 16, 224 // 2, 0, 56 * 16 * 2 // 2)
         tik_instance.BuildCCE(kernel_name=kernel_name, inputs=[input_x1, input_x2], outputs=[resMatmul])
         return tik_instance
     else:

         print("come into tbe, shape is error!")
         result = te.lang.cce.matmul(tensor_a, tensor_b, trans_a, trans_b, format_a=format_a,
                                     format_b=format_b, dst_dtype=dst_dtype, tensor_bias=tensor_bias)
@@ -458,7 +459,7 @@ def CusMatMulCubeDenseLeft(input_x1, input_x2, bias=None, output_y={}, trans_a=F
     schedule = generic.auto_schedule(result)

     tensor_list = [tensor_a, tensor_b, result]
-    if len(shape_bias) > 0:
+    if shape_bias:
         tensor_list = [tensor_a, tensor_b, tensor_bias, result]

     config = {"print_ir": False,
@@ -124,7 +124,7 @@ src_dtype: str
     if n_shape % cce.BLOCK_IN != 0 and n_shape != 1:
         raise RuntimeError("input shape N should be 1 or multiple of %d" % cce.BLOCK_IN)

-    if len(shape_bias):
+    if shape_bias:
         if len(shape_bias) == 1:
             if is_gevm or is_gemv:
                 if shape_bias[0] != m_shape * n_shape:
@@ -144,7 +144,6 @@ def _get_bias(shape_bias):
     bias_length = shape_bias[0]
     if bias_length % 16 == 0:
         return shape_bias
     else:
         bias_length = (bias_length // 16) * 16 + 16
         shape_bias = []
         shape_bias.append(bias_length)
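As an aside on the `_get_bias` body shown above: it rounds a bias length up to the next multiple of 16, the fractal block size these kernels use elsewhere. A standalone check of the arithmetic, with a hypothetical helper name:

    def round_up_16(n):
        # mirrors _get_bias: keep multiples of 16, pad everything else up
        return n if n % 16 == 0 else (n // 16) * 16 + 16

    assert round_up_16(32) == 32
    assert round_up_16(33) == 48
    assert round_up_16(1) == 16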
@@ -184,7 +183,7 @@ def check_supported(input_x1, input_x2, bias=None, output_y={}, trans_a=False, t
     util.check_shape_size(shape_b, SHAPE_SIZE_LIMIT)
     try:
         trans_a_f = bool(1 - trans_a)
-        if src_dtype == "float32" or src_dtype == "int32":
+        if src_dtype in ("float32", "int32"):
             if len(shape_a) != 2 and len(shape_b) != 2:
                 return False
             if trans_b:
@@ -234,6 +233,7 @@ def check_supported(input_x1, input_x2, bias=None, output_y={}, trans_a=False, t
                 return False

     except RuntimeError as e:
         print(e)
         return False

     return True
@@ -80,8 +80,8 @@ def CusMatMulCubeFraczRightMul(input_x1, input_x2, input_x3, bias=None, output_y
                  ((32, 128, 16, 16), 'float16', (32, 32, 16, 16), 'float16', (1,), 'float32'),
                  ((64, 32, 16, 16), 'float16', (64, 64, 16, 16), 'float16', (1,), 'float32'),
                  ((16, 64, 16, 16), 'float16', (16, 16, 16, 16), 'float16', (1,), 'float32')]
-    input_shape = (
-        tuple(input_x1_shape), input_x1_dtype, tuple(input_x2_shape), input_x2_dtype, tuple(input_x3_shape), input_x3_dtype)
+    input_shape = (tuple(input_x1_shape), input_x1_dtype, tuple(input_x2_shape),
+                   input_x2_dtype, tuple(input_x3_shape), input_x3_dtype)
     if input_shape not in Supported:
         raise RuntimeError("input_shape %s is not supported" % str(input_shape))

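The `Supported` list above acts as an exact whitelist of (shape, dtype) signatures; inputs arrive as lists, so they are normalized to tuples before the membership test, since a list never compares equal to a tuple. A reduced sketch of the pattern (names are illustrative):

    SUPPORTED = [((16, 64, 16, 16), 'float16'),
                 ((32, 32, 16, 16), 'float16')]

    def is_supported(shape, dtype):
        # shapes come in as lists; convert so equality with stored tuples holds
        return (tuple(shape), dtype) in SUPPORTED

    assert is_supported([16, 64, 16, 16], 'float16')
    assert not is_supported([8, 8, 8, 8], 'float32')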
@@ -129,7 +129,7 @@ def _shape_check(shape_a, shape_b, shape_bias, src_dtype, trans_a, trans_b):
     if n_shape % cce.BLOCK_IN != 0 and n_shape != 1:
         raise RuntimeError("input shape N should be 1 or multiple of %d" % cce.BLOCK_IN)

-    if len(shape_bias):
+    if shape_bias:
         if len(shape_bias) == 1:
             if is_gevm or is_gemv:
                 if shape_bias[0] != m_shape * n_shape:
@@ -149,7 +149,6 @@ def _get_bias(shape_bias):
     bias_length = shape_bias[0]
     if bias_length % 16 == 0:
         return shape_bias
     else:
         bias_length = (bias_length // 16) * 16 + 16
         shape_bias = []
         shape_bias.append(bias_length)
@@ -189,7 +188,7 @@ def check_supported(input_x1, input_x2, bias=None, output_y={}, trans_a=False, t
     util.check_shape_size(shape_b, SHAPE_SIZE_LIMIT)
     try:
         trans_a_f = bool(1 - trans_a)
-        if src_dtype == "float32" or src_dtype == "int32":
+        if src_dtype in ("float32", "int32"):
             if len(shape_a) != 2 and len(shape_b) != 2:
                 return False
             if trans_b:
@@ -239,6 +238,7 @@ def check_supported(input_x1, input_x2, bias=None, output_y={}, trans_a=False, t
                 return False

     except RuntimeError as e:
         print(e)
         return False

     return True
@@ -314,7 +314,7 @@ def CusMatMulCube(input_x1, input_x2, bias=None, output_y={}, trans_a=False, tra

     src_dtype = input_x1.get("dtype").lower()
     dst_dtype = output_y.get("dtype").lower()
-    if src_dtype == "float32" or src_dtype == "int32":
+    if src_dtype in ("float32", "int32"):
         matmul_vector_cce(shape_a, shape_b, src_dtype, trans_a, trans_b, shape_bias, kernel_name)
         return
     _shape_check(shape_a, shape_b, shape_bias, src_dtype, trans_a, trans_b)
@@ -377,7 +377,7 @@ def CusMatMulCube(input_x1, input_x2, bias=None, output_y={}, trans_a=False, tra
     tensor_b = tvm.placeholder(shape_b_temp, name='tensor_b',
                                dtype=src_dtype)

-    if len(shape_bias) > 0:
+    if shape_bias:
         tensor_bias = tvm.placeholder(shape_bias, name='tensor_bias',
                                       dtype=dst_dtype)
     result = te.lang.cce.matmul(tensor_a, tensor_b, trans_a, trans_b, format_a=format_a,
@@ -387,7 +387,7 @@ def CusMatMulCube(input_x1, input_x2, bias=None, output_y={}, trans_a=False, tra
     schedule = generic.auto_schedule(result)

     tensor_list = [tensor_a, tensor_b, result]
-    if len(shape_bias) > 0:
+    if shape_bias:
         tensor_list = [tensor_a, tensor_b, tensor_bias, result]

     config = {"print_ir": False,
@@ -16,17 +16,10 @@
 import functools
 import numpy as np
 import mindspore.nn as nn
 import mindspore.context as context
 import mindspore.common.dtype as mstype

 from mindspore import Tensor, Parameter
 from mindspore.common.initializer import initializer
 from mindspore.ops import Primitive
 from mindspore.ops import composite as C
 from mindspore import Tensor
 from mindspore.ops import operations as P
 from mindspore.ops import functional as F
 from mindspore.ops import prim_attr_register, PrimitiveWithInfer
 from mindspore.ops.primitive import constexpr
 from mindspore import context
 context.set_context(mode=context.GRAPH_MODE, save_graphs=True)

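This import block is where pylint's reimported warning (W0404) fires: `Tensor` is bound twice, and `context` arrives both via `import mindspore.context as context` and `from mindspore import context`. The -16,17 +16,10 counts in the hunk header indicate the duplicates (and several unused imports) were dropped. A minimal reproduction of the warning, using only the standard library:

    import os.path as path      # first binding of `path`
    from os import path         # second binding: pylint W0404 (reimported)
    print(path.join("a", "b"))  # behavior is unchanged; one import suffices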
@@ -33,13 +33,13 @@ class Net(nn.Cell):
         return self.mul(x1, x2)


-x1 = np.random.randn(3, 4).astype(np.float32)
-x2 = np.random.randn(3, 4).astype(np.float32)
+arr_x1 = np.random.randn(3, 4).astype(np.float32)
+arr_x2 = np.random.randn(3, 4).astype(np.float32)


 def test_net():
     mul = Net()
-    output = mul(Tensor(x1), Tensor(x2))
-    print(x1)
-    print(x2)
+    output = mul(Tensor(arr_x1), Tensor(arr_x2))
+    print(arr_x1)
+    print(arr_x2)
     print(output.asnumpy())
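The `x1`/`x2` → `arr_x1`/`arr_x2` renames in these test files most likely address pylint complaints about module-level names that shadow the parameters used inside `Net.construct` (redefined-outer-name, W0621) or its naming rules (C0103); the exact message is not visible in this view. A toy version of the shadowing being removed:

    x1 = 3.0                # module-level name...

    def scale(x1):          # ...shadowed by the parameter: pylint W0621
        return x1 * 2

    arr_x1 = 3.0            # renamed module-level value avoids the clash

    def scale2(x1):
        return x1 * 2

    assert scale2(arr_x1) == 6.0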
@@ -33,11 +33,11 @@ class Net(nn.Cell):
         return self.npu_clear_float_status(x1)


-x1 = np.random.randn(8).astype(np.float32)
+arr_x1 = np.random.randn(8).astype(np.float32)


 def test_net():
     npu_clear_float_status = Net()
-    output = npu_clear_float_status(Tensor(x1))
-    print(x1)
+    output = npu_clear_float_status(Tensor(arr_x1))
+    print(arr_x1)
     print(output.asnumpy())
@@ -33,11 +33,11 @@ class Net(nn.Cell):
         return self.npu_get_float_status(x1)


-x1 = np.random.randn(8).astype(np.float32)
+arr_x1 = np.random.randn(8).astype(np.float32)


 def test_net():
     npu_get_float_status = Net()
-    output = npu_get_float_status(Tensor(x1))
-    print(x1)
+    output = npu_get_float_status(Tensor(arr_x1))
+    print(arr_x1)
     print(output.asnumpy())
@@ -34,11 +34,11 @@ class Net(nn.Cell):
         return x


-x = np.random.random(size=(2, 2)).astype(np.float32)
+arr_x = np.random.random(size=(2, 2)).astype(np.float32)


 def test_net():
     pad = Net()
-    output = pad(Tensor(x))
+    output = pad(Tensor(arr_x))
     print("=================output====================")
     print(output.asnumpy())
@@ -33,13 +33,13 @@ class Net(nn.Cell):
         return self.realdiv(x1, x2)


-x1 = np.random.randn(3, 4).astype(np.float32)
-x2 = np.random.randn(3, 4).astype(np.float32)
+arr_x1 = np.random.randn(3, 4).astype(np.float32)
+arr_x2 = np.random.randn(3, 4).astype(np.float32)


 def test_net():
     realdiv = Net()
-    output = realdiv(Tensor(x1), Tensor(x2))
-    print(x1)
-    print(x2)
+    output = realdiv(Tensor(arr_x1), Tensor(arr_x2))
+    print(arr_x1)
+    print(arr_x2)
     print(output.asnumpy())
@@ -33,11 +33,11 @@ class Net(nn.Cell):
         return self.reciprocal(x1)


-x1 = np.random.randn(3, 4).astype(np.float32)
+arr_x1 = np.random.randn(3, 4).astype(np.float32)


 def test_net():
     reciprocal = Net()
-    output = reciprocal(Tensor(x1))
-    print(x1)
+    output = reciprocal(Tensor(arr_x1))
+    print(arr_x1)
     print(output.asnumpy())
@@ -31,13 +31,13 @@ class Net(nn.Cell):
         return self.scatternd(indices, update, (3, 3))


-indices = np.array([[0, 1], [1, 1]]).astype(np.int32)
-update = np.array([3.2, 1.1]).astype(np.float32)
+arr_indices = np.array([[0, 1], [1, 1]]).astype(np.int32)
+arr_update = np.array([3.2, 1.1]).astype(np.float32)


 def test_net():
     scatternd = Net()
-    print(indices)
-    print(update)
-    output = scatternd(Tensor(indices), Tensor(update))
+    print(arr_indices)
+    print(arr_update)
+    output = scatternd(Tensor(arr_indices), Tensor(arr_update))
     print(output.asnumpy())
@@ -31,11 +31,11 @@ class Net(nn.Cell):
         return self.Softmax(x)


-x = np.array([[5, 1]]).astype(np.float32)
+arr_x = np.array([[5, 1]]).astype(np.float32)


 def test_net():
     softmax = Net()
-    output = softmax(Tensor(x))
-    print(x)
+    output = softmax(Tensor(arr_x))
+    print(arr_x)
     print(output.asnumpy())
@@ -31,13 +31,13 @@ class Net(nn.Cell):
         return self.split(x)


-x = np.random.randn(2, 4).astype(np.float32)
+arr_x = np.random.randn(2, 4).astype(np.float32)


 def test_net():
     split = Net()
-    output = split(Tensor(x))
+    output = split(Tensor(arr_x))
     print("====input========")
-    print(x)
+    print(arr_x)
     print("====output=======")
     print(output)
@@ -31,11 +31,11 @@ class Net(nn.Cell):
         return self.sqrt(x)


-x = np.array([1.0, 4.0, 9.0]).astype(np.float32)
+arr_x = np.array([1.0, 4.0, 9.0]).astype(np.float32)


 def test_net():
     sqrt = Net()
-    output = sqrt(Tensor(x))
-    print(x)
+    output = sqrt(Tensor(arr_x))
+    print(arr_x)
     print(output.asnumpy())
@@ -31,11 +31,11 @@ class Net(nn.Cell):
         return self.square(x)


-x = np.array([1.0, 4.0, 9.0]).astype(np.float32)
+arr_x = np.array([1.0, 4.0, 9.0]).astype(np.float32)


 def test_net():
     square = Net()
-    output = square(Tensor(x))
-    print(x)
+    output = square(Tensor(arr_x))
+    print(arr_x)
     print(output.asnumpy())
@@ -31,13 +31,13 @@ class Net(nn.Cell):
         return self.sub(x, y)


-x = np.random.randn(1, 3, 3, 4).astype(np.float32)
-y = np.random.randn(1, 3, 3, 4).astype(np.float32)
+arr_x = np.random.randn(1, 3, 3, 4).astype(np.float32)
+arr_y = np.random.randn(1, 3, 3, 4).astype(np.float32)


 def test_net():
     sub = Net()
-    output = sub(Tensor(x), Tensor(y))
-    print(x)
-    print(y)
+    output = sub(Tensor(arr_x), Tensor(arr_y))
+    print(arr_x)
+    print(arr_y)
     print(output.asnumpy())
@@ -31,11 +31,11 @@ class Net(nn.Cell):
         return self.tile(x, (1, 4))


-x = np.array([[0], [1], [2], [3]]).astype(np.int32)
+arr_x = np.array([[0], [1], [2], [3]]).astype(np.int32)


 def test_net():
     tile = Net()
-    print(x)
-    output = tile(Tensor(x))
+    print(arr_x)
+    output = tile(Tensor(arr_x))
     print(output.asnumpy())
@@ -20,7 +20,6 @@ import mindspore as ms
 from mindspore import Tensor
 from mindspore import context
 from mindspore import nn
 from mindspore.common.parameter import Parameter, ParameterTuple
 from mindspore.ops import composite as C
 from mindspore.ops import functional as F
 from mindspore.ops import operations as P
@@ -447,11 +446,14 @@ def test_index_to_switch_layer():

 def test_control_depend_check():
     with pytest.raises(TypeError) as e:
-        depend = P.ControlDepend(0.0)
+        P.ControlDepend(0.0)
         print(e)
     with pytest.raises(ValueError) as e:
-        depend = P.ControlDepend(2)
+        P.ControlDepend(2)
         print(e)
     with pytest.raises(TypeError) as e:
-        depend = P.ControlDepend((2,))
+        P.ControlDepend((2,))
         print(e)


 def test_if_nested_compile():
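Dropping the `depend =` bindings silences pylint's unused-variable warning (W0612): the test only cares that construction raises, and `pytest.raises` already asserts that. A self-contained version of the pattern, with a stand-in for `P.ControlDepend`:

    import pytest

    def make_primitive(depend_mode):
        # stand-in that raises like the constructor under test
        if not isinstance(depend_mode, int) or isinstance(depend_mode, bool):
            raise TypeError("depend_mode should be an int")
        if depend_mode not in (0, 1):
            raise ValueError("depend_mode should be 0 or 1")

    def test_make_primitive_checks():
        with pytest.raises(TypeError):
            make_primitive(0.0)   # no `result =` binding needed
        with pytest.raises(ValueError):
            make_primitive(2)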
@@ -497,7 +499,7 @@ def test_if_inside_for():
     c1 = Tensor(1, dtype=ms.int32)
     c2 = Tensor(1, dtype=ms.int32)
     net = Net()
-    out = net(c1, c2)
+    net(c1, c2)


 def test_while_in_while():
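The same unused-variable cleanup (`out = net(...)` → `net(...)`) recurs through the compilation tests below: they only exercise graph construction, so the return value is never read and the binding is dead. A sketch with a stand-in network:

    class FakeNet:
        # stand-in for a compiled nn.Cell; calling it runs the code under test
        def __call__(self, *args):
            return sum(args)

    def test_compiles():
        net = FakeNet()
        net(1, 2)   # was: out = net(1, 2) -- `out` was never used (W0612)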
@@ -31,7 +31,6 @@ from ....mindspore_test_framework.pipeline.forward.compile_forward \
     import pipeline_for_compile_forward_ge_graph_for_case_by_case_config
 from ....mindspore_test_framework.pipeline.forward.verify_exception \
     import pipeline_for_verify_exception_for_case_by_case_config
 from mindspore import context
 context.set_context(mode=context.GRAPH_MODE, save_graphs=True)

 def conv3x3(in_channels, out_channels, stride=1, padding=1):
@@ -391,7 +390,8 @@ def test_conv2d_same_primitive():
     t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
     t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
     net = Conv2DSameNet()
-    out = net(t1, t2)
+    net(t1, t2)


 class ComparisonNet(nn.Cell):
     def __init__(self):
@@ -13,30 +13,14 @@
 # limitations under the License.
 # ============================================================================
 """ test nn ops """
 import functools
 import numpy as np
 import mindspore

 import mindspore.nn as nn
 import mindspore.context as context
 import mindspore.common.dtype as mstype

 from mindspore import Tensor, Parameter
 from mindspore.common.initializer import initializer
 from mindspore.ops import Primitive
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from mindspore import Tensor
 from mindspore.ops import functional as F
 from mindspore.ops import prim_attr_register, PrimitiveWithInfer
 from mindspore.ops.primitive import constexpr

 from ..ut_filter import non_graph_engine
 from ....mindspore_test_framework.mindspore_test import mindspore_test
 from ....mindspore_test_framework.pipeline.forward.compile_forward \
     import pipeline_for_compile_forward_ge_graph_for_case_by_case_config
 from ....mindspore_test_framework.pipeline.forward.verify_exception \
     import pipeline_for_verify_exception_for_case_by_case_config
 from mindspore import context
 context.set_context(mode=context.GRAPH_MODE, save_graphs=True)

 class FakeOp(PrimitiveWithInfer):
@@ -66,7 +50,7 @@ def test_conv2d_same_primitive():
     t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
     t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
     net = Conv2DSameNet()
-    out = net(t1, t2)
+    net(t1, t2)

 # test cell as high order argument
 # The graph with free variables used as argument is not supported yet
@@ -90,7 +74,7 @@ def Xtest_conv2d_op_with_arg():
     t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
     t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
     net = OpsNet(Conv2dNet())
-    out = net(t1, t2)
+    net(t1, t2)


 def test_conv2d_op_with_arg():
@@ -118,8 +102,7 @@ def test_conv2d_op_with_arg():
     t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
     t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
     net = OpsNet(OpNet())
-    out = net(t1, t2)
-
+    net(t1, t2)


 def test_conv2d_op_with_arg_same_input():
@@ -147,7 +130,7 @@ def test_conv2d_op_with_arg_same_input():
     t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
     t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
     net = OpsNet(OpNet())
-    out = net(t1, t2)
+    net(t1, t2)

 # test op with partial
 def test_op_as_partial():
@@ -164,7 +147,7 @@ def test_op_as_partial():
     t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
     t3 = Tensor(np.ones([1, 16, 1, 1234]).astype(np.float32))
     net = OpAsPartial()
-    out = net(t1, t2, t3)
+    net(t1, t2, t3)

 # test op with partial
 def test_op_as_partial_inside():
@@ -188,7 +171,8 @@ def test_op_as_partial_inside():
     t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
     t3 = Tensor(np.ones([1, 16, 1, 1234]).astype(np.float32))
     net = OuterNet()
-    out = net(t1, t2, t3)
+    net(t1, t2, t3)


 # test op with partial case 2
 def test_op_as_partial_independent():
@@ -206,7 +190,8 @@ def test_op_as_partial_independent():
     t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
     t3 = Tensor(np.ones([1, 16, 1, 1234]).astype(np.float32))
     net = OpAsPartial()
-    out = net(t1, t2, t3)
+    net(t1, t2, t3)


 def test_nest_partial():
     class NestPartial(nn.Cell):
@@ -225,7 +210,7 @@ def test_nest_partial():
     t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
     t3 = Tensor(np.ones([1, 16, 1, 1234]).astype(np.float32))
     net = NestPartial()
-    out = net(t1, t2, t3)
+    net(t1, t2, t3)

 # high order argument
 # op and op args as network arguments
@@ -249,7 +234,7 @@ def test_op_with_arg_as_input():
     t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
     t3 = Tensor(np.ones([1, 16, 1, 1234]).astype(np.float32))
     net = OpsNet(WithOpArgNet())
-    out = net(t1, t2, t3)
+    net(t1, t2, t3)

 # The partial application used as argument is not supported yet
 # because of the limit of inference specialize system
@@ -273,4 +258,4 @@ def Xtest_partial_as_arg():
     t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
     t3 = Tensor(np.ones([1, 16, 1, 1234]).astype(np.float32))
     net = OpsNet(PartialArgNet())
-    out = net(t1, t2, t3)
+    net(t1, t2, t3)