forked from mindspore-Ecosystem/mindspore
!23365 add print op security
Merge pull request !23365 from fangzehua/print_sec
commit db19a40280
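Taken together, the diff below does four things: (1) in the parser's resource table, `T.print` is registered in `convert_object_map` only when `security.enable_security()` is false; (2) in `ops.functional`, the module-level `print_` alias is likewise created only in non-security builds; (3) `Print.__init__` raises a ValueError under a security build, so the op cannot even be constructed; and (4) unused `P.Print()` handles are deleted across the model zoo, and every test that exercises Print is wrapped in `@security_off_wrap`.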
@@ -21,6 +21,7 @@ import math
 from mindspore import RowTensor, SparseTensor
 from mindspore.ops import functional as F, composite as C
 from mindspore.ops.composite import multitype_ops
+from mindspore._c_expression import security
 from . import standard_method as M
 from . import trope as T
 from .namespace import CellNamespace

@@ -112,7 +113,6 @@ convert_object_map = {
     T.map: C.Map(),
     T.partial: F.partial,
     T.zip: C.zip_operation,
-    T.print: F.print_,
     T.enumerate: M.enumerate_,
     T.isinstance: M.isinstance_,
 
@@ -140,3 +140,6 @@ convert_object_map = {
     RowTensor: F.make_row_tensor,
     SparseTensor: F.make_sparse_tensor,
 }
+
+if not security.enable_security():
+    convert_object_map[T.print] = F.print_
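The three hunks above are the core of the change: `T.print` is no longer mapped to `F.print_` unconditionally, but only when the build is not a security build, so a graph compiled under `-s on` can never contain a Print node. A minimal, self-contained sketch of this conditional-registration pattern, with a stand-in flag in place of the real `mindspore._c_expression.security` binding:

# Stand-in for security.enable_security(); the real value is baked in at
# compile time by the `-s on` build option.
def enable_security():
    return False  # a `-s on` build would return True

# Stand-in dispatch table mapping Python callables to graph operators.
convert_object_map = {
    zip: "C.zip_operation",
    enumerate: "M.enumerate_",
}

# print gets a graph lowering only in non-security builds, so a security
# build has no way to emit a Print node at all.
if not enable_security():
    convert_object_map[print] = "F.print_"

assert print in convert_object_map  # holds only because security is off here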
@@ -23,6 +23,7 @@ from .primitive import Primitive
 from . import operations as P
 from .operations import _grad_ops
 from .composite import GradOperation
+from .._c_expression import security
 
 typeof = Primitive('typeof')
 hastype = Primitive('hastype')

@@ -125,7 +126,8 @@ scalar_to_array = P.ScalarToArray()
 scalar_to_tensor = P.ScalarToTensor()
 tuple_to_array = P.TupleToArray()
 scalar_cast = P.ScalarCast()
-print_ = P.Print()
+if not security.enable_security():
+    print_ = P.Print()
 expand_dims = P.ExpandDims()
 transpose = P.Transpose()
 squeeze = P.Squeeze()
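`ops.functional` applies the same gate to its module-level alias: `print_` is bound only when security is off, so `F.print_` is simply absent in a `-s on` build. A hedged sketch of the guarded-alias idea:

ENABLE_SECURITY = True  # stand-in for security.enable_security()

class Print:  # stand-in for P.Print
    def __call__(self, *args):
        print(*args)

if not ENABLE_SECURITY:
    print_ = Print()

# In a security build the name `print_` is never bound, so any use fails
# fast with a NameError instead of silently constructing the op.
try:
    print_("hello")
except NameError as err:
    print(err)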
@@ -17,12 +17,12 @@
 from types import FunctionType, MethodType
 
 from mindspore import context
+from mindspore._c_expression import security
 from ..._checkparam import Validator as validator
 from ..._checkparam import Rel
 from ...common import dtype as mstype
 from ..primitive import prim_attr_register, Primitive, PrimitiveWithInfer
 
 
 def _check_mode(class_name):
     """Check for PyNative mode."""
     mode = context.get_context('mode')

@@ -407,6 +407,9 @@ class Print(PrimitiveWithInfer):
     @prim_attr_register
     def __init__(self):
         """Initialize Print."""
+        if security.enable_security():
+            raise ValueError(
+                'The Print is not supported, please without `-s on` and recompile source.')
         self.add_prim_attr("side_effect_io", True)
 
     def __call__(self, *args):
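The op itself is the last line of defense: even if some code path reaches `P.Print()` in a security build, the constructor raises before any graph is compiled or executed. A minimal sketch of this fail-fast constructor, again with a stand-in flag:

def enable_security():
    return True  # pretend this is a `-s on` build

class Print:  # sketch of the guarded constructor above
    def __init__(self):
        if enable_security():
            raise ValueError(
                'The Print is not supported, please without `-s on` and recompile source.')
        self.side_effect_io = True  # stand-in for add_prim_attr("side_effect_io", True)

try:
    Print()  # constructing the op is enough to trip the check
except ValueError as err:
    print(err)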
@@ -178,7 +178,6 @@ class CenterFaceLoss(nn.Cell):
         self.cls_loss = FocalLoss()
         self.reg_loss = SmoothL1LossNew()
         self.reg_loss_cmask = SmoothL1LossNewCMask()
-        self.print = P.Print()
 
     def construct(self, output_hm, output_wh, output_off, output_kps, hm, reg_mask, ind, wh, wight_mask, hm_offset,
                   hps_mask, landmarks):

@@ -211,7 +210,6 @@ class CenterFaceWithLossCell(nn.Cell):
         self.loss = CenterFaceLoss(self.config.wh_weight, self.config.reg_offset, self.config.off_weight,
                                    self.config.hm_weight, self.config.lm_weight)
         self.reduce_sum = P.ReduceSum()
-        self.print = P.Print()
 
     def construct(self, x, hm, reg_mask, ind, wh, wight_mask, hm_offset, hps_mask, landmarks):
         output_hm, output_wh, output_off, output_kps = self.centerface_network(x)

@@ -326,7 +324,6 @@ class CenterFaceWithNms(nn.Cell):
         self.maxpool2d = P.MaxPoolWithArgmax(kernel_size=3, strides=1, pad_mode='same')
         self.topk = P.TopK(sorted=True)
         self.reshape = P.Reshape()
-        self.print = P.Print()
         self.test_batch = self.config.test_batch_size
         self.k = self.config.K
 
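Every model-zoo hunk from here to the test files repeats one pattern: a `P.Print()` handle created in `__init__` but never used in `construct` is deleted. The removals are load-bearing, not cosmetic: with the constructor guard above, merely instantiating the handle would now raise in a security build, even though the op is never called.

# The recurring one-line removal, schematically:
#
#     def __init__(self):
#         ...
#         self.print = P.Print()   # removed: dead debug handle that would
#         ...                      # now raise at __init__ under `-s on`
#
# construct() never referenced self.print, so normal builds are unaffected.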
@@ -26,7 +26,6 @@ class FocalLoss(nn.Cell):
         self.log = P.Log()
         self.pow = P.Pow()
         self.sum = P.ReduceSum()
-        self.print = P.Print()
 
     def construct(self, pred, gt):
         """Construct method"""
@@ -59,7 +59,6 @@ class ConvBNReLU(nn.Cell):
         layers = [conv, nn.BatchNorm2d(out_planes).add_flags_recursive(fp32=True), nn.ReLU6()]  #, momentum=0.9
         self.features = nn.SequentialCell(layers)
         self.in_planes = in_planes
-        self.print = P.Print()
 
     def construct(self, x):
         x = self.features(x)
@@ -96,7 +96,6 @@ class BboxAssignSample(nn.Cell):
         self.range_pos_size = Tensor(np.arange(self.num_expected_pos).astype(np.float16))
         self.check_gt_one = Tensor(np.array(-1 * np.ones((self.num_gts, 4)), dtype=np.float16))
         self.check_anchor_two = Tensor(np.array(-2 * np.ones((self.num_bboxes, 4)), dtype=np.float16))
-        self.print = P.Print()
 
 
     def construct(self, gt_bboxes_i, gt_labels_i, valid_mask, bboxes, gt_valids):
@@ -124,7 +124,6 @@ class Proposal(nn.Cell):
         self.min_float_num = -65536.0
         self.topK_mask = Tensor(self.min_float_num * np.ones(total_max_topk_input, np.float16))
         self.shape = P.Shape()
-        self.print = P.Print()
 
     def construct(self, rpn_cls_score_total, rpn_bbox_pred_total, anchor_list):
         proposals_tuple = ()
@@ -52,7 +52,6 @@ class RpnRegClsBlock(nn.Cell):
         self.shape1 = (-1, config.num_step, config.rnn_batch_size)
         self.shape2 = (config.batch_size, -1, config.rnn_batch_size, config.num_step)
         self.transpose = P.Transpose()
-        self.print = P.Print()
 
     def construct(self, x):
         x = self.reshape(x, self.shape)

@@ -143,7 +142,6 @@ class RPN(nn.Cell):
         self.loss = Tensor(np.zeros((1,)).astype(np.float16))
         self.clsloss = Tensor(np.zeros((1,)).astype(np.float16))
         self.regloss = Tensor(np.zeros((1,)).astype(np.float16))
-        self.print = P.Print()
 
     def _make_rpn_layer(self, num_layers, in_channels, feat_channels, num_anchors, cls_out_channels):
         """
@@ -120,7 +120,6 @@ class openpose_loss(MyLoss):
         self.square = P.Square()
         self.reduceMean = P.ReduceMean()
         self.reduceSum = P.ReduceSum()
-        self.print = P.Print()
         self.shape = P.Shape()
         self.maxoftensor = P.ArgMaxWithValue(-1)
 
@@ -37,7 +37,6 @@ class OpenPoseNet(nn.Cell):
         self.stage_6 = Stage_x()
         self.shape = P.Shape()
         self.cat = P.Concat(axis=1)
-        self.print = P.Print()
         if loadvgg and vggpath:
             param_dict = load_checkpoint(vggpath)
             param_dict_new = {}
@@ -120,7 +120,6 @@ class PoseNet(nn.Cell):
         self.dropout5 = nn.Dropout(0.5)
         self.cls_fc_pose_xyz = nn.Dense(2048, 3)
         self.cls_fc_pose_wpqr = nn.Dense(2048, 4)
-        self.print = P.Print()
 
     def construct(self, x):
         """construct"""
@@ -82,7 +82,6 @@ class DropPath(Cell):
         self.rand = P.UniformReal(seed=seed)  # seed must be 0, if set to other value, it's not rand for multiple call
         self.shape = P.Shape()
         self.floor = P.Floor()
-        self.print = P.Print()
 
     def construct(self, x):
         if self.training:
@@ -69,7 +69,6 @@ class FastTextNetWithLoss(nn.Cell):
         self.fasttext = FastText(vocab_size, embedding_dims, num_class)
         self.loss_func = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
         self.squeeze = P.Squeeze(axis=1)
-        self.print = P.Print()
 
     def construct(self, src_tokens, src_tokens_lengths, label_idx):
         """
@@ -84,7 +84,6 @@ class Encoder(nn.Cell):
         self.fc = nn.Dense(2*self.hidden_size, self.hidden_size).to_float(config.compute_type)
         self.shape = P.Shape()
         self.transpose = P.Transpose()
-        self.p = P.Print()
         self.cast = P.Cast()
         self.text_len = config.max_length
         self.squeeze = P.Squeeze(axis=0)

@@ -131,7 +130,6 @@ class Decoder(nn.Cell):
         self.text_len = config.max_length
         self.shape = P.Shape()
         self.transpose = P.Transpose()
-        self.p = P.Print()
         self.cast = P.Cast()
         self.concat = P.Concat(axis=2)
         self.squeeze = P.Squeeze(axis=0)
@@ -115,7 +115,6 @@ class TextCNN(nn.Cell):
 
         self.fc = nn.Dense(96*3, self.num_classes)
         self.drop = nn.Dropout(keep_prob=0.5)
-        self.print = P.Print()
         self.reducemax = P.ReduceMax(keep_dims=False)
 
     def make_layer(self, kernel_height):
@@ -124,7 +124,6 @@ class ResNetMulti(nn.Cell):
         self.layer5 = ClassifierModule(1024, [6, 12, 18, 24], [6, 12, 18, 24], num_classes)
 
         self.layer6 = ClassifierModule(2048, [6, 12, 18, 24], [6, 12, 18, 24], num_classes)
-        self.p = P.Print()
         self.shape = Shape()
         self.pad = nn.Pad(((0, 0), (0, 0), (1, 1), (1, 1)), "CONSTANT")
 
@@ -42,7 +42,6 @@ class CriterionsFaceAttri(nn.Cell):
 
         # loss
         self.ce_ignore_loss = CrossEntropyWithIgnoreIndex()
-        self.printn = P.Print()
 
     def construct(self, x0, x1, x2, label):
         '''Construct function.'''
@@ -26,7 +26,6 @@ from mindspore.nn import TrainOneStepCell
 from mindspore.context import ParallelMode
 from mindspore.train.callback import ModelCheckpoint, RunContext, CheckpointConfig
 from mindspore.train.serialization import load_checkpoint, load_param_into_net
-from mindspore.ops import operations as P
 from mindspore.common import dtype as mstype
 from src.FaceAttribute.resnet18 import get_resnet18
 from src.FaceAttribute.loss_factory import get_loss

@@ -55,7 +54,6 @@ class BuildTrainNetwork(nn.Cell):
         super(BuildTrainNetwork, self).__init__()
         self.network = my_network
         self.criterion = my_criterion
-        self.print = P.Print()
 
     def construct(self, input_data, label):
         logit0, logit1, logit2 = self.network(input_data)
@@ -42,7 +42,6 @@ class YoloPostProcess(Cell):
     """
     def __init__(self, num_classes, cur_anchors, conf_thresh, network_size, reduction, anchors_mask):
         super(YoloPostProcess, self).__init__()
-        self.print = P.Print()
         self.num_classes = num_classes
         self.anchors = cur_anchors
         self.conf_thresh = conf_thresh
@@ -174,7 +174,6 @@ class BuildTestNetwork(nn.Cell):
     '''BuildTestNetwork'''
     def __init__(self, network, reduction_0, reduction_1, reduction_2, anchors, anchors_mask, num_classes, args):
         super(BuildTestNetwork, self).__init__()
-        self.print = P.Print()
         self.network = network
         self.reduction_0 = reduction_0
         self.reduction_1 = reduction_1
@@ -37,7 +37,6 @@ class MyTrain(nn.Cell):
         self.model = model
         self.con_loss = con_loss
         self.criterion = criterion
-        self.p = P.Print()
         self.cast = P.Cast()
 
     def construct(self, lr, hr, idx):
@@ -91,7 +91,6 @@ class MaskBlock(nn.Cell):
         self.relu = P.ReLU()
 
         self.thre = thres[num]
-        self.print = P.Print()
 
     def construct(self, x):
         """construct"""
@@ -245,7 +245,6 @@ class InceptionC(nn.Cell):
         self.concat = P.Concat(1)
         if not self.noReLU:
             self.relu = nn.ReLU()
-        self.print = P.Print()
 
     def construct(self, x):
         x0 = self.branch0(x)
@@ -28,6 +28,7 @@ from mindspore.common.parameter import Parameter
 from mindspore.common.initializer import initializer
 from mindspore.ops.primitive import constexpr
 from capture import Capture, capture, check_output
+from tests.security_utils import security_off_wrap
 
 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
 
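`security_off_wrap` comes from `tests/security_utils.py`, which is not part of this diff; its role is to keep Print-dependent tests out of security builds. A plausible sketch (the real helper may well differ) is a skip marker keyed on the same build flag:

# Hypothetical reconstruction -- the actual tests/security_utils.py is not
# shown in this commit.
import pytest
from mindspore._c_expression import security

def security_off_wrap(func):
    """Skip the test when the build is a security (`-s on`) build."""
    return pytest.mark.skipif(
        security.enable_security(),
        reason="Print/IO ops are disabled in security builds")(func)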
@@ -48,6 +49,7 @@ def _with_save_graphs():
     clean_all_ir_files('./')
 
 
+@security_off_wrap
 def test_print():
     class Print(Cell):
         def __init__(self):

@@ -71,6 +73,7 @@ def test_print():
     check_output(cap.output, patterns)
 
 
+@security_off_wrap
 def test_print_add():
     class Print_Add(Cell):
         def __init__(self):

@@ -98,6 +101,7 @@ def test_print_add():
     check_output(cap.output, patterns)
 
 
+@security_off_wrap
 def test_print_assign():
     class Print_Assign(Cell):
         def __init__(self):

@@ -125,6 +129,7 @@ def test_print_assign():
     check_output(cap.output, patterns)
 
 
+@security_off_wrap
 def test_print_assign_add():
     class Print_Assign_Add(Cell):
         def __init__(self):

@@ -155,6 +160,7 @@ def test_print_assign_add():
     check_output(cap.output, patterns)
 
 
+@security_off_wrap
 def test_print_while():
     class Print_While(Cell):
         def __init__(self):

@@ -189,6 +195,7 @@ def test_print_while():
     check_output(cap.output, patterns)
 
 
+@security_off_wrap
 def test_print_if():
     class Print_If(Cell):
         def __init__(self):

@@ -219,6 +226,7 @@ def test_print_if():
     check_output(cap.output, patterns)
 
 
+@security_off_wrap
 def test_print_assign_while():
     class Print_Assign_While(Cell):
         def __init__(self):

@@ -262,6 +270,7 @@ def test_print_assign_while():
     check_output(cap.output, patterns)
 
 
+@security_off_wrap
 def test_print_assign_if():
     class Print_Assign_If(Cell):
         def __init__(self):

@@ -517,6 +526,7 @@ def test_for():
     np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
 
 
+@security_off_wrap
 def test_print_for():
     class Print_For(Cell):
         def __init__(self):

@@ -553,6 +563,7 @@ def test_print_for():
     check_output(cap.output, patterns)
 
 
+@security_off_wrap
 def test_print_assign_for():
     class Print_Assign_For(Cell):
         def __init__(self):

@@ -739,6 +750,7 @@ def test_multi_assign_addn():
     np.testing.assert_almost_equal(out.asnumpy(), expect.asnumpy())
 
 
+@security_off_wrap
 def test_multi_assign_print():
     class Multi_Assign_Print(Cell):
         def __init__(self):
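All of these graph tests share a capture-and-match shape: run the cell, capture what Print emits, then assert the expected substrings with `check_output(cap.output, patterns)`. The repo's `Capture` helper is not shown here; a self-contained illustration of the pattern using plain stdout redirection (the real helper presumably captures at file-descriptor level, since device-side Print need not go through Python's sys.stdout):

import io
from contextlib import redirect_stdout

def run_and_check(fn, patterns):
    """Run fn, capture stdout, and require every pattern to appear."""
    buf = io.StringIO()
    with redirect_stdout(buf):
        fn()
    out = buf.getvalue()
    for pat in patterns:
        assert pat in out, f"pattern {pat!r} not found in captured output"

run_and_check(lambda: print("x: 1"), {"x: 1"})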
@@ -24,6 +24,7 @@ from mindspore.common.initializer import initializer
 from mindspore.train.model import Model
 from mindspore.ops.composite import GradOperation
 from mindspore.common import ParameterTuple
+from tests.security_utils import security_off_wrap
 
 context.set_context(mode=context.GRAPH_MODE)
 

@@ -346,6 +347,7 @@ class SideEffectIOCellAddnNet(Cell):
         return grad_out
 
 
+@security_off_wrap
 @pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training

@@ -460,7 +462,7 @@ class SideEffectPrintInHighOrdeAddnNet(Cell):
         grad_out = grad_net(params, grad_ys)
         return grad_out
 
 
+@security_off_wrap
 @pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -21,6 +21,7 @@ from mindspore import Tensor
 import mindspore.nn as nn
 from mindspore.ops import operations as P
 import mindspore.context as context
+from tests.security_utils import security_off_wrap
 
 
 class PrintNetOneInput(nn.Cell):

@@ -53,6 +54,8 @@ class PrintNetIndex(nn.Cell):
         return x
 
 
+
+@security_off_wrap
 def print_testcase(nptype):
     # large shape
     x = np.arange(20808).reshape(6, 3, 34, 34).astype(nptype)

@@ -85,6 +88,7 @@ class PrintNetString(nn.Cell):
         return x
 
 
+@security_off_wrap
 def print_testcase_string(nptype):
     x = np.ones(18).astype(nptype)
     y = np.arange(9).reshape(3, 3).astype(nptype)

@@ -106,6 +110,7 @@ class PrintTypes(nn.Cell):
         return x
 
 
+@security_off_wrap
 @pytest.mark.level0
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard

@@ -118,6 +123,7 @@ def test_print_multiple_types():
     net(x, y, z)
 
 
+@security_off_wrap
 @pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
@@ -125,6 +131,7 @@ def test_print_bool():
     print_testcase(np.bool)
 
 
+@security_off_wrap
 @pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard

@@ -132,6 +139,7 @@ def test_print_int8():
     print_testcase(np.int8)
 
 
+@security_off_wrap
 @pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard

@@ -139,6 +147,7 @@ def test_print_int16():
     print_testcase(np.int16)
 
 
+@security_off_wrap
 @pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard

@@ -146,6 +155,7 @@ def test_print_int32():
     print_testcase(np.int32)
 
 
+@security_off_wrap
 @pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard

@@ -153,6 +163,7 @@ def test_print_int64():
     print_testcase(np.int64)
 
 
+@security_off_wrap
 @pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard

@@ -160,6 +171,7 @@ def test_print_uint8():
     print_testcase(np.uint8)
 
 
+@security_off_wrap
 @pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard

@@ -167,6 +179,7 @@ def test_print_uint16():
     print_testcase(np.uint16)
 
 
+@security_off_wrap
 @pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard

@@ -174,6 +187,7 @@ def test_print_uint32():
     print_testcase(np.uint32)
 
 
+@security_off_wrap
 @pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard

@@ -181,6 +195,7 @@ def test_print_uint64():
     print_testcase(np.uint64)
 
 
+@security_off_wrap
 @pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard

@@ -188,6 +203,7 @@ def test_print_float16():
     print_testcase(np.float16)
 
 
+@security_off_wrap
 @pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard

@@ -195,6 +211,7 @@ def test_print_float32():
     print_testcase(np.float32)
 
 
+@security_off_wrap
 @pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
@@ -19,7 +19,7 @@ from mindspore import context, Tensor, Parameter
 from mindspore.common.api import _cell_graph_executor
 from mindspore.nn import Cell, TrainOneStepCell, Momentum, BatchNorm2d, BatchNorm1d
 from mindspore.ops import operations as P
 
+from tests.security_utils import security_off_wrap
 
 class Net(Cell):
     def __init__(self, conv2d_weight, out_channel, kernel_size, pad_mode, stride,

@@ -53,6 +53,7 @@ def compile_net(net):
     context.reset_auto_parallel_context()
 
 
+@security_off_wrap
 def test_batchnorm_data_parallel():
     context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
     strategy1 = ((8, 1, 1, 1), (1, 1, 1, 1))

@@ -61,6 +62,7 @@ def test_batchnorm_data_parallel():
     compile_net(net)
 
 
+@security_off_wrap
 def test_batchnorm_model_parallel1():
     context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
     strategy1 = ((2, 2, 1, 1), (2, 2, 1, 1))

@@ -69,6 +71,7 @@ def test_batchnorm_model_parallel1():
     compile_net(net)
 
 
+@security_off_wrap
 def test_batchnorm_model_parallel2():
     context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=32, global_rank=0)
     strategy1 = ((2, 2, 2, 2), (2, 2, 1, 1))
@@ -9,6 +9,7 @@ from mindspore.ops import operations as P
 from mindspore.ops import functional as F
 from mindspore import Tensor
 from mindspore.common.parameter import Parameter, ParameterTuple
+from tests.security_utils import security_off_wrap
 
 grad_all_list = C.GradOperation(get_all=True, get_by_list=True)
 grad_by_list = C.GradOperation(get_by_list=True)

@@ -117,6 +118,7 @@ def test_insert_gradient_of():
     print(grad_net(Tensor(input_data)))
 
 
+@security_off_wrap
 def test_user_defined_bprop():
     class UserDefinedNet(nn.Cell):
         def __init__(self):

@@ -151,6 +153,7 @@ def test_user_defined_bprop():
 
 
 # user-defined bprop does not have the same number of parameters as the primal's
+@security_off_wrap
 def test_user_defined_bad_bprop():
     class UserDefinedNet(nn.Cell):
         def __init__(self):

@@ -186,6 +189,7 @@ def test_user_defined_bad_bprop():
 
 
 # should compile successfully, with Print present in the final function graph
+@security_off_wrap
 @pytest.mark.skip(reason="isolated nodes exception")
 def test_unused_var():
     class UnusedVar(nn.Cell):

@@ -212,6 +216,7 @@ def test_unused_var():
 
 
 # should compile successfully, with Print present in the final function graph
+@security_off_wrap
 @pytest.mark.skip(reason="isolated nodes exception")
 def test_hof_unused_var():
     class UnusedVar(nn.Cell):

@@ -241,6 +246,7 @@ def test_hof_unused_var():
 
 
 # should compile successfully, with Print present in the final function graph
+@security_off_wrap
 @pytest.mark.skip(reason="isolated nodes exception")
 def test_partial_hof_unused_var():
     class UnusedVar(nn.Cell):
@@ -27,6 +27,7 @@ from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from mindspore.ops.functional import stop_gradient
 from mindspore.ops.primitive import prim_attr_register, PrimitiveWithInfer
+from tests.security_utils import security_off_wrap
 from ..ut_filter import non_graph_engine
 from ....mindspore_test_framework.utils.bprop_util import bprop
 

@@ -429,6 +430,7 @@ def test_stop_gradient_11():
                              Tensor(np.ones([2]).astype(np.float32)))
 
 
+@security_off_wrap
 def test_stop_print():
     class StopPrint(nn.Cell):
         def __init__(self):
@@ -34,6 +34,7 @@ from mindspore.ops import operations as P
 from mindspore.train.callback import _CheckpointManager
 from mindspore.train.serialization import save_checkpoint, load_checkpoint, load_param_into_net, \
     export, _save_graph, load
+from tests.security_utils import security_off_wrap
 from ..ut_filter import non_graph_engine
 
 context.set_context(mode=context.GRAPH_MODE, print_file_path="print/print.pb")

@@ -419,6 +420,7 @@ class PrintNet(nn.Cell):
         return int8, uint8, int16, uint16, int32, uint32, int64, uint64, flt16, flt32, flt64, bool_, scale1, scale2
 
 
+@security_off_wrap
 def test_print():
     print_net = PrintNet()
     int8 = Tensor(np.random.randint(100, size=(10, 10), dtype="int8"))
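One context line above is worth noting: `print_file_path="print/print.pb"` redirects Print output into a protobuf file instead of the screen. If memory serves, the saved records can be loaded back with `parse_print` from `mindspore.train.serialization`; a hedged sketch of that workflow:

from mindspore import context
# from mindspore.train.serialization import parse_print  # availability assumed

context.set_context(mode=context.GRAPH_MODE, print_file_path="print/print.pb")
# ... run a network that contains P.Print() ...
# tensors = parse_print("print/print.pb")  # read the saved Print records back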