forked from mindspore-Ecosystem/mindspore

redundant codes clean

This commit is contained in:
parent d56683157d
commit 4e85071055

@@ -111,7 +111,6 @@ class GraphSplitByPattern:
"""Split graph"""
def _buddy(op, dom, path_ops):
"""Fuse buddy together"""
# pylint: disable=unused-argument
group = self.op_group[op]
for p in group:
# p is buddy
@@ -125,7 +124,6 @@ class GraphSplitByPattern:

def _injective(pattern, limit):
def _checker(op, dom, path_ops):
# pylint: disable=unused-argument
for p in op.output.to_ops:
if p not in self.op_group[dom]:
return False

@@ -39,13 +39,11 @@ class OpInfer:
@staticmethod
def default_infer_dtype_func(inputs, attrs):
"""Infer dtype"""
# pylint: disable=unused-argument
return inputs[0].dtype

@staticmethod
def default_infer_format_func(inputs, attrs):
"""Infer format"""
# pylint: disable=unused-argument
return inputs[0].data_format

infer_shape_func = {

@@ -33,7 +33,6 @@ trope_ns = CellNamespace('mindspore._extends.parse.trope')
NO_IMPLEMENT = None # not implemented
SYMBOL_UNDEFINE = 0xFF # Undefined var and function

# ops map: {op.type:(Namespace, symbol)}
# Some space set aside for readability of code
parse_object_map = {
# ast grammar
@@ -75,7 +74,6 @@ parse_object_map = {
SYMBOL_UNDEFINE: (None, 'undefine'),
}

# convert map: {obj:(Namespace, symbol)}
# Escape an object to another object, eg: system function(len,xxx)
# Some space set aside for readability of code
convert_object_map = {

@@ -266,7 +266,6 @@ ValuePtr GenNewTensorInner(const ValuePtr &value) {
std::vector<ValuePtr> value_list;
if (value->isa<tensor::Tensor>()) {
auto tensor = value->cast<tensor::TensorPtr>();
// return std::make_shared<tensor::Tensor>(tensor->data_type(), tensor->shape());
auto new_tensor = std::make_shared<tensor::Tensor>(*tensor);
new_tensor->set_device_address(nullptr);
return new_tensor;

@@ -149,24 +149,6 @@ std::shared_ptr<abstract::AbstractTuple> ParserAttrShape(
return result;
}

#if 0
#define PARSE_ONNXATTR_IN_SCALAR_FORM(type, valuetype) \
void ParseAttrInScalar_##type##_##valuetype(const PrimitivePtr &prim, const std::string &attr_name, \
const onnx::TensorProto &attr_tensor) { \
MS_EXCEPTION_IF_NULL(prim); \
std::vector<ValuePtr> attr_value_vec; \
for (int i = 0; i < attr_tensor.type##_data_size(); ++i) { \
auto value = static_cast<valuetype>(attr_tensor.type##_data(i)); \
attr_value_vec.push_back(MakeValue<valuetype>(value)); \
} \
if (attr_value_vec.size() == 1) { \
prim->AddAttr(attr_name, attr_value_vec[0]); \
} else { \
ParserScalarAttrValue(prim, attr_name, attr_value_vec); \
} \
}
#endif

#define PARSE_ONNXATTR_IN_SCALAR_FORM(type, valuetype) \
ValuePtr ParseAttrInScalar_##type##_##valuetype(const onnx::TensorProto &attr_tensor) { \
auto value = static_cast<valuetype>(attr_tensor.type##_data(0)); \
@@ -212,7 +194,6 @@ bool MSANFModelParser::BuildParameterForFuncGraph(const ParameterPtr &node, cons
tensor::TensorPtr tensor_info =
std::make_shared<tensor::Tensor>(kDefaultValueSwitchMap[tensor_typeproto.elem_type()], shape);
MS_EXCEPTION_IF_NULL(tensor_info);
// tensor_info->MallocData();
auto tensor_abstract = tensor_info->ToAbstract();
MS_EXCEPTION_IF_NULL(tensor_abstract);
node->set_abstract(tensor_abstract);
@@ -367,7 +348,6 @@ bool MSANFModelParser::ObtainValueNodeInTensorForm(const std::string &value_node
shape.push_back(attr_tensor.dims(i));
}
tensor::TensorPtr tensor_info = std::make_shared<tensor::Tensor>(kDefaultValueSwitchMap[attr_tensor_type], shape);
// tensor_info->MallocData();
const std::string &tensor_buf = attr_tensor.raw_data();
auto *tensor_data_buf = reinterpret_cast<uint8_t *>(tensor_info->data_c());
auto ret = memcpy_s(tensor_data_buf, tensor_info->data().nbytes(), tensor_buf.data(), tensor_buf.size());

@@ -76,7 +76,6 @@ void WinogradInputTransform(const float *input_data, float *trans_input, float *
size_t dst_step = tile_num * ic4 * C4NUM;
float *trans_input_ptr = trans_input + dst_ic4_offset;
func(tmp_data, trans_input_ptr, C4NUM, dst_step);
// GeneralInputTransformUnit(tmp_data, trans_input_ptr, matrix_b, matrix_bt, C4NUM, dst_step, input_unit);
}
out_tile_index++;
} // cal_tile_num loop
@@ -120,8 +119,6 @@ void WinogradOutputTransform(const float *gemm_out, float *out_data, const float
const float *bias_ptr = bias_data + j * C4NUM;
float *dst_ptr = out_data + dst_oc4_offset;
func(src_ptr, dst_ptr, bias_ptr, C8NUM, output_w, output_channel, r_w, r_h, r_c);
// GeneralOutputTransformUnit(src_ptr, dst_ptr, bias_ptr, matrix_a, matrix_at, C8NUM,
// output_w_unit_block * output_unit, input_unit, output_unit);
}
out_tile_index++;
}

@@ -46,7 +46,6 @@ int ActivationGrad::UnPackAttr(const Primitive &prim, const std::vector<AnfNodeP
} else if (prim.name() == "Relu6Grad") {
attr->type = schema::ActivationType_RELU6;
}
// auto alpha = GetValue<float>(prim.GetAttr("alpha"));
attr->alpha = 0; // alpha;
this->primitive_->value.value = attr.release();
if (this->primitive_->value.value == nullptr) {

@@ -110,10 +110,6 @@ int SparseSoftmaxCrossEntropyWithLogitsCPUKernel::Run() {
}

int SparseSoftmaxCrossEntropyWithLogitsCPUKernel::Init() {
// if (context_ && context_->infer_shape_interrupt_ && !context_->running_) {
// set_need_reinit();
// return RET_OK;
// }
auto dims = in_tensors_[0]->shape();
param->n_dim_ = 2;
param->number_of_classes_ = dims[1];

@@ -136,7 +136,6 @@ class MinimumGrad(GraphKernel):
def construct(self, x, y, dout):
cmp_result = self.less_equal(x, y)
dx = self.select(cmp_result, dout, self.zeros_like(dout))
# dy = self.select(cmp_result, self.zeros_like(dout), dout)
dy = dout - dx

return dx, dy

@@ -380,7 +380,6 @@ class PSNR(Cell):
img2 = _convert_img_dtype_to_float32(img2, self.max_val)

mse = P.ReduceMean()(F.square(img1 - img2), (-3, -2, -1))
# 10*log_10(max_val^2/MSE)
psnr = 10 * P.Log()(F.square(max_val) / mse) / F.scalar_log(10.0)

return psnr

@@ -64,11 +64,8 @@ def binop_grad_common(x, y, dx, dy):

def _sum_grad(x, axis, dout):
"""Grad definition for `Sum` operation."""
# input_shape = [2, 3] axis = [1]
input_shape = shape_op(x)
# output_shape_kept_dims = [2, 1]
output_shape_kept_dims = reduced_shape(input_shape, axis)
# tile_scaling = [1, 3]
tile_scaling = tuple_div(input_shape, output_shape_kept_dims)
grad = reshape(dout, output_shape_kept_dims)
return tile(grad, tile_scaling)
@@ -76,9 +73,7 @@ def _sum_grad(x, axis, dout):

def _min_or_max_grad(x, axis, out, dout):
"""Grad definition for `Min` and `Max` operations."""
# input_shape = [2, 3] axis = [1]
input_shape = shape_op(x)
# output_shape_kept_dims = [2, 1]
output_shape_kept_dims = reduced_shape(input_shape, axis)
y = reshape(out, output_shape_kept_dims)
grad = reshape(dout, output_shape_kept_dims)

@@ -44,7 +44,6 @@ matmul_cube_dense_left_op_info = TBERegOp("CusMatMulCubeDenseLeft") \
.get_op_info()


# pylint: disable=locally-disabled,too-many-arguments,too-many-branches, too-many-statements, too-many-locals,
def _shape_check(shape_a, shape_b, shape_bias, src_dtype, trans_a, trans_b):
"""
Check the given input if legal
@@ -244,8 +243,6 @@ def check_supported(input_x1, input_x2, bias=None, output_y={}, trans_a=False, t

return True

# pylint: disable=locally-disabled,too-many-arguments, too-many-locals, too-many-statements,
# pylint: disable=inconsistent-return-statements
# @util.check_input_type(dict, dict, (dict, NoneType), dict, bool, bool, str)
@op_info_register(matmul_cube_dense_left_op_info)
def CusMatMulCubeDenseLeft(input_x1, input_x2, bias=None, output_y={}, trans_a=False, trans_b=False,
@@ -467,3 +464,4 @@ def CusMatMulCubeDenseLeft(input_x1, input_x2, bias=None, output_y={}, trans_a=F
"tensor_list": tensor_list}

te.lang.cce.cce_build_code(schedule, config)
return None

@@ -40,7 +40,6 @@ matmul_cube_dense_right_op_info = TBERegOp("CusMatMulCubeDenseRight") \
.get_op_info()


# pylint: disable=inconsistent-return-statements
@op_info_register(matmul_cube_dense_right_op_info)
def CusMatMulCubeDenseRight(input_x1, input_x2, input_x3, bias=None, output_y={}, trans_a=False, trans_b=False,
kernel_name="matmulcube"):
@@ -171,3 +170,4 @@ def CusMatMulCubeDenseRight(input_x1, input_x2, input_x3, bias=None, output_y={}

tik_instance.BuildCCE(kernel_name=kernel_name, inputs=[input_x1, input_x2, input_x3], outputs=[resMatmul])
return tik_instance
return None

@@ -41,7 +41,6 @@ matmul_cube_fracz_left_cast_op_info = TBERegOp("CusMatMulCubeFraczLeftCast") \
.get_op_info()


# pylint: disable=locally-disabled,too-many-arguments,too-many-branches, too-many-statements, too-many-locals,
def _shape_check(shape_a, shape_b, shape_bias, src_dtype, trans_a, trans_b):
"""
Check the given input if legal
@@ -239,7 +238,6 @@ def check_supported(input_x1, input_x2, bias=None, output_y={}, trans_a=False, t
return True


# pylint: disable=locally-disabled,too-many-arguments, too-many-locals, too-many-statements
@op_info_register(matmul_cube_fracz_left_cast_op_info)
def CusMatMulCubeFraczLeftCast(input_x1, input_x2, bias=None, output_y={}, trans_a=False, trans_b=False,
kernel_name="CusMatMulCubeFraczLeftCast"):

@@ -47,7 +47,6 @@ matmul_cube_op_info = TBERegOp("CusMatMulCube") \
.get_op_info()


# pylint: disable=locally-disabled,too-many-arguments,too-many-branches, too-many-statements, too-many-locals,
def _shape_check(shape_a, shape_b, shape_bias, src_dtype, trans_a, trans_b):
"""
Check the given input if legal
@@ -244,7 +243,6 @@ def check_supported(input_x1, input_x2, bias=None, output_y={}, trans_a=False, t
return True


# pylint: disable=locally-disabled,too-many-arguments, too-many-locals, too-many-statements
@op_info_register(matmul_cube_op_info)
def CusMatMulCube(input_x1, input_x2, bias=None, output_y={}, trans_a=False, trans_b=False, kernel_name="matmulcube"):
"""

@@ -317,8 +317,6 @@ class ExportToQuantInferNetwork:

def __init__(self, network, mean, std_dev, *inputs, is_mindir=False):
network = validator.check_isinstance('network', network, (nn.Cell,))
# quantize for inputs: q = f / scale + zero_point
# dequantize for outputs: f = (q - zero_point) * scale
self.input_scale = 1 / std_dev
self.input_zero_point = round(mean)
self.data_type = mstype.int8

@@ -48,7 +48,6 @@ def cal_quantization_params(input_min,
if (input_min > input_max).all():
raise ValueError("input_min min should less than input max.")
if (input_max == input_min).all():
# scale = 1.0, zp = 0.0
return np.ones(input_min.shape), np.zeros(input_min.shape)

if data_type == np.int8:

@@ -139,8 +139,6 @@ def _get_rank_info():
rank_size = get_group_size()
rank_id = get_rank()
else:
# rank_size = rank_id = None

rank_size = 1
rank_id = 0

@@ -25,15 +25,12 @@ def get_param_groups(network):
parameter_name = x.name
if parameter_name.endswith('.bias'):
# all bias not using weight decay
# print('no decay:{}'.format(parameter_name))
no_decay_params.append(x)
elif parameter_name.endswith('.gamma'):
# bn weight bias not using weight decay, be carefully for now x not include BN
# print('no decay:{}'.format(parameter_name))
no_decay_params.append(x)
elif parameter_name.endswith('.beta'):
# bn weight bias not using weight decay, be carefully for now x not include BN
# print('no decay:{}'.format(parameter_name))
no_decay_params.append(x)
else:
decay_params.append(x)

@@ -33,7 +33,6 @@ GRADIENT_CLIP_VALUE = 10.0
clip_grad = C.MultitypeFuncGraph("clip_grad")


# pylint: disable=consider-using-in
@clip_grad.register("Number", "Number", "Tensor")
def _clip_grad(clip_type, clip_value, grad):
"""
@@ -47,7 +46,7 @@ def _clip_grad(clip_type, clip_value, grad):
Outputs:
tuple[Tensor]: clipped gradients.
"""
if clip_type != 0 and clip_type != 1:
if clip_type not in (0, 1):
return grad
dt = F.dtype(grad)
if clip_type == 0:

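Several of the training scripts in this commit apply the same simplification inside `_clip_grad`: the pair of inequality comparisons is collapsed into one membership test, which is the rewrite that pylint's `consider-using-in` check suggests (the diff also drops the now-unneeded disable comment). The sketch below is only an illustration of that equivalence; the helper name `is_supported_clip_type` and the test values are hypothetical and not part of the commit.

# Hypothetical illustration of the consider-using-in rewrite; not code from the commit.
def is_supported_clip_type(clip_type):
    # Old form: two explicit comparisons joined with `and`.
    old_style = clip_type != 0 and clip_type != 1
    # New form: a single membership test over the allowed values.
    new_style = clip_type not in (0, 1)
    # Both expressions are True exactly when clip_type is neither 0 nor 1.
    assert old_style == new_style
    return not new_style

for value in (0, 1, 2):
    print(value, is_supported_clip_type(value))
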
@@ -200,10 +200,8 @@ class TrainDataset:
img_path = self.all_img_paths[index]
gt_path = self.all_gt_paths[index]

# start0 = time.time()
img = get_img(img_path)
bboxes, tags = get_bboxes(img, gt_path)
# end0 = time.time()

# multi-scale training
if self.is_transform:

@@ -39,7 +39,6 @@ def main():

hccn_table = {}
hccn_table['board_id'] = '0x002f' # A+K
# hccn_table['board_id'] = '0x0000' # A+X

hccn_table['chip_info'] = '910'
hccn_table['deploy_mode'] = 'lab'

@@ -129,7 +129,6 @@ class TrainOneStepCell(nn.Cell):
def __init__(self, network, optimizer, sens=1.0, reduce_flag=False, mean=True, degree=None):
super(TrainOneStepCell, self).__init__(auto_prefix=False)
self.network = network
# self.backbone = network._backbone
self.weights = ParameterTuple(network.trainable_params())
self.optimizer = optimizer
self.grad = C.GradOperation(get_by_list=True,

@@ -53,4 +53,3 @@ class StepLossTimeMonitor(Callback):
if self._per_print_times != 0 and cb_params.cur_step_num % self._per_print_times == 0:
# TEST
print("step: %s, loss is %s, fps is %s" % (cur_step_in_epoch, loss, step_fps), flush=True)
# print("step: %s, loss is %s, fps is %s" % ( cur_step_in_epoch, loss, step_fps))

@@ -106,7 +106,6 @@ std::string SSDModelUtil::getDecodeResult(float *branchScores, float *branchBoxD
std::string tmpid_str = std::to_string(outBuff[i][0]);
result += tmpid_str;
result += "_";
// tmpid_str = std::to_string(outBuff[i][1]);
MS_PRINT("label_classes i %d, outBuff %d", i, (int) outBuff[i][1]);
tmpid_str = label_classes[static_cast<int>(outBuff[i][1])];
// label id

@@ -665,7 +665,6 @@ public class CameraFragment extends Fragment {

mPreviewRequestBuilder = mCameraDevice.createCaptureRequest(CameraDevice.TEMPLATE_PREVIEW);
mPreviewRequestBuilder.addTarget(surface);
// mPreviewRequestBuilder.addTarget(mImageReader.getSurface());
// Here, we create a CameraCaptureSession for ic_launcher preview.
mCameraDevice.createCaptureSession(Arrays.asList(surface, mImageReader.getSurface()),
new CameraCaptureSession.StateCallback() {

@@ -55,7 +55,6 @@ public class DealDataActivity extends AppCompatActivity {
super.handleMessage(msg);
if (1 == msg.what) {
dealData();
// dealSingleData();
}
}
};
@@ -121,14 +120,12 @@ public class DealDataActivity extends AppCompatActivity {
private void dealSingleData() {
String fileFullName = IMGPATH + "/error.jpg";
Bitmap bitmap = BitmapFactory.decodeResource(getResources(),R.drawable.error).copy(Bitmap.Config.ARGB_8888, true);
// Bitmap bitmap = BitmapFactory.decodeFile(fileFullName).copy(Bitmap.Config.ARGB_8888, true);
if (bitmap != null) {

String result = mTrackingMobile.MindSpore_runnet(bitmap);
Log.d(TAG, ">>result" + result);
StringBuilder sb = new StringBuilder();
sb.append("error.jpg").append("_").append(result);
// writeStringIntoSDcard(IMG_RESULT_SINGLE_PATH, sb.toString());
}
}

@@ -89,9 +89,6 @@ public class ObjectRectView extends View {
super.onDraw(canvas);

if (mRecognitions == null || mRecognitions.size() == 0) {
// mPaint.setColor(Color.TRANSPARENT);
// mObjRectF = new RectF(0, 0, 5, 5);
// canvas.drawRoundRect(mObjRectF, 0, 0, mPaint);
return;
}
for (int i = 0;i<mRecognitions.size();i++){

@@ -36,7 +36,6 @@ GRADIENT_CLIP_VALUE = 1.0
clip_grad = C.MultitypeFuncGraph("clip_grad")


# pylint: disable=consider-using-in
@clip_grad.register("Number", "Number", "Tensor")
def _clip_grad(clip_type, clip_value, grad):
"""
@@ -50,7 +49,7 @@ def _clip_grad(clip_type, clip_value, grad):
Outputs:
tuple[Tensor], clipped gradients.
"""
if clip_type != 0 and clip_type != 1:
if clip_type not in (0, 1):
return grad
dt = F.dtype(grad)
if clip_type == 0:

@@ -42,7 +42,6 @@ GRADIENT_CLIP_VALUE = 1.0
clip_grad = C.MultitypeFuncGraph("clip_grad")


# pylint: disable=consider-using-in
@clip_grad.register("Number", "Number", "Tensor")
def _clip_grad(clip_type, clip_value, grad):
"""
@@ -56,7 +55,7 @@ def _clip_grad(clip_type, clip_value, grad):
Outputs:
tuple[Tensor], clipped gradients.
"""
if clip_type != 0 and clip_type != 1:
if clip_type not in (0, 1):
return grad
dt = F.dtype(grad)
if clip_type == 0:

@@ -103,7 +103,6 @@ class ImdbParser():
vocab = set(chain(*tokenized_features))
self.__vacab[seg] = vocab

# word_to_idx looks like {'hello': 1, 'world':111, ... '<unk>': 0}
word_to_idx = {word: i + 1 for i, word in enumerate(vocab)}
word_to_idx['<unk>'] = 0
self.__word2idx[seg] = word_to_idx

@@ -35,7 +35,6 @@ GRADIENT_CLIP_TYPE = 1
GRADIENT_CLIP_VALUE = 1.0

clip_grad = C.MultitypeFuncGraph("clip_grad")
# pylint: disable=consider-using-in
@clip_grad.register("Number", "Number", "Tensor")
def _clip_grad(clip_type, clip_value, grad):
"""
@@ -49,7 +48,7 @@ def _clip_grad(clip_type, clip_value, grad):
Outputs:
tuple[Tensor], clipped gradients.
"""
if clip_type != 0 and clip_type != 1:
if clip_type not in (0, 1):
return grad
dt = F.dtype(grad)
if clip_type == 0:
@@ -89,7 +88,7 @@ class ClipGradients(nn.Cell):
clip_type,
clip_value):
"""clip gradients"""
if clip_type != 0 and clip_type != 1:
if clip_type not in (0, 1):
return grads
new_grads = ()
for grad in grads:

@@ -32,7 +32,6 @@ GRADIENT_CLIP_TYPE = 1
GRADIENT_CLIP_VALUE = 5.0


# pylint: disable=consider-using-in
class ClipGradients(nn.Cell):
"""
Clip gradients.
@@ -56,7 +55,7 @@ class ClipGradients(nn.Cell):
clip_type,
clip_value):
"""Defines the gradients clip."""
if clip_type != 0 and clip_type != 1:
if clip_type not in (0, 1):
return grads

new_grads = ()

@@ -78,14 +78,12 @@ def create_dataset(dataset_path, do_train, config, platform, repeat_num=1, batch
center_crop_p = P.CenterCrop(224)
totensor = P.ToTensor()
normalize_p = P.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
#change_op_p = P.HWC2CHW()
composeop = P.ComposeOp(
[decode_p, resize_p, center_crop_p, totensor, normalize_p])
if do_train:
trans = [resize_crop_op, horizontal_flip_op, color_op,
rescale_op, normalize_op, change_swap_op]
else:
#trans = [decode_op, resize_op, center_crop, rescale_op, normalize_op, change_swap_op]
trans = composeop()
type_cast_op = C2.TypeCast(mstype.int32)

@@ -23,7 +23,6 @@ from mindspore import context, Tensor
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from src.ssd_ghostnet import SSD300, ssd_ghostnet
from src.dataset import create_ssd_dataset, data_to_mindrecord_byte_image, voc_data_to_mindrecord
# from src.config_ghostnet import config
from src.config_ghostnet_13x import config
from src.coco_eval import metrics

@@ -161,7 +161,6 @@ def create_voc_label(is_training):
voc_dir = config.voc_dir
cls_map = {name: i for i, name in enumerate(config.coco_classes)}
sub_dir = 'train' if is_training else 'eval'
#sub_dir = 'train'
voc_dir = os.path.join(voc_dir, sub_dir)
if not os.path.isdir(voc_dir):
raise ValueError(f'Cannot find {sub_dir} dataset path.')

@@ -36,7 +36,6 @@ def update_onnx_initializer(onnx_file, ckpt_file, output_file):

for i, _ in enumerate(initializer):
item = initializer[i]
#print(item.name, item.data_type, item.dims, len(item.raw_data))
if not item.name in param_dict:
print(f"Warning: Can not find '{item.name}' in checkpoint parameters dictionary")
continue