redundant_codes

Signed-off-by: zhupuxu <zhupuxu@huawei.com>
zhupuxu 2020-12-08 14:37:40 +08:00
parent e7555043bd
commit 4f569677b7
21 changed files with 13 additions and 55 deletions

View File

@@ -131,7 +131,7 @@ def check_number(arg_value, value, rel, arg_type=int, arg_name=None, prim_name=N
"""
Check argument integer.
Usage:
Example:
- number = check_int(number, 0, Rel.GE, "number", None) # number >= 0
"""
rel_fn = Rel.get_fns(rel)

View File

@@ -24,8 +24,8 @@ HALF = 0.5
def expand_gelu(expand_info):
"""Gelu expander"""
# cal formulas are:
# gelu(x) = 0.5 * x * (1.0 + tanh(y))
# y = sqrt(2.0 / pi) * (x + 0.044715 * x * x * x)
# gelu(x) is 0.5 * x * (1.0 + tanh(y))
# y is sqrt(2.0 / pi) * (x + 0.044715 * x * x * x)
# get op info.
input_desc = expand_info['input_desc'][0]
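
For reference, the tanh approximation in these comments can be checked with a small NumPy sketch (illustrative only; gelu_tanh is a hypothetical name, not the expander's API):

```python
import numpy as np

def gelu_tanh(x):
    # y = sqrt(2.0 / pi) * (x + 0.044715 * x^3)
    y = np.sqrt(2.0 / np.pi) * (x + 0.044715 * x ** 3)
    # gelu(x) = 0.5 * x * (1.0 + tanh(y))
    return 0.5 * x * (1.0 + np.tanh(y))
```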

View File

@@ -25,10 +25,10 @@ HALF = 0.5
def expand_gelugrad(expand_info):
"""GeluGrad expander"""
# cal formulas are:
# gelu_grad(dy, x) = dy * y'
# y' = 0.5 * (1.0 + tanh(tanh_para)) + 0.5 * x * (1.0 - tanh(tanh_para) * tanh(tanh_para)) * mul_right
# tanh_para = sqrt(2.0 / pi) * (x + 0.044715 * x * x * x)
# mul_right = sqrt(2.0 / pi) * (1 + 3 * 0.044715 * x * x)
# gelu_grad(dy, x) is dy * y'
# y' is 0.5 * (1.0 + tanh(tanh_para)) + 0.5 * x * (1.0 - tanh(tanh_para) * tanh(tanh_para)) * mul_right
# tanh_para is sqrt(2.0 / pi) * (x + 0.044715 * x * x * x)
# mul_right is sqrt(2.0 / pi) * (1 + 3 * 0.044715 * x * x)
# get op info.
input_desc_0 = expand_info['input_desc'][0]
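
The four formulas above compose as follows; a minimal NumPy sketch under the assumption that dy and x are same-shaped arrays (gelu_grad is a hypothetical name):

```python
import numpy as np

def gelu_grad(dy, x):
    tanh_para = np.sqrt(2.0 / np.pi) * (x + 0.044715 * x ** 3)
    mul_right = np.sqrt(2.0 / np.pi) * (1 + 3 * 0.044715 * x ** 2)
    t = np.tanh(tanh_para)
    # y' = 0.5 * (1 + tanh(tanh_para)) + 0.5 * x * (1 - tanh(tanh_para)^2) * mul_right
    y_prime = 0.5 * (1.0 + t) + 0.5 * x * (1.0 - t * t) * mul_right
    return dy * y_prime
```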

View File

@@ -66,8 +66,6 @@ def expand_layernormgrad(expand_info):
mean_cof = graph_builder.value(x.dtype, (1.0 / reduce_size), x.data_format)
# cal dg db
# dg = np.sum(dy * np.power(var + epsilon, -0.5) * (x - mean), axis=tuple(param_axis), keepdims=True)
# db = np.sum(dy, axis=tuple(param_axis), keepdims=True)
var_eps = graph_builder.emit('TensorAdd', [variance, eps])
sqrt_var_eps = graph_builder.emit('Sqrt', [var_eps])
rsqrt_var_eps = graph_builder.emit('RealDiv', [const_one, sqrt_var_eps])
@@ -78,8 +76,6 @@ def expand_layernormgrad(expand_info):
db = graph_builder.emit('ReduceSum', [dy], attrs={'reduce_axis': param_axis, 'keep_dims': False})
# cal sum_1
# sum1 = np.sum((-0.5) * dy * gamma * (x - mean) * np.power(var + epsilon, -1.5), axis=tuple(norm_axis),
# keepdims=True)
tmp_var_eps = graph_builder.emit('Mul', [sqrt_var_eps, var_eps])
r_tmp_var_eps = graph_builder.emit('RealDiv', [const_one, tmp_var_eps])
x_sub_mean_mul_r_tmp_var_eps = graph_builder.emit('Mul', [x_sub_mean, r_tmp_var_eps])
@@ -89,18 +85,13 @@ def expand_layernormgrad(expand_info):
sum_1 = graph_builder.emit('ReduceSum', [sum_1_mul], attrs={'reduce_axis': norm_axis, 'keep_dims': True})
# cal sum_2
# sum2 = np.sum(dy * gamma, axis=tuple(norm_axis), keepdims=True)
sum_2 = graph_builder.emit('ReduceSum', [dy_mul_gamma], attrs={'reduce_axis': norm_axis, 'keep_dims': True})
# cal sum_3
# sum3 = np.sum(-2.0 * (x - mean), axis=tuple(norm_axis), keepdims=True)
sum_3_mul = graph_builder.emit('Mul', [const_neg_two, x_sub_mean])
sum_3 = graph_builder.emit('ReduceSum', [sum_3_mul], attrs={'reduce_axis': norm_axis, 'keep_dims': True})
# cal dx = dx1 + dx2 + dx3
# dx1 = dy * gamma * rsqrt_var_eps
# dx2 = sum1 * 2.0 / mean_cof * x_sub_mean
# dx3 = (1.0 / mean_cof) * (-1.0 * rsqrt_var_eps * sum2 + 1.0 / mean_cof * sum1 * sum3)
dx_1 = graph_builder.emit('Mul', [dy_mul_gamma, rsqrt_var_eps])
sum_1_mul_two = graph_builder.emit('Mul', [sum_1, const_two])
sum_1_mul_two_tmp = graph_builder.emit('Mul', [sum_1_mul_two, mean_cof])
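
The comment lines removed in this file held the NumPy reference for these partial sums; collected here as one hedged sketch (argument names are illustrative, not the expander's API):

```python
import numpy as np

def layernorm_grad_sums(dy, x, mean, var, gamma, epsilon, norm_axis, param_axis):
    # gradients w.r.t. gamma and beta, reduced over param_axis
    dg = np.sum(dy * np.power(var + epsilon, -0.5) * (x - mean),
                axis=tuple(param_axis), keepdims=True)
    db = np.sum(dy, axis=tuple(param_axis), keepdims=True)
    # partial sums over norm_axis that the emitted graph combines into dx
    sum1 = np.sum((-0.5) * dy * gamma * (x - mean) * np.power(var + epsilon, -1.5),
                  axis=tuple(norm_axis), keepdims=True)
    sum2 = np.sum(dy * gamma, axis=tuple(norm_axis), keepdims=True)
    sum3 = np.sum(-2.0 * (x - mean), axis=tuple(norm_axis), keepdims=True)
    return dg, db, sum1, sum2, sum3
```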

View File

@@ -38,8 +38,7 @@ def expand_reducemean(expand_info):
x_shape = input_x.shape
graph_scope.set_input(input_x)
# cal reduce_mean
# when axis is None, reduce over all axes
# cal reduce_mean, when axis is None, reduce over all axes
all_shape = 1.0
real_axis = []
if not axis:
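
A quick NumPy illustration of the axis = None convention this branch handles (hypothetical values):

```python
import numpy as np

x = np.ones((2, 3, 4))
# axis=None means the mean is taken over every axis
assert np.mean(x, axis=None) == np.mean(x, axis=(0, 1, 2))
```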

View File

@@ -19,7 +19,7 @@ from mindspore._extends.graph_kernel.model import model_builder as builder
def expand_sqrtgrad(expand_info):
"""SqrtGrad expander"""
# cal formula is:
# sqrt_grad(x, dout) = dout / (2 * x)
# sqrt_grad(x, dout) is dout / (2 * x)
# get op info.
input_desc_0 = expand_info['input_desc'][0]
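
A one-line sketch of the formula, assuming (as is usual for grad kernels) that x is the forward output sqrt(t), so 1 / (2 * sqrt(t)) equals 1 / (2 * x):

```python
def sqrt_grad(x, dout):
    # d sqrt(t)/dt = 1 / (2 * sqrt(t)) = 1 / (2 * x)
    return dout / (2.0 * x)
```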

View File

@@ -21,7 +21,6 @@ ONE = 1.0
def expand_tanhgrad(expand_info):
"""TanhGrad expander"""
# tanh_grad(y, dy) = dy * (1- y * y)
# get op info.
input_desc_0 = expand_info['input_desc'][0]
input_desc_1 = expand_info['input_desc'][1]
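
Likewise for TanhGrad, where y is the forward tanh output; a minimal sketch:

```python
def tanh_grad(y, dy):
    # d tanh(t)/dt = 1 - tanh(t)^2 = 1 - y * y
    return dy * (1.0 - y * y)
```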

View File

@@ -156,7 +156,6 @@ AnfNodePtr AdjustAllReduceMulAdd::operator()(const OptimizerPtr &, const AnfNode
if (x_shape != z_shape) {
// AddN requires x_ and z_ have the same shape.
// If broadcasting TensorAdd is supported, then this can be used
// AnfNodePtr add = NewCNode({NewValueNode(prim::kPrimTensorAdd), z_, x_}, fg);
return nullptr;
}
AnfNodePtr tuple = NewCNode({make_tuple_op_node, z_, x_}, fg);

View File

@@ -62,13 +62,6 @@ Status NodeRemovalPass::RemovalNodes::Visit(std::shared_ptr<RepeatNode> node, bo
// Perform ShuffleNode removal check.
Status NodeRemovalPass::RemovalNodes::Visit(std::shared_ptr<ShuffleNode> node, bool *modified) {
*modified = false;
#if 0
// If we are in a cache descendant tree, then this shuffle op needs to be removed
if (is_caching_) {
MS_LOG(INFO) << "Shuffle under an operation with cache is identified for removal.";
nodes_to_remove_.push_back(std::static_pointer_cast<DatasetNode>(node));
}
#endif
return Status::OK();
}

View File

@@ -326,7 +326,6 @@ AbstractBasePtr InferImplUnsortedSegmentMax(const AnalysisEnginePtr &, const Pri
} else {
num_segments_value = *static_cast<int32_t *>(num_segments_tensor->data_c());
}
// num_segments_value = *static_cast<int64_t *>(num_segments_tensor->data_c());
} else if (args_spec_list[2]->isa<AbstractScalar>()) { // num_segments is Scalar
auto num_segments = CheckArg<AbstractScalar>(op_name, args_spec_list, 2);
if (num_segments->GetTypeTrack()->type_id() == TypeId::kNumberTypeInt64) {

View File

@@ -483,7 +483,6 @@ AbstractBasePtr InferImplCast(const AnalysisEnginePtr &, const PrimitivePtr &pri
const AbstractBasePtrList &args_spec_list) {
const std::string op_name = primitive->name();
// GPU has 2 inputs while tbe has 1 only. Skip CheckArgsSize.
// CheckArgsSize(op_name, args_spec_list, 1);
auto input_x = CheckArg<AbstractTensor>(op_name, args_spec_list, 0);
MS_EXCEPTION_IF_NULL(input_x);
MS_EXCEPTION_IF_NULL(input_x->shape());

View File

@@ -195,7 +195,6 @@ Status AlbumOp::LoadImageTensor(const std::string &image_file_path, uint32_t col
// load empty tensor since image is not jpg
MS_LOG(INFO) << "Bin file found" << image_file_path << ".";
RETURN_IF_NOT_OK(Tensor::CreateFromFile(image_file_path, tensor));
// row->push_back(std::move(image));
return Status::OK();
}
@@ -270,7 +269,6 @@ Status AlbumOp::LoadStringArrayTensor(const nlohmann::json &json_obj, uint32_t c
MS_LOG(INFO) << "String array label found: " << data << ".";
// TensorPtr label;
RETURN_IF_NOT_OK(Tensor::CreateFromVector(data, tensor));
// row->push_back(std::move(label));
return Status::OK();
}
@@ -281,7 +279,6 @@ Status AlbumOp::LoadStringTensor(const nlohmann::json &json_obj, uint32_t col_nu
MS_LOG(INFO) << "String label found: " << data << ".";
TensorPtr label;
RETURN_IF_NOT_OK(Tensor::CreateScalar<std::string>(data, tensor));
// row->push_back(std::move(label));
return Status::OK();
}
@@ -310,7 +307,6 @@ Status AlbumOp::LoadIntArrayTensor(const nlohmann::json &json_obj, uint32_t col_
RETURN_STATUS_UNEXPECTED("Invalid data, column type is neither int32 nor int64, it is " +
data_schema_->column(col_num).type().ToString());
}
// row->push_back(std::move(label));
return Status::OK();
}
@@ -339,7 +335,6 @@ Status AlbumOp::LoadFloatArrayTensor(const nlohmann::json &json_obj, uint32_t co
RETURN_STATUS_UNEXPECTED("Invalid data, column type is neither float32 nor float64, it is " +
data_schema_->column(col_num).type().ToString());
}
// row->push_back(std::move(float_array));
return Status::OK();
}
@@ -347,7 +342,6 @@ Status AlbumOp::LoadIDTensor(const std::string &file, uint32_t col_num, TensorPt
if (data_schema_->column(col_num).type() == DataType::DE_STRING) {
// TensorPtr id;
RETURN_IF_NOT_OK(Tensor::CreateScalar<std::string>(file, tensor));
// row->push_back(std::move(id));
return Status::OK();
}
// hack to get the file name without extension, the 1 is to get rid of the backslash character
@@ -355,7 +349,6 @@ Status AlbumOp::LoadIDTensor(const std::string &file, uint32_t col_num, TensorPt
// TensorPtr id;
RETURN_IF_NOT_OK(Tensor::CreateScalar<int64_t>(image_id, tensor));
MS_LOG(INFO) << "File ID " << image_id << ".";
// row->push_back(std::move(id));
return Status::OK();
}
@@ -363,7 +356,6 @@ Status AlbumOp::LoadEmptyTensor(uint32_t col_num, TensorPtr *tensor) {
// hack to get the file name without extension, the 1 is to get rid of the backslash character
// TensorPtr empty_tensor;
RETURN_IF_NOT_OK(Tensor::CreateEmpty(TensorShape({0}), data_schema_->column(col_num).type(), tensor));
// row->push_back(std::move(empty_tensor));
return Status::OK();
}
@@ -382,7 +374,6 @@ Status AlbumOp::LoadFloatTensor(const nlohmann::json &json_obj, uint32_t col_num
RETURN_IF_NOT_OK(Tensor::CreateScalar<float>(data, tensor));
MS_LOG(INFO) << "float found: " << json_obj << ".";
}
// row->push_back(std::move(float_tensor));
return Status::OK();
}
@@ -398,7 +389,6 @@ Status AlbumOp::LoadIntTensor(const nlohmann::json &json_obj, uint32_t col_num,
RETURN_IF_NOT_OK(Tensor::CreateScalar<int32_t>(data, tensor));
MS_LOG(INFO) << "int32 found: " << json_obj << ".";
}
// row->push_back(std::move(int_tensor));
return Status::OK();
}
@@ -410,7 +400,6 @@ Status AlbumOp::LoadIntTensor(const nlohmann::json &json_obj, uint32_t col_num,
Status AlbumOp::LoadTensorRow(row_id_type row_id, const std::string &file,
std::unordered_map<std::string, std::shared_ptr<Tensor>> *map_row) {
// testing here is to just print out file path
// (*row) = TensorRow(row_id, {});
MS_LOG(INFO) << "Image row file: " << file << ".";
std::ifstream file_handle(folder_path_ + file);

View File

@@ -57,8 +57,6 @@ void SoftmaxGrad(const float *input_ptr, const float *yt_ptr, float *output_ptr,
*(output_ptr + outter_offset + k * N + j) += a * sum_data[j];
}
}
// gemm(0, 0, M, N, K, -1, sum_mul, K, sum_data, N, 1, &output_ptr[outter_offset], N);
}
for (int i = 0; i < ele_size; i++) {

View File

@@ -91,7 +91,6 @@ int OneHot::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outpu
int axis = GetAxis();
// indices, depth, on_value, off_value
// indices, depth, on_off_value (contains 2 values);
if (inputs.size() != kOneHotInputNum && inputs.size() != kOneHotInputNumOpt) {
MS_LOG(ERROR) << "OneHot got inputs num " << inputs.size() << ", should be " << kOneHotInputNum << " or "
<< kOneHotInputNumOpt;

View File

@@ -43,7 +43,6 @@ class SparseToDenseOpenCLKernel : public OpenCLKernel {
int InitOutputToDefault();
private:
// bool IndicesIsScalar{false};
bool enable_fp16_{false};
float default_{0.0f};
float weight_scalar_{0.f};

View File

@@ -337,7 +337,6 @@ int NetTrain::RunExportedNet() {
}
context->thread_num_ = flags_->num_threads_;
// context->enable_float16_ = flags_->enable_fp16_;
session_ = session::TrainSession::CreateSession(flags_->export_file_.c_str(), context.get());
if (session_ == nullptr) {
MS_LOG(ERROR) << "CreateSession failed while running ", model_name.c_str();
@@ -406,7 +405,6 @@ int NetTrain::RunNetTrain() {
context->device_list_[0].device_info_.cpu_device_info_.cpu_bind_mode_ = NO_BIND;
}
context->thread_num_ = flags_->num_threads_;
// context->enable_float16_ = flags_->enable_fp16_;
session_ = session::TrainSession::CreateSession(flags_->model_file_.c_str(), context.get());
if (session_ == nullptr) {
MS_LOG(ERROR) << "CreateSession failed while running ", model_name.c_str();

View File

@@ -901,7 +901,8 @@ class Gelu(GraphKernel):
def _math_four_compute(data_x):
"""
return: math_four equals 2*np.sqrt(2 / np.pi)*(x + 0.044715*tf.pow(x, 3))
Return:
math_four equals 2*np.sqrt(2 / np.pi)*(x + 0.044715*tf.pow(x, 3))
"""
datax_pow = data_x * data_x * data_x
datax_muls_c = self.mul(datax_pow, self.CSVALUE)
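
With the parentheses untangled, the docstring's expression is twice the inner term of the tanh approximation; a hedged NumPy sketch (math_four_compute here is a stand-in name, not the class method):

```python
import numpy as np

def math_four_compute(x):
    # math_four = 2 * sqrt(2 / pi) * (x + 0.044715 * x^3)
    return 2.0 * np.sqrt(2.0 / np.pi) * (x + 0.044715 * np.power(x, 3))
```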

View File

@@ -431,8 +431,8 @@ class SampledSoftmaxLoss(_Loss):
# Sample the negative labels.
# sampled shape: [num_sampled] tensor
# true_expected_count shape = [batch_size, 1] tensor
# sampled_expected_count shape = [num_sampled] tensor
# true_expected_count shape is [batch_size, 1] tensor
# sampled_expected_count shape is [num_sampled] tensor
if sampled_values is None:
sampled_values = self.sampler(labels)
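
The shape comments spelled out with hypothetical sizes (batch_size=32, num_sampled=64; the zeros are placeholders):

```python
import numpy as np

batch_size, num_sampled = 32, 64
sampled = np.zeros(num_sampled)                  # [num_sampled]
true_expected_count = np.zeros((batch_size, 1))  # [batch_size, 1]
sampled_expected_count = np.zeros(num_sampled)   # [num_sampled]
```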

View File

@@ -308,7 +308,6 @@ class Categorical(Distribution):
broadcast_shape_tensor = logits * value
broadcast_shape = self.shape(broadcast_shape_tensor)
# broadcast_shape (N, C)
num_classes = broadcast_shape[-1]
label_shape = broadcast_shape[:-1]
@@ -373,7 +372,6 @@ class Categorical(Distribution):
broadcast_shape_tensor = probs * value
broadcast_shape = self.shape(broadcast_shape_tensor)
# broadcast_shape (N, C)
num_classes = broadcast_shape[-1]
label_shape = broadcast_shape[:-1]
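
Both branches slice the broadcast shape (N, C) the same way; for example, with hypothetical sizes:

```python
broadcast_shape = (8, 10)            # (N, C)
num_classes = broadcast_shape[-1]    # C == 10
label_shape = broadcast_shape[:-1]   # (8,)
```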

View File

@@ -276,7 +276,6 @@ def tensor_expand_dims(data, tuple_index):
def tensor_index_by_tuple(data, tuple_index):
"""Tensor getitem by tuple of various types with None"""
# data, tuple_index_without_none = tensor_expand_dims(data, tuple_index)
tuple_index_without_none = tuple_index
if len(tuple_index) == 1:
return data[tuple_index_without_none[0]]

View File

@@ -743,9 +743,7 @@ class MatMul(PrimitiveWithInfer):
# validate whether the last two dims satisfy matrix multiply
x1_last = x1[-2:]
x2_last = x2[-2:]
# x1_col = x1_last[1] if (not transpose_a) else x1_last[0]
x1_col = x1_last[not self.transpose_a]
# x2_row = x2_last[0] if (not transpose_b) else x2_last[1]
x2_row = x2_last[self.transpose_b]
if x1_col != x2_row:
raise ValueError(f'For \'{cls_name}\' evaluator shapes of inputs can not do this operator,'
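
The removed comments documented the boolean-indexing trick used here: a Python bool indexes a two-element sequence as 0 or 1. A sketch with hypothetical shapes:

```python
transpose_a, transpose_b = False, True  # hypothetical flags
x1_last, x2_last = [4, 5], [6, 5]       # trailing two dims of x1 and x2
x1_col = x1_last[not transpose_a]       # not False -> index 1 -> 5
x2_row = x2_last[transpose_b]           # True -> index 1 -> 5
assert x1_col == x2_row                 # inner dimensions match
```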