Merge pull request !39647 from tanghuikang/clean_code
i-robot 2022-08-05 01:23:17 +00:00 committed by Gitee
commit 0aeef0019d
9 changed files with 43 additions and 33 deletions

View File

@@ -18,6 +18,7 @@
 #define MINDSPORE_CCSRC_FRONTEND_PARALLEL_OPS_INFO_OPS_UTILS_H_
 #include "mindspore/core/ops/core_ops.h"
+#include "mindspore/core/utils/log_adapter.h"
 namespace mindspore {
 namespace parallel {
@@ -582,9 +583,24 @@ constexpr char TENSOR_SCATTER_UPDATE[] = "TensorScatterUpdate";
 constexpr char MIN_MAX_UPDATE_PER_LAYER[] = "MinMaxUpdatePerLayer";
 constexpr char STACK[] = "Stack";
-constexpr size_t LAST_INDEX(size_t s) { return s - 1; }
-constexpr size_t SECOND_FROM_END(size_t s) { return s - 2; }
-constexpr size_t THIRD_FROM_END(size_t s) { return s - 3; }
+constexpr size_t LAST_INDEX(size_t s) {
+  if (s < 1) {
+    MS_LOG(EXCEPTION) << "Get last index for an empty size.";
+  }
+  return s - 1;
+}
+constexpr size_t SECOND_FROM_END(size_t s) {
+  if (s < 2) {
+    MS_LOG(EXCEPTION) << "Get second index from end for a size less than two.";
+  }
+  return s - 2;
+}
+constexpr size_t THIRD_FROM_END(size_t s) {
+  if (s < 3) {
+    MS_LOG(EXCEPTION) << "Get third index from end for a size less than three.";
+  }
+  return s - 3;
+}
 } // namespace parallel
 } // namespace mindspore
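
Note on this hunk: the old one-liners subtracted from an unsigned size without checking it, so LAST_INDEX(0) silently wrapped to SIZE_MAX; the new versions raise an exception first. A throw (here via MS_LOG(EXCEPTION)) is legal inside a constexpr function as long as the throwing branch is never reached during constant evaluation. A minimal self-contained sketch of the same pattern, with a plain throw standing in for the MS_LOG macro:

#include <cstddef>
#include <stdexcept>

// Sketch only: plain throw in place of MS_LOG(EXCEPTION).
constexpr std::size_t LastIndex(std::size_t s) {
  if (s < 1) {
    throw std::out_of_range("Get last index for an empty size.");
  }
  return s - 1;  // safe now: s >= 1, so the unsigned subtraction cannot wrap
}

static_assert(LastIndex(4) == 3, "still usable in constant expressions");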

View File

@@ -512,7 +512,7 @@ int64_t ReshapeInfo::GetSWCIndexByOutputLayoutWithZeroComm(const TensorLayout &output_layout)
   for (size_t i = 0; i < strategy_cost_.size(); ++i) {
     const auto &swc = strategy_cost_[i];
     if (swc->outputs_ptr[0].tensor_layout() == output_layout &&
-        swc->cost_list[0]->communication_without_parameter_ == 0.0) {
+        fabs(swc->cost_list[0]->communication_without_parameter_ - 0.0) < DBL_EPSILON) {
       (void)index_computation.emplace_back(SizeToLong(i), swc->cost_list[0]->computation_cost_);
     }
   }
@@ -555,7 +555,7 @@ int64_t ReshapeInfo::GetSWCIndexByInputLayoutWithZeroComm(const TensorLayout &input_layout)
   for (size_t i = 0; i < strategy_cost_.size(); ++i) {
     const auto &swc = strategy_cost_[i];
     if (swc->inputs_ptr[0].tensor_layout() == input_layout &&
-        swc->cost_list[0]->communication_without_parameter_ == 0.0) {
+        fabs(swc->cost_list[0]->communication_without_parameter_ - 0.0) < DBL_EPSILON) {
       (void)index_computation.emplace_back(SizeToLong(i), swc->cost_list[0]->computation_cost_);
     }
   }
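
Note on these two hunks: comparing a double against 0.0 with == trips most float-equality lint checks, which is presumably what this clean-code pass is addressing; fabs(x - 0.0) < DBL_EPSILON (from <cmath> and <cfloat>, assumed already included here) accepts anything within one unit of least precision of zero at scale 1.0. A hedged sketch of the predicate with an illustrative name:

#include <cfloat>  // DBL_EPSILON
#include <cmath>   // std::fabs

// Illustrative helper (not part of the patch): true when a cost is
// indistinguishable from zero at unit scale.
inline bool IsZeroCommCost(double cost) { return std::fabs(cost - 0.0) < DBL_EPSILON; }

Since communication_without_parameter_ is either exactly 0.0 or a positive sum of costs, behavior should be unchanged; the rewrite mainly makes the intent explicit to the linter.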

View File

@@ -18,6 +18,7 @@
 #include <algorithm>
 #include <functional>
 #include "plugin/device/cpu/hal/device/cpu_device_address.h"
+#include "utils/convert_utils_base.h"
 namespace mindspore {
 namespace kernel {
@@ -45,15 +46,12 @@ bool BucketizeCpuKernelMod::Launch(const std::vector<kernel::AddressPtr> &inputs,
   if (dtype_ != kNumberTypeInt32 && dtype_ != kNumberTypeInt64 && dtype_ != kNumberTypeFloat32 &&
       dtype_ != kNumberTypeFloat64) {
     MS_LOG(EXCEPTION) << "Input data type must int32 or int64 or float32 or float64, but got data type." << dtype_;
-    return false;
   }
   size_t input_sizes = input_shape_.size();
   size_t output_sizes = output_shape_.size();
   if (input_sizes != output_sizes) {
     MS_LOG(EXCEPTION) << "The tensor shape of input need be same with output.";
-    return false;
   }
-  // BucketizeCompute(inputs, outputs);
   switch (dtype_) {
     case kNumberTypeInt32:
       return BucketizeCompute<int32_t>(inputs, outputs);
@@ -81,21 +79,21 @@ bool BucketizeCpuKernelMod::BucketizeCompute(const std::vector<AddressPtr> &inputs,
     auto sharder_bucketize = [&](size_t start, size_t end) {
       for (size_t i = start; i < end; i++) {
         auto first_bigger_it = std::upper_bound(boundaries_data.begin(), boundaries_data.end(), input_data[i]);
-        output_data[i] = first_bigger_it - boundaries_data.begin();
+        output_data[i] = LongToInt(first_bigger_it - boundaries_data.begin());
       }
     };
     ParallelLaunchAutoSearch(sharder_bucketize, data_num_, this, &parallel_search_info_);
   } else {
     for (size_t i = 0; i < data_num_; i++) {
       auto first_bigger_it = std::upper_bound(boundaries_data.begin(), boundaries_data.end(), input_data[i]);
-      output_data[i] = first_bigger_it - boundaries_data.begin();
+      output_data[i] = LongToInt(first_bigger_it - boundaries_data.begin());
     }
   }
   return true;
 }
 std::vector<KernelAttr> BucketizeCpuKernelMod::GetOpSupport() {
-  static std::vector<KernelAttr> support_list = {
+  static const std::vector<KernelAttr> support_list = {
     KernelAttr().AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeInt32),
     KernelAttr().AddInputAttr(kNumberTypeInt64).AddOutputAttr(kNumberTypeInt32),
     KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeInt32),
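
Note on this hunk: std::upper_bound returns an iterator, and the iterator difference is a signed ptrdiff_t (64-bit on common platforms), while the output tensor holds 32-bit indices; LongToInt, provided by the newly included utils/convert_utils_base.h, makes that narrowing explicit instead of leaving an implicit conversion. An illustrative stand-in, assuming the real helper behaves as a range-checked cast:

#include <climits>
#include <cstdint>
#include <stdexcept>

// Assumed behavior of LongToInt: reject values outside int32_t range
// instead of silently truncating.
inline int32_t CheckedLongToInt(int64_t v) {
  if (v < INT_MIN || v > INT_MAX) {
    throw std::out_of_range("int64_t value does not fit in int32_t");
  }
  return static_cast<int32_t>(v);
}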

View File

@@ -52,7 +52,7 @@ bool CheckNumericsCpuKernelMod::Launch(const std::vector<kernel::AddressPtr> &inputs,
 }
 template <typename T>
-void CheckNumericsCpuKernelMod::CheckNanOrInf(T value) {
+void CheckNumericsCpuKernelMod::CheckNanOrInf(T value) const {
   if (std::isnan(value)) {
     MS_LOG(EXCEPTION) << ": Tensor had NaN values";
   } else if (std::isinf(value)) {
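
Note on this hunk: CheckNanOrInf only reads its argument, so it can be const-qualified; the compiler then guarantees the method mutates no member state and allows it to be called through const references to the kernel. A minimal sketch of the idea (not MindSpore code):

#include <cmath>
#include <stdexcept>

struct NumericChecker {
  // const: inspects the value, promises not to modify the checker.
  void CheckNanOrInf(double value) const {
    if (std::isnan(value)) {
      throw std::runtime_error("Tensor had NaN values");
    } else if (std::isinf(value)) {
      throw std::runtime_error("Tensor had Inf values");
    }
  }
};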

View File

@@ -38,7 +38,7 @@ class CheckNumericsCpuKernelMod : public DeprecatedNativeCpuKernelMod {
               const std::vector<AddressPtr> &outputs) override;
   std::vector<KernelAttr> GetOpSupport() override {
-    static std::vector<KernelAttr> support_list = {
+    static const std::vector<KernelAttr> support_list = {
       KernelAttr().AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeFloat16),
       KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
       KernelAttr().AddInputAttr(kNumberTypeFloat64).AddOutputAttr(kNumberTypeFloat64)};
@@ -50,7 +50,7 @@ class CheckNumericsCpuKernelMod : public DeprecatedNativeCpuKernelMod {
   void LaunchKernelFloat(const std::vector<AddressPtr> &inputs, const std::vector<kernel::AddressPtr> &outputs);
   template <typename T>
-  void CheckNanOrInf(T value);
+  void CheckNanOrInf(T value) const;
   std::map<TypeId, size_t> dtype_map_ = {
     {kNumberTypeFloat16, sizeof(float16)}, {kNumberTypeFloat32, sizeof(float)}, {kNumberTypeFloat64, sizeof(double)}};
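
Note on these hunks: the header mirrors the const qualifier added in the .cc file, and the support list gains const as well. A function-local static const is initialized exactly once (thread-safely since C++11 "magic statics") and can never be mutated afterwards, which fits a fixed capability table. Sketch of the pattern with illustrative names:

#include <vector>

// Built on first use, immutable thereafter; every later call reuses it.
const std::vector<int> &SupportedBitWidths() {
  static const std::vector<int> kWidths = {16, 32, 64};
  return kWidths;
}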

View File

@@ -25,7 +25,7 @@ constexpr size_t kCoalesceOutputsNum = 3;
 constexpr char kKernelName[] = "Coalesce";
 } // namespace
-void CoalesceCpuKernelMod::CheckParam(const CNodePtr &kernel_node) {
+void CoalesceCpuKernelMod::CheckParam(const CNodePtr &kernel_node) const {
   size_t input_num = common::AnfAlgo::GetInputTensorNum(kernel_node);
   CHECK_KERNEL_INPUTS_NUM(input_num, kCoalesceInputsNum, kKernelName);
   size_t output_num = common::AnfAlgo::GetOutputTensorNum(kernel_node);
@@ -67,12 +67,12 @@ void CoalesceCpuKernelMod::InitKernel(const CNodePtr &kernel_node) {
   node_wpt_ = kernel_node;
   dtype_ = AnfAlgo::GetInputDeviceDataType(kernel_node, 1);
   auto indices_shape = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
-  values_size_ = indices_shape[1];
-  shape_size_ = indices_shape[0];
+  values_size_ = IntToSize(indices_shape[1]);
+  shape_size_ = IntToSize(indices_shape[0]);
   is_need_retrieve_output_shape_ = true;
 }
-void CoalesceCpuKernelMod::Check(const std::vector<kernel::AddressPtr> &inputs) {
+void CoalesceCpuKernelMod::Check(const std::vector<kernel::AddressPtr> &inputs) const {
   auto x_indices_addr = reinterpret_cast<int64_t *>(inputs[0]->addr);
   auto x_shape_addr = reinterpret_cast<int64_t *>(inputs[2]->addr);
   for (size_t i = 0; i < values_size_; i++) {
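
Note on these hunks: the inferred shape entries are signed integers, and assigning one straight into the unsigned size_t members would silently wrap a negative dimension into a huge count; IntToSize (assumed here to be a checked conversion from MindSpore's conversion utilities) surfaces that case instead. A tiny demonstration of the hazard being guarded against:

#include <cstddef>
#include <cstdint>
#include <iostream>

int main() {
  int64_t bad_dim = -1;  // a malformed shape entry
  auto wrapped = static_cast<std::size_t>(bad_dim);
  std::cout << wrapped << "\n";  // 18446744073709551615 on a 64-bit target
  return 0;
}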

View File

@@ -44,8 +44,8 @@ class CoalesceCpuKernelMod : public DeprecatedNativeCpuKernelMod {
  private:
   template <typename T>
   void LaunchKernel(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &outputs);
-  void CheckParam(const CNodePtr &kernel_node);
-  void Check(const std::vector<AddressPtr> &inputs);
+  void CheckParam(const CNodePtr &kernel_node) const;
+  void Check(const std::vector<AddressPtr> &inputs) const;
   TypeId dtype_{kTypeUnknown};
   size_t values_size_{0};
   size_t shape_size_{0};

View File

@@ -27,18 +27,12 @@
 namespace {
 constexpr size_t kComplexInputsNum = 2;
 constexpr size_t kComplexOutputsNum = 1;
-#define COMPLEX_COMPUTE_CASE(DTYPE, TYPE)        \
-  case (DTYPE): {                                \
-    ret = LaunchKernel<TYPE>(inputs, outputs);   \
-    break;                                       \
-  }
 } // namespace
 namespace mindspore {
 namespace kernel {
 bool ComplexCpuKernelMod::Init(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
-                               const std::vector<KernelTensorPtr> &outputs) {
+                               const std::vector<KernelTensorPtr> & /* outputs */) {
   MS_EXCEPTION_IF_NULL(base_operator);
   kernel_name_ = base_operator->name();
   input1_dtype_ = inputs[0]->GetDtype();
@@ -52,8 +46,12 @@ bool ComplexCpuKernelMod::Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
   CHECK_KERNEL_INPUTS_NUM(inputs.size(), kComplexInputsNum, kernel_name_);
   CHECK_KERNEL_OUTPUTS_NUM(outputs.size(), kComplexOutputsNum, kernel_name_);
   switch (input1_dtype_) {
-    COMPLEX_COMPUTE_CASE(kNumberTypeFloat32, float)
-    COMPLEX_COMPUTE_CASE(kNumberTypeFloat64, double)
+    case kNumberTypeFloat32:
+      ret = LaunchKernel<float>(inputs, outputs);
+      break;
+    case kNumberTypeFloat64:
+      ret = LaunchKernel<double>(inputs, outputs);
+      break;
     default:
       ret = false;
       MS_EXCEPTION(TypeError) << "For Complex, unsupported input data type: " << TypeIdToString(input1_dtype_) << " .";
@@ -77,7 +75,7 @@ bool ComplexCpuKernelMod::LaunchKernel(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &outputs) {
 }
 std::vector<KernelAttr> ComplexCpuKernelMod::GetOpSupport() {
-  static std::vector<KernelAttr> support_list = {
+  static const std::vector<KernelAttr> support_list = {
     KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeComplex64),
     KernelAttr()
       .AddInputAttr(kNumberTypeFloat64)
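
Note on these hunks: expanding COMPLEX_COMPUTE_CASE into plain case labels trades five lines of macro for control flow that a reader, a debugger, and static analysis can actually see; the macro hid both the ret assignment and the break. A self-contained sketch of the resulting dispatch shape, with illustrative names:

enum class Dtype { kFloat32, kFloat64 };

template <typename T>
bool LaunchKernelSketch() { return true; }  // stand-in for LaunchKernel<T>

bool Dispatch(Dtype d) {
  bool ret = false;
  switch (d) {
    case Dtype::kFloat32:
      ret = LaunchKernelSketch<float>();
      break;
    case Dtype::kFloat64:
      ret = LaunchKernelSketch<double>();
      break;
  }
  return ret;
}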

View File

@@ -17,7 +17,6 @@
 #ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_COMPLEX_CPU_KERNEL_H
 #define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_COMPLEX_CPU_KERNEL_H
-#include <cmath>
 #include <vector>
 #include <tuple>
 #include <map>
@@ -35,7 +34,7 @@ class ComplexCpuKernelMod : public NativeCpuKernelMod {
   ~ComplexCpuKernelMod() override = default;
   bool Init(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
-            const std::vector<KernelTensorPtr> &outputs) override;
+            const std::vector<KernelTensorPtr> & /* outputs */) override;
   bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
               const std::vector<AddressPtr> &outputs) override;
@@ -45,7 +44,6 @@ class ComplexCpuKernelMod : public NativeCpuKernelMod {
  private:
   template <typename T>
   bool LaunchKernel(const std::vector<kernel::AddressPtr> &inputs, const std::vector<kernel::AddressPtr> &outputs);
-  string kernel_name_;
   TypeId input1_dtype_{kTypeUnknown};
   TypeId input2_dtype_{kTypeUnknown};
 };
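
Note on this hunk: the removed string kernel_name_ evidently duplicated a member the kernel inherits from its base class, since Init() in the .cc file still assigns kernel_name_ after this deletion; a derived-class field with the same name hides the base one, so the two copies could diverge. The dropped <cmath> include is presumably just unused in this header. Minimal sketch of the shadowing hazard:

#include <string>

struct KernelModBase {
  std::string kernel_name_;
};

struct SomeKernelMod : KernelModBase {
  // std::string kernel_name_;  // would shadow KernelModBase::kernel_name_,
                                // so base-class code would read a stale name
};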