forked from mindspore-Ecosystem/mindspore
fix code check
parent 164896b7b8
commit a090ebdf83
@@ -37,11 +37,11 @@ Status SomasSolverCore::MemoryAllocationSolver() {
   auto start = std::chrono::system_clock::now();
   Status retval = SUCCESS;
   size_t best = SIZE_MAX;
-  size_t best_timing = SIZE_MAX;
   if (all_) {  // loop over all heuristics
     FittingType best_branching = kBest;
     SortingType best_sorting = kGreaterSizeSmallerIndex;
     AlgorithmType best_algorithm = kManyObjects;
+    int64_t best_timing = INT64_MAX;
     uint32_t best_sol = 0;
     size_t worst = 0;
     BuildBlocks();

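In the hunk above, the best_timing declaration appears to move inside the if (all_) block and switches from size_t/SIZE_MAX to int64_t/INT64_MAX, presumably so the sentinel matches the signed timing values it is compared against. A minimal sketch of that pattern, with hypothetical names rather than the MindSpore ones:

#include <cstdint>
#include <limits>
#include <vector>

// Keep the "best so far" tracker in the narrowest scope and give it the same
// signed type as the values it tracks, instead of a function-wide size_t.
int64_t PickFastest(const std::vector<int64_t> &timings) {
  int64_t best_timing = std::numeric_limits<int64_t>::max();  // plays the role of INT64_MAX
  for (int64_t t : timings) {
    if (t < best_timing) {
      best_timing = t;
    }
  }
  return best_timing;
}
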
@@ -28,6 +28,8 @@ namespace ascend {
 constexpr float kMSMemoryRatio = 0.9375;  // 15/16
 constexpr float kReservedMemoryRatio = 0.0625;  // 1/16
 constexpr float kHalfRatio = 0.5;
+// The Ascend max available device memory is 32GB.
+constexpr float kAscendMaxDeviceMemory = 32;
 
 bool AscendMemAdapter::Initialize() {
   if (initialized_) {

@@ -158,7 +160,7 @@ uint8_t *AscendMemAdapter::MallocDynamicDevMem(size_t size, const std::string &t
 
 void AscendMemAdapter::ResetDynamicMemory() { cur_dynamic_mem_offset_ = 0; }
 
-std::string AscendMemAdapter::DevMemStatistics() {
+std::string AscendMemAdapter::DevMemStatistics() const {
   std::ostringstream oss;
   oss << "\nDevice HBM memory size: " << device_hbm_total_size_ / kMBToByte << "M";
   oss << "\nMindSpore Used memory size: " << ms_used_hbm_size_ / kMBToByte << "M";

@@ -170,7 +172,7 @@ std::string AscendMemAdapter::DevMemStatistics() {
   return oss.str();
 }
 
-std::string AscendMemAdapter::DevMemDetailInfo() {
+std::string AscendMemAdapter::DevMemDetailInfo() const {
   std::ostringstream oss;
   oss << "\nMemory Detail Info:";
   oss << "\nStatic Memory Blocks:";

@@ -192,7 +194,7 @@ size_t AscendMemAdapter::GetDeviceMemSizeFromContext() {
   MS_EXCEPTION_IF_NULL(context);
   size_t size_from_context;
   auto max_device_memory = context->get_param<float>(MS_CTX_MAX_DEVICE_MEMORY);
-  if (max_device_memory != mindspore::kDefaultMaxDeviceMemory) {
+  if (max_device_memory <= kAscendMaxDeviceMemory) {
     MS_LOG(INFO) << "context max_device_memory:" << max_device_memory;
     size_from_context = FloatToSize(max_device_memory * kGBToByte);
   } else {

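For the arithmetic in this hunk: max_device_memory is read from the context as a float number of gigabytes, checked against the newly added 32 GB Ascend limit, and converted to bytes with FloatToSize(max_device_memory * kGBToByte). A self-contained sketch of that conversion (constant values and helper names are assumptions for illustration, not the MindSpore definitions):

#include <algorithm>
#include <cstddef>

constexpr float kAscendMaxDeviceMemoryGB = 32;             // assumed HBM upper bound, in GB
constexpr std::size_t kBytesPerGB = std::size_t{1} << 30;  // 1 GB = 1073741824 bytes

std::size_t DeviceMemSizeFromGB(float max_device_memory_gb) {
  // Clamp to the cap in this sketch; the real code branches into an else that is not shown here.
  max_device_memory_gb = std::min(max_device_memory_gb, kAscendMaxDeviceMemoryGB);
  // e.g. 30.5 GB -> 30.5 * 1073741824 ≈ 32749125632 bytes
  return static_cast<std::size_t>(max_device_memory_gb * kBytesPerGB);
}
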
@@ -45,8 +45,8 @@ class AscendMemAdapter {
   [[nodiscard]] uint64_t FreeDevMemSize() const { return static_mem_offset_ - max_dynamic_mem_offset_; }
   [[nodiscard]] uint64_t MaxHbmSizeForMs() const { return max_available_ms_hbm_size_; }
   [[nodiscard]] uint64_t GetMsUsedHbmSize() const { return ms_used_hbm_size_; }
-  std::string DevMemStatistics();
-  std::string DevMemDetailInfo();
+  std::string DevMemStatistics() const;
+  std::string DevMemDetailInfo() const;
 
  private:
   struct MemoryBlock {

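The header change above, together with the two .cc hunks earlier, const-qualifies DevMemStatistics and DevMemDetailInfo: the methods only format member data, and the declaration and the definition have to carry the qualifier together. A toy illustration of the same change (not the AscendMemAdapter class):

#include <cstdint>
#include <sstream>
#include <string>

class MemReporter {
 public:
  // const: the method only reads members, so it can also be called on a const object.
  std::string Statistics() const {
    std::ostringstream oss;
    oss << "used: " << used_ / (1024 * 1024) << "M";
    return oss.str();
  }

 private:
  uint64_t used_ = 0;
};
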
@@ -19,7 +19,7 @@
 
 namespace mindspore {
 namespace kernel {
-bool HcomAllGatherKernel::Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
+bool HcomAllGatherKernel::Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
                                  const std::vector<AddressPtr> &outputs, void *stream_ptr) {
   MS_LOG(DEBUG) << "HcomAllGather launch";
   if (inputs.empty() || outputs.empty() || hccl_data_type_list_.empty()) {

@@ -50,7 +50,7 @@ bool HcomUtil::GetKernelOutputShape(const AnfNodePtr &anf_node, vector<vector<si
   size_t output_num = common::AnfAlgo::GetOutputTensorNum(anf_node);
   for (size_t i = 0; i < output_num; ++i) {
     std::vector<size_t> shape_i = AnfAlgo::GetOutputDeviceShape(anf_node, i);
-    hccl_kernel_output_shape_list->emplace_back(shape_i);
+    (void)hccl_kernel_output_shape_list->emplace_back(shape_i);
   }
 
   return true;

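This hunk shows the pattern that most of the commit applies: calls whose result is deliberately ignored (emplace_back, emplace, std::transform, std::copy, even iter++) are prefixed with (void) so the static checker stops flagging the discarded return value. A small sketch of the idiom with a toy container (hypothetical example, not the HcomUtil code):

#include <cstddef>
#include <vector>

void CollectShape(std::vector<std::vector<std::size_t>> *out, const std::vector<std::size_t> &shape) {
  // Since C++17, emplace_back returns a reference to the inserted element;
  // casting to void documents that the reference is intentionally unused.
  (void)out->emplace_back(shape);
}
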
@@ -116,7 +116,7 @@ std::vector<int64_t> TbeJsonUtils::GetOutputDeviceShapeForTbeBuild(const AnfNode
   auto format = AnfAlgo::GetOutputFormat(anf_node, real_idx);
   shape = AnfAlgo::GetOutputDeviceShapeForTbeBuild(anf_node, real_idx, format);
   if (shape.empty()) {
-    shape.emplace_back(1);
+    (void)shape.emplace_back(1);
   }
   return shape;
 }

@@ -137,13 +137,13 @@ void PrintInfo(const nlohmann::json &info, const std::string &job_name, const in
   auto message = GetJsonValue<std::string>(info, kMessage);
   if (level == 0) {
     MS_LOG(DEBUG) << "Job id:" << job_id << ", name :" << job_name << ", message:" << message;
-  } else if (level == INFO) {
+  } else if (level == static_cast<size_t>(INFO)) {
     MS_LOG(INFO) << "Job id:" << job_id << ", name :" << job_name << ", message:" << message;
-  } else if (level == WARNING) {
+  } else if (level == static_cast<size_t>(WARNING)) {
     MS_LOG(WARNING) << "Job id:" << job_id << ", name :" << job_name << ", message:" << message;
-  } else if (level == ERROR) {
+  } else if (level == static_cast<size_t>(ERROR)) {
     MS_LOG(ERROR) << "Job id:" << job_id << ", name :" << job_name << ", message:" << message;
-  } else if (level == EXCEPTION) {
+  } else if (level == static_cast<size_t>(EXCEPTION)) {
     ReportToErrorManager(message);
   }
 }

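Here the checker objects to comparing level (a size_t, judging by the casts) with plain enumerators, so each comparison gains an explicit static_cast instead of relying on implicit integer conversion. Reduced to a toy example:

#include <cstddef>

enum MessageLevel { INFO = 1, WARNING, ERROR, EXCEPTION };

bool IsWarning(std::size_t level) {
  // The explicit cast makes the unsigned-vs-enum comparison intentional and silences the check.
  return level == static_cast<std::size_t>(WARNING);
}
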
@@ -637,7 +637,7 @@ JsonNameMap TbeKernelCompileManager::GetAllSuccessFusion() {
     if (TbeUtils::SearchCache(json_name) != nullptr) {
       (void)success_fusion_ops_.emplace(scope_id, full_name);
     }
-    iter++;
+    (void)iter++;
   }
   return success_fusion_ops_;
 }

@@ -133,7 +133,7 @@ bool TbeKernelBroadCastSelecter::IsBroadCastSupportFracZ(SupportFormat *support_
   if (HasScalarInput()) {
     for (const auto &shape : input_shapes_) {
       if (IsScalarShape(shape)) {
-        input_support_format.emplace_back(kOpFormat_DEFAULT);
+        (void)input_support_format.emplace_back(kOpFormat_DEFAULT);
       } else {
         if (!Is4DShape(shape)) {
           return false;

@@ -141,7 +141,7 @@ bool TbeKernelBroadCastSelecter::IsBroadCastSupportFracZ(SupportFormat *support_
         if (shape[kChannelN] % kAlignmented16 != 0 || shape[kChannelC] % kAlignmented16 != 0) {
           return false;
         }
-        input_support_format.emplace_back(kOpFormat_FRAC_Z);
+        (void)input_support_format.emplace_back(kOpFormat_FRAC_Z);
       }
     }
   } else {

@@ -295,8 +295,8 @@ bool TbeKernelBroadCastSelecter::IsBroadCastSupportNDC1HWC0(SupportFormat *suppo
     input_support_format.assign(input_num_, kOpFormat_NDC1HWC0);
   }
   GenOutputSupportFormat(kOpFormat_NDC1HWC0, &output_support_format);
-  support_format->input_format.emplace_back(input_support_format);
-  support_format->output_format.emplace_back(output_support_format);
+  (void)support_format->input_format.emplace_back(input_support_format);
+  (void)support_format->output_format.emplace_back(output_support_format);
   return true;
 }
 

@@ -29,7 +29,7 @@ namespace mindspore {
 namespace opt {
 class StridedReadConvStridedWriteFusionPass : public FusionBasePass {
  public:
-  explicit StridedReadConvStridedWriteFusionPass(FusionIdAllocatorPtr idAllocator)
+  explicit StridedReadConvStridedWriteFusionPass(const FusionIdAllocatorPtr idAllocator)
       : FusionBasePass("StridedReadConvStridedWriteFusionPass", idAllocator) {}
   ~StridedReadConvStridedWriteFusionPass() override = default;
   void MatchSingleFusionPattern(const session::KernelGraph &kernel_graph, FusedNodeRecord *candidate_fusion) override;

@@ -47,7 +47,8 @@ std::vector<AnfNodePtr> SplitInputsForReduceScatter::InsertSplitForInput(const F
       min_shape[0] /= static_cast<int64_t>(rank_size_t);
       max_shape[0] /= static_cast<int64_t>(rank_size_t);
       ShapeVector shape_tmp;
-      std::transform(output_node_shape.begin(), output_node_shape.end(), std::back_inserter(shape_tmp), SizeToLong);
+      (void)std::transform(output_node_shape.begin(), output_node_shape.end(), std::back_inserter(shape_tmp),
+                           SizeToLong);
       std::vector<BaseShapePtr> shapes(rank_size_t, std::make_shared<abstract::Shape>(shape_tmp, min_shape, max_shape));
       common::AnfAlgo::SetOutputTypeAndDetailShape(dtypes, shapes, split.get());
     } else {

@@ -290,9 +290,9 @@ AnfNodePtr DynamicRnnGradFissionV2::AddLSTMInputGradNode(const FuncGraphPtr &fun
   std::vector<TypeId> split_types;
   std::vector<int64_t> size_split;
   for (size_t i = 0; i < num_split_x; ++i) {
-    split_shapes.emplace_back(split_c_dims);
-    split_types.emplace_back(kNumberTypeFloat32);
-    size_split.emplace_back(1);
+    (void)split_shapes.emplace_back(split_c_dims);
+    (void)split_types.emplace_back(kNumberTypeFloat32);
+    (void)size_split.emplace_back(1);
   }
   // Create lstm_split_c
   auto lstm_split_c = CreateLSTMSPlitV(func_graph, origin_input7, split_shapes, split_types, size_split, num_split_x);

@@ -51,7 +51,7 @@ void AddOutputs(const CNodePtr &cnode, const std::vector<size_t> &input_indices)
     AbstractBasePtrList origin_abstract_list = origin_abstract_tuple->elements();
     (void)std::copy(origin_abstract_list.begin(), origin_abstract_list.end(), std::back_inserter(abstract_list));
   } else {
-    abstract_list.emplace_back(origin_abstract);
+    (void)abstract_list.emplace_back(origin_abstract);
   }
 
   for (size_t i = 0; i < input_indices.size(); ++i) {

@@ -128,7 +128,6 @@ CNodePtr CreateSplitNode(const FuncGraphPtr &graph, const std::vector<AnfNodePtr
   MS_EXCEPTION_IF_NULL(num_split);
   if (split_input.empty()) {
     MS_LOG(EXCEPTION) << "The input is empty, can not create splitv node.";
-    return nullptr;
   }
   auto split_v = pass.NewCNode(split_input, graph);
   MS_EXCEPTION_IF_NULL(split_v);

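The dropped return nullptr; sits right after MS_LOG(EXCEPTION), which (as I understand MindSpore's logging macros) throws, so the statement was unreachable and the code check reports it as dead code. A toy reconstruction of why the line can go, using a [[noreturn]] helper in place of the macro:

#include <stdexcept>
#include <string>

[[noreturn]] void FailWith(const std::string &msg) { throw std::runtime_error(msg); }

int *CreateNode(bool input_empty) {
  static int node = 0;
  if (input_empty) {
    FailWith("The input is empty, can not create splitv node.");
    // return nullptr;  // never reached once FailWith() is [[noreturn]] -- this is what gets removed
  }
  return &node;
}
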
@@ -623,8 +622,8 @@ CNodePtr NeighborExchangeV2UnifyMindIR::CreateMiddleConcat(
 
     ++input_num_all;
     single_shape[concat_dim] += LongToSize(last_len);
-    max_shape[concat_dim] += (is_dynamic) ? last_len : 0;
-    min_shape[concat_dim] += (is_dynamic) ? last_len : 0;
+    max_shape[concat_dim] += (is_dynamic) ? static_cast<int64_t>(last_len) : 0;
+    min_shape[concat_dim] += (is_dynamic) ? static_cast<int64_t>(last_len) : 0;
   }
 
   std::vector<TypeId> concat_output_dtype = {common::AnfAlgo::GetOutputInferDataType(all_to_all_v_outputs[0], 0)};

@@ -75,7 +75,7 @@ const AnfNodePtr BatchToSpaceNDAttrUpdate::Process(const FuncGraphPtr &graph, co
   }
   auto crops = common::AnfAlgo::GetNodeAttr<std::vector<std::vector<int64_t>>>(node, kAttrCrops);
   if (crops.size() == kBlockShapeDimNum) {
-    crops.emplace(crops.begin(), std::vector<int64_t>{0, 0});
+    (void)crops.emplace(crops.begin(), std::vector<int64_t>{0, 0});
     common::AnfAlgo::SetNodeAttr(kAttrCrops, MakeValue(crops), node);
   }
   return node;

@@ -122,7 +122,8 @@ void DynamicKernel::InferShapeForNopNode(AnfNodePtr *input_node) {
   nop_road.push(*input_node);
 
   auto temp_node = input_node;
-  while (true) {
+  bool loop = true;
+  while (loop) {
     auto input_node_with_idx = common::AnfAlgo::GetPrevNodeOutput(*temp_node, 0);
     auto in_node = input_node_with_idx.first;
     MS_EXCEPTION_IF_NULL(in_node);

@@ -130,7 +131,7 @@ void DynamicKernel::InferShapeForNopNode(AnfNodePtr *input_node) {
       nop_road.push(in_node);
       temp_node = &in_node;
     } else {
-      break;
+      loop = false;
     }
   }
 

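The last two hunks rewrite the nop-node walk from while (true) plus break into a loop flag that is cleared instead, which is a common way to satisfy checkers that dislike constant loop conditions or break statements. The same control-flow rewrite in isolation (toy node type, not the MindSpore AnfNode graph):

struct Node {
  Node *prev = nullptr;
  bool is_nop = false;
};

// Walk backwards over consecutive nop nodes; n is assumed non-null.
Node *SkipNopNodes(Node *n) {
  bool loop = true;          // was: while (true)
  while (loop) {
    if (n->prev != nullptr && n->prev->is_nop) {
      n = n->prev;
    } else {
      loop = false;          // was: break;
    }
  }
  return n;
}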