fix_coding_style_check_warning

c00425699 2020-04-15 16:15:45 +08:00
parent 9c9c709159
commit 8765810528
38 changed files with 15 additions and 59 deletions

View File

@@ -370,6 +370,5 @@ void DeviceManager::Clear() {
stage_devices_.clear();
gm_.Clear();
}
} // namespace parallel
} // namespace mindspore

View File

@@ -29,7 +29,6 @@
namespace mindspore {
namespace parallel {
DeviceMatrix::DeviceMatrix(int32_t rank, RankList dev_list, Shape dev_shape)
: rank_(rank), dev_list_(std::move(dev_list)), dev_shape_(std::move(dev_shape)) {
if (!std::any_of(dev_list_.begin(), dev_list_.end(), [rank](int32_t a) { return a == rank; })) {
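
The std::any_of call above is a plain membership test: construction fails unless rank appears in dev_list_. A minimal self-contained sketch of the same check, with a hypothetical rank list:

#include <algorithm>
#include <cstdint>
#include <vector>

int main() {
  const std::vector<int32_t> dev_list = {0, 1, 2, 3};  // hypothetical rank list
  const int32_t rank = 2;
  const bool found = std::any_of(dev_list.begin(), dev_list.end(),
                                 [rank](int32_t a) { return a == rank; });
  return found ? 0 : 1;  // rank 2 is in the list, so this exits with 0
}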

View File

@@ -27,7 +27,6 @@
namespace mindspore {
namespace parallel {
#define REGISTER(className) \
OperatorInfoPtr objectCreator##className(std::string name, Shapes in, Shapes out, PrimitiveAttrs& attrs) { \
return std::make_shared<className>(name, in, out, attrs); \
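
The REGISTER macro body is truncated by the diff view; what is visible generates an objectCreator##className factory that returns a shared_ptr to the operator class. A simplified, self-contained sketch of that pattern, using stand-in definitions for OperatorInfo, Shapes, and PrimitiveAttrs and a hypothetical MatMulInfo operator (the real types live elsewhere in this module):

#include <cstdint>
#include <map>
#include <memory>
#include <string>
#include <vector>

using Shape = std::vector<int32_t>;
using Shapes = std::vector<Shape>;
using PrimitiveAttrs = std::map<std::string, int32_t>;  // stand-in attribute map

struct OperatorInfo {  // fields and parallel logic omitted in this sketch
  OperatorInfo(std::string, Shapes, Shapes, PrimitiveAttrs) {}
  virtual ~OperatorInfo() = default;
};
using OperatorInfoPtr = std::shared_ptr<OperatorInfo>;

struct MatMulInfo : OperatorInfo {  // hypothetical registered operator class
  using OperatorInfo::OperatorInfo;
};

// Expanding REGISTER(MatMulInfo) would generate a creator along these lines:
OperatorInfoPtr objectCreatorMatMulInfo(std::string name, Shapes in, Shapes out,
                                        PrimitiveAttrs& attrs) {
  return std::make_shared<MatMulInfo>(name, in, out, attrs);
}

int main() {
  PrimitiveAttrs attrs;
  OperatorInfoPtr op = objectCreatorMatMulInfo("MatMul", {}, {}, attrs);
  return op ? 0 : 1;
}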

View File

@@ -229,7 +229,8 @@ Status Softmax::GenerateStrategies(int32_t stage_id) {
}
is_auto_parallel_ = true;
Shape input0_split(inputs_shape_[0].size(), 1);
Shape input0_split;
(void)input0_split.insert(input0_split.begin(), inputs_shape_[0].size(), 1);
for (auto& element : axis_) {
int32_t axis_index = element;
if (element < 0) {
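
Per the hunk line counts (7 to 8), this commit deletes the one-line fill-constructor and adds the two-line insert form; the (void) cast explicitly discards insert's return value for the style checker. The same rewrite recurs in the SoftmaxCrossEntropyWithLogitsInfo, PReLUInfo, ReshapeInfo, and VirtualDatasetInfo hunks below. A minimal sketch showing the two forms build the same vector, assuming Shape is std::vector<int32_t>, consistent with the aliases shown later in this commit:

#include <cstddef>
#include <cstdint>
#include <vector>

using Shape = std::vector<int32_t>;

int main() {
  const std::size_t dims = 4;   // hypothetical stand-in for inputs_shape_[0].size()
  Shape deleted_form(dims, 1);  // fill-constructor form removed by the commit
  Shape added_form;             // explicit-insert form kept by the commit
  (void)added_form.insert(added_form.begin(), dims, 1);
  return deleted_form == added_form ? 0 : 1;  // both are {1, 1, 1, 1}
}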

View File

@@ -27,7 +27,6 @@
namespace mindspore {
namespace parallel {
Status BatchParallelInfo::CheckStrategy(const StrategyPtr& strategy) {
if (CheckStrategyValue(strategy, inputs_shape_, is_auto_parallel_) != SUCCESS) {
if (is_auto_parallel_) {

View File

@@ -194,7 +194,8 @@ Status SoftmaxCrossEntropyWithLogitsInfo::GenerateStrategies(int32_t stage_id) {
}
is_auto_parallel_ = true;
Shape input0_split(inputs_shape_[0].size(), 1);
Shape input0_split;
(void)input0_split.insert(input0_split.begin(), inputs_shape_[0].size(), 1);
input0_split[IntToSize(axis_index)] = 0;
Shapes splittable_inputs = {input0_split, input0_split};
std::vector<StrategyPtr> sp_vector;

View File

@@ -1255,6 +1255,5 @@ void OperatorInfo::BreakingTiesForPerferringDataParallel(const StrategyPtr& stra
double OperatorInfo::GetForwardMemoryCostFromCNode() {
return operator_cost()->GetForwardComputationCost(inputs_tensor_info_, outputs_tensor_info_, 0);
}
} // namespace parallel
} // namespace mindspore

View File

@@ -212,8 +212,10 @@ Status PReLUInfo::GenerateStrategies(int32_t stage_id) {
return FAILED;
}
is_auto_parallel_ = true;
Shape input0_split(inputs_shape_[0].size(), 1);
input0_split[1] = 0;
Shape input0_split;
input0_split.emplace_back(1);
input0_split.emplace_back(0);
(void)input0_split.insert(input0_split.end(), inputs_shape_[0].size() - 2, 1);
Shape input1_split(inputs_shape_[1].size(), 0);
Shapes splittable_inputs = {input0_split, input1_split};
std::vector<StrategyPtr> sp_vector;
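
The PReLU variant pins dimension 1 to 0 (not splittable, matching the all-zero input1_split for the per-channel weight) and leaves every other dimension splittable. A sketch with a hypothetical 4-D input, checking the added form against the deleted two-line form:

#include <cstddef>
#include <cstdint>
#include <vector>

using Shape = std::vector<int32_t>;

int main() {
  const std::size_t rank = 4;    // hypothetical input rank, e.g. (N, C, H, W)
  Shape input0_split;
  input0_split.emplace_back(1);  // dim 0: splittable
  input0_split.emplace_back(0);  // dim 1: kept whole
  (void)input0_split.insert(input0_split.end(), rank - 2, 1);  // trailing dims
  Shape deleted_form(rank, 1);   // the two-line form this commit removes
  deleted_form[1] = 0;
  return input0_split == deleted_form ? 0 : 1;  // both are {1, 0, 1, 1}
}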

View File

@@ -413,8 +413,9 @@ Status ReshapeInfo::GenerateStrategies(int32_t stage_id) {
return FAILED;
}
is_auto_parallel_ = true;
Shape input0_split(inputs_shape_[0].size(), 0);
input0_split[0] = 1;
Shape input0_split;
input0_split.emplace_back(1);
(void)input0_split.insert(input0_split.end(), inputs_shape_[0].size() - 1, 0);
Shapes splittable_inputs = {input0_split};
std::vector<StrategyPtr> sp_vector;
if (GenerateStrategiesForIndependentInputs(stage_id, inputs_shape_, splittable_inputs, &sp_vector) != SUCCESS) {

View File

@@ -27,7 +27,6 @@
namespace mindspore {
namespace parallel {
Status VirtualDatasetInfo::CheckStrategy(const StrategyPtr& strategy) {
if (CheckStrategyValue(strategy, inputs_shape_, is_auto_parallel_) != SUCCESS) {
if (is_auto_parallel_) {
@@ -225,8 +224,9 @@ Status VirtualDatasetInfo::GenerateStrategies(int32_t stage_id) {
StrategyPtr sp;
std::vector<Dimensions> strategy;
for (auto& shape : inputs_shape_) {
Shape temp(shape.size(), 1);
temp[0] = SizeToInt(total_dev_num);
Shape temp;
temp.emplace_back(SizeToInt(total_dev_num));
(void)temp.insert(temp.end(), shape.size() - 1, 1);
strategy.push_back(temp);
}
sp = std::make_shared<Strategy>(stage_id, strategy);
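
The loop gives every dataset input the strategy [total_dev_num, 1, ..., 1]: only the batch dimension is sliced, across all devices. A sketch with hypothetical numbers:

#include <cstdint>
#include <vector>

using Shape = std::vector<int32_t>;

int main() {
  const int32_t total_dev_num = 8;     // hypothetical device count
  const Shape shape = {32, 224, 224};  // hypothetical dataset input shape
  Shape temp;
  temp.emplace_back(total_dev_num);    // slice the batch dimension 8 ways
  (void)temp.insert(temp.end(), shape.size() - 1, 1);  // leave the rest whole
  return temp == Shape{8, 1, 1} ? 0 : 1;
}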

View File

@@ -51,7 +51,6 @@ class VirtualDatasetInfo : public OperatorInfo {
Status GetAttrs() override;
Status InferAsLossDivisor() override;
};
} // namespace parallel
} // namespace mindspore

View File

@@ -21,7 +21,6 @@
namespace mindspore {
namespace parallel {
enum Status {
SUCCESS = 0,
FAILED,

View File

@@ -487,7 +487,6 @@ Status ConstructCostGraphNodes(const std::vector<AnfNodePtr> &all_nodes, const F
bool is_find_wrong = (current_op_ptr->name().find(VIRTUAL_DATA_SET_INFO) == std::string::npos) &&
(current_op_ptr->name().find(BATCH_PARALLEL) == std::string::npos) &&
(current_op_ptr->name().find(prim->name()) == std::string::npos);
if (is_find_wrong) {
MS_LOG(EXCEPTION) << "The OperatorInfo: " << current_op_ptr->name()
<< " does not match the Prim: " << prim->name();
@@ -947,7 +946,6 @@ Status ParallelStrategyRecSearch(const std::vector<AnfNodePtr> &all_nodes, const
graph = EliminateGraph(graph, eli_list, index_list);
size_t num_device = g_device_manager->DeviceNum();
if (PartitionForAllDevices(num_device, graph) == SUCCESS) {
MS_LOG(INFO) << "Partition Success With " << num_device << " devices.";
} else {

View File

@@ -55,7 +55,6 @@ Status ParallelStrategyRecSearch(const std::vector<AnfNodePtr> &all_nodes, const
std::vector<std::vector<std::string>> RecInputTensorNames(const std::map<std::string, std::string>::iterator &it,
std::vector<std::vector<std::string>> input_tensor_names);
} // namespace parallel
} // namespace mindspore
#endif // PARALLEL_STEP_AUTO_PARALLEL_H_

View File

@@ -2094,7 +2094,6 @@ CNodePtr FindLossCNodeFromRoot(const FuncGraphPtr& root) {
MS_EXCEPTION_IF_NULL(root_return_node);
const auto& all_nodes = root->nodes();
FuncGraphPtr func_graph = FindForwardGraphByRootNodes(all_nodes);
if (func_graph == nullptr) {
return FindLossCNode(root);
} else {
@@ -2109,7 +2108,6 @@ FuncGraphPtr ForwardGraph(const FuncGraphPtr& root) {
MS_EXCEPTION_IF_NULL(root_return_node);
const auto& all_nodes = root->nodes();
FuncGraphPtr func_graph = FindForwardGraphByRootNodes(all_nodes);
if (func_graph != nullptr) {
forward_graph = func_graph;
}

View File

@@ -27,7 +27,6 @@
namespace mindspore {
namespace parallel {
#define MIN_SLICE_NUM 1
using Dimensions = std::vector<int32_t>;

View File

@@ -26,7 +26,6 @@
namespace mindspore {
namespace parallel {
Status Arrangement::Init(const std::vector<int32_t>& array) {
Status status = Array::Init(array);
if (status != Status::SUCCESS) {

View File

@@ -28,7 +28,6 @@
namespace mindspore {
namespace parallel {
class Arrangement : public Array {
public:
Arrangement() : size_(1) {}
@@ -53,7 +52,6 @@ class Arrangement : public Array {
void ComputeSize();
int32_t size_;
};
} // namespace parallel
} // namespace mindspore

View File

@@ -21,7 +21,6 @@
namespace mindspore {
namespace parallel {
std::string Array::ToString() const {
std::ostringstream buffer;
buffer << "[ ";

View File

@@ -26,7 +26,6 @@
namespace mindspore {
namespace parallel {
class Array {
public:
Array() = default;
@@ -43,7 +42,6 @@ class Array {
protected:
std::vector<int32_t> array_;
};
} // namespace parallel
} // namespace mindspore

View File

@@ -52,7 +52,6 @@ class ConstructOperator {
Shape dev_matrix_shape_;
Status CreateGroupByDim(size_t axis, std::vector<Group>* group);
};
} // namespace parallel
} // namespace mindspore

View File

@@ -20,7 +20,6 @@
namespace mindspore {
namespace parallel {
std::string LayoutTransfer::ToString() const {
std::ostringstream buffer;
buffer << std::endl << std::string("from_in_ tensor layout:" + from_in_.ToString());
@@ -37,6 +36,5 @@ Status LayoutTransfer::Init(const TensorLayout& from_in, const TensorLayout& to_
Status status = CheckValidTransfer();
return status;
}
} // namespace parallel
} // namespace mindspore

View File

@@ -23,7 +23,6 @@
namespace mindspore {
namespace parallel {
class LayoutTransfer {
public:
LayoutTransfer() = default;
@@ -43,7 +42,6 @@ class LayoutTransfer {
private:
virtual Status CheckValidTransfer() = 0;
};
} // namespace parallel
} // namespace mindspore

View File

@@ -26,7 +26,6 @@
namespace mindspore {
namespace parallel {
Status Map::Init(const std::vector<int32_t>& array) {
Status status = Array::Init(array);
if (status != Status::SUCCESS) {

View File

@@ -46,7 +46,6 @@ class Map : public Array {
private:
bool IsValidMap();
};
} // namespace parallel
} // namespace mindspore

View File

@@ -21,7 +21,6 @@
namespace mindspore {
namespace parallel {
Status RedistributionLayoutTransfer::CheckValidTransfer() { return Status::SUCCESS; }
/*
@@ -66,6 +65,5 @@ std::shared_ptr<ReshapeLayoutTransfer> RedistributionLayoutTransfer::UnifyDevice
}
return unified_device_arrangement_ptr->UnifyDeviceArrangementAndTensorShape();
}
} // namespace parallel
} // namespace mindspore

View File

@@ -24,7 +24,6 @@
namespace mindspore {
namespace parallel {
class RedistributionLayoutTransfer : public LayoutTransfer {
public:
RedistributionLayoutTransfer() = default;
@@ -35,7 +34,6 @@ class RedistributionLayoutTransfer : public LayoutTransfer {
Status CheckValidTransfer() override;
std::shared_ptr<ReshapeLayoutTransfer> UnifyDeviceArrangement() const;
};
} // namespace parallel
} // namespace mindspore

View File

@@ -22,7 +22,6 @@
namespace mindspore {
namespace parallel {
Status RedistributionOperatorInfer::Init(const TensorLayout& tensor_layout, const Map& out_tensor_map,
RankList dev_list) {
in_tensor_map_ = tensor_layout.tensor_map();
@@ -273,6 +272,5 @@ Status RedistributionOperatorInfer::TransferConcatByAxis(Args args) {
}
return Status::SUCCESS;
}
} // namespace parallel
} // namespace mindspore

View File

@@ -28,7 +28,6 @@
#include "utils/convert_utils.h"
namespace mindspore {
namespace parallel {
using DeviceArrangement = std::vector<int32_t>;
using TensorMap = std::vector<int32_t>;
using TensorShape = std::vector<int32_t>;
@@ -69,7 +68,6 @@ class RedistributionOperatorInfer {
RankList dev_list_;
bool construct_op_flag_;
};
} // namespace parallel
} // namespace mindspore

View File

@@ -20,7 +20,6 @@
namespace mindspore {
namespace parallel {
Status ReshapeLayoutTransfer::CheckValidTransfer() {
if (!IsSameDeviceArrangement()) {
return Status::FAILED;

View File

@@ -23,7 +23,6 @@
namespace mindspore {
namespace parallel {
class ReshapeLayoutTransfer : public LayoutTransfer {
public:
ReshapeLayoutTransfer() = default;
@@ -43,7 +42,6 @@ class ReshapeLayoutTransfer : public LayoutTransfer {
bool FromTensorShapeCanBeExpandByTo() const;
bool ToTensorShapeCanBeExpandByFrom() const;
};
} // namespace parallel
} // namespace mindspore

View File

@@ -21,7 +21,6 @@
namespace mindspore {
namespace parallel {
/*
* example:
* shape = [2, 8, 32]
@@ -260,6 +259,5 @@ Status ExpandShape(const std::vector<int32_t>& in, const std::vector<int32_t>& e
}
return status;
}
} // namespace parallel
} // namespace mindspore

View File

@@ -27,7 +27,6 @@
namespace mindspore {
namespace parallel {
/*
* compute the accumulating product of all the values in shape from left to right,
* the accumulating results are saved in shape_accum from left to right
@@ -167,7 +166,6 @@ Status ExpandAccumulateProduct(const std::vector<int64_t>& in_accum_reverse,
* out = [2, 4, 2, 4, 8]
*/
Status ExpandShape(const std::vector<int32_t>& in, const std::vector<int32_t>& expand, std::vector<int32_t>* out);
} // namespace parallel
} // namespace mindspore
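
The accumulating product described in the comment above is a left-to-right running product. A minimal sketch using std::partial_sum with std::multiplies, on the shape = [2, 8, 32] example from the corresponding .cc hunk earlier in this commit:

#include <cstdint>
#include <functional>
#include <numeric>
#include <vector>

int main() {
  const std::vector<int32_t> shape = {2, 8, 32};
  std::vector<int32_t> shape_accum(shape.size());
  // Running product left to right: {2, 2 * 8, 2 * 8 * 32} == {2, 16, 512}.
  std::partial_sum(shape.begin(), shape.end(), shape_accum.begin(),
                   std::multiplies<int32_t>());
  return shape_accum == std::vector<int32_t>{2, 16, 512} ? 0 : 1;
}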

View File

@@ -28,7 +28,6 @@
namespace mindspore {
namespace parallel {
using Shapes = std::vector<Shape>;
class TensorInfo {
@@ -55,7 +54,6 @@ class TensorInfo {
// reduce method's reduce dim
std::vector<int32_t> reduce_dim_;
};
} // namespace parallel
} // namespace mindspore

View File

@@ -27,7 +27,6 @@
namespace mindspore {
namespace parallel {
std::string TensorLayout::ToString() const { return StandardToString() + OriginToString(); }
std::string TensorLayout::StandardToString() const {
@@ -337,7 +336,7 @@ Status TensorLayout::UpdateTensorMap(uint32_t index, int32_t value) {
MS_LOG(ERROR) << "Index is out of the size of the tensor map!";
return Status::FAILED;
}
Shape shape = tensor_map_.array();
auto shape = tensor_map_.array();
shape[index] = value;
if (tensor_map_.Init(shape) == Status::FAILED) {
MS_LOG(ERROR) << "Update tensor map failed!";

View File

@@ -30,7 +30,6 @@
namespace mindspore {
namespace parallel {
class TensorLayout {
public:
TensorLayout() = default;
@@ -94,7 +93,6 @@ class TensorLayout {
Map tensor_map_;
Arrangement tensor_shape_;
};
} // namespace parallel
} // namespace mindspore

View File

@@ -24,7 +24,6 @@
namespace mindspore {
namespace parallel {
Status TensorRedistribution::Init(const TensorLayout& from, const TensorLayout& to, const RankList& dev_list) {
from_origin_ = from;
to_origin_ = to;

View File

@@ -33,7 +33,6 @@
namespace mindspore {
namespace parallel {
class TensorRedistribution {
public:
explicit TensorRedistribution(bool construct_op_flag = true, bool keep_reshape = false)
@@ -83,7 +82,6 @@ class TensorRedistribution {
bool construct_op_flag_;
bool keep_reshape_;
};
} // namespace parallel
} // namespace mindspore