!17028 clear warnings on master

From: @huaweib
Reviewed-by: @jjfeing, @kisnwang
Signed-off-by: @kisnwang

commit aa49147f31
@@ -64,16 +64,10 @@ bool SetIOIputSize(const std::shared_ptr<AnfNode> &anf_node, const size_t &input
       }
     } else {
       auto type_ptr = TypeIdToType(AnfAlgo::GetInputDeviceDataType(anf_node, i));
       MS_EXCEPTION_IF_NULL(type_ptr);
       int64_t size_i = 1;
-      for (size_t j = 0; j < shape_i.size(); j++) {
-        size_i = LongMulWithOverflowCheck(size_i, static_cast<int>(shape_i[j]));
-      }
-      size_t type_byte = GetTypeByte(type_ptr);
-      if (type_byte == 0) {
+      if (!GetShapeSize(shape_i, type_ptr, &size_i)) {
         return false;
       }
-      size_i = LongMulWithOverflowCheck(size_i, SizeToInt(type_byte));
       input_size_list->push_back(LongToSize(size_i));
     }
@@ -98,16 +92,10 @@ bool SetIOSize(const std::shared_ptr<AnfNode> &anf_node, const std::shared_ptr<A
   for (size_t i = 0; i < output_num; i++) {
     std::vector<size_t> shape_i = AnfAlgo::GetOutputDeviceShape(anf_node, i);
     TypePtr type_ptr = TypeIdToType(AnfAlgo::GetOutputDeviceDataType(anf_node, i));
     MS_EXCEPTION_IF_NULL(type_ptr);
     int64_t size_i = 1;
-    for (size_t j = 0; j < shape_i.size(); j++) {
-      size_i = LongMulWithOverflowCheck(size_i, static_cast<int>(shape_i[j]));
-    }
-    size_t type_byte = GetTypeByte(type_ptr);
-    if (type_byte == 0) {
+    if (!GetShapeSize(shape_i, type_ptr, &size_i)) {
       return false;
     }
-    size_i = LongMulWithOverflowCheck(size_i, SizeToInt(type_byte));
     output_size_list.push_back(LongToSize(size_i));
   }
   kernel_mod_ptr->SetOutputSizeList(output_size_list);
@@ -848,5 +848,17 @@ void ComputeInterpolationWeights(const size_t out_size, const size_t in_size, co
   }
 }

+bool GetShapeSize(const std::vector<size_t> &shape, const TypePtr &type_ptr, int64_t *size_i) {
+  MS_EXCEPTION_IF_NULL(type_ptr);
+  size_t type_byte = GetTypeByte(type_ptr);
+  if (type_byte == 0) {
+    return false;
+  }
+  for (size_t j = 0; j < shape.size(); j++) {
+    size_i[0] = LongMulWithOverflowCheck(size_i[0], static_cast<int>(shape[j]));
+  }
+  size_i[0] = LongMulWithOverflowCheck(size_i[0], SizeToInt(type_byte));
+  return true;
+}
 }  // namespace kernel
 }  // namespace mindspore
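The new GetShapeSize helper is what lets the call sites above and below collapse their copy-pasted size computations into a single call; callers keep the `int64_t size_i = 1;` initialization and just test the return value. A minimal, self-contained sketch of the idea, with the MindSpore-specific TypePtr/GetTypeByte machinery replaced by a plain type_byte parameter (an assumption for illustration only):

    #include <cstdint>
    #include <limits>
    #include <stdexcept>
    #include <vector>

    // Stand-in for mindspore's LongMulWithOverflowCheck: multiply two
    // non-negative int64_t values, throwing if the product would overflow.
    int64_t LongMulWithOverflowCheck(int64_t a, int64_t b) {
      if (a != 0 && b > std::numeric_limits<int64_t>::max() / a) {
        throw std::runtime_error("int64 multiplication overflow");
      }
      return a * b;
    }

    // Simplified GetShapeSize: fail fast when the element size is unknown
    // (0 bytes), otherwise accumulate element-count * element-size into *size_i.
    bool GetShapeSize(const std::vector<size_t> &shape, size_t type_byte, int64_t *size_i) {
      if (type_byte == 0) {
        return false;
      }
      for (size_t dim : shape) {
        *size_i = LongMulWithOverflowCheck(*size_i, static_cast<int64_t>(dim));
      }
      *size_i = LongMulWithOverflowCheck(*size_i, static_cast<int64_t>(type_byte));
      return true;
    }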
@@ -80,6 +80,7 @@ TypeId DtypeToTypeId(const std::string &dtypes);
 std::string Dtype2ShortType(const std::string &dtypes);
 std::string TypeId2String(TypeId type_id, bool unknown_as_default = false);
 size_t GetDtypeNbyte(const std::string &dtypes);
+bool GetShapeSize(const std::vector<size_t> &shape, const TypePtr &type_ptr, int64_t *size_i);
 bool ParseMetadata(const CNodePtr &kernel_node, const std::shared_ptr<const OpInfo> &op_info_ptr, Processor processor,
                    std::vector<std::shared_ptr<KernelBuildInfo>> *const kernel_info_list);
 void SaveJsonInfo(const std::string &json_name, const std::string &info, const std::string &base_path = kCceKernelMeta);
@@ -19,9 +19,8 @@
 #include "utils/ms_context.h"
 namespace mindspore {
 namespace kernel {
-bool HcomReceiveKernel::Launch(const std::vector<AddressPtr> & /*inputs*/,
-                               const std::vector<AddressPtr> & /*workspace*/,
-                               const std::vector<AddressPtr> & /*outputs*/, void * /*stream_ptr*/) {
+bool HcomReceiveKernel::Launch(const std::vector<AddressPtr> &, const std::vector<AddressPtr> &,
+                               const std::vector<AddressPtr> &, void *) {
   MS_LOG(INFO) << "HcomReceive launch";
   return true;
 }
@@ -20,8 +20,8 @@

 namespace mindspore {
 namespace kernel {
-bool HcomSendKernel::Launch(const std::vector<AddressPtr> & /*inputs*/, const std::vector<AddressPtr> & /*workspace*/,
-                            const std::vector<AddressPtr> & /*outputs*/, void * /*stream_ptr*/) {
+bool HcomSendKernel::Launch(const std::vector<AddressPtr> &, const std::vector<AddressPtr> &,
+                            const std::vector<AddressPtr> &, void *) {
   MS_LOG(INFO) << "HcomSend launch";
   return true;
 }
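Both Launch overrides drop the commented-out parameter names in favor of unnamed parameters. Leaving a parameter unnamed is the standard-conforming way to mark it intentionally unused, so -Wunused-parameter stays quiet; the `/*name*/` comments were presumably dropped to match the project's lint style. A small sketch with a hypothetical AddressPtrStub/KernelBase pair:

    #include <vector>

    struct AddressPtrStub {};  // hypothetical stand-in for AddressPtr

    struct KernelBase {
      virtual bool Launch(const std::vector<AddressPtrStub> &inputs,
                          const std::vector<AddressPtrStub> &workspace,
                          const std::vector<AddressPtrStub> &outputs, void *stream_ptr) = 0;
      virtual ~KernelBase() = default;
    };

    // The override must keep the full signature, but naming none of the
    // parameters tells the compiler the arguments are intentionally unused.
    struct NoopKernel : KernelBase {
      bool Launch(const std::vector<AddressPtrStub> &, const std::vector<AddressPtrStub> &,
                  const std::vector<AddressPtrStub> &, void *) override {
        return true;  // nothing to launch; report success
      }
    };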
@@ -113,6 +113,7 @@ bool HcomUtil::GetHcomCount(const AnfNodePtr &anf_node, const vector<HcclDataTyp
   size_t input_size;
   uint32_t type_size = 4;
   size_t size = AnfAlgo::GetInputTensorNum(anf_node);
+  auto cnode = anf_node->cast<CNodePtr>();
   if (AnfAlgo::GetCNodeName(anf_node) == kReceiveOpName) {
     size = AnfAlgo::GetOutputTensorNum(anf_node);
   }
@@ -128,7 +129,6 @@ bool HcomUtil::GetHcomCount(const AnfNodePtr &anf_node, const vector<HcclDataTyp

     if (AnfAlgo::GetCNodeName(anf_node) == kReduceScatterOpName) {
       int64_t rank_size;
-      auto cnode = anf_node->cast<CNodePtr>();
       auto primitive = AnfAlgo::GetCNodePrimitive(anf_node);
       MS_EXCEPTION_IF_NULL(primitive);
       if (primitive->GetAttr("rank_size") != nullptr) {
@@ -145,7 +145,6 @@ bool HcomUtil::GetHcomCount(const AnfNodePtr &anf_node, const vector<HcclDataTyp
       total_size = total_size + block_size;
     } else {
       if (AnfAlgo::GetCNodeName(anf_node) == kAllGatherOpName) {
-        auto cnode = anf_node->cast<CNodePtr>();
         if (AnfAlgo::HasNodeAttr(kAttrFusion, cnode) && AnfAlgo::GetNodeAttr<int64_t>(anf_node, kAttrFusion) &&
             AnfAlgo::GetInputTensorNum(anf_node) > 1) {
           block_size = (input_size + align_size - 1 + filled_size) / align_size * align_size;
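The three hunks above replace two branch-local casts with a single `cnode` hoisted to the top of GetHcomCount, so every branch shares one local instead of re-declaring it. A hypothetical reduction of the pattern (Node/NodePtr stand in for the AnfNode/CNodePtr machinery):

    #include <memory>
    #include <string>

    struct Node {
      std::string name;
    };
    using NodePtr = std::shared_ptr<Node>;

    int CountByKind(const NodePtr &node) {
      auto cnode = node;  // hoisted once; every branch below shares it
      if (cnode->name == "ReduceScatter") {
        return 1;  // previously a second `auto cnode = ...` lived here
      } else if (cnode->name == "AllGather") {
        return 2;  // ...and a third one here
      }
      return 0;
    }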
@@ -177,8 +176,7 @@ bool HcomUtil::GetHcomOperationType(const AnfNodePtr &anf_node, HcclReduceOp *op
     MS_LOG(ERROR) << "Get HCOM_ATTR_REDUCE_TYPE fail, not support!";
     return false;
   }
-  auto hcom_op_type_get = GetValue<const char *>(primitive->GetAttr("op"));
-  string hcom_op_type(hcom_op_type_get);
+  auto hcom_op_type = GetValue<std::string>(primitive->GetAttr("op"));
   if (hcom_op_type == "min") {
     *op_type = HCCL_REDUCE_MIN;
   } else if (hcom_op_type == "max") {
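Reading the attribute as std::string in one step removes the raw const char * intermediate and the extra named local it required. A toy illustration, with a hypothetical Attr holder standing in for the primitive's attribute map:

    #include <iostream>
    #include <string>

    // Hypothetical attribute holder; Get<T>() mimics mindspore's GetValue<T>.
    struct Attr {
      std::string value;
      template <typename T>
      T Get() const;
    };

    // Returning std::string directly keeps one named local and no pointer
    // whose lifetime is tied to the attribute's internal storage.
    template <>
    std::string Attr::Get<std::string>() const { return value; }

    int main() {
      Attr op{"min"};
      auto hcom_op_type = op.Get<std::string>();
      std::cout << (hcom_op_type == "min" ? "HCCL_REDUCE_MIN" : "other") << std::endl;
      return 0;
    }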
@@ -22,12 +22,13 @@
 #include <utility>
 #include "runtime/mem.h"
 #include "utils/ms_context.h"
+#include "backend/kernel_compiler/common_utils.h"
 #include "runtime/device/kernel_runtime.h"
 #include "runtime/device/ascend/executor/host_dynamic_kernel.h"

 namespace mindspore {
 namespace kernel {
-void HostKernelFactory::Registe(const std::string &name, HostKernelCreater &&fun) {
+void HostKernelFactory::Register(const std::string &name, HostKernelCreater &&fun) {
   hostKernelMap_.emplace(name, std::move(fun));
 }
@@ -56,16 +57,10 @@ bool HostKernelMod::Init(const AnfNodePtr &anf_node) {
   for (size_t i = 0; i < input_num; i++) {
     std::vector<size_t> shape_i = AnfAlgo::GetInputDeviceShape(anf_node, i);
     TypePtr type_ptr = TypeIdToType(AnfAlgo::GetInputDeviceDataType(anf_node, i));
     MS_EXCEPTION_IF_NULL(type_ptr);
     int64_t size_i = 1;
-    for (size_t j = 0; j < shape_i.size(); j++) {
-      size_i = LongMulWithOverflowCheck(size_i, static_cast<int>(shape_i[j]));
-    }
-    size_t type_byte = GetTypeByte(type_ptr);
-    if (type_byte == 0) {
+    if (!GetShapeSize(shape_i, type_ptr, &size_i)) {
       return false;
     }
-    size_i = LongMulWithOverflowCheck(size_i, SizeToInt(type_byte));
     input_size_list_.push_back(LongToSize(size_i));
   }
@@ -74,14 +69,9 @@ bool HostKernelMod::Init(const AnfNodePtr &anf_node) {
     TypePtr type_ptr = TypeIdToType(AnfAlgo::GetOutputDeviceDataType(anf_node, i));
     MS_EXCEPTION_IF_NULL(type_ptr);
     int64_t size_i = 1;
-    for (size_t j = 0; j < shape_i.size(); j++) {
-      size_i = LongMulWithOverflowCheck(size_i, static_cast<int>(shape_i[j]));
-    }
-    size_t type_byte = GetTypeByte(type_ptr);
-    if (type_byte == 0) {
+    if (!GetShapeSize(shape_i, type_ptr, &size_i)) {
       return false;
     }
-    size_i = LongMulWithOverflowCheck(size_i, SizeToInt(type_byte));
     output_size_list_.push_back(LongToSize(size_i));
   }
   return true;
@@ -55,7 +55,7 @@ class HostKernelFactory {

 public:
  static HostKernelFactory &Get();
-  void Registe(const string &name, HostKernelCreater &&fun);
+  void Register(const string &name, HostKernelCreater &&fun);
  static std::shared_ptr<HostKernelMod> Get(const string &name);

 private:
@@ -65,7 +65,7 @@ class HostKernelFactory {
 class _HostKernelRegister {
  public:
  _HostKernelRegister(const string &name, HostKernelCreater &&fun) {
-    HostKernelFactory::Get().Registe(name, std::move(fun));
+    HostKernelFactory::Get().Register(name, std::move(fun));
  }
  ~_HostKernelRegister() = default;
 };
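The Registe-to-Register rename is applied consistently across the definition, the declaration, and the static-registration helper. For context, a condensed, hypothetical sketch of the self-registration idiom these pieces implement (names and types simplified; not the actual MindSpore definitions):

    #include <functional>
    #include <map>
    #include <memory>
    #include <string>

    struct HostKernelMod {};
    using HostKernelCreater = std::function<std::shared_ptr<HostKernelMod>()>;

    class HostKernelFactory {
     public:
      static HostKernelFactory &Get() {
        static HostKernelFactory instance;  // lazily created singleton
        return instance;
      }
      void Register(const std::string &name, HostKernelCreater &&fun) {
        host_kernel_map_.emplace(name, std::move(fun));
      }
      std::shared_ptr<HostKernelMod> Create(const std::string &name) {
        auto it = host_kernel_map_.find(name);
        return it == host_kernel_map_.end() ? nullptr : it->second();
      }

     private:
      std::map<std::string, HostKernelCreater> host_kernel_map_;
    };

    // A static instance of this helper runs at program start-up and registers
    // one creator; a macro usually stamps out one such object per kernel.
    struct _HostKernelRegister {
      _HostKernelRegister(const std::string &name, HostKernelCreater &&fun) {
        HostKernelFactory::Get().Register(name, std::move(fun));
      }
    };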
@@ -106,7 +106,7 @@ std::vector<size_t> TransShapeToSizet(const abstract::ShapePtr &shape) {
   return shape_size_t;
 }

-enum ShapeType { kMaxShape, kMinShape };
+enum class ShapeType { kMaxShape, kMinShape };

 void GetRealOutputRecursively(const AnfNodePtr &node, size_t output_index,
                               std::vector<session::KernelWithIndex> *inputs) {
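Switching to a scoped enum keeps kMaxShape/kMinShape out of the enclosing namespace and blocks implicit conversion to int; the cost is that every use must now be qualified, which is exactly what the later hunks do. A minimal example:

    #include <iostream>

    // Scoped: the enumerators do not leak into the surrounding namespace and
    // must be written as ShapeType::kMaxShape / ShapeType::kMinShape.
    enum class ShapeType { kMaxShape, kMinShape };

    void Describe(ShapeType type) {
      if (type == ShapeType::kMaxShape) {
        std::cout << "max shape requested" << std::endl;
      } else {
        std::cout << "min shape requested" << std::endl;
      }
    }

    int main() {
      Describe(ShapeType::kMaxShape);
      return 0;
    }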
@@ -1102,19 +1102,19 @@ void AnfRuntimeAlgorithm::SetOutputInferTypeAndShape(const std::vector<TypeId> &
     MS_LOG(EXCEPTION) << "Types size " << types.size() << "should be same with shapes size " << shapes.size()
                       << " trace: " << trace::DumpSourceLines(node);
   }
+  auto abstract_ptr = node_ptr->abstract();
   if (shapes.empty()) {
     node->set_abstract(std::make_shared<abstract::AbstractNone>());
   } else if (shapes.size() == 1) {
     // single output handle
     ShapeVector shape_int;
-    auto abstract_ptr = node_ptr->abstract();
     abstract::AbstractTensorPtr abstract = nullptr;
     if (abstract_ptr != nullptr) {
-      auto max_shape = GetOutputMaxShape(node_ptr, 0);
-      auto min_shape = GetOutputMinShape(node_ptr, 0);
+      auto max_shape0 = GetOutputMaxShape(node_ptr, 0);
+      auto min_shape0 = GetOutputMinShape(node_ptr, 0);
       std::transform(shapes[0].begin(), shapes[0].end(), std::back_inserter(shape_int), SizeToLong);
       abstract = std::make_shared<AbstractTensor>(TypeIdToType(types[0]),
-                                                  std::make_shared<abstract::Shape>(shape_int, min_shape, max_shape));
+                                                  std::make_shared<abstract::Shape>(shape_int, min_shape0, max_shape0));
     } else {
       abstract = std::make_shared<AbstractTensor>(TypeIdToType(types[0]), shape_int);
     }
@@ -1124,7 +1124,6 @@ void AnfRuntimeAlgorithm::SetOutputInferTypeAndShape(const std::vector<TypeId> &
     std::vector<AbstractBasePtr> abstract_list;
     for (size_t i = 0; i < types.size(); ++i) {
       ShapeVector shape_int;
-      auto abstract_ptr = node_ptr->abstract();
       abstract::AbstractTensorPtr abstract = nullptr;
       if (abstract_ptr != nullptr) {
         auto max_shape = GetOutputMaxShape(node_ptr, i);
@@ -1800,7 +1799,7 @@ std::vector<int64_t> GetShapeFromSequeueShape(const abstract::SequeueShapePtr &s
   MS_EXCEPTION_IF_NULL(shape);
   if (shape->isa<abstract::Shape>()) {
     auto shape_ptr = shape->cast<abstract::ShapePtr>();
-    if (type == kMaxShape) {
+    if (type == ShapeType::kMaxShape) {
       return shape_ptr->max_shape().empty() ? shape_ptr->shape() : shape_ptr->max_shape();
     } else {
       return shape_ptr->min_shape().empty() ? shape_ptr->shape() : shape_ptr->min_shape();
@@ -1828,8 +1827,8 @@ std::vector<int64_t> AnfRuntimeAlgorithm::GetOutputMaxShape(const AnfNodePtr &an
     auto shape_ptr = shape->cast<abstract::ShapePtr>();
     return shape_ptr->max_shape().empty() ? shape_ptr->shape() : shape_ptr->max_shape();
   } else if (shape->isa<abstract::SequeueShape>()) {
-    auto shape_ptr = shape->cast<abstract::SequeueShapePtr>();
-    return GetShapeFromSequeueShape(shape_ptr, index, kMaxShape);
+    auto sequeue_shape_ptr = shape->cast<abstract::SequeueShapePtr>();
+    return GetShapeFromSequeueShape(sequeue_shape_ptr, index, ShapeType::kMaxShape);
  } else if (shape->isa<abstract::NoShape>()) {
    return {};
  } else {
@@ -1846,8 +1845,8 @@ std::vector<int64_t> AnfRuntimeAlgorithm::GetOutputMinShape(const AnfNodePtr &an
     auto shape_ptr = shape->cast<abstract::ShapePtr>();
     return shape_ptr->min_shape().empty() ? shape_ptr->shape() : shape_ptr->min_shape();
   } else if (shape->isa<abstract::SequeueShape>()) {
-    auto shape_ptr = shape->cast<abstract::SequeueShapePtr>();
-    return GetShapeFromSequeueShape(shape_ptr, index, kMinShape);
+    auto sequeue_shape_ptr = shape->cast<abstract::SequeueShapePtr>();
+    return GetShapeFromSequeueShape(sequeue_shape_ptr, index, ShapeType::kMinShape);
  } else if (shape->isa<abstract::NoShape>()) {
    return {};
  } else {
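Taken together, the last three hunks show the pattern these warnings fixes preserve: one helper serves both the max- and the min-shape query, selected by the now-scoped ShapeType, with a fallback to the static shape when no dynamic bound is recorded. A trimmed sketch, where Shape is a simplified stand-in for abstract::Shape:

    #include <vector>

    enum class ShapeType { kMaxShape, kMinShape };

    struct Shape {
      std::vector<long> shape;      // static shape
      std::vector<long> max_shape;  // empty when the shape is fully static
      std::vector<long> min_shape;  // empty when the shape is fully static
    };

    std::vector<long> GetShapeByType(const Shape &s, ShapeType type) {
      if (type == ShapeType::kMaxShape) {
        // Fall back to the static shape when no dynamic upper bound exists.
        return s.max_shape.empty() ? s.shape : s.max_shape;
      }
      return s.min_shape.empty() ? s.shape : s.min_shape;
    }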