!5055 prepare to support int64
Merge pull request !5055 from lirongzhen1/int64
commit 5b1cf18cb9
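The hunks below uniformly replace raw std::vector<int> shape types with the ShapeVector alias pulled in through #include "utils/shape_utils.h". The alias itself is not shown in this diff; judging from the std::vector<int>-flavoured code that stays untouched (SizeToInt, static_cast<int>, vector<int> locals) and the std::vector<int> alias added in the GPU header, it presumably starts out as a plain typedef so that widening to int64_t later becomes a one-line change. A minimal sketch under that assumption:

// utils/shape_utils.h -- assumed contents, not part of this diff.
// Introducing the alias lets every caller move off std::vector<int> now;
// widening the element type to int64_t later is then a one-line change here.
#include <vector>

using ShapeVector = std::vector<int>;  // presumably switched to std::vector<int64_t> in a follow-up
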
@@ -371,9 +371,9 @@ bool IsNeedPadding(const std::string &format, const size_t shape_size) {
   return false;
 }
 
-std::vector<int> GetRuntimePaddingShape(const AnfNodePtr &node, size_t index) {
+ShapeVector GetRuntimePaddingShape(const AnfNodePtr &node, size_t index) {
   MS_EXCEPTION_IF_NULL(node);
-  std::vector<int> shape;
+  ShapeVector shape;
   std::vector<size_t> host_shape;
   if (node->isa<ValueNode>()) {
     auto value_node = node->cast<ValueNodePtr>();

@@ -26,6 +26,7 @@
 #include "ir/dtype.h"
 #include "backend/kernel_compiler/kernel.h"
 #include "ir/dtype/type.h"
+#include "utils/shape_utils.h"
 
 namespace mindspore {
 namespace trans {

@@ -52,7 +53,7 @@ size_t ShapeSize(const std::vector<size_t> &shape);
 size_t CubeSizeByType(const TypeId data_type);
 
 std::vector<size_t> PaddingShapeTo4d(const std::vector<size_t> &shape, const std::vector<Axis> &padding_axis = {});
-std::vector<int> GetRuntimePaddingShape(const AnfNodePtr &node, size_t index);
+ShapeVector GetRuntimePaddingShape(const AnfNodePtr &node, size_t index);
 bool IsNeedPadding(const std::string &format, const size_t shape_size);
 std::vector<size_t> TransShapeToDevice(const std::vector<size_t> &shape, const std::string &format);
 bool TransDataType(const TypeIdArgs &args, void *result);

@@ -28,6 +28,7 @@
 #include "frontend/parallel/ps/util.h"
 #include "frontend/parallel/ps/common.h"
 #include "frontend/parallel/ps/worker_proxy.h"
+#include "utils/shape_utils.h"
 
 namespace mindspore {
 namespace parallel {

@@ -41,15 +42,15 @@ class Worker {
   }
 
   void Run();
-  void Push(const std::vector<size_t> &keys, std::vector<uintptr_t> addrs, const std::vector<int> &sizes);
+  void Push(const std::vector<size_t> &keys, std::vector<uintptr_t> addrs, const ShapeVector &sizes);
   void Pull(const size_t key, void *dev_addr, const size_t size);
   size_t SetParamKey(const std::string &param_name);
   void SetParamInitInServer(const std::string &param_name, bool init_in_server);
   bool GetParamInitInServer(const std::string &param_name);
   void SetKeyOptimId(size_t key, const std::string &optimizer_name);
-  void SetOptimInputShapes(size_t key, const std::vector<int> &shape);
+  void SetOptimInputShapes(size_t key, const ShapeVector &shape);
   void AddEmbeddingTable(const ::ps::Key &key, const size_t &row_count);
-  void InitPSEmbeddingTable(const std::vector<size_t> &keys, std::vector<size_t> shapes, const std::vector<int> &sizes);
+  void InitPSEmbeddingTable(const std::vector<size_t> &keys, std::vector<size_t> shapes, const ShapeVector &sizes);
   void InitPSParamAndOptim(const std::string &param_name, tensor::TensorPtr tensor);
   void DoPSEmbeddingLookup(const ::ps::SArray<::ps::Key> &keys, const ::ps::SArray<int> &lookup_ids,
                            const ::ps::SArray<int> &lens, ::ps::SArray<T> *lookup_result, int cmd);

@@ -75,7 +76,7 @@ class Worker {
   std::map<std::string, size_t> param_to_key_;
   std::map<size_t, bool> init_keys_;
   std::map<size_t, int> key_to_optimId_;
-  std::map<size_t, std::vector<std::vector<int>>> key_to_optim_shapes_;
+  std::map<size_t, std::vector<ShapeVector>> key_to_optim_shapes_;
   std::map<std::string, bool> param_to_init_in_server_;
 };
 

@@ -94,7 +95,7 @@ void Worker<T>::Run() {
 }
 
 template <typename T>
-void Worker<T>::Push(const std::vector<size_t> &keys, std::vector<uintptr_t> addrs, const std::vector<int> &sizes) {
+void Worker<T>::Push(const std::vector<size_t> &keys, std::vector<uintptr_t> addrs, const ShapeVector &sizes) {
   size_t total_size = 0;
   for (auto size : sizes) {
     total_size += size;

@@ -154,7 +155,7 @@ void Worker<T>::InitPSParamData(const std::vector<size_t> &keys, void *origin_ad
 }
 
 template <typename T>
-void Worker<T>::SetOptimInputShapes(size_t key, const std::vector<int> &shape) {
+void Worker<T>::SetOptimInputShapes(size_t key, const ShapeVector &shape) {
   if (key_to_optim_shapes_.find(key) == key_to_optim_shapes_.end()) {
     key_to_optim_shapes_[key] = {shape};
   } else {

@@ -167,7 +168,7 @@ void Worker<T>::InitPSOptimInputShapes(const size_t key) {
   ::ps::SArray<::ps::Key> keys;
   ::ps::SArray<int> shape_len;
   ::ps::SArray<T> all_shape;
-  std::vector<std::vector<int>> shapes = key_to_optim_shapes_[key];
+  std::vector<ShapeVector> shapes = key_to_optim_shapes_[key];
   for (auto shape : shapes) {
     keys.push_back(key);
     if (shape.size() == 0) {

@@ -255,7 +256,7 @@ void Worker<T>::InitPSOptimId(const size_t param_key) {
 
 template <typename T>
 void Worker<T>::InitPSEmbeddingTable(const std::vector<size_t> &keys, std::vector<size_t> shapes,
-                                     const std::vector<int> &sizes) {
+                                     const ShapeVector &sizes) {
   bool has_init = IsKeyInit(keys[0]);
   if (has_init) {
     MS_LOG(DEBUG) << "The key embedding table of key " << keys[0] << " is initialized.";

@@ -272,7 +273,7 @@ template <typename T>
 void Worker<T>::InitPSParamAndOptim(const std::string &param_name, tensor::TensorPtr tensor) {
   void *param_data = tensor->data_c();
   size_t param_size = LongToSize(tensor->data().nbytes());
-  std::vector<int> param_shape = tensor->shape_c();
+  ShapeVector param_shape = tensor->shape_c();
 
   size_t param_key = GetParamKey(param_name);
   if (param_key == kInvalidKey) {

@@ -280,7 +281,7 @@ void Worker<T>::InitPSParamAndOptim(const std::string &param_name, tensor::Tenso
     return;
   }
   bool init_in_server = false;
-  std::vector<int> shape_init_in_server = {1};
+  ShapeVector shape_init_in_server = {1};
   if (param_shape == shape_init_in_server) {
     init_in_server = true;
   }

@@ -42,7 +42,7 @@
 #include "frontend/optimizer/py_pass_manager.h"
 #include "pybind_api/pybind_patch.h"
 #include "backend/kernel_compiler/cpu/random_op_cpu_kernel.h"
-
+#include "utils/shape_utils.h"
 #if (ENABLE_CPU && (ENABLE_D || ENABLE_GPU))
 #include "frontend/parallel/ps/common.h"
 #include "frontend/parallel/ps/util.h"

@@ -136,10 +136,10 @@ py::bool_ VerifyInputSignature(const py::list input_signature, const py::tuple i
       return false;
     }
     std::shared_ptr<MetaTensor> sig = input_signature[count].cast<std::shared_ptr<MetaTensor>>();
-    std::vector<int> sig_shape = sig->shape();
+    ShapeVector sig_shape = sig->shape();
     TypePtr sig_type = sig->Dtype();
 
-    std::vector<int> tensor_shape = m_tensor->shape_c();
+    ShapeVector tensor_shape = m_tensor->shape_c();
     if (tensor_shape != sig_shape) {
       MS_LOG(ERROR) << "Python input shape is incompatible with input_signature";
       return false;

@@ -849,13 +849,13 @@ bool InitExecDatasetVm(const std::string &queue_name, int64_t size, int64_t batc
                        const std::vector<TypePtr> &types, const std::vector<std::vector<int64_t>> &shapes,
                        const std::vector<int64_t> &input_indexes, bool need_run) {
   MS_LOG(INFO) << "Start InitDataSet Entry";
-  std::vector<int> int_input_indexes;
+  ShapeVector int_input_indexes;
   (void)std::transform(input_indexes.begin(), input_indexes.end(), std::back_inserter(int_input_indexes),
                        [](int64_t item) { return static_cast<int>(item); });
-  std::vector<std::vector<int>> int_shapes;
+  std::vector<ShapeVector> int_shapes;
   (void)std::transform(shapes.begin(), shapes.end(), std::back_inserter(int_shapes),
                        [](const std::vector<int64_t> &item) {
-                         std::vector<int> vector_item;
+                         ShapeVector vector_item;
                          (void)std::transform(item.begin(), item.end(), std::back_inserter(vector_item),
                                               [](int64_t inner_item) { return static_cast<int>(inner_item); });
                          return vector_item;

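A note on the hunk above: the element-wise static_cast<int> conversions from the int64_t inputs are kept, so although the containers are now ShapeVector, each dimension is still narrowed to int at this stage. Once the alias is widened to int64_t (the assumption sketched near the top of this page), the conversions could collapse into direct copies, roughly:

// Sketch only, under the assumption that ShapeVector becomes std::vector<int64_t>;
// not part of this commit.
ShapeVector int_input_indexes(input_indexes.begin(), input_indexes.end());
std::vector<ShapeVector> int_shapes(shapes.begin(), shapes.end());
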
@@ -39,6 +39,7 @@
 #include "abstract/primitive_infer_map.h"
 #include "abstract/param_validator.h"
 #include "utils/ms_utils.h"
+#include "utils/shape_utils.h"
 
 namespace mindspore {
 namespace abstract {

@@ -309,13 +310,13 @@ py::dict ConvertAbstractToPython(const AbstractBasePtr &abs_base) {
     dic["dtype"] = arg->BuildType();
     dic["value"] = BuildValue(arg->BuildValue());
   } else if (abs_base->isa<AbstractScalar>() || abs_base->isa<AbstractType>() || abs_base->isa<AbstractRefKey>()) {
-    std::vector<int> shape;
+    ShapeVector shape;
     dic["shape"] = shape;
     dic["dtype"] = abs_base->BuildType();
     dic["value"] = BuildValue(abs_base->BuildValue());
   } else if (abs_base->isa<AbstractSlice>()) {
     auto arg_slice = dyn_cast<AbstractSlice>(abs_base);
-    std::vector<int> shape;
+    ShapeVector shape;
     dic["shape"] = shape;
     dic["dtype"] = arg_slice->BuildType();
     dic["value"] = BuildValue(arg_slice->BuildValue());

@@ -275,7 +275,7 @@ void AscendDeviceAddress::SyncStream() const {
   MS_LOG(INFO) << "Finish!";
 }
 
-bool AscendDeviceAddress::SyncDeviceToHost(const std::vector<int> &shape, size_t size, mindspore::TypeId type,
+bool AscendDeviceAddress::SyncDeviceToHost(const ShapeVector &shape, size_t size, mindspore::TypeId type,
                                            void *host_ptr) const {
   MS_LOG(INFO) << "SyncDeviceToHost, Device(format:" << format_ << ", type_id:" << TypeIdLabel(type_id_)
                << ", size:" << size_ << "), Host(type_id:" << TypeIdLabel(type) << ", size:" << size << ")";

@@ -462,7 +462,7 @@ std::vector<size_t> AscendDeviceAddress::GetDeviceShape(std::vector<size_t> *hos
   return device_shape;
 }
 
-bool AscendDeviceAddress::SyncDeviceToHostAndConvertFormat(const std::vector<int> &shape, size_t size,
+bool AscendDeviceAddress::SyncDeviceToHostAndConvertFormat(const ShapeVector &shape, size_t size,
                                                            mindspore::TypeId type, void *host_ptr) const {
   MS_LOG(INFO) << "SyncDeviceToHostAndConvertFormat, Device(format:" << format_ << ", type_id:" << TypeIdLabel(type_id_)
                << ", size:" << size_ << "), Host(type_id:" << TypeIdLabel(type) << ", size:" << size << ")";

@@ -513,7 +513,7 @@ bool AscendDeviceAddress::SyncDeviceToHostAndConvertFormat(const std::vector<int
   return sync_ok;
 }
 
-bool AscendDeviceAddress::SyncHostToDevice(const std::vector<int> &shape, size_t size, mindspore::TypeId type,
+bool AscendDeviceAddress::SyncHostToDevice(const ShapeVector &shape, size_t size, mindspore::TypeId type,
                                            const void *host_ptr) const {
   MS_LOG(INFO) << "SyncHostToDevice, Device(format:" << format_ << ", type_id:" << TypeIdLabel(type_id_)
                << ", size:" << size_ << "), Host(type_id:" << TypeIdLabel(type) << ", size:" << size << ")";

@@ -557,7 +557,7 @@ bool AscendDeviceAddress::SyncHostToDevice(const std::vector<int> &shape, size_t
   return sync_ok;
 }
 
-bool AscendDeviceAddress::ConvertFormatAndSyncHostToDevice(const std::vector<int> &shape, size_t size,
+bool AscendDeviceAddress::ConvertFormatAndSyncHostToDevice(const ShapeVector &shape, size_t size,
                                                            mindspore::TypeId type, const void *host_ptr) const {
   bool sync_ok = false;
   MS_LOG(INFO) << "ConvertFormatAndSyncHostToDevice, Device(format:" << format_ << ", type_id:" << TypeIdLabel(type_id_)

@@ -622,7 +622,7 @@ AscendDeviceAddress::~AscendDeviceAddress() {
 
 #ifdef ENABLE_DUMP_E2E
 bool AscendDeviceAddress::DumpMemToFile(bool trans_flag, const std::string &filepath, const std::string &host_fmt,
-                                        const std::vector<int> &host_shape, TypeId host_type) const {
+                                        const ShapeVector &host_shape, TypeId host_type) const {
   bool ret = false;
   if (filepath.empty()) {
     MS_LOG(ERROR) << "Dump file path is null!";

@@ -666,8 +666,8 @@ bool AscendDeviceAddress::DumpMemToFile(bool trans_flag, const std::string &file
 
 #ifdef ENABLE_DEBUGGER
 bool AscendDeviceAddress::LoadMemToHost(bool trans_flag, const std::string &tensor_name, int execution_order,
-                                        const std::string &host_fmt, const std::vector<int> &host_shape,
+                                        const std::string &host_fmt, const ShapeVector &host_shape, TypeId host_type,
-                                        TypeId host_type, size_t slot, Debugger *debugger, bool keep_prev) const {
+                                        size_t slot, Debugger *debugger, bool keep_prev) const {
   bool ret = false;
   DebugServices *debug_services = debugger->debug_services();
   TensorLoader *tensor_loader = debug_services->tensor_loader();

@@ -25,6 +25,7 @@
 #include "runtime/device/ascend/ascend_memory_pool.h"
 #include "ir/dtype.h"
 #include "backend/kernel_compiler/kernel.h"
+#include "utils/shape_utils.h"
 
 namespace mindspore {
 #ifdef ENABLE_DEBUGGER

@@ -38,23 +39,22 @@ class AscendDeviceAddress : public DeviceAddress {
   explicit AscendDeviceAddress(void *ptr, size_t size, const std::string &format, TypeId type_id)
       : DeviceAddress(ptr, size, format, type_id) {}
   ~AscendDeviceAddress() override;
-  bool SyncDeviceToHost(const std::vector<int> &shape, size_t size, TypeId type, void *host_ptr) const override;
+  bool SyncDeviceToHost(const ShapeVector &shape, size_t size, TypeId type, void *host_ptr) const override;
-  bool SyncHostToDevice(const std::vector<int> &shape, size_t size, TypeId type, const void *host_ptr) const override;
+  bool SyncHostToDevice(const ShapeVector &shape, size_t size, TypeId type, const void *host_ptr) const override;
   DeviceAddressType DeviceType() const override { return DeviceAddressType::kAscend; }
 #ifdef ENABLE_DUMP_E2E
   bool DumpMemToFile(bool dump_mode, const std::string &filepath, const std::string &host_fmt,
-                     const std::vector<int> &host_shape, TypeId host_type) const;
+                     const ShapeVector &host_shape, TypeId host_type) const;
 #endif
 #ifdef ENABLE_DEBUGGER
   bool LoadMemToHost(bool dump_mode, const std::string &tensor_name, int execution_order, const std::string &host_fmt,
-                     const std::vector<int> &host_shape, TypeId host_type, size_t slot, Debugger *debugger,
+                     const ShapeVector &host_shape, TypeId host_type, size_t slot, Debugger *debugger,
                      bool keep_prev) const;
 #endif
 
  private:
-  bool SyncDeviceToHostAndConvertFormat(const std::vector<int> &shape, size_t size, TypeId type, void *host_ptr) const;
+  bool SyncDeviceToHostAndConvertFormat(const ShapeVector &shape, size_t size, TypeId type, void *host_ptr) const;
-  bool ConvertFormatAndSyncHostToDevice(const std::vector<int> &shape, size_t size, TypeId type,
-                                        const void *host_ptr) const;
+  bool ConvertFormatAndSyncHostToDevice(const ShapeVector &shape, size_t size, TypeId type, const void *host_ptr) const;
   bool SyncDeviceToHostAndConvertFormatBasedOnTransData(const std::vector<size_t> &host_shape,
                                                         const std::vector<size_t> &device_shape, size_t size,
                                                         mindspore::TypeId type, void *host_ptr) const;

@@ -39,6 +39,7 @@
 #include "backend/kernel_compiler/tbe/tbe_utils.h"
 #include "runtime/device/ascend/ascend_memory_manager.h"
 #include "debug/tensor_load.h"
+#include "utils/shape_utils.h"
 #ifdef MEM_REUSE_DEBUG
 #include "backend/optimizer/mem_reuse/mem_reuse_checker.h"
 #endif

@@ -231,7 +232,7 @@ void DumpOutput(mindspore::session::KernelGraph *graph, const string &dump_path,
     auto output_size = AnfAlgo::GetOutputTensorNum(node);
     for (size_t j = 0; j < output_size; ++j) {
       auto addr = AnfAlgo::GetOutputAddr(node, j);
-      std::vector<int> int_shapes;
+      ShapeVector int_shapes;
       if (trans_flag) {
         int_shapes = trans::GetRuntimePaddingShape(node, j);
       } else {

@@ -266,7 +267,7 @@ void DumpParameters(mindspore::session::KernelGraph *graph, const string &dump_p
      continue;
    }
    auto addr = AnfAlgo::GetOutputAddr(item, PRAMATER_OUTPUT_INDEX);
-   std::vector<int> int_shapes;
+   ShapeVector int_shapes;
    if (trans_flag) {
      int_shapes = trans::GetRuntimePaddingShape(item, PRAMATER_OUTPUT_INDEX);
    } else {

@@ -351,7 +352,7 @@ void LoadOutput(mindspore::session::KernelGraph *graph, Debugger *debugger) {
       auto format = kOpFormat_DEFAULT;
       string tensor_name = kernel_name + ':' + std::to_string(j);
       auto ascend_addr = dynamic_cast<const mindspore::device::ascend::AscendDeviceAddress *>(addr);
-      std::vector<int> int_shapes;
+      ShapeVector int_shapes;
       if (trans_flag) {
         int_shapes = trans::GetRuntimePaddingShape(node, j);
       } else {

@@ -387,7 +388,7 @@ void LoadParameters(mindspore::session::KernelGraph *graph, Debugger *debugger)
     auto format = kOpFormat_DEFAULT;
     string tensor_name = parameter_name + ':' + "0";
     auto ascend_addr = dynamic_cast<const mindspore::device::ascend::AscendDeviceAddress *>(addr);
-    std::vector<int> int_shapes;
+    ShapeVector int_shapes;
     if (trans_flag) {
       int_shapes = trans::GetRuntimePaddingShape(item, PRAMATER_OUTPUT_INDEX);
     } else {

@@ -20,8 +20,7 @@
 namespace mindspore {
 namespace device {
 namespace cpu {
-bool CPUDeviceAddress::SyncDeviceToHost(const std::vector<int> & /*shape*/, size_t size, TypeId type,
+bool CPUDeviceAddress::SyncDeviceToHost(const ShapeVector & /*shape*/, size_t size, TypeId type, void *host_ptr) const {
-                                        void *host_ptr) const {
   if (ptr_ == nullptr) {
     MS_LOG(ERROR) << "The pointer ptr_ is null!";
     return false;

@@ -50,7 +49,7 @@ bool CPUDeviceAddress::SyncDeviceToHost(const std::vector<int> & /*shape*/, size
   return true;
 }
 
-bool CPUDeviceAddress::SyncHostToDevice(const std::vector<int> & /*shape*/, size_t size, TypeId type,
+bool CPUDeviceAddress::SyncHostToDevice(const ShapeVector & /*shape*/, size_t size, TypeId type,
                                         const void *host_ptr) const {
   if (host_ptr == ptr_) {
     MS_LOG(DEBUG) << "host_ptr is equal to ptr_, request ignored.";

@@ -19,6 +19,7 @@
 #include <string>
 #include <vector>
 #include "runtime/device/device_address.h"
+#include "utils/shape_utils.h"
 
 namespace mindspore {
 namespace device {

@@ -32,8 +33,8 @@ class CPUDeviceAddress : public DeviceAddress {
 
   ~CPUDeviceAddress() override = default;
 
-  bool SyncDeviceToHost(const std::vector<int> &shape, size_t size, TypeId type, void *host_ptr) const override;
+  bool SyncDeviceToHost(const ShapeVector &shape, size_t size, TypeId type, void *host_ptr) const override;
-  bool SyncHostToDevice(const std::vector<int> &shape, size_t size, TypeId type, const void *host_ptr) const override;
+  bool SyncHostToDevice(const ShapeVector &shape, size_t size, TypeId type, const void *host_ptr) const override;
   DeviceAddressType DeviceType() const override { return DeviceAddressType::kCPU; }
 };
 }  // namespace cpu

@@ -25,6 +25,7 @@
 #include "backend/session/anf_runtime_algorithm.h"
 #include "backend/session/session_basic.h"
 #include "frontend/operator/ops.h"
+#include "utils/shape_utils.h"
 
 namespace mindspore {
 namespace device {

@@ -52,7 +53,7 @@ void CPUKernelRuntime::AssignValueNodeAddress(session::KernelGraph *kernel_graph
       }
       auto tensor = node_value->cast<TensorPtr>();
       MS_EXCEPTION_IF_NULL(tensor);
-      std::vector<int> data_shape = tensor->shape();
+      ShapeVector data_shape = tensor->shape();
       size_t tensor_size = std::accumulate(data_shape.begin(), data_shape.end(), type_size, std::multiplies<size_t>());
       DeviceAddressPtr address = CreateDeviceAddress(nullptr, tensor_size, kOpFormat_DEFAULT, kNumberTypeFloat32);
       MS_EXCEPTION_IF_NULL(address);

@@ -135,7 +136,7 @@ tensor::TensorPtr CPUKernelRuntime::CreatTensorForOutput(session::KernelGraph *k
     tensor::TensorPtr tensor = kernel_graph->GetInternalOutputTensor(node, index);
     if (tensor == nullptr) {
       auto shape = AnfAlgo::GetOutputInferShape(node, index);
-      std::vector<int> temp_shape;
+      ShapeVector temp_shape;
       (void)temp_shape.insert(temp_shape.end(), shape.begin(), shape.end());
       tensor = std::make_shared<tensor::Tensor>(infer_type_id, temp_shape);
       bool is_internal_output = kernel_graph->IsInternalOutput(node, index);

@@ -149,7 +150,7 @@ tensor::TensorPtr CPUKernelRuntime::CreatTensorForOutput(session::KernelGraph *k
     } else {
       if (infer_type_id != device_type_id) {
         size_t type_size = GetTypeByte(TypeIdToType(device_type_id));
-        std::vector<int> data_shape = tensor->shape();
+        ShapeVector data_shape = tensor->shape();
         size_t tensor_size = std::accumulate(data_shape.begin(), data_shape.end(), type_size, std::multiplies<size_t>());
         address->ptr_ = resource_manager_.MemMalloc(tensor_size);
         need_sync_outputs->emplace_back(tensor);

@@ -224,7 +225,7 @@ void CPUKernelRuntime::BindInputOutput(session::KernelGraph *kernel_graph, const
         tensor->data_type() == kNumberTypeInt32) {
       address->ptr_ = tensor->data_c();
     } else {
-      std::vector<int> data_shape = tensor->shape();
+      ShapeVector data_shape = tensor->shape();
       size_t tensor_size =
           std::accumulate(data_shape.begin(), data_shape.end(), sizeof(float), std::multiplies<size_t>());
       address->ptr_ = resource_manager_.MemMalloc(tensor_size);

@@ -22,6 +22,7 @@
 #include <memory>
 #include "ir/dtype.h"
 #include "ir/device_sync.h"
+#include "utils/shape_utils.h"
 
 namespace mindspore {
 namespace device {

@@ -60,7 +61,7 @@ class DeviceAddress : public mindspore::DeviceSync {
   size_t GetSize() const { return size_; }
   std::string format() const { return format_; }
   TypeId type_id() const { return type_id_; }
-  void set_host_shape(const std::vector<int> &shape) { host_shape_ = shape; }
+  void set_host_shape(const ShapeVector &shape) { host_shape_ = shape; }
   virtual void set_status(DeviceAddressStatus status) {}
   virtual DeviceAddressStatus status() const { return DeviceAddressStatus::kInDevice; }
   virtual DeviceAddressType DeviceType() const { return DeviceAddressType::kUnknown; }

@@ -77,7 +78,7 @@ class DeviceAddress : public mindspore::DeviceSync {
   TypeId type_id_{kNumberTypeFloat16};
   bool from_mem_pool_{false};
   uint8_t *communication_ptr_{nullptr};
-  std::vector<int> host_shape_{};
+  ShapeVector host_shape_{};
   friend class KernelRuntime;
   friend class MemoryManager;
   friend class mindspore::device::ascend::tasksink::TaskGenerator;

@@ -30,7 +30,7 @@
 namespace mindspore {
 namespace device {
 namespace gpu {
-bool GPUDeviceAddress::SyncDeviceToHost(const std::vector<int> &, size_t size, TypeId, void *host_ptr) const {
+bool GPUDeviceAddress::SyncDeviceToHost(const ShapeVector &, size_t size, TypeId, void *host_ptr) const {
   MS_EXCEPTION_IF_NULL(host_ptr);
   bool need_sync = (size != 0) && (size_ != 0) && (size <= size_);
   if (!need_sync) {

@@ -50,7 +50,7 @@ bool GPUDeviceAddress::SyncDeviceToHost(const std::vector<int> &, size_t size, T
   return GPUDeviceManager::GetInstance().CopyDeviceMemToHost(host_ptr, ptr_, size);
 }
 
-bool GPUDeviceAddress::SyncHostToDevice(const std::vector<int> &, size_t size, TypeId, const void *host_ptr) const {
+bool GPUDeviceAddress::SyncHostToDevice(const ShapeVector &, size_t size, TypeId, const void *host_ptr) const {
   MS_EXCEPTION_IF_NULL(host_ptr);
   bool need_sync = (size != 0) && (size_ != 0) && (size <= size_);
   if (!need_sync) {

@@ -80,8 +80,8 @@ GPUDeviceAddress::~GPUDeviceAddress() {
 }
 #ifdef ENABLE_DEBUGGER
 bool GPUDeviceAddress::LoadMemToHost(const std::string &tensor_name, int execution_order, const std::string &host_fmt,
-                                     const std::vector<int> &host_shape, TypeId host_type, size_t slot,
+                                     const ShapeVector &host_shape, TypeId host_type, size_t slot, Debugger *debugger,
-                                     Debugger *debugger, bool keep_prev) const {
+                                     bool keep_prev) const {
   bool ret = false;
   if (size_ == 0) {
     return true;

@@ -21,6 +21,8 @@
 #include <vector>
 #include "runtime/device/device_address.h"
 
+using ShapeVecotr = std::vector<int>;
+
 namespace mindspore {
 #ifdef ENABLE_DEBUGGER
 class Debugger;

@@ -34,15 +36,15 @@ class GPUDeviceAddress : public DeviceAddress {
       : DeviceAddress(ptr, size, format, type_id) {}
   ~GPUDeviceAddress() override;
 
-  bool SyncDeviceToHost(const std::vector<int> &shape, size_t size, TypeId type, void *host_ptr) const override;
+  bool SyncDeviceToHost(const ShapeVector &shape, size_t size, TypeId type, void *host_ptr) const override;
-  bool SyncHostToDevice(const std::vector<int> &shape, size_t size, TypeId type, const void *host_ptr) const override;
+  bool SyncHostToDevice(const ShapeVector &shape, size_t size, TypeId type, const void *host_ptr) const override;
   void set_status(DeviceAddressStatus status) { status_ = status; }
   DeviceAddressStatus status() const { return status_; }
   DeviceAddressType DeviceType() const override { return DeviceAddressType::kGPU; }
 
 #ifdef ENABLE_DEBUGGER
   bool LoadMemToHost(const std::string &tensor_name, int execution_order, const std::string &host_fmt,
-                     const std::vector<int> &host_shape, TypeId host_type, size_t slot, Debugger *debugger,
+                     const ShapeVector &host_shape, TypeId host_type, size_t slot, Debugger *debugger,
                      bool keep_prev) const;
 #endif
  private:

@@ -32,6 +32,7 @@
 #include "common/trans.h"
 #include "ir/dtype.h"
 #include "profiler/device/gpu/gpu_profiling.h"
+#include "utils/shape_utils.h"
 #ifdef ENABLE_DEBUGGER
 #include "debug/debug_services.h"
 #endif

@@ -107,7 +108,7 @@ void DumpOutput(mindspore::session::KernelGraph *graph, const string &dump_path,
       auto addr = AnfAlgo::GetOutputAddr(node, j);
       TypeId addr_type_id = addr->type_id();
       std::string addr_format = addr->format();
-      std::vector<int> int_shapes;
+      ShapeVector int_shapes;
       if (trans_flag) {
         int_shapes = trans::GetRuntimePaddingShape(node, j);
       } else {

@@ -153,7 +154,7 @@ void DumpParameters(mindspore::session::KernelGraph *graph, const string &dump_p
     auto addr = AnfAlgo::GetOutputAddr(item, PARAMETER_OUTPUT_INDEX);
     TypeId addr_type_id = addr->type_id();
     std::string addr_format = addr->format();
-    std::vector<int> int_shapes;
+    ShapeVector int_shapes;
     if (trans_flag) {
       int_shapes = trans::GetRuntimePaddingShape(item, PARAMETER_OUTPUT_INDEX);
     } else {

@@ -251,7 +252,7 @@ void LoadKernelData(Debugger *debugger, const CNodePtr &kernel,
       auto format = kOpFormat_DEFAULT;
       auto gpu_addr = std::make_unique<GPUDeviceAddress>(addr->addr, addr->size, format, type);
       string input_tensor_name = input_kernel_name + ':' + "0";
-      std::vector<int> int_shapes;
+      ShapeVector int_shapes;
       auto shape = AnfAlgo::GetOutputDeviceShape(input_kernel, PARAMETER_OUTPUT_INDEX);
       (void)std::transform(shape.begin(), shape.end(), std::back_inserter(int_shapes),
                            [](size_t inner_item) { return SizeToInt(inner_item); });

@@ -270,7 +271,7 @@ void LoadKernelData(Debugger *debugger, const CNodePtr &kernel,
       auto format = kOpFormat_DEFAULT;
       auto gpu_addr = std::make_unique<GPUDeviceAddress>(addr->addr, addr->size, format, type);
       string tensor_name = kernel_name + ':' + std::to_string(j);
-      std::vector<int> int_shapes;
+      ShapeVector int_shapes;
       auto shape = AnfAlgo::GetOutputDeviceShape(kernel, j);
       (void)std::transform(shape.begin(), shape.end(), std::back_inserter(int_shapes),
                            [](size_t inner_item) { return SizeToInt(inner_item); });

@@ -310,7 +311,7 @@ void LoadParameters(const session::KernelGraph *graph, Debugger *debugger, bool
     auto format = kOpFormat_DEFAULT;
     string tensor_name = parameter_name + ':' + "0";
     auto gpu_addr = dynamic_cast<const mindspore::device::gpu::GPUDeviceAddress *>(addr);
-    std::vector<int> int_shapes;
+    ShapeVector int_shapes;
     auto shape = AnfAlgo::GetOutputDeviceShape(item, PARAMETER_OUTPUT_INDEX);
     (void)std::transform(shape.begin(), shape.end(), std::back_inserter(int_shapes),
                          [](size_t inner_item) { return SizeToInt(inner_item); });

@@ -31,6 +31,7 @@
 #include "runtime/device/ascend/profiling/profiling_manager.h"
 #include "runtime/base.h"
 #include "runtime/device/ascend/ascend_stream_assign.h"
+#include "utils/shape_utils.h"
 
 namespace {
 constexpr auto kProfilingGraphId = "PROFILING_GRAPH_ID";

@@ -320,7 +321,7 @@ void KernelAdjust::CreateSwitchOpParameters(const std::shared_ptr<session::Kerne
                                             std::map<std::string, mindspore::ParameterPtr> *switch_loop_input) {
   MS_EXCEPTION_IF_NULL(kernel_graph_ptr);
   MS_EXCEPTION_IF_NULL(switch_loop_input);
-  std::vector<int> shp = {1};
+  ShapeVector shp = {1};
   tensor::TensorPtr tensor_ptr = std::make_shared<tensor::Tensor>(kInt32->type_id(), shp);
   MS_EXCEPTION_IF_NULL(tensor_ptr);
   mindspore::abstract::AbstractBasePtr paremeter_abstract_ptr = tensor_ptr->ToAbstract();

@@ -559,7 +560,7 @@ void KernelAdjust::LoadSwitchInputs(std::vector<tensor::TensorPtr> *inputs) {
   MS_LOG(INFO) << "---------------- LoadSwitchInputs---";
   MS_EXCEPTION_IF_NULL(inputs);
   // current loop count
-  std::vector<int> shp = {1};
+  ShapeVector shp = {1};
   tensor::TensorPtr cur_loop_count = std::make_shared<tensor::Tensor>(kInt32->type_id(), shp);
   MS_EXCEPTION_IF_NULL(cur_loop_count);
   int32_t *val = nullptr;

@@ -27,6 +27,7 @@
 #include "backend/session/anf_runtime_algorithm.h"
 #include "backend/optimizer/common/helper.h"
 #include "ir/value.h"
+#include "utils/shape_utils.h"
 using mindspore::kernel::Address;
 using mindspore::kernel::AddressPtr;
 

@@ -681,7 +682,7 @@ void KernelRuntime::AssignStaticMemoryValueNode(session::KernelGraph *graph) {
       MS_LOG(EXCEPTION) << "Cannot alloc address when flag is: " << kStaticMem << ", tensor size is: " << tensor_size;
     }
     AnfAlgo::SetOutputAddr(address, 0, value_node.get());
-    std::vector<int> shape = {1, SizeToInt(tensor_size)};
+    ShapeVector shape = {1, SizeToInt(tensor_size)};
     if (!address->SyncHostToDevice(shape, tensor_size, kNumberTypeUInt8, value.data())) {
       MS_LOG(EXCEPTION) << "kValueNode SyncHostToDevice fail!";
     }

@@ -94,8 +94,8 @@ GeFormat TransformUtil::ConvertFormat(const string &format) {
 
 static int64_t IntegerCastFunc(size_t temp) { return static_cast<int64_t>(temp); }
 
-std::shared_ptr<GeTensorDesc> TransformUtil::GetGeTensorDesc(const std::vector<int> &me_shape,
+std::shared_ptr<GeTensorDesc> TransformUtil::GetGeTensorDesc(const ShapeVector &me_shape, const MeDataType &me_type,
-                                                             const MeDataType &me_type, const std::string &format) {
+                                                             const std::string &format) {
   // convert me shape to ge shape
   std::vector<int64_t> ge_shape;
 

@@ -196,7 +196,7 @@ GeTensorPtr TransformUtil::ConvertTensor(const MeTensorPtr &tensor, const std::s
 }
 
 std::vector<MeTensorPtr> TransformUtil::ConvertGeTensors(const std::vector<GeTensorPtr> &ge_tensors,
-                                                         const std::vector<std::vector<int>> &request_dims) {
+                                                         const std::vector<ShapeVector> &request_dims) {
   std::vector<MeTensorPtr> outputs;
 
   for (size_t index = 0; index < ge_tensors.size(); index++) {

@@ -204,7 +204,7 @@ std::vector<MeTensorPtr> TransformUtil::ConvertGeTensors(const std::vector<GeTen
     if (index < request_dims.size()) {
      me_tensor_ptr = ConvertGeTensor(ge_tensors[index], request_dims[index]);
    } else {
-     std::vector<int> empty_shape;
+     ShapeVector empty_shape;
      me_tensor_ptr = ConvertGeTensor(ge_tensors[index], empty_shape);
    }
 

@@ -270,7 +270,7 @@ MeDataType TransformUtil::ConvertGeDataType(const GeDataType &type) {
 }
 
 namespace {
-bool IsGeShapeCompatible(const GeShape &ge_shape, const std::vector<int> &request_dims) {
+bool IsGeShapeCompatible(const GeShape &ge_shape, const ShapeVector &request_dims) {
   MS_LOG(INFO) << "GeTensor's shape is " << TransformUtil::PrintVector(ge_shape.GetDims());
   MS_LOG(INFO) << "Me request shape is " << TransformUtil::PrintVector(request_dims);
 

@@ -307,20 +307,20 @@ bool IsGeShapeCompatible(const GeShape &ge_shape, const std::vector<int> &reques
 }
 }  // namespace
 
-GeShape TransformUtil::ConvertMeShape(const std::vector<int> &me_dims) {
+GeShape TransformUtil::ConvertMeShape(const ShapeVector &me_dims) {
   std::vector<int64_t> ge_dims;
   (void)std::copy(me_dims.begin(), me_dims.end(), std::back_inserter(ge_dims));
   return GeShape(ge_dims);
 }
 
-std::vector<int> TransformUtil::ConvertGeShape(const GeShape &ge_shape) {
+ShapeVector TransformUtil::ConvertGeShape(const GeShape &ge_shape) {
-  std::vector<int> me_dims;
+  ShapeVector me_dims;
   std::vector<int64_t> ge_dims = ge_shape.GetDims();
   (void)std::copy(ge_dims.begin(), ge_dims.end(), std::back_inserter(me_dims));
   return me_dims;
 }
 
-std::vector<int> TransformUtil::ConvertGeShape(const GeShape &ge_shape, const std::vector<int> &request_dims) {
+ShapeVector TransformUtil::ConvertGeShape(const GeShape &ge_shape, const ShapeVector &request_dims) {
   vector<int> ret;
   if (ge_shape.GetDimNum() == 0) {
     MS_LOG(DEBUG) << "GeTensor's shape is scalar";

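The two conversion helpers above are plain element-wise copies between ShapeVector and GE's std::vector<int64_t> dims, so they compose into a round trip. A small usage sketch (the dimension values are made up for illustration):

// Sketch: ShapeVector dims survive a Me -> GE -> Me round trip.
ShapeVector me_dims = {1, 3, 224, 224};
GeShape ge_shape = TransformUtil::ConvertMeShape(me_dims);
ShapeVector back = TransformUtil::ConvertGeShape(ge_shape);  // back == me_dims
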
@@ -336,7 +336,7 @@ std::vector<int> TransformUtil::ConvertGeShape(const GeShape &ge_shape, const st
   return ret;
 }
 
-MeTensorPtr TransformUtil::GenerateMeTensor(const GeTensorPtr &ge_tensor, const std::vector<int> &me_dims,
+MeTensorPtr TransformUtil::GenerateMeTensor(const GeTensorPtr &ge_tensor, const ShapeVector &me_dims,
                                             const TypeId &me_type) {
   MeTensor me_tensor(me_type, me_dims);
 

@@ -380,7 +380,7 @@ MeTensorPtr TransformUtil::ConvertGeTensor(const GeTensorPtr &ge_tensor) {
 }
 
 // if request_dims is empty, use ge tensor's shape,otherwise convert to request shape
-MeTensorPtr TransformUtil::ConvertGeTensor(const GeTensorPtr ge_tensor, const std::vector<int> &request_dims) {
+MeTensorPtr TransformUtil::ConvertGeTensor(const GeTensorPtr ge_tensor, const ShapeVector &request_dims) {
   MS_EXCEPTION_IF_NULL(ge_tensor);
   GeShape ge_shape = ge_tensor->GetTensorDesc().GetShape();
   vector<int> me_dims = ConvertGeShape(ge_shape, request_dims);

@ -26,8 +26,8 @@
|
||||||
#include "ir/dtype.h"
|
#include "ir/dtype.h"
|
||||||
#include "ir/tensor.h"
|
#include "ir/tensor.h"
|
||||||
#include "transform/graph_ir/types.h"
|
#include "transform/graph_ir/types.h"
|
||||||
|
|
||||||
#include "graph/tensor.h"
|
#include "graph/tensor.h"
|
||||||
|
#include "utils/shape_utils.h"
|
||||||
|
|
||||||
namespace mindspore {
|
namespace mindspore {
|
||||||
namespace transform {
|
namespace transform {
|
||||||
|
@@ -73,7 +73,7 @@ class TransformUtil {
 * Return:
 * [shared_ptr<GeTensorDesc>] the shared pointer of ge tensor description
 * */
-static std::shared_ptr<GeTensorDesc> GetGeTensorDesc(const std::vector<int> &shape, const MeDataType &me_type,
+static std::shared_ptr<GeTensorDesc> GetGeTensorDesc(const ShapeVector &shape, const MeDataType &me_type,
 const std::string &format);
 
 /*
@@ -107,20 +107,20 @@ class TransformUtil {
 /*
 * Parameters:
 * tensor: [GeTensor] the data tensor in GE
-* request_dims [std::vector<int>] the output Me tensors must adjust to this shapes
+* request_dims [ShapeVector] the output Me tensors must adjust to this shapes
 * Return:
 * [MeTensor] the data tensor in ME
 * */
-static MeTensorPtr ConvertGeTensor(GeTensorPtr ge_tensor, const std::vector<int> &request_dims);
+static MeTensorPtr ConvertGeTensor(GeTensorPtr ge_tensor, const ShapeVector &request_dims);
 /*
 * Parameters:
 * ge_tensors: [std::vector<GeTensorPtr>] the data tensor in GE
-* request_dims [std::vector<std::vector<int>>] the output Me tensors must adjust to this shapes
+* request_dims [std::vector<ShapeVector>] the output Me tensors must adjust to this shapes
 * Return:
 * [std::vector<MeTensorPtr>] the data tensor in ME
 * */
 static std::vector<MeTensorPtr> ConvertGeTensors(const std::vector<GeTensorPtr> &ge_tensors,
-const std::vector<std::vector<int>> &request_dims);
+const std::vector<ShapeVector> &request_dims);
 /*
 * Parameters:
 * ge_tensors: [std::vector<GeTensorPtr>] the data tensor in GE
@@ -131,13 +131,12 @@ class TransformUtil {
 /*
 * Parameters:
 * ge_tensor: [GeTensor] the data tensor in GE
-* me_dims: [std::vector<int>] the shape of created Me tensor
+* me_dims: [ShapeVector] the shape of created Me tensor
 * me_type: [TypeId] the type of created Me tensor
 * Return:
 * [MeTensor] the data tensor in ME
 * */
-static MeTensorPtr GenerateMeTensor(const GeTensorPtr &ge_tensor, const std::vector<int> &me_dims,
-const TypeId &me_type);
+static MeTensorPtr GenerateMeTensor(const GeTensorPtr &ge_tensor, const ShapeVector &me_dims, const TypeId &me_type);
 /*
 * Parameters:
 * type: [GeDataType] the ge tensor data type
@@ -148,11 +147,11 @@ class TransformUtil {
 
 /*
 * Parameters:
-* me_dims: [std::vector<int>] the me shape
+* me_dims: [ShapeVector] the me shape
 * Return:
 * [GeShape] the ge shape
 * */
-static GeShape ConvertMeShape(const std::vector<int> &me_dims);
+static GeShape ConvertMeShape(const ShapeVector &me_dims);
 
 /*
 * Parameters:
@@ -160,7 +159,7 @@ class TransformUtil {
 * Return:
 * [vector<int>] the me shape
 * */
-static std::vector<int> ConvertGeShape(const GeShape &ge_shape);
+static ShapeVector ConvertGeShape(const GeShape &ge_shape);
 
 /* Function:
 * Convert GeShape to Me request shape, Support pattern:
@@ -176,11 +175,11 @@ class TransformUtil {
 * Return:
 * [vector<int>] the me shape
 * */
-static std::vector<int> ConvertGeShape(const GeShape &ge_shape, const std::vector<int> &request_dims);
+static ShapeVector ConvertGeShape(const GeShape &ge_shape, const ShapeVector &request_dims);
 
 /*
 * Parameters:
-* vec: [std::vector<int>] the vector to print
+* vec: [ShapeVector] the vector to print
 * Return:
 * [string] value string
 * */
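For orientation, a minimal call-site sketch of the updated ConvertGeTensor overload, not part of this diff: the include path, namespace usings and the concrete request shape are assumptions, only the signature comes from the hunks above.

#include "transform/graph_ir/util.h"  // assumed header for TransformUtil in this tree
#include "utils/shape_utils.h"

namespace example {
using mindspore::transform::GeTensorPtr;
using mindspore::transform::MeTensorPtr;
using mindspore::transform::TransformUtil;

// Convert a GE tensor back into a ME tensor, adjusting it to the caller's requested shape.
MeTensorPtr ToMeTensor(const GeTensorPtr &ge_tensor) {
  ShapeVector request_dims = {1, 3, 224, 224};  // hypothetical request shape
  return TransformUtil::ConvertGeTensor(ge_tensor, request_dims);
}
}  // namespace example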
@@ -22,6 +22,7 @@
 #include "pipeline/jit/parse/data_converter.h"
 #include "pipeline/jit/parse/python_adapter.h"
 #include "utils/visible.h"
+#include "utils/shape_utils.h"
 
 namespace mindspore {
 namespace callbacks {
@@ -36,7 +37,7 @@ using mindspore::transform::Status;
 using mindspore::transform::TransformUtil;
 
 bool GetParameterShape(const FuncGraphPtr &graph, const std::string &param_name,
-const std::shared_ptr<std::vector<int>> &shape) {
+const std::shared_ptr<ShapeVector> &shape) {
 if (graph == nullptr) {
 MS_LOG(ERROR) << "Graph is null, can not get graph parameter";
 return false;
@@ -74,7 +75,7 @@ static TensorPtr GetMeTensorTransformed(uint32_t graph_id, const std::string &pa
 return nullptr;
 }
 
-std::shared_ptr<std::vector<int>> parameter_shape_ptr = std::make_shared<std::vector<int>>();
+std::shared_ptr<ShapeVector> parameter_shape_ptr = std::make_shared<ShapeVector>();
 if (!GetParameterShape(anf_graph, parameter_name, parameter_shape_ptr)) {
 MS_LOG(ERROR) << "Can not get parameter shape during callback";
 return nullptr;
@@ -133,7 +134,7 @@ static TensorPtr GetMeTensorForSummary(const std::string &name, const std::share
 // process the scalar type summary
 // Because the ge tensor is dim = 4, so set the (1,1,1,1)-->(1,)
 // We do the (1,) shape is scalar
-auto shape = std::vector<int>({ONE_SHAPE});
+auto shape = ShapeVector({ONE_SHAPE});
 return TransformUtil::ConvertGeTensor(ge_tensor_ptr, shape);
 }
 if (tname == "[:Tensor]" || tname == "[:Histogram]") {
@@ -33,6 +33,7 @@
 #include "ir/param_info.h"
 #include "utils/base_ref_extends.h"
 #include "utils/ms_context.h"
+#include "utils/shape_utils.h"
 
 namespace mindspore {
 py::object BuiltinsToPyData(const Any &value);
@@ -394,7 +395,7 @@ py::object VectorRefToPyData(const VectorRef &value_list) {
 AbstractBasePtr PyListDtype2AbstractTensor(const py::object &shape_obj, const py::object &type_obj,
 const py::object &min_shape, const py::object &max_shape) {
 if ((py::isinstance<py::list>(shape_obj) || py::isinstance<py::tuple>(shape_obj)) && py::isinstance<Type>(type_obj)) {
-auto ret_vec = shape_obj.cast<std::vector<int>>();
+auto ret_vec = shape_obj.cast<ShapeVector>();
 auto ret_dtype = type_obj.cast<TypePtr>();
 MS_EXCEPTION_IF_NULL(ret_dtype);
 // if the size of shape list is empty, return an scalar abstract
@@ -403,13 +404,13 @@ AbstractBasePtr PyListDtype2AbstractTensor(const py::object &shape_obj, const py
 return abs_scalar;
 }
 AbstractBasePtr tensor = nullptr;
-std::vector<int> min_shape_vec;
+ShapeVector min_shape_vec;
-std::vector<int> max_shape_vec;
+ShapeVector max_shape_vec;
 if (!min_shape.is_none()) {
-min_shape_vec = min_shape.cast<std::vector<int>>();
+min_shape_vec = min_shape.cast<ShapeVector>();
 }
 if (!max_shape.is_none()) {
-max_shape_vec = max_shape.cast<std::vector<int>>();
+max_shape_vec = max_shape.cast<ShapeVector>();
 }
 auto ret_shape = std::make_shared<abstract::Shape>(ret_vec, min_shape_vec, max_shape_vec);
 if (ret_dtype->isa<TensorType>()) {
@@ -26,6 +26,7 @@
 #include "abstract/abstract_value.h"
 #include "proto/onnx.pb.h"
 #include "utils/log_adapter.h"
+#include "utils/shape_utils.h"
 
 using std::string;
 
@@ -96,7 +97,7 @@ bool MSANFModelParser::BuildParameterForFuncGraph(const ParameterPtr &node, cons
 return false;
 }
 const onnx::TensorShapeProto &tensor_shape = tensor_typeproto.shape();
-std::vector<int> shape;
+ShapeVector shape;
 for (int i = 0; i < tensor_shape.dim_size(); ++i) {
 shape.push_back(tensor_shape.dim(i).dim_value());
 }
@@ -241,7 +242,7 @@ bool MSANFModelParser::GetAttrValueForCNode(const PrimitivePtr &prim, const onnx
 bool MSANFModelParser::ObtainValueNodeInTensorForm(const std::string &value_node_name,
 const onnx::TensorProto &attr_tensor) {
 const int attr_tensor_type = attr_tensor.data_type();
-std::vector<int> shape;
+ShapeVector shape;
 for (int i = 0; i < attr_tensor.dims_size(); ++i) {
 shape.push_back(attr_tensor.dims(i));
 }
@@ -355,7 +356,7 @@ bool MSANFModelParser::BuildValueNodeForFuncGraph(const onnx::NodeProto &node_pr
 }
 
 AbstractBasePtr MSANFModelParser::GetAbstractForCNode(const onnx::AttributeProto &attr_proto) {
-std::vector<int> shape_vec;
+ShapeVector shape_vec;
 const onnx::TensorProto &attr_tensor = attr_proto.t();
 for (int i = 0; i < attr_tensor.dims_size(); ++i) {
 shape_vec.push_back(attr_tensor.dims(i));
@@ -471,7 +472,7 @@ bool MSANFModelParser::BuildReturnForFuncGraph(const FuncGraphPtr &outputFuncGra
 const onnx::ValueInfoProto &output_node = importProto.output(0);
 const onnx::TypeProto &output_typeproto = output_node.type();
 int output_type = output_typeproto.tensor_type().elem_type();
-std::vector<int> output_shape;
+ShapeVector output_shape;
 for (int i = 0; i < output_typeproto.tensor_type().shape().dim_size(); ++i) {
 output_shape.push_back(output_typeproto.tensor_type().shape().dim(i).dim_value());
 }
@@ -22,6 +22,7 @@
 #include "ir/tensor.h"
 #include "pybind11/pybind11.h"
 #include "utils/ms_utils.h"
+#include "utils/shape_utils.h"
 #ifndef NO_DLIB
 #include "tdt/tsd_client.h"
 #include "tdt/tdt_host_interface.h"
@@ -59,7 +60,7 @@ std::string GetParseType(const std::string &tensorType_) {
 return type_iter->second;
 }
 
-bool ParseTensorShape(const std::string &input_shape_str, std::vector<int> *const tensor_shape, size_t *dims) {
+bool ParseTensorShape(const std::string &input_shape_str, ShapeVector *const tensor_shape, size_t *dims) {
 if (tensor_shape == nullptr) {
 return false;
 }
@@ -189,7 +190,7 @@ bool ConvertDataItem2Tensor(const std::vector<tdt::DataItem> &items) {
 continue;
 }
 
-std::vector<int> tensor_shape;
+ShapeVector tensor_shape;
 size_t totaldims = 1;
 if (!ParseTensorShape(item.tensorShape_, &tensor_shape, &totaldims)) {
 MS_LOG(ERROR) << "Tensor print can not parse tensor shape, receive info" << item.tensorShape_;
@@ -235,7 +236,7 @@ bool SaveDataItem2File(const std::vector<tdt::DataItem> &items, const std::strin
 }
 }
 
-std::vector<int> tensor_shape;
+ShapeVector tensor_shape;
 size_t totaldims = 1;
 if (!ParseTensorShape(item.tensorShape_, &tensor_shape, &totaldims)) {
 MS_LOG(ERROR) << "Tensor print can not parse tensor shape, receive info" << item.tensorShape_;
@@ -33,6 +33,7 @@
 #include "ir/value.h"
 #include "ir/tensor.h"
 #include "abstract/dshape.h"
+#include "utils/shape_utils.h"
 
 namespace mindspore {
 namespace abstract {
@@ -250,7 +251,7 @@ class AbstractUndetermined : public AbstractBase {
 }
 set_shape(shape);
 }
-AbstractUndetermined(const TypePtr &element_type, const std::vector<int> &shape)
+AbstractUndetermined(const TypePtr &element_type, const ShapeVector &shape)
 : AbstractBase(kAnyValue), element_(std::make_shared<AbstractScalar>(kAnyValue, element_type)) {
 if (element_type == nullptr) {
 MS_LOG(EXCEPTION) << "element_type is nullptr";
@@ -273,8 +274,7 @@ class AbstractTensor : public AbstractUndetermined {
 // only element_ and value, shape track are valid member, type track are unknown.
 explicit AbstractTensor(const AbstractBasePtr &element, const BaseShapePtr &shape = std::make_shared<Shape>())
 : AbstractUndetermined(element, shape) {}
-AbstractTensor(const TypePtr &element_type, const std::vector<int> &shape)
-: AbstractUndetermined(element_type, shape) {}
+AbstractTensor(const TypePtr &element_type, const ShapeVector &shape) : AbstractUndetermined(element_type, shape) {}
 explicit AbstractTensor(const tensor::TensorPtr &tensor) : AbstractUndetermined(tensor->Dtype(), tensor->shape()) {}
 ~AbstractTensor() override = default;
 MS_DECLARE_PARENT(AbstractTensor, AbstractUndetermined)
@@ -633,7 +633,7 @@ class AbstractRowTensor : public AbstractUndetermined {
 public:
 explicit AbstractRowTensor(const AbstractBasePtr &element, const BaseShapePtr &shape = std::make_shared<Shape>())
 : AbstractUndetermined(element, shape) {}
-AbstractRowTensor(const TypePtr &element_type, const std::vector<int> &shape)
+AbstractRowTensor(const TypePtr &element_type, const ShapeVector &shape)
 : AbstractUndetermined(element_type, shape) {}
 ~AbstractRowTensor() override = default;
 MS_DECLARE_PARENT(AbstractRowTensor, AbstractUndetermined)
@@ -662,7 +662,7 @@ class AbstractSparseTensor : public AbstractUndetermined {
 public:
 explicit AbstractSparseTensor(const AbstractBasePtr &element, const BaseShapePtr &shape = std::make_shared<Shape>())
 : AbstractUndetermined(element, shape) {}
-AbstractSparseTensor(const TypePtr &element_type, const std::vector<int> &shape)
+AbstractSparseTensor(const TypePtr &element_type, const ShapeVector &shape)
 : AbstractUndetermined(element_type, shape) {}
 ~AbstractSparseTensor() override = default;
 MS_DECLARE_PARENT(AbstractSparseTensor, AbstractUndetermined)
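A small usage sketch of the ShapeVector-based abstract-tensor constructors above, not part of this diff; the helper name and the 2x3 shape are invented, the types come from the surrounding hunks.

#include <memory>
#include "abstract/abstract_value.h"
#include "utils/shape_utils.h"

namespace example {
// Build the abstract value of an int32 tensor with a static 2x3 shape.
mindspore::abstract::AbstractTensorPtr MakeInt32TensorAbstract() {
  auto element_type = std::make_shared<mindspore::Int>(32);  // Int(32) mirrors usage elsewhere in this patch
  ShapeVector shape = {2, 3};
  return std::make_shared<mindspore::abstract::AbstractTensor>(element_type, shape);
}
}  // namespace example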
@@ -29,6 +29,7 @@
 
 #include "utils/log_adapter.h"
 #include "base/base.h"
+#include "utils/shape_utils.h"
 
 namespace mindspore {
 namespace abstract {
@@ -69,12 +70,12 @@ class Shape : public BaseShape {
 (void)std::transform(list_in.begin(), list_in.end(), std::back_inserter(shape_),
 [](const int64_t &value) { return static_cast<int>(value); });
 }
-explicit Shape(const std::vector<int> &list) : shape_(list) {}
+explicit Shape(const ShapeVector &list) : shape_(list) {}
 explicit Shape(const std::vector<int64_t> &list) {
 (void)std::transform(list.begin(), list.end(), std::back_inserter(shape_),
 [](const int64_t &value) { return static_cast<int>(value); });
 }
-Shape(const std::vector<int> &list, const std::vector<int> &min_shape, const std::vector<int> &max_shape)
+Shape(const ShapeVector &list, const ShapeVector &min_shape, const ShapeVector &max_shape)
 : shape_(list), min_shape_(min_shape), max_shape_(max_shape) {}
 ~Shape() override = default;
 MS_DECLARE_PARENT(Shape, BaseShape)
@@ -83,13 +84,13 @@ class Shape : public BaseShape {
 bool operator==(const BaseShape &other) const override;
 BaseShapePtr Clone() const override { return std::make_shared<Shape>(shape_, min_shape_, max_shape_); }
 void Broaden() override;
-std::vector<int> &shape() { return shape_; }
+ShapeVector &shape() { return shape_; }
-std::vector<int> &min_shape() { return min_shape_; }
+ShapeVector &min_shape() { return min_shape_; }
-std::vector<int> &max_shape() { return max_shape_; }
+ShapeVector &max_shape() { return max_shape_; }
 
-std::vector<int> shape_; // use SHP_ANY to implement the any shape in python
+ShapeVector shape_; // use SHP_ANY to implement the any shape in python
-std::vector<int> min_shape_; // record mininum length for each dynamic dimention
+ShapeVector min_shape_; // record mininum length for each dynamic dimention
-std::vector<int> max_shape_; // record maximum length for each dynamic dimention
+ShapeVector max_shape_; // record maximum length for each dynamic dimention
 };
 using ShapePtr = std::shared_ptr<Shape>;
 using ShapePtrList = std::vector<ShapePtr>;
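For illustration only, not part of this diff: building a Shape with one dynamic dimension through the ShapeVector-based constructor and the min/max bounds shown above. The concrete dimension values are invented.

#include <memory>
#include "abstract/dshape.h"
#include "utils/shape_utils.h"

namespace example {
// A shape whose batch dimension is unknown at compile time, bounded by [1, 32].
mindspore::abstract::ShapePtr MakeDynamicBatchShape() {
  ShapeVector dims = {mindspore::abstract::Shape::SHP_ANY, 224, 224, 3};
  ShapeVector min_shape = {1, 224, 224, 3};
  ShapeVector max_shape = {32, 224, 224, 3};
  return std::make_shared<mindspore::abstract::Shape>(dims, min_shape, max_shape);
}
}  // namespace example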
@@ -17,11 +17,12 @@
 #include "abstract/infer_functions.h"
 #include "abstract/utils.h"
 #include "abstract/param_validator.h"
+#include "utils/shape_utils.h"
 
 namespace mindspore {
 namespace abstract {
 namespace {
-std::vector<int> BroadcastShape(std::vector<int> shpx, std::vector<int> shpy) {
+ShapeVector BroadcastShape(ShapeVector shpx, ShapeVector shpy) {
 int dlen = SizeToInt(shpx.size()) - SizeToInt(shpy.size());
 if (dlen < 0) {
 for (int i = 0; i < -dlen; ++i) {
@@ -35,7 +36,7 @@ std::vector<int> BroadcastShape(std::vector<int> shpx, std::vector<int> shpy) {
 if (shpx.size() != shpy.size()) {
 MS_LOG(EXCEPTION) << "Failure: shpx.size() != shpy.size().";
 }
-std::vector<int> shp;
+ShapeVector shp;
 for (size_t i = 0; i < shpx.size(); i++) {
 auto a = shpx[i];
 auto b = shpy[i];
@@ -50,7 +51,7 @@ std::vector<int> BroadcastShape(std::vector<int> shpx, std::vector<int> shpy) {
 } else if (a == b) {
 shp.push_back(a);
 } else {
-return std::vector<int>();
+return ShapeVector();
 }
 }
 return shp;
@@ -89,18 +90,18 @@ AbstractBasePtr InferImplBroadCastShape(const AnalysisEnginePtr &, const Primiti
 auto value_tuple_x = xs->BuildValue()->cast<ValueTuplePtr>();
 MS_EXCEPTION_IF_NULL(value_tuple_x);
 auto shp_tuple_x = value_tuple_x->value();
-std::vector<int> shp_x;
+ShapeVector shp_x;
 (void)std::transform(std::begin(shp_tuple_x), std::end(shp_tuple_x), std::back_inserter(shp_x),
 [](const ValuePtr &e) -> int { return GetValue<int>(e); });
 
 auto value_tuple_y = ys->BuildValue()->cast<ValueTuplePtr>();
 MS_EXCEPTION_IF_NULL(value_tuple_y);
 auto shp_tuple_y = value_tuple_y->value();
-std::vector<int> shp_y;
+ShapeVector shp_y;
 (void)std::transform(std::begin(shp_tuple_y), std::end(shp_tuple_y), std::back_inserter(shp_y),
 [](const ValuePtr &e) -> int { return GetValue<int>(e); });
 
-std::vector<int> res = BroadcastShape(shp_x, shp_y);
+ShapeVector res = BroadcastShape(shp_x, shp_y);
 if (res.empty()) {
 MS_LOG(EXCEPTION) << "BroadcastShape fail: " << args_spec_list[0]->ToString() << ","
 << args_spec_list[1]->ToString();
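The broadcasting rule used by BroadcastShape above, restated as a self-contained sketch against a local alias so it compiles outside the MindSpore tree. It illustrates the semantics only and is not the project's implementation.

#include <cstddef>
#include <vector>

namespace example {
using ShapeVector = std::vector<int>;

// Returns the broadcast shape of two shapes, or an empty vector when they conflict.
ShapeVector BroadcastShapeSketch(ShapeVector shpx, ShapeVector shpy) {
  // Left-pad the shorter shape with 1s so both shapes have the same rank.
  if (shpx.size() < shpy.size()) {
    shpx.insert(shpx.begin(), shpy.size() - shpx.size(), 1);
  } else {
    shpy.insert(shpy.begin(), shpx.size() - shpy.size(), 1);
  }
  ShapeVector out;
  for (std::size_t i = 0; i < shpx.size(); ++i) {
    if (shpx[i] == 1) {
      out.push_back(shpy[i]);
    } else if (shpy[i] == 1 || shpx[i] == shpy[i]) {
      out.push_back(shpx[i]);
    } else {
      return ShapeVector();  // incompatible dimensions, mirroring the empty-result convention above
    }
  }
  return out;
}
}  // namespace example
// e.g. BroadcastShapeSketch({2, 1, 3}, {4, 3}) yields {2, 4, 3}.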
@@ -130,7 +131,7 @@ AbstractBasePtr InferImplTile(const AnalysisEnginePtr &, const PrimitivePtr &pri
 MS_LOG(EXCEPTION) << "shape's data field can't be anything: " << args_spec_list[1]->ToString();
 }
 
-std::vector<int> mul_shp;
+ShapeVector mul_shp;
 auto value_tuple_mul = mul_shp_value->cast<ValueTuplePtr>();
 auto mul_shp_data = value_tuple_mul->value();
 (void)std::transform(std::begin(mul_shp_data), std::end(mul_shp_data), std::back_inserter(mul_shp),
@@ -140,7 +141,7 @@ AbstractBasePtr InferImplTile(const AnalysisEnginePtr &, const PrimitivePtr &pri
 << input_shape->shape().size() << ", value size is: " << mul_shp_data.size() << ".";
 }
 
-std::vector<int> result_shp;
+ShapeVector result_shp;
 for (size_t i = 0; i < mul_shp_data.size(); ++i) {
 result_shp.push_back(input_shape->shape()[i] * mul_shp[i]);
 }
@@ -195,9 +196,9 @@ AbstractBasePtr InferImplUnique(const AnalysisEnginePtr &, const PrimitivePtr &p
 if (shape->shape().size() != 1) {
 MS_LOG(EXCEPTION) << "Rank of " << op_name << "'s input must be 1.";
 }
-std::vector<int> ids_shape = {Shape::SHP_ANY};
+ShapeVector ids_shape = {Shape::SHP_ANY};
-std::vector<int> min_shape = {1};
+ShapeVector min_shape = {1};
-std::vector<int> max_shape = shape->shape();
+ShapeVector max_shape = shape->shape();
 auto ids =
 std::make_shared<AbstractTensor>(input->element(), std::make_shared<Shape>(ids_shape, min_shape, max_shape));
 auto ids_idx = std::make_shared<AbstractTensor>(std::make_shared<Int>(32), shape->shape());
@@ -18,6 +18,7 @@
 #include "abstract/utils.h"
 #include "abstract/param_validator.h"
 #include "utils/check_convert_utils.h"
+#include "utils/shape_utils.h"
 
 namespace mindspore {
 namespace abstract {
@@ -82,7 +83,7 @@ AbstractBasePtr InferImplPooling(const AnalysisEnginePtr &, const PrimitivePtr &
 
 int h_out = ((h_input + 2 * padding - (window - 1) - 1) / stride) + 1;
 int w_out = ((w_input + 2 * padding - (window - 1) - 1) / stride) + 1;
-std::vector<int> shape_out = {input_shape->shape()[0], input_shape->shape()[1], h_out, w_out};
+ShapeVector shape_out = {input_shape->shape()[0], input_shape->shape()[1], h_out, w_out};
 AbstractBasePtr ret = input_tensor->Broaden();
 ret->set_shape(std::make_shared<Shape>(shape_out));
 return ret;
@@ -271,11 +272,11 @@ AbstractBasePtr InferImplBiasAddGrad(const AnalysisEnginePtr &, const PrimitiveP
 MS_EXCEPTION_IF_NULL(args_spec_list[0]);
 ShapePtr shape_y = dyn_cast<Shape>(args_spec_list[0]->GetShapeTrack());
 MS_EXCEPTION_IF_NULL(shape_y);
-std::vector<int> y_dims = shape_y->shape();
+ShapeVector y_dims = shape_y->shape();
 if (y_dims.size() < 2) {
 MS_LOG(EXCEPTION) << primitive->name() << " input y backprop, dim should >= 2, while " << y_dims.size() << ".";
 }
-std::vector<int> bias_dims = {y_dims[1]};
+ShapeVector bias_dims = {y_dims[1]};
 ShapePtr ret_shape = std::make_shared<Shape>(bias_dims);
 AbstractBasePtr ret = args_spec_list[0]->Broaden();
 ret->set_shape(ret_shape);
@@ -25,6 +25,7 @@
 #include "abstract/utils.h"
 #include "utils/ms_context.h"
 #include "utils/symbolic.h"
+#include "utils/shape_utils.h"
 
 namespace mindspore {
 namespace abstract {
@@ -215,7 +216,7 @@ AbstractBasePtr InferImplMakeRowTensor(const AnalysisEnginePtr &, const Primitiv
 auto dense_shape_value = dense_shape->BuildValue()->cast<ValueTuplePtr>();
 MS_EXCEPTION_IF_NULL(dense_shape_value);
 auto shp = dense_shape_value->value();
-std::vector<int> dense_shape_vec;
+ShapeVector dense_shape_vec;
 (void)std::transform(std::begin(shp), std::end(shp), std::back_inserter(dense_shape_vec),
 [](const ValuePtr &e) -> int {
 auto elem = GetValue<int>(e);
@@ -309,7 +310,7 @@ AbstractBasePtr InferImplMakeSparseTensor(const AnalysisEnginePtr &, const Primi
 auto dense_shape_value = dense_shape->BuildValue()->cast<ValueTuplePtr>();
 MS_EXCEPTION_IF_NULL(dense_shape_value);
 auto shp = dense_shape_value->value();
-std::vector<int> dense_shape_vec;
+ShapeVector dense_shape_vec;
 (void)std::transform(std::begin(shp), std::end(shp), std::back_inserter(dense_shape_vec),
 [](const ValuePtr &e) -> int {
 auto elem = GetValue<int>(e);
@@ -23,6 +23,7 @@
 #include <memory>
 #include "utils/symbolic.h"
 #include "abstract/param_validator.h"
+#include "utils/shape_utils.h"
 
 namespace mindspore {
 namespace abstract {
@@ -62,7 +63,7 @@ ShapePtr ShapeJoin(const ShapePtr &shape1, const ShapePtr &shape2) {
 MS_EXCEPTION(ValueError) << "Unsupported shape join. shape1 = " << shape1->ToString()
 << ", shape2 = " << shape2->ToString();
 }
-std::vector<int> dims;
+ShapeVector dims;
 bool has_dynamic_shape = false;
 dims.resize(shape1->shape().size());
 for (std::size_t i = 0; i < shape1->shape().size(); i++) {
@@ -80,8 +81,8 @@ ShapePtr ShapeJoin(const ShapePtr &shape1, const ShapePtr &shape2) {
 return std::make_shared<Shape>(dims);
 }
 // calculate dynamic shape
-std::vector<int> min_dims(dims.size());
+ShapeVector min_dims(dims.size());
-std::vector<int> max_dims(dims.size());
+ShapeVector max_dims(dims.size());
 for (size_t i = 0; i < dims.size(); ++i) {
 if (dims[i] != Shape::SHP_ANY) {
 min_dims[i] = max_dims[i] = dims[i];
@@ -213,7 +214,7 @@ int GetPositiveAxis(int axis_value, size_t increment) {
 
 // Return if two shapes can be broadcast.
 // Broadcast shape is placed in broadcast_output_shape.
-std::vector<int> RealBroadcast(const std::string &op, std::vector<int> x_shape, std::vector<int> y_shape) {
+ShapeVector RealBroadcast(const std::string &op, ShapeVector x_shape, ShapeVector y_shape) {
 std::reverse(x_shape.begin(), x_shape.end());
 std::reverse(y_shape.begin(), y_shape.end());
 // Fill a placeholder value 1 which will be replaced later.
@@ -221,7 +222,7 @@ std::vector<int> RealBroadcast(const std::string &op, std::vector<int> x_shape,
 y_shape.resize(std_len, 1);
 x_shape.resize(std_len, 1);
 
-std::vector<int> broadcast_shape;
+ShapeVector broadcast_shape;
 for (size_t i = 0; i < std_len; i++) {
 int x_i = x_shape[i]; // i-th dimension of x
 int y_i = y_shape[i]; // i-th dimension of y
@@ -22,6 +22,7 @@
 #include <string>
 
 #include "ir/dtype/type.h"
+#include "utils/shape_utils.h"
 
 using std::string;
 
@@ -29,9 +30,8 @@ namespace mindspore {
 // Interface for data synchornize between device and host.
 class DeviceSync {
 public:
-virtual bool SyncDeviceToHost(const std::vector<int> &shape, size_t size, TypeId type, void *host_ptr) const = 0;
+virtual bool SyncDeviceToHost(const ShapeVector &shape, size_t size, TypeId type, void *host_ptr) const = 0;
-virtual bool SyncHostToDevice(const std::vector<int> &shape, size_t size, TypeId type,
-const void *host_ptr) const = 0;
+virtual bool SyncHostToDevice(const ShapeVector &shape, size_t size, TypeId type, const void *host_ptr) const = 0;
 virtual void *GetMutablePtr() const = 0;
 };
 using DeviceSyncPtr = std::shared_ptr<DeviceSync>;
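An illustrative caller of the ShapeVector-based DeviceSync interface, not part of this diff; the include path is an assumption and the shape, dtype and buffer handling are placeholders.

#include <cstddef>
#include "ir/device_sync.h"  // assumed location of the DeviceSync interface
#include "utils/shape_utils.h"

namespace example {
// Copy a 2x3 float32 device tensor back into a host buffer through the interface.
bool CopyToHost(const mindspore::DeviceSyncPtr &device_sync, void *host_buffer) {
  ShapeVector shape = {2, 3};
  size_t byte_size = 2 * 3 * sizeof(float);
  return device_sync->SyncDeviceToHost(shape, byte_size, mindspore::kNumberTypeFloat32, host_buffer);
}
}  // namespace example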
@@ -27,9 +27,9 @@ namespace tensor {
 // MetaTensor has default type_id_ which is TypeId::kTypeUnknown.
 MetaTensor::MetaTensor() : data_type_(TypeId::kTypeUnknown) {}
 
-MetaTensor::MetaTensor(const TypeId data_type, const std::vector<int> &shape) : data_type_(data_type), shape_(shape) {}
+MetaTensor::MetaTensor(const TypeId data_type, const ShapeVector &shape) : data_type_(data_type), shape_(shape) {}
 
-MetaTensor::MetaTensor(const TypePtr &type_ptr, const std::vector<int> &shape) {
+MetaTensor::MetaTensor(const TypePtr &type_ptr, const ShapeVector &shape) {
 TypeId data_type = TypeId::kTypeUnknown;
 if (type_ptr != nullptr) {
 data_type = type_ptr->type_id();
@@ -27,6 +27,7 @@
 #include "ir/dtype.h"
 #include "utils/convert_utils_base.h"
 #include "utils/hashing.h"
+#include "utils/shape_utils.h"
 
 // brief mindspore namespace.
 //
@@ -38,7 +39,6 @@ namespace mindspore {
 //
 // A sub namespace in ME to support tensor related definition.
 namespace tensor {
-
 // brief Device info of Tensor
 //
 // Includes the format and data type of a tensor.
@@ -64,9 +64,9 @@ class MetaTensor : public Value {
 // information of a Tensor. The following codes will create a 2x3 float
 // param data_type The data type of the tensor.
 // param shape The shape of the tensor.
-MetaTensor(const TypeId data_type, const std::vector<int> &shape);
+MetaTensor(const TypeId data_type, const ShapeVector &shape);
 
-MetaTensor(const TypePtr &type_ptr, const std::vector<int> &shape);
+MetaTensor(const TypePtr &type_ptr, const ShapeVector &shape);
 // brief Constructs a MetaTensor object from an existing MetaTensor instance.
 //
 // The constructed MetaTensor object will have the same data type and shape as the
@@ -116,7 +116,7 @@ class MetaTensor : public Value {
 // order it represents.
 //
 // return A const vector<int> which represents the shape of the tensor.
-const std::vector<int> &shape() const { return shape_; }
+const ShapeVector &shape() const { return shape_; }
 
 // brief Sets the shape of a tensor.
 //
@@ -127,7 +127,7 @@ class MetaTensor : public Value {
 //
 // param shape The shape of the tensor.
 // return The shape's size.
-size_t set_shape(const std::vector<int> &shape) {
+size_t set_shape(const ShapeVector &shape) {
 this->shape_ = shape;
 return shape_.size();
 }
@@ -184,11 +184,11 @@ class MetaTensor : public Value {
 
 // brief Shape of the tensor.
 //
-// A std::vector<int> container is used to store the shape of a tensor.
+// A ShapeVector container is used to store the shape of a tensor.
 // Each element of the vector represents the size of a dimension of the tensor.
 // The order of each element in the vector is as same as the the dimension's
 // order it represents. If the dimension size is not set, its value will be -1.
-std::vector<int> shape_;
+ShapeVector shape_;
 
 // brief Device info of Tensor
 //
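A short sketch of the MetaTensor API after the change, not part of this diff; it only exercises the constructor, shape() and set_shape() shown above, and the concrete shapes are examples.

#include <cstddef>
#include "ir/meta_tensor.h"
#include "utils/shape_utils.h"

namespace example {
// MetaTensor only records dtype and shape; no data buffer is allocated here.
size_t DescribeMeta() {
  ShapeVector shape = {2, 3};
  mindspore::tensor::MetaTensor meta(mindspore::kNumberTypeFloat32, shape);
  ShapeVector flat = {6};
  // shape() hands back the stored ShapeVector; set_shape stores the new shape and returns its rank (1 here).
  return meta.set_shape(flat);
}
}  // namespace example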
@@ -24,6 +24,7 @@
 
 #include "ir/visitor.h"
 #include "base/core_ops.h"
+#include "utils/shape_utils.h"
 
 namespace mindspore {
 ///
@@ -599,7 +600,7 @@ class PConstant : public PBase<PConstant<T> > {
 
 auto tensor_abstract = node->abstract()->cast<abstract::AbstractTensorPtr>();
 TypePtr tensor_type_ptr = tensor_abstract->element()->BuildType();
-std::vector<int> tensor_shape = tensor_abstract->shape()->shape();
+ShapeVector tensor_shape = tensor_abstract->shape()->shape();
 
 auto new_tensor_ptr = std::make_shared<tensor::Tensor>(tensor_type_ptr->type_id(), tensor_shape);
 size_t mem_size = GetTypeByte(tensor_type_ptr) * IntToSize(new_tensor_ptr->ElementsNum());
@@ -619,7 +620,7 @@ class PConstant : public PBase<PConstant<T> > {
 return nullptr;
 }
 auto x_abstract = x->abstract()->cast<abstract::AbstractTensorPtr>();
-std::vector<int> x_shape = x_abstract->shape()->shape();
+ShapeVector x_shape = x_abstract->shape()->shape();
 if (x_shape != tensor_shape) {
 return nullptr;
 }
@@ -664,7 +665,7 @@ class PConstant : public PBase<PConstant<T> > {
 
 auto tensor_abstract = node->abstract()->cast<abstract::AbstractTensorPtr>();
 TypePtr tensor_type_ptr = tensor_abstract->element()->BuildType();
-std::vector<int> tensor_shape = tensor_abstract->shape()->shape();
+ShapeVector tensor_shape = tensor_abstract->shape()->shape();
 
 auto new_tensor_ptr = std::make_shared<tensor::Tensor>(tensor_type_ptr->type_id(), tensor_shape);
 size_t mem_size = GetTypeByte(tensor_type_ptr) * IntToSize(new_tensor_ptr->ElementsNum());
@@ -744,7 +745,7 @@ class PConstant : public PBase<PConstant<T> > {
 return nullptr;
 }
 
-std::vector<int> tensor_out_shape = tensor_3_abstract->shape()->shape();
+ShapeVector tensor_out_shape = tensor_3_abstract->shape()->shape();
 int data_out_size = std::accumulate(tensor_out_shape.begin(), tensor_out_shape.end(), 1, std::multiplies<int>());
 if ((tensor_ptr_1->DataSize() > 1) && (tensor_ptr_1->DataSize() != data_out_size)) {
 return nullptr;
@@ -50,11 +50,11 @@ static TypeId TypeIdOf(const TypePtr &data_type, TypeId defaultTypeId) {
 return data_type ? data_type->type_id() : defaultTypeId;
 }
 
-static size_t SizeOf(const std::vector<int> &shape) {
+static size_t SizeOf(const ShapeVector &shape) {
 return std::accumulate(shape.begin(), shape.end(), size_t(1), std::multiplies<size_t>());
 }
 
-static std::string ShapeToString(const std::vector<int> &shape) {
+static std::string ShapeToString(const ShapeVector &shape) {
 std::string str = "[";
 const size_t count = shape.size();
 for (size_t i = 0; i < count; ++i) {
@@ -93,7 +93,7 @@ std::unique_ptr<T[]> NewData(Scalar scalar) {
 }
 
 template <typename T>
-std::unique_ptr<T[]> CopyData(const std::vector<int> &shape, void *const data, TypeId data_type) {
+std::unique_ptr<T[]> CopyData(const ShapeVector &shape, void *const data, TypeId data_type) {
 const size_t size = SizeOf(shape);
 switch (data_type) {
 case kNumberTypeBool: {
@@ -151,7 +151,7 @@ std::unique_ptr<T[]> CopyData(const std::vector<int> &shape, void *const data, T
 }
 
 template <typename T>
-std::unique_ptr<T[]> CopyData(const std::vector<int> &shape, void *const data, size_t data_len) {
+std::unique_ptr<T[]> CopyData(const ShapeVector &shape, void *const data, size_t data_len) {
 size_t size = SizeOf(shape);
 if (size * sizeof(T) != data_len) {
 MS_LOG(EXCEPTION) << "Incorrect tensor input data length " << data_len << ", expect " << size * sizeof(T)
@@ -165,21 +165,21 @@ std::unique_ptr<T[]> CopyData(const std::vector<int> &shape, void *const data, s
 template <typename T>
 class TensorDataImpl : public TensorData {
 public:
-explicit TensorDataImpl(const std::vector<int> &shape) : ndim_(shape.size()), data_size_(SizeOf(shape)) {}
+explicit TensorDataImpl(const ShapeVector &shape) : ndim_(shape.size()), data_size_(SizeOf(shape)) {}
 ~TensorDataImpl() = default;
 
-TensorDataImpl(const std::vector<int> &shape, void *data, size_t data_len)
+TensorDataImpl(const ShapeVector &shape, void *data, size_t data_len)
 : ndim_(shape.size()), data_size_(SizeOf(shape)), data_(CopyData<T>(shape, data, data_len)) {}
 
-TensorDataImpl(const std::vector<int> &shape, void *data, TypeId data_type)
+TensorDataImpl(const ShapeVector &shape, void *data, TypeId data_type)
 : ndim_(shape.size()), data_size_(SizeOf(shape)), data_(CopyData<T>(shape, data, data_type)) {}
 
 template <typename U>
-TensorDataImpl(const std::vector<int> &shape, const U *input, size_t size)
+TensorDataImpl(const ShapeVector &shape, const U *input, size_t size)
 : ndim_(shape.size()), data_size_(SizeOf(shape)), data_(NewData<T>(input, size)) {}
 
 template <typename Scalar>
-TensorDataImpl(const std::vector<int> &shape, Scalar scalar)
+TensorDataImpl(const ShapeVector &shape, Scalar scalar)
 : ndim_(shape.size()), data_size_(SizeOf(shape)), data_(NewData<T>(scalar)) {}
 
 ssize_t size() const override { return static_cast<ssize_t>(data_size_); }
@@ -219,7 +219,7 @@ class TensorDataImpl : public TensorData {
 std::equal(data_.get(), data_.get() + data_size_, ptr->data_.get());
 }
 
-std::string ToString(const TypeId type, const std::vector<int> &shape) const override {
+std::string ToString(const TypeId type, const ShapeVector &shape) const override {
 constexpr auto valid =
 std::is_same<T, bool>::value || std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value ||
 std::is_same<T, int16_t>::value || std::is_same<T, int32_t>::value || std::is_same<T, int64_t>::value ||
@@ -307,8 +307,7 @@ class TensorDataImpl : public TensorData {
 }
 }
 
-void SummaryStringRecursive(std::ostringstream &ss, const std::vector<int> &shape, ssize_t *cursor,
-ssize_t depth) const {
+void SummaryStringRecursive(std::ostringstream &ss, const ShapeVector &shape, ssize_t *cursor, ssize_t depth) const {
 if (depth >= static_cast<ssize_t>(ndim_)) {
 return;
 }
@@ -366,7 +365,7 @@ class TensorDataImpl : public TensorData {
 };
 
 template <typename... Args>
-TensorDataPtr MakeTensorData(TypeId data_type, const std::vector<int> &shape, const Args... args) {
+TensorDataPtr MakeTensorData(TypeId data_type, const ShapeVector &shape, const Args... args) {
 switch (data_type) {
 case kNumberTypeBool:
 return std::make_shared<TensorDataImpl<bool>>(shape, args...);
@@ -416,16 +415,16 @@ Tensor::Tensor(const Tensor &tensor, TypeId data_type)
 device_sync_(tensor.device_sync_),
 padding_type_(tensor.padding_type()) {}
 
-Tensor::Tensor(TypeId data_type, const std::vector<int> &shape, TensorDataPtr data)
+Tensor::Tensor(TypeId data_type, const ShapeVector &shape, TensorDataPtr data)
 : MetaTensor(data_type, shape), data_(std::move(data)), id_(MakeId()) {}
 
-Tensor::Tensor(TypeId data_type, const std::vector<int> &shape)
+Tensor::Tensor(TypeId data_type, const ShapeVector &shape)
 : Tensor(data_type, shape, MakeTensorData(data_type, shape)) {}
 
-Tensor::Tensor(TypeId data_type, const std::vector<int> &shape, void *data, size_t data_len)
+Tensor::Tensor(TypeId data_type, const ShapeVector &shape, void *data, size_t data_len)
 : Tensor(data_type, shape, MakeTensorData(data_type, shape, data, data_len)) {}
 
-Tensor::Tensor(TypeId data_type, const std::vector<int> &shape, void *data, TypeId src_data_type)
+Tensor::Tensor(TypeId data_type, const ShapeVector &shape, void *data, TypeId src_data_type)
 : Tensor(data_type, shape, MakeTensorData(data_type, shape, data, src_data_type)) {}
 
 Tensor::Tensor(const std::vector<int64_t> &input, const TypePtr &data_type)
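For illustration, not part of this diff: constructing tensors through the ShapeVector-based constructors above, once from a host buffer and once without initial data. The shape and fill values are arbitrary examples.

#include <memory>
#include <vector>
#include "ir/tensor.h"
#include "utils/shape_utils.h"

namespace example {
// Allocate a float32 2x3 tensor and copy six host floats into it (data_len is in bytes).
mindspore::tensor::TensorPtr MakeFilledTensor() {
  ShapeVector shape = {2, 3};
  std::vector<float> values(6, 1.5f);
  return std::make_shared<mindspore::tensor::Tensor>(mindspore::kNumberTypeFloat32, shape, values.data(),
                                                     values.size() * sizeof(float));
}

// "Lazy allocated" variant from the doc comment in tensor.h: no initial data buffer is copied in.
mindspore::tensor::TensorPtr MakeLazyTensor() {
  return std::make_shared<mindspore::tensor::Tensor>(mindspore::kNumberTypeFloat32, ShapeVector{2, 3});
}
}  // namespace example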
@@ -26,6 +26,7 @@
 #include "ir/meta_tensor.h"
 #include "utils/log_adapter.h"
 #include "base/float16.h"
+#include "utils/shape_utils.h"
 
 // brief mindspore namespace.
 //
@@ -67,7 +68,7 @@ class TensorData {
 std::equal(this_data, this_data + nbytes(), other_data));
 }
 /// To string.
-virtual std::string ToString(const TypeId type, const std::vector<int> &shape) const = 0;
+virtual std::string ToString(const TypeId type, const ShapeVector &shape) const = 0;
 };
 
 using TensorDataPtr = std::shared_ptr<TensorData>;
@@ -91,31 +92,31 @@ class Tensor : public MetaTensor {
 // brief Create tensor with the given shared tensor data.
 //
 // param data_type [TypeId] Data type of the tensor.
-// param shape The shape represented by std::vector<int> of the tensor.
+// param shape The shape represented by ShapeVector of the tensor.
 // param data The shared tensor data.
-Tensor(TypeId data_type, const std::vector<int> &shape, TensorDataPtr data);
+Tensor(TypeId data_type, const ShapeVector &shape, TensorDataPtr data);
 
 // brief Create a lazy allocated tensor.
 //
 // param data_type [TypeId] Data type of the tensor.
-// param shape The shape represented by std::vector<int> of the tensor.
+// param shape The shape represented by ShapeVector of the tensor.
-Tensor(TypeId data_type, const std::vector<int> &shape);
+Tensor(TypeId data_type, const ShapeVector &shape);
 
 // brief Create a tensor with input data buffer.
 //
 // param data_type [TypeId] Data type of the tensor.
-// param shape The shape represented by std::vector<int> of the tensor.
+// param shape The shape represented by ShapeVector of the tensor.
 // param data The input data to be copied into tensor.
 // param data_len The length of data in bytes.
-Tensor(TypeId data_type, const std::vector<int> &shape, void *data, size_t data_len);
+Tensor(TypeId data_type, const ShapeVector &shape, void *data, size_t data_len);
 
 // brief Create a tensor with input data buffer and given source data type.
 //
 // param data_type [TypeId] Data type of the tensor.
-// param shape The shape represented by std::vector<int> of the tensor.
+// param shape The shape represented by ShapeVector of the tensor.
 // param data The input data to be copied into tensor.
 // param src_data_type The source data type.
-Tensor(TypeId data_type, const std::vector<int> &shape, void *data, TypeId src_data_type);
+Tensor(TypeId data_type, const ShapeVector &shape, void *data, TypeId src_data_type);
 
 // brief Create 1 dimension tensor from an int vector.
 //
@@ -185,8 +186,8 @@ class Tensor : public MetaTensor {
 
 // brief Get the tensor's shape for C++
 //
-// return [std::vector<int>]
+// return [ShapeVector]
-std::vector<int> shape_c(void) const { return shape(); }
+ShapeVector shape_c(void) const { return shape(); }
 
 // brief Get Tensor data pointer for c++ type
 //
@@ -0,0 +1,23 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_SHAPE_UTILS_INFO_H_
+#define MINDSPORE_SHAPE_UTILS_INFO_H_
+
+#include <vector>
+using ShapeVector = std::vector<int>;
+
+#endif // MINDSPORE_SHAPE_UTILS_INFO_H_
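The new header is the single switch point for this migration: ShapeVector is an alias for std::vector<int> today, presumably so that a later change can widen shape elements to int64_t by editing only this file. A hypothetical helper written against the alias (the function name is invented, not from this patch) keeps compiling across such a switch:

#include <cstddef>
#include "utils/shape_utils.h"

namespace example {
// Element count of a shape, expressed only in terms of the alias.
size_t ElementCount(const ShapeVector &shape) {
  size_t count = 1;
  for (const auto dim : shape) {
    count *= static_cast<size_t>(dim);
  }
  return count;
}
}  // namespace example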