forked from mindspore-Ecosystem/mindspore
commit b3c7515ba0
@@ -30,7 +30,6 @@
 namespace mindspore {
 #define MS_API __attribute__((visibility("default")))
 namespace inference {
-
 enum DataType {
   kMSI_Unknown = 0,
   kMSI_Bool = 1,
@@ -209,7 +208,6 @@ class VectorInferTensorWrapRequest : public RequestBase {
   }
   const std::vector<InferTensor> &tensor_list_;
 };
-
 }  // namespace inference
 }  // namespace mindspore
 #endif  // MINDSPORE_INCLUDE_INFER_TENSOR_H_
@@ -25,7 +25,6 @@

 namespace mindspore {
 namespace inference {

 enum StatusCode { SUCCESS = 0, FAILED, INVALID_INPUTS };

 class Status {
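Only the opening line of `class Status` survives in this hunk. For orientation, here is a rough sketch of how a status type commonly wraps an enum like `StatusCode`; this is hypothetical code, not the actual MindSpore definition:

// Hypothetical sketch of a status wrapper over the StatusCode enum shown
// above; the real mindspore::inference::Status may differ.
#include <iostream>

enum StatusCode { SUCCESS = 0, FAILED, INVALID_INPUTS };

class Status {
 public:
  Status() : status_code_(FAILED) {}
  Status(StatusCode code) : status_code_(code) {}  // implicit, so `return SUCCESS;` works
  bool IsSuccess() const { return status_code_ == SUCCESS; }
  StatusCode code() const { return status_code_; }

 private:
  StatusCode status_code_;
};

int main() {
  Status s = SUCCESS;  // implicit conversion from the enum
  std::cout << (s.IsSuccess() ? "ok" : "error") << '\n';
  return 0;
}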
@@ -57,7 +57,6 @@ CNodePtr Insert(const FuncGraphPtr &func_graph, const CNodePtr &cnode, const std
     } else {
       new_node = kernel_graph->NewCNode(cnode);
     }
-
   } else if (op_name == kBasicLSTMCellWeightGradOpName) {
     std::vector<AnfNodePtr> make_tuple_inputs = {NewValueNode(prim::kPrimMakeTuple)};
     size_t out_num = AnfAlgo::GetOutputTensorNum(cnode);
@@ -208,7 +208,6 @@ void AscendControlParser::LinkGraph(NotNull<KernelGraphPtr> kg) {
   memo.clear();
   // assign label resource
   device::ascend::AscendLabelAssign::GetInstance().AssignLabel(kg);
   // AttachChildGraphToReturnNode(kg, NOT_NULL(&memo));
 }

 void AscendControlParser::EraseParameter(NotNull<KernelGraphPtr> root_graph,
@@ -68,7 +68,6 @@ void E2eDumpUtil::DumpGPUMemToFile(const std::string &file_path, const std::stri
   TensorLoader *tensor_loader = debug_services->tensor_loader();
   auto ret = tensor_loader->DumpTensorToFile(original_kernel_name, trans_flag, file_path, format, int_shapes, type,
                                              addr->type_id(), addr->format(), slot);
-
   if (!ret) {
     MS_LOG(ERROR) << "DumpTensorToFile Failed: flag:" << std::to_string(trans_flag) << ", path:" << file_path
                   << ", host_format:" << format;
@@ -29,7 +29,6 @@ void AscendMemoryManager::MallocDeviceMemory() {
   auto context_mem = GetDeviceMemSizeFromContext();
   device_mem_size_ = context_mem == 0 ? kAscendDeviceMemSize : context_mem;
   auto ret = rtMalloc(reinterpret_cast<void **>(&device_mem_base_), device_mem_size_, RT_MEMORY_HBM);
-
  if (ret != RT_ERROR_NONE) {
    MS_EXCEPTION(DeviceProcessError) << "rtMalloc mem size[" << device_mem_size_ << "] fail, ret[" << ret << "]";
  }
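The hunk above follows a common pattern: take a configured size, fall back to a default when none was set, allocate once, and fail loudly on a non-zero status. Below is a host-side sketch of that pattern; `fake_rt_malloc` and `kDefaultDeviceMemSize` are stand-ins for the real Ascend runtime calls (`rtMalloc`, `RT_MEMORY_HBM`, `MS_EXCEPTION`):

// Stand-in sketch of the allocate-or-throw pattern from MallocDeviceMemory;
// fake_rt_malloc imitates a C-style runtime API that returns a status code.
#include <cstdint>
#include <cstdlib>
#include <stdexcept>
#include <string>

constexpr uint64_t kDefaultDeviceMemSize = 30ULL << 30;  // assumed default, in bytes

static int fake_rt_malloc(void **base, uint64_t size) {
  *base = std::malloc(size);         // the real API allocates device HBM instead
  return *base == nullptr ? -1 : 0;  // 0 plays the role of RT_ERROR_NONE
}

void MallocDeviceMemory(uint64_t context_mem, void **device_mem_base) {
  // Fall back to the default when the context did not configure a size.
  uint64_t device_mem_size = context_mem == 0 ? kDefaultDeviceMemSize : context_mem;
  int ret = fake_rt_malloc(device_mem_base, device_mem_size);
  if (ret != 0) {
    // The real code raises MS_EXCEPTION(DeviceProcessError) with the same details.
    throw std::runtime_error("rtMalloc mem size[" + std::to_string(device_mem_size) +
                             "] fail, ret[" + std::to_string(ret) + "]");
  }
}

int main() {
  void *base = nullptr;
  MallocDeviceMemory(1024, &base);  // small size keeps the host-side demo safe
  std::free(base);
  return 0;
}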
@@ -542,7 +542,6 @@ void AscendStreamAssign::InsertStreamActiveForCommon(const NotNull<KernelGraphPt
-
     if (AnfAlgo::GetCNodeName(cur_cnode_ptr) == kStreamSwitchOpName) {
       MS_LOG(INFO) << "Insert StreamActive op after FP StreamSwitch for stream parallel";
       // UpdateStreamSwitch(graph_ptr, cur_cnode_ptr, &update_cnode_list);
       update_cnode_list.emplace_back(cur_cnode_ptr);
     } else {
       update_cnode_list.emplace_back(cur_cnode_ptr);
@@ -57,9 +57,6 @@ constexpr const char *kOpTypeOpDebug = "Opdebug";
 namespace mindspore {
 namespace device {
 namespace ascend {
-static void DumpKernelOutput(const CNodePtr &kernel, void *args, NotNull<aicpu::dump::Task *> task);
-static void DumpKernelInput(const CNodePtr &kernel, void *args, NotNull<aicpu::dump::Task *> task);
-static void RtLoadDumpData(const aicpu::dump::OpMappingInfo &dump_info, void **ptr);

 DataDumper::~DataDumper() {
   ReleaseDevMem(&dev_load_mem_);
@@ -328,7 +325,7 @@ void DataDumper::OpDebugUnregister() {
   }
 }

-void RtLoadDumpData(const aicpu::dump::OpMappingInfo &dump_info, void **ptr) {
+void DataDumper::RtLoadDumpData(const aicpu::dump::OpMappingInfo &dump_info, void **ptr) {
   std::string proto_str;
   size_t proto_size = dump_info.ByteSizeLong();
   bool ret = dump_info.SerializeToString(&proto_str);
@@ -357,7 +354,7 @@ void RtLoadDumpData(const aicpu::dump::OpMappingInfo &dump_info, void **ptr) {
   }
 }

-void DumpKernelOutput(const CNodePtr &kernel, void *args, NotNull<aicpu::dump::Task *> task) {
+void DataDumper::DumpKernelOutput(const CNodePtr &kernel, void *args, NotNull<aicpu::dump::Task *> task) {
   if (!DumpJsonParser::GetInstance().OutputNeedDump()) {
     MS_LOG(INFO) << "Skip dump output";
     return;
@@ -391,7 +388,7 @@ void DumpKernelOutput(const CNodePtr &kernel, void *args, NotNull<aicpu::dump::T
   }
 }

-void DumpKernelInput(const CNodePtr &kernel, void *args, NotNull<aicpu::dump::Task *> task) {
+void DataDumper::DumpKernelInput(const CNodePtr &kernel, void *args, NotNull<aicpu::dump::Task *> task) {
   if (!DumpJsonParser::GetInstance().InputNeedDump()) {
     MS_LOG(INFO) << "Skip dump input";
     return;
@@ -65,6 +65,9 @@ class DataDumper {
   void SetOpDebugMappingInfo(const NotNull<aicpu::dump::OpMappingInfo *> dump_info) const;
   void ConstructDumpTask(NotNull<const CNodePtr &> kernel, NotNull<aicpu::dump::Task *> dump_task) const;
   void GetNeedDumpKernelList(NotNull<std::map<std::string, CNodePtr> *> kernel_map) const;
+  static void DumpKernelOutput(const CNodePtr &kernel, void *args, NotNull<aicpu::dump::Task *> task);
+  static void DumpKernelInput(const CNodePtr &kernel, void *args, NotNull<aicpu::dump::Task *> task);
+  static void RtLoadDumpData(const aicpu::dump::OpMappingInfo &dump_info, void **ptr);

  std::function<void *()> model_handle_;
  uint32_t debug_task_id_;
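Taken together, the data_dumper hunks are a single refactor: three file-scope static helpers in data_dumper.cc become private static members of DataDumper, so the forward declarations leave the .cc file and each definition gains a `DataDumper::` qualifier while call sites stay untouched. A minimal sketch of the same move, using hypothetical names (`Widget`, `Helper`) rather than the real ones:

// Minimal sketch of the refactor pattern applied in this commit: a
// file-scope static helper moves into the class as a static member, so
// only the in-class declaration and the qualified definition change.
#include <iostream>

class Widget {
 public:
  void Run() const { Helper(42); }  // call site is unchanged by the move

 private:
  // Before the refactor this was `static void Helper(int);` at file scope
  // in widget.cc; as a class member it can be declared in the header and
  // gains access to private members if it ever needs them.
  static void Helper(int value);
};

// The definition gains the `Widget::` qualifier, exactly like
// `void DataDumper::RtLoadDumpData(...)` in the hunks above.
void Widget::Helper(int value) { std::cout << "helper saw " << value << '\n'; }

int main() {
  Widget{}.Run();
  return 0;
}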
@@ -541,7 +541,6 @@ KernelSelectStatus SelectKernelInfo(const CNodePtr &kernel_node, KernelType kern
   kernel::KernelQuery(kernel_node, &kernel_info_list, kernel_type);
   auto select_status = SetMatchedKernelInfo(kernel_node, kernel_info_list);
   // If aicore not find valid kernel info reloading aicpu kernel info list to find it
-
   if (select_status == kNoMatched) {
     MS_LOG(WARNING) << "The node [" << kernel_node->DebugString()
                     << "] cannot find valid TBE kernel info, try to get aicpu kernel info";
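This hunk shows a two-stage selection: query one kernel backend, and if nothing matched, warn and retry with aicpu. Below is a self-contained sketch of that fallback shape; all names in it (`QueryKernels` and friends) are illustrative, not MindSpore APIs:

// Hedged sketch of the query-then-fallback selection from SelectKernelInfo.
#include <iostream>
#include <string>
#include <vector>

enum SelectStatus { kNoMatched, kStatusAllMatched };

std::vector<std::string> QueryKernels(const std::string &backend) {
  // Imitate a node that only the aicpu backend can serve.
  return backend == "aicpu" ? std::vector<std::string>{"aicpu_kernel"} : std::vector<std::string>{};
}

SelectStatus SetMatchedKernelInfo(const std::vector<std::string> &candidates) {
  return candidates.empty() ? kNoMatched : kStatusAllMatched;
}

int main() {
  auto candidates = QueryKernels("tbe");  // first choice: aicore/TBE kernels
  auto status = SetMatchedKernelInfo(candidates);
  if (status == kNoMatched) {
    std::cout << "no valid TBE kernel info, try to get aicpu kernel info\n";
    candidates = QueryKernels("aicpu");  // second stage: aicpu fallback
    status = SetMatchedKernelInfo(candidates);
  }
  std::cout << (status == kStatusAllMatched ? "matched" : "unmatched") << '\n';
  return 0;
}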
@@ -33,7 +33,7 @@ inline void *GetMPIAdapterHandle() {
   return handle;
 }

-inline void *GetMPIAdapterFunc(const char *name) {
+void *GetMPIAdapterFunc(const char *name) {
   static void *handle = GetMPIAdapterHandle();
   if (handle == nullptr) {
     MS_LOG(EXCEPTION) << "Load lib " << name << " failed, make sure you have installed it!";
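`GetMPIAdapterFunc` resolves symbols from a library that is dlopen'ed once via a function-local static handle. Here is a standalone sketch of that lazy dlopen/dlsym pattern; `libm.so.6` and `cos` are placeholders chosen so the sketch runs on a typical Linux host (link with -ldl), whereas the real code loads an MPI adapter library:

// Minimal sketch of the lazy dlopen/dlsym pattern behind
// GetMPIAdapterHandle/GetMPIAdapterFunc.
#include <dlfcn.h>
#include <iostream>

inline void *GetHandle() {
  // `static` makes dlopen run once, on first use, like the original.
  static void *handle = dlopen("libm.so.6", RTLD_LAZY);
  return handle;
}

void *GetFunc(const char *name) {
  static void *handle = GetHandle();
  if (handle == nullptr) {
    std::cerr << "Load lib failed for symbol " << name << '\n';
    return nullptr;
  }
  return dlsym(handle, name);
}

int main() {
  using CosFn = double (*)(double);
  if (auto cos_fn = reinterpret_cast<CosFn>(GetFunc("cos"))) {
    std::cout << cos_fn(0.0) << '\n';  // prints 1
  }
  return 0;
}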
@@ -28,7 +28,6 @@ DvppProcess::DvppProcess() {}
 DvppProcess::~DvppProcess() {}

 static uint32_t ToEven(uint32_t num) { return (num + 1) / 2 * 2; }

 static uint32_t ToOdd(uint32_t num) {
   if (num == 0) {
     return 1;
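`ToEven` rounds up to the nearest even value with truncating integer arithmetic: `(num + 1) / 2` drops the remainder and the `* 2` restores an even result, so 5 becomes 6 while 4 stays 4. `ToOdd` is cut off after its zero guard in this hunk, so the completion below is an assumption, not the real body:

// Worked check of the even/odd rounding arithmetic. ToEven is copied from
// the hunk; ToOdd's body past the zero guard is not shown, so the version
// here is an assumed completion returning the nearest odd value not above num.
#include <cassert>
#include <cstdint>

static uint32_t ToEven(uint32_t num) { return (num + 1) / 2 * 2; }

static uint32_t ToOdd(uint32_t num) {
  if (num == 0) {
    return 1;
  }
  return (num + 1) / 2 * 2 - 1;  // assumption: 4 -> 3, 7 -> 7, 8 -> 7
}

int main() {
  assert(ToEven(4) == 4);  // already even: unchanged
  assert(ToEven(5) == 6);  // odd: rounded up, (5 + 1) / 2 * 2 = 6
  assert(ToOdd(0) == 1);   // guard from the hunk: never return 0
  assert(ToOdd(8) == 7);
  return 0;
}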
@@ -13,13 +13,13 @@
 # limitations under the License.
 # ============================================================================

+import pytest
+import numpy as np
 import mindspore as ms
 from mindspore.nn import ReLU
 from mindspore.nn import Cell
 from mindspore.common.tensor import Tensor
 from mindspore.ops import operations as P
-import numpy as np
-import pytest

 @pytest.mark.level0
 @pytest.mark.platform_arm_ascend_training