!18034 code security check
Merge pull request !18034 from zhangbuxue/code_security_check
Commit ca5c8775b7
@@ -41,11 +41,19 @@ void CastCPUKernel<S, T>::InitKernel(const CNodePtr &kernel_node) {
 template <typename S, typename T>
 bool CastCPUKernel<S, T>::Launch(const std::vector<kernel::AddressPtr> &inputs, const std::vector<kernel::AddressPtr> &,
                                  const std::vector<kernel::AddressPtr> &outputs) {
-  S *input = reinterpret_cast<S *>(inputs[0]->addr);
-  T *output = reinterpret_cast<T *>(outputs[0]->addr);
+  if (inputs.size() != 1 || outputs.size() != 1) {
+    MS_LOG(ERROR) << "Cast requires 1 input and 1 output, but got " << inputs.size() << " input and " << outputs.size()
+                  << " output.";
+    return false;
+  }
+  if (outputs[0]->size == 0) {
+    MS_LOG(WARNING) << "Cast output memory size should be greater than 0, but got 0.";
+    return true;
+  }
+  const auto input = reinterpret_cast<S *>(inputs[0]->addr);
+  const auto output = reinterpret_cast<T *>(outputs[0]->addr);
   MS_LOG(DEBUG) << "Type source: " << typeid(S).name() << "; target: " << typeid(T).name();
-  size_t lens = outputs[0]->size > 0 ? static_cast<size_t>(outputs[0]->size / sizeof(T)) : 1;
-  Cast<S, T>(input, output, lens);
+  Cast<S, T>(input, output, outputs[0]->size / sizeof(T));
   return true;
 }
 }  // namespace kernel
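Every CPU kernel touched by this patch gains the same two guards before any buffer pointer is formed: an argument-count check that fails the launch, and a zero-size early return that keeps the later `size / sizeof(T)` division from quietly producing zero. A minimal sketch of that validate-then-compute shape, with hypothetical names (`Address`, `LaunchChecked`, `ElementCast`) standing in for the kernel plumbing, and a plausible body for the `Cast<S, T>` helper, which the hunk calls but does not show:

#include <cstddef>
#include <vector>

// Hypothetical stand-in for kernel::Address / AddressPtr.
struct Address {
  void *addr;
  size_t size;
};

// Plausible element-wise conversion loop; the real Cast<S, T> body is not in the hunk.
template <typename S, typename T>
void ElementCast(const S *in, T *out, size_t n) {
  for (size_t i = 0; i < n; ++i) out[i] = static_cast<T>(in[i]);
}

template <typename S, typename T>
bool LaunchChecked(const std::vector<Address> &inputs, const std::vector<Address> &outputs) {
  // Reject malformed launches before any buffer pointer is formed.
  if (inputs.size() != 1 || outputs.size() != 1) return false;
  // A zero-sized output is a no-op, not an error; return before dividing.
  if (outputs[0].size == 0) return true;
  const auto *in = static_cast<const S *>(inputs[0].addr);
  auto *out = static_cast<T *>(outputs[0].addr);
  ElementCast<S, T>(in, out, outputs[0].size / sizeof(T));
  return true;
}

The order matters: the count check must precede any `inputs[0]` access, and the size check must precede the division.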
@@ -226,14 +226,22 @@ bool EltWiseGradCPUKernel<T>::Launch(const std::vector<kernel::AddressPtr> &inpu
     {"GeLUGrad", &EltWiseGradCPUKernel<T>::GeluGrad},   {"AsinGrad", &EltWiseGradCPUKernel<T>::AsinGrad},
     {"ACosGrad", &EltWiseGradCPUKernel<T>::ACosGrad},   {"AtanGrad", &EltWiseGradCPUKernel<T>::AtanGrad},
     {"AsinhGrad", &EltWiseGradCPUKernel<T>::AsinhGrad}, {"AcoshGrad", &EltWiseGradCPUKernel<T>::AcoshGrad}};
-  const auto *input1 = reinterpret_cast<T *>(inputs[0]->addr);
-  const auto *input2 = reinterpret_cast<T *>(inputs[1]->addr);
-  auto *output = reinterpret_cast<T *>(outputs[0]->addr);
+  if (inputs.size() < 2 || outputs.size() != 1) {
+    MS_LOG(ERROR) << kernel_name_ << " requires at least 2 inputs and 1 output, but got " << inputs.size()
+                  << " inputs and " << outputs.size() << " output.";
+    return false;
+  }
+  if (outputs[0]->size == 0) {
+    MS_LOG(WARNING) << kernel_name_ << " output memory size should be greater than 0, but got 0.";
+    return true;
+  }
+  const auto input0 = reinterpret_cast<T *>(inputs[0]->addr);
+  const auto input1 = reinterpret_cast<T *>(inputs[1]->addr);
+  auto output = reinterpret_cast<T *>(outputs[0]->addr);
 
-  size_t count = outputs[0]->size > 0 ? static_cast<size_t>(outputs[0]->size / sizeof(T)) : 1;
   CPUKernelUtils::ParallelFor(
-    std::bind(elt_map.at(kernel_name_), this, input1, input2, output, std::placeholders::_1, std::placeholders::_2),
-    count);
+    std::bind(elt_map.at(kernel_name_), this, input0, input1, output, std::placeholders::_1, std::placeholders::_2),
+    outputs[0]->size / sizeof(T));
   return true;
 }
 }  // namespace kernel
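The `ParallelFor` call works because `std::bind` pins the member-function pointer looked up in `elt_map`, the object, and the three data pointers, leaving only the `(start, end)` range open — exactly the callable a range splitter needs. A self-contained sketch of the idiom; the `EltWiseGrad` class and `SquareGrad` body here are illustrative, not MindSpore's:

#include <cstddef>
#include <functional>
#include <iostream>
#include <map>
#include <string>

// Illustrative element-wise gradient worker; the real kernels dispatch
// through a name-to-member-function map just like elt_map in the diff.
class EltWiseGrad {
 public:
  void SquareGrad(const float *x, const float *dout, float *out, size_t start, size_t end) {
    for (size_t i = start; i < end; ++i) out[i] = 2.0f * x[i] * dout[i];
  }
};

int main() {
  std::map<std::string, void (EltWiseGrad::*)(const float *, const float *, float *, size_t, size_t)> elt_map = {
      {"SquareGrad", &EltWiseGrad::SquareGrad}};
  EltWiseGrad kernel;
  float x[4] = {1, 2, 3, 4}, dout[4] = {1, 1, 1, 1}, out[4] = {0};
  // Bind object and data pointers now; the (start, end) range stays open,
  // which is the callable shape a ParallelFor-style splitter expects.
  auto task = std::bind(elt_map.at("SquareGrad"), &kernel, x, dout, out,
                        std::placeholders::_1, std::placeholders::_2);
  task(0, 4);  // a thread pool would invoke this once per chunk
  std::cout << out[0] << " " << out[3] << "\n";  // prints: 2 8
}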
@@ -57,20 +57,6 @@ void SliceCPUKernel::InitKernel(const CNodePtr &kernel_node) {
   data_size_ = size_pair->second;
 }
 
-void SliceCPUKernel::ParallelRun(void *input_addr, void *output_addr, int thread_num) {
-  std::vector<common::Task> tasks;
-  int thread_index = 0;
-  while (thread_index < thread_num) {
-    auto block = [&, thread_index]() {
-      DoSlice(input_addr, output_addr, &slice_param_, thread_index, data_size_);
-      return common::SUCCESS;
-    };
-    tasks.emplace_back(block);
-    thread_index++;
-  }
-  common::ThreadPool::GetInstance().SyncRun(tasks);
-}
-
 void SliceCPUKernel::InitSliceParam(const std::vector<size_t> &input_shape, const std::vector<int64_t> &begin,
                                     const std::vector<int64_t> &size) {
   for (size_t i = 0; i < DIMENSION_8D; i++) {
@@ -98,7 +84,13 @@ void SliceCPUKernel::InitSliceParam(const std::vector<size_t> &input_shape, cons
 
 bool SliceCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inputs, const std::vector<kernel::AddressPtr> &,
                             const std::vector<kernel::AddressPtr> &outputs) {
+  if (inputs.size() != 1 || outputs.size() != 1) {
+    MS_LOG(ERROR) << "Slice requires 1 input and 1 output, but got " << inputs.size() << " input and " << outputs.size()
+                  << " output.";
+    return false;
+  }
   if (outputs[0]->size == 0) {
+    MS_LOG(WARNING) << "Slice output memory size should be greater than 0, but got 0.";
     return true;
   }
   auto input_addr = inputs[0]->addr;
@@ -39,9 +39,7 @@ class SliceCPUKernel : public CPUKernel {
  private:
  void InitSliceParam(const std::vector<size_t> &input_shape, const std::vector<int64_t> &begin,
                      const std::vector<int64_t> &size);
- void ParallelRun(void *input_addr, void *output_addr, int thread_num);
 
- bool parallel_{true};
  int data_size_{4};
  SliceParameter slice_param_;
 };
@@ -47,11 +47,23 @@ template <typename I, typename T>
 bool SparseTensorDenseMatmulCPUKernel<I, T>::Launch(const std::vector<kernel::AddressPtr> &inputs,
                                                     const std::vector<kernel::AddressPtr> & /*workspace*/,
                                                     const std::vector<kernel::AddressPtr> &outputs) {
+  if (inputs.size() != 4 || outputs.size() != 1) {
+    MS_LOG(ERROR) << "SparseTensorDenseMatmul requires 4 inputs and 1 output, but got " << inputs.size()
+                  << " inputs and " << outputs.size() << " output.";
+    return false;
+  }
+  if (outputs[0]->size == 0) {
+    MS_LOG(WARNING) << "SparseTensorDenseMatmul output memory size should be greater than 0, but got 0.";
+    return true;
+  }
   auto a_indices = reinterpret_cast<I *>(inputs[0]->addr);
   auto a_values = reinterpret_cast<T *>(inputs[1]->addr);
   auto b = reinterpret_cast<T *>(inputs[3]->addr);
   auto out = reinterpret_cast<T *>(outputs[0]->addr);
   const size_t output_length = outputs[0]->size / sizeof(T);
+  const size_t indices_length = inputs[0]->size / sizeof(I);
+  const size_t values_length = inputs[1]->size / sizeof(T);
+  const size_t b_length = inputs[3]->size / sizeof(T);
   if (memset_s(out, output_length, 0, output_length) != EOK) {
     MS_LOG(EXCEPTION) << "Memset Failed!";
   }
@@ -62,6 +74,12 @@ bool SparseTensorDenseMatmulCPUKernel<I, T>::Launch(const std::vector<kernel::Ad
   const size_t b_dim_1 = b_shape_[1];
   const size_t same_dim = adj_dt_ ? b_dim_1 : b_dim_0;
   for (size_t i = 0; i < values_size_; ++i) {
+    if (i * 2 + 1 >= indices_length) {
+      MS_LOG(EXCEPTION) << "The index of a_indices out of bounds.";
+    }
+    if (i >= values_length) {
+      MS_LOG(EXCEPTION) << "The index of a_values out of bounds.";
+    }
     const int row = adj_st_ ? a_indices[i * 2 + 1] : a_indices[i * 2];
     const int col = adj_st_ ? a_indices[i * 2] : a_indices[i * 2 + 1];
     if (row >= SizeToInt(out_dim_0) || row < 0 || col >= SizeToInt(same_dim) || col < 0) {
@@ -71,9 +89,15 @@ bool SparseTensorDenseMatmulCPUKernel<I, T>::Launch(const std::vector<kernel::Ad
 
     for (size_t n = 0; n < out_dim_1; ++n) {
       if (adj_dt_) {
+        if (n * b_dim_1 + col >= b_length) {
+          MS_LOG(EXCEPTION) << "The index of b out of bounds.";
+        }
         const T b_value = b[n * b_dim_1 + col];
         out[row * out_dim_1 + n] += a_values[i] * b_value;
       } else {
+        if (col * b_dim_1 + n >= b_length) {
+          MS_LOG(EXCEPTION) << "The index of b out of bounds.";
+        }
         const T b_value = b[col * b_dim_1 + n];
         out[row * out_dim_1 + n] += a_values[i] * b_value;
       }
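`a_indices` stores one `(row, col)` pair per non-zero value, flattened row-major, so pair `i` occupies elements `2i` and `2i + 1`; the new `i * 2 + 1 >= indices_length` guard therefore rejects a truncated buffer before either element is read. The same bound in isolation, with `ReadPair` as a hypothetical helper rather than kernel code:

#include <cstddef>
#include <cstdint>

// Reading COO pair i from a flat indices buffer, with the bound the diff adds.
// indices_length is the element count of the buffer, not its byte size.
bool ReadPair(const int32_t *indices, size_t indices_length, size_t i, int32_t *row, int32_t *col) {
  if (i * 2 + 1 >= indices_length) return false;  // the pair would run past the buffer
  *row = indices[i * 2];
  *col = indices[i * 2 + 1];
  return true;
}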
@@ -43,18 +43,35 @@ template <typename I, typename T>
 bool SparseToDenseCPUKernel<I, T>::Launch(const std::vector<kernel::AddressPtr> &inputs,
                                           const std::vector<kernel::AddressPtr> & /*workspace*/,
                                           const std::vector<kernel::AddressPtr> &outputs) {
+  if (inputs.size() != 3 || outputs.size() != 1) {
+    MS_LOG(ERROR) << "SparseToDense requires 3 inputs and 1 output, but got " << inputs.size() << " inputs and "
+                  << outputs.size() << " output.";
+    return false;
+  }
+  if (outputs[0]->size == 0) {
+    MS_LOG(WARNING) << "SparseToDense output memory size should be greater than 0, but got 0.";
+    return true;
+  }
   auto indices_addr = reinterpret_cast<I *>(inputs[0]->addr);
   auto values_addr = reinterpret_cast<T *>(inputs[1]->addr);
   auto output_addr = reinterpret_cast<T *>(outputs[0]->addr);
   const size_t output_length = outputs[0]->size / sizeof(T);
+  const size_t indices_length = inputs[0]->size / sizeof(I);
+  const size_t values_length = inputs[1]->size / sizeof(T);
   if (memset_s(output_addr, output_length, 0, output_length) != EOK) {
     MS_LOG(EXCEPTION) << "Memset Failed!";
   }
 
   size_t rank = output_shape_.size();
   for (size_t i = 0; i < values_size_; ++i) {
+    if (i >= values_length) {
+      MS_LOG(EXCEPTION) << "The index of values out of bounds.";
+    }
     size_t out_index = 0;
     for (size_t j = 0; j < rank; j++) {
+      if (i * rank + j >= indices_length) {
+        MS_LOG(EXCEPTION) << "The index of indices out of bounds.";
+      }
       int index = indices_addr[i * rank + j];
       if (index >= SizeToInt(output_shape_[j]) || index < 0) {
         MS_EXCEPTION(ValueError) << "The " << i << "th value in " << j << "th dimension index: " << index
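Each SparseToDense entry carries a full rank-`rank` coordinate stored flat at `indices_addr[i * rank + j]`, and `out_index` accumulates the row-major offset one dimension at a time. A sketch of that flattening arithmetic on its own; `FlattenIndex` is a hypothetical helper, and the Horner-style accumulation is an assumption about what the kernel does with `out_index`, since that line is outside the hunk:

#include <cstddef>
#include <vector>

// Row-major flattening of an N-D index, the arithmetic a SparseToDense-style
// scatter needs after the per-dimension bound checks pass.
// Example: for shape (2, 3), index (1, 2) flattens to 1 * 3 + 2 = 5.
size_t FlattenIndex(const std::vector<size_t> &shape, const std::vector<size_t> &index) {
  size_t out = 0;
  for (size_t j = 0; j < shape.size(); ++j) {
    out = out * shape[j] + index[j];  // Horner-style accumulation
  }
  return out;
}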
@@ -209,7 +209,13 @@ void StridedSliceCPUKernel::ParallelRun(uint8_t *input_addr, uint8_t *output_add
 bool StridedSliceCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inputs,
                                    const std::vector<kernel::AddressPtr> & /*workspace*/,
                                    const std::vector<kernel::AddressPtr> &outputs) {
+  if (inputs.size() != 1 || outputs.size() != 1) {
+    MS_LOG(ERROR) << "StridedSlice requires 1 input and 1 output, but got " << inputs.size() << " input and "
+                  << outputs.size() << " output.";
+    return false;
+  }
   if (outputs[0]->size == 0) {
+    MS_LOG(WARNING) << "StridedSlice output memory size should be greater than 0, but got 0.";
     return true;
   }
   auto input_addr = reinterpret_cast<uint8_t *>(inputs[0]->addr);
@@ -49,12 +49,10 @@ class HSwishGradKernel : public GpuKernel {
     size_t input_num = AnfAlgo::GetInputTensorNum(kernel_node);
     if (input_num != 2) {
       MS_LOG(EXCEPTION) << "Input number is " << input_num << ", but HSwishGrad needs 2 inputs.";
-      return false;
     }
     size_t output_num = AnfAlgo::GetOutputTensorNum(kernel_node);
     if (output_num != 1) {
       MS_LOG(EXCEPTION) << "Output number is " << output_num << ", but HSwishGrad has 1 output.";
-      return false;
     }
     auto input_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
     input_size_ = 1;
@@ -60,7 +60,7 @@ void TbeUtils::GenSocInfo(nlohmann::json *soc_info_json) {
 void TbeUtils::SaveJsonInfo(const std::string &json_name, const std::string &info) {
   char real_path[PATH_MAX] = {0};
   std::string path = kCceKernelMeta + json_name + kInfoSuffix;
-  if (path.size() > PATH_MAX) {
+  if (path.size() >= PATH_MAX) {
     MS_LOG(ERROR) << "File path: " << path << "is too long.";
     return;
   }
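The `>` to `>=` change recurs through the rest of the patch, and the reasoning is identical everywhere: a buffer declared `char buf[PATH_MAX]` holds at most `PATH_MAX - 1` characters plus the terminating NUL, so a path whose length is exactly `PATH_MAX` already cannot fit, yet the old comparison accepted it. A minimal sketch of the boundary case (assuming a POSIX `PATH_MAX` pulled in via `<climits>`; `FitsInPathBuffer` is a hypothetical helper):

#include <climits>
#include <cstring>
#include <string>

// Why the comparison must be >=: a C string of length L needs L + 1 bytes.
bool FitsInPathBuffer(const std::string &path) {
  char buf[PATH_MAX] = {0};
  if (path.size() >= PATH_MAX) {  // length == PATH_MAX leaves no room for '\0'
    return false;
  }
  std::memcpy(buf, path.data(), path.size());  // buf stays NUL-terminated
  return true;
}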
@@ -213,7 +213,7 @@ void SomasSolverPre::TensorRelationLog(const std::vector<DynamicBitSet> *pConstr
   MS_EXCEPTION_IF_NULL(context_ptr);
   auto save_graphs_path = context_ptr->get_param<std::string>(MS_CTX_SAVE_GRAPHS_PATH);
   std::string filename = save_graphs_path + "/" + "somas_tensor_relation_" + std::to_string(graph->graph_id()) + ".ir";
-  if (filename.size() > PATH_MAX) {
+  if (filename.size() >= PATH_MAX) {
     MS_LOG(ERROR) << "File path " << filename << " is too long.";
     return;
   }
@@ -28,7 +28,7 @@
 
 namespace mindspore {
 std::optional<std::string> Common::GetRealPath(const std::string &input_path) {
-  if (input_path.length() > PATH_MAX) {
+  if (input_path.length() >= PATH_MAX) {
     MS_LOG(EXCEPTION) << "The length of path: " << input_path << " exceeds limit: " << PATH_MAX;
   }
 #if defined(SYSTEM_ENV_POSIX)
@@ -39,7 +39,6 @@ std::optional<std::string> Common::GetRealPath(const std::string &input_path) {
   MS_LOG(EXCEPTION) << "Unsupported platform.";
 #endif
   // get real path
-  std::string out_path;
   char real_path[PATH_MAX] = {0};
   // input_path is dir + file_name
   if (path_split_pos != std::string::npos) {
@@ -63,36 +62,30 @@ std::optional<std::string> Common::GetRealPath(const std::string &input_path) {
       return std::nullopt;
     }
 #endif
-    out_path = std::string(real_path) + file_name;
-  } else {
+    return std::string(real_path) + file_name;
+  }
   // input_path is only file_name
 #if defined(SYSTEM_ENV_POSIX)
   if (input_path.length() > NAME_MAX) {
     MS_LOG(EXCEPTION) << "The length of file name : " << input_path.length() << " exceeds limit: " << NAME_MAX;
   }
   if (realpath(common::SafeCStr(input_path), real_path) == nullptr) {
     MS_LOG(INFO) << "The file " << input_path << " does not exist, it will be created.";
   }
 #elif defined(SYSTEM_ENV_WINDOWS)
   if (_fullpath(real_path, common::SafeCStr(input_path), PATH_MAX) == nullptr) {
     MS_LOG(INFO) << "The file " << input_path << " does not exist, it will be created.";
   }
 #endif
-  out_path = std::string(real_path);
-  }
-
-  if (out_path.length() > PATH_MAX) {
-    MS_LOG(EXCEPTION) << "The file real path: " << out_path << " exceeds limit: " << PATH_MAX;
-  }
-  return out_path;
+  return std::string(real_path);
 }
 
 bool Common::CreateNotExistDirs(const std::string &path) {
   std::shared_ptr<system::FileSystem> fs = system::Env::GetFileSystem();
   MS_EXCEPTION_IF_NULL(fs);
   char temp_path[PATH_MAX] = {0};
-  if (path.length() > PATH_MAX) {
-    MS_LOG(ERROR) << "Path lens is max than " << PATH_MAX;
+  if (path.length() >= PATH_MAX) {
+    MS_LOG(ERROR) << "Path length is equal to or max than " << PATH_MAX;
     return false;
   }
   for (uint32_t i = 0; i < path.length(); i++) {
@@ -294,7 +287,7 @@ std::string Common::AddId(const std::string &filename, const std::string &suffix
 }
 
 bool Common::SaveStringToFile(const std::string filename, const std::string string_info) {
-  if (filename.size() > PATH_MAX) {
+  if (filename.size() >= PATH_MAX) {
     MS_LOG(ERROR) << "File path " << filename << " is too long.";
     return false;
   }
@@ -19,17 +19,17 @@
 #include <unordered_map>
 #include <utility>
 #include <sstream>
-#include <cmath>
 
-#include "mindspore/core/utils/log_adapter.h"
 #include "mindspore/core/ir/dtype.h"
+#include "mindspore/core/utils/log_adapter.h"
+#include "mindspore/core/utils/convert_utils_base.h"
 
 namespace mindspore {
 namespace {
 // npy file header start information
 const char kMagicPrefix[] = "\x93NUMPY";
 // magical length include kMagicPrefix length and version length
-const size_t kMagicLen = 8;
+const size_t kMagicLen = 6;
 const size_t kArrayAlign = 64;
 
 // first: header_length_type, second: encoding_type
@@ -90,23 +90,27 @@ std::string NpyHeader::shape_to_str() const {
   buffer << ")";
   return buffer.str();
 }
 
+// dtype description corresponding to tensor type
+const std::unordered_map<TypeId, DtypeDescr> type_desc_map = {
+  {kNumberTypeBool, DtypeDescr{'|', 'b', 1}},    {kNumberTypeInt8, DtypeDescr{'|', 'i', 1}},
+  {kNumberTypeInt16, DtypeDescr{'<', 'i', 2}},   {kNumberTypeInt32, DtypeDescr{'<', 'i', 4}},
+  {kNumberTypeInt64, DtypeDescr{'<', 'i', 8}},   {kNumberTypeUInt8, DtypeDescr{'|', 'u', 1}},
+  {kNumberTypeUInt16, DtypeDescr{'<', 'u', 2}},  {kNumberTypeUInt32, DtypeDescr{'<', 'u', 4}},
+  {kNumberTypeUInt64, DtypeDescr{'<', 'u', 8}},  {kNumberTypeFloat16, DtypeDescr{'<', 'f', 2}},
+  {kNumberTypeFloat32, DtypeDescr{'<', 'f', 4}}, {kNumberTypeFloat64, DtypeDescr{'<', 'f', 8}},
+};
 }  // namespace
 
 void int_to_byte(size_t number, char *byte, size_t length) {
+  const size_t byte_len = 8;
+  const size_t mask = 0xff;
   for (size_t i = 0; i < length; i++) {
-    byte[i] = (number >> (i * 8)) & 0xff;
+    byte[i] = (number >> (i * byte_len)) & mask;
   }
 }
 
 std::string GenerateNpyHeader(const ShapeVector &shape, TypeId type_id, bool fortran_order) {
-  static std::unordered_map<TypeId, DtypeDescr> type_desc_map = {
-    {kNumberTypeBool, DtypeDescr{'|', 'b', 1}},    {kNumberTypeInt8, DtypeDescr{'|', 'i', 1}},
-    {kNumberTypeInt16, DtypeDescr{'<', 'i', 2}},   {kNumberTypeInt32, DtypeDescr{'<', 'i', 4}},
-    {kNumberTypeInt64, DtypeDescr{'<', 'i', 8}},   {kNumberTypeUInt8, DtypeDescr{'|', 'u', 1}},
-    {kNumberTypeUInt16, DtypeDescr{'<', 'u', 2}},  {kNumberTypeUInt32, DtypeDescr{'<', 'u', 4}},
-    {kNumberTypeUInt64, DtypeDescr{'<', 'u', 8}},  {kNumberTypeFloat16, DtypeDescr{'<', 'f', 2}},
-    {kNumberTypeFloat32, DtypeDescr{'<', 'f', 4}}, {kNumberTypeFloat64, DtypeDescr{'<', 'f', 8}},
-  };
   auto type_desc = type_desc_map.find(type_id);
   if (type_desc == type_desc_map.end()) {
     MS_LOG(WARNING) << "Not support dump the " << TypeIdToType(type_id)->ToString() << " data to npy file.";
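`int_to_byte` emits the low `length` bytes of `number` in little-endian order — byte `i` holds bits `[8i, 8i + 8)` — which is the byte order the npy format uses for its header-length field. A standalone check of that behavior; `IntToByte` mirrors the function above, and `0x0176` is an arbitrary sample value:

#include <cstddef>
#include <cstdio>

// Little-endian encoder equivalent to the diff's int_to_byte.
void IntToByte(size_t number, char *byte, size_t length) {
  const size_t byte_len = 8;
  const size_t mask = 0xff;
  for (size_t i = 0; i < length; i++) {
    byte[i] = static_cast<char>((number >> (i * byte_len)) & mask);
  }
}

int main() {
  char out[2];
  IntToByte(0x0176, out, 2);  // header length 374
  std::printf("%02x %02x\n", out[0] & 0xff, out[1] & 0xff);  // prints: 76 01
}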
@@ -115,32 +119,32 @@ std::string GenerateNpyHeader(const ShapeVector &shape, TypeId type_id, bool for
 
   NpyHeader npy_header{type_desc->second, fortran_order, shape};
   std::string header_str = npy_header.str();
-  size_t header_len = header_str.length();
   version_type version{1, 0};
-  size_t total_len = kMagicLen + 2 + header_len + 1;
-  if (total_len > std::pow(2, 16)) {
+  const size_t header_len = header_str.length();
+  const size_t version_len = 2;
+  const size_t max_len = 65535;
+  size_t length_len = 2;
+  size_t total_len = kMagicLen + version_len + length_len + header_len + 1;
+  if (total_len > max_len) {
     version = {2, 0};
-    total_len = kMagicLen + 4 + header_len + 1;
+    length_len = 4;
+    total_len = kMagicLen + version_len + length_len + header_len + 1;
   }
 
+  const size_t pad_len = kArrayAlign - total_len % kArrayAlign;
+  const size_t padding_header_len = header_len + pad_len + 1;
+  const std::string padding(pad_len, ' ');
+  const std::string end_line = "\n";
+  char *length_byte = new char[length_len];
+  int_to_byte(padding_header_len, length_byte, length_len);
+
   std::ostringstream out;
-  out << kMagicPrefix;
-  out.put(version.first);
-  out.put(version.second);
-  size_t pad_len = kArrayAlign - total_len % kArrayAlign;
-  size_t padding_header_len = header_len + pad_len + 1;
-  if (version == version_type{1, 0}) {
-    char length_byte[2];
-    int_to_byte(padding_header_len, length_byte, 2);
-    out.write(length_byte, 2);
-  } else {
-    char length_byte[4];
-    int_to_byte(padding_header_len, length_byte, 4);
-    out.write(length_byte, 4);
-  }
-
-  std::string padding(pad_len, ' ');
-  out << header_str << padding << "\n";
+  (void)out.write(kMagicPrefix, SizeToLong(kMagicLen));
+  (void)out.put(version.first);
+  (void)out.put(version.second);
+  (void)out.write(length_byte, SizeToLong(length_len));
+  out << header_str << padding << end_line;
+  delete[] length_byte;
   return out.str();
 }
 }  // namespace mindspore
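For reference, an npy v1.0 preamble is: the 6-byte magic `\x93NUMPY`, two version bytes, a 2-byte little-endian header length (4 bytes under v2.0, which the code switches to once the total would exceed 65535), then the dict text padded with spaces and terminated by `\n` so the whole preamble is a multiple of `kArrayAlign` (64). A worked check of the padding arithmetic for a hypothetical 61-byte dict:

#include <cassert>
#include <cstddef>

// magic(6) + version(2) + length field(2) + padded dict must be 64-aligned.
int main() {
  const size_t kMagicLen = 6, kArrayAlign = 64;
  const size_t version_len = 2, length_len = 2, header_len = 61;
  size_t total_len = kMagicLen + version_len + length_len + header_len + 1;  // 72
  size_t pad_len = kArrayAlign - total_len % kArrayAlign;                    // 56
  size_t padding_header_len = header_len + pad_len + 1;                      // 118
  // Full preamble: 6 + 2 + 2 + 118 = 128 bytes, a multiple of 64.
  assert((kMagicLen + version_len + length_len + padding_header_len) % kArrayAlign == 0);
  return 0;
}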
@@ -29,15 +29,15 @@ std::shared_ptr<ResourceManager> ResourceManager::ptr_ = nullptr;
 */
 APP_ERROR ExistFile(const std::string &filePath) {
   struct stat fileSat = {0};
-  char c[PATH_MAX + 1] = {0x00};
-  size_t count = filePath.copy(c, PATH_MAX + 1);
+  char c[PATH_MAX] = {0x00};
+  size_t count = filePath.copy(c, PATH_MAX);
   if (count != filePath.length()) {
     MS_LOG(ERROR) << "Failed to strcpy" << c;
     return APP_ERR_COMM_FAILURE;
   }
   // Get the absolute path of input directory
-  char path[PATH_MAX + 1] = {0x00};
-  if ((strlen(c) > PATH_MAX) || (realpath(c, path) == nullptr)) {
+  char path[PATH_MAX] = {0x00};
+  if ((strlen(c) >= PATH_MAX) || (realpath(c, path) == nullptr)) {
     MS_LOG(ERROR) << "Failed to get canonicalize path";
     return APP_ERR_COMM_EXIST;
   }
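`std::string::copy` writes raw characters and never appends a terminating NUL; the zero-initialized buffers here supply it, but only while the copy stops at least one byte short of the array's end. A minimal illustration (the buffer size and path are arbitrary):

#include <iostream>
#include <string>

// std::string::copy leaves termination to the caller; zero-initializing the
// destination and reserving one byte keeps the buffer a valid C string.
int main() {
  std::string path = "/tmp/example";
  char buf[32] = {0};                              // zero-init provides the '\0'
  auto count = path.copy(buf, sizeof(buf) - 1);    // leave room for the terminator
  std::cout << count << " " << buf << "\n";        // prints: 12 /tmp/example
}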
@@ -19,6 +19,7 @@
 #include <vector>
 #include <set>
 #include <cstring>
+#include <climits>
 #include <unordered_map>
 #include <mutex>
 #include "CommonDataType.h"
@@ -27,8 +28,6 @@
 #include "mindspore/core/utils/log_adapter.h"
 #include "mindspore/ccsrc/cxx_api/graph/acl/acl_env_guard.h"
 
-#define PATH_MAX 4096
-
 enum ModelLoadMethod {
   LOAD_FROM_FILE = 0,  // Loading from file, memory of model and weights are managed by ACL
   LOAD_FROM_MEM,       // Loading from memory, memory of model and weights are managed by ACL
@@ -223,10 +223,10 @@ Status Path::OpenFile(int *file_descriptor, bool create) {
     RETURN_STATUS_UNEXPECTED(oss.str());
   }
   // Convert to canonical form.
-  if (strlen(common::SafeCStr(path_)) > PATH_MAX) {
+  if (strlen(common::SafeCStr(path_)) >= PATH_MAX) {
     RETURN_STATUS_UNEXPECTED(strerror(errno));
   }
-  char canonical_path[PATH_MAX + 1] = {0x00};
+  char canonical_path[PATH_MAX] = {0x00};
 #if defined(_WIN32) || defined(_WIN64)
   auto err = _fullpath(canonical_path, common::SafeCStr(path_), PATH_MAX);
 #else
|
@@ -246,7 +246,7 @@ Status Path::OpenFile(int *file_descriptor, bool create) {
     RETURN_STATUS_UNEXPECTED(strerror(errno));
   }
   auto cur_inx = strlen(canonical_path);
-  if ((cur_inx + file_part.length() + 1) > PATH_MAX) {
+  if (cur_inx + file_part.length() >= PATH_MAX) {
     RETURN_STATUS_UNEXPECTED(strerror(errno));
   }
   canonical_path[cur_inx++] = separator_;
@@ -820,7 +820,7 @@ bool AscendKernelRuntime::HcclInit() {
       return false;
     }
   }
-  if (strlen(config_path_str) > kPathMax) {
+  if (strlen(config_path_str) >= kPathMax) {
     MS_LOG(ERROR) << "File path oversize";
     return false;
   }
@@ -278,7 +278,7 @@ void TaskGenerator::DumpTaskInfo(const string &real_filename,
 }
 
 void TaskGenerator::DumpTaskInfo(const std::string &real_filename) {
-  if (real_filename.size() > PATH_MAX) {
+  if (real_filename.size() >= PATH_MAX) {
     MS_LOG(ERROR) << "File path " << real_filename << " is too long.";
     return;
   }
@@ -183,8 +183,8 @@ bool WinWriteFile::Open() {
   if (file_name_.c_str() == nullptr) {
     MS_LOG(EXCEPTION) << "The file path is null.";
   }
-  char path[PATH_MAX + 1] = {0x00};
-  if (file_name_.size() > PATH_MAX || _fullpath(path, file_name_.c_str(), PATH_MAX) == nullptr) {
+  char path[PATH_MAX] = {0x00};
+  if (file_name_.size() >= PATH_MAX || _fullpath(path, file_name_.c_str(), PATH_MAX) == nullptr) {
     MS_LOG(EXCEPTION) << "Convert to real path fail, file name is " << file_name_ << ".";
   }
 
@@ -145,8 +145,8 @@ class PosixWriteFile : public WriteFile {
     if (nullptr == file_name_.c_str()) {
       MS_LOG(EXCEPTION) << "The file path is null.";
     }
-    char path[PATH_MAX + 1] = {0x00};
-    if (file_name_.size() > PATH_MAX || nullptr == realpath(file_name_.c_str(), path)) {
+    char path[PATH_MAX] = {0x00};
+    if (file_name_.size() >= PATH_MAX || nullptr == realpath(file_name_.c_str(), path)) {
       MS_LOG(EXCEPTION) << "Convert to real path fail, file name is " << file_name_ << ".";
     }
 
@@ -50,11 +50,11 @@ inline uint32_t sigma3(uint32_t x) { return (x >> 17 | x << 15) ^ (x >> 19 | x <
 std::string LoadFilePath(const std::string &path) {
   char real_path[PATH_MAX] = {0};
 #if defined(_WIN32) || defined(_WIN64)
-  if (path.size() > PATH_MAX || _fullpath(real_path, path.c_str(), PATH_MAX) == nullptr) {
+  if (path.size() >= PATH_MAX || _fullpath(real_path, path.c_str(), PATH_MAX) == nullptr) {
     return "";
   }
 #else
-  if (path.size() > PATH_MAX || realpath(path.c_str(), real_path) == nullptr) {
+  if (path.size() >= PATH_MAX || realpath(path.c_str(), real_path) == nullptr) {
     return "";
   }
 #endif
@@ -52,13 +52,13 @@ std::string Location::ToString(SourceLineTip tip) {
     return debug_info_ss.str();
   }
 
-  char path[PATH_MAX + 1] = {0x00};
+  char path[PATH_MAX] = {0x00};
 #if defined(_WIN32) || defined(_WIN64)
-  if (file_name_.size() > PATH_MAX || _fullpath(path, file_name_.c_str(), PATH_MAX) == nullptr) {
+  if (file_name_.size() >= PATH_MAX || _fullpath(path, file_name_.c_str(), PATH_MAX) == nullptr) {
     return debug_info_ss.str();
   }
 #else
-  if (file_name_.size() > PATH_MAX || realpath(file_name_.c_str(), path) == nullptr) {
+  if (file_name_.size() >= PATH_MAX || realpath(file_name_.c_str(), path) == nullptr) {
     return debug_info_ss.str();
   }
 #endif
@@ -107,7 +107,7 @@ int CreateOutputDir(std::string *file_path) {
   if (file_path->empty()) {
    MS_LOG(ERROR) << "input file path is empty.";
    return RET_ERROR;
-  } else if (file_path->size() > PATH_MAX) {
+  } else if (file_path->size() >= PATH_MAX) {
    MS_LOG(ERROR) << "input file path is too long";
    return RET_ERROR;
   }
@@ -618,7 +618,7 @@ void ParseBiasCorrection(PostQuantConfig *post_quant_config, std::string value)
 STATUS ParseConfigFile(std::string config_file, PostQuantConfig *post_quant_config) {
   MS_ASSERT(post_quant_config != nullptr);
 
-  if (config_file.empty() || config_file.length() > PATH_MAX) {
+  if (config_file.empty() || config_file.length() >= PATH_MAX) {
     MS_LOG(ERROR) << "invalid config path!";
     return RET_PARAM_INVALID;
   }
@@ -32,9 +32,6 @@ class SparseToDense(Cell):
     Outputs:
         Tensor, converted from sparse tensor.
 
-    Args:
-        sparse_tensor (SparseTensor): the sparse tensor to convert.
-
     Raises:
         TypeError: If `sparse_tensor.indices` is not a Tensor.
         TypeError: If 'sparse_tensor.values' is not a Tensor.
@@ -117,8 +114,8 @@ class SparseTensorDenseMatmul(Cell):
         >>> out = sparse_dense_matmul(indices, values, sparse_shape, dense)
         >>> print(out)
         [[2 2]
-         [0 6]
-         [6 0]]
+         [6 6]
+         [0 0]]
     """

    def __init__(self, adjoint_st=False, adjoint_dt=False):
@@ -29,11 +29,11 @@ class SparseToDense(PrimitiveWithInfer):
 
     Inputs:
         - **indices** (Tensor) - A 2-D Tensor, represents the position of the element in the sparse tensor.
           Support int32, int64, each element value should be a non-negative int number. The shape is :math:`(n, 2)`.
         - **values** (Tensor) - A 1-D Tensor, represents the value corresponding to the position in the `indices`.
           The shape should be :math:`(n,)`.
         - **sparse_shape** (tuple(int)) - A positive int tuple which specifies the shape of sparse tensor,
           should have 2 elements, represent sparse tensor shape is :math:`(N, C)`.
 
     Returns:
         Tensor, converted from sparse tensor. The dtype is same as `values`, and the shape is `sparse_shape`.
@@ -95,21 +95,21 @@ class SparseTensorDenseMatmul(PrimitiveWithInfer):
     The rank of sparse matrix and dense matrix must equal to `2`.
 
     Args:
-        - *adjoint_st** (bool) - If true, sparse tensor is transposed before multiplication. Default: False.
-        - *adjoint_dt** (bool) - If true, dense tensor is transposed before multiplication. Default: False.
+        adjoint_st (bool): If true, sparse tensor is transposed before multiplication. Default: False.
+        adjoint_dt (bool): If true, dense tensor is transposed before multiplication. Default: False.
 
     Inputs:
         - **indices** (Tensor) - A 2-D Tensor, represents the position of the element in the sparse tensor.
           Support int32, int64, each element value should be a non-negative int number. The shape is :math:`(n, 2)`.
         - **values** (Tensor) - A 1-D Tensor, represents the value corresponding to the position in the `indices`.
           Support float16, float32, float64, int32, int64. The shape should be :math:`(n,)`.
         - **sparse_shape** (tuple(int)) - A positive int tuple which specifies the shape of sparse tensor,
           should have 2 elements, represent sparse tensor shape is :math:`(N, C)`.
         - **dense** (Tensor) - A 2-D Tensor, the dtype is same as `values`.
           If `adjoint_st` is False and `adjoint_dt` is False, the shape must be :math:`(C, M)`.
           If `adjoint_st` is False and `adjoint_dt` is True, the shape must be :math:`(M, C)`.
           If `adjoint_st` is True and `adjoint_dt` is False, the shape must be :math:`(N, M)`.
           If `adjoint_st` is True and `adjoint_dt` is True, the shape must be :math:`(M, N)`.
 
     Outputs:
         Tensor, the dtype is the same as `values`.
@@ -134,8 +134,8 @@ class SparseTensorDenseMatmul(PrimitiveWithInfer):
         >>> out = sparse_dense_matmul(indices, values, sparse_shape, dense)
         >>> print(out)
         [[2 2]
-         [0 6]
-         [6 0]]
+         [6 6]
+         [0 0]]
     """

    @prim_attr_register