!18034 code security check

Merge pull request !18034 from zhangbuxue/code_security_check
i-robot 2021-06-16 17:04:53 +08:00 committed by Gitee
commit ca5c8775b7
25 changed files with 173 additions and 129 deletions

View File

@@ -41,11 +41,19 @@ void CastCPUKernel<S, T>::InitKernel(const CNodePtr &kernel_node) {
template <typename S, typename T>
bool CastCPUKernel<S, T>::Launch(const std::vector<kernel::AddressPtr> &inputs, const std::vector<kernel::AddressPtr> &,
const std::vector<kernel::AddressPtr> &outputs) {
S *input = reinterpret_cast<S *>(inputs[0]->addr);
T *output = reinterpret_cast<T *>(outputs[0]->addr);
if (inputs.size() != 1 || outputs.size() != 1) {
MS_LOG(ERROR) << "Cast requires 1 input and 1 output, but got " << inputs.size() << " input and " << outputs.size()
<< " output.";
return false;
}
if (outputs[0]->size == 0) {
MS_LOG(WARNING) << "Cast output memory size should be greater than 0, but got 0.";
return true;
}
const auto input = reinterpret_cast<S *>(inputs[0]->addr);
const auto output = reinterpret_cast<T *>(outputs[0]->addr);
MS_LOG(DEBUG) << "Type source: " << typeid(S).name() << "; target: " << typeid(T).name();
size_t lens = outputs[0]->size > 0 ? static_cast<size_t>(outputs[0]->size / sizeof(T)) : 1;
Cast<S, T>(input, output, lens);
Cast<S, T>(input, output, outputs[0]->size / sizeof(T));
return true;
}
} // namespace kernel
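
The pattern introduced here, validate operand counts, treat a zero-size output as a no-op, then derive the element count from the output's byte size, can be sketched in isolation. A minimal sketch follows; the Address struct and LaunchCast name are hypothetical stand-ins, not the real kernel::AddressPtr type.

#include <cstddef>
#include <cstdio>
#include <vector>

struct Address {
  void *addr;
  size_t size;  // buffer size in bytes
};

template <typename S, typename T>
bool LaunchCast(const std::vector<Address> &inputs, const std::vector<Address> &outputs) {
  // Validate operand counts before dereferencing any buffer.
  if (inputs.size() != 1 || outputs.size() != 1) {
    std::fprintf(stderr, "Cast requires 1 input and 1 output\n");
    return false;
  }
  // A zero-size output makes size / sizeof(T) == 0; treat it as a no-op.
  if (outputs[0].size == 0) {
    return true;
  }
  const auto *input = static_cast<const S *>(inputs[0].addr);
  auto *output = static_cast<T *>(outputs[0].addr);
  const size_t lens = outputs[0].size / sizeof(T);
  for (size_t i = 0; i < lens; ++i) {
    output[i] = static_cast<T>(input[i]);
  }
  return true;
}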

View File

@@ -226,14 +226,22 @@ bool EltWiseGradCPUKernel<T>::Launch(const std::vector<kernel::AddressPtr> &inpu
{"GeLUGrad", &EltWiseGradCPUKernel<T>::GeluGrad}, {"AsinGrad", &EltWiseGradCPUKernel<T>::AsinGrad},
{"ACosGrad", &EltWiseGradCPUKernel<T>::ACosGrad}, {"AtanGrad", &EltWiseGradCPUKernel<T>::AtanGrad},
{"AsinhGrad", &EltWiseGradCPUKernel<T>::AsinhGrad}, {"AcoshGrad", &EltWiseGradCPUKernel<T>::AcoshGrad}};
const auto *input1 = reinterpret_cast<T *>(inputs[0]->addr);
const auto *input2 = reinterpret_cast<T *>(inputs[1]->addr);
auto *output = reinterpret_cast<T *>(outputs[0]->addr);
if (inputs.size() < 2 || outputs.size() != 1) {
MS_LOG(ERROR) << kernel_name_ << " requires at least 2 inputs and 1 output, but got " << inputs.size()
<< " inputs and " << outputs.size() << " output.";
return false;
}
if (outputs[0]->size == 0) {
MS_LOG(WARNING) << kernel_name_ << " output memory size should be greater than 0, but got 0.";
return true;
}
const auto input0 = reinterpret_cast<T *>(inputs[0]->addr);
const auto input1 = reinterpret_cast<T *>(inputs[1]->addr);
auto output = reinterpret_cast<T *>(outputs[0]->addr);
size_t count = outputs[0]->size > 0 ? static_cast<size_t>(outputs[0]->size / sizeof(T)) : 1;
CPUKernelUtils::ParallelFor(
std::bind(elt_map.at(kernel_name_), this, input1, input2, output, std::placeholders::_1, std::placeholders::_2),
count);
std::bind(elt_map.at(kernel_name_), this, input0, input1, output, std::placeholders::_1, std::placeholders::_2),
outputs[0]->size / sizeof(T));
return true;
}
} // namespace kernel
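
The elt_map above pairs kernel names with member-function pointers and binds the data pointers up front, leaving only the (start, end) chunk bounds for the thread pool to fill in. A minimal single-threaded sketch of that shape, with an illustrative ReluGrad entry in place of the real kernel table:

#include <cstddef>
#include <functional>
#include <map>
#include <string>

class EltWiseGrad {
 public:
  void ReluGrad(const float *dout, const float *x, float *out, size_t start, size_t end) {
    for (size_t i = start; i < end; ++i) {
      out[i] = x[i] > 0.0f ? dout[i] : 0.0f;
    }
  }

  bool Launch(const std::string &name, const float *in0, const float *in1, float *out, size_t count) {
    using Fn = void (EltWiseGrad::*)(const float *, const float *, float *, size_t, size_t);
    static const std::map<std::string, Fn> elt_map = {{"ReluGrad", &EltWiseGrad::ReluGrad}};
    auto it = elt_map.find(name);
    if (it == elt_map.end()) {
      return false;
    }
    // std::bind fixes this and the data pointers; the placeholders leave the
    // (start, end) range open for each parallel chunk.
    auto task = std::bind(it->second, this, in0, in1, out, std::placeholders::_1, std::placeholders::_2);
    task(0, count);  // single-threaded stand-in for CPUKernelUtils::ParallelFor
    return true;
  }
};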

View File

@@ -57,20 +57,6 @@ void SliceCPUKernel::InitKernel(const CNodePtr &kernel_node) {
data_size_ = size_pair->second;
}
void SliceCPUKernel::ParallelRun(void *input_addr, void *output_addr, int thread_num) {
std::vector<common::Task> tasks;
int thread_index = 0;
while (thread_index < thread_num) {
auto block = [&, thread_index]() {
DoSlice(input_addr, output_addr, &slice_param_, thread_index, data_size_);
return common::SUCCESS;
};
tasks.emplace_back(block);
thread_index++;
}
common::ThreadPool::GetInstance().SyncRun(tasks);
}
void SliceCPUKernel::InitSliceParam(const std::vector<size_t> &input_shape, const std::vector<int64_t> &begin,
const std::vector<int64_t> &size) {
for (size_t i = 0; i < DIMENSION_8D; i++) {
@@ -98,7 +84,13 @@ void SliceCPUKernel::InitSliceParam(const std::vector<size_t> &input_shape, cons
bool SliceCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inputs, const std::vector<kernel::AddressPtr> &,
const std::vector<kernel::AddressPtr> &outputs) {
if (inputs.size() != 1 || outputs.size() != 1) {
MS_LOG(ERROR) << "Slice requires 1 input and 1 output, but got " << inputs.size() << " input and " << outputs.size()
<< " output.";
return false;
}
if (outputs[0]->size == 0) {
MS_LOG(WARNING) << "Slice output memory size should be greater than 0, but got 0.";
return true;
}
auto input_addr = inputs[0]->addr;
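
The ParallelRun helper deleted above followed a common thread-pool idiom: capture everything by reference except the loop variable, which must be captured by value so each task sees its own index. A sketch of that idiom using std::thread in place of common::ThreadPool::SyncRun; the name ParallelRunSketch is hypothetical.

#include <cstddef>
#include <functional>
#include <thread>
#include <vector>

void ParallelRunSketch(const std::function<void(int)> &do_chunk, int thread_num) {
  std::vector<std::thread> tasks;
  for (int thread_index = 0; thread_index < thread_num; ++thread_index) {
    // [&, thread_index]: thread_index is copied per task; capturing it by
    // reference would race, since the loop variable changes as tasks start.
    tasks.emplace_back([&, thread_index]() { do_chunk(thread_index); });
  }
  for (auto &t : tasks) {
    t.join();  // SyncRun equivalent: wait for every chunk to finish
  }
}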

View File

@@ -39,9 +39,7 @@ class SliceCPUKernel : public CPUKernel {
private:
void InitSliceParam(const std::vector<size_t> &input_shape, const std::vector<int64_t> &begin,
const std::vector<int64_t> &size);
void ParallelRun(void *input_addr, void *output_addr, int thread_num);
bool parallel_{true};
int data_size_{4};
SliceParameter slice_param_;
};

View File

@@ -47,11 +47,23 @@ template <typename I, typename T>
bool SparseTensorDenseMatmulCPUKernel<I, T>::Launch(const std::vector<kernel::AddressPtr> &inputs,
const std::vector<kernel::AddressPtr> & /*workspace*/,
const std::vector<kernel::AddressPtr> &outputs) {
if (inputs.size() != 4 || outputs.size() != 1) {
MS_LOG(ERROR) << "SparseTensorDenseMatmul requires 4 inputs and 1 output, but got " << inputs.size()
<< " inputs and " << outputs.size() << " output.";
return false;
}
if (outputs[0]->size == 0) {
MS_LOG(WARNING) << "SparseTensorDenseMatmul output memory size should be greater than 0, but got 0.";
return true;
}
auto a_indices = reinterpret_cast<I *>(inputs[0]->addr);
auto a_values = reinterpret_cast<T *>(inputs[1]->addr);
auto b = reinterpret_cast<T *>(inputs[3]->addr);
auto out = reinterpret_cast<T *>(outputs[0]->addr);
const size_t output_length = outputs[0]->size / sizeof(T);
const size_t indices_length = inputs[0]->size / sizeof(I);
const size_t values_length = inputs[1]->size / sizeof(T);
const size_t b_length = inputs[3]->size / sizeof(T);
if (memset_s(out, output_length * sizeof(T), 0, output_length * sizeof(T)) != EOK) {  // memset_s takes byte counts, not element counts
MS_LOG(EXCEPTION) << "Memset Failed!";
}
@@ -62,6 +74,12 @@ bool SparseTensorDenseMatmulCPUKernel<I, T>::Launch(const std::vector<kernel::Ad
const size_t b_dim_1 = b_shape_[1];
const size_t same_dim = adj_dt_ ? b_dim_1 : b_dim_0;
for (size_t i = 0; i < values_size_; ++i) {
if (i * 2 + 1 >= indices_length) {
MS_LOG(EXCEPTION) << "The index of a_indices out of bounds.";
}
if (i >= values_length) {
MS_LOG(EXCEPTION) << "The index of a_values out of bounds.";
}
const int row = adj_st_ ? a_indices[i * 2 + 1] : a_indices[i * 2];
const int col = adj_st_ ? a_indices[i * 2] : a_indices[i * 2 + 1];
if (row >= SizeToInt(out_dim_0) || row < 0 || col >= SizeToInt(same_dim) || col < 0) {
@@ -71,9 +89,15 @@ bool SparseTensorDenseMatmulCPUKernel<I, T>::Launch(const std::vector<kernel::Ad
for (size_t n = 0; n < out_dim_1; ++n) {
if (adj_dt_) {
if (n * b_dim_1 + col >= b_length) {
MS_LOG(EXCEPTION) << "The index of b out of bounds.";
}
const T b_value = b[n * b_dim_1 + col];
out[row * out_dim_1 + n] += a_values[i] * b_value;
} else {
if (col * b_dim_1 + n >= b_length) {
MS_LOG(EXCEPTION) << "The index of b out of bounds.";
}
const T b_value = b[col * b_dim_1 + n];
out[row * out_dim_1 + n] += a_values[i] * b_value;
}
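
The new bounds checks follow from the COO layout: a_indices stores (row, col) pairs contiguously, so nonzero i occupies slots 2*i and 2*i + 1, and both slots plus a_values[i] must fall inside the buffer lengths derived from the raw byte sizes. A stand-alone version of the guard; CheckCooEntry is a hypothetical helper, not the actual kernel code.

#include <cstddef>
#include <stdexcept>

void CheckCooEntry(size_t i, size_t indices_length, size_t values_length) {
  // Entry i needs both a_indices[2*i] and a_indices[2*i + 1].
  if (i * 2 + 1 >= indices_length) {
    throw std::out_of_range("a_indices index is out of bounds");
  }
  // Entry i also needs a_values[i].
  if (i >= values_length) {
    throw std::out_of_range("a_values index is out of bounds");
  }
}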

View File

@@ -43,18 +43,35 @@ template <typename I, typename T>
bool SparseToDenseCPUKernel<I, T>::Launch(const std::vector<kernel::AddressPtr> &inputs,
const std::vector<kernel::AddressPtr> & /*workspace*/,
const std::vector<kernel::AddressPtr> &outputs) {
if (inputs.size() != 3 || outputs.size() != 1) {
MS_LOG(ERROR) << "SparseToDense requires 3 inputs and 1 output, but got " << inputs.size() << " inputs and "
<< outputs.size() << " output.";
return false;
}
if (outputs[0]->size == 0) {
MS_LOG(WARNING) << "SparseToDense output memory size should be greater than 0, but got 0.";
return true;
}
auto indices_addr = reinterpret_cast<I *>(inputs[0]->addr);
auto values_addr = reinterpret_cast<T *>(inputs[1]->addr);
auto output_addr = reinterpret_cast<T *>(outputs[0]->addr);
const size_t output_length = outputs[0]->size / sizeof(T);
const size_t indices_length = inputs[0]->size / sizeof(I);
const size_t values_length = inputs[1]->size / sizeof(T);
if (memset_s(output_addr, output_length * sizeof(T), 0, output_length * sizeof(T)) != EOK) {  // memset_s takes byte counts, not element counts
MS_LOG(EXCEPTION) << "Memset Failed!";
}
size_t rank = output_shape_.size();
for (size_t i = 0; i < values_size_; ++i) {
if (i >= values_length) {
MS_LOG(EXCEPTION) << "The index of values out of bounds.";
}
size_t out_index = 0;
for (size_t j = 0; j < rank; j++) {
if (i * rank + j >= indices_length) {
MS_LOG(EXCEPTION) << "The index of indices out of bounds.";
}
int index = indices_addr[i * rank + j];
if (index >= SizeToInt(output_shape_[j]) || index < 0) {
MS_EXCEPTION(ValueError) << "The " << i << "th value in " << j << "th dimension index: " << index
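
The loop above rebuilds a flat offset from each rank-length index tuple, range-checking every coordinate before it contributes. For a shape (d_0, ..., d_{r-1}) the usual row-major formulation is offset = ((i_0 * d_1 + i_1) * d_2 + i_2)...; a hypothetical stand-alone helper, assuming index has at least shape.size() entries:

#include <cstddef>
#include <vector>

// Returns false if any coordinate is out of range for its dimension.
bool FlattenIndex(const std::vector<int> &index, const std::vector<size_t> &shape, size_t *out_index) {
  *out_index = 0;
  for (size_t j = 0; j < shape.size(); ++j) {
    if (index[j] < 0 || static_cast<size_t>(index[j]) >= shape[j]) {
      return false;
    }
    // Row-major accumulation: offset = offset * d_j + i_j.
    *out_index = *out_index * shape[j] + static_cast<size_t>(index[j]);
  }
  return true;
}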

View File

@@ -209,7 +209,13 @@ void StridedSliceCPUKernel::ParallelRun(uint8_t *input_addr, uint8_t *output_add
bool StridedSliceCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inputs,
const std::vector<kernel::AddressPtr> & /*workspace*/,
const std::vector<kernel::AddressPtr> &outputs) {
if (inputs.size() != 1 || outputs.size() != 1) {
MS_LOG(ERROR) << "StridedSlice requires 1 input and 1 output, but got " << inputs.size() << " input and "
<< outputs.size() << " output.";
return false;
}
if (outputs[0]->size == 0) {
MS_LOG(WARNING) << "StridedSlice output memory size should be greater than 0, but got 0.";
return true;
}
auto input_addr = reinterpret_cast<uint8_t *>(inputs[0]->addr);

View File

@@ -49,12 +49,10 @@ class HSwishGradKernel : public GpuKernel {
size_t input_num = AnfAlgo::GetInputTensorNum(kernel_node);
if (input_num != 2) {
MS_LOG(EXCEPTION) << "Input number is " << input_num << ", but HSwishGrad needs 2 inputs.";
return false;
}
size_t output_num = AnfAlgo::GetOutputTensorNum(kernel_node);
if (output_num != 1) {
MS_LOG(EXCEPTION) << "Output number is " << output_num << ", but HSwishGrad has 1 output.";
return false;
}
auto input_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
input_size_ = 1;
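
The deleted return false; statements were unreachable: MS_LOG(EXCEPTION) throws, so control never falls through to the return. The shape of the issue, with a hypothetical throwing helper standing in for the macro:

#include <cstddef>
#include <stdexcept>

[[noreturn]] void FailSketch(const char *msg) { throw std::runtime_error(msg); }

bool CheckInputCount(size_t input_num) {
  if (input_num != 2) {
    FailSketch("HSwishGrad needs 2 inputs.");
    // return false;  // dead code: FailSketch never returns
  }
  return true;
}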

View File

@@ -60,7 +60,7 @@ void TbeUtils::GenSocInfo(nlohmann::json *soc_info_json) {
void TbeUtils::SaveJsonInfo(const std::string &json_name, const std::string &info) {
char real_path[PATH_MAX] = {0};
std::string path = kCceKernelMeta + json_name + kInfoSuffix;
if (path.size() > PATH_MAX) {
if (path.size() >= PATH_MAX) {
MS_LOG(ERROR) << "File path: " << path << "is too long.";
return;
}
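
The recurring > PATH_MAX to >= PATH_MAX change in this commit is an off-by-one fix: a char buf[PATH_MAX] holds at most PATH_MAX - 1 characters plus the terminating '\0', so a path whose length equals PATH_MAX already cannot fit. A minimal POSIX sketch of the corrected guard; CanonicalizeSketch is an illustrative name, not MindSpore's API.

#include <climits>
#include <cstdlib>
#include <optional>
#include <string>

std::optional<std::string> CanonicalizeSketch(const std::string &path) {
  if (path.size() >= PATH_MAX) {  // length == PATH_MAX leaves no room for '\0'
    return std::nullopt;
  }
  char real_path[PATH_MAX] = {0};
  // realpath requires a destination of at least PATH_MAX bytes.
  if (realpath(path.c_str(), real_path) == nullptr) {
    return std::nullopt;
  }
  return std::string(real_path);
}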

View File

@@ -213,7 +213,7 @@ void SomasSolverPre::TensorRelationLog(const std::vector<DynamicBitSet> *pConstr
MS_EXCEPTION_IF_NULL(context_ptr);
auto save_graphs_path = context_ptr->get_param<std::string>(MS_CTX_SAVE_GRAPHS_PATH);
std::string filename = save_graphs_path + "/" + "somas_tensor_relation_" + std::to_string(graph->graph_id()) + ".ir";
if (filename.size() > PATH_MAX) {
if (filename.size() >= PATH_MAX) {
MS_LOG(ERROR) << "File path " << filename << " is too long.";
return;
}

View File

@@ -28,7 +28,7 @@
namespace mindspore {
std::optional<std::string> Common::GetRealPath(const std::string &input_path) {
if (input_path.length() > PATH_MAX) {
if (input_path.length() >= PATH_MAX) {
MS_LOG(EXCEPTION) << "The length of path: " << input_path << " exceeds limit: " << PATH_MAX;
}
#if defined(SYSTEM_ENV_POSIX)
@@ -39,7 +39,6 @@ std::optional<std::string> Common::GetRealPath(const std::string &input_path) {
MS_LOG(EXCEPTION) << "Unsupported platform.";
#endif
// get real path
std::string out_path;
char real_path[PATH_MAX] = {0};
// input_path is dir + file_name
if (path_split_pos != std::string::npos) {
@@ -63,8 +62,8 @@ std::optional<std::string> Common::GetRealPath(const std::string &input_path) {
return std::nullopt;
}
#endif
out_path = std::string(real_path) + file_name;
} else {
return std::string(real_path) + file_name;
}
// input_path is only file_name
#if defined(SYSTEM_ENV_POSIX)
if (input_path.length() > NAME_MAX) {
@@ -78,21 +77,15 @@ std::optional<std::string> Common::GetRealPath(const std::string &input_path) {
MS_LOG(INFO) << "The file " << input_path << " does not exist, it will be created.";
}
#endif
out_path = std::string(real_path);
}
if (out_path.length() > PATH_MAX) {
MS_LOG(EXCEPTION) << "The file real path: " << out_path << " exceeds limit: " << PATH_MAX;
}
return out_path;
return std::string(real_path);
}
bool Common::CreateNotExistDirs(const std::string &path) {
std::shared_ptr<system::FileSystem> fs = system::Env::GetFileSystem();
MS_EXCEPTION_IF_NULL(fs);
char temp_path[PATH_MAX] = {0};
if (path.length() > PATH_MAX) {
MS_LOG(ERROR) << "Path lens is max than " << PATH_MAX;
if (path.length() >= PATH_MAX) {
MS_LOG(ERROR) << "Path length is equal to or max than " << PATH_MAX;
return false;
}
for (uint32_t i = 0; i < path.length(); i++) {
@@ -294,7 +287,7 @@ std::string Common::AddId(const std::string &filename, const std::string &suffix
}
bool Common::SaveStringToFile(const std::string filename, const std::string string_info) {
if (filename.size() > PATH_MAX) {
if (filename.size() >= PATH_MAX) {
MS_LOG(ERROR) << "File path " << filename << " is too long.";
return false;
}

View File

@@ -19,17 +19,17 @@
#include <unordered_map>
#include <utility>
#include <sstream>
#include <cmath>
#include "mindspore/core/utils/log_adapter.h"
#include "mindspore/core/ir/dtype.h"
#include "mindspore/core/utils/log_adapter.h"
#include "mindspore/core/utils/convert_utils_base.h"
namespace mindspore {
namespace {
// npy file header start information
const char kMagicPrefix[] = "\x93NUMPY";
// magical length include kMagicPrefix length and version length
const size_t kMagicLen = 8;
const size_t kMagicLen = 6;
const size_t kArrayAlign = 64;
// first: header_length_type, second: encoding_type
@@ -90,23 +90,27 @@ std::string NpyHeader::shape_to_str() const {
buffer << ")";
return buffer.str();
}
} // namespace
void int_to_byte(size_t number, char *byte, size_t length) {
for (size_t i = 0; i < length; i++) {
byte[i] = (number >> (i * 8)) & 0xff;
}
}
std::string GenerateNpyHeader(const ShapeVector &shape, TypeId type_id, bool fortran_order) {
static std::unordered_map<TypeId, DtypeDescr> type_desc_map = {
// dtype description corresponding to tensor type
const std::unordered_map<TypeId, DtypeDescr> type_desc_map = {
{kNumberTypeBool, DtypeDescr{'|', 'b', 1}}, {kNumberTypeInt8, DtypeDescr{'|', 'i', 1}},
{kNumberTypeInt16, DtypeDescr{'<', 'i', 2}}, {kNumberTypeInt32, DtypeDescr{'<', 'i', 4}},
{kNumberTypeInt64, DtypeDescr{'<', 'i', 8}}, {kNumberTypeUInt8, DtypeDescr{'|', 'u', 1}},
{kNumberTypeUInt16, DtypeDescr{'<', 'u', 2}}, {kNumberTypeUInt32, DtypeDescr{'<', 'u', 4}},
{kNumberTypeUInt64, DtypeDescr{'<', 'u', 8}}, {kNumberTypeFloat16, DtypeDescr{'<', 'f', 2}},
{kNumberTypeFloat32, DtypeDescr{'<', 'f', 4}}, {kNumberTypeFloat64, DtypeDescr{'<', 'f', 8}},
};
};
} // namespace
void int_to_byte(size_t number, char *byte, size_t length) {
const size_t byte_len = 8;
const size_t mask = 0xff;
for (size_t i = 0; i < length; i++) {
byte[i] = (number >> (i * byte_len)) & mask;
}
}
std::string GenerateNpyHeader(const ShapeVector &shape, TypeId type_id, bool fortran_order) {
auto type_desc = type_desc_map.find(type_id);
if (type_desc == type_desc_map.end()) {
MS_LOG(WARNING) << "Not support dump the " << TypeIdToType(type_id)->ToString() << " data to npy file.";
@@ -115,32 +119,32 @@ std::string GenerateNpyHeader(const ShapeVector &shape, TypeId type_id, bool for
NpyHeader npy_header{type_desc->second, fortran_order, shape};
std::string header_str = npy_header.str();
size_t header_len = header_str.length();
version_type version{1, 0};
size_t total_len = kMagicLen + 2 + header_len + 1;
if (total_len > std::pow(2, 16)) {
const size_t header_len = header_str.length();
const size_t version_len = 2;
const size_t max_len = 65535;
size_t length_len = 2;
size_t total_len = kMagicLen + version_len + length_len + header_len + 1;
if (total_len > max_len) {
version = {2, 0};
total_len = kMagicLen + 4 + header_len + 1;
length_len = 4;
total_len = kMagicLen + version_len + length_len + header_len + 1;
}
const size_t pad_len = kArrayAlign - total_len % kArrayAlign;
const size_t padding_header_len = header_len + pad_len + 1;
const std::string padding(pad_len, ' ');
const std::string end_line = "\n";
char *length_byte = new char[length_len];
int_to_byte(padding_header_len, length_byte, length_len);
std::ostringstream out;
out << kMagicPrefix;
out.put(version.first);
out.put(version.second);
size_t pad_len = kArrayAlign - total_len % kArrayAlign;
size_t padding_header_len = header_len + pad_len + 1;
if (version == version_type{1, 0}) {
char length_byte[2];
int_to_byte(padding_header_len, length_byte, 2);
out.write(length_byte, 2);
} else {
char length_byte[4];
int_to_byte(padding_header_len, length_byte, 4);
out.write(length_byte, 4);
}
std::string padding(pad_len, ' ');
out << header_str << padding << "\n";
(void)out.write(kMagicPrefix, SizeToLong(kMagicLen));
(void)out.put(version.first);
(void)out.put(version.second);
(void)out.write(length_byte, SizeToLong(length_len));
out << header_str << padding << end_line;
delete[] length_byte;
return out.str();
}
} // namespace mindspore
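
For reference, the header the rewritten code emits follows the NumPy .npy format spec: a 6-byte magic "\x93NUMPY", two version bytes, a little-endian header-length field (2 bytes for v1.0, 4 bytes for v2.0, which is chosen only when the v1.0 total would exceed 65535), then the header dict padded with spaces so the whole prefix is a multiple of 64 bytes and terminated with '\n'. A sketch of the two length computations; IntToByteSketch and NpyTotalLen are illustrative names, not the functions above.

#include <cstddef>

// Little-endian encoding of `number` into `length` bytes, mirroring int_to_byte.
void IntToByteSketch(size_t number, char *byte, size_t length) {
  const size_t kBitsPerByte = 8;
  const size_t kByteMask = 0xff;
  for (size_t i = 0; i < length; ++i) {
    byte[i] = static_cast<char>((number >> (i * kBitsPerByte)) & kByteMask);
  }
}

// Bytes before padding: magic + version + length field + dict + trailing '\n'.
size_t NpyTotalLen(size_t header_len, bool v2) {
  const size_t kMagicLen = 6;    // "\x93NUMPY"
  const size_t kVersionLen = 2;  // major, minor
  const size_t length_len = v2 ? 4 : 2;
  return kMagicLen + kVersionLen + length_len + header_len + 1;
}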

View File

@@ -29,15 +29,15 @@ std::shared_ptr<ResourceManager> ResourceManager::ptr_ = nullptr;
*/
APP_ERROR ExistFile(const std::string &filePath) {
struct stat fileSat = {0};
char c[PATH_MAX + 1] = {0x00};
size_t count = filePath.copy(c, PATH_MAX + 1);
char c[PATH_MAX] = {0x00};
size_t count = filePath.copy(c, PATH_MAX);
if (count != filePath.length()) {
MS_LOG(ERROR) << "Failed to strcpy" << c;
return APP_ERR_COMM_FAILURE;
}
// Get the absolute path of input directory
char path[PATH_MAX + 1] = {0x00};
if ((strlen(c) > PATH_MAX) || (realpath(c, path) == nullptr)) {
char path[PATH_MAX] = {0x00};
if ((strlen(c) >= PATH_MAX) || (realpath(c, path) == nullptr)) {
MS_LOG(ERROR) << "Failed to get canonicalize path";
return APP_ERR_COMM_EXIST;
}

View File

@@ -19,6 +19,7 @@
#include <vector>
#include <set>
#include <cstring>
#include <climits>
#include <unordered_map>
#include <mutex>
#include "CommonDataType.h"
@@ -27,8 +28,6 @@
#include "mindspore/core/utils/log_adapter.h"
#include "mindspore/ccsrc/cxx_api/graph/acl/acl_env_guard.h"
#define PATH_MAX 4096
enum ModelLoadMethod {
LOAD_FROM_FILE = 0, // Loading from file, memory of model and weights are managed by ACL
LOAD_FROM_MEM, // Loading from memory, memory of model and weights are managed by ACL

View File

@@ -223,10 +223,10 @@ Status Path::OpenFile(int *file_descriptor, bool create) {
RETURN_STATUS_UNEXPECTED(oss.str());
}
// Convert to canonical form.
if (strlen(common::SafeCStr(path_)) > PATH_MAX) {
if (strlen(common::SafeCStr(path_)) >= PATH_MAX) {
RETURN_STATUS_UNEXPECTED(strerror(errno));
}
char canonical_path[PATH_MAX + 1] = {0x00};
char canonical_path[PATH_MAX] = {0x00};
#if defined(_WIN32) || defined(_WIN64)
auto err = _fullpath(canonical_path, common::SafeCStr(path_), PATH_MAX);
#else
@@ -246,7 +246,7 @@ Status Path::OpenFile(int *file_descriptor, bool create) {
RETURN_STATUS_UNEXPECTED(strerror(errno));
}
auto cur_inx = strlen(canonical_path);
if ((cur_inx + file_part.length() + 1) > PATH_MAX) {
if (cur_inx + file_part.length() >= PATH_MAX) {
RETURN_STATUS_UNEXPECTED(strerror(errno));
}
canonical_path[cur_inx++] = separator_;

View File

@@ -820,7 +820,7 @@ bool AscendKernelRuntime::HcclInit() {
return false;
}
}
if (strlen(config_path_str) > kPathMax) {
if (strlen(config_path_str) >= kPathMax) {
MS_LOG(ERROR) << "File path oversize";
return false;
}

View File

@@ -278,7 +278,7 @@ void TaskGenerator::DumpTaskInfo(const string &real_filename,
}
void TaskGenerator::DumpTaskInfo(const std::string &real_filename) {
if (real_filename.size() > PATH_MAX) {
if (real_filename.size() >= PATH_MAX) {
MS_LOG(ERROR) << "File path " << real_filename << " is too long.";
return;
}

View File

@@ -183,8 +183,8 @@ bool WinWriteFile::Open() {
if (file_name_.c_str() == nullptr) {
MS_LOG(EXCEPTION) << "The file path is null.";
}
char path[PATH_MAX + 1] = {0x00};
if (file_name_.size() > PATH_MAX || _fullpath(path, file_name_.c_str(), PATH_MAX) == nullptr) {
char path[PATH_MAX] = {0x00};
if (file_name_.size() >= PATH_MAX || _fullpath(path, file_name_.c_str(), PATH_MAX) == nullptr) {
MS_LOG(EXCEPTION) << "Convert to real path fail, file name is " << file_name_ << ".";
}

View File

@@ -145,8 +145,8 @@ class PosixWriteFile : public WriteFile {
if (nullptr == file_name_.c_str()) {
MS_LOG(EXCEPTION) << "The file path is null.";
}
char path[PATH_MAX + 1] = {0x00};
if (file_name_.size() > PATH_MAX || nullptr == realpath(file_name_.c_str(), path)) {
char path[PATH_MAX] = {0x00};
if (file_name_.size() >= PATH_MAX || nullptr == realpath(file_name_.c_str(), path)) {
MS_LOG(EXCEPTION) << "Convert to real path fail, file name is " << file_name_ << ".";
}

View File

@@ -50,11 +50,11 @@ inline uint32_t sigma3(uint32_t x) { return (x >> 17 | x << 15) ^ (x >> 19 | x <
std::string LoadFilePath(const std::string &path) {
char real_path[PATH_MAX] = {0};
#if defined(_WIN32) || defined(_WIN64)
if (path.size() > PATH_MAX || _fullpath(real_path, path.c_str(), PATH_MAX) == nullptr) {
if (path.size() >= PATH_MAX || _fullpath(real_path, path.c_str(), PATH_MAX) == nullptr) {
return "";
}
#else
if (path.size() > PATH_MAX || realpath(path.c_str(), real_path) == nullptr) {
if (path.size() >= PATH_MAX || realpath(path.c_str(), real_path) == nullptr) {
return "";
}
#endif

View File

@@ -52,13 +52,13 @@ std::string Location::ToString(SourceLineTip tip) {
return debug_info_ss.str();
}
char path[PATH_MAX + 1] = {0x00};
char path[PATH_MAX] = {0x00};
#if defined(_WIN32) || defined(_WIN64)
if (file_name_.size() > PATH_MAX || _fullpath(path, file_name_.c_str(), PATH_MAX) == nullptr) {
if (file_name_.size() >= PATH_MAX || _fullpath(path, file_name_.c_str(), PATH_MAX) == nullptr) {
return debug_info_ss.str();
}
#else
if (file_name_.size() > PATH_MAX || realpath(file_name_.c_str(), path) == nullptr) {
if (file_name_.size() >= PATH_MAX || realpath(file_name_.c_str(), path) == nullptr) {
return debug_info_ss.str();
}
#endif

View File

@@ -107,7 +107,7 @@ int CreateOutputDir(std::string *file_path) {
if (file_path->empty()) {
MS_LOG(ERROR) << "input file path is empty.";
return RET_ERROR;
} else if (file_path->size() > PATH_MAX) {
} else if (file_path->size() >= PATH_MAX) {
MS_LOG(ERROR) << "input file path is too long";
return RET_ERROR;
}

View File

@@ -618,7 +618,7 @@ void ParseBiasCorrection(PostQuantConfig *post_quant_config, std::string value)
STATUS ParseConfigFile(std::string config_file, PostQuantConfig *post_quant_config) {
MS_ASSERT(post_quant_config != nullptr);
if (config_file.empty() || config_file.length() > PATH_MAX) {
if (config_file.empty() || config_file.length() >= PATH_MAX) {
MS_LOG(ERROR) << "invalid config path!";
return RET_PARAM_INVALID;
}

View File

@@ -32,9 +32,6 @@ class SparseToDense(Cell):
Outputs:
Tensor, converted from sparse tensor.
Args:
sparse_tensor (SparseTensor): the sparse tensor to convert.
Raises:
TypeError: If `sparse_tensor.indices` is not a Tensor.
TypeError: If 'sparse_tensor.values' is not a Tensor.
@@ -117,8 +114,8 @@ class SparseTensorDenseMatmul(Cell):
>>> out = sparse_dense_matmul(indices, values, sparse_shape, dense)
>>> print(out)
[[2 2]
[0 6]
[6 0]]
[6 6]
[0 0]]
"""
def __init__(self, adjoint_st=False, adjoint_dt=False):

View File

@@ -95,8 +95,8 @@ class SparseTensorDenseMatmul(PrimitiveWithInfer):
The rank of sparse matrix and dense matrix must equal to `2`.
Args:
- *adjoint_st** (bool) - If true, sparse tensor is transposed before multiplication. Default: False.
- *adjoint_dt** (bool) - If true, dense tensor is transposed before multiplication. Default: False.
adjoint_st (bool): If true, sparse tensor is transposed before multiplication. Default: False.
adjoint_dt (bool): If true, dense tensor is transposed before multiplication. Default: False.
Inputs:
- **indices** (Tensor) - A 2-D Tensor, represents the position of the element in the sparse tensor.
@@ -134,8 +134,8 @@ class SparseTensorDenseMatmul(PrimitiveWithInfer):
>>> out = sparse_dense_matmul(indices, values, sparse_shape, dense)
>>> print(out)
[[2 2]
[0 6]
[6 0]]
[6 6]
[0 0]]
"""
@prim_attr_register