fix codedex

Signed-off-by: zhupuxu <zhupuxu@huawei.com>
zhupuxu 2020-12-02 14:09:07 +08:00
parent 44ea3902b8
commit da0a18c5e3
9 changed files with 6 additions and 18 deletions
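The substantive fixes below: an uninitialized pointer in ModelProcess::CheckAndInitInput now starts as nullptr, and the integer-to-pointer conversion in MsModel::RegAllOp goes through an explicit static_cast<uintptr_t> first. The fp16 batch-norm hunks only re-wrap long lines. Each remaining hunk drops one line; the rendered diff does not show those deleted lines, which appear to be blank-line removals flagged by the static-analysis run.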


@@ -58,7 +58,6 @@ class MS_API Model {
 extern MS_API const char* kDeviceTypeAscendCL;
 extern MS_API const char* kDeviceTypeAscendMS;
 }  // namespace api
 }  // namespace mindspore
 #endif  // MINDSPORE_INCLUDE_API_MODEL_H


@@ -307,7 +307,7 @@ Status ModelProcess::CheckAndInitInput(const std::map<std::string, Buffer> &inpu
   const auto &input = iter->second;
   const void *data = input.Data();
-  void *input_buffer;
+  void *input_buffer = nullptr;
   if (!is_run_on_device_) {
     ret = aclrtMemcpy(info.device_data, info.buffer_size, data, input.DataSize(), ACL_MEMCPY_HOST_TO_DEVICE);
     if (ret != ACL_ERROR_NONE) {
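The one-line fix above silences an uninitialized-variable finding: `void *input_buffer;` leaves the pointer indeterminate until some branch assigns it, and a code path added later without an assignment would read garbage. A minimal standalone sketch of the pattern (hypothetical names, not the MindSpore code):

    #include <cstddef>

    // Initialize every pointer at its declaration so no code path can read it
    // before assignment; analyzers treat the declaration-only form as a defect.
    void *PickBuffer(bool run_on_device, void *device_data, void *host_data) {
      void *input_buffer = nullptr;  // was `void *input_buffer;` before the fix
      if (!run_on_device) {
        input_buffer = device_data;
      } else {
        input_buffer = host_data;
      }
      return input_buffer;  // well-defined on every path
    }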


@@ -25,7 +25,6 @@
 namespace mindspore {
 namespace api {
 namespace {
 uint64_t kSharedMemorySize = 100ull << 20;  // 100 MB
 }
@@ -63,7 +62,6 @@ Status MultiProcess::MainProcess(ProcessFuncCall parent_process, ProcessFuncCall
   }
   shmat_data_addr_ = shmat_addr_ + sizeof(MessageFlag) * 2;
   shmat_data_max_size_ = memory_size_ - (shmat_data_addr_ - shmat_addr_);
   MS_LOG_INFO << "Shm addr " << (uint64_t)shmat_addr_;
   if (pid == 0) {
     ChildProcess(child_process);
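For orientation, the pointer arithmetic above implies this segment layout: two MessageFlag control blocks at the front (presumably one per direction of the parent/child exchange, inferred from the `* 2`), then the payload region:

    | MessageFlag #0 | MessageFlag #1 | data ... (shmat_data_max_size_ bytes) |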


@@ -22,7 +22,6 @@
 namespace mindspore {
 namespace api {
 struct MessageFlag {
   uint64_t heartbeat = 0;
   uint64_t stop = false;
@@ -61,7 +60,6 @@ class MultiProcess {
   Status ParentProcess(ProcessFuncCall parent_process);
   void ChildProcess(ProcessFuncCall child_process);
 };
 }  // namespace api
 }  // namespace mindspore


@@ -21,7 +21,6 @@
 namespace mindspore {
 namespace api {
 Status SharedMemory::Create(uint64_t memory_size) {
   auto access_mode = S_IRUSR | S_IWUSR | S_IROTH | S_IWOTH | S_IRGRP | S_IWGRP;
   shm_id_ = shmget(IPC_PRIVATE, memory_size, IPC_CREAT | IPC_EXCL | access_mode);
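For reference, SharedMemory::Create wraps the System V shared-memory API, whose lifecycle is create, attach, detach, remove. A self-contained sketch of those calls (abbreviated error handling, not the MindSpore implementation):

    #include <cstdio>
    #include <sys/ipc.h>
    #include <sys/shm.h>
    #include <sys/stat.h>

    int main() {
      // Same permission bits as the hunk above: read/write for user, group, other.
      int access_mode = S_IRUSR | S_IWUSR | S_IROTH | S_IWOTH | S_IRGRP | S_IWGRP;
      int shm_id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | IPC_EXCL | access_mode);
      if (shm_id == -1) { perror("shmget"); return 1; }

      void *addr = shmat(shm_id, nullptr, 0);  // attach into this address space
      if (addr == reinterpret_cast<void *>(-1)) { perror("shmat"); return 1; }

      // ... read/write the segment ...

      shmdt(addr);                        // detach the mapping
      shmctl(shm_id, IPC_RMID, nullptr);  // mark the segment for destruction
      return 0;
    }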
@@ -64,6 +63,5 @@ void SharedMemory::Destroy() {
     MS_LOG_ERROR << errMsg;
   }
 }
 }  // namespace api
 }  // namespace mindspore


@@ -21,7 +21,6 @@
 namespace mindspore {
 namespace api {
 class SharedMemory {
  public:
   Status Create(uint64_t memory_size);
@@ -34,7 +33,6 @@ class SharedMemory {
   int shm_id_ = -1;
   uint8_t *shmat_addr_ = nullptr;
 };
 }  // namespace api
 }  // namespace mindspore


@@ -41,7 +41,6 @@ using std::vector;
 namespace py = pybind11;
 namespace mindspore {
 namespace api {
 MsModel::MsModel(uint32_t device_id) : device_id_(device_id) {}
 MsModel::~MsModel() = default;
@@ -320,7 +319,7 @@ void MsModel::RegAllOp() {
   }
   py::module c_expression = py::module::import("mindspore._c_expression");
   size_t ops_info_long = c_expression.attr("OpInfoLoaderPy")().attr("get_all_ops_info")().cast<size_t>();
-  auto all_ops_info = reinterpret_cast<std::vector<kernel::OpInfo *> *>(ops_info_long);
+  auto all_ops_info = reinterpret_cast<std::vector<kernel::OpInfo *> *>(static_cast<uintptr_t>(ops_info_long));
   for (auto op_info : *all_ops_info) {
     kernel::OpLib::RegOpInfo(std::shared_ptr<kernel::OpInfo>(op_info));
   }
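The cast fix above addresses a size_t-to-pointer conversion finding: the pybind11 call hands back the vector's address as a plain size_t, and a reinterpret_cast from an arbitrary integer type is flagged, so the value is first widened to uintptr_t, the integer type defined to round-trip pointers. A minimal sketch of the pattern (stand-in types, not the MindSpore code):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct OpInfo {};  // stand-in for kernel::OpInfo

    // Recover a pointer that crossed an API boundary as a plain integer:
    // integer -> uintptr_t (pointer-width by definition) -> pointer.
    std::vector<OpInfo *> *FromHandle(size_t handle) {
      return reinterpret_cast<std::vector<OpInfo *> *>(static_cast<uintptr_t>(handle));
    }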
@@ -414,6 +413,5 @@ Status MsModel::GetOutputsInfo(std::vector<Tensor> *tensor_list) const {
   }
   return SUCCESS;
 }
 }  // namespace api
 }  // namespace mindspore


@@ -79,7 +79,6 @@ class MsModel : public ModelImpl {
 };
 API_REG_MODEL(AscendMS, MsModel);
 }  // namespace api
 }  // namespace mindspore
 #endif  // MINDSPORE_CCSRC_SESSION_SESSION_BASIC_H


@@ -17,8 +17,8 @@
 #include "nnacl/fp16/batchnorm_fp16.h"
 #include <math.h>
-void BatchNormFp16(const float16_t *input, const void *mean, const void *variance,
-                   BatchNormParameter *param, int task_id, float16_t *output) {
+void BatchNormFp16(const float16_t *input, const void *mean, const void *variance, BatchNormParameter *param,
+                   int task_id, float16_t *output) {
   int units_per_thread = UP_DIV(param->unit_, param->op_parameter_.thread_num_);
   int completed_units = task_id * units_per_thread;
   int cur_unit = MSMIN(units_per_thread, param->unit_ - completed_units);
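Worked example of the partitioning above: with param->unit_ = 10 and thread_num_ = 4, UP_DIV rounds up to units_per_thread = 3, so task 3 starts at completed_units = 9 and processes cur_unit = MSMIN(3, 10 - 9) = 1 unit, i.e. the last worker absorbs the remainder.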
@@ -47,9 +47,9 @@ void FusedBatchNormFp16(const void *input, const void *scale, const void *offset
       float16_t variance_sqrt = sqrt(((const float16_t *)variance)[c] + param->epsilon_);
       if (variance_sqrt != 0) {
         float16_t norm_val =
           (((const float16_t *)input)[cur_offset + c] - ((const float16_t *)mean)[c]) / variance_sqrt;
         ((float16_t *)output)[cur_offset + c] =
           norm_val * ((const float16_t *)scale)[c] + ((const float16_t *)offset)[c];
       }
     }
     cur_offset += param->channel_;
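The loop body computes the standard inference-time batch normalization, output = (input - mean) / sqrt(variance + epsilon) * scale + offset, per channel. A scalar sketch in plain float (float16_t is ARM-specific; the zero-divisor guard mirrors the kernel's check):

    #include <cmath>

    float FusedBatchNormScalar(float input, float mean, float variance,
                               float scale, float offset, float epsilon) {
      float variance_sqrt = std::sqrt(variance + epsilon);
      // The kernel skips the store in this case; the sketch returns 0 instead.
      if (variance_sqrt == 0) return 0.0f;
      float norm_val = (input - mean) / variance_sqrt;
      return norm_val * scale + offset;
    }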