forked from mindspore-Ecosystem/mindspore
!6574 reviewbot warning clean
Merge pull request !6574 from liubuyu/code_clean
commit b94e85b303
@@ -16,12 +16,13 @@
 @title mindspore_build

 SET BASEPATH=%CD%
-IF NOT EXIST "%BASEPATH%/build" (
+SET BUILD_PATH=%BASEPATH%/build
+
+IF NOT EXIST "%BUILD_PATH%" (
 md "build"
 )

-cd %BASEPATH%/build
-set BUILD_PATH=%CD%
+cd %BUILD_PATH%

 IF NOT EXIST "%BUILD_PATH%/mindspore" (
 md "mindspore"
build.sh
@@ -237,7 +237,7 @@ checkopts()
 ;;
 z)
 eval ARG=\$\{$OPTIND\}
-if [[ -n $ARG && $ARG != -* ]]; then
+if [[ -n "$ARG" && "$ARG" != -* ]]; then
 OPTARG="$ARG"
 check_on_off $OPTARG z
 OPTIND=$((OPTIND + 1))
@@ -81,7 +81,6 @@ class MS_API InferSession {
 }
 static std::shared_ptr<InferSession> CreateSession(const std::string &device, uint32_t device_id);
 };
-
 } // namespace inference
 } // namespace mindspore
 #endif // MINDSPORE_INCLUDE_MS_SESSION_H
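For context on the hunk above: CreateSession is a static factory on InferSession that takes a device name and a device id and returns a shared pointer. A minimal usage sketch, assuming the public header path; the "Ascend" device string and id 0 are illustrative values, not taken from this diff:

    #include <memory>
    #include <string>
    #include "include/ms_session.h"  // assumed header path, from the include guard above

    int main() {
      // Device name and device id are illustrative.
      std::shared_ptr<mindspore::inference::InferSession> session =
          mindspore::inference::InferSession::CreateSession("Ascend", 0);
      if (session == nullptr) {
        return 1;  // creation failed (e.g. unsupported device); stop here
      }
      // Model loading and execution calls would follow in real code.
      return 0;
    }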
@@ -66,7 +66,6 @@ class MirrorPadGpuFwdKernel : public GpuKernel {
 }

 string mode = GetValue<string>(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("mode"));

 if (mode == "REFLECT") {
 mode_ = 0; // reflected mirroring
 } else {
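The lines above read the kernel's "mode" string attribute from the primitive and map it to an integer flag, with "REFLECT" becoming mode_ = 0. A standalone sketch of that mapping; the "SYMMETRIC" value below is an assumption for illustration, not taken from the MindSpore source:

    #include <stdexcept>
    #include <string>

    // Map a mirror-pad mode string to an integer flag, as the kernel above does.
    int MirrorModeFromString(const std::string &mode) {
      if (mode == "REFLECT") {
        return 0;  // reflected mirroring, as in the hunk above
      }
      if (mode == "SYMMETRIC") {
        return 1;  // assumed alternative mode, for illustration only
      }
      throw std::invalid_argument("unsupported mirror pad mode: " + mode);
    }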
@@ -66,7 +66,6 @@ class MirrorPadGpuBackKernel : public GpuKernel {
 }

 string mode = GetValue<string>(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("mode"));

 if (mode == "REFLECT") {
 mode_ = 0; // reflected mirroring
 } else {
@@ -27,6 +27,5 @@ MS_REG_GPU_KERNEL_ONE(
 ROIAlign,
 KernelAttr().AddInputAttr(kNumberTypeFloat16).AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeFloat16),
 ROIAlignGpuFwdKernel, half)
-
 } // namespace kernel
 } // namespace mindspore
@@ -27,6 +27,5 @@ MS_REG_GPU_KERNEL_ONE(
 ROIAlignGrad,
 KernelAttr().AddInputAttr(kNumberTypeFloat16).AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeFloat16),
 ROIAlignGradGpuFwdKernel, half)
-
 } // namespace kernel
 } // namespace mindspore
@@ -14,12 +14,12 @@ endif ()

 if (ENABLE_CPU)
 file(GLOB_RECURSE CPU_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "cpu/*.cc")
-list(REMOVE_ITEM CPU_SRC_LIST "cpu/mpi/mpi_adapter.cc", "cpu/mpi/mpi_export.cc")
+list(REMOVE_ITEM CPU_SRC_LIST "cpu/mpi/mpi_adapter.cc" "cpu/mpi/mpi_export.cc")
 endif ()

 if (ENABLE_MPI)
 if (ENABLE_CPU)
-file(GLOB_RECURSE MPI_SRC_LIST "cpu/mpi/mpi_adapter.cc", "cpu/mpi/mpi_export.cc")
+file(GLOB_RECURSE MPI_SRC_LIST "cpu/mpi/mpi_adapter.cc" "cpu/mpi/mpi_export.cc")
 set_property(SOURCE ${MPI_SRC_LIST}
 PROPERTY COMPILE_DEFINITIONS SUBMODULE_ID=mindspore::SubModuleId::SM_DEVICE)
 add_library(mpi_adapter SHARED ${MPI_SRC_LIST})
@@ -57,7 +57,6 @@ constexpr const char *kOpTypeOpDebug = "Opdebug";
 namespace mindspore {
 namespace device {
 namespace ascend {
-
 DataDumper::~DataDumper() {
 ReleaseDevMem(&dev_load_mem_);
 ReleaseDevMem(&dev_unload_mem_);
@@ -141,7 +141,6 @@ inline bool CheckNullInput(std::vector<size_t> input_shape) {
 MS_LOG(EXCEPTION) << "CUAD curand Error: " << message << " | curandStatus: " << status; \
 } \
 }
-
 } // namespace gpu
 } // namespace device
 } // namespace mindspore
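The hunk above is the tail of a curand status-checking macro that logs the failing call and its curandStatus. A self-contained sketch of the same pattern, using plain stderr and std::abort instead of MS_LOG(EXCEPTION); the macro name here is illustrative, not MindSpore's:

    #include <curand.h>
    #include <cstdlib>
    #include <iostream>

    // Wrap a curand call and fail loudly if it does not return CURAND_STATUS_SUCCESS.
    #define CHECK_CURAND(expr)                                                    \
      do {                                                                        \
        curandStatus_t status = (expr);                                           \
        if (status != CURAND_STATUS_SUCCESS) {                                    \
          std::cerr << "curand call failed | curandStatus: " << status << "\n";   \
          std::abort();                                                           \
        }                                                                         \
      } while (0)

    int main() {
      curandGenerator_t gen;
      CHECK_CURAND(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT));
      CHECK_CURAND(curandDestroyGenerator(gen));
      return 0;
    }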
@@ -25,7 +25,6 @@
 namespace mindspore {
 namespace device {
 namespace gpu {
-
 MPIInitializer &MPIInitializer::GetInstance() {
 static MPIInitializer instance;
 return instance;
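MPIInitializer::GetInstance above uses a function-local static, the usual "Meyers singleton" pattern: the instance is constructed on the first call (thread-safely in C++11 and later) and the same object is returned on every call. A minimal standalone illustration; the class name is made up for the example:

    // Function-local static singleton, as in GetInstance above.
    class Initializer {
     public:
      static Initializer &GetInstance() {
        static Initializer instance;  // constructed once, on first use
        return instance;
      }
      Initializer(const Initializer &) = delete;
      Initializer &operator=(const Initializer &) = delete;

     private:
      Initializer() = default;
    };

    int main() {
      Initializer &a = Initializer::GetInstance();
      Initializer &b = Initializer::GetInstance();
      return (&a == &b) ? 0 : 1;  // same object both times
    }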
@@ -63,10 +63,8 @@ namespace pse_adaptor {
 vector<int> row(label_mat.cols);
 for (int y = 0; y < label_mat.cols; ++y) {
 int label = label_mat.at<int>(x, y);
-
 if (label == 0) continue;
 if (area[label] < min_area) continue;
-
 Point point(x, y);
 queue.push(point);
 row[y] = label;
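The pse_adaptor loop above seeds a BFS queue: for each pixel in row x of the label matrix, pixels with a non-zero label whose connected-component area passes the minimum-area filter are pushed onto the queue and recorded in row. A simplified sketch with plain vectors standing in for cv::Mat; the function and parameter names are illustrative:

    #include <queue>
    #include <utility>
    #include <vector>

    // Seed a BFS queue from one row of a label map, skipping background pixels
    // and components whose area is below the minimum-area threshold.
    void SeedRow(const std::vector<std::vector<int>> &label_mat,
                 const std::vector<int> &area, int min_area, int x,
                 std::queue<std::pair<int, int>> *queue,
                 std::vector<int> *row) {
      const int cols = static_cast<int>(label_mat[x].size());
      row->assign(cols, 0);
      for (int y = 0; y < cols; ++y) {
        int label = label_mat[x][y];
        if (label == 0) continue;               // background pixel
        if (area[label] < min_area) continue;   // component too small, skip
        queue->emplace(x, y);                   // seed the BFS with this pixel
        (*row)[y] = label;
      }
    }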
@@ -26,8 +26,7 @@ MSNetWork::MSNetWork(void) : session_(nullptr) {}

 MSNetWork::~MSNetWork(void) {}

-void
-MSNetWork::CreateSessionMS(char *modelBuffer, size_t bufferLen, mindspore::lite::Context *ctx) {
+void MSNetWork::CreateSessionMS(char *modelBuffer, size_t bufferLen, mindspore::lite::Context *ctx) {
 session_ = mindspore::session::LiteSession::CreateSession(ctx);
 if (session_ == nullptr) {
 MS_PRINT("Create Session failed.");
@@ -52,4 +51,3 @@ int MSNetWork::ReleaseNets(void) {
 delete session_;
 return 0;
 }
-
@@ -52,7 +52,7 @@ class MSNetWork {

 int ReleaseNets(void);

-mindspore::session::LiteSession * session() const { return session_; }
+mindspore::session::LiteSession *session() const { return session_; }
 private:
 mindspore::session::LiteSession *session_;
 };
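The MSNetWork hunks above clean up CreateSessionMS and the session() accessor of the demo app's small session wrapper. For context, a usage sketch of how the app might call it; the header paths, the stack-constructed Context, and the surrounding buffer handling are assumptions:

    #include <cstddef>
    #include "MSNetWork.h"        // assumed header name, from the class above
    #include "include/context.h"  // assumed MindSpore Lite header path

    // Hand a model buffer to the wrapper and check the session it created.
    void LoadModel(char *modelBuffer, size_t bufferLen) {
      mindspore::lite::Context ctx;
      MSNetWork net;
      net.CreateSessionMS(modelBuffer, bufferLen, &ctx);
      if (net.session() == nullptr) {
        // CreateSession failed inside CreateSessionMS; nothing to run.
        return;
      }
      // Graph compilation and execution on net.session() would follow in the real app.
    }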
@@ -145,9 +145,8 @@ char *CreateLocalModelBuffer(JNIEnv *env, jobject modelBuffer) {
 * @param msOutputs
 * @return
 */
-std::string
-ProcessRunnetResult(const int RET_CATEGORY_SUM, const char *const labels_name_map[],
-std::unordered_map<std::string, mindspore::tensor::MSTensor *> msOutputs) {
+std::string ProcessRunnetResult(const int RET_CATEGORY_SUM, const char *const labels_name_map[],
+std::unordered_map<std::string, mindspore::tensor::MSTensor *> msOutputs) {
 // Get the branch of the model output.
 // Use iterators to get map elements.
 std::unordered_map<std::string, mindspore::tensor::MSTensor *>::iterator iter;
@@ -160,7 +159,7 @@ ProcessRunnetResult(const int RET_CATEGORY_SUM, const char *const labels_name_ma
 MS_PRINT("Number of tensor elements:%d", tensorNum);

 // Get a pointer to the first score.
-float *temp_scores = static_cast<float * >(outputTensor->MutableData());
+float *temp_scores = static_cast<float *>(outputTensor->MutableData());

 float scores[RET_CATEGORY_SUM];
 for (int i = 0; i < RET_CATEGORY_SUM; ++i) {
@@ -202,12 +201,12 @@ bool BitmapToLiteMat(JNIEnv *env, const jobject &srcBitmap, LiteMat *lite_mat) {
 MS_PRINT("Init From RGBA error");
 }
 } else {
-unsigned char *pixels_ptr = new unsigned char[info.width*info.height*4];
+unsigned char *pixels_ptr = new unsigned char[info.width * info.height * 4];
 unsigned char *ptr = pixels_ptr;
 unsigned char *data = reinterpret_cast<unsigned char *>(pixels);
 for (int i = 0; i < info.height; i++) {
-memcpy(ptr, data, info.width*4);
-ptr += info.width*4;
+memcpy(ptr, data, info.width * 4);
+ptr += info.width * 4;
 data += info.stride;
 }
 ret = InitFromPixel(reinterpret_cast<const unsigned char *>(pixels_ptr),
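The else branch above handles bitmaps whose stride is larger than width * 4: each row is copied separately so the per-row padding bytes are dropped before the packed RGBA buffer is handed to InitFromPixel. The same row-by-row copy as a standalone sketch, without JNI; names are illustrative:

    #include <cstring>
    #include <vector>

    // Copy an RGBA image with per-row padding (stride >= width * 4) into a
    // tightly packed buffer, one row at a time.
    std::vector<unsigned char> CopyTightRgba(const unsigned char *src,
                                             int width, int height, int stride) {
      std::vector<unsigned char> dst(static_cast<size_t>(width) * height * 4);
      unsigned char *out = dst.data();
      for (int row = 0; row < height; ++row) {
        std::memcpy(out, src, static_cast<size_t>(width) * 4);  // pixels only
        out += width * 4;  // packed destination
        src += stride;     // stride includes any per-row padding
      }
      return dst;
    }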
@@ -18,6 +18,4 @@
 #define MINDSPORE_JNI_HMS_DEBUG_MINDSPORENETNATIVE_H

-
-

 #endif // MINDSPORE_JNI_HMS_DEBUG_MINDSPORENETNATIVE_H
@@ -26,8 +26,7 @@ MSNetWork::MSNetWork(void) : session_(nullptr) {}

 MSNetWork::~MSNetWork(void) {}

-void
-MSNetWork::CreateSessionMS(char *modelBuffer, size_t bufferLen, mindspore::lite::Context *ctx) {
+void MSNetWork::CreateSessionMS(char *modelBuffer, size_t bufferLen, mindspore::lite::Context *ctx) {
 session_ = mindspore::session::LiteSession::CreateSession(ctx);
 if (session_ == nullptr) {
 MS_PRINT("Create Session failed.");
@@ -52,7 +52,7 @@ class MSNetWork {

 int ReleaseNets(void);

-mindspore::session::LiteSession * session() const { return session_; }
+mindspore::session::LiteSession *session() const { return session_; }
 private:
 mindspore::session::LiteSession *session_;
 };
@@ -45,7 +45,7 @@ bool BitmapToLiteMat(JNIEnv *env, const jobject &srcBitmap, LiteMat *lite_mat) {
 return false;
 }
 AndroidBitmap_lockPixels(env, srcBitmap, &pixels);
-if (info.stride == info.width*4) {
+if (info.stride == info.width * 4) {
 ret = InitFromPixel(reinterpret_cast<const unsigned char *>(pixels),
 LPixelType::RGBA2RGB, LDataType::UINT8,
 info.width, info.height, lite_mat_bgr);
@@ -53,12 +53,12 @@ bool BitmapToLiteMat(JNIEnv *env, const jobject &srcBitmap, LiteMat *lite_mat) {
 MS_PRINT("Init From RGBA error");
 }
 } else {
-unsigned char *pixels_ptr = new unsigned char[info.width*info.height*4];
+unsigned char *pixels_ptr = new unsigned char[info.width * info.height * 4];
 unsigned char *ptr = pixels_ptr;
 unsigned char *data = reinterpret_cast<unsigned char *>(pixels);
 for (int i = 0; i < info.height; i++) {
-memcpy(ptr, data, info.width*4);
-ptr += info.width*4;
+memcpy(ptr, data, info.width * 4);
+ptr += info.width * 4;
 data += info.stride;
 }
 ret = InitFromPixel(reinterpret_cast<const unsigned char *>(pixels_ptr),
@@ -110,8 +110,7 @@ char *CreateLocalModelBuffer(JNIEnv *env, jobject modelBuffer) {
 * @param srcImageHeight The height of the original input image.
 * @return
 */
-std::string ProcessRunnetResult(std::unordered_map<std::string,
-mindspore::tensor::MSTensor *> msOutputs,
+std::string ProcessRunnetResult(std::unordered_map<std::string, mindspore::tensor::MSTensor *> msOutputs,
 int srcImageWidth, int srcImageHeight) {
 std::unordered_map<std::string, mindspore::tensor::MSTensor *>::iterator iter;
 iter = msOutputs.begin();
@@ -124,8 +123,8 @@ std::string ProcessRunnetResult(std::unordered_map<std::string,
 MS_PRINT("%s %s", branch1_string.c_str(), branch2_string.c_str());

 // ----------- 接口测试 --------------------------
-float *tmpscores2 = reinterpret_cast<float * >(branch1_tensor->MutableData());
-float *tmpdata = reinterpret_cast<float * >(branch2_tensor->MutableData());
+float *tmpscores2 = reinterpret_cast<float *>(branch1_tensor->MutableData());
+float *tmpdata = reinterpret_cast<float *>(branch2_tensor->MutableData());

 // Using ssd model util to process model branch outputs.
 SSDModelUtil ssdUtil(srcImageWidth, srcImageHeight);
@@ -177,7 +177,6 @@ void SSDModelUtil::getDefaultBoxes() {
 tempWHBox.boxw = h;
 tempWHBox.boxh = w;
 all_sizes.push_back(tempWHBox);
-
 } else {
 // len(all_sizes) = 6.
 tempWHBox.boxw = sk1;