Compatible with multiple compilers

sunsuodong 2021-06-04 19:15:52 +08:00
parent cf295a00bb
commit 56ae44d98d
15 changed files with 24 additions and 67 deletions

View File

@@ -17,6 +17,7 @@
#define MINDSPORE_INCLUDE_API_DUAL_ABI_HELPER_H_
#include <algorithm>
#include <iterator>
#include <map>
#include <memory>
#include <optional>

View File

@@ -16,7 +16,7 @@
#include "nnacl/fp32/invert_permutation_fp32.h"
-inline void InvertPermutation(const int *input, int *output, int num) {
+void InvertPermutation(const int *input, int *output, int num) {
for (int i = 0; i < num; i++) {
int index = input[i];
output[index] = i;

View File

@@ -71,7 +71,6 @@ void MatmulFloatNeon32Opt(const float *a, const float *b, float *c, const float
void MatmulFloatNeon32Opt12x4(const float *a, const float *b, float *c, const float *bias, int act_type, int depth,
int row, int col, int stride, int write_mode);
#elif ENABLE_SSE
-#include <x86intrin.h>
void MatmulFloatSse64(const float *a, const float *b, float *c, const float *bias, int act_type, int depth, int row,
int col, int stride, size_t writeNhwc, size_t WriteWino);
void MatmulFloatSse64Opt(const float *a, const float *b, float *c, const float *bias, int act_type, int depth, int row,

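Note: this hunk appears to drop the #include <x86intrin.h> line from the SSE branch of a declaration-only header. <x86intrin.h> is a GCC/Clang umbrella header that MSVC does not ship, so keeping it out of shared headers avoids tying the build to one toolchain. Where the intrinsics are actually needed, a common portable guard (a sketch, not part of this commit) looks like:

#if defined(_MSC_VER)
#include <immintrin.h>  // MSVC: SSE/AVX intrinsics
#else
#include <x86intrin.h>  // GCC/Clang umbrella header for x86 intrinsics
#endif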
View File

@@ -15,6 +15,7 @@
*/
#include "nnacl/fp32/roi_pooling_fp32.h"
+#include <float.h>
#include <math.h>
#include <string.h>
#include "nnacl/errorcode.h"
@@ -68,7 +68,7 @@ int ROIPooling(const float *in_ptr, float *out_ptr, const float *roi, float *max
wend = MSMIN(MSMAX(wend + roi_start_w, 0), width_);
bool is_empty = (hend <= hstart) || (wend <= wstart);
for (int j = 0; j < channels_; ++j) {
-max_c[j] = is_empty ? 0 : -__FLT_MAX__;
+max_c[j] = is_empty ? 0 : -FLT_MAX;
}
int pooled_index = i * param->out_strides_[0] + ph * param->out_strides_[1] + pw * param->out_strides_[2];
int bd_index = hstart * param->in_strides_[1];

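Note: __FLT_MAX__ is a builtin macro of GCC/Clang, while FLT_MAX from <float.h> is the standard spelling every compiler provides, which is why the include is added in the first hunk above. A minimal sketch of the portable pattern for seeding a running maximum:

#include <float.h>  // FLT_MAX: portable across compilers

// Return the largest element, starting from the lowest finite float value.
float RunningMax(const float *data, int n) {
  float max_v = -FLT_MAX;
  for (int i = 0; i < n; ++i) {
    if (data[i] > max_v) max_v = data[i];
  }
  return max_v;
}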
View File

@@ -15,43 +15,17 @@
*/
#include "nnacl/infer/infer_register.h"
-InferShape *g_infer_func = NULL;
-#ifdef MS_COMPILE_IOS
-void InitInferFuncBuf() {
-#else
-__attribute__((constructor(101))) void InitInferFuncBuf() {
-#endif
-if (g_infer_func != NULL) {
-return;
-}
-g_infer_func = malloc(PrimType_MAX * sizeof(InferShape));
-if (g_infer_func != NULL) {
-memset(g_infer_func, 0, PrimType_MAX * sizeof(InferShape));
-}
-}
-__attribute__((destructor)) void DestroyInferFuncBuf() {
-if (g_infer_func == NULL) {
-return;
-}
-free(g_infer_func);
-g_infer_func = NULL;
-}
+__attribute__((init_priority(101))) InferShape g_infer_func[PrimType_MAX * sizeof(InferShape)];
InferShape GetInferFunc(int prim_type) {
-if (g_infer_func != NULL && prim_type < PrimType_MAX) {
+if (prim_type < PrimType_MAX) {
return g_infer_func[prim_type];
}
return NULL;
}
void RegInfer(int prim_type, InferShape func) {
-#ifdef MS_COMPILE_IOS
-if (g_infer_func == NULL) {
-InitInferFuncBuf();
-}
-#endif
-if (g_infer_func != NULL && prim_type < PrimType_MAX) {
+if (prim_type < PrimType_MAX) {
g_infer_func[prim_type] = func;
}
}

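Note: the removed code built the registry with __attribute__((constructor(101))) plus malloc/memset, which not every compiler runs the same way (hence the earlier MS_COMPILE_IOS special case); the new code reserves the table as static storage that is zero-initialized at load time. A minimal sketch of that static-registry idea, with hypothetical names (InferShapeFn and kPrimTypeMax stand in for the real types):

#include <stddef.h>

typedef int (*InferShapeFn)(const void *inputs, size_t inputs_size, void *outputs, size_t outputs_size);
enum { kPrimTypeMax = 256 };  // stands in for PrimType_MAX

static InferShapeFn g_infer_table[kPrimTypeMax];  // static storage: zeroed at load, no constructor or malloc

InferShapeFn GetInfer(int prim_type) {
  return (prim_type >= 0 && prim_type < kPrimTypeMax) ? g_infer_table[prim_type] : NULL;
}

void RegInfer(int prim_type, InferShapeFn fn) {
  if (prim_type >= 0 && prim_type < kPrimTypeMax) {
    g_infer_table[prim_type] = fn;
  }
}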
View File

@@ -23,7 +23,7 @@
extern "C" {
#endif
-int LogSoftMaxInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+int LogSoftmaxInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
OpParameter *parameter);
#ifdef __cplusplus

View File

@@ -15,7 +15,6 @@
*/
#include "thread/threadpool.h"
#include <unistd.h>
#include <algorithm>
#include "thread/core_affinity.h"

View File

@@ -35,8 +35,8 @@ namespace mindspore {
#define THREAD_ERROR(content, args...) \
{ printf("[ERROR] %s|%d: " #content "\r\n", __func__, __LINE__, ##args); }
#else
-#define THREAD_INFO(content, args...)
-#define THREAD_ERROR(content, args...)
+#define THREAD_INFO(content, ...)
+#define THREAD_ERROR(content, ...)
#endif
#define THREAD_ERROR_IF_NULL(ptr) \

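Note: the no-op branches above switch from the GNU named-varargs extension (content, args... with ##args) to the standard C99/C++11 form, where the variadic part is an unnamed ... expanded via __VA_ARGS__. A minimal sketch with a hypothetical THREAD_DEBUG macro and ENABLE_THREAD_LOG guard:

#include <cstdio>

#ifdef ENABLE_THREAD_LOG
#define THREAD_DEBUG(...) std::printf(__VA_ARGS__)  // standard variadic macro
#else
#define THREAD_DEBUG(...)  // compiled out: arguments are discarded
#endif

// Usage: THREAD_DEBUG("[DEBUG] %s|%d: worker started\r\n", __func__, __LINE__);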
View File

@@ -65,7 +65,7 @@ class MS_API KernelInterface {
};
/// \brief KernelInterfaceCreator defined a functor to create KernelInterface.
-using KernelInterfaceCreator MS_API = std::function<std::shared_ptr<KernelInterface>()>;
+using KernelInterfaceCreator = std::function<std::shared_ptr<KernelInterface>()>;
/// \brief RegisterKernelInterface defined registration and acquisition of KernelInterface.
class MS_API RegisterKernelInterface {

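Note: MS_API typically expands to an export/visibility marker (__declspec(dllexport) or __attribute__((visibility("default")))); only some compilers tolerate that marker in this position of an alias-declaration, and a type alias produces no symbol to export anyway, so the portable fix is simply to drop it from the alias and keep MS_API on the exported classes and functions. A minimal self-contained sketch of the accepted form (the same change appears on the CreateKernel alias below):

#include <functional>
#include <memory>

class KernelInterface;  // forward declaration, for the sketch only

// No export macro on the alias; the classes that use it carry MS_API instead.
using KernelInterfaceCreator = std::function<std::shared_ptr<KernelInterface>()>;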
View File

@@ -28,8 +28,8 @@
namespace mindspore {
namespace kernel {
-static const char *const kArchCPU __attribute__((unused)) = "CPU";
-static const char *const kArchGPU __attribute__((unused)) = "GPU";
+extern const char *const kArchCPU;
+extern const char *const kArchGPU;
/// \brief KernelDesc defined kernel's basic attribute.
struct MS_API KernelDesc {
@@ -59,7 +59,7 @@ struct MS_API KernelDesc {
/// \param[in] ctx Define for holding environment variables during runtime.
///
/// \return Smart Pointer of kernel.
-using CreateKernel MS_API = std::function<std::shared_ptr<kernel::Kernel>(
+using CreateKernel = std::function<std::shared_ptr<kernel::Kernel>(
const std::vector<tensor::MSTensor *> &inputs, const std::vector<tensor::MSTensor *> &outputs,
const schema::Primitive *primitive, const lite::Context *ctx)>;

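Note: a static const pointer defined in a header gives every translation unit its own internal-linkage copy, and the __attribute__((unused)) used to silence the resulting warnings is GCC/Clang-only. Declaring the constants extern here and defining them once in the registry source (the hunk further down that adds the kArchCPU/kArchGPU definitions) avoids both problems. Sketch of the split, reusing the names from the diff:

// header: declarations only, no compiler-specific attributes
extern const char *const kArchCPU;
extern const char *const kArchGPU;

// one .cc file (which includes the header, so these pick up external linkage):
const char *const kArchCPU = "CPU";
const char *const kArchGPU = "GPU";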
View File

@@ -88,10 +88,11 @@ Status ModelImpl::Build() {
MS_LOG(ERROR) << "Invalid thread affinity.";
return kLiteInputParamInvalid;
}
-lite::DeviceInfo cpu_info = {.cpu_device_info_ = {cpu_context->GetEnableFP16(), mode}};
+lite::DeviceInfo cpu_info = {0};
+cpu_info.cpu_device_info_ = {cpu_context->GetEnableFP16(), mode};
model_context.device_list_.push_back({lite::DT_CPU, cpu_info});
if (device_list.size() == 2) {
-lite::DeviceInfo device_info;
+lite::DeviceInfo device_info = {0};
if (device_list[1]->GetDeviceType() == kMaliGPU) {
auto gpu_context = device_list[1]->Cast<MaliGPUDeviceInfo>();
device_info.gpu_device_info_ = {gpu_context->GetEnableFP16()};

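Note: the removed line initialized DeviceInfo with a designated initializer (.cpu_device_info_ = {...}), which is standard C99 but only reached C++ in C++20, so several C++ compilers reject it at the language levels this code targets. The replacement zero-initializes the object and then assigns the member. A minimal sketch with hypothetical stand-in types:

struct CpuDeviceInfo { bool enable_fp16_; int cpu_bind_mode_; };
struct DeviceInfo { CpuDeviceInfo cpu_device_info_; /* gpu/npu members elided */ };

DeviceInfo MakeCpuInfo(bool enable_fp16, int bind_mode) {
  DeviceInfo info = {};  // value-initialization; the diff's = {0} zeroes it the same way
  info.cpu_device_info_ = {enable_fp16, bind_mode};  // then set the active member
  return info;
}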
View File

@@ -14,6 +14,7 @@
* limitations under the License.
*/
#include "src/inner_context.h"
+#include <algorithm>
#include "include/errorcode.h"
#include "src/common/log_adapter.h"
#include "src/common/utils.h"

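Note: the hunk appears to add #include <algorithm>. Whether <algorithm> is reachable transitively through other headers differs between standard-library implementations, so code that calls std::find, std::sort, etc. has to include it directly to build with every compiler. A minimal illustration:

#include <algorithm>  // required for std::find on every standard library
#include <vector>

bool Contains(const std::vector<int> &ids, int id) {
  return std::find(ids.begin(), ids.end(), id) != ids.end();
}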
View File

@@ -20,6 +20,9 @@
namespace mindspore {
namespace kernel {
+const char *const kArchCPU = "CPU";
+const char *const kArchGPU = "GPU";
int RegisterKernel::RegCustomKernel(const std::string &arch, const std::string &provider, TypeId data_type,
const std::string &type, CreateKernel creator) {
return lite::RegistryKernelImpl::GetInstance()->RegCustomKernel(arch, provider, data_type, type, creator);

View File

@@ -27,8 +27,8 @@ using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_INFER_INVALID;
using mindspore::lite::RET_OK;
using mindspore::schema::Format;
using mindspore::schema::PrimitiveType_AdderFusion;
using mindspore::schema::Format::Format_NHWC;
namespace mindspore::kernel {
int AdderCPUKernel::Init() {

View File

@@ -142,28 +142,6 @@ set(LITE_SRC
${SRC_DIR}/train/train_populate_parameter.cc
)
-if(ENABLE_MINDRT)
-include_directories(${CORE_DIR}/mindrt/)
-include_directories(${CORE_DIR}/mindrt/include/)
-include_directories(${CORE_DIR}/mindrt/src/)
-set(MINDRT_SRC
-${SRC_DIR}/lite_mindrt.cc
-${SRC_DIR}/mindrt_executor.cc
-${CORE_DIR}/mindrt/src/mindrt.cc
-${CORE_DIR}/mindrt/src/actor/actor.cc
-${CORE_DIR}/mindrt/src/actor/actormgr.cc
-${CORE_DIR}/mindrt/src/actor/actorpolicy.cc
-${CORE_DIR}/mindrt/src/actor/aid.cc
-${CORE_DIR}/mindrt/src/async/async.cc
-${CORE_DIR}/mindrt/src/async/future.cc
-${CORE_DIR}/mindrt/src/async/uuid_base.cc
-${CORE_DIR}/mindrt/src/async/uuid_generator.cc
-${CORE_DIR}/mindrt/src/thread/threadpool.cc
-${CORE_DIR}/mindrt/src/thread/core_affinity.cc
-${CORE_DIR}/mindrt/src/thread/inter_threadpool.cc
-)
-endif()
file(GLOB PROTO_FILE ""
${CMAKE_CURRENT_SOURCE_DIR}/parser/caffe/caffe.proto
${CMAKE_CURRENT_SOURCE_DIR}/parser/tf/proto/*.proto
@@ -180,18 +158,18 @@ ms_build_flatbuffers_lite(TFLITE_FBS_FILES ${CMAKE_CURRENT_SOURCE_DIR}/parser/tf
set_property(SOURCE ${CONVERTER_SRC} PROPERTY COMPILE_DEFINITIONS SUBMODULE_ID=mindspore::SubModuleId::SM_LITE)
set_property(SOURCE ${CCSRC_SRC} PROPERTY COMPILE_DEFINITIONS SUBMODULE_ID=mindspore::SubModuleId::SM_LITE)
set_property(SOURCE ${LITE_SRC} PROPERTY COMPILE_DEFINITIONS SUBMODULE_ID=mindspore::SubModuleId::SM_LITE)
-set_property(SOURCE ${MINDRT_SRC} PROPERTY COMPILE_DEFINITIONS SUBMODULE_ID=mindspore::SubModuleId::SM_LITE)
add_executable(converter_lite
main.cc
${CCSRC_SRC}
${CONVERTER_SRC}
${LITE_SRC}
-${MINDRT_SRC}
)
add_dependencies(converter_lite fbs_src)
add_dependencies(converter_lite fbs_inner_src)
target_link_libraries(converter_lite PRIVATE
+mindrt_mid
cpu_ops_mid
nnacl_mid
cpu_kernel_mid