diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/reshape_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/reshape_infer.c
index 3899e512b04..d9390fae67c 100644
--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/reshape_infer.c
+++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/reshape_infer.c
@@ -32,14 +32,12 @@ int CalShape(const int *data, const TensorC *const *inputs, int *out_shape, size
     }
     ShapePush(out_shape, out_shape_size, data[i]);
   }
-  if (size == 0) {
-    return NNACL_ERR;
-  }
+
   if ((int)(data[index]) == -1) {
     if (index >= MAX_SHAPE_SIZE) {
       return NNACL_ERR;
     }
-    out_shape[index] = input_count / size;
+    out_shape[index] = size == 0 ? 0 : input_count / size;
   }
   return NNACL_OK;
 }
diff --git a/mindspore/lite/java/src/main/java/com/mindspore/MSTensor.java b/mindspore/lite/java/src/main/java/com/mindspore/MSTensor.java
index 80865d706e8..434a7988a07 100644
--- a/mindspore/lite/java/src/main/java/com/mindspore/MSTensor.java
+++ b/mindspore/lite/java/src/main/java/com/mindspore/MSTensor.java
@@ -52,7 +52,8 @@ public class MSTensor {
      * @param buffer tensor buffer
      */
     public static MSTensor createTensor(String tensorName, int dataType, int[] tensorShape, ByteBuffer buffer) {
-        if (tensorName == null || tensorShape == null || buffer == null) {
+        if (tensorName == null || tensorShape == null || buffer == null || dataType < DataType.kNumberTypeBool ||
+            dataType > DataType.kNumberTypeFloat64) {
             return null;
         }
         long tensorPtr = createTensorByNative(tensorName, dataType, tensorShape, buffer);
diff --git a/mindspore/lite/java/src/main/java/com/mindspore/ModelParallelRunner.java b/mindspore/lite/java/src/main/java/com/mindspore/ModelParallelRunner.java
index 64292f56741..03ca4ab1082 100644
--- a/mindspore/lite/java/src/main/java/com/mindspore/ModelParallelRunner.java
+++ b/mindspore/lite/java/src/main/java/com/mindspore/ModelParallelRunner.java
@@ -56,36 +56,50 @@ public class ModelParallelRunner {
     }
 
     /**
-     * Build a model runner from model path so that it can run on a device. 
+     * Build a model runner from model path so that it can run on a device.
      *
      * @param modelPath the model path.
      * @param runnerConfig the RunnerConfig Object.
      * @return init status.
      */
     public boolean init(String modelPath, RunnerConfig runnerConfig) {
+        rwLock.writeLock().lock();
         if (runnerConfig == null || modelPath == null) {
+            rwLock.writeLock().unlock();
             return false;
         }
+        if (modelParallelRunnerPtr != 0L){
+            rwLock.writeLock().unlock();
+            return true;
+        }
         modelParallelRunnerPtr = this.init(modelPath, runnerConfig.getRunnerConfigPtr());
+        rwLock.writeLock().unlock();
         return modelParallelRunnerPtr != 0L;
     }
 
     /**
-     * Build a model runner from model path so that it can run on a device. 
+     * Build a model runner from model path so that it can run on a device.
      *
      * @param modelPath the model path.
      * @return init status.
     */
     public boolean init(String modelPath) {
+        rwLock.writeLock().lock();
         if (modelPath == null) {
+            rwLock.writeLock().unlock();
             return false;
         }
+        if (modelParallelRunnerPtr != 0L){
+            rwLock.writeLock().unlock();
+            return true;
+        }
         modelParallelRunnerPtr = this.init(modelPath, 0L);
+        rwLock.writeLock().unlock();
         return modelParallelRunnerPtr != 0;
     }
 
     /**
-     * Build a model runner from model path so that it can run on a device. 
+     * Build a model runner from model path so that it can run on a device.
      *
      * @param inputs inputs A vector where model inputs are arranged in sequence.
     * @param outputs outputs Which is a pointer to a vector. The model outputs are filled in the container in sequence. 
@@ -197,11 +211,12 @@ public class ModelParallelRunner {
             break;
         }
         rwLock.writeLock().lock();
-        if (modelParallelRunnerPtr != 0L) {
-            this.free(modelParallelRunnerPtr);
-            modelParallelRunnerPtr = 0L;
-        }
+        long modelParallelRunnerTempPtr = modelParallelRunnerPtr;
+        modelParallelRunnerPtr = 0L;
         rwLock.writeLock().unlock();
+        if (modelParallelRunnerTempPtr != 0L) {
+            this.free(modelParallelRunnerTempPtr);
+        }
     }
 
     private native long init(String modelPath, long runnerConfigPtr);