diff --git a/mindspore/lite/src/extendrt/delegate/tensorrt/op/batchtospace_tensorrt.cc b/mindspore/lite/src/extendrt/delegate/tensorrt/op/batchtospace_tensorrt.cc
index b5c0609d555..2fcedab503d 100644
--- a/mindspore/lite/src/extendrt/delegate/tensorrt/op/batchtospace_tensorrt.cc
+++ b/mindspore/lite/src/extendrt/delegate/tensorrt/op/batchtospace_tensorrt.cc
@@ -45,17 +45,6 @@ int BatchToSpaceTensorRT::IsSupport(const BaseOperatorPtr &base_operator, const
 
 int BatchToSpaceTensorRT::AddInnerOp(TensorRTContext *ctx) {
   nvinfer1::ITensor *input_tensor = input(ctx, 0).trt_tensor_;
-  if (input(ctx, 0).trt_tensor_->getDimensions().nbDims == DIMENSION_4D && input(ctx, 0).format_ == Format::NHWC) {
-    // transpose: NHWC->NCHW
-    nvinfer1::IShuffleLayer *transpose_layer_in = NHWC2NCHW(ctx, *input(ctx, 0).trt_tensor_);
-    if (transpose_layer_in == nullptr) {
-      MS_LOG(ERROR) << "transpose: NHWC->NCHW failed";
-      return RET_ERROR;
-    }
-    transpose_layer_in->setName((op_name_ + "_transpose2NCHW").c_str());
-    this->transpose_layer_ = transpose_layer_in;
-    input_tensor = transpose_layer_in->getOutput(0);
-  }
   const int *block_size_ptr = reinterpret_cast<const int *>(in_tensors_[1].Data());
   int bh = *(block_size_ptr + 0);
   int bw = *(block_size_ptr + 1);
diff --git a/mindspore/lite/src/extendrt/delegate/tensorrt/op/concate_tensorrt.cc b/mindspore/lite/src/extendrt/delegate/tensorrt/op/concate_tensorrt.cc
index b7d9c6a29b6..9f970cac6b8 100644
--- a/mindspore/lite/src/extendrt/delegate/tensorrt/op/concate_tensorrt.cc
+++ b/mindspore/lite/src/extendrt/delegate/tensorrt/op/concate_tensorrt.cc
@@ -66,15 +66,6 @@ int ConcateTensorRT::AddInnerOp(TensorRTContext *ctx) {
     MS_LOG(ERROR) << "PreProcessInputs failed for " << op_name_;
     return ret;
   }
-  if (!same_format_) {
-    if (trt_input_tensors[0]->getDimensions().nbDims == DIMENSION_4D && out_format_ == Format::NCHW) {
-      // when inputs all NCHW, change axis
-      axis_ = ConvertAxisFromNHWC2NCHW(axis_);
-      MS_LOG(DEBUG) << "concate axis change to " << axis_ << " when using NCHW format.";
-    } else {
-      MS_LOG(WARNING) << "input tensor format needs check, convert concat axis failed for " << op_name_;
-    }
-  }
 
   if (type_ == ops::kNameStack) {
     for (size_t i = 0; i != in_tensors_.size(); ++i) {
@@ -127,29 +118,9 @@ int ConcateTensorRT::PreProcessInputs(TensorRTContext *ctx, nvinfer1::ITensor *t
     }
   }
 
-  // make sure all inputs are same format
-  if (input_nbDims == DIMENSION_4D) {
-    for (size_t i = 0; i < in_tensors_.size(); i++) {
-      if (input(ctx, i).format_ == out_format_) {
-        trt_input_tensors[i] = input(ctx, i).trt_tensor_;
-        MS_LOG(DEBUG) << "concate input " << GetTensorFormat(input(ctx, i));
-      } else {
-        nvinfer1::IShuffleLayer *transpose_layer = NHWC2NCHW(ctx, *input(ctx, i).trt_tensor_);
-        if (transpose_layer == nullptr) {
-          MS_LOG(ERROR) << "op action convert failed";
-          return RET_ERROR;
-        }
-        trt_input_tensors[i] = transpose_layer->getOutput(0);
-        this->transpose_layer_ = transpose_layer;
-        same_format_ = true;
-        MS_LOG(DEBUG) << "concate input " << GetTensorFormat(trt_input_tensors[i], Format::NHWC, true);
-      }
-    }
-  } else {
-    for (size_t i = 0; i < in_tensors_.size(); i++) {
-      trt_input_tensors[i] = input(ctx, i).trt_tensor_;
-      MS_LOG(DEBUG) << "concate input " << GetTensorFormat(input(ctx, i));
-    }
+  for (size_t i = 0; i < in_tensors_.size(); i++) {
+    trt_input_tensors[i] = input(ctx, i).trt_tensor_;
+    MS_LOG(DEBUG) << "concate input " << GetTensorFormat(input(ctx, i));
   }
   return RET_OK;
 }
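Reviewer note: the removed concat branch relied on the delegate's `ConvertAxisFromNHWC2NCHW` helper. For a 4-D tensor the remap it performs looks like the sketch below; this is an illustrative reimplementation, not the delegate's actual helper.

```cpp
// Sketch of the NHWC->NCHW axis remap the removed code relied on.
int ConvertAxisNHWC2NCHW(int axis) {
  // NHWC dim index -> NCHW dim index: N:0->0, H:1->2, W:2->3, C:3->1
  static const int kMap[4] = {0, 2, 3, 1};
  return (axis >= 0 && axis < 4) ? kMap[axis] : axis;
}
```

For example, a concat along C (axis 3 in NHWC) becomes a concat along axis 1 once all inputs are NCHW.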
diff --git a/mindspore/lite/src/extendrt/delegate/tensorrt/op/convolution_tensorrt.cc b/mindspore/lite/src/extendrt/delegate/tensorrt/op/convolution_tensorrt.cc
index dacd7890966..02fdf273cb9 100644
--- a/mindspore/lite/src/extendrt/delegate/tensorrt/op/convolution_tensorrt.cc
+++ b/mindspore/lite/src/extendrt/delegate/tensorrt/op/convolution_tensorrt.cc
@@ -54,17 +54,6 @@ int ConvolutionTensorRT::AddInnerOp(TensorRTContext *ctx) {
   }
   nvinfer1::ITensor *conv_input = input(ctx, 0).trt_tensor_;
-  if (input(ctx, 0).trt_tensor_->getDimensions().nbDims == DIMENSION_4D && input(ctx, 0).format_ == Format::NHWC) {
-    // transpose: NHWC->NCHW
-    nvinfer1::IShuffleLayer *transpose_layer_in = NHWC2NCHW(ctx, *input(ctx, 0).trt_tensor_);
-    if (transpose_layer_in == nullptr) {
-      MS_LOG(ERROR) << "transpose: NHWC->NCHW failed";
-      return RET_ERROR;
-    }
-    transpose_layer_in->setName((op_name_ + "_transpose2NCHW").c_str());
-    this->transpose_layer_ = transpose_layer_in;
-    conv_input = transpose_layer_in->getOutput(0);
-  }
 
   // transpose weight
   const auto &weight_tensor = in_tensors_[1];
diff --git a/mindspore/lite/src/extendrt/delegate/tensorrt/op/deconvolution_tensorrt.cc b/mindspore/lite/src/extendrt/delegate/tensorrt/op/deconvolution_tensorrt.cc
index ac9d654db92..cb528a1881f 100644
--- a/mindspore/lite/src/extendrt/delegate/tensorrt/op/deconvolution_tensorrt.cc
+++ b/mindspore/lite/src/extendrt/delegate/tensorrt/op/deconvolution_tensorrt.cc
@@ -52,17 +52,6 @@ int DeconvolutionTensorRT::AddInnerOp(TensorRTContext *ctx) {
     return RET_ERROR;
   }
   nvinfer1::ITensor *deconv_input = input(ctx, 0).trt_tensor_;
-  if (input(ctx, 0).trt_tensor_->getDimensions().nbDims == DIMENSION_4D && input(ctx, 0).format_ == Format::NHWC) {
-    // transpose: NHWC->NCHW
-    nvinfer1::IShuffleLayer *transpose_layer_in = NHWC2NCHW(ctx, *input(ctx, 0).trt_tensor_);
-    if (transpose_layer_in == nullptr) {
-      MS_LOG(ERROR) << "transpose: NHWC->NCHW failed";
-      return RET_ERROR;
-    }
-    transpose_layer_in->setName((op_name_ + "_transpose2NCHW").c_str());
-    this->transpose_layer_ = transpose_layer_in;
-    deconv_input = transpose_layer_in->getOutput(0);
-  }
 
   // transpose weight
   const auto &weight_tensor = in_tensors_[1];
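Reviewer note: every file touched so far drops the same pattern — inserting an `IShuffleLayer` transpose in front of the op when the input arrived as NHWC. A minimal sketch of that pattern against the public TensorRT API follows; the delegate's `NHWC2NCHW` helper is assumed to wrap something close to this.

```cpp
#include <NvInfer.h>

// Insert a shuffle layer that permutes an NHWC tensor to NCHW.
nvinfer1::ITensor *TransposeNHWC2NCHW(nvinfer1::INetworkDefinition *network, nvinfer1::ITensor *input) {
  nvinfer1::IShuffleLayer *shuffle = network->addShuffle(*input);
  if (shuffle == nullptr) {
    return nullptr;
  }
  nvinfer1::Permutation perm{{0, 3, 1, 2}};  // N H W C -> N C H W
  shuffle->setFirstTranspose(perm);
  return shuffle->getOutput(0);
}
```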
diff --git a/mindspore/lite/src/extendrt/delegate/tensorrt/op/elementwise_tensorrt.cc b/mindspore/lite/src/extendrt/delegate/tensorrt/op/elementwise_tensorrt.cc
index b2369727d28..50554c8af29 100644
--- a/mindspore/lite/src/extendrt/delegate/tensorrt/op/elementwise_tensorrt.cc
+++ b/mindspore/lite/src/extendrt/delegate/tensorrt/op/elementwise_tensorrt.cc
@@ -179,26 +179,7 @@ int ElementWiseTensorRT::PreprocessInputTensors(TensorRTContext *ctx, ITensorHel
   }
   *x_input = input(ctx, 0);
   *y_input = input(ctx, 1);
-  int const_tensor_index = (in_tensors_[0].Data() != nullptr && in_tensors_[0].IsConst()) ? 0 : 1;
-  auto input_helper = const_tensor_index == 0 ? y_input : x_input;
-  auto const_helper = const_tensor_index == 0 ? x_input : y_input;
-  MS_LOG(DEBUG) << "before transpose " << GetTensorFormat(*x_input);
-  MS_LOG(DEBUG) << "before transpose " << GetTensorFormat(*y_input);
-  if (input_helper->trt_tensor_->getDimensions().nbDims == DIMENSION_4D &&
-      input_helper->format_ != const_helper->format_) {
-    // when inputs format are different, change to NHWC
-    auto need_trans = input_helper->format_ == Format::NCHW ? input_helper : const_helper;
-    nvinfer1::IShuffleLayer *transpose_layer = NCHW2NHWC(ctx, *need_trans->trt_tensor_);
-    if (transpose_layer == nullptr) {
-      MS_LOG(ERROR) << "op action convert failed";
-      return RET_ERROR;
-    }
-    transpose_layer->setName((op_name_ + "_input_transpose2NHWC").c_str());
-    need_trans->trt_tensor_ = transpose_layer->getOutput(0);
-    need_trans->format_ = Format::NHWC;
-    need_trans->same_format_ = true;
-  }
   MS_LOG(DEBUG) << "after transpose " << GetTensorFormat(*x_input);
   MS_LOG(DEBUG) << "after transpose " << GetTensorFormat(*y_input);
   if (GetDimsVolume(x_input->trt_tensor_->getDimensions()) == GetDimsVolume(y_input->trt_tensor_->getDimensions()) &&
diff --git a/mindspore/lite/src/extendrt/delegate/tensorrt/op/pad_tensorrt.cc b/mindspore/lite/src/extendrt/delegate/tensorrt/op/pad_tensorrt.cc
index dabaeaa7fe0..4e137842c24 100644
--- a/mindspore/lite/src/extendrt/delegate/tensorrt/op/pad_tensorrt.cc
+++ b/mindspore/lite/src/extendrt/delegate/tensorrt/op/pad_tensorrt.cc
@@ -69,18 +69,6 @@ int PadTensorRT::AddInnerOp(TensorRTContext *ctx) {
   nvinfer1::ITensor *pad_input = input(ctx, 0).trt_tensor_;
   MS_LOG(DEBUG) << "before transpose "
                 << GetTensorFormat(pad_input, input(ctx, 0).format_, input(ctx, 0).same_format_);
-  if (input(ctx, 0).trt_tensor_->getDimensions().nbDims == DIMENSION_4D && input(ctx, 0).format_ == Format::NHWC) {
-    // transpose: NHWC->NCHW
-    nvinfer1::IShuffleLayer *transpose_layer_in = NHWC2NCHW(ctx, *input(ctx, 0).trt_tensor_);
-    if (transpose_layer_in == nullptr) {
-      MS_LOG(ERROR) << "transpose: NHWC->NCHW failed";
-      return RET_ERROR;
-    }
-    transpose_layer_in->setName((op_name_ + "_transpose2NCHW").c_str());
-    this->transpose_layer_ = transpose_layer_in;
-    pad_input = transpose_layer_in->getOutput(0);
-    MS_LOG(DEBUG) << "after transpose " << GetTensorFormat(pad_input, Format::NCHW, false);
-  }
 
   // trt 6 only support 2D padding
   const int *padding_data = reinterpret_cast<const int *>(in_tensors_[1].Data());
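Reviewer note: with the transpose gone, `PadTensorRT` consumes the paddings tensor directly. For reference, a 4x2 paddings tensor maps onto TensorRT's 2-D pre/post padding as sketched below; the row order {N, H, W, C} x {before, after} is an assumption based on the NHWC convention, and the helper name is hypothetical.

```cpp
#include <NvInfer.h>

// Map a flattened 4x2 paddings tensor onto TensorRT's 2-D HW padding.
void GetHWPadding(const int *paddings, nvinfer1::DimsHW *pre, nvinfer1::DimsHW *post) {
  *pre = nvinfer1::DimsHW{paddings[2], paddings[4]};   // H_before, W_before
  *post = nvinfer1::DimsHW{paddings[3], paddings[5]};  // H_after, W_after
}
```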
diff --git a/mindspore/lite/src/extendrt/delegate/tensorrt/op/pool_tensorrt.cc b/mindspore/lite/src/extendrt/delegate/tensorrt/op/pool_tensorrt.cc
index 411e6713edd..1429cf96262 100644
--- a/mindspore/lite/src/extendrt/delegate/tensorrt/op/pool_tensorrt.cc
+++ b/mindspore/lite/src/extendrt/delegate/tensorrt/op/pool_tensorrt.cc
@@ -51,17 +51,6 @@ int PoolTensorRT::AddInnerOp(TensorRTContext *ctx) {
   }
   nvinfer1::ITensor *pool_input = input(ctx, 0).trt_tensor_;
-  if (input(ctx, 0).trt_tensor_->getDimensions().nbDims == DIMENSION_4D && input(ctx, 0).format_ == Format::NHWC) {
-    // transpose: NHWC->NCHW
-    nvinfer1::IShuffleLayer *transpose_layer_in = NHWC2NCHW(ctx, *input(ctx, 0).trt_tensor_);
-    if (transpose_layer_in == nullptr) {
-      MS_LOG(ERROR) << "transpose: NHWC->NCHW failed";
-      return RET_ERROR;
-    }
-    transpose_layer_in->setName((op_name_ + "_transpose2NCHW").c_str());
-    this->transpose_layer_ = transpose_layer_in;
-    pool_input = transpose_layer_in->getOutput(0);
-  }
 
   // global version pooling
   if (kernel_size_.empty()) {
diff --git a/mindspore/lite/src/extendrt/delegate/tensorrt/op/reduce_tensorrt.cc b/mindspore/lite/src/extendrt/delegate/tensorrt/op/reduce_tensorrt.cc
index a3b68c58bc7..cd2a6cced5a 100644
--- a/mindspore/lite/src/extendrt/delegate/tensorrt/op/reduce_tensorrt.cc
+++ b/mindspore/lite/src/extendrt/delegate/tensorrt/op/reduce_tensorrt.cc
@@ -48,36 +48,7 @@ int ReduceTensorRT::AddInnerOp(TensorRTContext *ctx) {
   out_format_ = input(ctx, 0).format_;
   nvinfer1::ITensor *reduce_input = input(ctx, 0).trt_tensor_;
   MS_LOG(DEBUG) << "origin input " << GetTensorFormat(input(ctx, 0));
-  if (input(ctx, 0).trt_tensor_->getDimensions().nbDims == DIMENSION_4D &&
-      !SameDims(input(ctx, 0).trt_tensor_->getDimensions(), in_tensors_[0].Shape())) {
-    /*
-    if (input(ctx, 0).format_ == Format::NCHW) {
-      // NCHW->NHWC
-      nvinfer1::IShuffleLayer *transpose_layer = NCHW2NHWC(ctx, *input(ctx, 0).trt_tensor_);
-      if (transpose_layer == nullptr) {
-        MS_LOG(ERROR) << "create transpose layer failed for " << op_name_;
-        return RET_ERROR;
-      }
-      transpose_layer->setName((op_name_ + "_transpose_in").c_str());
-      reduce_input = transpose_layer->getOutput(0);
-      out_format_ = Format::NHWC;
-      this->transpose_layer_ = transpose_layer;
-    } else if (input(ctx, 0).format_ == Format::NHWC) {
-      // NHWC->NCHW
-      nvinfer1::IShuffleLayer *transpose_layer = NHWC2NCHW(ctx, *input(ctx, 0).trt_tensor_);
-      if (transpose_layer == nullptr) {
-        MS_LOG(ERROR) << "create transpose layer failed for " << op_name_;
-        return RET_ERROR;
-      }
-      transpose_layer->setName((op_name_ + "_transpose_in").c_str());
-      reduce_input = transpose_layer->getOutput(0);
-      out_format_ = Format::NCHW;
-      this->transpose_layer_ = transpose_layer;
-    } else {
-      MS_LOG(WARNING) << "input tensor format needs check: " << op_name_;
-    }
-    */
-  }
+  MS_LOG(DEBUG) << "after transpose input " << GetTensorFormat(reduce_input, out_format_, true);
 
   if (reduce_op->get_mode() == ReduceMode::Reduce_L2) {
     // x^2
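Reviewer note: the `Reduce_L2` branch that follows the removed block in reduce_tensorrt.cc composes the reduction from stock layers, as the `// x^2` comment hints. A self-contained sketch of that composition, sqrt(reduce_sum(x * x)), with illustrative names:

```cpp
#include <NvInfer.h>

// L2 reduction built from stock TensorRT layers: sqrt(reduce_sum(x * x)).
nvinfer1::ITensor *ReduceL2(nvinfer1::INetworkDefinition *net, nvinfer1::ITensor *x,
                            uint32_t reduce_axes, bool keep_dims) {
  auto *sq = net->addElementWise(*x, *x, nvinfer1::ElementWiseOperation::kPROD);  // x^2
  if (sq == nullptr) return nullptr;
  auto *sum = net->addReduce(*sq->getOutput(0), nvinfer1::ReduceOperation::kSUM, reduce_axes, keep_dims);
  if (sum == nullptr) return nullptr;
  auto *root = net->addUnary(*sum->getOutput(0), nvinfer1::UnaryOperation::kSQRT);  // sqrt
  return root == nullptr ? nullptr : root->getOutput(0);
}
```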
diff --git a/mindspore/lite/src/extendrt/delegate/tensorrt/op/resize_tensorrt.cc b/mindspore/lite/src/extendrt/delegate/tensorrt/op/resize_tensorrt.cc
index 66beb0b9611..6b5e9e0482c 100644
--- a/mindspore/lite/src/extendrt/delegate/tensorrt/op/resize_tensorrt.cc
+++ b/mindspore/lite/src/extendrt/delegate/tensorrt/op/resize_tensorrt.cc
@@ -53,17 +53,6 @@ int ResizeTensorRT::AddInnerOp(TensorRTContext *ctx) {
   nvinfer1::ITensor *resize_in_tensor = input(ctx, 0).trt_tensor_;
   MS_LOG(DEBUG) << "origin input " << GetTensorFormat(input(ctx, 0));
-  if (resize_in_tensor->getDimensions().nbDims == DIMENSION_4D && input(ctx, 0).format_ == Format::NHWC) {
-    // NHWC->NCHW
-    nvinfer1::IShuffleLayer *transpose_layer = NHWC2NCHW(ctx, *input(ctx, 0).trt_tensor_);
-    if (transpose_layer == nullptr) {
-      MS_LOG(ERROR) << "create transpose layer failed for " << op_name_;
-      return RET_ERROR;
-    }
-    transpose_layer->setName((op_name_ + "_transpose_in").c_str());
-    resize_in_tensor = transpose_layer->getOutput(0);
-    this->transpose_layer_ = transpose_layer;
-  }
 
   MS_LOG(DEBUG) << "after transpose input " << GetTensorFormat(resize_in_tensor, Format::NCHW, true);
   auto method = resize_op_->get_method();
diff --git a/mindspore/lite/src/extendrt/delegate/tensorrt/op/scale_tensorrt.cc b/mindspore/lite/src/extendrt/delegate/tensorrt/op/scale_tensorrt.cc
index 5c89187a1d0..9f0d2ba636c 100644
--- a/mindspore/lite/src/extendrt/delegate/tensorrt/op/scale_tensorrt.cc
+++ b/mindspore/lite/src/extendrt/delegate/tensorrt/op/scale_tensorrt.cc
@@ -69,7 +69,7 @@ int ScaleTensorRT::AddInnerOp(TensorRTContext *ctx) {
   MS_LOG(DEBUG) << "after transpose " << GetTensorFormat(scale_in_tensor, out_format_, out_same_format_);
 
   nvinfer1::ITensor *op_out_tensor{nullptr};
-  if (scale_in_tensor->getDimensions().nbDims == DIMENSION_4D) {
+  if (scale_in_tensor->getDimensions().nbDims == DIMENSION_4D && mode_ != nvinfer1::ScaleMode::kCHANNEL) {
     op_out_tensor = RunAs4DimsScale(ctx, scale_in_tensor);
   } else {
     op_out_tensor = RunAsMutiDimsScale(ctx, scale_in_tensor);
@@ -92,37 +92,6 @@ nvinfer1::ITensor *ScaleTensorRT::PreProcessInputTensor(TensorRTContext *ctx) {
   nvinfer1::ITensor *scale_in_tensor = input(ctx, 0).trt_tensor_;
-  if (input(ctx, 0).trt_tensor_->getDimensions().nbDims == DIMENSION_4D && mode_ == nvinfer1::ScaleMode::kCHANNEL) {
-    // per channel input format should be nchw, otherwise should be same with scale nhwc
-    // transpose: NHWC->NCHW
-    if ((input(ctx, 0).format_ == Format::NHWC && axis_ == kNHWC_C) ||
-        (input(ctx, 0).same_format_ == true && axis_ == kNHWC_C)) {
-      nvinfer1::IShuffleLayer *transpose_layer_in = NHWC2NCHW(ctx, *input(ctx, 0).trt_tensor_);
-      if (transpose_layer_in == nullptr) {
-        MS_LOG(ERROR) << "op action convert failed";
-        return nullptr;
-      }
-      transpose_layer_in->setName((op_name_ + "_transpose2NCHW").c_str());
-      scale_in_tensor = transpose_layer_in->getOutput(0);
-      out_format_ = Format::NCHW;
-      out_same_format_ = !out_same_format_;
-    } else if (out_format_ != Format::NCHW && axis_ != kNCHW_C) {
-      MS_LOG(WARNING) << op_name_ << " out format (NHWC:1, NCHW:0) infer as " << out_format_ << ", and axis is "
-                      << axis_;
-    }
-  } else if (input(ctx, 0).trt_tensor_->getDimensions().nbDims == DIMENSION_4D &&
-             input(ctx, 0).format_ == Format::NCHW && mode_ == nvinfer1::ScaleMode::kELEMENTWISE) {
-    // transpose: NCHW->NHWC
-    nvinfer1::IShuffleLayer *transpose_layer_in = NCHW2NHWC(ctx, *input(ctx, 0).trt_tensor_);
-    if (transpose_layer_in == nullptr) {
-      MS_LOG(ERROR) << "op action convert failed";
-      return nullptr;
-    }
-    transpose_layer_in->setName((op_name_ + "_transpose2NHWC").c_str());
-    scale_in_tensor = transpose_layer_in->getOutput(0);
-    out_format_ = Format::NHWC;
-    out_same_format_ = true;
-  }
   return scale_in_tensor;
 }
diff --git a/mindspore/lite/src/extendrt/delegate/tensorrt/op/shape_tensorrt.cc b/mindspore/lite/src/extendrt/delegate/tensorrt/op/shape_tensorrt.cc
index 9c88c283dfc..b0b15176d2b 100644
--- a/mindspore/lite/src/extendrt/delegate/tensorrt/op/shape_tensorrt.cc
+++ b/mindspore/lite/src/extendrt/delegate/tensorrt/op/shape_tensorrt.cc
@@ -42,17 +42,7 @@ int ShapeTensorRT::AddInnerOp(TensorRTContext *ctx) {
     return RET_ERROR;
   }
   nvinfer1::ITensor *shape_input = input(ctx, 0).trt_tensor_;
-  if (input(ctx, 0).trt_tensor_->getDimensions().nbDims == DIMENSION_4D && input(ctx, 0).format_ == Format::NCHW) {
-    // transpose: NCHW->NHWC
-    nvinfer1::IShuffleLayer *transpose_layer_in = NCHW2NHWC(ctx, *input(ctx, 0).trt_tensor_);
-    if (transpose_layer_in == nullptr) {
-      MS_LOG(ERROR) << "transpose: NCHW->NHWC failed for " << op_name_;
-      return RET_ERROR;
-    }
-    transpose_layer_in->setName((op_name_ + "_transpose2NHWC").c_str());
-    shape_input = transpose_layer_in->getOutput(0);
-    this->transpose_layer_ = transpose_layer_in;
-  }
+
 
   nvinfer1::IShapeLayer *shape_layer = ctx->network()->addShape(*shape_input);
   if (shape_layer == nullptr) {
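Reviewer note: `AddInnerOp` now routes `kCHANNEL` mode away from `RunAs4DimsScale`. For context, a per-channel scale over an NCHW tensor can be expressed directly with `addScaleNd`, where channel axis 1 is C. The helper name is hypothetical; shift and power are left empty.

```cpp
#include <NvInfer.h>

// Per-channel scale on an NCHW tensor: channelAxis 1 selects the C dimension.
nvinfer1::IScaleLayer *AddChannelScale(nvinfer1::INetworkDefinition *net, nvinfer1::ITensor *in,
                                       nvinfer1::Weights scale) {
  nvinfer1::Weights empty{nvinfer1::DataType::kFLOAT, nullptr, 0};
  return net->addScaleNd(*in, nvinfer1::ScaleMode::kCHANNEL, empty, scale, empty, 1);
}
```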
diff --git a/mindspore/lite/src/extendrt/delegate/tensorrt/op/shuffle_tensorrt.cc b/mindspore/lite/src/extendrt/delegate/tensorrt/op/shuffle_tensorrt.cc
index 9749810fb56..b7664c68f78 100644
--- a/mindspore/lite/src/extendrt/delegate/tensorrt/op/shuffle_tensorrt.cc
+++ b/mindspore/lite/src/extendrt/delegate/tensorrt/op/shuffle_tensorrt.cc
@@ -133,32 +133,7 @@ int ShuffleTensorRT::InputTensorPreprocess(TensorRTContext *ctx) {
   shuffler_input_ = input(ctx, 0).trt_tensor_;
   MS_LOG(DEBUG) << "before transpose " << GetTensorFormat(input(ctx, 0));
   out_format_ = input(ctx, 0).format_;
-  if (shuffler_input_->getDimensions().nbDims == DIMENSION_4D && !input(ctx, 0).same_format_) {
-    // input tensor support NCHW format input
-    /*
-    if (input(ctx, 0).format_ == Format::NCHW) {
-      // for transpose op, if tensor has same dim with ms tensor, keep origin dims
-      nvinfer1::IShuffleLayer *transpose_layer = NCHW2NHWC(ctx_, *shuffler_input_);
-      if (transpose_layer == nullptr) {
-        MS_LOG(ERROR) << "create transpose layer failed for " << op_name_;
-        return RET_ERROR;
-      }
-      transpose_layer->setName((op_name_ + "_transpose_in").c_str());
-      shuffler_input_ = transpose_layer->getOutput(0);
-      out_format_ = Format::NHWC;
-    } else if (input(ctx, 0).format_ == Format::NHWC) {
-      // infer format may error, correct here
-      nvinfer1::IShuffleLayer *transpose_layer = NHWC2NCHW(ctx_, *shuffler_input_);
-      if (transpose_layer == nullptr) {
-        MS_LOG(ERROR) << "create transpose layer failed for " << op_name_;
-        return RET_ERROR;
-      }
-      transpose_layer->setName((op_name_ + "_transpose_in").c_str());
-      shuffler_input_ = transpose_layer->getOutput(0);
-      out_format_ = Format::NCHW;
-    }
-    */
-  }
+  MS_LOG(DEBUG) << "after transpose " << GetTensorFormat(shuffler_input_, out_format_, true);
 
   return RET_OK;
 }
@@ -244,15 +219,7 @@ int ShuffleTensorRT::AddTransposeOp(nvinfer1::IShuffleLayer *shuffle_layer) {
     perm_data++;
   }
   shuffle_layer->setFirstTranspose(perm);
-  if (perm_ternsor.ElementNum() == DIMENSION_4D) {
-    if (perm.order[kNCHW_C] == kNHWC_C && perm.order[kNCHW_H] == kNHWC_H && perm.order[kNCHW_W] == kNHWC_W) {
-      out_format_ = Format::NCHW;
-    } else if (perm.order[kNHWC_H] == kNCHW_H && perm.order[kNHWC_W] == kNCHW_W && perm.order[kNHWC_C] == kNCHW_C) {
-      out_format_ = Format::NHWC;
-    } else {
-      MS_LOG(INFO) << "input format and perm order is not NHWC or NCHW: " << op_name_;
-    }
-  }
+
   shuffler_output_ = shuffle_layer->getOutput(0);
   return RET_OK;
 }
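Reviewer note: `AddTransposeOp` no longer infers `out_format_` from the permutation. For reference, what the removed branch checked, in isolation: whether a 4-D permutation is the canonical NHWC-to-NCHW or NCHW-to-NHWC transpose. The delegate wrote this with its `kNCHW_*`/`kNHWC_*` index constants; the sketch below uses plain indices.

```cpp
#include <NvInfer.h>

// A permutation of {0, 3, 1, 2} reorders NHWC into NCHW.
bool IsNHWC2NCHW(const nvinfer1::Permutation &p) {
  return p.order[0] == 0 && p.order[1] == 3 && p.order[2] == 1 && p.order[3] == 2;
}

// A permutation of {0, 2, 3, 1} reorders NCHW into NHWC.
bool IsNCHW2NHWC(const nvinfer1::Permutation &p) {
  return p.order[0] == 0 && p.order[1] == 2 && p.order[2] == 3 && p.order[3] == 1;
}
```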
diff --git a/mindspore/lite/src/extendrt/delegate/tensorrt/op/softmax_tensorrt.cc b/mindspore/lite/src/extendrt/delegate/tensorrt/op/softmax_tensorrt.cc
index c100cdecbcc..1ea6c29627c 100644
--- a/mindspore/lite/src/extendrt/delegate/tensorrt/op/softmax_tensorrt.cc
+++ b/mindspore/lite/src/extendrt/delegate/tensorrt/op/softmax_tensorrt.cc
@@ -80,10 +80,7 @@ nvinfer1::ISoftMaxLayer *SoftMaxTensorRT::AddSoftMaxOp(TensorRTContext *ctx) {
     return nullptr;
   }
   int64_t axis_format_value = (axis_val[0] == -1) ? input(ctx, 0).trt_tensor_->getDimensions().nbDims - 1 : axis_val[0];
-  if (input(ctx, 0).trt_tensor_->getDimensions().nbDims == DIMENSION_4D && input(ctx, 0).format_ == Format::NHWC) {
-    // transpose axis to NCHW
-    axis_format_value = ConvertAxisFromNHWC2NCHW(axis_format_value);
-  }
+
   uint32_t axis_bit = 1 << axis_format_value;
   MS_LOG(DEBUG) << op_name_ << " axis_value is " << axis_format_value << ", set axis to " << axis_bit;
   current_layer_->setAxes(axis_bit);
diff --git a/mindspore/lite/src/extendrt/delegate/tensorrt/op/spacetobatch_tensorrt.cc b/mindspore/lite/src/extendrt/delegate/tensorrt/op/spacetobatch_tensorrt.cc
index 3e6065e8c03..747c2b28bb5 100644
--- a/mindspore/lite/src/extendrt/delegate/tensorrt/op/spacetobatch_tensorrt.cc
+++ b/mindspore/lite/src/extendrt/delegate/tensorrt/op/spacetobatch_tensorrt.cc
@@ -45,17 +45,6 @@ int SpaceToBatchTensorRT::IsSupport(const BaseOperatorPtr &base_operator, const
 
 int SpaceToBatchTensorRT::AddInnerOp(TensorRTContext *ctx) {
   nvinfer1::ITensor *input_tensor = input(ctx, 0).trt_tensor_;
-  if (input(ctx, 0).trt_tensor_->getDimensions().nbDims == DIMENSION_4D && input(ctx, 0).format_ == Format::NHWC) {
-    // transpose: NHWC->NCHW
-    nvinfer1::IShuffleLayer *transpose_layer_in = NHWC2NCHW(ctx, *input(ctx, 0).trt_tensor_);
-    if (transpose_layer_in == nullptr) {
-      MS_LOG(ERROR) << "transpose: NHWC->NCHW failed";
-      return RET_ERROR;
-    }
-    transpose_layer_in->setName((op_name_ + "_transpose2NCHW").c_str());
-    this->transpose_layer_ = transpose_layer_in;
-    input_tensor = transpose_layer_in->getOutput(0);
-  }
   const int *block_size_ptr = reinterpret_cast<const int *>(in_tensors_[1].Data());
   int bh = *(block_size_ptr + 0);
   int bw = *(block_size_ptr + 1);
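Reviewer note: `ISoftMaxLayer::setAxes` expects a bitmask of dimensions rather than a dimension index; the softmax hunk above keeps exactly that computation. A sketch including the `-1` normalization performed one line earlier (the helper name is illustrative):

```cpp
#include <cstdint>

// Convert a softmax axis (possibly -1 for "last") into the bitmask setAxes expects.
uint32_t SoftmaxAxesMask(int64_t axis, int32_t nb_dims) {
  if (axis < 0) {
    axis += nb_dims;  // e.g. -1 -> last dimension
  }
  return 1u << static_cast<uint32_t>(axis);  // e.g. axis 1 -> 0b0010
}
```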