forked from mindspore-Ecosystem/mindspore
!41241 [MSLITE] Fix bug for Tensorrt dynamic shape inference
Merge pull request !41241 from zhangyongxian/dev_zhangyongxian_bugfix
commit e819545cd5
@@ -152,7 +152,7 @@ int ElementWiseTensorRT::AddInnerOp(TensorRTContext *ctx) {
   }
 #endif
   auto output_helper =
-    ITensorHelper{op_out_tensor, x_input.format_, x_input.same_format_, x_input.is_tensor_ && y_input.is_tensor_};
+    ITensorHelper{op_out_tensor, x_input.format_, x_input.same_format_, x_input.is_tensor_ || y_input.is_tensor_};
   ctx->RegisterTensor(output_helper, out_tensors_[0].Name());
   MS_LOG(DEBUG) << "output " << GetTensorFormat(output_helper);
   return RET_OK;
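The only change in this hunk flips the is_tensor_ combination from && to ||, so the elementwise output is tracked as a runtime tensor whenever either operand is one, not only when both are. A minimal standalone sketch of that flag logic (hypothetical names, not the MindSpore Lite API):

#include <iostream>

struct OperandInfo {
  bool is_tensor_;  // true: runtime tensor, false: constant weight
};

static bool OutputIsTensor(const OperandInfo &x, const OperandInfo &y) {
  // Fixed semantics: was &&, which mislabeled (tensor, constant) outputs as constants.
  return x.is_tensor_ || y.is_tensor_;
}

int main() {
  OperandInfo x{true};
  OperandInfo y{false};
  std::cout << std::boolalpha << OutputIsTensor(x, y) << std::endl;  // true after the fix
  return 0;
}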
@@ -233,8 +233,8 @@ nvinfer1::ITensor *MatMulTensorRT::AddBias(TensorRTContext *ctx, nvinfer1::ITens
   if (in_tensors_.size() == kBiasIndex + 1) {
     nvinfer1::ITensor *bias = nullptr;
     if (in_tensors_[kBiasIndex].Shape().size() < static_cast<size_t>(out_tensor->getDimensions().nbDims)) {
-      std::vector<int64_t> expect_dims(out_tensors_[0].Shape());
-      expect_dims[0] = out_tensor->getDimensions().d[0];
+      std::vector<int64_t> expect_dims(input_tensor->getDimensions().nbDims, 1);
+      expect_dims[expect_dims.size() - 1] = in_tensors_[kBiasIndex].Shape().back();
       bias = ConvertTensorWithExpandDims(ctx, in_tensors_[kBiasIndex], expect_dims, op_name_);
     } else if (in_tensors_[kBiasIndex].Shape().size() == static_cast<size_t>(out_tensor->getDimensions().nbDims)) {
       bias = ConvertConstantTensor(ctx, in_tensors_[kBiasIndex], op_name_);
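This hunk rebuilds the bias broadcast shape from the input rank instead of copying the static output shape, so the expansion still works when leading output dimensions are dynamic. A minimal sketch of that shape construction, using a hypothetical helper rather than the project's ConvertTensorWithExpandDims call:

#include <cstdint>
#include <iostream>
#include <vector>

// Build the shape used to expand a 1-D bias so it broadcasts against an N-D
// matmul output whose leading dimensions may be dynamic (-1): every dimension
// is 1 except the last, which carries the bias length.
static std::vector<int64_t> BiasExpandDims(int output_rank, int64_t bias_len) {
  std::vector<int64_t> expect_dims(output_rank, 1);
  expect_dims.back() = bias_len;
  return expect_dims;
}

int main() {
  for (int64_t d : BiasExpandDims(3, 256)) {  // e.g. rank-3 output, bias of length 256
    std::cout << d << " ";                    // prints: 1 1 256
  }
  std::cout << std::endl;
  return 0;
}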
@@ -31,8 +31,6 @@ int ShapeTensorRT::IsSupport(const schema::Primitive *primitive, const std::vect
     MS_LOG(ERROR) << "Unsupported output tensor size, size is " << out_tensors.size();
     return RET_ERROR;
   }
-  dynamic_shape_params_.support_dynamic_ = false;
-  dynamic_shape_params_.support_hw_dynamic_ = false;
   return RET_OK;
 }
 int ShapeTensorRT::AddInnerOp(TensorRTContext *ctx) {
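Deleting these two assignments stops the Shape op from opting out of dynamic-shape support. A minimal sketch, assuming the flags default to true as the fix implies (hypothetical struct, not the full MindSpore Lite type):

#include <iostream>

struct DynamicShapeParams {         // hypothetical stand-in for the real type
  bool support_dynamic_ = true;     // dynamic batch dimension (assumed default)
  bool support_hw_dynamic_ = true;  // dynamic height/width (assumed default)
};

int main() {
  DynamicShapeParams params;  // no per-op overrides after the fix
  std::cout << std::boolalpha << params.support_dynamic_ << " "
            << params.support_hw_dynamic_ << std::endl;  // true true
  return 0;
}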
@@ -76,8 +76,8 @@ class StrideSliceTensorRTUtil final : public SliceTensorRTUtil {
     stride_dims = nvinfer1::Dims{size_dims.nbDims, {}};
     std::fill(start_dims.d, start_dims.d + start_dims.nbDims, 0);
     std::fill(stride_dims.d, stride_dims.d + stride_dims.nbDims, 1);
-    if (start_value == -1) {
-      start_value = input_dims.d[axis_value] - 1;
+    if (start_value < 0) {
+      start_value = input_dims.d[axis_value] + start_value;
     }
     for (int i = 0; i < start_dims.nbDims; i++) {
       if (i == axis_value) {
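This hunk generalizes the slice start handling from the single special case of -1 to any negative index, normalized against the dimension size. A minimal sketch of that normalization (hypothetical helper, not the project's code):

#include <iostream>

// Normalize a possibly negative slice start against the dimension size,
// mirroring the fixed branch: any start < 0 maps to dim_size + start.
static int NormalizeStart(int start_value, int dim_size) {
  if (start_value < 0) {                   // was: start_value == -1
    start_value = dim_size + start_value;  // was: dim_size - 1
  }
  return start_value;
}

int main() {
  std::cout << NormalizeStart(-1, 8) << " "       // 7 (same as before the fix)
            << NormalizeStart(-3, 8) << " "       // 5 (previously mishandled)
            << NormalizeStart(2, 8) << std::endl; // 2 (non-negative unchanged)
  return 0;
}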