forked from mindspore-Ecosystem/mindspore
!20660 [LITE] remove blanks and unused codes
Merge pull request !20660 from zhaozhenlong/lite/issue/remove_blanks_clean_code
commit ce09544be2
@@ -64,18 +64,7 @@ std::vector<int64_t> GetNpuTensorShape(int dim, std::shared_ptr<hiai::AiTensor>
   return npu_shape;
 }
 
-std::vector<int> ExpandShapeTo4d(const std::vector<int> &shape) {
-  if (shape.size() == 0 || shape.size() >= NPU_SHAPE_SIZE) {
-    return shape;
-  }
-  std::vector<int> ret{shape};
-  for (auto i = shape.size(); i < NPU_SHAPE_SIZE; ++i) {
-    ret.push_back(1);
-  }
-  return ret;
-}
-
-bool IsSameShapeTensor(mindspore::MSTensor tensor, std::shared_ptr<hiai::AiTensor> npu_tensor) {
+bool IsSameShapeTensor(mindspore::MSTensor tensor, const std::shared_ptr<hiai::AiTensor> &npu_tensor) {
   if (tensor.Shape().size() > NPU_SHAPE_SIZE) {
     MS_LOG(ERROR) << "Npu does not support output tensor dims greater than 4";
     return false;
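For context, the removed ExpandShapeTo4d helper simply pads a shape with trailing 1s until it reaches NPU_SHAPE_SIZE (4) dimensions. A minimal, self-contained sketch of that behavior follows; the local NPU_SHAPE_SIZE definition and the main() driver are added here only for illustration and are not part of the original file.

#include <cstdio>
#include <vector>

// Assumed value for illustration; in MindSpore Lite this constant comes from the NPU delegate headers.
constexpr size_t NPU_SHAPE_SIZE = 4;

// Same logic as the removed helper: pad a shape with trailing 1s up to 4 dimensions.
std::vector<int> ExpandShapeTo4d(const std::vector<int> &shape) {
  if (shape.size() == 0 || shape.size() >= NPU_SHAPE_SIZE) {
    return shape;
  }
  std::vector<int> ret{shape};
  for (auto i = shape.size(); i < NPU_SHAPE_SIZE; ++i) {
    ret.push_back(1);
  }
  return ret;
}

int main() {
  // Prints "2 3 1 1": a 2-D shape expanded to 4-D.
  for (int dim : ExpandShapeTo4d({2, 3})) {
    printf("%d ", dim);
  }
  printf("\n");
  return 0;
}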
@@ -137,7 +137,6 @@ int CumSumCPUKernel::DoCumsumInt(int task_id) {
 
 int CumSumCPUKernel::Run() {
   int ret = ParallelLaunch(this->ms_context_, CumsumLaunch, this, op_parameter_->thread_num_);
-
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "Crop launch fail!ret: " << ret;
     return RET_ERROR;