!9289 index use at

From: @zhaozhenlong
Reviewed-by: @zhanghaibo5, @hangangqiang
Signed-off-by: @hangangqiang

Commit 3eb4c14d86 by mindspore-ci-bot, 2020-12-02 10:00:55 +08:00, committed by Gitee
48 changed files with 226 additions and 222 deletions
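The change is mechanical throughout: raw operator[] indexing on std::vector (tensor lists, shape vectors, attribute arrays) is replaced by the bounds-checked .at() accessor. The difference, in a minimal standalone sketch (illustrative only, not code from this patch): operator[] on a bad index is undefined behavior, while .at() throws std::out_of_range and fails loudly at the point of use.

#include <iostream>
#include <stdexcept>
#include <vector>

int main() {
  std::vector<int> shape = {1, 224, 224, 3};
  // shape[4] would be undefined behavior: it may return garbage or crash,
  // and nothing in the program is required to notice.
  // shape.at(4) performs a bounds check and throws instead, so the bad
  // index is caught where it happens.
  try {
    std::cout << shape.at(4) << "\n";
  } catch (const std::out_of_range &e) {
    std::cerr << "bad index: " << e.what() << "\n";
  }
  return 0;
}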

File: (path not shown)

@@ -51,8 +51,8 @@ int ExpandDims::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr>
     return RET_ERROR;
   }
   // use axis instead of dim
-  if (inputs[1]->isa<ValueNode>()) {
-    auto axis_tensor = inputs[1]->cast<ValueNodePtr>();
+  if (inputs.at(1)->isa<ValueNode>()) {
+    auto axis_tensor = inputs.at(1)->cast<ValueNodePtr>();
     int axis = CastToInt(axis_tensor->value()).front();
     attr->dim = axis;
   } else {

File: (path not shown)

@@ -76,7 +76,7 @@ int Fill::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> output
   std::vector<int> output_shape;
   for (size_t i = 0; i < GetDims().size(); i++) {
-    output_shape.push_back(GetDims()[i]);
+    output_shape.push_back(GetDims().at(i));
   }
   output->set_shape(output_shape);
   return RET_OK;

File: (path not shown)

@@ -45,10 +45,10 @@ int Flatten::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> out
   auto input_shape = input->shape();
   std::vector<int> output_shape(2);
-  output_shape[0] = input_shape[0];
-  output_shape[1] = 1;
+  output_shape.at(0) = input_shape.at(0);
+  output_shape.at(1) = 1;
   for (size_t i = 1; i < input_shape.size(); i++) {
-    output_shape[1] *= input_shape[i];
+    output_shape.at(1) *= input_shape.at(i);
   }
   output->set_shape(output_shape);
   return RET_OK;

File: (path not shown)

@@ -44,10 +44,10 @@ int FlattenGrad::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *>
   auto input_shape = input->shape();
   std::vector<int> output_shape(2);
-  output_shape[0] = input_shape[0];
-  output_shape[1] = 1;
+  output_shape.at(0) = input_shape.at(0);
+  output_shape.at(1) = 1;
   for (size_t i = 1; i < input_shape.size(); i++) {
-    output_shape[1] *= input_shape[i];
+    output_shape.at(1) *= input_shape.at(i);
   }
   output->set_shape(output_shape);
   return RET_OK;

File: (path not shown)

@@ -65,7 +65,7 @@ int FullConnection::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<
   MS_ASSERT(this->primitive_ != nullptr);
   auto input0 = inputs_.front();
   MS_ASSERT(input0 != nullptr);
-  auto input1 = inputs_[1];
+  auto input1 = inputs_.at(1);
   MS_ASSERT(input1 != nullptr);
   auto output = outputs_.front();
   MS_ASSERT(output != nullptr);
@@ -83,34 +83,34 @@ int FullConnection::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<
   int new_k = 1;
   if (GetUseAxis()) {
     for (size_t i = GetAxis(); i < input0->shape().size(); ++i) {
-      new_k *= input0->shape()[i];
+      new_k *= input0->shape().at(i);
     }
-    if (new_k != input1->shape()[1]) {
+    if (new_k != input1->shape().at(1)) {
       MS_LOG(ERROR) << "Input1 size invalid";
       return RET_INPUT_TENSOR_ERROR;
     }
   } else {
-    new_k = input1->shape()[1];
+    new_k = input1->shape().at(1);
   }
   if (GetHasBias()) {
-    if (inputs_[2]->shape()[0] != input1->shape()[0]) {
+    if (inputs_.at(2)->shape().at(0) != input1->shape().at(0)) {
      MS_LOG(ERROR) << "bias size invalid";
      return RET_INPUT_TENSOR_ERROR;
    }
  }
-  std::vector<int> out_shape{inputs_[0]->shape()};
+  std::vector<int> out_shape{inputs_.at(0)->shape()};
   if (GetUseAxis()) {
     out_shape.resize(GetAxis() + 1);
-    out_shape[GetAxis()] = input1->shape()[0];
+    out_shape.at(GetAxis()) = input1->shape().at(0);
   } else {
     int total = 1;
     for (size_t i = 0; i < input0->shape().size(); ++i) {
-      total *= input0->shape()[i];
+      total *= input0->shape().at(i);
     }
     out_shape.resize(2);
     auto batch_size = total / new_k;
-    out_shape[0] = batch_size;
-    out_shape[1] = input1->shape()[0];
+    out_shape.at(0) = batch_size;
+    out_shape.at(1) = input1->shape().at(0);
   }
   output->set_shape(out_shape);
   output->set_data_type(input0->data_type());

File: (path not shown)

@@ -57,8 +57,8 @@ int Gather::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inp
       gather_attr = nullptr;
       return RET_ERROR;
     }
-    if (inputs[2]->isa<ValueNode>()) {
-      ValueNodePtr axis_tensor = inputs[2]->cast<ValueNodePtr>();
+    if (inputs.at(2)->isa<ValueNode>()) {
+      ValueNodePtr axis_tensor = inputs.at(2)->cast<ValueNodePtr>();
       int axis = CastToInt(axis_tensor->value()).front();
       gather_attr->axis = axis;
     } else {
@@ -137,7 +137,7 @@ int Gather::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outp
   std::vector<int> out_shape{in_shape};
   out_shape.erase(out_shape.begin() + axis);
   for (int i = indices_rank - 1; i >= 0; --i) {
-    out_shape.insert(out_shape.begin() + axis, indices_shape[i]);
+    out_shape.insert(out_shape.begin() + axis, indices_shape.at(i));
   }
   output->set_shape(out_shape);
   return RET_OK;

File: (path not shown)

@@ -72,17 +72,17 @@ int GatherNd::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> ou
   int in_rank = in_shape.size();
   auto indices_shape = indices->shape();
   int indices_rank = indices_shape.size();
-  if (indices_shape[indices_rank - 1] > in_rank) {
+  if (indices_shape.at(indices_rank - 1) > in_rank) {
     MS_LOG(ERROR) << "Input of indices data is error!";
     return RET_ERROR;
   }
   std::vector<int> out_shape;
   int i = 0;
   for (i = 0; i < indices_rank - 1; ++i) {
-    out_shape.emplace_back(indices_shape[i]);
+    out_shape.emplace_back(indices_shape.at(i));
   }
-  for (i = indices_shape[indices_rank - 1]; i < in_rank; ++i) {
-    out_shape.emplace_back(in_shape[i]);
+  for (i = indices_shape.at(indices_rank - 1); i < in_rank; ++i) {
+    out_shape.emplace_back(in_shape.at(i));
   }
   output->set_shape(out_shape);
   return RET_OK;

File: (path not shown)

@@ -97,7 +97,7 @@ int LayerNorm::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite:
   }
   size_t first_index = input_shape.size() - normalized_shape.size();
   for (size_t i = first_index; i < input_shape.size(); ++i) {
-    if (input_shape[i] != normalized_shape[i - first_index]) {
+    if (input_shape.at(i) != normalized_shape.at(i - first_index)) {
       MS_LOG(INFO) << "normalized_shape attr invalid";
       return RET_PARAM_INVALID;
     }

File: (path not shown)

@@ -59,13 +59,13 @@ int Lstm::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> output
   }
   auto input = inputs_.front();
   MS_ASSERT(input != nullptr);
-  auto weight_i = inputs_[1];
-  MS_ASSERT(input != nullptr);
+  auto weight_i = inputs_.at(1);
+  MS_ASSERT(weight_i != nullptr);
   auto output = outputs_.front();
   MS_ASSERT(output != nullptr);
   for (int i = 0; i < kLstmOutputNum; i++) {
-    outputs_[i]->set_data_type(input->data_type());
-    outputs_[i]->set_format(input->format());
+    outputs_.at(i)->set_data_type(input->data_type());
+    outputs_.at(i)->set_format(input->format());
   }
   if (!infer_flag()) {
     return RET_OK;

File: (path not shown)

@@ -125,7 +125,7 @@ int MatMul::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outp
     del_end = true;
   }
   for (size_t i = 0; i < (a_shape.size() - 2) && i < (b_shape.size() - 2); ++i) {
-    if (a_shape[a_shape.size() - 3 - i] != b_shape[b_shape.size() - 3 - i]) {
+    if (a_shape.at(a_shape.size() - 3 - i) != b_shape.at(b_shape.size() - 3 - i)) {
      MS_LOG(ERROR) << "Op MatMul's dimensions must be equal";
      return RET_INPUT_TENSOR_ERROR;
    }

File: (path not shown)

@@ -103,7 +103,7 @@ int Mean::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> output
   for (size_t i = 0; i < in_shape.size(); i++) {
     bool reduce_axis = false;
     for (size_t idx = 0; idx < num_axes; ++idx) {
-      if (static_cast<size_t>(axes[idx]) == i) {
+      if (static_cast<size_t>(axes.at(idx)) == i) {
        reduce_axis = true;
        break;
      }
@@ -113,7 +113,7 @@ int Mean::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> output
        out_shape.push_back(1);
      }
    } else {
-      out_shape.push_back(in_shape[i]);
+      out_shape.push_back(in_shape.at(i));
    }
  }
  output->set_shape(out_shape);

File: (path not shown)

@@ -72,8 +72,8 @@ PrimitiveC *OnesLikeCreator(const schema::Primitive *primitive) {
 Registry OnesLikeRegistry(schema::PrimitiveType_OnesLike, OnesLikeCreator);
 #endif
 int OnesLike::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
-  Tensor *x = inputs_[0];
-  Tensor *out = outputs_[0];
+  Tensor *x = inputs_.at(0);
+  Tensor *out = outputs_.at(0);
   std::vector<int> x_shape = x->shape();
   std::vector<int> output_shape(x_shape.size());
   output_shape.assign(x_shape.begin(), x_shape.end());

File: (path not shown)

@@ -110,7 +110,7 @@ int Pad::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs)
   MS_ASSERT(input->shape().size() <= 4);
   for (size_t i = 0; i < input_shape.size(); i++) {
     auto paddings_index = i;
-    auto shape = input_shape[i] + paddings[2 * paddings_index] + paddings[2 * paddings_index + 1];
+    auto shape = input_shape.at(i) + paddings.at(2 * paddings_index) + paddings.at(2 * paddings_index + 1);
     output_shape.push_back(shape);
   }

File: (path not shown)

@@ -111,12 +111,12 @@ int Pooling::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &in
     }
     auto kernel_size = CastToInt(prim.GetAttr("ksize"));
-    attr->windowH = kernel_size[2];
-    attr->windowW = kernel_size[3];
+    attr->windowH = kernel_size.at(2);
+    attr->windowW = kernel_size.at(3);
     auto stride = CastToInt(prim.GetAttr("strides"));
-    attr->strideH = stride[2];
-    attr->strideW = stride[3];
+    attr->strideH = stride.at(2);
+    attr->strideW = stride.at(3);
     this->primitive_->value.value = attr;
     if (this->primitive_->value.value == nullptr) {
       MS_LOG(ERROR) << "primitive value is nullptr";

File: (path not shown)

@@ -100,12 +100,12 @@ int PoolingGrad::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr>
     }
     auto kernel_size = CastToInt(prim.GetAttr("ksize"));
-    attr->windowH = kernel_size[2];
-    attr->windowW = kernel_size[3];
+    attr->windowH = kernel_size.at(2);
+    attr->windowW = kernel_size.at(3);
     auto stride = CastToInt(prim.GetAttr("strides"));
-    attr->strideH = stride[2];
-    attr->strideW = stride[3];
+    attr->strideH = stride.at(2);
+    attr->strideW = stride.at(3);
     this->primitive_->value.value = attr;
     if (this->primitive_->value.value == nullptr) {
       MS_LOG(ERROR) << "primitive value is nullptr";

File: (path not shown)

@@ -103,14 +103,14 @@ Registry PowerRegistry(schema::PrimitiveType_Power, PowerCreator);
 int Power::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) {
   MS_ASSERT(this->primitive_ != nullptr);
-  auto x_tensor = inputs[0];
+  auto x_tensor = inputs.at(0);
   MS_ASSERT(x_tensor != nullptr);
   Tensor *exp_tensor = nullptr;
   if (inputs.size() == 2) {
-    exp_tensor = inputs[1];
+    exp_tensor = inputs.at(1);
     MS_ASSERT(exp_tensor != nullptr);
   }
-  auto output_tensor = outputs[0];
+  auto output_tensor = outputs.at(0);
   MS_ASSERT(output_tensor != nullptr);
   output_tensor->set_data_type(x_tensor->data_type());
   output_tensor->set_format(x_tensor->format());
@@ -119,7 +119,7 @@ int Power::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> output
   }
   if (exp_tensor != nullptr) {
     if ((exp_tensor->shape().size() > 1 && exp_tensor->shape() != x_tensor->shape()) ||
-        (exp_tensor->shape().size() == 1 && exp_tensor->shape()[0] != 1) ||
+        (exp_tensor->shape().size() == 1 && exp_tensor->shape().at(0) != 1) ||
        exp_tensor->data_type() != x_tensor->data_type()) {
      MS_LOG(ERROR) << "Power inputs shape or type is not equal!";
      return RET_INPUT_TENSOR_ERROR;

File: (path not shown)

@@ -331,7 +331,7 @@ void PrimitiveC::GetAttrDataFromInput(const AnfNodePtr &inputNode, std::vector<i
     auto tuple = val->cast<ValueTuplePtr>();
     MS_ASSERT(tuple != nullptr);
     for (size_t i = 0; i < tuple->size(); i++) {
-      auto elem = tuple->value()[i];
+      auto elem = tuple->value().at(i);
       MS_ASSERT(elem != nullptr);
       data->emplace_back(CastToInt(elem).front());
     }
@@ -349,7 +349,7 @@ void PrimitiveC::set_input_quant_params(const std::vector<std::vector<schema::Qu
 void PrimitiveC::set_input_quant_param(const size_t &index, const std::vector<schema::QuantParamT> &input_quant_param) {
   MS_ASSERT(index < this->input_quant_param_.size());
-  this->input_quant_param_[index] = input_quant_param;
+  this->input_quant_param_.at(index) = input_quant_param;
 }
 void PrimitiveC::set_output_quant_params(const std::vector<std::vector<schema::QuantParamT>> &output_quant_param) {
@@ -359,7 +359,7 @@ void PrimitiveC::set_output_quant_params(const std::vector<std::vector<schema::Q
 void PrimitiveC::set_output_quant_param(const size_t &index,
                                         const std::vector<schema::QuantParamT> &output_quant_param) {
   MS_ASSERT(index < this->output_quant_param_.size());
-  this->output_quant_param_[index] = output_quant_param;
+  this->output_quant_param_.at(index) = output_quant_param;
 }
 bool PrimitiveC::IsInputQuantParamsInited() {

File: (path not shown)

@@ -58,11 +58,11 @@ int PriorBoxCPUKernel::Init() {
 int PriorBoxCPUKernel::ReSize() { return GeneratePriorBox(); }
 int PriorBoxCPUKernel::GeneratePriorBox() {
-  const int fmap_w = in_tensors_[0]->Width();
-  const int fmap_h = in_tensors_[0]->Height();
-  const int image_w = prior_box_param_->image_size_w > 0 ? prior_box_param_->image_size_w : in_tensors_[1]->Width();
-  const int image_h = prior_box_param_->image_size_h > 0 ? prior_box_param_->image_size_h : in_tensors_[1]->Height();
+  const int fmap_w = in_tensors_.at(0)->Width();
+  const int fmap_h = in_tensors_.at(0)->Height();
+  const int image_w = prior_box_param_->image_size_w > 0 ? prior_box_param_->image_size_w : in_tensors_.at(1)->Width();
+  const int image_h = prior_box_param_->image_size_h > 0 ? prior_box_param_->image_size_h : in_tensors_.at(1)->Height();
   const float step_w =
     prior_box_param_->step_w > 0.0f ? prior_box_param_->step_w : static_cast<float>(image_w) / fmap_w;

File: (path not shown)

@@ -54,10 +54,10 @@ void FullconnectionFP16CPUKernel::FreeTmpBuffer() {
 int FullconnectionFP16CPUKernel::ReSize() {
   FreeTmpBuffer();
   int row = 1;
-  for (size_t i = 0; i < out_tensors_[0]->shape().size() - 1; ++i) row *= (out_tensors_[0]->shape())[i];
+  for (size_t i = 0; i < out_tensors_.at(0)->shape().size() - 1; ++i) row *= (out_tensors_.at(0)->shape())[i];
   fc_param_->row_ = row;
-  fc_param_->col_ = out_tensors_[0]->shape().back();
-  fc_param_->deep_ = (in_tensors_[1]->shape())[1];
+  fc_param_->col_ = out_tensors_.at(0)->shape().back();
+  fc_param_->deep_ = (in_tensors_.at(1)->shape()).at(1);
   fc_param_->row_16_ = UP_ROUND(fc_param_->row_, C16NUM);
   fc_param_->col_8_ = UP_ROUND(fc_param_->col_, C8NUM);
   thread_count_ = MSMIN(thread_count_, UP_DIV(fc_param_->col_, C8NUM));
@@ -89,21 +89,21 @@ int FullconnectionFP16CPUKernel::ReSize() {
   }
   memset(b_pack_ptr_, 0, b_pack_col * fc_param_->deep_ * sizeof(float16_t));
-  fc_param_->b_const_ = (in_tensors_[1]->data_c() != nullptr);
+  fc_param_->b_const_ = (in_tensors_.at(1)->data_c() != nullptr);
   if (fc_param_->b_const_) {
-    if (in_tensors_[1]->data_type() == kNumberTypeFloat32) {
+    if (in_tensors_.at(1)->data_type() == kNumberTypeFloat32) {
       if (is_vector_input_) {
-        Float32ToFloat16(reinterpret_cast<float *>(in_tensors_[1]->data_c()), b_pack_ptr_,
+        Float32ToFloat16(reinterpret_cast<float *>(in_tensors_.at(1)->data_c()), b_pack_ptr_,
                          fc_param_->col_ * fc_param_->deep_);
       } else {
-        InitMatrixB(reinterpret_cast<float *>(in_tensors_[1]->data_c()), b_pack_ptr_);
+        InitMatrixB(reinterpret_cast<float *>(in_tensors_.at(1)->data_c()), b_pack_ptr_);
       }
     } else {
       if (is_vector_input_) {
-        memcpy(b_pack_ptr_, reinterpret_cast<float16_t *>(in_tensors_[1]->data_c()),
+        memcpy(b_pack_ptr_, reinterpret_cast<float16_t *>(in_tensors_.at(1)->data_c()),
               fc_param_->col_ * fc_param_->deep_ * sizeof(float16_t));
      } else {
-        InitMatrixB(reinterpret_cast<float16_t *>(in_tensors_[1]->data_c()), b_pack_ptr_);
+        InitMatrixB(reinterpret_cast<float16_t *>(in_tensors_.at(1)->data_c()), b_pack_ptr_);
      }
    }
    b_ptr_ = b_pack_ptr_;
@@ -116,10 +116,10 @@ int FullconnectionFP16CPUKernel::ReSize() {
      return RET_MEMORY_FAILED;
    }
    memset(bias_ptr_, 0, b_pack_col * sizeof(float16_t));
-    Float32ToFloat16(reinterpret_cast<float *>(in_tensors_[2]->data_c()), bias_ptr_, fc_param_->col_);
+    Float32ToFloat16(reinterpret_cast<float *>(in_tensors_.at(2)->data_c()), bias_ptr_, fc_param_->col_);
  }
-  if (out_tensors_[0]->data_type() == kNumberTypeFloat32) {
+  if (out_tensors_.at(0)->data_type() == kNumberTypeFloat32) {
    output_fp16_ =
      reinterpret_cast<float16_t *>(ctx_->allocator->Malloc(fc_param_->row_ * fc_param_->col_ * sizeof(float16_t)));
    if (output_fp16_ == nullptr) {
@@ -183,43 +183,43 @@ int FcFP16Run(void *cdata, int task_id) {
 }
 int FullconnectionFP16CPUKernel::Run() {
-  auto out_tensor = out_tensors_[0];
+  auto out_tensor = out_tensors_.at(0);
   if (out_tensor->data_type() == kNumberTypeFloat32) {
     output_ptr_ = output_fp16_;
   } else {
     output_ptr_ = reinterpret_cast<float16_t *>(out_tensor->data_c());
   }
-  if (in_tensors_[0]->data_type() == kNumberTypeFloat32) {
+  if (in_tensors_.at(0)->data_type() == kNumberTypeFloat32) {
     if (is_vector_input_) {
-      Float32ToFloat16(reinterpret_cast<float *>(in_tensors_[0]->data_c()), a_pack_ptr_, fc_param_->deep_);
+      Float32ToFloat16(reinterpret_cast<float *>(in_tensors_.at(0)->data_c()), a_pack_ptr_, fc_param_->deep_);
     } else {
-      InitMatrixA(reinterpret_cast<float *>(in_tensors_[0]->data_c()), a_pack_ptr_);
+      InitMatrixA(reinterpret_cast<float *>(in_tensors_.at(0)->data_c()), a_pack_ptr_);
     }
     a_ptr_ = a_pack_ptr_;
   } else {
     if (is_vector_input_) {
-      a_ptr_ = reinterpret_cast<float16_t *>(in_tensors_[0]->data_c());
+      a_ptr_ = reinterpret_cast<float16_t *>(in_tensors_.at(0)->data_c());
     } else {
-      InitMatrixA(reinterpret_cast<float16_t *>(in_tensors_[0]->data_c()), a_pack_ptr_);
+      InitMatrixA(reinterpret_cast<float16_t *>(in_tensors_.at(0)->data_c()), a_pack_ptr_);
       a_ptr_ = a_pack_ptr_;
     }
   }
   if (!fc_param_->b_const_) {
-    if (in_tensors_[1]->data_type() == kNumberTypeFloat32) {
+    if (in_tensors_.at(1)->data_type() == kNumberTypeFloat32) {
       if (is_vector_input_) {
-        Float32ToFloat16(reinterpret_cast<float *>(in_tensors_[1]->data_c()), b_pack_ptr_,
+        Float32ToFloat16(reinterpret_cast<float *>(in_tensors_.at(1)->data_c()), b_pack_ptr_,
                          fc_param_->col_ * fc_param_->deep_);
       } else {
-        InitMatrixB(reinterpret_cast<float *>(in_tensors_[1]->data_c()), b_pack_ptr_);
+        InitMatrixB(reinterpret_cast<float *>(in_tensors_.at(1)->data_c()), b_pack_ptr_);
       }
       b_ptr_ = b_pack_ptr_;
     } else {
       if (is_vector_input_) {
-        b_ptr_ = reinterpret_cast<float16_t *>(in_tensors_[1]->data_c());
+        b_ptr_ = reinterpret_cast<float16_t *>(in_tensors_.at(1)->data_c());
      } else {
-        InitMatrixB(reinterpret_cast<float16_t *>(in_tensors_[1]->data_c()), b_pack_ptr_);
+        InitMatrixB(reinterpret_cast<float16_t *>(in_tensors_.at(1)->data_c()), b_pack_ptr_);
        b_ptr_ = b_pack_ptr_;
      }
    }
  }

File: (path not shown)

@@ -28,7 +28,7 @@ using mindspore::schema::PrimitiveType_Conv2D;
 namespace mindspore::kernel {
 int GroupConvolutionFP16CPUKernel::Init() {
   for (int i = 0; i < group_num_; ++i) {
-    auto ret = group_convs_[i]->Init();
+    auto ret = group_convs_.at(i)->Init();
     if (ret != RET_OK) {
       MS_LOG(ERROR) << "Sub kernel init failed.";
       return ret;
@@ -40,7 +40,7 @@ int GroupConvolutionFP16CPUKernel::Init() {
 int GroupConvolutionFP16CPUKernel::ReSize() {
   for (int i = 0; i < group_num_; ++i) {
-    auto ret = group_convs_[i]->ReSize();
+    auto ret = group_convs_.at(i)->ReSize();
     if (ret != RET_OK) {
       MS_LOG(ERROR) << "Sub kernel resize failed.";
       return RET_ERROR;
@@ -94,7 +94,7 @@ int GroupConvolutionFP16CPUKernel::PreProcess() {
       int in_w = conv_param_->input_w_;
       int in_c = conv_param_->input_channel_;
       in_shape = {in_batch, in_h, in_w, in_c};
-      auto sub_kernel_in_tensor = group_convs_[i]->in_tensors().front();
+      auto sub_kernel_in_tensor = group_convs_.at(i)->in_tensors().front();
       sub_kernel_in_tensor->set_shape(in_shape);
       ret = sub_kernel_in_tensor->MallocData();
       if (ret != RET_OK) {
@@ -141,9 +141,9 @@ int GroupConvolutionFP16CPUKernel::SeparateInput(int group_id) {
   int in_plane = in_h * in_w;
   int sub_in_channel = conv_param_->input_channel_;
   int ori_in_channel = sub_in_channel * group_num_;
-  auto sub_in_data = group_convs_[group_id]->in_tensors().front()->data_c();
+  auto sub_in_data = group_convs_.at(group_id)->in_tensors().front()->data_c();
   auto in_data_type = in_tensors_.front()->data_type();
-  auto sub_in_data_type = group_convs_[group_id]->in_tensors().front()->data_type();
+  auto sub_in_data_type = group_convs_.at(group_id)->in_tensors().front()->data_type();
   if (in_data_type != sub_in_data_type) {
     MS_LOG(ERROR) << "data type of sub conv kernel input should be the same as origin input's.";
     return RET_ERROR;
@@ -183,7 +183,7 @@ void GroupConvolutionFP16CPUKernel::PostConcat(int group_id) {
   int out_plane = out_h * out_w;
   int sub_out_channel = conv_param_->output_channel_;
   int ori_out_channel = sub_out_channel * group_num_;
-  auto sub_out_data = reinterpret_cast<float16_t *>(group_convs_[group_id]->out_tensors().front()->data_c());
+  auto sub_out_data = reinterpret_cast<float16_t *>(group_convs_.at(group_id)->out_tensors().front()->data_c());
   MS_ASSERT(sub_out_data);
   float16_t *src_ptr = sub_out_data;
   float16_t *dst_ptr = ori_out_data_ + group_id * sub_out_channel;
@@ -206,7 +206,7 @@ int GroupConvolutionFP16CPUKernel::Run() {
       return ret;
     }
     // sun kernels run
-    ret = group_convs_[i]->Run();
+    ret = group_convs_.at(i)->Run();
     if (ret != RET_OK) {
       MS_LOG(ERROR) << "sub kernel " << i << " execute failed.";
       return ret;

File: (path not shown)

@@ -262,7 +262,7 @@ int MatmulFP16Run(void *cdata, int task_id) {
 }
 int MatmulFP16CPUKernel::Run() {
-  auto out_tensor = out_tensors_[0];
+  auto out_tensor = out_tensors_.at(0);
   auto ret = MallocFp16Output();
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "Matmul MallocFp16Output failed";
@@ -280,10 +280,10 @@ int MatmulFP16CPUKernel::Run() {
       MS_LOG(ERROR) << "Matmul fp16 malloc matrix A buffer failed";
       return RET_ERROR;
     }
-    if (in_tensors_[0]->data_type() == kNumberTypeFloat32) {
-      InitMatrixA(reinterpret_cast<float *>(in_tensors_[0]->data_c()), a_pack_ptr_);
+    if (in_tensors_.at(0)->data_type() == kNumberTypeFloat32) {
+      InitMatrixA(reinterpret_cast<float *>(in_tensors_.at(0)->data_c()), a_pack_ptr_);
     } else {
-      InitMatrixA(reinterpret_cast<float16_t *>(in_tensors_[0]->data_c()), a_pack_ptr_);
+      InitMatrixA(reinterpret_cast<float16_t *>(in_tensors_.at(0)->data_c()), a_pack_ptr_);
     }
   }
   if (!params_->b_const_) {
@@ -292,10 +292,10 @@ int MatmulFP16CPUKernel::Run() {
      MS_LOG(ERROR) << "Matmul fp16 malloc matrix B buffer failed";
      return RET_ERROR;
    }
-    if (in_tensors_[1]->data_type() == kNumberTypeFloat32) {
-      InitMatrixB(reinterpret_cast<float *>(in_tensors_[1]->data_c()), b_pack_ptr_);
+    if (in_tensors_.at(1)->data_type() == kNumberTypeFloat32) {
+      InitMatrixB(reinterpret_cast<float *>(in_tensors_.at(1)->data_c()), b_pack_ptr_);
    } else {
-      InitMatrixB(reinterpret_cast<float16_t *>(in_tensors_[1]->data_c()), b_pack_ptr_);
+      InitMatrixB(reinterpret_cast<float16_t *>(in_tensors_.at(1)->data_c()), b_pack_ptr_);
    }
  }
  for (int i = 0; i < params_->batch; ++i) {

File: (path not shown)

@@ -115,14 +115,14 @@ int QuantDTypeCastFP16Run(void *cdata, int task_id) {
 }
 int QuantDTypeCastFp16CPUKernel::Run() {
-  if (in_tensors_[0]->data_type() == TypeId::kNumberTypeInt8 &&
-      out_tensors_[0]->data_type() == TypeId::kNumberTypeFloat16) {
-    int8_ptr_ = reinterpret_cast<int8_t *>(in_tensors_[0]->data_c());
-    float16_ptr_ = reinterpret_cast<float16_t *>(out_tensors_[0]->data_c());
-  } else if (in_tensors_[0]->data_type() == TypeId::kNumberTypeFloat16 &&
-             out_tensors_[0]->data_type() == TypeId::kNumberTypeInt8) {
-    float16_ptr_ = reinterpret_cast<float16_t *>(in_tensors_[0]->data_c());
-    int8_ptr_ = reinterpret_cast<int8_t *>(out_tensors_[0]->data_c());
+  if (in_tensors_.at(0)->data_type() == TypeId::kNumberTypeInt8 &&
+      out_tensors_.at(0)->data_type() == TypeId::kNumberTypeFloat16) {
+    int8_ptr_ = reinterpret_cast<int8_t *>(in_tensors_.at(0)->data_c());
+    float16_ptr_ = reinterpret_cast<float16_t *>(out_tensors_.at(0)->data_c());
+  } else if (in_tensors_.at(0)->data_type() == TypeId::kNumberTypeFloat16 &&
+             out_tensors_.at(0)->data_type() == TypeId::kNumberTypeInt8) {
+    float16_ptr_ = reinterpret_cast<float16_t *>(in_tensors_.at(0)->data_c());
+    int8_ptr_ = reinterpret_cast<int8_t *>(out_tensors_.at(0)->data_c());
   } else {
     MS_LOG(ERROR) << "QuantDTypeCastFp16 not support input or output type";
     return RET_ERROR;

File: (path not shown)

@@ -48,14 +48,14 @@ int ExpandDimsCPUKernel::DoExpandDims(int task_id) {
     return RET_OK;
   }
   int offset = task_id * thread_sz_stride_;
-  if (this->in_tensors_[0]->data_type() == kNumberTypeFloat32) {
+  if (this->in_tensors_.at(0)->data_type() == kNumberTypeFloat32) {
     int ret = ExpandDims(reinterpret_cast<float *>(in_ptr_) + offset, reinterpret_cast<float *>(out_ptr_) + offset,
                          size * sizeof(float));
     if (ret != RET_OK) {
       MS_LOG(ERROR) << "ExpandDimsRun error task_id[" << task_id << "] error_code[" << ret << "]";
       return ret;
     }
-  } else if (this->in_tensors_[0]->data_type() == kNumberTypeInt8) {
+  } else if (this->in_tensors_.at(0)->data_type() == kNumberTypeInt8) {
     int ret = ExpandDims(reinterpret_cast<int8_t *>(in_ptr_) + offset, reinterpret_cast<int8_t *>(out_ptr_) + offset,
                          size * sizeof(int8_t));
     if (ret != RET_OK) {

File: (path not shown)

@@ -35,17 +35,17 @@ int FlattenCPUKernel::Init() {
 }
 int FlattenCPUKernel::ReSize() {
-  auto output_shape = out_tensors_[0]->shape();
+  auto output_shape = out_tensors_.at(0)->shape();
   flatten_param_->size = sizeof(float);
   for (size_t i = 0; i < output_shape.size(); i++) {
-    flatten_param_->size *= output_shape[i];
+    flatten_param_->size *= output_shape.at(i);
   }
   return RET_OK;
 }
 int FlattenCPUKernel::Run() {
-  auto input = reinterpret_cast<float *>(in_tensors_[0]->MutableData());
-  auto output = reinterpret_cast<float *>(out_tensors_[0]->MutableData());
+  auto input = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
+  auto output = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
   Flatten(input, output, flatten_param_);
   return RET_OK;
 }

File: (path not shown)

@@ -44,12 +44,12 @@ void FullconnectionCPUKernel::FreeBuf() {
 int FullconnectionCPUKernel::ReSize() {
   FreeBuf();
   int row = 1;
-  for (size_t i = 0; i < out_tensors_[0]->shape().size() - 1; ++i) {
-    row *= (out_tensors_[0]->shape())[i];
+  for (size_t i = 0; i < out_tensors_.at(0)->shape().size() - 1; ++i) {
+    row *= (out_tensors_.at(0)->shape())[i];
   }
   fc_param_->row_ = row;
-  fc_param_->col_ = out_tensors_[0]->shape().back();
-  fc_param_->deep_ = (in_tensors_[1]->shape())[1];
+  fc_param_->col_ = out_tensors_.at(0)->shape().back();
+  fc_param_->deep_ = (in_tensors_.at(1)->shape()).at(1);
   fc_param_->row_12_ = UP_ROUND(fc_param_->row_, C12NUM);
   fc_param_->col_8_ = UP_ROUND(fc_param_->col_, C8NUM);
@@ -98,14 +98,14 @@ int FullconnectionCPUKernel::ReSize() {
   }
   memset(b_pack_ptr_, 0, col_tmp * fc_param_->deep_ * sizeof(float));
-  fc_param_->a_const_ = (in_tensors_[0]->data_c() != nullptr);
-  fc_param_->b_const_ = (in_tensors_[1]->data_c() != nullptr);
+  fc_param_->a_const_ = (in_tensors_.at(0)->data_c() != nullptr);
+  fc_param_->b_const_ = (in_tensors_.at(1)->data_c() != nullptr);
   if (fc_param_->a_const_) {
-    InitMatrixA(reinterpret_cast<float *>(in_tensors_[0]->MutableData()), a_pack_ptr_);
+    InitMatrixA(reinterpret_cast<float *>(in_tensors_.at(0)->MutableData()), a_pack_ptr_);
     a_ptr_ = a_pack_ptr_;
   }
   if (fc_param_->b_const_) {
-    InitMatrixB(reinterpret_cast<float *>(in_tensors_[1]->MutableData()), b_pack_ptr_);
+    InitMatrixB(reinterpret_cast<float *>(in_tensors_.at(1)->MutableData()), b_pack_ptr_);
     b_ptr_ = b_pack_ptr_;
   }
   return RET_OK;

File: (path not shown)

@@ -42,10 +42,10 @@ void FusedBatchnormCPUKernel::FreeScaleAndOffset() {
 }
 int FusedBatchnormCPUKernel::InitConstTensor() {
-  auto scale = in_tensors_[1];
-  auto offset = in_tensors_[2];
-  auto mean = in_tensors_[3];
-  auto variance = in_tensors_[4];
+  auto scale = in_tensors_.at(1);
+  auto offset = in_tensors_.at(2);
+  auto mean = in_tensors_.at(3);
+  auto variance = in_tensors_.at(4);
   scale_ = malloc(scale->Size());
   offset_ = malloc(offset->Size());
@@ -82,10 +82,10 @@ int FusedBatchnormCPUKernel::Run() {
     FusedBatchNormFp32MeanVar(in, current_mean, current_var, param, static_cast<float *>(save_mean),
                               static_cast<float *>(save_variance));
-    memcpy(out_tensors_[1]->MutableData(), scale, out_tensors_[1]->Size());
-    memcpy(out_tensors_[2]->MutableData(), offset, out_tensors_[2]->Size());
-    memcpy(out_tensors_[3]->MutableData(), current_mean, out_tensors_[3]->Size());
-    memcpy(out_tensors_[4]->MutableData(), current_var, out_tensors_[4]->Size());
+    memcpy(out_tensors_.at(1)->MutableData(), scale, out_tensors_.at(1)->Size());
+    memcpy(out_tensors_.at(2)->MutableData(), offset, out_tensors_.at(2)->Size());
+    memcpy(out_tensors_.at(3)->MutableData(), current_mean, out_tensors_.at(3)->Size());
+    memcpy(out_tensors_.at(4)->MutableData(), current_var, out_tensors_.at(4)->Size());
     // Copy to local variables
     memcpy(scale_, scale, in_tensors_[1]->Size());
@@ -108,16 +108,16 @@ int FusedBatchnormCPUKernel::Run() {
 int FusedBatchnormCPUKernel::Eval() {
   LiteKernel::Eval();
   if (trained_) {
-    float *save_mean = static_cast<float *>(in_tensors_[3]->MutableData());
-    float *save_var = static_cast<float *>(in_tensors_[4]->MutableData());
-    float *scale = static_cast<float *>(in_tensors_[1]->MutableData());
-    float *bias = static_cast<float *>(in_tensors_[2]->MutableData());
+    float *save_mean = static_cast<float *>(in_tensors_.at(3)->MutableData());
+    float *save_var = static_cast<float *>(in_tensors_.at(4)->MutableData());
+    float *scale = static_cast<float *>(in_tensors_.at(1)->MutableData());
+    float *bias = static_cast<float *>(in_tensors_.at(2)->MutableData());
     // Copy to local variables
-    memcpy(scale_, scale, in_tensors_[1]->Size());
-    memcpy(offset_, bias, in_tensors_[2]->Size());
-    memcpy(mean_, save_mean, in_tensors_[3]->Size());
-    memcpy(variance_, save_var, in_tensors_[4]->Size());
+    memcpy(scale_, scale, in_tensors_.at(1)->Size());
+    memcpy(offset_, bias, in_tensors_.at(2)->Size());
+    memcpy(mean_, save_mean, in_tensors_.at(3)->Size());
+    memcpy(variance_, save_var, in_tensors_.at(4)->Size());
   }
   return RET_OK;
 }

File: (path not shown)

@@ -84,7 +84,7 @@ int GatherNdCPUKernel::ReSize() {
   int idx_stride = idx_lastshape;
   for (int j = 0; j < count_; ++j) {
     for (int k = 0; k < idx_lastshape; ++k) {
-      in_offset_[j] += indices_ptr[j * idx_stride + k] * in_stride[k];
+      in_offset_[j] += indices_ptr[j * idx_stride + k] * in_stride.at(k);
     }
   }

File: (path not shown)

@@ -55,14 +55,14 @@ int GatherCPUKernel::DoGather(int task_id) {
   int indices_element_size = indices_tensor->ElementsNum();
   auto axis = (reinterpret_cast<GatherParameter *>(op_parameter_))->axis_;
-  const int limit = in_shape[axis];
+  const int limit = in_shape.at(axis);
   int outer_size = 1, inner_size = 1;
   for (int i = 0; i < axis; ++i) {
-    outer_size *= in_shape[i];
+    outer_size *= in_shape.at(i);
   }
   for (int i = axis + 1; i < in_rank; ++i) {
-    inner_size *= in_shape[i];
+    inner_size *= in_shape.at(i);
   }
   int stride = UP_DIV(outer_size, op_parameter_->thread_num_);
   int count = MSMIN(stride, outer_size - stride * task_id);

File: (path not shown)

@@ -28,7 +28,7 @@ using mindspore::schema::PrimitiveType_Conv2D;
 namespace mindspore::kernel {
 int GroupConvolutionCPUKernel::Init() {
   for (int i = 0; i < group_num_; ++i) {
-    auto ret = group_convs_[i]->Init();
+    auto ret = group_convs_.at(i)->Init();
     if (ret != RET_OK) {
       MS_LOG(ERROR) << "Sub kernel init failed.";
       return ret;
@@ -40,7 +40,7 @@ int GroupConvolutionCPUKernel::Init() {
 int GroupConvolutionCPUKernel::ReSize() {
   for (int i = 0; i < group_num_; ++i) {
-    auto ret = group_convs_[i]->ReSize();
+    auto ret = group_convs_.at(i)->ReSize();
     if (ret != RET_OK) {
       MS_LOG(ERROR) << "Sub kernel resize failed.";
       return RET_ERROR;
@@ -94,7 +94,7 @@ int GroupConvolutionCPUKernel::PreProcess() {
       int in_w = conv_param_->input_w_;
       int in_c = conv_param_->input_channel_;
       in_shape = {in_batch, in_h, in_w, in_c};
-      auto sub_kernel_in_tensor = group_convs_[i]->in_tensors().front();
+      auto sub_kernel_in_tensor = group_convs_.at(i)->in_tensors().front();
       sub_kernel_in_tensor->set_shape(in_shape);
       ret = sub_kernel_in_tensor->MallocData();
       if (ret != RET_OK) {
@@ -108,7 +108,7 @@ int GroupConvolutionCPUKernel::PreProcess() {
      int out_w = conv_param_->output_w_;
      int out_c = conv_param_->output_channel_;
      out_shape = {out_batch, out_h, out_w, out_c};
-      auto sub_kernel_out_tensors = group_convs_[i]->out_tensors();
+      auto sub_kernel_out_tensors = group_convs_.at(i)->out_tensors();
      for (auto tensor : sub_kernel_out_tensors) {
        tensor->set_shape(out_shape);
        ret = tensor->MallocData();
@@ -140,7 +140,7 @@ void GroupConvolutionCPUKernel::SeparateInput(int group_id) {
   int in_plane = in_h * in_w;
   int sub_in_channel = conv_param_->input_channel_;
   int ori_in_channel = sub_in_channel * group_num_;
-  auto sub_in_data = reinterpret_cast<float *>(group_convs_[group_id]->in_tensors().front()->data_c());
+  auto sub_in_data = reinterpret_cast<float *>(group_convs_.at(group_id)->in_tensors().front()->data_c());
   float *src_ptr = ori_in_data_ + group_id * sub_in_channel;
   float *dst_ptr = sub_in_data;
   for (int i = 0; i < in_plane; ++i) {
@@ -156,7 +156,7 @@ void GroupConvolutionCPUKernel::PostConcat(int group_id) {
   int out_plane = out_h * out_w;
   int sub_out_channel = conv_param_->output_channel_;
   int ori_out_channel = sub_out_channel * group_num_;
-  auto sub_out_data = reinterpret_cast<float *>(group_convs_[group_id]->out_tensors().front()->data_c());
+  auto sub_out_data = reinterpret_cast<float *>(group_convs_.at(group_id)->out_tensors().front()->data_c());
   float *src_ptr = sub_out_data;
   float *dst_ptr = ori_out_data_ + group_id * sub_out_channel;
   for (int i = 0; i < out_plane; ++i) {
@@ -173,7 +173,7 @@ int GroupConvolutionCPUKernel::Run() {
     // first, separate group conv input into several parts. This step must be in runtime stage.
     SeparateInput(i);
     // sun kernels run
-    auto ret = group_convs_[i]->Run();
+    auto ret = group_convs_.at(i)->Run();
     if (ret != RET_OK) {
       MS_LOG(ERROR) << "sub kernel " << i << " execute failed.";
       return ret;

File: (path not shown)

@@ -36,12 +36,12 @@ int InstanceNormCPUKernel::Init() {
 int InstanceNormCPUKernel::ReSize() {
   auto input_shapes = in_tensors_.front()->shape();
   auto n_dim = input_shapes.size();
-  outer_size_ = input_shapes[0] * input_shapes[n_dim - 1];
+  outer_size_ = input_shapes.at(0) * input_shapes.at(n_dim - 1);
   inner_size_ = 1;
   for (size_t i = 0; i < n_dim - 1; ++i) {
-    inner_size_ *= input_shapes[i];
+    inner_size_ *= input_shapes.at(i);
   }
-  param_->channel_ = input_shapes[n_dim - 1];
+  param_->channel_ = input_shapes.at(n_dim - 1);
   return RET_OK;
 }

File: (path not shown)

@@ -39,9 +39,9 @@ int LayerNormCPUKernel::ReSize() {
   inner_size_ = 1;
   for (size_t i = 0; i < shape.size(); ++i) {
     if (i + param_->normalized_dims_ < shape.size()) {
-      outer_size_ *= shape[i];
+      outer_size_ *= shape.at(i);
     } else {
-      inner_size_ *= shape[i];
+      inner_size_ *= shape.at(i);
     }
   }
   return RET_OK;

File: (path not shown)

@@ -42,10 +42,10 @@ int LocalResponseNormCPUKernel::DoLocalResponseNorm(int task_id) {
   auto in_shape = input_tensor->shape();
   MS_ASSERT(in_shape.size() == 4);
-  int batch = in_shape[0];
-  int height = in_shape[1];
-  int width = in_shape[2];
-  int channel = in_shape[3];
+  int batch = in_shape.at(0);
+  int height = in_shape.at(1);
+  int width = in_shape.at(2);
+  int channel = in_shape.at(3);
   int outer_size = batch * width * height;
   int stride = UP_DIV(outer_size, thread_count_);

File: (path not shown)

@@ -50,14 +50,14 @@ int LstmCPUKernel::InitParam() {
   auto input = in_tensors_.front();
   MS_ASSERT(input != nullptr);
   std::vector<int> in_shape = input->shape();
-  lstm_parm_->seq_len_ = in_shape[0];
-  lstm_parm_->batch_ = in_shape[1];
-  lstm_parm_->input_size_ = in_shape[2];
-  auto weight_i = in_tensors_[1];
+  lstm_parm_->seq_len_ = in_shape.at(0);
+  lstm_parm_->batch_ = in_shape.at(1);
+  lstm_parm_->input_size_ = in_shape.at(2);
+  auto weight_i = in_tensors_.at(1);
   MS_ASSERT(weight_i != nullptr);
   std::vector<int> w_shape = weight_i->shape();
-  lstm_parm_->hidden_size_ = w_shape[1] / 4;
+  lstm_parm_->hidden_size_ = w_shape.at(1) / 4;
   lstm_parm_->input_step_ = lstm_parm_->batch_ * lstm_parm_->input_size_;
   lstm_parm_->output_step_ = lstm_parm_->bidirectional_ ? 2 * lstm_parm_->batch_ * lstm_parm_->hidden_size_

File: (path not shown)

@@ -58,8 +58,9 @@ void MatmulCPUKernel::FreeTmpBuffer() {
 }
 int MatmulCPUKernel::MallocMatrixABuffer() {
-  auto a_shape = in_tensors_[0]->shape();
+  auto a_shape = in_tensors_.at(0)->shape();
   int batch = 1;
+  MS_ASSERT(a_shape.size() >= 2);
   for (size_t i = 0; i < a_shape.size() - 2; ++i) {
     batch *= a_shape[i];
   }
@@ -102,11 +103,12 @@ int MatmulCPUKernel::MallocMatrixABuffer() {
 }
 int MatmulCPUKernel::MallocMatrixBBuffer() {
-  auto b_shape = in_tensors_[1]->shape();
+  auto b_shape = in_tensors_.at(1)->shape();
   if (b_shape.empty()) {
     return RET_OK;
   }
   int batch = 1;
+  MS_ASSERT(b_shape.size() >= 2);
   for (size_t i = 0; i < b_shape.size() - 2; ++i) {
     batch *= b_shape[i];
   }
@@ -133,11 +135,11 @@ int MatmulCPUKernel::MallocMatrixBBuffer() {
 }
 int MatmulCPUKernel::InitBias() {
-  auto b_shape = in_tensors_[1]->shape();
-  auto c_shape = out_tensors_[0]->shape();
+  auto b_shape = in_tensors_.at(1)->shape();
+  auto c_shape = out_tensors_.at(0)->shape();
   params_->col_ = params_->b_const_
-                    ? (params_->b_transpose_ ? b_shape[b_shape.size() - 2] : b_shape[b_shape.size() - 1])
-                    : (c_shape[c_shape.size() - 1]);
+                    ? (params_->b_transpose_ ? b_shape.at(b_shape.size() - 2) : b_shape.at(b_shape.size() - 1))
+                    : (c_shape.at(c_shape.size() - 1));
   params_->col_8_ = UP_ROUND(params_->col_, 8);
   auto col_tmp = is_vector_a_ ? params_->col_ : params_->col_8_;
   if (bias_ptr_ == nullptr) {
@@ -221,15 +223,15 @@ void MatmulCPUKernel::InitMatrixB(const float *src_ptr, float *dst_ptr) {
 }
 int MatmulCPUKernel::Init() {
-  params_->a_const_ = (in_tensors_[0]->data_c() != nullptr);
-  params_->b_const_ = (in_tensors_[1]->data_c() != nullptr);
+  params_->a_const_ = (in_tensors_.at(0)->data_c() != nullptr);
+  params_->b_const_ = (in_tensors_.at(1)->data_c() != nullptr);
   if (params_->a_const_) {
     auto ret = MallocMatrixABuffer();
     if (ret != RET_OK) {
       MS_LOG(ERROR) << "Matmul fp32 malloc matrix buffer failed";
       return RET_ERROR;
     }
-    InitMatrixA(reinterpret_cast<float *>(in_tensors_[0]->data_c()), a_pack_ptr_);
+    InitMatrixA(reinterpret_cast<float *>(in_tensors_.at(0)->data_c()), a_pack_ptr_);
     a_ptr_ = a_pack_ptr_;
   }
   if (params_->b_const_) {
@@ -238,7 +240,7 @@ int MatmulCPUKernel::Init() {
      MS_LOG(ERROR) << "Matmul fp32 malloc matrix B buffer failed";
      return RET_ERROR;
    }
-    InitMatrixB(reinterpret_cast<float *>(in_tensors_[1]->data_c()), b_pack_ptr_);
+    InitMatrixB(reinterpret_cast<float *>(in_tensors_.at(1)->data_c()), b_pack_ptr_);
    b_ptr_ = b_pack_ptr_;
    // init bias
    ret = InitBias();
@@ -281,9 +283,9 @@ int MatmulFloatRun(void *cdata, int task_id) {
 }
 int MatmulCPUKernel::Run() {
-  auto a_src = reinterpret_cast<float *>(in_tensors_[0]->data_c());
-  auto b_src = reinterpret_cast<float *>(in_tensors_[1]->data_c());
-  auto c_src = reinterpret_cast<float *>(out_tensors_[0]->data_c());
+  auto a_src = reinterpret_cast<float *>(in_tensors_.at(0)->data_c());
+  auto b_src = reinterpret_cast<float *>(in_tensors_.at(1)->data_c());
+  auto c_src = reinterpret_cast<float *>(out_tensors_.at(0)->data_c());
   if (!params_->a_const_ || IsTrain()) {
     if (a_pack_ptr_ != nullptr) {
@@ -356,8 +358,8 @@ int MatmulCPUKernel::Run() {
 int MatmulCPUKernel::Eval() {
   // Copy weights after training
-  auto a_src = reinterpret_cast<float *>(in_tensors_[0]->data_c());
-  auto b_src = reinterpret_cast<float *>(in_tensors_[1]->data_c());
+  auto a_src = reinterpret_cast<float *>(in_tensors_.at(0)->data_c());
+  auto b_src = reinterpret_cast<float *>(in_tensors_.at(1)->data_c());
   LiteKernel::Eval();
   if (params_->a_const_) {
     if (a_pack_ptr_ == nullptr) {
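Alongside the indexing change, this file gains two MS_ASSERT guards before the batch loops. The reason is worth spelling out: shape.size() returns an unsigned size_t, so for a rank-0 or rank-1 shape the expression size() - 2 wraps around to a huge value instead of going negative, and the loop bound explodes. A small illustrative sketch of the wraparound (assumed example, not code from the patch):

#include <cstddef>
#include <iostream>
#include <vector>

int main() {
  std::vector<int> a_shape = {8};  // rank-1 shape
  // Unsigned arithmetic: 1 - 2 wraps to SIZE_MAX (18446744073709551615 on
  // 64-bit targets), so "i < a_shape.size() - 2" would admit an absurd bound.
  std::size_t bound = a_shape.size() - 2;
  std::cout << bound << "\n";
  // Asserting a_shape.size() >= 2 up front rejects such shapes before the
  // loop bound is ever computed.
  return 0;
}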

File: (path not shown)

@@ -28,8 +28,8 @@ int Nchw2NhwcCPUKernel::Init() { return RET_OK; }
 int Nchw2NhwcCPUKernel::ReSize() { return RET_OK; }
 int Nchw2NhwcCPUKernel::Run() {
-  auto input = in_tensors_[0];
-  auto output = out_tensors_[0];
+  auto input = in_tensors_.at(0);
+  auto output = out_tensors_.at(0);
   if (input->shape().size() == 4) {
     if (input->data_type() == kNumberTypeFloat32) {

File: (path not shown)

@@ -28,8 +28,8 @@ int Nhwc2NchwCPUKernel::Init() { return RET_OK; }
 int Nhwc2NchwCPUKernel::ReSize() { return RET_OK; }
 int Nhwc2NchwCPUKernel::Run() {
-  auto input = in_tensors_[0];
-  auto output = out_tensors_[0];
+  auto input = in_tensors_.at(0);
+  auto output = out_tensors_.at(0);
   if (input->shape().size() == 4) {
     if (input->data_type() == kNumberTypeFloat32) {

File: (path not shown)

@@ -122,13 +122,13 @@ int NonMaxSuppressionCPUKernel::Run() {
     return RET_ERROR;
   }
   constexpr size_t kBatchIndex = 0;
-  if (score_dims[kBatchIndex] != box_dims[kBatchIndex]) {
+  if (score_dims.at(kBatchIndex) != box_dims.at(kBatchIndex)) {
     MS_LOG(ERROR) << "Boxes tensor batch num should be equal to scores tensor's batch num.";
     return RET_ERROR;
   }
   constexpr size_t kScoreDimsBoxNumIndex = 2;
   constexpr size_t kBoxDimsBoxNumIndex = 1;
-  if (score_dims[kScoreDimsBoxNumIndex] != box_dims[kBoxDimsBoxNumIndex]) {
+  if (score_dims.at(kScoreDimsBoxNumIndex) != box_dims.at(kBoxDimsBoxNumIndex)) {
     MS_LOG(ERROR) << "Boxes tensor spatial dimension should be equal to scores tensor's spatial dimension.";
     return RET_ERROR;
   }
@@ -138,10 +138,10 @@ int NonMaxSuppressionCPUKernel::Run() {
     return RET_ERROR;
   }
-  int batch_num = score_dims[kBatchIndex];
+  int batch_num = score_dims.at(kBatchIndex);
   constexpr size_t kClassIndex = 1;
-  int class_num = score_dims[kClassIndex];
-  int box_num = score_dims[kScoreDimsBoxNumIndex];
+  int class_num = score_dims.at(kClassIndex);
+  int box_num = score_dims.at(kScoreDimsBoxNumIndex);
   float *scores_data = reinterpret_cast<float *>(score_tensor->data_c());
   if (scores_data == nullptr) {
     MS_LOG(ERROR) << "score tensor data nullptr";

File: (path not shown)

@@ -50,11 +50,11 @@ int PowerCPUKernel::Run() {
 }
 int PowerCPUKernel::RunImpl(int task_id) {
-  auto x_addr = reinterpret_cast<float *>(in_tensors_[0]->MutableData());
+  auto x_addr = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
   MS_ASSERT(x_addr);
-  auto output_addr = reinterpret_cast<float *>(out_tensors_[0]->MutableData());
+  auto output_addr = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
   MS_ASSERT(output_addr);
-  auto size = in_tensors_[0]->ElementsNum();
+  auto size = in_tensors_.at(0)->ElementsNum();
   int stride = UP_DIV(size, thread_count_);
   int len = MSMIN(stride, size - stride * task_id);
   float *exp_addr = nullptr;

File: (path not shown)

@@ -52,13 +52,13 @@ int PReluCPUKernel::DoExcute(int task_id) {
 int PReluCPUKernel::ProcessInput() {
   // input tensor
-  auto input_tensor = in_tensors_[0];
+  auto input_tensor = in_tensors_.at(0);
   auto in_shape = input_tensor->shape();
   auto n_dim = in_shape.size();
   auto channel_num = in_shape.at(n_dim - 1);
   int input_plane = 1;
   for (size_t i = 0; i < n_dim - 1; ++i) {
-    input_plane *= in_shape[i];
+    input_plane *= in_shape.at(i);
   }
   int tile_block = UP_DIV(input_plane, TILE_NUM);
   prelu_param_->input_num_ = input_tensor->ElementsNum();
@@ -76,7 +76,7 @@ int PReluCPUKernel::ProcessInput() {
 int PReluCPUKernel::ProcessShareChannelInput() {
   // input tensor
-  auto input_tensor = in_tensors_[0];
+  auto input_tensor = in_tensors_.at(0);
   prelu_param_->input_num_ = input_tensor->ElementsNum();
 #ifdef ENABLE_ARM64
   prelu_param_->tile_block_ = UP_DIV(prelu_param_->input_num_, 64);

View File

@@ -34,7 +34,7 @@ int RankCPUKernel::ReSize() { return RET_OK; }
 int RankCPUKernel::Run() {
   auto output_ptr = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
   MS_ASSERT(output_ptr);
-  auto in_shape = in_tensors_[0]->shape();
+  auto in_shape = in_tensors_.at(0)->shape();
   auto rank = in_shape.size();
   Rank(output_ptr, rank);
   return RET_OK;

View File

@@ -74,7 +74,7 @@ void FullconnectionInt8CPUKernel::FreeTmpBuffer() {
 }
 int FullconnectionInt8CPUKernel::MallocQuantParam() {
-  auto weight_tensor = in_tensors_[1];
+  auto weight_tensor = in_tensors_.at(1);
   auto weight_quant_params = weight_tensor->quant_params();
   int col = weight_tensor->shape().front();
   filter_per_channel_ = (weight_quant_params.size() > 1);
@@ -111,15 +111,15 @@ int FullconnectionInt8CPUKernel::Init() {
     return ret;
   }
-  auto in_quant_params = in_tensors_[0]->quant_params();
+  auto in_quant_params = in_tensors_.at(0)->quant_params();
   quant_.input_.zp_ = in_quant_params.front().zeroPoint;
   quant_.input_.scale_ = in_quant_params.front().scale;
-  auto out_quant_params = out_tensors_[0]->quant_params();
+  auto out_quant_params = out_tensors_.at(0)->quant_params();
   quant_.output_.zp_ = out_quant_params.front().zeroPoint;
   quant_.output_.scale_ = out_quant_params.front().scale;
-  auto weight_tensor = in_tensors_[1];
+  auto weight_tensor = in_tensors_.at(1);
   fc_param_->b_const_ = (weight_tensor->data_c() != nullptr);
   int weight_quant_num = filter_per_channel_ ? weight_tensor->shape().front() : 1;
   auto weight_quant_params = weight_tensor->quant_params();
@@ -148,12 +148,12 @@ int FullconnectionInt8CPUKernel::Init() {
 void FullconnectionInt8CPUKernel::InitParam() {
   int row = 1;
-  for (size_t i = 0; i < out_tensors_[0]->shape().size() - 1; ++i) {
-    row *= (out_tensors_[0]->shape())[i];
+  for (size_t i = 0; i < out_tensors_.at(0)->shape().size() - 1; ++i) {
+    row *= (out_tensors_.at(0)->shape()).at(i);
   }
   fc_param_->row_ = row;
-  fc_param_->col_ = out_tensors_[0]->shape().back();
-  fc_param_->deep_ = (in_tensors_[1]->shape())[1];
+  fc_param_->col_ = out_tensors_.at(0)->shape().back();
+  fc_param_->deep_ = (in_tensors_.at(1)->shape()).at(1);
   fc_param_->row_4_ = UP_ROUND(fc_param_->row_, C4NUM);
   fc_param_->row_8_ = UP_ROUND(fc_param_->row_, C8NUM);
@@ -207,13 +207,13 @@ int FullconnectionInt8CPUKernel::ReSize() {
       FreeTmpBuffer();
       return RET_MEMORY_FAILED;
    }
-    memcpy(bias_ptr_, in_tensors_[2]->data_c(), fc_param_->col_ * sizeof(int));
+    memcpy(bias_ptr_, in_tensors_.at(2)->data_c(), fc_param_->col_ * sizeof(int));
   } else {
     bias_ptr_ = nullptr;
   }
   if (fc_param_->b_const_) {
-    auto weight_data = reinterpret_cast<int8_t *>(in_tensors_[1]->data_c());
+    auto weight_data = reinterpret_cast<int8_t *>(in_tensors_.at(1)->data_c());
     RowMajor2Row16x4MajorInt8(weight_data, pack_b_ptr_, fc_param_->col_, fc_param_->deep_);
     CalcWeightBiasSums(weight_data, fc_param_->deep_, fc_param_->col_, quant_.input_.zp_, quant_.filter_zp_, bias_ptr_,
                        weight_bias_sums_, ColMajor, filter_per_channel_);
@@ -254,20 +254,20 @@ int FcInt8Run(void *cdata, int task_id) {
 }
 int FullconnectionInt8CPUKernel::Run() {
-  auto input_ptr = reinterpret_cast<int8_t *>(in_tensors_[0]->data_c());
+  auto input_ptr = reinterpret_cast<int8_t *>(in_tensors_.at(0)->data_c());
   RowMajor2Row16x4MajorInt8(input_ptr, pack_a_ptr_, fc_param_->row_, fc_param_->deep_);
   int32_t tmp_weight_zp = filter_per_channel_ ? 1 : quant_.filter_zp_[0];
   CalcInputSums(input_ptr, fc_param_->row_, fc_param_->deep_, tmp_weight_zp, input_sums_, RowMajor);
   if (!fc_param_->b_const_) {
-    auto weight_data = reinterpret_cast<int8_t *>(in_tensors_[1]->data_c());
+    auto weight_data = reinterpret_cast<int8_t *>(in_tensors_.at(1)->data_c());
     RowMajor2Row16x4MajorInt8(weight_data, pack_b_ptr_, fc_param_->col_, fc_param_->deep_);
     CalcWeightBiasSums(weight_data, fc_param_->deep_, fc_param_->col_, quant_.input_.zp_, quant_.filter_zp_, bias_ptr_,
                        weight_bias_sums_, ColMajor, filter_per_channel_);
   }
-  c_ptr_ = reinterpret_cast<int8_t *>(out_tensors_[0]->data_c());
+  c_ptr_ = reinterpret_cast<int8_t *>(out_tensors_.at(0)->data_c());
   auto ret = ParallelLaunch(this->context_->thread_pool_, FcInt8Run, this, thread_count_);
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "ParallelLaunch failed";

View File

@@ -77,11 +77,11 @@ int GatherNdInt8CPUKernel::ReSize() {
   auto in_shape = in_tensors_.front()->shape();
   int in_rank = in_shape.size();
-  int idx_lastshape = indices_shape[indices_rank - 1];
+  int idx_lastshape = indices_shape.at(indices_rank - 1);
   auto indices_ptr = reinterpret_cast<int8_t *>(indices_tensor->MutableData());
   area_ = 1;
   for (int i = idx_lastshape; i < in_rank; ++i) {
-    area_ *= in_shape[i];
+    area_ *= in_shape.at(i);
   }
   std::vector<int> in_stride(in_rank);
   in_stride[in_rank - 1] = 1;

View File

@@ -61,7 +61,7 @@ int GatherInt8CPUKernel::DoGather(int task_id) {
   int in_rank = in_shape.size();
   int indices_element_size = indices_tensor->ElementsNum();
-  const int limit = in_shape[axis_];
+  const int limit = in_shape.at(axis_);
   for (int i = 0; i < indices_element_size; ++i) {
     if (indices_ptr[i] >= limit) {
       MS_LOG(ERROR) << " indice data: " << indices_ptr[i] << " is not in [ 0, " << limit - 1 << " ]";
@@ -71,12 +71,12 @@ int GatherInt8CPUKernel::DoGather(int task_id) {
   int outer_size = 1;
   for (int i = 0; i < axis_; ++i) {
-    outer_size *= in_shape[i];
+    outer_size *= in_shape.at(i);
   }
   int inner_size = 1;
   for (int i = axis_ + 1; i < in_rank; ++i) {
-    inner_size *= in_shape[i];
+    inner_size *= in_shape.at(i);
   }
   int stride = UP_DIV(outer_size, thread_count_);
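The `outer_size`/`inner_size` products above flatten the input around the gather axis: the dimensions before `axis_` form the outer loop, and the dimensions after it form the contiguous block copied per index. A worked example, assuming an input shape of {2, 3, 4, 5} and axis 1:

#include <vector>

int main() {
  std::vector<int> in_shape = {2, 3, 4, 5};
  const int axis = 1;
  // outer_size = 2 (dims before the axis); inner_size = 4 * 5 = 20 (dims after);
  // the valid index range along the axis is [0, in_shape.at(axis)) = [0, 3).
  int outer_size = 1, inner_size = 1;
  for (int i = 0; i < axis; ++i) outer_size *= in_shape.at(i);
  for (int i = axis + 1; i < static_cast<int>(in_shape.size()); ++i) inner_size *= in_shape.at(i);
  return (outer_size == 2 && inner_size == 20) ? 0 : 1;
}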

View File

@@ -89,9 +89,9 @@ int LayerNormInt8CPUKernel::ReSize() {
   inner_size_ = 1;
   for (size_t i = 0; i < shape.size(); ++i) {
     if (i + param_->normalized_dims_ < shape.size()) {
-      outer_size_ *= shape[i];
+      outer_size_ *= shape.at(i);
     } else {
-      inner_size_ *= shape[i];
+      inner_size_ *= shape.at(i);
     }
   }

View File

@@ -85,7 +85,7 @@ int LeakyReluInt8CPUKernel::ReSize() {
   auto *out_tensor = out_tensors_.at(kOutputIndex);
   auto input_dim = input_tensor->shape().size();
   quant_prelu_parm_.input_dim_ = input_dim;
-  quant_prelu_parm_.element_num = in_tensors_[0]->Size();
+  quant_prelu_parm_.element_num = in_tensors_.at(0)->Size();
   auto input_shape = input_tensor->shape();
   if (quant_prelu_parm_.in_shape_ != nullptr) {
     free(quant_prelu_parm_.in_shape_);

View File

@@ -39,12 +39,14 @@ int MatmulInt8CPUKernel::Init() {
 int MatmulInt8CPUKernel::ReSize() {
   FreeTmpBuffer();
   int batch = 1;
-  auto x_shape = in_tensors_[0]->shape();
-  auto o_shape = out_tensors_[0]->shape();
+  auto x_shape = in_tensors_.at(0)->shape();
+  auto o_shape = out_tensors_.at(0)->shape();
+  MS_ASSERT(x_shape.size() >= 2);
   for (size_t i = 0; i < x_shape.size() - 2; ++i) {
     batch *= x_shape[i];
   }
   params_->batch = batch;
+  MS_ASSERT(o_shape.size() >= 2);
   params_->row_ = o_shape[o_shape.size() - 2];
   params_->col_ = o_shape[o_shape.size() - 1];
   params_->deep_ = params_->a_transpose_ ? x_shape[x_shape.size() - 2] : x_shape[x_shape.size() - 1];
@@ -77,25 +79,25 @@ int MatmulInt8CPUKernel::ReSize() {
   thread_count_ = MSMIN(thread_count_, UP_DIV(params_->col_4_, 4));
   thread_stride_ = UP_DIV(UP_DIV(params_->col_4_, 4), thread_count_);
-  auto input_tensor = in_tensors_[0];
+  auto input_tensor = in_tensors_.at(0);
   auto params = input_tensor->quant_params();
   MS_ASSERT(params.size() == 1);
   quant_params_.input.zp_ = params.front().zeroPoint;
   quant_params_.input.scale_ = params.front().scale;
-  auto weight_tensor = in_tensors_[1];
+  auto weight_tensor = in_tensors_.at(1);
   params = weight_tensor->quant_params();
   MS_ASSERT(params.size() == 1);
   quant_params_.weight.zp_ = params.front().zeroPoint;
   quant_params_.weight.scale_ = params.front().scale;
-  auto output_tensor = out_tensors_[0];
+  auto output_tensor = out_tensors_.at(0);
   params = output_tensor->quant_params();
   MS_ASSERT(params.size() == 1);
   quant_params_.output.zp_ = params.front().zeroPoint;
   quant_params_.output.scale_ = params.front().scale;
-  params_->b_const_ = (in_tensors_[1]->data_c() != nullptr);
+  params_->b_const_ = (in_tensors_.at(1)->data_c() != nullptr);
   if (params_->b_const_) {
-    auto b_ptr = reinterpret_cast<int8_t *>(in_tensors_[1]->data_c());
+    auto b_ptr = reinterpret_cast<int8_t *>(in_tensors_.at(1)->data_c());
     for (int i = 0; i < params_->batch; ++i) {
       auto cur_b = b_ptr + i * params_->deep_ * params_->col_;
       auto cur_b_pack = b_c16x4_batch_ + i * params_->col_4_ * params_->deep_16_;
@@ -152,14 +154,14 @@ int MatmulInt8Run(void *cdata, int task_id) {
 }
 int MatmulInt8CPUKernel::Run() {
-  auto a_ptr = reinterpret_cast<int8_t *>(in_tensors_[0]->data_c());
-  auto c_ptr = reinterpret_cast<int8_t *>(out_tensors_[0]->data_c());
+  auto a_ptr = reinterpret_cast<int8_t *>(in_tensors_.at(0)->data_c());
+  auto c_ptr = reinterpret_cast<int8_t *>(out_tensors_.at(0)->data_c());
   auto a_stride = params_->row_ * params_->deep_;
   auto b_stride = params_->deep_ * params_->col_;
   auto c_stride = params_->row_ * params_->col_;
   if (!params_->b_const_) {
-    auto b_ptr = reinterpret_cast<int8_t *>(in_tensors_[1]->data_c());
+    auto b_ptr = reinterpret_cast<int8_t *>(in_tensors_.at(1)->data_c());
     for (int i = 0; i < params_->batch; ++i) {
       auto cur_b = b_ptr + i * b_stride;
       auto cur_b_pack = b_c16x4_batch_ + i * params_->col_4_ * params_->deep_16_;
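The two `MS_ASSERT` lines added in the first hunk of this file guard the `x_shape.size() - 2` arithmetic: `size()` returns an unsigned `size_t`, so for a rank-0 or rank-1 shape the subtraction wraps around to a huge value instead of going negative, and the loop bound and trailing indices would run far out of range. A minimal sketch of the hazard:

#include <cassert>
#include <cstddef>
#include <vector>

int main() {
  std::vector<int> shape = {8};  // rank-1 shape
  // shape.size() - 2 is size_t arithmetic: 1 - 2 wraps to SIZE_MAX, so
  // `for (size_t i = 0; i < shape.size() - 2; ++i)` would iterate wildly
  // and shape[shape.size() - 2] would index out of bounds.
  size_t wrapped = shape.size() - 2;
  assert(wrapped > shape.size());  // demonstrates the wraparound
  // The commit prevents this with MS_ASSERT(x_shape.size() >= 2) before the loop.
  return 0;
}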

View File

@@ -87,8 +87,8 @@ int PadInt8CPUKernel::SetQuantParam() {
 }
 int PadInt8CPUKernel::InitPadParam() {
-  auto in_dims = in_tensors_[0]->shape();
-  auto out_dims = out_tensors_[0]->shape();
+  auto in_dims = in_tensors_.at(0)->shape();
+  auto out_dims = out_tensors_.at(0)->shape();
   int ndims = in_dims.size();
   int in[] = {1, 1, 1, 1};
@@ -265,8 +265,8 @@ int PadInt8CPUKernel::CopyPaddingFromInput() {
 }
 int PadInt8CPUKernel::Run() {
-  in_data_ = reinterpret_cast<int8_t *>(in_tensors_[0]->MutableData());
-  out_data_ = reinterpret_cast<int8_t *>(out_tensors_[0]->MutableData());
+  in_data_ = reinterpret_cast<int8_t *>(in_tensors_.at(0)->MutableData());
+  out_data_ = reinterpret_cast<int8_t *>(out_tensors_.at(0)->MutableData());
   int error_code;
   if (pad_param_->pad_mode_ == static_cast<int>(schema::PaddingMode_CONSTANT)) {

View File

@@ -66,7 +66,7 @@ int PowerInt8CPUKernel::DoPower(int task_id) {
   int8_t *output_data = reinterpret_cast<int8_t *>(out_tensors_[0]->MutableData());
   MS_ASSERT(output_data);
-  auto size = in_tensors_[0]->ElementsNum();
+  auto size = in_tensors_.at(0)->ElementsNum();
   int stride = UP_DIV(size, op_parameter_->thread_num_);
   int count = MSMIN(stride, size - stride * task_id);
   int8_t *exp_ptr = nullptr;