!48772 Dynamic sequence supports tensor elements.

Merge pull request !48772 from gaoyong10/dynamic_shape_02
i-robot 2023-02-23 11:33:48 +00:00 committed by Gitee
commit bf270c76c1
5 changed files with 123 additions and 8 deletions

View File

@@ -48,6 +48,33 @@ Backend::Backend(const std::string &name) : name_(name), is_multi_graph_sink_(fa
}
namespace {
using Tensor = tensor::Tensor;
bool CheckValidTensorTuple(const std::vector<ValuePtr> &values) {
if (values.empty() || values[0] == nullptr || (!values[0]->isa<tensor::Tensor>())) {
return false;
}
const auto &const_tensor = values[0]->cast<TensorPtr>();
MS_EXCEPTION_IF_NULL(const_tensor);
const auto &const_shape = const_tensor->shape();
const auto &const_type_id = const_tensor->data_type();
size_t const_size = const_tensor->Size();
for (size_t i = 1; i < values.size(); ++i) {
if (values[i] == nullptr || (!values[i]->isa<Tensor>())) {
MS_LOG(ERROR) << "Invalid value:" << (values[i] == nullptr ? "nullptr" : values[i]->ToString()) << " index:" << i
<< " in value tuple";
return false;
}
const auto &tensor = values[i]->cast<TensorPtr>();
MS_EXCEPTION_IF_NULL(tensor);
const auto &shape = tensor->shape();
const auto &type_id = tensor->data_type();
size_t size = tensor->Size();
if (shape != const_shape || type_id != const_type_id || size != const_size) {
return false;
}
}
return true;
}
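// Example for CheckValidTensorTuple above (illustrative): three float32 tensors
// of shape [2, 2] pass the check; mixing in a [3] tensor or an int32 tensor
// fails, since shape, dtype and byte size must match across all elements.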
// In a dynamic sequence, since the number of members is not determined at compile time, the entire sequence needs
// to be placed in a single tensor, and the shape of the tuple needs to be recorded in the tensor, so that the shape
// of the tensor can be accurately restored during the dynamic shape derivation process at runtime.
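// For example (illustrative): a tuple of three float32 tensors of shape [2, 2]
// becomes one tensor of shape [3, 2, 2] whose base shape records
// TupleShape([2, 2], [2, 2], [2, 2]), so the tuple can be rebuilt at runtime.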
@@ -59,11 +86,49 @@ TensorPtr SequenceToTensor(const ValuePtr &value) {
const auto &sequence_value = value->cast<ValueSequencePtr>();
const auto &values = sequence_value->value();
- if (values.empty() || values[0] == nullptr || (!values[0]->isa<Scalar>())) {
+ if (values.empty() || values[0] == nullptr || ((!values[0]->isa<Scalar>()) && (!values[0]->isa<Tensor>()))) {
MS_LOG(WARNING) << "Empty sequence in sequence value:" << value->ToString();
return std::make_shared<tensor::Tensor>();
}
if (values[0]->isa<Tensor>()) {
MS_LOG(DEBUG) << "Check dynamic tuple tensor";
if (!CheckValidTensorTuple(values)) {
MS_LOG(EXCEPTION) << "Invalid dynamic sequence tuple:" << value->ToString();
}
const auto &tensor = values[0]->cast<TensorPtr>();
MS_EXCEPTION_IF_NULL(tensor);
size_t size = tensor->Size();
const auto &type_id = tensor->data_type();
ShapeVector shape_vector{SizeToLong(values.size())};
auto single_shape_vector = tensor->shape();
const auto &single_shape = std::make_shared<abstract::Shape>(single_shape_vector);
shape_vector.insert(shape_vector.end(), single_shape_vector.begin(), single_shape_vector.end());
auto new_tensor = std::make_shared<tensor::Tensor>(type_id, shape_vector);
MS_EXCEPTION_IF_NULL(new_tensor);
const auto dst_ptr = new_tensor->data_c();
MS_EXCEPTION_IF_NULL(dst_ptr);
MS_LOG(DEBUG) << "Copy start, dst size:" << new_tensor->data().nbytes();
for (size_t i = 0; i < values.size(); ++i) {
const auto &sub_value = values[i];
MS_EXCEPTION_IF_NULL(sub_value);
const auto &src_tensor = sub_value->cast<TensorPtr>();
MS_EXCEPTION_IF_NULL(src_tensor);
MS_EXCEPTION_IF_NULL(src_tensor->data_c());
auto ret = memcpy_s((reinterpret_cast<char *>(dst_ptr)) + i * size,
static_cast<size_t>(new_tensor->data().nbytes()) - i * size, src_tensor->data_c(), size);
if (ret != EOK) {
MS_LOG(EXCEPTION) << "Failed to copy data into tensor, memcpy_s errno: " << ret;
}
}
}
const auto &element_shapes = std::vector<abstract::BaseShapePtr>(values.size(), single_shape);
new_tensor->set_base_shape(std::make_shared<abstract::TupleShape>(element_shapes));
MS_LOG(DEBUG) << "merge tensor from:" << value->ToString() << " to:" << new_tensor->ToString() << " tensor addr"
<< new_tensor;
return new_tensor;
}
// Create the tensor.
TensorPtr tensor;
MS_EXCEPTION_IF_NULL(values[0]->type());
@@ -87,6 +152,7 @@ TensorPtr SequenceToTensor(const ValuePtr &value) {
void PushInputTensor(const BaseRef &arg, std::vector<tensor::TensorPtr> *inputs, const AnfNodePtr &node) {
MS_EXCEPTION_IF_NULL(inputs);
if (node != nullptr && node->abstract() != nullptr && common::AnfAlgo::IsDynamicSequence(node)) {
MS_LOG(DEBUG) << "node:" << node->fullname_with_scope() << " abs:" << node->abstract()->ToString();
if (!utils::isa<ValuePtr>(arg)) {
MS_LOG(EXCEPTION) << "Invalid input for dynamic sequence node:" << node->DebugString();
}
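
To make the packing scheme concrete, here is a minimal standalone sketch of the flattening that SequenceToTensor performs, using plain byte buffers; PackedSequence and PackSequence are illustrative names, not MindSpore APIs:

#include <cstdint>
#include <cstring>
#include <vector>

// Pack N equally-sized element buffers into one contiguous buffer and record
// the combined shape [N, element_shape...], mirroring SequenceToTensor above.
struct PackedSequence {
  std::vector<int64_t> shape;  // [N, d0, d1, ...]
  std::vector<uint8_t> data;   // element payloads, laid out back to back
};

PackedSequence PackSequence(const std::vector<std::vector<uint8_t>> &elements,
                            const std::vector<int64_t> &element_shape) {
  PackedSequence packed;
  packed.shape.push_back(static_cast<int64_t>(elements.size()));
  packed.shape.insert(packed.shape.end(), element_shape.begin(), element_shape.end());
  const size_t element_size = elements.empty() ? 0 : elements[0].size();
  packed.data.resize(elements.size() * element_size);
  for (size_t i = 0; i < elements.size(); ++i) {
    // Like the memcpy_s loop: each element contributes exactly element_size bytes.
    std::memcpy(packed.data.data() + i * element_size, elements[i].data(), element_size);
  }
  return packed;
}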

View File

@@ -138,7 +138,8 @@ class COMMON_EXPORT AnfAlgo {
// set infer shapes and types of anf node
static void SetOutputInferTypeAndShape(const std::vector<TypeId> &types, const std::vector<ShapeVector> &shapes,
AnfNode *node, bool disable_dynamic_len = false);
- static void SetScalarTupleOutputInferType(const std::vector<TypeId> &types, const AnfNodePtr &node);
+ static void SetScalarTupleOutputInferType(const std::vector<TypeId> &types, const std::vector<ShapeVector> &shapes,
+ const AnfNodePtr &node);
// set output shape ptr
static void SetOutputTypeAndDetailShape(const std::vector<TypeId> &types,
const std::vector<abstract::BaseShapePtr> &shapes, AnfNode *node);

View File

@@ -482,7 +482,7 @@ void ControlActor::UpdateDynamicShapeInParameter() {
if (common::AnfAlgo::IsDynamicSequence(parameter)) {
const auto &shapes = BaseShapeToShapeVector(node->Shape());
std::vector<TypeId> types = std::vector(shapes.size(), input_device_tensors_[i]->type_id());
- common::AnfAlgo::SetScalarTupleOutputInferType(types, parameter);
+ common::AnfAlgo::SetScalarTupleOutputInferType(types, shapes, parameter);
continue;
}
common::AnfAlgo::SetOutputInferTypeAndShape({input_device_tensors_[i]->type_id()}, {shape}, parameter.get());
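
For illustration (hypothetical values), if the parameter's Shape() is a TupleShape with three [2, 2] entries and the device tensor holds float32, the dynamic-sequence branch above amounts to:

// Values are illustrative only; in the actor they come from node->Shape()
// and input_device_tensors_[i]->type_id().
std::vector<ShapeVector> shapes = {{2, 2}, {2, 2}, {2, 2}};
std::vector<TypeId> types(shapes.size(), kNumberTypeFloat32);  // one entry per element
common::AnfAlgo::SetScalarTupleOutputInferType(types, shapes, parameter);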

View File

@@ -351,9 +351,12 @@ void DataPrepareActor::UpdateDynamicShape(const AnfNodePtr &input_node, const Te
// If the shape of the tensor exists and is a tuple shape, it means the tensor is a tuple type, and the shape
// needs to be restored to tuple type when inferring the shape.
if (input_tensor->base_shape_ptr() != nullptr && input_tensor->base_shape_ptr()->isa<abstract::SequenceShape>()) {
MS_LOG(DEBUG) << "trans to scalar abs for node:" << input_node->fullname_with_scope()
<< " shape:" << input_tensor->base_shape_ptr()->ToString()
<< " abs:" << (input_node->abstract() == nullptr ? "nullptr" : input_node->abstract()->ToString());
shapes = BaseShapeToShapeVector(input_tensor->base_shape_ptr());
types = std::vector(shapes.size(), input_tensor->data_type());
- common::AnfAlgo::SetScalarTupleOutputInferType(types, input_node);
+ common::AnfAlgo::SetScalarTupleOutputInferType(types, shapes, input_node);
return;
}
// In runtime, the dynamic len tag should be removed.
@@ -377,8 +380,11 @@ void DataPrepareActor::UpdateDeviceAddressForDataNode(const AnfNodePtr &input_no
kOpFormat_DEFAULT, kOpFormat_ND, kOpFormat_NCHW, kOpFormat_NHWC, kOpFormat_HWCN,
};
if (kNormalFormat.find(device_format) != kNormalFormat.end()) {
MS_LOG(DEBUG) << "Set device address:" << device_address << " size from:" << device_address->GetSize()
<< " to:" << tensor_data_size;
device_address->SetSize(tensor_data_size);
} else {
MS_LOG(DEBUG) << "Update data node device address size";
// Size of 5D format device_address is larger than tensor_data_size.
UpdateDataNodeDeviceAddressSize(input_node, input_tensor, device_address);
}
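
A short worked illustration of why the two sizing branches above differ; the format names are existing MindSpore constants, the numbers are illustrative:

// A float32 tensor of shape [1, 3, 5, 5]:
// - kOpFormat_NCHW (in kNormalFormat): device size = 1*3*5*5*4 = 300 bytes,
//   so the device address can adopt tensor_data_size directly.
// - kOpFormat_NC1HWC0 (5D): C is padded to C1*C0 with C0 = 16, giving
//   1*1*5*5*16*4 = 1600 bytes, larger than tensor_data_size, so
//   UpdateDataNodeDeviceAddressSize must recompute it instead.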

View File

@@ -636,14 +636,27 @@ ShapeVector AnfAlgo::GetOutputInferShape(const AnfNodePtr &node, const abstract:
}
return GetShape(base_shape);
} else if (base_shape->isa<abstract::TupleShape>()) {
auto tuple_shape = base_shape->cast<abstract::TupleShapePtr>();
} else if (base_shape->isa<abstract::SequenceShape>()) {
auto tuple_shape = base_shape->cast<abstract::SequenceShapePtr>();
MS_EXCEPTION_IF_NULL(tuple_shape);
if (tuple_shape->size() == 0) {
return ShapeVector();
}
if (IsDynamicSequence(node) || is_real_squence_output) {
- return ShapeVector{SizeToLong(tuple_shape->size())};
const auto &sequence_abs = node->abstract()->cast<abstract::AbstractSequencePtr>();
MS_EXCEPTION_IF_NULL(sequence_abs);
const auto &element_abs = sequence_abs->dynamic_len_element_abs();
ShapeVector shape_vector = {SizeToLong(tuple_shape->size())};
if (element_abs == nullptr || (!element_abs->isa<abstract::AbstractTensor>())) {
return shape_vector;
}
MS_LOG(DEBUG) << "Element of dynamic sequence is tensor:" << element_abs->ToString()
<< " node:" << node->fullname_with_scope();
const auto &element_shape = element_abs->BuildShape();
MS_EXCEPTION_IF_NULL(element_shape);
const auto &element_shape_vector = GetOutputInferShape(node, element_shape, 0);
shape_vector.insert(shape_vector.end(), element_shape_vector.begin(), element_shape_vector.end());
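// Illustrative example: a dynamic tuple of five [2, 2] tensor elements yields
// shape_vector = {5, 2, 2}, mirroring the packed layout built by SequenceToTensor.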
return shape_vector;
}
if (output_idx >= tuple_shape->size()) {
MS_LOG(EXCEPTION) << "Output index " << output_idx << "is larger than output number " << tuple_shape->size()
@@ -694,6 +707,12 @@ TypeId AnfAlgo::GetOutputInferDataType(const TypePtr &type, size_t output_idx) {
MS_EXCEPTION_IF_NULL(tuple_ptr);
if (tuple_ptr->dynamic_len()) {
MS_EXCEPTION_IF_NULL(tuple_ptr->dynamic_element_type());
if (tuple_ptr->dynamic_element_type()->isa<TensorType>()) {
const auto &tensor_type = tuple_ptr->dynamic_element_type()->cast<TensorTypePtr>();
MS_EXCEPTION_IF_NULL(tensor_type);
const auto &element_type = tensor_type->element();
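// e.g. a dynamic tuple typed Tuple[Tensor[float32]] reports kNumberTypeFloat32
// here, unwrapping the TensorType to its element's type id (illustrative example).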
return element_type->type_id();
}
return tuple_ptr->dynamic_element_type()->type_id();
}
if (tuple_ptr->size() == 0) {
@@ -781,10 +800,33 @@ void DeleteDynamicLen(AnfNode *node) {
}
} // namespace
- void AnfAlgo::SetScalarTupleOutputInferType(const std::vector<TypeId> &types, const AnfNodePtr &node) {
+ void AnfAlgo::SetScalarTupleOutputInferType(const std::vector<TypeId> &types, const std::vector<ShapeVector> &shapes,
+ const AnfNodePtr &node) {
MS_EXCEPTION_IF_NULL(node);
DeleteDynamicLen(node.get());
std::vector<abstract::AbstractBasePtr> abstract_list;
MS_LOG(DEBUG) << "Set scalar tuple output infer type for node:" << node->fullname_with_scope();
if (node->abstract() != nullptr && node->abstract()->isa<abstract::AbstractSequence>()) {
const auto &sequence_abs = node->abstract()->cast<abstract::AbstractSequencePtr>();
MS_EXCEPTION_IF_NULL(sequence_abs);
MS_LOG(DEBUG) << "Check abs:" << sequence_abs->ToString();
if (sequence_abs->dynamic_len_element_abs() != nullptr &&
sequence_abs->dynamic_len_element_abs()->isa<AbstractTensor>()) {
if (shapes.empty()) {
MS_LOG(EXCEPTION) << "Invalid shape for node:" << node->fullname_with_scope();
}
MS_LOG(DEBUG) << "Check dynmiac len element abs:" << sequence_abs->dynamic_len_element_abs()->ToString();
for (size_t i = 0; i < types.size(); ++i) {
// All elements of a dynamic sequence share one shape, so shapes[0] serves for every element.
ShapeVector shape = shapes[0];
auto abstract = std::make_shared<abstract::AbstractTensor>(TypeIdToType(types[i]), shape);
abstract_list.emplace_back(abstract);
}
auto abstract_tuple = std::make_shared<abstract::AbstractTuple>(abstract_list);
node->set_abstract(abstract_tuple);
return;
}
}
MS_LOG(DEBUG) << "Check abs for scalar";
for (size_t i = 0; i < types.size(); ++i) {
abstract::AbstractScalarPtr abstract = std::make_shared<abstract::AbstractScalar>(TypeIdToType(types[i]));
abstract_list.emplace_back(abstract);