!46397 Support MapParameter data processing.

Merge pull request !46397 from Margaret_wangrui/maptensor_pynative
This commit is contained in:
i-robot 2022-12-08 01:44:20 +00:00 committed by Gitee
commit 4c26ad74c8
No known key found for this signature in database
GPG Key ID: 173E9B9CA92EEF8F
9 changed files with 504 additions and 343 deletions

View File

@ -267,6 +267,13 @@ ParameterPtr ConstructRunOpParameter(const std::shared_ptr<KernelGraph> &graph,
kernel_build_info_builder->SetOutputsReshapeType({input_tensor->padding_type()});
AnfAlgo::SetOutputAddr(device_address, 0, param.get());
}
if (input_tensor->isa<tensor::MapTensor>()) {
auto map_tensor = input_tensor->cast<tensor::MapTensorPtr>();
auto map_tensor_abs = std::make_shared<abstract::AbstractMapTensor>(map_tensor);
AnfAlgo::SetSelectKernelBuildInfo(kernel_build_info_builder->Build(), param.get());
param->set_abstract(map_tensor_abs);
return param;
}
AnfAlgo::SetSelectKernelBuildInfo(kernel_build_info_builder->Build(), param.get());
// construct abstract of parameter
auto type_of_tensor = input_tensor->Dtype();

View File

@ -525,8 +525,36 @@ void UpdateOutputAbstract(const VectorRef &outputs, const session::BackendOpRunI
op_run_info->base_op_run_info.abstract = std::make_shared<abstract::AbstractTuple>(elements);
}
TensorPtr CreateOutputMapTensor(const AnfNodePtr &output_node, size_t output_index) {
  // Rebuild a MapTensor for the given node output from the hash-table
  // metadata recorded in the device address's user data.
  const auto &device_address = AnfAlgo::GetMutableOutputAddr(output_node, output_index, false);
  MS_EXCEPTION_IF_NULL(device_address);
  const auto &user_data = device_address->user_data();
  MS_EXCEPTION_IF_NULL(user_data);
  const auto &data_type = user_data->get<UserDataType>(kUserDataType);
  MS_EXCEPTION_IF_NULL(data_type);
  if (*data_type != UserDataType::kUserTypeHashTable) {
    MS_LOG(WARNING) << "Invalid user data type:" << *data_type;
    return nullptr;
  }
  // Hash-table outputs carry shape, key/value types and the default value in
  // user data; all four entries must be present.
  const auto &shape_vector = user_data->get<ShapeVector>(kHashTableShapeVector);
  MS_EXCEPTION_IF_NULL(shape_vector);
  const auto &key_type = user_data->get<TypeId>(kHashTableKeyType);
  MS_EXCEPTION_IF_NULL(key_type);
  const auto &value_type = user_data->get<TypeId>(kHashTableValueType);
  MS_EXCEPTION_IF_NULL(value_type);
  const auto &default_value = user_data->get<Value>(kHashTableDefaultValue);
  MS_EXCEPTION_IF_NULL(default_value);
  auto map_tensor = std::make_shared<tensor::MapTensor>(*key_type, *value_type, *shape_vector, default_value);
  map_tensor->set_device_address(device_address);
  return map_tensor;
}
TensorPtr CreateOutputTensor(const AnfNodePtr &output_node, size_t output_index) {
MS_EXCEPTION_IF_NULL(output_node);
const auto &abstract = common::AnfAlgo::GetNodeAbstractByIndex(output_node, output_index);
if (abstract != nullptr && abstract->isa<abstract::AbstractMapTensor>()) {
return CreateOutputMapTensor(output_node, output_index);
}
// Create host tensor, the output tensor should use the infer type, it will be handed correctly by tensor data sync
// when infer type is not equal to device type.
auto type_id = common::AnfAlgo::GetOutputInferDataType(output_node, output_index);

View File

@ -65,6 +65,8 @@ ValuePtr GetInferValueFromAbstract(const AbstractBasePtr &abs) {
return abs->cast<abstract::AbstractCOOTensorPtr>()->BuildValue();
} else if (abs->isa<abstract::AbstractCSRTensor>()) {
return abs->cast<abstract::AbstractCSRTensorPtr>()->BuildValue();
} else if (abs->isa<abstract::AbstractMapTensor>()) {
return kAnyValue;
} else {
MS_LOG(DEBUG) << "Unsupported abstract type for primitive, the abs is " << abs->ToString();
return kAnyValue;

View File

@ -418,6 +418,22 @@ void DataConvert::ConvertValueTupleToTensor(const FrontendOpRunInfoPtr &op_run_i
(void)op_run_info->base_op_run_info.input_tensor.emplace_back(tensor_ptr);
}
void DataConvert::ConvertMapTensor(const FrontendOpRunInfoPtr &op_run_info, const tensor::MapTensorPtr &map_tensor,
                                   const PrimitivePtr &op_prim) {
  // Append a MapTensor to the op run info as a single weight-parameter input.
  MS_EXCEPTION_IF_NULL(op_run_info);
  MS_EXCEPTION_IF_NULL(op_prim);
  MS_EXCEPTION_IF_NULL(map_tensor);
  // Skip primitives that do not declare input names.
  if (op_prim->GetAttr(kAttrInputNames) == nullptr) {
    MS_LOG(DEBUG) << "input_names are nullptr";
    return;
  }
  constexpr int kMapTensorInputNum = 1;
  auto &base_info = op_run_info->base_op_run_info;
  (void)base_info.input_tensor.emplace_back(map_tensor);
  (void)base_info.input_mask.insert(base_info.input_mask.end(), kMapTensorInputNum, kParameterWeightTensorMask);
}
void DataConvert::ConvertCSRTensorToTensorList(const FrontendOpRunInfoPtr &op_run_info,
const tensor::CSRTensorPtr &csr_tensor, const PrimitivePtr &op_prim) {
MS_EXCEPTION_IF_NULL(op_run_info);
@ -466,7 +482,10 @@ void DataConvert::ConvertValueToTensor(const FrontendOpRunInfoPtr &op_run_info,
MS_EXCEPTION_IF_NULL(op_prim);
tensor::TensorPtr tensor_ptr = nullptr;
int64_t tensor_mask = kParameterDataTensorMask;
if (v->isa<tensor::Tensor>()) {
if (v->isa<tensor::MapTensor>()) {
ConvertMapTensor(op_run_info, v->cast<tensor::MapTensorPtr>(), op_prim);
return;
} else if (v->isa<tensor::Tensor>()) {
tensor_ptr = v->cast<tensor::TensorPtr>();
if (tensor_ptr->is_parameter()) {
tensor_mask = kParameterWeightTensorMask;

View File

@ -59,6 +59,8 @@ struct DataConvert {
static void GetInputTensor(const FrontendOpRunInfoPtr &op_run_info, const std::string &device_target);
static void ConvertCSRTensorToTensorList(const FrontendOpRunInfoPtr &op_run_info,
const tensor::CSRTensorPtr &csr_tensor, const PrimitivePtr &op_prim);
static void ConvertMapTensor(const FrontendOpRunInfoPtr &op_run_info, const tensor::MapTensorPtr &map_tensor,
const PrimitivePtr &op_prim);
static void ConvertValueTupleToTensor(const FrontendOpRunInfoPtr &op_run_info, const ValueSequencePtr &value_seq);
static void PlantTensorTupleToVector(const FrontendOpRunInfoPtr &op_run_info, const ValueSequencePtr &value_seq,
const PrimitivePtr &op_prim, size_t index);

View File

@ -83,6 +83,28 @@ void UpdateParameterShapeFromInputTensor(const AnfNodePtr &input_node, const ten
input_node.get());
}
void SetDeviceAddress(const AnfNodePtr &input_node, const tensor::TensorPtr &input_tensor) {
  // Bind the device address between a graph input node and its input tensor:
  // either hand the node's address to the tensor (tensor has none yet), or
  // overwrite the node's address with the tensor's existing one.
  // Fix: input_node was dereferenced (GetMutableOutputAddr, input_node.get())
  // without a null check, unlike every sibling helper in this file.
  MS_EXCEPTION_IF_NULL(input_node);
  MS_EXCEPTION_IF_NULL(input_tensor);
  auto tensor_address = std::dynamic_pointer_cast<device::DeviceAddress>(input_tensor->device_address());
  auto node_address = AnfAlgo::GetMutableOutputAddr(input_node, 0);
  UpdateParameterShapeFromInputTensor(input_node, input_tensor);
  MS_EXCEPTION_IF_NULL(node_address);
  if (tensor_address == nullptr) {
    // Tensor has no device memory yet: adopt the node's address and mark the
    // tensor for an immediate host-to-device sync.
    input_tensor->set_device_address(node_address);
    input_tensor->set_sync_status(kNeedSyncHostToDeviceImmediately);
    input_tensor->set_lazy_callback([]() { runtime::OpExecutor::GetInstance().Wait(); });
    node_address->set_from_persistent_mem(input_tensor->is_parameter());
    node_address->SetNodeIndex(input_node, 0);
  } else if (tensor_address != node_address) {
    // The DeviceType and format of DeviceAddress is always the same after UpdateInputTensor
    AnfAlgo::SetOutputAddr(tensor_address, 0, input_node.get());
  }
}
void UpdateInputNodeDeviceAddress(const std::vector<AnfNodePtr> &input_nodes,
const std::vector<tensor::TensorPtr> &input_tensors) {
MS_LOG(DEBUG) << "Start";
@ -95,23 +117,14 @@ void UpdateInputNodeDeviceAddress(const std::vector<AnfNodePtr> &input_nodes,
auto &input_node = input_nodes[i];
auto &input_tensor = input_tensors[i];
MS_EXCEPTION_IF_NULL(input_tensor);
auto tensor_address = std::dynamic_pointer_cast<device::DeviceAddress>(input_tensor->device_address());
auto node_address = AnfAlgo::GetMutableOutputAddr(input_node, 0);
UpdateParameterShapeFromInputTensor(input_node, input_tensor);
MS_EXCEPTION_IF_NULL(node_address);
if (tensor_address == nullptr) {
input_tensor->set_device_address(node_address);
input_tensor->set_sync_status(kNeedSyncHostToDeviceImmediately);
input_tensor->set_lazy_callback([]() { runtime::OpExecutor::GetInstance().Wait(); });
node_address->set_from_persistent_mem(input_tensor->is_parameter());
node_address->SetNodeIndex(input_node, 0);
}
// The DeviceType and format of DeviceAddress is always the same after UpdateInputTensor
if (tensor_address != nullptr && tensor_address != node_address) {
AnfAlgo::SetOutputAddr(tensor_address, 0, input_node.get());
if (input_tensor->isa<tensor::MapTensor>()) {
auto map_tensor = input_tensor->cast<tensor::MapTensorPtr>();
SetDeviceAddress(input_node, map_tensor);
SetDeviceAddress(input_node, map_tensor->key_tensor());
SetDeviceAddress(input_node, map_tensor->value_tensor());
SetDeviceAddress(input_node, map_tensor->status_tensor());
} else {
SetDeviceAddress(input_node, input_tensor);
}
}
MS_LOG(DEBUG) << "End";
@ -221,6 +234,37 @@ void CopyValueNodeDataToDevice(const KernelGraphPtr &graph, const device::Device
MS_LOG(DEBUG) << "End";
}
void UpdateAddressSizeForDynamicShapeTensor(const tensor::TensorPtr &input_tensor) {
  // For dynamic-shape inputs (base_shape_ptr set), the device address size may
  // not match the actual data size; resize it to the tensor's real byte size.
  // Fix: input_tensor was dereferenced without the MS_EXCEPTION_IF_NULL check
  // that every other tensor-taking helper in this file performs.
  MS_EXCEPTION_IF_NULL(input_tensor);
  if (input_tensor->base_shape_ptr() != nullptr) {
    auto device_address = std::dynamic_pointer_cast<device::DeviceAddress>(input_tensor->device_address());
    MS_EXCEPTION_IF_NULL(device_address);
    auto tensor_size = LongToSize(input_tensor->data().nbytes());
    if (tensor_size != device_address->GetSize()) {
      device_address->SetSize(tensor_size);
    }
  }
}
void CopyMapTensorDataToDevice(const tensor::MapTensorPtr &map_tensor, const AnfNodePtr &input_node,
                               const device::DeviceContext *device_context) {
  // Copy the three backing tensors (key/value/status) of a MapTensor to device.
  MS_EXCEPTION_IF_NULL(map_tensor);
  // Fix: the identical update/copy/mark sequence was written out three times;
  // factor it into one local helper so the three tensors cannot drift apart.
  auto copy_one_tensor = [&input_node, &device_context](const tensor::TensorPtr &tensor) {
    MS_EXCEPTION_IF_NULL(tensor);
    UpdateAddressSizeForDynamicShapeTensor(tensor);
    CopyTensorDataToDevice(tensor, input_node, device_context);
    tensor->set_sync_status(kNoNeedSync);
  };
  copy_one_tensor(map_tensor->key_tensor());
  copy_one_tensor(map_tensor->value_tensor());
  copy_one_tensor(map_tensor->status_tensor());
}
void CopyParameterDataToDevice(const std::vector<AnfNodePtr> &input_nodes,
const std::vector<tensor::TensorPtr> &input_tensors,
const device::DeviceContext *device_context) {
@ -234,16 +278,14 @@ void CopyParameterDataToDevice(const std::vector<AnfNodePtr> &input_nodes,
MS_EXCEPTION_IF_NULL(input_tensors[i]);
if (input_tensors[i]->NeedSyncHostToDeviceImmediately()) {
// First op in dynamic shape scenario(feed mode)
if (input_tensors[i]->base_shape_ptr() != nullptr) {
auto device_address = std::dynamic_pointer_cast<device::DeviceAddress>(input_tensors[i]->device_address());
MS_EXCEPTION_IF_NULL(device_address);
auto tensor_size = LongToSize(input_tensors[i]->data().nbytes());
if (tensor_size != device_address->GetSize()) {
device_address->SetSize(tensor_size);
}
if (input_tensors[i]->isa<tensor::MapTensor>()) {
auto map_tensor = input_tensors[i]->cast<tensor::MapTensorPtr>();
CopyMapTensorDataToDevice(map_tensor, input_nodes[i], device_context);
} else {
UpdateAddressSizeForDynamicShapeTensor(input_tensors[i]);
CopyTensorDataToDevice(input_tensors[i], input_nodes[i], device_context);
input_tensors[i]->set_sync_status(kNoNeedSync);
}
CopyTensorDataToDevice(input_tensors[i], input_nodes[i], device_context);
input_tensors[i]->set_sync_status(kNoNeedSync);
}
}
MS_LOG(DEBUG) << "End";
@ -264,13 +306,15 @@ void UpdateOutputAddrSize(const AnfNodePtr &node, const std::shared_ptr<OpRuntim
}
bool MallocForKernelInput(const std::shared_ptr<OpRuntimeInfo> &runtime_info,
const device::DeviceContext *device_context) {
const device::DeviceContext *device_context, const CNodePtr &node) {
auto kernel_mod = AnfAlgo::GetKernelMod(node);
MS_EXCEPTION_IF_NULL(runtime_info);
MS_EXCEPTION_IF_NULL(device_context);
MS_EXCEPTION_IF_NULL(device_context->device_res_manager_);
auto input_size = runtime_info->GetInputSize();
for (size_t i = 0; i < input_size; ++i) {
auto input_address = runtime_info->GetInputDeviceAddress(i);
kernel_mod->set_input_user_data(input_address->user_data().get(), i);
MS_EXCEPTION_IF_NULL(input_address);
if (input_address->GetPtr() == nullptr &&
!device_context->device_res_manager_->AllocateMemory(input_address.get())) {
@ -299,6 +343,7 @@ bool MallocForKernelOutput(const std::shared_ptr<OpRuntimeInfo> &runtime_info, c
for (size_t i = 0; i < output_size; ++i) {
auto device_address = runtime_info->GetOutputDeviceAddress(i);
MS_EXCEPTION_IF_NULL(device_address);
kernel_mod->set_output_user_data(device_address->user_data().get(), i);
// For example, we need to call cudnnGetRNNTrainingReserveSize to get real output size in LstmGpuKernelMod!
if (kernel_out_size_list[i] != device_address->GetSize()) {
// If the format of the DeviceAddress is different, then the size is originally different.
@ -477,7 +522,7 @@ void LaunchKernelsDynamic(const KernelGraphPtr &graph, const device::DeviceConte
auto runtime_info = node->user_data<runtime::OpRuntimeInfo>();
MS_EXCEPTION_IF_NULL(runtime_info);
if (!MallocForKernelInput(runtime_info, device_context)) {
if (!MallocForKernelInput(runtime_info, device_context, node)) {
MS_LOG(EXCEPTION) << "Malloc for kernel input failed, Memory isn't enough, node:" << node->fullname_with_scope();
}
auto inputs = CreateKernelInputAddress(runtime_info);
@ -524,7 +569,7 @@ void LaunchKernels(const KernelGraphPtr &graph, const device::DeviceContext *dev
auto runtime_info = node->user_data<runtime::OpRuntimeInfo>();
MS_EXCEPTION_IF_NULL(runtime_info);
if (!MallocForKernelInput(runtime_info, device_context)) {
if (!MallocForKernelInput(runtime_info, device_context, node)) {
MS_LOG(EXCEPTION) << "Malloc for kernel input failed, Memory isn't enough, node:" << node->fullname_with_scope();
}
auto inputs = CreateKernelInputAddress(runtime_info);

View File

@ -21,9 +21,10 @@ import sys
from copy import copy
import numbers
import mindspore as ms
from mindspore.common.parameter import Tensor, Parameter
from mindspore.common.parameter import Parameter
from mindspore._c_expression import Tensor as Tensor_
from mindspore._c_expression import MapTensor_
from mindspore.ops.operations import _map_tensor_ops
class MapParameter(Parameter):
@ -128,6 +129,8 @@ class MapParameter(Parameter):
else:
self._map_tensor = MapTensor_(self.key_dtype, self.value_dtype, self.value_shape, self.default_value,
self.permit_filter_value, self.evict_filter_value)
self.map_put = _map_tensor_ops.put
self.map_erase = _map_tensor_ops.erase
def __getitem__(self, key_tensor):
return self.get(key_tensor, True)
@ -183,8 +186,8 @@ class MapParameter(Parameter):
Returns:
Tensor, the value tensor for the key tensor.
"""
result_tensor = self._map_tensor.get(key_tensor, insert_default_value)
return Tensor(result_tensor, internal=True)
map_get = _map_tensor_ops.MapTensorGet(insert_default_value)
return map_get(self._map_tensor, key_tensor)
def get_keys(self):
"""
@ -193,7 +196,7 @@ class MapParameter(Parameter):
Returns:
Tensor, the tensor contains all keys.
"""
return self.key_tensor
return self._map_tensor.get_keys()
def get_values(self):
"""
@ -202,7 +205,7 @@ class MapParameter(Parameter):
Returns:
Tensor, the tensor contains all values.
"""
return self.value_tensor
return self._map_tensor.get_values()
def get_data(self):
"""
@ -211,8 +214,7 @@ class MapParameter(Parameter):
Returns:
Tensor, the tensor contains all keys and values.
"""
return self.key_tensor, self.value_tensor
return self._map_tensor.get_data()
def put(self, key_tensor, value_tensor):
"""
@ -225,8 +227,8 @@ class MapParameter(Parameter):
Returns:
MapParameter, the MapParameter object itself.
"""
self._map_tensor.put(key_tensor, value_tensor)
return self
self.map_put(self._map_tensor, key_tensor, value_tensor)
return self._map_tensor
def erase(self, key_tensor):
"""
@ -238,8 +240,8 @@ class MapParameter(Parameter):
Returns:
MapParameter, the MapParameter object itself.
"""
self._map_tensor.erase(key_tensor)
return self
self.map_erase(self._map_tensor, key_tensor)
return self._map_tensor
def export_data(self, incremental=False):
"""

View File

@ -13,12 +13,16 @@
# limitations under the License.
# ============================================================================
import os
import os.path
import pytest
import numpy as np
import mindspore as ms
import mindspore.nn as nn
from mindspore import context, Tensor, Parameter, save_checkpoint, load_checkpoint
from mindspore import context, Tensor, Parameter, save_checkpoint, load_checkpoint, ParameterTuple
from mindspore.experimental import MapParameter
from mindspore.common.initializer import initializer
from mindspore.ops import composite as C
from mindspore import export, load
@pytest.mark.level0
@ -39,6 +43,7 @@ def test_simple_graph_compile_export():
self.values = Tensor([[11, 11, 11], [22, 22, 22]], dtype=ms.float32)
def construct(self):
self.m.put(self.keys, self.values)
return self.p, self.m
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
@ -84,8 +89,6 @@ def test_maptensor_put_get_export(ms_type):
print("out3:", out3)
data = net.m.export_data()
print("data:", data)
assert len(data) == 3
assert len(data[0]) == 4
@pytest.mark.level0
@ -113,10 +116,355 @@ def test_mapparameter_ckpt_save_load(ms_type):
value2 = self.m.get(key2, True)
return value1, value2, self.m
net = MyNet(ms_type)
net(ms_type)
file_name = "map_parameter.ckpt"
save_checkpoint(net, file_name)
assert os.path.exists(file_name)
load_checkpoint(file_name)
os.remove(file_name)
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
if not 'SAULT_ENV_TYPE' in os.environ or not "CUDA10" in os.environ['SAULT_ENV_TYPE']:
net = MyNet(ms_type)
net(ms_type)
file_name = "map_parameter.ckpt"
save_checkpoint(net, file_name)
assert os.path.exists(file_name)
load_checkpoint(file_name)
os.remove(file_name)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_map_parameter_get():
    """
    Feature: MapParameter
    Description: Test get api for MapParameter.
    Expectation: get api works as expected.
    """
    init_keys = Tensor([1, 2], dtype=ms.int32)
    init_values = Tensor([[1, 2], [1, 2]], dtype=ms.float32)
    m = MapParameter(key_tensor=init_keys, value_tensor=init_values, default_value='zeros')
    # Look up a key that is not in the map yet.
    get_value = m.get(Tensor([3], dtype=ms.int32))
    print("get_value:", get_value)
    data1 = m.export_data(incremental=False)
    print("data1:", data1)
    # Insert a single new entry via put().
    m.put(Tensor([3], dtype=ms.int32), Tensor([[3, 3]], dtype=ms.float32))
    data2 = m.export_data(incremental=False)
    print("data2:", data2)
    # Overwrite all three entries via __setitem__.
    m[Tensor([1, 2, 3], dtype=ms.int32)] = Tensor([[11, 11], [22, 22], [33, 33]], dtype=ms.float32)
    data3 = m.export_data(incremental=False)
    print("data3:", data3)
    # Remove all three keys.
    m.erase(Tensor([1, 2, 3], dtype=ms.int32))
    data4 = m.export_data(incremental=False)
    print("data4:", data4)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_map_parameter_put():
    """
    Feature: MapParameter
    Description: Test put api for MapParameter.
    Expectation: put api works as expected.
    """
    m = MapParameter(key_tensor=Tensor([1, 2], dtype=ms.int32),
                     value_tensor=Tensor([[1, 2], [1, 2]], dtype=ms.float32),
                     default_value='zeros')
    # Insert one new key/value pair and dump the full table.
    new_key = Tensor([3], dtype=ms.int32)
    new_value = Tensor([[4, 5]], dtype=ms.float32)
    m.put(new_key, new_value)
    data1 = m.export_data(incremental=False)
    print("data1:", data1)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_map_parameter_erase():
    """
    Feature: MapParameter
    Description: Test erase api for MapParameter.
    Expectation: erase api works as expected.
    """
    m = MapParameter(key_tensor=Tensor([1, 2], dtype=ms.int32),
                     value_tensor=Tensor([[1, 2], [1, 2]], dtype=ms.float32),
                     default_value='zeros')
    # Remove one existing key and dump the remaining table.
    m.erase(Tensor([2], dtype=ms.int32))
    data1 = m.export_data(incremental=False)
    print("data1:", data1)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_basic_operations():
    """
    Feature: MapParameter
    Description: Test MapParameter basic operations.
    Expectation: MapParameter works as expected.
    """
    m = MapParameter(key_dtype=ms.int32, value_dtype=ms.float32, value_shape=(2), default_value='zeros', name='my_map')
    assert m.name == 'my_map'
    assert m.requires_grad

    def check_default_result(t):
        # With default_value='zeros', looked-up values are float32 zeros.
        assert t.dtype == ms.float32
        assert t.shape == (3, 2)
        assert np.allclose(t.asnumpy(), 0)

    # get() and __getitem__ must agree for the same keys.
    check_default_result(m.get(Tensor([1, 2, 3], dtype=ms.int32)))
    check_default_result(m[Tensor([1, 2, 3], dtype=ms.int32)])
    m.put(Tensor([1, 2, 3], dtype=ms.int32), Tensor([[1, 1], [2, 2], [3, 3]], dtype=ms.float32))
    data = m.export_data()
    print(m)
    print("data:", data)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_simple_graph_compile():
    """
    Feature: MapParameter
    Description: Test IR graph compiled with MapParameter.
    Expectation: IR graph with MapParameter created without exceptions.
    """
    class MyNet(nn.Cell):
        def __init__(self):
            nn.Cell.__init__(self)
            self.p = Parameter(initializer('ones', (2, 3), ms.float32))
            self.m = MapParameter(key_dtype=ms.int32, value_dtype=ms.float32, value_shape=(3,))
            self.key = Tensor([1, 2], dtype=ms.int32)

        def construct(self, x):
            # Exercise put/get/__getitem__/__setitem__/erase inside the graph.
            self.m.put(self.key, x)
            value1 = self.m.get(self.key)
            value2 = self.m[self.key]
            self.m[self.key] = value2
            self.m.erase(self.key)
            return self.p + value1 + value2, self.m

    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    # Skip only on CUDA10 SAULT environments; run everywhere else.
    # (Was: `not 'SAULT_ENV_TYPE' in os.environ or not "CUDA10" in ...` —
    # non-idiomatic double negation; os.environ.get covers both cases.)
    if "CUDA10" not in os.environ.get('SAULT_ENV_TYPE', ''):
        net = MyNet()
        t = initializer('ones', (2, 3), ms.float32)
        t = t.init_data()
        out = net(t)
        print(out)
        assert out[0].shape == (2, 3)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_export_update_api():
    """
    Feature: MapParameter
    Description: Test export update api for MapParameter.
    Expectation: Export update api works as expected.
    """
    m1 = MapParameter(key_dtype=ms.int32, value_dtype=ms.float32, value_shape=(3,))
    data1 = m1.export_data(incremental=False)
    print("data1:", data1)
    # A map must accept its own exported data back.
    m1.import_data(data1)
    m2 = MapParameter(key_tensor=Tensor([1, 2], dtype=ms.int32),
                      value_tensor=Tensor([[1, 2], [1, 2]], dtype=ms.float32),
                      default_value='zeros')
    data2 = m2.export_data(incremental=False)
    print("data2:", data2)
    # Import data exported from a different MapParameter instance.
    m1.import_data(data2)
    new_data1 = m1.export_data(incremental=False)
    print("new_data1:", new_data1)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_map_parameter_clone():
    """
    Feature: MapParameter
    Description: Test MapParameter clone() method.
    Expectation: MapParameter cloned as expected.
    """
    source = MapParameter(key_dtype=ms.int32, value_dtype=ms.float32, value_shape=(3,), name="map")
    plain = Parameter(Tensor(1), name="param")
    params = ParameterTuple([source, plain])
    cloned_params = params.clone(prefix="cloned", init='zeros')
    cloned_map = cloned_params[0]
    # The clone keeps the metadata of the source but gets a prefixed name.
    assert isinstance(cloned_map, MapParameter)
    assert cloned_map.name == 'cloned.map'
    assert cloned_map.key_dtype == source.key_dtype
    assert cloned_map.value_dtype == source.value_dtype
    assert cloned_map.value_shape == source.value_shape
    assert cloned_map.default_value == 'zeros'
    # The clone must own a distinct underlying map tensor with the same metadata.
    old_map_tensor = source._map_tensor  # pylint: disable=W0212
    new_map_tensor = cloned_map._map_tensor  # pylint: disable=W0212
    assert new_map_tensor != old_map_tensor
    assert new_map_tensor.key_dtype == old_map_tensor.key_dtype
    assert new_map_tensor.value_dtype == old_map_tensor.value_dtype
    assert new_map_tensor.value_shape == old_map_tensor.value_shape
    # clone(init='same') preserves metadata and the 'zeros' default value.
    clone_same = cloned_map.clone(init='same')
    assert clone_same.key_dtype == source.key_dtype
    assert clone_same.value_dtype == source.value_dtype
    assert clone_same.value_shape == source.value_shape
    assert clone_same.default_value == 'zeros'
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_grad_net():
    """
    Feature: MapParameter
    Description: Test grad graph compiled with MapParameter.
    Expectation: Grad graph for MapParameter created without exceptions.
    """
    class MyNet(nn.Cell):
        def __init__(self):
            nn.Cell.__init__(self)
            self.m = MapParameter(key_dtype=ms.int32, value_dtype=ms.float32, value_shape=(3,))
            self.key = Tensor([1, 2], dtype=ms.int32)

        def construct(self, x):
            a = self.m.get(self.key)
            self.m.erase(self.key)
            return x * a

    class GradNet(nn.Cell):
        def __init__(self, network):
            super(GradNet, self).__init__()
            self.grad_by_list = C.GradOperation(get_by_list=True)
            self.network = network
            self.weights = ParameterTuple(network.trainable_params())

        def construct(self, *inputs):
            gout = self.grad_by_list(self.network, self.weights)(*inputs)
            return gout

    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    # Skip only on CUDA10 SAULT environments; run everywhere else.
    # (Was a non-idiomatic `not ... in` double negation; os.environ.get
    # handles the missing-variable case in one expression.)
    if "CUDA10" not in os.environ.get('SAULT_ENV_TYPE', ''):
        net = MyNet()
        grad = GradNet(net)
        t = initializer('ones', (2, 3), ms.float32)
        t = t.init_data()
        grad(t)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_map_parameter_in_construct():
    """
    Feature: MapParameter
    Description: Test new MapParameter in construct.
    Expectation: New MapTensor in construct without exceptions.
    """
    class MapTensorNet(nn.Cell):
        def __init__(self):
            super().__init__()
            self.default_value = 'zeros'
            self.key_tensor = Tensor([1, 2], dtype=ms.int32)
            self.value_tensor = Tensor([[1, 2], [1, 2]], dtype=ms.float32)
            self.new_key_tensor = Tensor([3, 4], dtype=ms.int32)
            self.new_value_tensor = Tensor([[3, 3], [4, 4]], dtype=ms.float32)

        def construct(self):
            # Create a MapParameter inside the graph, then mutate it.
            new_map_tensor = MapParameter(self.key_tensor, self.value_tensor, self.default_value)
            new_map_tensor.put(self.new_key_tensor, self.new_value_tensor)
            return new_map_tensor

    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    # Skip only on CUDA10 SAULT environments; run everywhere else.
    # (Replaces the non-idiomatic `not ... in` double-negation check.)
    if "CUDA10" not in os.environ.get('SAULT_ENV_TYPE', ''):
        net = MapTensorNet()
        out = net()
        print("out:", out)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_map_parameter_get_data_api():
    """
    Feature: MapParameter
    Description: Test get_data api for MapParameter.
    Expectation: get_data api works as expected.
    """
    m = MapParameter(key_tensor=Tensor([1, 2], dtype=ms.int32),
                     value_tensor=Tensor([[1, 2], [1, 2]], dtype=ms.float32),
                     default_value='zeros')
    # get_data() yields the keys tensor and the values tensor.
    the_keys, the_values = m.get_data()
    print("the_keys:", the_keys)
    print("the_values:", the_values)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_map_parameter_filter():
    """
    Feature: MapParameter
    Description: Test IR graph compiled with MapParameter, test with filter.
    Expectation: IR graph with MapParameter created without exceptions.
    """
    class MyNet(nn.Cell):
        def __init__(self):
            nn.Cell.__init__(self)
            self.m = MapParameter(key_dtype=ms.int32, value_dtype=ms.float32, value_shape=(3,), permit_filter_value=2,
                                  evict_filter_value=3)

        def construct(self):
            return self.m

    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    # Skip only on CUDA10 SAULT environments; run everywhere else.
    # (Replaces the non-idiomatic `not ... in` double-negation check.)
    if "CUDA10" not in os.environ.get('SAULT_ENV_TYPE', ''):
        net = MyNet()
        out = net()
        print("out:", out)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_simple_graph_export_load():
    """
    Feature: MapParameter
    Description: Test IR graph export and load with MapParameter.
    Expectation: IR graph with MapParameter exported and loaded without exceptions.
    """
    class MyNet(nn.Cell):
        def __init__(self):
            nn.Cell.__init__(self)
            self.p = Parameter(initializer('ones', (2, 3), ms.float32))
            self.m = MapParameter(key_dtype=ms.int32, value_dtype=ms.float32, value_shape=(3,))
            self.key = Tensor([1, 2], dtype=ms.int32)

        def construct(self, x):
            self.m.put(self.key, x)
            value1 = self.m.get(self.key)
            value2 = self.m[self.key]
            self.m[self.key] = value2
            self.m.erase(self.key)
            self.m.put(self.key, value2)
            return self.p + value1 + value2

    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    # Skip only on CUDA10 SAULT environments; run everywhere else.
    # (Replaces the non-idiomatic `not ... in` double-negation check.)
    if "CUDA10" not in os.environ.get('SAULT_ENV_TYPE', ''):
        net = MyNet()
        t = initializer('ones', (2, 3), ms.float32)
        t = t.init_data()
        # Full export, then load the produced MINDIR back.
        file_path = "./map-parameter.mindir"
        export(net, t, file_name=file_path, file_format="MINDIR")
        assert os.path.isfile(file_path)
        load(file_path)
        # Incremental export path.
        file_path = "./map-parameter-incremental.mindir"
        export(net, t, file_name=file_path, file_format="MINDIR", incremental=True)
        assert os.path.isfile(file_path)
        load(file_path)

View File

@ -1,292 +0,0 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import os.path
import numpy as np
import mindspore as ms
import mindspore.nn as nn
from mindspore import context, Tensor, Parameter, ParameterTuple
from mindspore.experimental import MapParameter
from mindspore.common.initializer import initializer
from mindspore.ops import composite as C
from mindspore import export, load
def test_basic_operations():
"""
Feature: MapParameter
Description: Test MapParameter basic operations.
Expectation: MapParameter works as expected.
"""
m = MapParameter(key_dtype=ms.int32, value_dtype=ms.float32, value_shape=(2), default_value='zeros', name='my_map')
assert m.name == 'my_map'
assert m.requires_grad
t = m.get(Tensor([1, 2, 3], dtype=ms.int32))
assert t.dtype == ms.float32
assert t.shape == (3, 2)
assert np.allclose(t.asnumpy(), 0)
t = m.get(Tensor([1, 2, 3], dtype=ms.int32))
assert t.dtype == ms.float32
assert t.shape == (3, 2)
assert np.allclose(t.asnumpy(), 0)
t = m[Tensor([1, 2, 3], dtype=ms.int32)]
assert t.dtype == ms.float32
assert t.shape == (3, 2)
assert np.allclose(t.asnumpy(), 0)
m.put(Tensor([1, 2, 3], dtype=ms.int32), Tensor([[1, 1], [2, 2], [3, 3]], dtype=ms.float32))
m[Tensor([1, 2, 3], dtype=ms.int32)] = Tensor([[11, 11], [22, 22], [33, 33]], dtype=ms.float32)
m.erase(Tensor([1, 2, 3], dtype=ms.int32))
data = m.get_data()
assert data == (None, None)
print(m)
def test_simple_graph_compile():
"""
Feature: MapParameter
Description: Test IR graph compiled with MapParameter.
Expectation: IR graph with MapParameter created without exceptions.
"""
class MyNet(nn.Cell):
def __init__(self):
nn.Cell.__init__(self)
self.p = Parameter(initializer('ones', (2, 3), ms.float32))
self.m = MapParameter(key_dtype=ms.int32, value_dtype=ms.float32, value_shape=(3,))
self.key = Tensor([1, 2], dtype=ms.int32)
def construct(self, x):
self.m.put(self.key, x)
value1 = self.m.get(self.key)
value2 = self.m[self.key]
self.m[self.key] = value2
self.m.erase(self.key)
keys = self.m.get_keys()
values = self.m.get_values()
keys_values = self.m.get_data()
print(keys_values)
self.m.put(keys, values)
return self.p + value1 + value2
context.set_context(mode=context.GRAPH_MODE)
net = MyNet()
t = initializer('ones', (2, 3), ms.float32)
t = t.init_data()
out = net(t)
print(out)
assert out.shape == (2, 3)
def test_export_update_api():
"""
Feature: MapParameter
Description: Test export update api for MapParameter.
Expectation: Export update api works as expected.
"""
m1 = MapParameter(key_dtype=ms.int32, value_dtype=ms.float32, value_shape=(3,))
data1 = m1.export_data(incremental=False)
print("data1:", data1)
m1.import_data(data1)
keys = Tensor([1, 2], dtype=ms.int32)
values = Tensor([[1, 2], [1, 2]], dtype=ms.float32)
m2 = MapParameter(key_tensor=keys, value_tensor=values, default_value='zeros')
data2 = m2.export_data(incremental=False)
print("data2:", data2)
m1.import_data(data2)
new_data1 = m1.export_data(incremental=False)
print("new_data1:", new_data1)
def test_map_parameter_clone():
    """
    Feature: MapParameter
    Description: Test MapParameter clone() method.
    Expectation: MapParameter cloned as expected.
    """
    source_map = MapParameter(key_dtype=ms.int32, value_dtype=ms.float32, value_shape=(3,), name="map")
    plain_param = Parameter(Tensor(1), name="param")
    bundle = ParameterTuple([source_map, plain_param])
    clones = bundle.clone(prefix="cloned", init='zeros')
    map_clone = clones[0]
    # The clone keeps dtypes/shape but gets a prefixed name and the new init.
    assert isinstance(map_clone, MapParameter)
    assert map_clone.name == 'cloned.map'
    assert map_clone.key_dtype == source_map.key_dtype
    assert map_clone.value_dtype == source_map.value_dtype
    assert map_clone.value_shape == source_map.value_shape
    assert map_clone.default_value == 'zeros'
    # The underlying MapTensor must be a distinct object with matching metadata.
    original_tensor = source_map._map_tensor  # pylint: disable=W0212
    cloned_tensor = map_clone._map_tensor  # pylint: disable=W0212
    assert cloned_tensor != original_tensor
    assert cloned_tensor.key_dtype == original_tensor.key_dtype
    assert cloned_tensor.value_dtype == original_tensor.value_dtype
    assert cloned_tensor.value_shape == original_tensor.value_shape
    # clone(init='same') keeps the (already overridden) default value.
    same_clone = map_clone.clone(init='same')
    assert same_clone.key_dtype == source_map.key_dtype
    assert same_clone.value_dtype == source_map.value_dtype
    assert same_clone.value_shape == source_map.value_shape
    assert same_clone.default_value == 'zeros'
def test_grad_net():
    """
    Feature: MapParameter
    Description: Test grad graph compiled with MapParameter.
    Expectation: Grad graph for MapParameter created without exceptions.
    """
    class MyNet(nn.Cell):
        # Forward net: reads and erases a MapParameter entry.
        def __init__(self):
            super().__init__()
            self.m = MapParameter(key_dtype=ms.int32, value_dtype=ms.float32, value_shape=(3,))
            self.key = Tensor([1, 2], dtype=ms.int32)

        def construct(self, x):
            a = self.m.get(self.key)
            self.m.erase(self.key)
            return x * a

    class GradNet(nn.Cell):
        # Wraps a network and differentiates it w.r.t. its trainable params.
        def __init__(self, network):
            super(GradNet, self).__init__()
            self.grad_by_list = C.GradOperation(get_by_list=True)
            self.network = network
            self.weights = ParameterTuple(network.trainable_params())

        def construct(self, *inputs):
            gout = self.grad_by_list(self.network, self.weights)(*inputs)
            return gout

    context.set_context(mode=context.GRAPH_MODE)
    grad_net = GradNet(MyNet())
    ones_input = initializer('ones', (2, 3), ms.float32)
    ones_input = ones_input.init_data()
    grad_net(ones_input)
def test_map_parameter_in_init_and_construct():
    """
    Feature: MapParameter
    Description: Test new MapParameter in construct.
    Expectation: New MapTensor in construct without exceptions.
    """
    class MapTensorNet(nn.Cell):
        # Builds MapParameters both in __init__ and inside construct.
        def __init__(self):
            super().__init__()
            self.default_value = 'zeros'
            self.map_param_1 = MapParameter(key_dtype=ms.int32, value_dtype=ms.float32, value_shape=(3,))
            self.key_tensor = Tensor([1, 2], dtype=ms.int32)
            self.value_tensor = Tensor([[1, 2], [1, 2]], dtype=ms.float32)
            self.map_param_2 = MapParameter(key_tensor=self.key_tensor, value_tensor=self.value_tensor,
                                            default_value=self.default_value)

        def construct(self):
            keys = self.map_param_2.get_keys()
            values = self.map_param_2.get_values()
            new_map_tensor = MapParameter(keys, values, self.default_value)
            new_data = new_map_tensor.get_data()
            return self.map_param_1, new_map_tensor, new_data

    context.set_context(mode=context.GRAPH_MODE)
    net = MapTensorNet()
    result = net()
    print("out:", result)
def test_map_parameter_get_data_api():
    """
    Feature: MapParameter
    Description: Test get_data api for MapParameter.
    Expectation: get_data api works as expected.
    """
    key_tensor = Tensor([1, 2], dtype=ms.int32)
    value_tensor = Tensor([[1, 2], [1, 2]], dtype=ms.float32)
    map_tensor = MapParameter(key_tensor=key_tensor, value_tensor=value_tensor, default_value='zeros')
    print("get_keys:", map_tensor.get_keys())
    print("get_values:", map_tensor.get_values())
    # get_data() returns the (keys, values) pair in one call.
    the_keys, the_values = map_tensor.get_data()
    print("the_keys:", the_keys)
    print("the_values:", the_values)
def test_map_parameter_filter():
    """
    Feature: MapParameter
    Description: Test IR graph compiled with MapParameter, test with filter.
    Expectation: IR graph with MapParameter created without exceptions.
    """
    class MyNet(nn.Cell):
        # MapParameter configured with permit/evict filter thresholds.
        def __init__(self):
            super().__init__()
            self.m = MapParameter(key_dtype=ms.int32, value_dtype=ms.float32, value_shape=(3,),
                                  permit_filter_value=2, evict_filter_value=3)

        def construct(self):
            return self.m

    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    net = MyNet()
    result = net()
    print("out:", result)
def test_simple_graph_export_load():
    """
    Feature: MapParameter
    Description: Test IR graph export and load with MapParameter.
    Expectation: IR graph with MapParameter exported and loaded without exceptions.
    """
    class MyNet(nn.Cell):
        # Same op coverage as test_simple_graph_compile, used as an export target.
        def __init__(self):
            super().__init__()
            self.p = Parameter(initializer('ones', (2, 3), ms.float32))
            self.m = MapParameter(key_dtype=ms.int32, value_dtype=ms.float32, value_shape=(3,))
            self.key = Tensor([1, 2], dtype=ms.int32)

        def construct(self, x):
            self.m.put(self.key, x)
            value1 = self.m.get(self.key)
            value2 = self.m[self.key]
            self.m[self.key] = value2
            self.m.erase(self.key)
            keys = self.m.get_keys()
            values = self.m.get_values()
            self.m.put(keys, values)
            return self.p + value1 + value2

    context.set_context(mode=context.GRAPH_MODE)
    net = MyNet()
    sample = initializer('ones', (2, 3), ms.float32)
    sample = sample.init_data()
    # Full export, then reload the produced MINDIR file.
    full_path = "./map-parameter.mindir"
    export(net, sample, file_name=full_path, file_format="MINDIR")
    assert os.path.isfile(full_path)
    load(full_path)
    # Incremental export, then reload it as well.
    incremental_path = "./map-parameter-incremental.mindir"
    export(net, sample, file_name=incremental_path, file_format="MINDIR", incremental=True)
    assert os.path.isfile(incremental_path)
    load(incremental_path)