!14849 fix codedex and bot

From: @fangzehua
Reviewed-by: @wuxuejian, @liangchenghui
Signed-off-by: @wuxuejian
mindspore-ci-bot 2021-04-10 11:19:26 +08:00 committed by Gitee
commit c907c95da5
14 changed files with 8 additions and 797 deletions

View File

@@ -30,7 +30,6 @@ void AssignCPUKernel::InitKernel(const CNodePtr &kernel_node) {
MS_EXCEPTION_IF_NULL(kernel_node);
auto input_x_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
auto input_y_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);
if (input_x_shape.size() != input_y_shape.size()) MS_LOG(EXCEPTION) << "x and y must have the same shape";
for (size_t i = 0; i < input_x_shape.size(); ++i) {
if (input_x_shape[i] != input_y_shape[i]) {

View File

@@ -1,113 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/cpu/cache_swap_hashmap_cpu_kernel.h"
#include <string>
#include "runtime/device/cpu/cpu_device_address.h"
namespace mindspore {
namespace kernel {
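// Editor's note: Compress performs backward-shift deletion for linear probing. Starting just
// after the freed slot `entry`, any later entry in the probe cluster whose probe count (`tag`)
// exceeds its distance `off` from the hole is moved back into the hole, so every key stays
// reachable from its home slot. Worked example: with length = 5 and keys A, B, C all hashing
// to slot 0 (A@0 tag 1, B@1 tag 2, C@2 tag 3), deleting A and calling Compress(map, 5, 0)
// shifts B to slot 0 (tag 1) and C to slot 1 (tag 2).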
template <typename T>
void Compress(HashmapEntry<T> *entry_p, const size_t &length, T entry) {
T i = (entry + 1) % length, off = 1;
for (; !entry_p[i].IsEmpty(); i = (i + 1) % length, off++) {
if (entry_p[i].tag > off) {
entry_p[entry].key = entry_p[i].key;
entry_p[entry].value = entry_p[i].value;
entry_p[entry].step = entry_p[i].step;
entry_p[entry].tag = entry_p[i].tag - off;
entry_p[i].SetEmpty();
off = 0;
entry = i;
}
}
}
void CacheSwapHashmapCPUKernel::InitKernel(const CNodePtr &kernel_node) {
auto hashmap_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
auto emb_idx_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);
if (hashmap_shape.size() != 2) {
MS_LOG(EXCEPTION) << "Dimension of HashMap must be 2, (n, 4)";
}
for (size_t i = 0; i < emb_idx_shape.size(); ++i) {
batch_size_ *= emb_idx_shape[i];
}
hashmap_length_ = hashmap_shape[0];
if (hashmap_length_ <= 0) {
MS_LOG(EXCEPTION) << "Hashmap length must > 0";
}
dtype_ = AnfAlgo::GetPrevNodeOutputInferDataType(kernel_node, 0);
}
bool CacheSwapHashmapCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inputs,
const std::vector<kernel::AddressPtr> & /*workspace*/,
const std::vector<kernel::AddressPtr> &outputs) {
if (dtype_ == kNumberTypeInt32) {
LaunchKernel<int>(inputs, outputs);
} else if (dtype_ == kNumberTypeInt64) {
LaunchKernel<int64_t>(inputs, outputs);
} else {
MS_LOG(ERROR) << "Only support int32, int64";
return false;
}
return true;
}
template <typename T>
void CacheSwapHashmapCPUKernel::LaunchKernel(const std::vector<AddressPtr> &inputs,
const std::vector<kernel::AddressPtr> &outputs) {
HashmapEntry<T> *hashmap = reinterpret_cast<HashmapEntry<T> *>(inputs[0]->addr);
auto miss_emb_idx = reinterpret_cast<T *>(inputs[1]->addr);
step_ = *reinterpret_cast<T *>(inputs[2]->addr);
auto swap_cache_idx = reinterpret_cast<T *>(outputs[0]->addr);
auto old_emb_idx = reinterpret_cast<T *>(outputs[1]->addr);
for (size_t i = 0; i < batch_size_; ++i) {
if (miss_emb_idx[i] < 0) {
swap_cache_idx[i] = -1;
old_emb_idx[i] = -1;
} else {
T emb_idx = miss_emb_idx[i];
T entry = HashFunc(emb_idx, hashmap_length_);
T tag_count = 1;
// Linear probing: find the first empty slot for the missing key.
while (!hashmap[entry].IsEmpty()) {
entry = (entry + 1) % hashmap_length_;
tag_count++;
}
hashmap[entry].key = emb_idx;
hashmap[entry].step = step_;
hashmap[entry].tag = tag_count;
T tmp_entry = (entry + 1) % hashmap_length_;
// Probe for a victim: the first entry that is neither empty nor in use in the current step.
while (hashmap[tmp_entry].IsEmpty() || hashmap[tmp_entry].IsUsing(step_)) {
tmp_entry = (tmp_entry + 1) % hashmap_length_;
}
// Hand the victim's cache slot to the new key, evict the victim, and close the probe gap.
swap_cache_idx[i] = hashmap[tmp_entry].value;
old_emb_idx[i] = hashmap[tmp_entry].key;
hashmap[entry].value = swap_cache_idx[i];
hashmap[tmp_entry].SetEmpty();
Compress(hashmap, hashmap_length_, tmp_entry);
}
}
}
} // namespace kernel
} // namespace mindspore

View File

@@ -1,87 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_CACHE_SWAP_HASHMAP_CPU_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_CACHE_SWAP_HASHMAP_CPU_KERNEL_H_
#include <vector>
#include <memory>
#include <unordered_map>
#include "backend/kernel_compiler/cpu/cpu_kernel.h"
#include "backend/kernel_compiler/cpu/cpu_kernel_factory.h"
#include "backend/kernel_compiler/cpu/search_cache_idx_cpu_kernel.h"
namespace mindspore {
namespace kernel {
class CacheSwapHashmapCPUKernel : public CPUKernel {
public:
CacheSwapHashmapCPUKernel() = default;
~CacheSwapHashmapCPUKernel() override = default;
void InitKernel(const CNodePtr &kernel_node) override;
bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
const std::vector<AddressPtr> &outputs) override;
template <typename T>
void LaunchKernel(const std::vector<AddressPtr> &inputs, const std::vector<kernel::AddressPtr> &outputs);
private:
size_t batch_size_{1};
size_t hashmap_length_{1};
int64_t step_{0};
TypeId dtype_{kTypeUnknown};
};
MS_REG_CPU_KERNEL(CacheSwapHashmap,
KernelAttr()
.AddInputAttr(kNumberTypeInt32)
.AddInputAttr(kNumberTypeInt32)
.AddInputAttr(kNumberTypeInt32)
.AddOutputAttr(kNumberTypeInt32)
.AddOutputAttr(kNumberTypeInt32),
CacheSwapHashmapCPUKernel);
MS_REG_CPU_KERNEL(CacheSwapHashmap,
KernelAttr()
.AddInputAttr(kNumberTypeInt64)
.AddInputAttr(kNumberTypeInt64)
.AddInputAttr(kNumberTypeInt32)
.AddOutputAttr(kNumberTypeInt64)
.AddOutputAttr(kNumberTypeInt64),
CacheSwapHashmapCPUKernel);
MS_REG_CPU_KERNEL(CacheSwapHashmap,
KernelAttr()
.AddInputAttr(kNumberTypeInt64)
.AddInputAttr(kNumberTypeInt64)
.AddInputAttr(kNumberTypeInt64)
.AddOutputAttr(kNumberTypeInt64)
.AddOutputAttr(kNumberTypeInt64),
CacheSwapHashmapCPUKernel);
MS_REG_CPU_KERNEL(CacheSwapHashmap,
KernelAttr()
.AddInputAttr(kNumberTypeInt32)
.AddInputAttr(kNumberTypeInt32)
.AddInputAttr(kNumberTypeInt64)
.AddOutputAttr(kNumberTypeInt32)
.AddOutputAttr(kNumberTypeInt32),
CacheSwapHashmapCPUKernel);
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_CACHE_SWAP_HASHMAP_CPU_KERNEL_H_

View File

@@ -1,108 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/cpu/search_cache_idx_cpu_kernel.h"
#include <string>
#include "runtime/device/cpu/cpu_device_address.h"
namespace mindspore {
namespace kernel {
void SearchCacheIdxCPUKernel::InitKernel(const CNodePtr &kernel_node) {
auto hashmap_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
auto emb_idx_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);
if (hashmap_shape.size() != 2) {
MS_LOG(EXCEPTION) << "Dimension of HashMap must be 2, (n, 4)";
}
for (size_t i = 0; i < emb_idx_shape.size(); ++i) {
batch_size_ *= emb_idx_shape[i];
}
hashmap_length_ = hashmap_shape[0];
if (hashmap_length_ <= 0) {
MS_LOG(EXCEPTION) << "Hashmap length must > 0";
}
dtype_ = AnfAlgo::GetPrevNodeOutputInferDataType(kernel_node, 0);
}
bool SearchCacheIdxCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inputs,
const std::vector<kernel::AddressPtr> & /*workspace*/,
const std::vector<kernel::AddressPtr> &outputs) {
if (dtype_ == kNumberTypeInt32) {
LaunchKernel<int>(inputs, outputs);
} else if (dtype_ == kNumberTypeInt64) {
LaunchKernel<int64_t>(inputs, outputs);
} else {
MS_LOG(ERROR) << "Only support int32, int64";
return false;
}
return true;
}
template <typename T>
void SearchCacheIdxCPUKernel::LaunchKernel(const std::vector<AddressPtr> &inputs,
const std::vector<kernel::AddressPtr> &outputs) {
HashmapEntry<T> *hashmap = reinterpret_cast<HashmapEntry<T> *>(inputs[0]->addr);
auto input_indices = reinterpret_cast<T *>(inputs[1]->addr);
step_ = *reinterpret_cast<T *>(inputs[2]->addr);
emb_max_num = *reinterpret_cast<T *>(inputs[3]->addr);
cache_max_num = *reinterpret_cast<T *>(inputs[4]->addr);
auto output_cache_idx = reinterpret_cast<T *>(outputs[0]->addr);
auto output_miss_idx = reinterpret_cast<T *>(outputs[1]->addr);
auto output_miss_emb_idx = reinterpret_cast<T *>(outputs[2]->addr);
float total_count = 0;
int count_size = 0;
float hit_count = 0;
for (size_t i = 0; i < batch_size_; ++i) {
if (input_indices[i] == emb_max_num) {
output_miss_idx[i] = -1;
output_cache_idx[i] = cache_max_num;
output_miss_emb_idx[i] = -1;
continue;
}
T key = input_indices[i];
T tmp_entry = HashFunc(key, hashmap_length_);
int count = 1;
count_size += 1;
// Linear probing: stop at the key's entry, or at the first empty slot (a miss).
while (!hashmap[tmp_entry].IsEmpty() && !hashmap[tmp_entry].IsKey(key)) {
tmp_entry = (tmp_entry + 1) % hashmap_length_;
count += 1;
}
total_count += count;
if (hashmap[tmp_entry].IsEmpty()) {
output_miss_idx[i] = i;
output_miss_emb_idx[i] = key;
output_cache_idx[i] = -1;
} else {
hit_count += 1;
output_miss_idx[i] = -1;
output_cache_idx[i] = hashmap[tmp_entry].value;
hashmap[tmp_entry].step = step_;
output_miss_emb_idx[i] = -1;
}
}
if (count_size != 0) {
MS_LOG(INFO) << "avg search count: " << total_count / count_size;
MS_LOG(INFO) << "cache hit rate: " << hit_count / count_size;
}
}
} // namespace kernel
} // namespace mindspore

View File

@@ -1,138 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_SEARCH_CACHE_IDX_CPU_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_SEARCH_CACHE_IDX_CPU_KERNEL_H_
#include <math.h>
#include <vector>
#include <memory>
#include <unordered_map>
#include "backend/kernel_compiler/cpu/cpu_kernel.h"
#include "backend/kernel_compiler/cpu/cpu_kernel_factory.h"
#define NULLTAG 0
namespace mindspore {
namespace kernel {
template <typename T>
struct HashmapEntry {
T key;
T value;
T step;
T tag;
bool IsEmpty() { return this->tag == NULLTAG; }
bool IsUsing(const T &train_step) { return this->step >= (train_step - 1); }
bool IsKey(const T &emb_idx) { return this->key == emb_idx; }
void SetEmpty() { this->tag = NULLTAG; }
};
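// Editor's note: multiplicative (Fibonacci) hashing. 0.6180339 approximates the golden-ratio
// conjugate (sqrt(5) - 1) / 2; scaling the fractional part of key * 0.6180339 by the table
// size m spreads consecutive keys across the table, which is why the op docstring notes that
// this choice avoids data clustering.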
template <typename T>
T HashFunc(const T &key, const size_t &m) {
return (T)(((0.6180339 * key) - floor(0.6180339 * key)) * m);
}
class SearchCacheIdxCPUKernel : public CPUKernel {
public:
SearchCacheIdxCPUKernel() = default;
~SearchCacheIdxCPUKernel() override = default;
void InitKernel(const CNodePtr &kernel_node) override;
bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
const std::vector<AddressPtr> &outputs) override;
template <typename T>
void LaunchKernel(const std::vector<AddressPtr> &inputs, const std::vector<kernel::AddressPtr> &outputs);
private:
size_t batch_size_{1};
size_t hashmap_length_{1};
size_t step_{0};
int64_t emb_max_num = 999999999;
int64_t cache_max_num = 999999999;
TypeId dtype_{kTypeUnknown};
};
MS_REG_CPU_KERNEL(SearchCacheIdx,
KernelAttr()
.AddInputAttr(kNumberTypeInt32)
.AddInputAttr(kNumberTypeInt32)
.AddInputAttr(kNumberTypeInt32)
.AddInputAttr(kNumberTypeInt32)
.AddInputAttr(kNumberTypeInt32)
.AddOutputAttr(kNumberTypeInt32)
.AddOutputAttr(kNumberTypeInt32)
.AddOutputAttr(kNumberTypeInt32),
SearchCacheIdxCPUKernel);
MS_REG_CPU_KERNEL(SearchCacheIdx,
KernelAttr()
.AddInputAttr(kNumberTypeInt64)
.AddInputAttr(kNumberTypeInt64)
.AddInputAttr(kNumberTypeInt64)
.AddInputAttr(kNumberTypeInt64)
.AddInputAttr(kNumberTypeInt64)
.AddOutputAttr(kNumberTypeInt64)
.AddOutputAttr(kNumberTypeInt64)
.AddOutputAttr(kNumberTypeInt64),
SearchCacheIdxCPUKernel);
MS_REG_CPU_KERNEL(SearchCacheIdx,
KernelAttr()
.AddInputAttr(kNumberTypeInt64)
.AddInputAttr(kNumberTypeInt64)
.AddInputAttr(kNumberTypeInt32)
.AddInputAttr(kNumberTypeInt32)
.AddInputAttr(kNumberTypeInt32)
.AddOutputAttr(kNumberTypeInt64)
.AddOutputAttr(kNumberTypeInt64)
.AddOutputAttr(kNumberTypeInt64),
SearchCacheIdxCPUKernel);
MS_REG_CPU_KERNEL(SearchCacheIdx,
KernelAttr()
.AddInputAttr(kNumberTypeInt32)
.AddInputAttr(kNumberTypeInt32)
.AddInputAttr(kNumberTypeInt64)
.AddInputAttr(kNumberTypeInt64)
.AddInputAttr(kNumberTypeInt64)
.AddOutputAttr(kNumberTypeInt32)
.AddOutputAttr(kNumberTypeInt32)
.AddOutputAttr(kNumberTypeInt32),
SearchCacheIdxCPUKernel);
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_SEARCH_CACHE_IDX_CPU_KERNEL_H_

View File

@@ -82,7 +82,8 @@ void UpdateCacheCPUKernel::LaunchKernel(const std::vector<AddressPtr> &inputs,
char *tmp = update + i * one_length_size;
if (indices[i] * one_length_size + one_length_size <= max_size) {
int ret = memcpy_s(input_x + indices[i] * one_length_size, one_length_size, tmp, one_length_size);
int ret =
memcpy_s(input_x + indices[i] * one_length_size, max_size - indices[i] * one_length_size, tmp, one_length_size);
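// The destination bound is now the remaining buffer capacity rather than the copy length,
// so memcpy_s can actually detect overruns (likely the CodeDEX issue this commit fixes).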
if (ret != 0) {
MS_LOG(EXCEPTION) << "memcpy_s error, errorno" << ret;
}

View File

@@ -145,7 +145,7 @@ void MemCopyFromHostToCache(void *hashmap_addr, void *host_addr, void *cache_add
auto cache_data = static_cast<char *>(cache_addr);
auto hashmap_data = static_cast<HashmapEntry<T> *>(hashmap_addr);
// default param type float
size_t param_type_size = 4;
const size_t param_type_size = 4;
size_t single_col_bytes = param_type_size * col_size;
for (size_t i = 0; i < hashmap_size; ++i) {
if (!hashmap_data[i].IsEmpty()) {
@@ -263,8 +263,6 @@ AnfNodePtr InitHashMap(const FuncGraphPtr &func_graph, const int64_t host_size,
AnfNodePtr InitStep(const FuncGraphPtr &func_graph, TypeId type_id) {
std::vector<int64_t> host_shape{1};
auto new_tensor = std::make_shared<tensor::Tensor>(type_id, host_shape);
auto step_data = static_cast<int64_t *>(new_tensor->data_c());
step_data[0] = 0;
ParamInfoPtr new_param_info = std::make_shared<ParamInfo>();
std::string step_name = "cache_step";
new_param_info->set_name(step_name);

View File

@@ -280,7 +280,7 @@ void MemCopyFromCacheToHost(void *hashmap_addr, void *host_addr, void *cache_add
auto cache_data = static_cast<char *>(cache_addr);
auto hashmap_data = static_cast<HashmapEntry<T> *>(hashmap_addr);
// default param type float
size_t param_type_size = 4;
const size_t param_type_size = 4;
size_t single_col_bytes = param_type_size * col_size;
for (size_t i = 0; i < hashmap_size; ++i) {
if (!hashmap_data[i].IsEmpty()) {

View File

@@ -55,8 +55,6 @@ from .standard_normal import _standard_normal_aicpu
from .gamma import _gamma_aicpu
from .poisson import _poisson_aicpu
from .update_cache import _update_cache_aicpu
from .search_cache_idx import _search_cache_idx_aicpu
from .cache_swap_hashmap import _cache_swap_hashmap_aicpu
from .cache_swap_table import _cache_swap_table_aicpu
from .uniform_int import _uniform_int_aicpu
from .uniform_real import _uniform_real_aicpu

View File

@@ -1,43 +0,0 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""CacheSwapHashmap op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType
cache_swap_hashmap_op_info = AiCPURegOp("CacheSwapHashmap") \
.fusion_type("OPAQUE") \
.input(0, "hashmap", "required") \
.input(1, "miss_emb_idx", "required") \
.input(2, "step", "required") \
.output(0, "swap_cache_idx", "required") \
.output(1, "old_emb_idx", "required") \
.dtype_format(DataType.I32_Default, DataType.I32_Default, \
DataType.I32_Default, DataType.I32_Default, \
DataType.I32_Default) \
.dtype_format(DataType.I64_Default, DataType.I64_Default, \
DataType.I32_Default, DataType.I64_Default, \
DataType.I64_Default) \
.dtype_format(DataType.I32_Default, DataType.I32_Default, \
DataType.I64_Default, DataType.I32_Default, \
DataType.I32_Default) \
.dtype_format(DataType.I64_Default, DataType.I64_Default, \
DataType.I64_Default, DataType.I64_Default, \
DataType.I64_Default) \
.get_op_info()
@op_info_register(cache_swap_hashmap_op_info)
def _cache_swap_hashmap_aicpu():
"""CacheSwapHashmap AiCPU register"""
return

View File

@@ -1,51 +0,0 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""EmbeddingLookup op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType
search_cache_idx_op_info = AiCPURegOp("SearchCacheIdx") \
.fusion_type("OPAQUE") \
.input(0, "hashmap", "required") \
.input(1, "indices", "required") \
.input(2, "step", "required") \
.input(3, "emb_max_num", "required") \
.input(4, "cache_max_num", "required") \
.output(0, "cache_idx", "required") \
.output(1, "miss_idx_1d", "required") \
.output(2, "miss_emb_idx", "required") \
.dtype_format(DataType.I32_Default, DataType.I32_Default,
DataType.I32_Default, DataType.I32_Default, DataType.I32_Default,
DataType.I32_Default, DataType.I32_Default,
DataType.I32_Default) \
.dtype_format(DataType.I64_Default, DataType.I64_Default,
DataType.I32_Default, DataType.I32_Default, DataType.I32_Default,
DataType.I64_Default, DataType.I64_Default,
DataType.I64_Default) \
.dtype_format(DataType.I32_Default, DataType.I32_Default,
DataType.I64_Default, DataType.I64_Default, DataType.I64_Default,
DataType.I32_Default, DataType.I32_Default,
DataType.I32_Default) \
.dtype_format(DataType.I64_Default, DataType.I64_Default,
DataType.I64_Default, DataType.I64_Default, DataType.I64_Default,
DataType.I64_Default, DataType.I64_Default,
DataType.I64_Default) \
.get_op_info()
@op_info_register(search_cache_idx_op_info)
def _search_cache_idx_aicpu():
"""SearchCacheIdx AiCPU register"""
return

View File

@@ -95,8 +95,7 @@ from ._thor_ops import (CusBatchMatMul, CusCholeskyTrsm, CusFusedAbsMax1, CusImg
CusMatMulCubeFraczLeftCast, Im2Col, UpdateThorGradient, Cholesky, CholeskyTrsm, DetTriangle,
ProdForceSeA)
from .sparse_ops import (SparseToDense, SparseTensorDenseMatmul)
from ._embedding_cache_ops import (CacheSwapHashmap, SearchCacheIdx, CacheSwapTable, UpdateCache, MapCacheIdx,
SubAndFilter,
from ._embedding_cache_ops import (CacheSwapTable, UpdateCache, MapCacheIdx, SubAndFilter,
MapUniform, DynamicAssign, PadAndShift)
from .quantum_ops import PQC, Evolution
from .sponge_ops import (BondForce, BondEnergy, BondAtomEnergy, BondForceWithAtomEnergy, BondForceWithAtomVirial,

View File

@@ -15,7 +15,7 @@
"""cache_ops"""
from ..._checkparam import Validator as validator
from ...common import dtype as mstype
from ..primitive import PrimitiveWithInfer, prim_attr_register, PrimitiveWithCheck
from ..primitive import prim_attr_register, PrimitiveWithCheck
from .. import signature as sig
@@ -30,7 +30,7 @@ class UpdateCache(PrimitiveWithCheck):
- **updates** (Tensor) - The update values.
Outputs:
- **out** (Tensor) - Returns a [1] Tensor, which is not usefull.
- **out** (Tensor) - Returns a [1] Tensor, which is not useful.
"""
__mindspore_signature__ = (
sig.make_sig('input_x', sig.sig_rw.RW_WRITE,
@@ -101,92 +101,6 @@ class SubAndFilter(PrimitiveWithCheck):
return input_x_dtype
class SearchCacheIdx(PrimitiveWithInfer):
"""
Search the keys of a hashmap, and return the values.
Inputs:
- **hashmap** (Parameter) - A (n, 4) tensor whose columns represent `key, value, step, tag`.
`key, value`: Map the indices of the big table and the cache table.
`step`: The most recent step; when a key is found, its step is updated at the same time.
`step` makes sure the indices used in the last step will not be deleted from the hashmap.
`tag`: Linear probing (`h(k, i) = (h(k) + i) % m`) is used to resolve hash conflicts.
`tag` is the number of linear-probing steps taken for the key; `tag == 0` means the entry is empty.
The hash function is
`((0.6180339 * key) - floor(0.6180339 * key)) * hashmap_length`, chosen to avoid data clustering.
- **indices** (Tensor) - The indices which are keys of hashmap.
- **step** (int) - The current step when searching.
- **emb_max_num** (int) - Max length of big table.
When `indices >= emb_max_num`, the search is skipped and the value is set to `cache_max_num`.
- **cache_max_num** (int) - Max length of cache table.
Outputs:
- **cache_idx** (Tensor) - The searched values; if a search misses, the value is -1.
- **miss_idx** (Tensor) - The positions in `indices` where the search missed.
If the search succeeds, miss_idx[i] = -1.
- **miss_emb_idx** (Tensor) - The values of `indices` where the search missed.
If the search succeeds, miss_emb_idx[i] = -1.
Examples:
>>> hashmap = Parameter(Tensor(np.array([[0, 0, 0, 0],
[10, 5, -5, 1],
[2, 1, -5, 1],
[15, 7, -5, 2],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[3, 3, -5, 1],
[21, 9, -5, 1]], np.int32)), name="hashmap")
>>> indices = Tensor(np.array([10, 2, 25, 5, 3], np.int32))
>>> step, emb_max_num, cache_max_num = 0, 25, 10
>>> search_cache_idx = ops.SearchCacheIdx()
>>> cache_idx, miss_idx, miss_emb_idx = search_cache_idx(hashmap, indices, step, emb_max_num, cache_max_num)
cache_idx : [5, 1, 10, -1, 3]
miss_idx : [-1, -1, -1, 3, -1]
miss_emb_idx : [-1, -1, -1, 5, -1]
hashmap after search : [[0, 0, 0, 0],
[10, 5, 0, 1],
[2, 1, 0, 1],
[15, 7, -5, 2],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[3, 3, 0, 1],
[21, 9, -5, 1]]
"""
__mindspore_signature__ = (
sig.make_sig('hashmap', sig.sig_rw.RW_WRITE,
dtype=sig.sig_dtype.T),
sig.make_sig('indices', dtype=sig.sig_dtype.T),
sig.make_sig('step', dtype=sig.sig_dtype.T),
sig.make_sig('emb_max_num', dtype=sig.sig_dtype.T),
sig.make_sig('cache_max_num', dtype=sig.sig_dtype.T)
)
@prim_attr_register
def __init__(self):
"""init SearchCacheIdx"""
self.init_prim_io_names(inputs=['hashmap', 'indices', 'step', 'emb_max_num', 'cache_max_num'],
outputs=['cache_idx', 'miss_idx', 'miss_emb_idx'])
def infer_shape(self, hashmap_shape, indices_shape, step_shape, emb_max_num_shape, cache_max_num_shape):
if len(hashmap_shape) != 2:
raise ValueError("The dimension of 'hashmap' in SearchCacheIdx must be 2, "
"but got %d." % len(hashmap_shape))
out_shape = (indices_shape, indices_shape, indices_shape)
return out_shape
def infer_dtype(self, hashmap_dtype, indices_dtype, step_dtype, emb_max_num_dtype, cache_max_num_dtype):
args = {"hashmap": hashmap_dtype, "indices": indices_dtype}
validator.check_tensors_dtypes_same_and_valid(
args, mstype.int_type, self.name)
out_dtype = (hashmap_dtype, hashmap_dtype, hashmap_dtype)
return out_dtype
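# Editor's sketch (hypothetical helper, not part of the original file): a pure-NumPy
# model of the lookup described in the SearchCacheIdx docstring above. Row layout is
# [key, value, step, tag]; it assumes the CPU kernel's behaviour of skipping the search
# only when a key equals `emb_max_num`.
import math
import numpy as np

def search_cache_idx_np(hashmap, indices, step, emb_max_num, cache_max_num):
    n = hashmap.shape[0]
    cache_idx = np.full(indices.shape, -1, indices.dtype)
    miss_idx = np.full(indices.shape, -1, indices.dtype)
    miss_emb_idx = np.full(indices.shape, -1, indices.dtype)
    for i, key in enumerate(indices):
        if key == emb_max_num:  # out-of-range key: skip the search
            cache_idx[i] = cache_max_num
            continue
        entry = int(((0.6180339 * key) - math.floor(0.6180339 * key)) * n)
        while hashmap[entry, 3] != 0 and hashmap[entry, 0] != key:
            entry = (entry + 1) % n  # linear probing
        if hashmap[entry, 3] == 0:  # empty slot: the key is not cached
            miss_idx[i], miss_emb_idx[i] = i, key
        else:  # hit: return the cache index and refresh the step
            cache_idx[i] = hashmap[entry, 1]
            hashmap[entry, 2] = step
    return cache_idx, miss_idx, miss_emb_idx

# On the docstring example this reproduces cache_idx [5, 1, 10, -1, 3],
# miss_idx [-1, -1, -1, 3, -1] and miss_emb_idx [-1, -1, -1, 5, -1].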
class MapUniform(PrimitiveWithCheck):
"""
Map a tensor by using the formula: value = key % `group_num` * `per_group_size` + key // `group_num`.
For example, with `group_num=2` and `per_group_size=3`, key 4 maps to 4 % 2 * 3 + 4 // 2 = 2, and key 5 maps to 5 % 2 * 3 + 5 // 2 = 5.
@@ -227,48 +141,6 @@ class MapUniform(PrimitiveWithCheck):
'group_num', group_num_dtype, [mstype.Int], self.name)
class CacheSwapHashmap(PrimitiveWithInfer):
"""
Delete a hashmap entry, and insert a new key into the hashmap; return the key and value of the deleted entry.
Inputs:
- **hashmap** (Parameter) - Same as in the SearchCacheIdx operation.
- **miss_emb_idx** (Tensor) - The keys to insert; -1 is skipped. It is the `miss_emb_idx` result of SearchCacheIdx.
- **step** (int) - The current step.
Outputs:
- **swap_cache_idx** (Tensor) - The value of the deleted entry; -1 where skipped.
- **old_emb_idx** (Tensor) - The key of the deleted entry; -1 where skipped.
"""
__mindspore_signature__ = (
sig.make_sig('hashmap', sig.sig_rw.RW_WRITE,
dtype=sig.sig_dtype.T),
sig.make_sig('miss_emb_idx', dtype=sig.sig_dtype.T),
sig.make_sig('step', dtype=sig.sig_dtype.T)
)
@prim_attr_register
def __init__(self):
"""init CacheSwapHashmap"""
self.init_prim_io_names(inputs=['hashmap', 'miss_emb_idx', 'step'],
outputs=['swap_cache_idx', 'old_emb_idx'])
def infer_shape(self, hashmap_shape, miss_emb_idx_shape, step_shape):
if len(hashmap_shape) != 2:
raise ValueError("The dimension of 'hashmap' in CacheSwapHashmap must be 2, "
"but got %d." % len(hashmap_shape))
out_shape = (miss_emb_idx_shape, miss_emb_idx_shape)
return out_shape
def infer_dtype(self, hashmap_dtype, miss_emb_idx_dtype, step_dtype):
validator.check_tensor_dtype_valid(
"miss_emb_idx", miss_emb_idx_dtype, mstype.int_type, self.name)
out_dtype = (miss_emb_idx_dtype, miss_emb_idx_dtype)
return out_dtype
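# Editor's sketch (hypothetical helper, not part of the original file): a pure-NumPy
# model of the swap described above, mirroring the CPU kernel: insert each missing key,
# then evict the first entry that is neither empty nor used in the current step. The
# Compress step that closes probe gaps after eviction is omitted for brevity.
import math
import numpy as np

def cache_swap_hashmap_np(hashmap, miss_emb_idx, step):
    n = hashmap.shape[0]
    swap_cache_idx = np.full(miss_emb_idx.shape, -1, miss_emb_idx.dtype)
    old_emb_idx = np.full(miss_emb_idx.shape, -1, miss_emb_idx.dtype)
    for i, key in enumerate(miss_emb_idx):
        if key < 0:  # -1 means this position had no miss
            continue
        entry = int(((0.6180339 * key) - math.floor(0.6180339 * key)) * n)
        tag = 1
        while hashmap[entry, 3] != 0:  # probe to the first empty slot
            entry = (entry + 1) % n
            tag += 1
        hashmap[entry, 0], hashmap[entry, 2], hashmap[entry, 3] = key, step, tag
        victim = (entry + 1) % n
        while hashmap[victim, 3] == 0 or hashmap[victim, 2] >= step - 1:
            victim = (victim + 1) % n  # skip empty and recently used entries
        swap_cache_idx[i], old_emb_idx[i] = hashmap[victim, 1], hashmap[victim, 0]
        hashmap[entry, 1] = swap_cache_idx[i]  # reuse the victim's cache slot
        hashmap[victim, 3] = 0  # delete the victim entry
    return swap_cache_idx, old_emb_idx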
class CacheSwapTable(PrimitiveWithCheck):
"""
Delete a hashmap entry, and insert a new key into the hashmap; return the key and value of the deleted entry.
@@ -396,7 +268,7 @@ class PadAndShift(PrimitiveWithCheck):
Pad a tensor with -1, and shift it by a given length.
Inputs:
- **input_x** (Tensor) - The input Tensor, which will be copyed
- **input_x** (Tensor) - The input Tensor, which will be copied
to `output`.
- **cum_sum_arr** (Tensor) - The last value of cum_sum_arr is
the pad length of output tensor, cum_sum_arr[shift_idx] is

View File

@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import math
import numpy as np
import pytest
@@ -26,55 +25,6 @@ context.set_context(mode=context.GRAPH_MODE,
device_target='CPU', save_graphs=True)
def hash_func(key, length):
return (int)(((0.6180339 * key) - math.floor(0.6180339 * key)) * length)
def init_hashmap(hash_map_length):
key_np = np.array([2, 3, 10, 15, 21], np.int32)
value_np = np.array([1, 3, 5, 7, 9], np.int32)
NULLTAG = 0
INIT_STEP = -5
hashmap_np = np.zeros((hash_map_length, 4), np.int32)
for i, key in enumerate(key_np):
entry = hash_func(key, hash_map_length)
count = 1
while hashmap_np[entry, 3] != NULLTAG and hashmap_np[entry, 0] != key:
count += 1
entry = (entry + 1) % hash_map_length
if hashmap_np[entry, 3] == NULLTAG:
hashmap_np[entry] = [key, value_np[i], INIT_STEP, count]
return hashmap_np
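# For hash_map_length = 10 this reproduces the table in the SearchCacheIdx docstring:
# rows 1, 2, 3, 8 and 9 hold [10, 5, -5, 1], [2, 1, -5, 1], [15, 7, -5, 2],
# [3, 3, -5, 1] and [21, 9, -5, 1], with all other rows zero.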
class SearchCacheIdxNet(nn.Cell):
def __init__(self, hashmap_np):
super().__init__()
self.ops = P.SearchCacheIdx()
self.hashmap = Parameter(Tensor(hashmap_np), name="hashmap")
self.emb_max = 25
self.cache_max = 10
self.step = 0
def construct(self, indices):
return self.ops(self.hashmap, indices, self.step, self.emb_max, self.cache_max)
class CacheSwapHashmapNet(nn.Cell):
def __init__(self, hashmap_np):
super().__init__()
self.net = SearchCacheIdxNet(hashmap_np)
self.ops = P.CacheSwapHashmap()
self.step = 0
self.emb_max = 25
self.cache_max = 10
def construct(self, indices):
_, _, miss_emb_idx = self.net(indices)
return self.ops(self.net.hashmap, miss_emb_idx, self.step)
class UpdateCacheNet(nn.Cell):
def __init__(self, x):
super().__init__()
@@ -86,72 +36,6 @@ class UpdateCacheNet(nn.Cell):
return self.ops(self.x, indices, update, self.max_num)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_search_cache_idx():
hashmap_np = init_hashmap(10)
indices_np = np.array([10, 2, 20, 5, 3], np.int32)
search_cache_idx = SearchCacheIdxNet(hashmap_np)
indices = Tensor(indices_np)
cache_idx, miss_idx, miss_emb_idx = search_cache_idx(indices)
expect_cache_idx = [5, 1, -1, -1, 3]
expect_miss_idx = [-1, -1, 2, 3, -1]
expect_miss_emb_idx = [-1, -1, 20, 5, -1]
hashmap_np_after_ops = [[0, 0, 0, 0],
[10, 5, 0, 1],
[2, 1, 0, 1],
[15, 7, -5, 2],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[3, 3, 0, 1],
[21, 9, -5, 1]]
assert np.allclose(cache_idx.asnumpy(),
np.array(expect_cache_idx, np.int32))
assert np.allclose(miss_idx.asnumpy(), np.array(expect_miss_idx, np.int32))
assert np.allclose(miss_emb_idx.asnumpy(),
np.array(expect_miss_emb_idx, np.int32))
assert np.allclose(search_cache_idx.hashmap.data.asnumpy(),
np.array(hashmap_np_after_ops, np.int32))
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cache_swap_hashmap():
hashmap_np = init_hashmap(10)
indices_np = np.array([10, 2, 20, 5, 3], np.int32)
net = CacheSwapHashmapNet(hashmap_np)
indices = Tensor(indices_np)
swap_cache_idx, old_emb_idx = net(indices)
expect_swap_cache_idx = [-1, -1, 9, 7, -1]
expect_old_emb_idx = [-1, -1, 21, 15, -1]
hashmap_np_after_ops = [[5, 7, 0, 1],
[10, 5, 0, 1],
[2, 1, 0, 1],
[20, 9, 0, 1],
[20, 9, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[3, 3, 0, 1],
[21, 9, -5, 0]]
assert np.allclose(swap_cache_idx.asnumpy(),
np.array(expect_swap_cache_idx, np.int32))
assert np.allclose(old_emb_idx.asnumpy(),
np.array(expect_old_emb_idx, np.int32))
assert np.allclose(net.net.hashmap.data.asnumpy(),
np.array(hashmap_np_after_ops, np.int32))
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard