!21908 Add UniformInt & UniformReal kernels on CPU

Merge pull request !21908 from chengang/random_ops
This commit is contained in:
i-robot 2021-08-17 12:15:18 +00:00 committed by Gitee
commit f0a7fa47c4
4 changed files with 213 additions and 13 deletions
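The new CPU kernels are exposed through the existing UniformInt and UniformReal operators. A minimal usage sketch on CPU, modelled on the tests added in this PR (RandomNet is an illustrative cell name; the shape, bounds, and seeds below are arbitrary):

import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common import dtype as mstype
from mindspore.ops import operations as P

context.set_context(mode=context.GRAPH_MODE, device_target="CPU")


class RandomNet(nn.Cell):
    def __init__(self, shape, seed=0, seed2=0):
        super(RandomNet, self).__init__()
        self.shape = shape
        self.min_val = Tensor(0, mstype.int32)
        self.max_val = Tensor(10, mstype.int32)
        # UniformInt samples int32 values from [min_val, max_val); UniformReal samples float32 from [0, 1).
        self.uniformint = P.UniformInt(seed, seed2)
        self.uniformreal = P.UniformReal(seed, seed2)

    def construct(self):
        return self.uniformint(self.shape, self.min_val, self.max_val), self.uniformreal(self.shape)


ints, reals = RandomNet((2, 3), seed=1, seed2=1)()
print(ints.shape, reals.shape)  # (2, 3) (2, 3)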

View File

@ -27,17 +27,7 @@ void StandardNormal(float *output, std::normal_distribution<float> distribution,
  }
}

void LaunchStandardNormal(int seed, int seed2, const std::vector<AddressPtr> &outputs) {
  unsigned int RNG_seed;
  std::random_device rd;
  if (seed2 != 0) {
    RNG_seed = IntToUint(seed2);
  } else if (seed != 0) {
    RNG_seed = IntToUint(seed);
  } else {
    RNG_seed = rd();
  }
void LaunchStandardNormal(unsigned int seed, const std::vector<AddressPtr> &outputs) {
  auto output = reinterpret_cast<float *>(outputs[0]->addr);
  // multithreading
  size_t lens = outputs[0]->size / sizeof(float);
@ -58,7 +48,7 @@ void LaunchStandardNormal(int seed, int seed2, const std::vector<AddressPtr> &ou
  std::normal_distribution<float> distribution;
  while (start < lens) {
    // avoid different threads using the same seed to generate the same random number
    std::default_random_engine random_generator(++RNG_seed);
    std::default_random_engine random_generator(++seed);
    size_t end = (start + once_compute_size) > lens ? lens : (start + once_compute_size);
    threads.emplace_back(std::thread(StandardNormal, output, distribution, random_generator, start, end));
    start += once_compute_size;
@ -68,6 +58,63 @@ void LaunchStandardNormal(int seed, int seed2, const std::vector<AddressPtr> &ou
  }
}

void LaunchUniformInt(unsigned int seed, const std::vector<AddressPtr> &inputs,
                      const std::vector<AddressPtr> &outputs) {
  if (inputs.size() != 3) {
    MS_LOG(EXCEPTION) << "Expected 3 inputs, but got " << inputs.size();
  }
  if (outputs.size() != 1) {
    MS_LOG(EXCEPTION) << "Expected 1 output, but got " << outputs.size();
  }
  // Init min/max values.
  int min_val = reinterpret_cast<int *>(inputs[1]->addr)[0];
  int max_val = reinterpret_cast<int *>(inputs[2]->addr)[0];
  if (max_val <= min_val) {
    MS_LOG(EXCEPTION) << "Invalid min/max values: (" << min_val << "/" << max_val << ")";
  }
  // Init output address.
  auto output = reinterpret_cast<int *>(outputs[0]->addr);
  MS_EXCEPTION_IF_NULL(output);
  // Init sample number.
  size_t num_sample = outputs[0]->size / sizeof(int);
  // Init random int generator over the half-open range [min_val, max_val).
  std::mt19937 gen(seed);
  std::uniform_int_distribution<> distrib(min_val, max_val - 1);
  // Generate random int values.
  for (size_t i = 0; i < num_sample; ++i) {
    output[i] = distrib(gen);
  }
}

void LaunchUniformReal(unsigned int seed, const std::vector<AddressPtr> &inputs,
                       const std::vector<AddressPtr> &outputs) {
  if (inputs.size() != 1) {
    MS_LOG(EXCEPTION) << "Expected 1 input, but got " << inputs.size();
  }
  if (outputs.size() != 1) {
    MS_LOG(EXCEPTION) << "Expected 1 output, but got " << outputs.size();
  }
  // Init output address. The kernel is registered with a float32 output, so write floats.
  auto output = reinterpret_cast<float *>(outputs[0]->addr);
  MS_EXCEPTION_IF_NULL(output);
  // Init sample number.
  size_t num_sample = outputs[0]->size / sizeof(float);
  // Init random real generator over [0, 1).
  std::mt19937 gen(seed);
  std::uniform_real_distribution<float> distrib(0.0, 1.0);
  // Generate random real values.
  for (size_t i = 0; i < num_sample; ++i) {
    output[i] = distrib(gen);
  }
}

void RandomCPUKernel::InitKernel(const CNodePtr &kernel_node) {
  MS_EXCEPTION_IF_NULL(kernel_node);
  std::string kernel_name = AnfAlgo::GetCNodeName(kernel_node);
@ -94,8 +141,22 @@ void RandomCPUKernel::InitKernel(const CNodePtr &kernel_node) {
bool RandomCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inputs, const std::vector<kernel::AddressPtr> &,
                             const std::vector<kernel::AddressPtr> &outputs) {
  unsigned int RNG_seed = 0;
  std::random_device rd;
  if (seed2_ != 0) {
    RNG_seed = IntToUint(seed2_);
  } else if (seed_ != 0) {
    RNG_seed = IntToUint(seed_);
  } else {
    RNG_seed = rd();
  }
  if (random_op_type_ == RANDOM_OP_NORMAL) {
    LaunchStandardNormal(seed_, seed2_, outputs);
    LaunchStandardNormal(RNG_seed, outputs);
  } else if (random_op_type_ == RANDOM_OP_UNIFORM_INT) {
    LaunchUniformInt(RNG_seed, inputs, outputs);
  } else if (random_op_type_ == RANDOM_OP_UNIFORM_REAL) {
    LaunchUniformReal(RNG_seed, inputs, outputs);
  } else {
    MS_LOG(EXCEPTION) << "Random operation " << random_op_type_ << " is not supported.";
  }
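Two details of the launch path above are easy to miss: seed2 takes precedence over seed (a non-deterministic std::random_device draw is used only when both are zero), and UniformInt produces values in the half-open range [min_val, max_val) because the inclusive C++ distribution is constructed with max_val - 1. A small Python sketch of the same selection rule, for illustration only (select_seed is a hypothetical helper, not MindSpore API):

import random

def select_seed(seed, seed2):
    # Mirrors the kernel: seed2 wins, then seed, then a non-deterministic fallback.
    if seed2 != 0:
        return seed2
    if seed != 0:
        return seed
    return random.SystemRandom().randint(1, 2**32 - 1)

rng = random.Random(select_seed(seed=0, seed2=10))
print(rng.randrange(10, 100))  # int in [10, 100), like UniformInt with min=10, max=100
print(rng.random())            # float in [0.0, 1.0), like UniformReal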

View File

@ -45,6 +45,15 @@ class RandomCPUKernel : public CPUKernel {
MS_REG_CPU_KERNEL(StandardNormal, KernelAttr().AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeFloat32),
                  RandomCPUKernel);
MS_REG_CPU_KERNEL(UniformInt,
                  KernelAttr()
                    .AddInputAttr(kNumberTypeInt32)
                    .AddInputAttr(kNumberTypeInt32)
                    .AddInputAttr(kNumberTypeInt32)
                    .AddOutputAttr(kNumberTypeInt32),
                  RandomCPUKernel)
MS_REG_CPU_KERNEL(UniformReal, KernelAttr().AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeFloat32),
                  RandomCPUKernel)
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RANDOM_CPU_KERNEL_H_

View File

@ -0,0 +1,67 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common import dtype as mstype
from mindspore.ops import operations as P
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")


class Net(nn.Cell):
    def __init__(self, shape, seed=0, seed2=0):
        super(Net, self).__init__()
        self.shape = shape
        self.min_val = Tensor(10, mstype.int32)
        self.max_val = Tensor(100, mstype.int32)
        self.seed = seed
        self.seed2 = seed2
        self.uniformint = P.UniformInt(seed, seed2)

    def construct(self):
        return self.uniformint(self.shape, self.min_val, self.max_val)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_net():
    seed = 10
    seed2 = 10
    shape = (5, 6, 8)
    net = Net(shape, seed, seed2)
    output = net()
    assert output.shape == (5, 6, 8)
    outnumpyflatten_1 = output.asnumpy().flatten()

    seed = 0
    seed2 = 10
    shape = (5, 6, 8)
    net = Net(shape, seed, seed2)
    output = net()
    assert output.shape == (5, 6, 8)
    outnumpyflatten_2 = output.asnumpy().flatten()
    # seed2 takes precedence over seed, so both runs use the same seed and must produce identical values
    assert (outnumpyflatten_1 == outnumpyflatten_2).all()

    seed = 0
    seed2 = 0
    shape = (130, 120, 141)
    net = Net(shape, seed, seed2)
    output = net()
    assert output.shape == (130, 120, 141)
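Since min_val/max_val are fixed at 10 and 100 in this test, a value-range assertion would also be a natural addition; a sketch of the extra check (not part of this PR):

out = output.asnumpy()
assert out.min() >= 10
assert out.max() < 100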

View File

@ -0,0 +1,63 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore.ops import operations as P
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")


class Net(nn.Cell):
    def __init__(self, shape, seed=0, seed2=0):
        super(Net, self).__init__()
        self.shape = shape
        self.seed = seed
        self.seed2 = seed2
        self.uniformreal = P.UniformReal(seed, seed2)

    def construct(self):
        return self.uniformreal(self.shape)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_net():
    seed = 10
    seed2 = 10
    shape = (5, 6, 8)
    net = Net(shape, seed, seed2)
    output = net()
    assert output.shape == (5, 6, 8)
    outnumpyflatten_1 = output.asnumpy().flatten()

    seed = 0
    seed2 = 10
    shape = (5, 6, 8)
    net = Net(shape, seed, seed2)
    output = net()
    assert output.shape == (5, 6, 8)
    outnumpyflatten_2 = output.asnumpy().flatten()
    # seed2 takes precedence over seed, so both runs use the same seed and must produce identical values
    assert (outnumpyflatten_1 == outnumpyflatten_2).all()

    seed = 0
    seed2 = 0
    shape = (130, 120, 141)
    net = Net(shape, seed, seed2)
    output = net()
    assert output.shape == (130, 120, 141)
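UniformReal is registered with a float32 output drawn from [0, 1), so an analogous dtype and range check could be appended here; a sketch (not part of this PR):

out = output.asnumpy()
assert str(out.dtype) == "float32"
assert out.min() >= 0.0
assert out.max() < 1.0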