!37821 [feat] [assistant] [I4XJIC] Add Uniform

Merge pull request !37821 from 桂宁馨/Uniform
This commit is contained in:
i-robot 2022-12-07 12:28:26 +00:00 committed by Gitee
commit 9775d00218
No known key found for this signature in database
GPG Key ID: 173E9B9CA92EEF8F
8 changed files with 314 additions and 14 deletions

View File

@ -0,0 +1,146 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/cpu/kernel/uniform_cpu_kernel.h"
#include <algorithm>
#include <map>
#include <memory>
#include <numeric>
#include <string>
#include <utility>
#include <vector>
#include <cfloat>
#include <cmath>
#include <iostream>
#include <functional>
#include <random>
#include "mindspore/core/ops/uniform.h"
#include "kernel/common_utils.h"
#include "utils/ms_utils.h"
#include "plugin/device/cpu/hal/device/cpu_device_address.h"
#include "plugin/device/cpu/kernel/cpu_kernel.h"
namespace mindspore {
namespace kernel {
namespace {
// Uniform takes exactly one input tensor and produces exactly one output tensor.
constexpr size_t kUniformInputsNum = 1;
constexpr size_t kUniformOutputsNum = 1;
}  // namespace
// Returns a fresh 64-bit random value from a process-wide Mersenne Twister.
// The engine is seeded exactly once from the system entropy source; previously a
// std::random_device was constructed on every call even though it was only
// consumed during the static engine's first initialization.
uint64_t UniformCpuKernelMod::New64() {
  static std::mt19937_64 rng = std::mt19937_64(std::random_device("/dev/urandom")());
  return rng();
}
// (Re)constructs the Philox counter-based generator from a seed/offset pair.
// A (0, 0) pair means "non-deterministic": draw both values from the system RNG.
// The parameters are renamed from seed_/offset_ so they no longer shadow the
// identically named data members, and the uint64_t -> int64_t narrowing from
// New64() is made explicit.
void UniformCpuKernelMod::InitMSPhiloxRandom(int64_t seed, int64_t offset) {
  if (seed == 0 && offset == 0) {
    seed = static_cast<int64_t>(New64());
    offset = static_cast<int64_t>(New64());
  }
  generator_ = random::MSPhiloxRandom(seed, offset);
}
// Converts one 32-bit random draw into a float uniformly distributed in [0, 1).
// Bit trick: keep 23 random mantissa bits, force the exponent to 127 (i.e. 2^0)
// so the assembled IEEE-754 value lies in [1, 2), then subtract 1.0f.
float UniformCpuKernelMod::RandFloat() {
  uint32_t x = GenerateSingle();
  const uint32_t man = x & 0x7fffffu;  // 23 bit mantissa
  const uint32_t exp = static_cast<uint32_t>(127);  // biased exponent for 2^0
  const uint32_t val = (exp << 23) | man;
  float result;
  // memcpy_s performs the bit-level reinterpretation without violating strict aliasing.
  memcpy_s(&result, sizeof(result), &val, sizeof(val));
  return result - 1.0f;
}
// Returns the next 32-bit value from the Philox stream. The generator produces
// kResultElementCount values per invocation, so results are cached in
// unused_results_ and handed out one at a time; a new batch is generated only
// once the cache is exhausted.
uint32_t UniformCpuKernelMod::GenerateSingle() {
  if (used_result_index_ == random::MSPhiloxRandom::kResultElementCount) {
    unused_results_ = generator_();
    used_result_index_ = 0;
  }
  return unused_results_[used_result_index_++];
}
// Initializes the kernel from the Uniform operator: validates the dtype against
// the supported list, caches the from/to/seed/offset attributes, and selects the
// typed launch function.
//
// Fixes: `op` from dynamic_pointer_cast is now null-checked before dereference
// (a mismatched operator type previously crashed on op->name()), and an invalid
// attribute combination (minval > maxval) now fails Init instead of logging an
// error and reporting success.
bool UniformCpuKernelMod::Init(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
                               const std::vector<KernelTensorPtr> &outputs) {
  MS_EXCEPTION_IF_NULL(base_operator);
  auto op = std::dynamic_pointer_cast<ops::Uniform>(base_operator);
  MS_EXCEPTION_IF_NULL(op);
  kernel_name_ = op->name();
  auto kernel_attr = GetKernelAttrFromTensors(inputs, outputs);
  auto [is_match, index] = MatchKernelAttr(kernel_attr, GetOpSupport());
  kernel_ptr_ = std::make_shared<ops::Uniform>(base_operator->GetPrim());
  if (!is_match) {
    MS_LOG(EXCEPTION) << "Uniform does not support this kernel data type: " << kernel_attr;
  }
  from_ = op->get_from();
  to_ = op->get_to();
  seed_ = op->get_seed();
  offset_ = op->get_offset();
  if (from_ > to_) {
    MS_LOG(ERROR) << "For Uniform, 'minval' must <= 'maxval', but got 'minval'=" << from_ << " ,'maxval'=" << to_;
    return false;
  }
  kernel_func_ = func_list_[index].second;
  return true;
}
// Refreshes shape-dependent state after a (possibly dynamic) shape change.
//
// Fix: input_shape_ is cleared before the shape is copied in. The previous code
// appended via std::back_inserter, so every Resize call on a dynamic-shape graph
// accumulated stale dimensions and inflated the element count in LaunchKernel.
int UniformCpuKernelMod::Resize(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
                                const std::vector<KernelTensorPtr> &outputs,
                                const std::map<uint32_t, tensor::TensorPtr> &) {
  int ret = KRET_OK;
  if ((ret = NativeCpuKernelMod::Resize(base_operator, inputs, outputs)) != 0) {
    return ret;
  }
  std::vector<int64_t> input_shape = inputs.at(kIndex0)->GetShapeVector();
  input_shape_.clear();
  std::transform(input_shape.begin(), input_shape.end(), std::back_inserter(input_shape_), LongToSize);
  return ret;
}
// Fills the output tensor with input_elements_ samples drawn uniformly from
// [from_, to_), cast to the output dtype T.
template <typename T>
bool UniformCpuKernelMod::LaunchKernel(const std::vector<kernel::AddressPtr> &inputs,
                                       const std::vector<kernel::AddressPtr> &outputs) {
  CHECK_KERNEL_INPUTS_NUM(inputs.size(), kUniformInputsNum, kernel_name_);
  CHECK_KERNEL_OUTPUTS_NUM(outputs.size(), kUniformOutputsNum, kernel_name_);
  // Re-seed the Philox stream so a fixed (seed, offset) pair reproduces the same draws per launch.
  InitMSPhiloxRandom(seed_, offset_);
  auto *output = reinterpret_cast<T *>(outputs[0]->addr);
  int64_t element_count = 1;
  for (const auto dim : input_shape_) {
    element_count *= static_cast<int64_t>(dim);
  }
  input_elements_ = element_count;
  const float span = to_ - from_;
  for (int64_t idx = 0; idx < input_elements_; ++idx) {
    output[idx] = static_cast<T>(RandFloat() * span + from_);
  }
  return true;
}
// Static dispatch table: each supported (input dtype, output dtype) pair maps to
// the LaunchKernel instantiation for that element type. MatchKernelAttr in Init
// selects the row; GetOpSupport exposes the attrs to the framework.
std::vector<std::pair<KernelAttr, UniformCpuKernelMod::UniformFunc>> UniformCpuKernelMod::func_list_ = {
  {KernelAttr().AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeFloat16),
   &UniformCpuKernelMod::LaunchKernel<float16>},
  {KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
   &UniformCpuKernelMod::LaunchKernel<float>},
  {KernelAttr().AddInputAttr(kNumberTypeFloat64).AddOutputAttr(kNumberTypeFloat64),
   &UniformCpuKernelMod::LaunchKernel<double>}};
std::vector<KernelAttr> UniformCpuKernelMod::GetOpSupport() {
std::vector<KernelAttr> support_list;
(void)std::transform(func_list_.begin(), func_list_.end(), std::back_inserter(support_list),
[](const std::pair<KernelAttr, UniformFunc> &pair) { return pair.first; });
return support_list;
}
MS_KERNEL_FACTORY_REG(NativeCpuKernelMod, Uniform, UniformCpuKernelMod);
} // namespace kernel
} // namespace mindspore

View File

@ -0,0 +1,83 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_UNIFORM_CPU_KERNEL_H_
#define MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_UNIFORM_CPU_KERNEL_H_
#include <vector>
#include <map>
#include <string>
#include <cmath>
#include <random>
#include <algorithm>
#include <utility>
#include "plugin/device/cpu/kernel/cpu_kernel.h"
#include "plugin/factory/ms_factory.h"
#include "plugin/device/cpu/kernel/random_util.h"
namespace mindspore {
namespace kernel {
// CPU kernel for the Uniform operator: fills the output tensor (same shape as
// the input) with samples drawn uniformly from [from_, to_) using a Philox
// counter-based generator.
class UniformCpuKernelMod : public NativeCpuKernelMod {
 public:
  UniformCpuKernelMod() = default;
  ~UniformCpuKernelMod() override = default;

  // Validates dtype support and caches the from/to/seed/offset attributes.
  bool Init(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
            const std::vector<KernelTensorPtr> &outputs) override;

  // Refreshes the cached input shape after a (possibly dynamic) shape change.
  int Resize(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
             const std::vector<KernelTensorPtr> &outputs, const std::map<uint32_t, tensor::TensorPtr> &) override;

  // Dispatches to the dtype-specific LaunchKernel selected during Init.
  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
              const std::vector<AddressPtr> &outputs) override {
    return kernel_func_(this, inputs, outputs);
  }

  std::vector<KernelAttr> GetOpSupport() override;

 private:
  bool CheckUniformShape();
  template <typename T>
  bool LaunchKernel(const std::vector<kernel::AddressPtr> &inputs, const std::vector<kernel::AddressPtr> &outputs);
  using UniformFunc = std::function<bool(UniformCpuKernelMod *, const std::vector<kernel::AddressPtr> &,
                                         const std::vector<kernel::AddressPtr> &)>;

 private:
  random::MSPhiloxRandom generator_;  // Philox stream, (re)seeded per launch
  using ResType = random::Array<uint32_t, random::MSPhiloxRandom::kResultElementCount>;
  ResType unused_results_;  // batch of draws cached between GenerateSingle calls
  // Index into unused_results_; starts exhausted so the first call generates a batch.
  size_t used_result_index_ = random::MSPhiloxRandom::kResultElementCount;
  float RandFloat();
  uint64_t New64();
  void InitMSPhiloxRandom(int64_t seed, int64_t offset);
  uint32_t GenerateSingle();
  static std::vector<std::pair<KernelAttr, UniformFunc>> func_list_;  // dtype dispatch table
  UniformFunc kernel_func_;
  std::vector<size_t> input_shape_;
  std::vector<size_t> output_shape_;
  int64_t input_elements_;
  float from_{0.0};   // lower bound of the sampling interval
  float to_{1.0};     // upper bound of the sampling interval
  int64_t seed_{0};   // (0, 0) seed/offset pair means non-deterministic seeding
  int64_t offset_{0};
  BaseOperatorPtr kernel_ptr_{nullptr};
};
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_UNIFORM_CPU_KERNEL_H_

View File

@ -21,14 +21,20 @@
namespace mindspore { namespace mindspore {
namespace ops { namespace ops {
void Uniform::Init(float from, float to) { void Uniform::Init(float from, float to, int64_t seed, int64_t offset) {
this->set_from(from); this->set_from(from);
this->set_to(to); this->set_to(to);
this->set_seed(seed);
this->set_offset(offset);
} }
void Uniform::set_from(float from) { (void)this->AddAttr(kFrom, api::MakeValue(from)); } void Uniform::set_from(float from) { (void)this->AddAttr(kFrom, api::MakeValue(from)); }
void Uniform::set_to(float to) { (void)this->AddAttr(kTo, api::MakeValue(to)); } void Uniform::set_to(float to) { (void)this->AddAttr(kTo, api::MakeValue(to)); }
void Uniform::set_seed(int64_t seed) { (void)this->AddAttr(kSeed, api::MakeValue(seed)); }
void Uniform::set_offset(int64_t offset) { (void)this->AddAttr(kOffset, api::MakeValue(offset)); }
float Uniform::get_from() const { float Uniform::get_from() const {
auto value_ptr = GetAttr(kFrom); auto value_ptr = GetAttr(kFrom);
return GetValue<float>(value_ptr); return GetValue<float>(value_ptr);
@ -39,6 +45,16 @@ float Uniform::get_to() const {
return GetValue<float>(value_ptr); return GetValue<float>(value_ptr);
} }
int64_t Uniform::get_seed() const {
auto value_ptr = GetAttr(kSeed);
return GetValue<int64_t>(value_ptr);
}
int64_t Uniform::get_offset() const {
auto value_ptr = GetAttr(kOffset);
return GetValue<int64_t>(value_ptr);
}
namespace { namespace {
abstract::ShapePtr UniformInferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) { abstract::ShapePtr UniformInferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape]; auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape];

View File

@ -34,11 +34,15 @@ class MIND_API Uniform : public BaseOperator {
public: public:
Uniform() : BaseOperator(kNameUniform) { InitIOName({"x"}, {"y"}); } Uniform() : BaseOperator(kNameUniform) { InitIOName({"x"}, {"y"}); }
/// \brief Method to init the ops attributes. /// \brief Method to init the ops attributes.
void Init(const float from, const float to); void Init(const float from, const float to, const int64_t seed, const int64_t offset);
/// \brief Set from. /// \brief Set from.
void set_from(const float from); void set_from(const float from);
/// \brief Set to. /// \brief Set to.
void set_to(const float to); void set_to(const float to);
/// \brief Set seed.
void set_seed(const int64_t seed);
/// \brief Set offset.
void set_offset(const int64_t offset);
/// \brief Get from. /// \brief Get from.
/// ///
/// \return from. /// \return from.
@ -47,6 +51,14 @@ class MIND_API Uniform : public BaseOperator {
/// ///
/// \return to. /// \return to.
float get_to() const; float get_to() const;
/// \brief Get seed.
///
/// \return seed.
int64_t get_seed() const;
/// \brief Get offset.
///
/// \return offset.
int64_t get_offset() const;
MIND_API_BASE_MEMBER(Uniform); MIND_API_BASE_MEMBER(Uniform);
}; };

View File

@ -0,0 +1,34 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Uniform op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType
uniform_op_info = AiCPURegOp("Uniform") \
.fusion_type("OPAQUE") \
.input(0, "x", "required") \
.output(0, "y", "required") \
.attr("from", "float") \
.attr("to", "float") \
.attr("seed", "int") \
.attr("offset", "int") \
.dtype_format(DataType.F32_Default, DataType.F32_Default) \
.dtype_format(DataType.F64_Default, DataType.F64_Default) \
.get_op_info()
@op_info_register(uniform_op_info)
def _uniform_aicpu():
"""Uniform aicpu register"""
return

View File

@ -5525,7 +5525,7 @@ def tril_indices(row, col, offset=0, dtype=mstype.int64):
``GPU`` ``CPU`` ``GPU`` ``CPU``
Examples: Examples:
>>> net = ops.TrilIndices(4, 3, -1, mindspore.int64) >>> net = ops.tril_indices(4, 3, -1, mindspore.int64)
>>> output = net() >>> output = net()
>>> print(output) >>> print(output)
[[1 2 2 3 3 3] [[1 2 2 3 3 3]
@ -5570,7 +5570,7 @@ def triu_indices(row, col, offset=0, dtype=mstype.int64):
``GPU`` ``CPU`` ``GPU`` ``CPU``
Examples: Examples:
>>> net = ops.TriuIndices(5, 4, 2, mindspore.int64) >>> net = ops.triu_indices(5, 4, 2, mindspore.int64)
>>> output = net() >>> output = net()
>>> print(output) >>> print(output)
[[0 0 1] [[0 0 1]

View File

@ -1005,39 +1005,43 @@ class Uniform(Primitive):
Generates random numbers according to the Uniform random number distribution. Generates random numbers according to the Uniform random number distribution.
Args: Args:
min_val(float):must be non-negative. Default: 0.0. minval(float):must be non-negative. Default: 0.0.
max_val(float):must be non-negative. Default: 1.0. maxval(float):must be non-negative. Default: 1.0.
Inputs: Inputs:
- **x** (Tensor) - The x of random tensor to be generated. - **x** (Tensor) - The x of random tensor to be generated.
Only constant value is allowed, and the data type is float16, float32, float64. Only constant value is allowed, and the data type is float16, float32, float64.
Raises: Raises:
TypeError: If `min_val` or `max_val` is not a float. TypeError: If `minval` or `maxval` is not a float.
TypeError: If `x` is not a Tensor. TypeError: If `x` is not a Tensor.
ValueError: If `minval` is larger than `maxval`.
Outputs: Outputs:
- **output** (Tensor) - With the same type and shape as the 'x'. - **output** (Tensor) - With the same type and shape as the 'x'.
Supported Platforms: Supported Platforms:
``GPU`` ``GPU`` ``CPU``
Examples: Examples:
>>> x = Tensor(np.random.randn(3,4), mstype.float64) >>> x = Tensor(np.random.randn(3,4), mstype.float64)
>>> uniform = Uniform(min_val=1.0, max_val=2.0) >>> uniform = Uniform(minval=1.0, maxval=2.0)
>>> y = uniform(x) >>> y = uniform(x)
>>> print(y.shape) >>> print(y.shape)
(3, 4) (3, 4)
""" """
@prim_attr_register @prim_attr_register
def __init__(self, min_val=0, max_val=1): def __init__(self, minval=0., maxval=1., seed=0, offset=0):
"""Initialize Uniform""" """Initialize Uniform"""
self.init_prim_io_names(inputs=['x'], outputs=['y']) self.init_prim_io_names(inputs=['x'], outputs=['y'])
self.add_prim_attr("from", 0.0) self.add_prim_attr("from", minval)
self.add_prim_attr("to", 1.0) self.add_prim_attr("to", maxval)
Validator.check_non_negative_float(min_val, "from", self.name) Validator.check_value_type('seed', seed, [int], self.name)
Validator.check_non_negative_float(max_val, "to", self.name) Validator.check_value_type('offset', offset, [int], self.name)
Validator.check('minval', minval, 'maxval', maxval, Rel.LE, self.name)
Validator.check_non_negative_float(minval, "minval", self.name)
Validator.check_non_negative_float(maxval, "maxval", self.name)
class RandpermV2(Primitive): class RandpermV2(Primitive):

View File

@ -82,6 +82,7 @@ from mindspore.ops.operations.array_ops import SegmentProd
from mindspore.ops.operations.array_ops import ScatterAddWithAxis from mindspore.ops.operations.array_ops import ScatterAddWithAxis
from mindspore.ops.operations.array_ops import ConcatOffsetV1 from mindspore.ops.operations.array_ops import ConcatOffsetV1
from mindspore.ops.operations.random_ops import NonDeterministicInts from mindspore.ops.operations.random_ops import NonDeterministicInts
from mindspore.ops.operations.random_ops import Uniform
from mindspore.ops.operations.random_ops import TruncatedNormal from mindspore.ops.operations.random_ops import TruncatedNormal
from mindspore.ops.operations.random_ops import MultinomialWithReplacement from mindspore.ops.operations.random_ops import MultinomialWithReplacement
from mindspore.ops.operations.random_ops import ParameterizedTruncatedNormal from mindspore.ops.operations.random_ops import ParameterizedTruncatedNormal
@ -4310,6 +4311,10 @@ test_case_other_ops = [
'block': NonDeterministicInts(dtype=mstype.int32), 'block': NonDeterministicInts(dtype=mstype.int32),
'desc_inputs': [Tensor(np.array([2, 2]), mstype.int32)], 'desc_inputs': [Tensor(np.array([2, 2]), mstype.int32)],
'skip': ['backward']}), 'skip': ['backward']}),
('UniformOps', {
'block': Uniform(minval=0., maxval=1., seed=1, offset=1),
'desc_inputs': [Tensor(np.array([2, 2]), mstype.float32)],
'skip': ['backward']}),
('TruncatedNormal', { ('TruncatedNormal', {
'block': TruncatedNormal(dtype=mstype.float32, seed=1, seed2=1), 'block': TruncatedNormal(dtype=mstype.float32, seed=1, seed2=1),
'desc_inputs': [Tensor(np.array([2, 2]), mstype.int32)], 'desc_inputs': [Tensor(np.array([2, 2]), mstype.int32)],