!38315 [MS][CPU/GPU]resize support int32

Merge pull request !38315 from mengyuanli/support_resize_int32
This commit is contained in:
i-robot 2022-07-19 02:37:22 +00:00 committed by Gitee
commit cda1f7fb03
No known key found for this signature in database
GPG Key ID: 173E9B9CA92EEF8F
7 changed files with 165 additions and 104 deletions

View File

@ -189,18 +189,23 @@ struct CachedInterpolation {
float lerp;
};
// Coordinate transform for "align_corners" mode: maps output coordinate new_x
// back into input space so the first and last samples of input and output
// coincide exactly. A degenerate output length of 1 maps everything to 0.
// (Diff artifact removed: the stale pre-merge non-template `float operator()`
// signature made the struct ill-formed alongside the templated one.)
template <typename T>
struct AlignCornersFunc {
  T operator()(const T &new_x, const int &old_length, const int &new_length) const {
    return new_length != 1 ? new_x * (old_length - 1) / (new_length - 1) : 0;
  }
};
// Coordinate transform for "asymmetric" mode: plain proportional scaling
// new_x * old_length / new_length, with no half-pixel centering. Returns 0 for
// a degenerate (zero) output length to avoid division by zero.
// (Diff artifact removed: the stale pre-merge non-template `float operator()`
// signature made the struct ill-formed alongside the templated one.)
template <typename T>
struct AsymmetricFunc {
  T operator()(const T &new_x, const int &old_length, const int &new_length) const {
    return new_length != 0 ? new_x * old_length / new_length : 0;
  }
};
// Coordinate transform for "half_pixel" mode: treats samples as pixel centers,
// so (new_x + 0.5) is scaled and then shifted back by 0.5. Returns 0 when the
// output length is <= 1 (a single output sample maps to the origin).
// The 0.5 literals intentionally stay double so the arithmetic matches the
// original (promotes float operands to double before the final cast back to T).
// (Diff artifact removed: the stale pre-merge non-template `float operator()`
// signature made the struct ill-formed alongside the templated one.)
template <typename T>
struct HalfPixelFunc {
  T operator()(const T &new_x, const int &old_length, const int &new_length) const {
    return new_length > 1 ? (new_x + 0.5) * old_length / new_length - 0.5 : 0;
  }
};

View File

@ -27,21 +27,21 @@ namespace mindspore::kernel {
constexpr auto kResizeLinear1D = "ResizeLinear1D";
constexpr const size_t kResizeLinear1DInputsNum = 2;
constexpr const size_t kResizeLinear1DOutputsNum = 1;
constexpr const size_t kResizeLinear1DNewShapeSize = sizeof(int64_t);
constexpr const size_t kResizeDims = 3;
template <typename T>
void ResizeLinear1DCpuKernelMod::ComputeInterpolationCaches(const size_t out_size, const size_t in_size,
const CoordinateTransformationFunc &func,
CachedInterpolation *interpolation) {
interpolation[out_size].lower = 0;
interpolation[out_size].upper = 0;
const CoordinateTransformationFunc<T> &func,
size_t *interp_lower, size_t *interp_upper,
T *interp_lerp) {
auto task = [&](size_t start, size_t end) {
for (size_t i = start; i < end; ++i) {
const float in = func(i, in_size, out_size);
const float in_floor = std::floor(in);
const float in_ceil = std::ceil(in);
interpolation[i].lower = static_cast<size_t>(in_floor > 0 ? in_floor : 0);
interpolation[i].upper = static_cast<size_t>(in_ceil < static_cast<float>(in_size - 1) ? in_ceil : in_size - 1);
interpolation[i].lerp = in - in_floor;
const T in = func(i, in_size, out_size);
const T in_floor = std::floor(in);
const T in_ceil = std::ceil(in);
interp_lower[i] = static_cast<size_t>(in_floor > 0 ? in_floor : 0);
interp_upper[i] = static_cast<size_t>(in_ceil < static_cast<T>(in_size - 1) ? in_ceil : in_size - 1);
interp_lerp[i] = in - in_floor;
}
};
ParallelLaunchAutoSearch(task, out_size, this, &parallel_search_info_, pool_);
@ -50,27 +50,16 @@ void ResizeLinear1DCpuKernelMod::ComputeInterpolationCaches(const size_t out_siz
template <typename T>
bool ResizeLinear1DCpuKernelMod::LaunchKernel(const std::vector<kernel::AddressPtr> &inputs,
const std::vector<AddressPtr> &,
const std::vector<AddressPtr> &workspace,
const std::vector<kernel::AddressPtr> &outputs) {
CHECK_KERNEL_INPUTS_NUM(inputs.size(), kResizeLinear1DInputsNum, kernel_name_);
CHECK_KERNEL_OUTPUTS_NUM(outputs.size(), kResizeLinear1DOutputsNum, kernel_name_);
T *input = reinterpret_cast<T *>(inputs[kIndex0]->addr);
MS_ERROR_IF_NULL_W_RET_VAL(input, false);
auto size_input = inputs[kIndex1];
int64_t *new_shape_data = reinterpret_cast<int64_t *>(size_input->addr);
MS_ERROR_IF_NULL_W_RET_VAL(new_shape_data, false);
T *output = reinterpret_cast<T *>(outputs[kIndex0]->addr);
MS_ERROR_IF_NULL_W_RET_VAL(output, false);
size_t new_shape_data_size = size_input->size;
if (new_shape_data_size != kResizeLinear1DNewShapeSize) {
MS_LOG(ERROR) << "For '" << kernel_name_ << "', new shape data should be " << kResizeLinear1DNewShapeSize
<< ", but got " << new_shape_data_size;
return false;
}
size_t out_width = LongToSize(new_shape_data[0]);
if (out_width == in_width_) {
if (out_width_ == in_width_) {
auto task = [input, output](size_t start, size_t end) {
for (size_t i = start; i < end; ++i) {
output[i] = input[i];
@ -80,18 +69,24 @@ bool ResizeLinear1DCpuKernelMod::LaunchKernel(const std::vector<kernel::AddressP
return true;
}
std::vector<CachedInterpolation> xs(out_width + 1);
ComputeInterpolationCaches(out_width, in_width_, coordinate_transformation_func_, xs.data());
size_t *interp_lower = reinterpret_cast<size_t *>(workspace[kIndex0]->addr);
MS_ERROR_IF_NULL_W_RET_VAL(interp_lower, false);
size_t *interp_upper = reinterpret_cast<size_t *>(workspace[kIndex1]->addr);
MS_ERROR_IF_NULL_W_RET_VAL(interp_upper, false);
T *interp_lerp = reinterpret_cast<T *>(workspace[kIndex2]->addr);
MS_ERROR_IF_NULL_W_RET_VAL(interp_lerp, false);
auto task = [input, output, xs, out_width, this](size_t start, size_t end) {
auto coordinate_transformation_func = ChooseCoordinateTransformationFunc<T>(coordinate_transformation_mode_);
ComputeInterpolationCaches(out_width_, in_width_, coordinate_transformation_func, interp_lower, interp_upper,
interp_lerp);
auto task = [input, output, interp_lower, interp_upper, interp_lerp, this](size_t start, size_t end) {
for (size_t index = start; index < end; ++index) {
for (size_t w = 0; w < out_width; ++w) {
const size_t xs_lower = xs[w].lower;
const size_t xs_upper = xs[w].upper;
const float xs_lerp = static_cast<float>(xs[w].lerp);
const float left(static_cast<float>(*(input + index * in_width_ + xs_lower)));
const float right(static_cast<float>(*(input + index * in_width_ + xs_upper)));
*(output + index * out_width + w) = static_cast<T>(left + (right - left) * xs_lerp);
for (size_t w = 0; w < out_width_; ++w) {
const T left(static_cast<T>(*(input + index * in_width_ + interp_lower[w])));
const T right(static_cast<T>(*(input + index * in_width_ + interp_upper[w])));
*(output + index * out_width_ + w) = left + (right - left) * interp_lerp[w];
}
}
};
@ -100,24 +95,26 @@ bool ResizeLinear1DCpuKernelMod::LaunchKernel(const std::vector<kernel::AddressP
return true;
}
// Registers one (KernelAttr, launch-function) pair for the CPU kernel:
//   MS_T - dtype of the data input and the output,
//   MS_S - dtype of the "size" input (kNumberTypeInt32 or kNumberTypeInt64),
//   T    - C++ element type instantiating LaunchKernel.
// (Diff artifact removed: the stale pre-merge 2-argument #define of the same
// macro would be an invalid redefinition next to this 3-argument one.)
#define RESIZE_LINEAR_1D_CPU_REG(MS_T, MS_S, T) \
  KernelAttr().AddInputAttr(MS_T).AddInputAttr(MS_S).AddOutputAttr(MS_T), &ResizeLinear1DCpuKernelMod::LaunchKernel<T>
// Returns the static registration table for ResizeLinear1D on CPU.
// Supported combinations: data dtype in {float32, float64} x size dtype in
// {int32, int64} — this is where int32 "size" support is added.
// (Diff artifact removed: stale pre-merge entries that invoked the macro with
// its old 2-argument form, including a dropped float16 registration.)
const std::vector<std::pair<KernelAttr, ResizeLinear1DCpuKernelMod::KernelRunFunc>>
  &ResizeLinear1DCpuKernelMod::GetFuncList() const {
  static const std::vector<std::pair<KernelAttr, ResizeLinear1DCpuKernelMod::KernelRunFunc>> func_list = {
    {RESIZE_LINEAR_1D_CPU_REG(kNumberTypeFloat32, kNumberTypeInt32, float)},
    {RESIZE_LINEAR_1D_CPU_REG(kNumberTypeFloat64, kNumberTypeInt32, double)},
    {RESIZE_LINEAR_1D_CPU_REG(kNumberTypeFloat32, kNumberTypeInt64, float)},
    {RESIZE_LINEAR_1D_CPU_REG(kNumberTypeFloat64, kNumberTypeInt64, double)},
  };
  return func_list;
}
// Selects the coordinate-transformation functor (align_corners / half_pixel /
// asymmetric) for element type T. unordered_map::at throws std::out_of_range
// for an unrecognized mode (Init is expected to have validated it upstream —
// not visible here, so treat that as an assumption to confirm).
// (Diff artifact removed: the stale pre-merge non-template signature and the
// non-template functor map entries.)
template <typename T>
ResizeLinear1DCpuKernelMod::CoordinateTransformationFunc<T>
ResizeLinear1DCpuKernelMod::ChooseCoordinateTransformationFunc(
  CoordinateTransformationMode coordinate_transformation_mode) {
  const std::unordered_map<CoordinateTransformationMode, CoordinateTransformationFunc<T>> coordinate_map{
    {ALIGN_CORNERS, AlignCornersFunc<T>()}, {HALF_PIXEL, HalfPixelFunc<T>()}, {ASYMMETRIC, AsymmetricFunc<T>()}};
  return coordinate_map.at(coordinate_transformation_mode);
}
@ -146,15 +143,24 @@ bool ResizeLinear1DCpuKernelMod::Init(const BaseOperatorPtr &base_operator, cons
return false;
}
coordinate_transformation_func_ = ChooseCoordinateTransformationFunc(coordinate_transformation_mode_);
if (!MatchKernelFunc(base_operator, inputs, outputs)) {
return false;
}
return true;
}
void ResizeLinear1DCpuKernelMod::MallocWorkSpace(const std::vector<KernelTensorPtr> &inputs) {
workspace_size_list_.clear();
workspace_size_list_.push_back(sizeof(size_t) * out_width_);
workspace_size_list_.push_back(sizeof(size_t) * out_width_);
auto input_data_type = inputs[kIndex0]->GetDtype();
if (input_data_type == kNumberTypeFloat32) {
workspace_size_list_.push_back(sizeof(float) * out_width_);
} else if (input_data_type == kNumberTypeFloat64) {
workspace_size_list_.push_back(sizeof(double) * out_width_);
}
}
int ResizeLinear1DCpuKernelMod::Resize(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs,
const std::map<uint32_t, tensor::TensorPtr> &inputsOnHost) {
@ -162,10 +168,22 @@ int ResizeLinear1DCpuKernelMod::Resize(const BaseOperatorPtr &base_operator, con
if ((ret = KernelMod::Resize(base_operator, inputs, outputs, inputsOnHost)) != 0) {
return ret;
}
std::vector<int64_t> shape_ = inputs[kIndex0]->GetShapeVector();
batch_ = LongToSize(shape_[kIndex0]);
channel_ = LongToSize(shape_[kIndex1]);
in_width_ = LongToSize(shape_[kIndex2]);
std::vector<int64_t> input_shape = inputs[kIndex0]->GetShapeVector();
std::vector<int64_t> output_shape = outputs[kIndex0]->GetShapeVector();
if (input_shape.size() != kResizeDims || output_shape.size() != kResizeDims) {
MS_LOG(ERROR) << "For '" << kernel_name_
<< "', the dimension of 'input_x' and the dimension of 'output' should be equal to 3, but got "
<< input_shape.size() << " and " << output_shape.size() << ".";
return KRET_RESIZE_FAILED;
}
batch_ = LongToSize(input_shape[kIndex0]);
channel_ = LongToSize(input_shape[kIndex1]);
in_width_ = LongToSize(input_shape[kIndex2]);
out_width_ = LongToSize(output_shape[kIndex2]);
MallocWorkSpace(inputs);
return KRET_OK;
}

View File

@ -53,26 +53,31 @@ class ResizeLinear1DCpuKernelMod : public NativeCpuKernelMod, public MatchKernel
std::vector<KernelAttr> GetOpSupport() override { return OpSupport(); }
private:
void MallocWorkSpace(const std::vector<KernelTensorPtr> &inputs);
template <typename T>
bool LaunchKernel(const std::vector<kernel::AddressPtr> &inputs, const std::vector<AddressPtr> &,
bool LaunchKernel(const std::vector<kernel::AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
const std::vector<kernel::AddressPtr> &outputs);
enum CoordinateTransformationMode { ALIGN_CORNERS = 0, HALF_PIXEL = 1, ASYMMETRIC = 2, INVALID_MODE = 255 };
using CoordinateTransformationFunc =
std::function<float(const float &new_x, const int &old_length, const int &new_length)>;
template <typename T>
using CoordinateTransformationFunc = std::function<T(const T &new_x, const int &old_length, const int &new_length)>;
CoordinateTransformationFunc ChooseCoordinateTransformationFunc(
template <typename T>
CoordinateTransformationFunc<T> ChooseCoordinateTransformationFunc(
CoordinateTransformationMode coordinate_transformation_mode);
void ComputeInterpolationCaches(const size_t out_size, const size_t in_size, const CoordinateTransformationFunc &func,
CachedInterpolation *interpolation);
template <typename T>
void ComputeInterpolationCaches(const size_t out_size, const size_t in_size,
const CoordinateTransformationFunc<T> &func, size_t *interp_lower,
size_t *interp_upper, T *interp_lerp);
std::string kernel_type_{kUnknown};
size_t batch_{0};
size_t channel_{0};
size_t in_width_{0};
size_t out_width_{0};
CoordinateTransformationMode coordinate_transformation_mode_{ALIGN_CORNERS};
CoordinateTransformationFunc coordinate_transformation_func_;
};
} // namespace mindspore::kernel

View File

@ -28,27 +28,28 @@ constexpr auto kResizeLinear1DGrad = "ResizeLinear1DGrad";
constexpr const size_t kResizeLinear1DGradInputsNum = 2;
constexpr const size_t kResizeLinear1DGradOutputsNum = 1;
template <typename T>
void ResizeLinear1DGradCpuKernelMod::ComputeInterpolationCaches(const size_t out_size, const size_t in_size,
const CoordinateTransformationFunc &func,
CachedInterpolation *interpolation) {
interpolation[out_size].lower = 0;
interpolation[out_size].upper = 0;
const CoordinateTransformationFunc<T> &func,
size_t *interp_lower, size_t *interp_upper,
T *interp_lerp) {
auto task = [&](size_t start, size_t end) {
for (size_t i = start; i < end; ++i) {
const float in = func(i, in_size, out_size);
const float in_floor = std::floor(in);
const float in_ceil = std::ceil(in);
interpolation[i].lower = static_cast<size_t>(in_floor > 0 ? in_floor : 0);
interpolation[i].upper = static_cast<size_t>(in_ceil < static_cast<float>(in_size - 1) ? in_ceil : in_size - 1);
interpolation[i].lerp = in - in_floor;
const T in = func(i, in_size, out_size);
const T in_floor = std::floor(in);
const T in_ceil = std::ceil(in);
interp_lower[i] = static_cast<size_t>(in_floor > 0 ? in_floor : 0);
interp_upper[i] = static_cast<size_t>(in_ceil < static_cast<T>(in_size - 1) ? in_ceil : in_size - 1);
interp_lerp[i] = in - in_floor;
}
};
ParallelLaunchAutoSearch(task, out_size, this, &parallel_search_info_, pool_);
return;
}
template <typename T>
bool ResizeLinear1DGradCpuKernelMod::LaunchKernel(const std::vector<kernel::AddressPtr> &inputs,
const std::vector<AddressPtr> &,
const std::vector<AddressPtr> &workspace,
const std::vector<kernel::AddressPtr> &outputs) {
CHECK_KERNEL_INPUTS_NUM(inputs.size(), kResizeLinear1DGradInputsNum, kernel_name_);
CHECK_KERNEL_OUTPUTS_NUM(outputs.size(), kResizeLinear1DGradOutputsNum, kernel_name_);
@ -71,19 +72,25 @@ bool ResizeLinear1DGradCpuKernelMod::LaunchKernel(const std::vector<kernel::Addr
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', output buffer memset failed.";
}
std::vector<CachedInterpolation> xs(output_width_ + 1);
ComputeInterpolationCaches(output_width_, input_width_, coordinate_transformation_func_, xs.data());
size_t *interp_lower = reinterpret_cast<size_t *>(workspace[kIndex0]->addr);
MS_ERROR_IF_NULL_W_RET_VAL(interp_lower, false);
size_t *interp_upper = reinterpret_cast<size_t *>(workspace[kIndex1]->addr);
MS_ERROR_IF_NULL_W_RET_VAL(interp_upper, false);
T *interp_lerp = reinterpret_cast<T *>(workspace[kIndex2]->addr);
MS_ERROR_IF_NULL_W_RET_VAL(interp_lerp, false);
auto task = [grad_output, grad_input, xs, this](size_t start, size_t end) {
auto coordinate_transformation_func = ChooseCoordinateTransformationFunc<T>(coordinate_transformation_mode_);
ComputeInterpolationCaches(output_width_, input_width_, coordinate_transformation_func, interp_lower, interp_upper,
interp_lerp);
auto task = [grad_output, grad_input, interp_lower, interp_upper, interp_lerp, this](size_t start, size_t end) {
for (size_t index = start; index < end; ++index) {
for (size_t w = 0; w < output_width_; ++w) {
const size_t xs_lower = xs[w].lower;
const size_t xs_upper = xs[w].upper;
const float xs_lerp = static_cast<float>(xs[w].lerp);
*(grad_input + index * input_width_ + xs_lower) +=
static_cast<T>((*(grad_output + index * output_width_ + w)) * static_cast<T>(1 - xs_lerp));
*(grad_input + index * input_width_ + xs_upper) +=
static_cast<T>((*(grad_output + index * output_width_ + w)) * static_cast<T>(xs_lerp));
*(grad_input + index * input_width_ + interp_lower[w]) +=
static_cast<T>((*(grad_output + index * output_width_ + w)) * (1.0 - interp_lerp[w]));
*(grad_input + index * input_width_ + interp_upper[w]) +=
static_cast<T>((*(grad_output + index * output_width_ + w)) * interp_lerp[w]);
}
}
};
@ -99,18 +106,18 @@ bool ResizeLinear1DGradCpuKernelMod::LaunchKernel(const std::vector<kernel::Addr
// Returns the static registration table for ResizeLinear1DGrad on CPU:
// float16, float32 and float64 gradient element types.
const std::vector<std::pair<KernelAttr, ResizeLinear1DGradCpuKernelMod::KernelRunFunc>>
&ResizeLinear1DGradCpuKernelMod::GetFuncList() const {
static const std::vector<std::pair<KernelAttr, ResizeLinear1DGradCpuKernelMod::KernelRunFunc>> func_list = {
// NOTE(review): float16 is registered here, but MallocWorkSpace below only
// pushes the lerp workspace for float32/float64 — confirm the float16 grad
// path actually gets a workspace[kIndex2] buffer.
{RESIZE_LINEAR_1D_GRAD_CPU_REG(kNumberTypeFloat16, float16)},
{RESIZE_LINEAR_1D_GRAD_CPU_REG(kNumberTypeFloat32, float)},
{RESIZE_LINEAR_1D_GRAD_CPU_REG(kNumberTypeFloat64, double)},
};
return func_list;
}
// Selects the coordinate-transformation functor (align_corners / half_pixel /
// asymmetric) for element type T. unordered_map::at throws std::out_of_range
// for an unrecognized mode (Init is expected to have validated it upstream —
// not visible here, so treat that as an assumption to confirm).
// (Diff artifact removed: the stale pre-merge non-template signature and the
// non-template functor map entries.)
template <typename T>
ResizeLinear1DGradCpuKernelMod::CoordinateTransformationFunc<T>
ResizeLinear1DGradCpuKernelMod::ChooseCoordinateTransformationFunc(
  CoordinateTransformationMode coordinate_transformation_mode) {
  const std::unordered_map<CoordinateTransformationMode, CoordinateTransformationFunc<T>> coordinate_map{
    {ALIGN_CORNERS, AlignCornersFunc<T>()}, {HALF_PIXEL, HalfPixelFunc<T>()}, {ASYMMETRIC, AsymmetricFunc<T>()}};
  return coordinate_map.at(coordinate_transformation_mode);
}
@ -141,14 +148,24 @@ bool ResizeLinear1DGradCpuKernelMod::Init(const BaseOperatorPtr &base_operator,
return false;
}
coordinate_transformation_func_ = ChooseCoordinateTransformationFunc(coordinate_transformation_mode_);
if (!MatchKernelFunc(base_operator, inputs, outputs)) {
return false;
}
return true;
}
void ResizeLinear1DGradCpuKernelMod::MallocWorkSpace(const std::vector<KernelTensorPtr> &inputs) {
workspace_size_list_.clear();
workspace_size_list_.push_back(sizeof(size_t) * output_width_);
workspace_size_list_.push_back(sizeof(size_t) * output_width_);
auto input_data_type = inputs[kIndex0]->GetDtype();
if (input_data_type == kNumberTypeFloat32) {
workspace_size_list_.push_back(sizeof(float) * output_width_);
} else if (input_data_type == kNumberTypeFloat64) {
workspace_size_list_.push_back(sizeof(double) * output_width_);
}
}
int ResizeLinear1DGradCpuKernelMod::Resize(const BaseOperatorPtr &base_operator,
const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs,
@ -175,6 +192,7 @@ int ResizeLinear1DGradCpuKernelMod::Resize(const BaseOperatorPtr &base_operator,
return KRET_RESIZE_FAILED;
}
MallocWorkSpace(inputs);
return KRET_OK;
}

View File

@ -54,18 +54,24 @@ class ResizeLinear1DGradCpuKernelMod : public NativeCpuKernelMod,
std::vector<KernelAttr> GetOpSupport() override { return MatchKernelHelper::OpSupport(); }
private:
void MallocWorkSpace(const std::vector<KernelTensorPtr> &inputs);
template <typename T>
bool LaunchKernel(const std::vector<kernel::AddressPtr> &inputs, const std::vector<AddressPtr> &,
bool LaunchKernel(const std::vector<kernel::AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
const std::vector<kernel::AddressPtr> &outputs);
enum CoordinateTransformationMode { ALIGN_CORNERS = 0, HALF_PIXEL = 1, ASYMMETRIC = 2, INVALID_MODE = 255 };
using CoordinateTransformationFunc =
std::function<float(const float &new_x, const int &old_length, const int &new_length)>;
void ComputeInterpolationCaches(const size_t out_size, const size_t in_size, const CoordinateTransformationFunc &func,
CachedInterpolation *interpolation);
template <typename T>
using CoordinateTransformationFunc = std::function<T(const T &new_x, const int &old_length, const int &new_length)>;
CoordinateTransformationFunc ChooseCoordinateTransformationFunc(
template <typename T>
void ComputeInterpolationCaches(const size_t out_size, const size_t in_size,
const CoordinateTransformationFunc<T> &func, size_t *interp_lower,
size_t *interp_upper, T *interp_lerp);
template <typename T>
CoordinateTransformationFunc<T> ChooseCoordinateTransformationFunc(
CoordinateTransformationMode coordinate_transformation_mode);
std::string kernel_type_{kUnknown};
@ -76,7 +82,6 @@ class ResizeLinear1DGradCpuKernelMod : public NativeCpuKernelMod,
size_t input_width_{0};
size_t output_width_{0};
CoordinateTransformationMode coordinate_transformation_mode_{ALIGN_CORNERS};
CoordinateTransformationFunc coordinate_transformation_func_;
};
} // namespace mindspore::kernel

View File

@ -108,15 +108,17 @@ bool ResizeLinear1DGpuKernelMod::LaunchKernel(const std::vector<AddressPtr> &inp
return true;
}
#define RESIZE_LINEAR_1D_GPU_REG(MS_T, T) \
KernelAttr().AddInputAttr(MS_T).AddInputAttr(kNumberTypeInt64).AddOutputAttr(MS_T), \
&ResizeLinear1DGpuKernelMod::LaunchKernel<T>
#define RESIZE_LINEAR_1D_GPU_REG(MS_T, MS_S, T) \
KernelAttr().AddInputAttr(MS_T).AddInputAttr(MS_S).AddOutputAttr(MS_T), &ResizeLinear1DGpuKernelMod::LaunchKernel<T>
std::vector<std::pair<KernelAttr, ResizeLinear1DGpuKernelMod::ResizeLinear1DFunc>>
ResizeLinear1DGpuKernelMod::func_list_ = {
{RESIZE_LINEAR_1D_GPU_REG(kNumberTypeFloat16, half)},
{RESIZE_LINEAR_1D_GPU_REG(kNumberTypeFloat32, float)},
{RESIZE_LINEAR_1D_GPU_REG(kNumberTypeFloat64, double)},
{RESIZE_LINEAR_1D_GPU_REG(kNumberTypeFloat16, kNumberTypeInt32, half)},
{RESIZE_LINEAR_1D_GPU_REG(kNumberTypeFloat32, kNumberTypeInt32, float)},
{RESIZE_LINEAR_1D_GPU_REG(kNumberTypeFloat64, kNumberTypeInt32, double)},
{RESIZE_LINEAR_1D_GPU_REG(kNumberTypeFloat16, kNumberTypeInt64, half)},
{RESIZE_LINEAR_1D_GPU_REG(kNumberTypeFloat32, kNumberTypeInt64, float)},
{RESIZE_LINEAR_1D_GPU_REG(kNumberTypeFloat64, kNumberTypeInt64, double)},
};
std::vector<KernelAttr> ResizeLinear1DGpuKernelMod::GetOpSupport() {

View File

@ -73,13 +73,21 @@ abstract::ShapePtr ResizeLinear1DInferShape(const PrimitivePtr &primitive,
<< shape1_v.size() << "-D";
}
if (size_arg->isa<abstract::AbstractTensor>() && size_arg->BuildValue()->isa<tensor::Tensor>()) {
auto size_shape_ptr = reinterpret_cast<int64_t *>(size_shape_tensor->data_c());
if (size_shape_ptr[kInputIndex0] <= 0) {
MS_EXCEPTION(ValueError) << "The size must be positive , but got " << size_shape_ptr[kInputIndex0];
int64_t out_width = 0;
if (size_shape_tensor->data_type() == kNumberTypeInt32) {
auto size_shape_ptr = reinterpret_cast<int32_t *>(size_shape_tensor->data_c());
out_width = static_cast<int64_t>(size_shape_ptr[kInputIndex0]);
} else if (size_shape_tensor->data_type() == kNumberTypeInt64) {
auto size_shape_ptr = reinterpret_cast<int64_t *>(size_shape_tensor->data_c());
out_width = size_shape_ptr[kInputIndex0];
}
if (out_width <= 0) {
MS_EXCEPTION(ValueError) << "The size must be positive , but got " << out_width;
}
std::vector<int64_t> output_shape = shape0_v;
output_shape.pop_back();
output_shape.push_back(size_shape_ptr[kInputIndex0]);
output_shape.push_back(out_width);
return std::make_shared<abstract::Shape>(output_shape);
} else {
ShapeVector shape_out = shape0_v;