Code warning clean.
parent a4d62ed336
commit 455770a03f
@@ -35,7 +35,7 @@ using Eigen::RowMajor;
 }  // namespace

 template <typename T>
-void MatmulDoubleCpuKernelFunc::ComputeMatMulOutput(T *a_addr, T *b_addr, T *output_addr) {
+void MatmulDoubleCpuKernelFunc::ComputeMatMulOutput(T *a_addr, T *b_addr, T *output_addr) const {
   using MatrixMap = Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>>;
   MatrixMap input0(a_addr, a_row_, a_col_);
   MatrixMap input1(b_addr, b_row_, b_col_);
@@ -59,7 +59,7 @@ class MatmulDoubleCpuKernelFunc : public CpuKernelFunc, private NativeCpuKernelM
   void MatMul(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &outputs);

   template <typename T>
-  void ComputeMatMulOutput(T *a_addr, T *b_addr, T *output_addr);
+  void ComputeMatMulOutput(T *a_addr, T *b_addr, T *output_addr) const;

   size_t batch_{0};
   size_t rank_{0};
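Both hunks above add a const qualifier to ComputeMatMulOutput: the method only reads the stored dimensions (a_row_, a_col_, b_row_, b_col_) and writes through the output pointer, so marking it const documents that and clears the corresponding const-correctness warning. The body itself wraps raw buffers in Eigen::Map views, which gives them matrix semantics without copying. As a rough illustration of that mapping idea, here is a minimal NumPy sketch; the function and buffer names are illustrative stand-ins, not MindSpore code.

import numpy as np

def matmul_from_buffers(a_buf, b_buf, a_rows, a_cols, b_rows, b_cols):
    """Multiply two flat float64 buffers viewed as row-major matrices."""
    # np.frombuffer builds a view over existing memory, much as Eigen::Map
    # wraps a raw T* without owning or copying it.
    a = np.frombuffer(a_buf, dtype=np.float64).reshape(a_rows, a_cols)
    b = np.frombuffer(b_buf, dtype=np.float64).reshape(b_rows, b_cols)
    return a @ b  # analogous to multiplying input0 by input1 into the output map

a_buf = np.arange(6, dtype=np.float64).tobytes()  # a 2x3 matrix, flattened
b_buf = np.ones(12, dtype=np.float64).tobytes()   # a 3x4 matrix, flattened
print(matmul_from_buffers(a_buf, b_buf, 2, 3, 3, 4))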
@@ -197,8 +197,6 @@ class DeviceAddress : public mindspore::DeviceSync {
   // Asynchronously copy device memory to host side.
   virtual bool AsyncDeviceToHost(const ShapeVector &, size_t, TypeId, void *, size_t) const { return true; }

   UserDataPtr user_data() const { return user_data_; }
   void set_user_data(const UserDataPtr &user_data) { user_data_ = user_data; }
   // Free the ptr in user data when the ref count is 0.
   virtual void ClearUserData() {
     if (user_data_ == nullptr) {
@@ -253,9 +251,6 @@ class DeviceAddress : public mindspore::DeviceSync {
   uint32_t device_id_{0};
   bool from_persistent_mem_{false};

   // User data is the extra data required by the kernel launch in addition to device ptr.
   UserDataPtr user_data_{nullptr};

   friend class KernelRuntime;
   friend class MemoryManager;
   friend class mindspore::device::ascend::tasksink::TaskGenerator;
@@ -107,7 +107,7 @@ MapTensor::ExportData MapTensor::ExportDataFromDevice(DeviceSyncPtr device_sync)
   std::vector<size_t> value_shape_tmp{data_size};
   (void)std::transform(value_shape().cbegin(), value_shape().cend(), std::back_inserter(value_shape_tmp), IntToSize);
   auto value_length = abstract::ShapeSize(value_shape_tmp) * abstract::TypeIdSize(value_dtype());
-  // status length:status shape is same as the shape of key
+  // status length: status shape is same as the shape of key
   auto status_length = data_size * abstract::TypeIdSize(kNumberTypeInt);

   ShapeVector new_shape{SizeToInt(data_size)};
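The hunk above only fixes comment spacing, but the surrounding arithmetic is the interesting part: the exported value buffer is laid out as [data_size, *value_shape] and sized as element count times dtype size, while the status buffer holds one int per exported key. A small self-contained sketch of that computation, with plain-Python stand-ins for abstract::ShapeSize and abstract::TypeIdSize (the helper names here are hypothetical):

from math import prod

def export_lengths(data_size, value_shape, value_dtype_size, int_size=4):
    """Return (value_length, status_length) in bytes for exporting
    data_size keys whose values have shape value_shape."""
    # The exported values are laid out as [data_size, *value_shape].
    value_shape_tmp = [data_size] + list(value_shape)
    value_length = prod(value_shape_tmp) * value_dtype_size
    # The status tensor has the same shape as the key tensor: one int per key.
    status_length = data_size * int_size
    return value_length, status_length

print(export_lengths(1000, (8,), 4))  # float32 values of shape (8,): (32000, 4000)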
@@ -2174,6 +2174,30 @@ def exist_tensor(data):
     return False


+def ms_max_one_element(x):
+    """Implementation of `max` which inputs has only one element."""
+    if isinstance(x, Tensor):
+        tensor_shape = F.shape(x)
+        tensor_shape_len = len(tensor_shape)
+        if tensor_shape_len == 0:
+            const_utils.raise_type_error("Cannot iterate over a scalar tensor.")
+        if tensor_shape_len >= 2:
+            const_utils.raise_value_error("The truth value of an array with more than one element is ambiguous.")
+        return x.max()
+    # Deal with Tensor in tuple or list
+    if isinstance(x, (list, tuple)):
+        if len(x) == 0:
+            const_utils.raise_value_error("max() arg is an empty sequence.")
+        tensor_num = get_tensor_num(x)
+        if tensor_num == len(x):
+            return max_tensor(x)
+        if tensor_num != 0:
+            const_utils.raise_type_error("max() cannot contain both tensor and non-tensor type.")
+        if exist_tensor(x):
+            const_utils.raise_type_error("max() cannot support tensor in list or tuple nested now.")
+        return max_(x)
+
+
 def ms_max(*data):
     """Implementation of `max`."""
     len_data = get_max_min_data_len(data)
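The new ms_max_one_element gathers the single-argument dispatch in one place: a 0-D tensor is rejected as non-iterable, a tensor of rank 2 or higher is rejected as ambiguous, a 1-D tensor reduces via x.max(), and a list or tuple is routed by how many tensors it contains. Below is a framework-free sketch of those rules, with numpy.ndarray standing in for Tensor and np.maximum.reduce assumed as a stand-in for max_tensor; the nested-tensor check done by exist_tensor is omitted for brevity.

import numpy as np

def max_one_element_sketch(x):
    if isinstance(x, np.ndarray):  # Tensor stand-in
        if x.ndim == 0:
            raise TypeError("Cannot iterate over a scalar tensor.")
        if x.ndim >= 2:
            raise ValueError("The truth value of an array with more than one element is ambiguous.")
        return x.max()  # 1-D tensor: reduce to its maximum element
    if isinstance(x, (list, tuple)):
        if len(x) == 0:
            raise ValueError("max() arg is an empty sequence.")
        tensor_num = sum(isinstance(e, np.ndarray) for e in x)
        if tensor_num == len(x):
            return np.maximum.reduce(x)  # all tensors: assumed elementwise maximum
        if tensor_num != 0:
            raise TypeError("max() cannot contain both tensor and non-tensor type.")
        return max(x)  # plain sequence: builtin max
    return None  # other inputs fall through, as in the original

print(max_one_element_sketch(np.array([3.0, 1.0, 2.0])))             # 3.0
print(max_one_element_sketch([np.array([1, 5]), np.array([4, 2])]))  # [4 5]
print(max_one_element_sketch((7, 2, 9)))                             # 9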
@@ -2181,26 +2205,7 @@ def ms_max(*data):
         const_utils.raise_type_error("max() requires 1 argument at least.")
     elif len_data == 1:
         x = data[0]
-        if isinstance(x, Tensor):
-            tensor_shape = F.shape(x)
-            tensor_shape_len = len(tensor_shape)
-            if tensor_shape_len == 0:
-                const_utils.raise_type_error("Cannot iterate over a scalar tensor.")
-            if tensor_shape_len >= 2:
-                const_utils.raise_value_error("The truth value of an array with more than one element is ambiguous.")
-            return x.max()
-        # Deal with Tensor in tuple or list
-        if isinstance(x, (list, tuple)):
-            if len(x) == 0:
-                const_utils.raise_value_error("max() arg is an empty sequence.")
-            tensor_num = get_tensor_num(x)
-            if tensor_num == len(x):
-                return max_tensor(x)
-            if tensor_num != 0:
-                const_utils.raise_type_error("max() cannot contain both tensor and non-tensor type.")
-            if exist_tensor(x):
-                const_utils.raise_type_error("max() cannot support tensor in list or tuple nested now.")
-            return max_(x)
+        return ms_max_one_element(x)
     elif len_data >= 2:
         tensor_num = get_tensor_num(data)
         # All inputs is Tensor
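With the helper extracted, the single-argument branch of ms_max collapses from twenty inline lines to one delegation, so the logic can no longer drift between copies (the min path below gets the identical treatment). Continuing the sketch from the previous note, the variadic wrapper then reduces to roughly the following; names are illustrative and numpy again stands in for Tensor.

import numpy as np

def ms_max_sketch(*data):
    if len(data) == 0:
        raise TypeError("max() requires 1 argument at least.")
    if len(data) == 1:
        # Single argument: delegate, mirroring `return ms_max_one_element(x)`.
        # Reuses max_one_element_sketch from the previous note.
        return max_one_element_sketch(data[0])
    if all(isinstance(d, np.ndarray) for d in data):
        return np.maximum.reduce(data)  # all arguments are tensors
    return max(data)  # plain scalars: builtin max

print(ms_max_sketch(3, 8, 5))                             # 8
print(ms_max_sketch(np.array([1, 5]), np.array([4, 2])))  # [4 5]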
@@ -2241,6 +2246,30 @@ def min_list_tuple(seq1, seq2):
     return seq1


+def ms_min_one_element(x):
+    """Implementation of `min` which inputs has only one element."""
+    if isinstance(x, Tensor):
+        tensor_shape = F.shape(x)
+        tensor_shape_len = len(tensor_shape)
+        if tensor_shape_len == 0:
+            const_utils.raise_type_error("Cannot iterate over a scalar tensor.")
+        if tensor_shape_len >= 2:
+            const_utils.raise_value_error("The truth value of an array with more than one element is ambiguous.")
+        return x.min()
+    # Deal with Tensor in tuple or list
+    if isinstance(x, (list, tuple)):
+        if len(x) == 0:
+            const_utils.raise_value_error("min() arg is an empty sequence.")
+        tensor_num = get_tensor_num(x)
+        if tensor_num == len(x):
+            return min_tensor(x)
+        if tensor_num != 0:
+            const_utils.raise_type_error("min() cannot contain both tensor and non-tensor type.")
+        if exist_tensor(x):
+            const_utils.raise_type_error("min() cannot support tensor in list or tuple nested now.")
+        return min_(x)
+
+
 def ms_min(*data):
     """Implementation of `min`."""
     len_data = get_max_min_data_len(data)
@@ -2248,26 +2277,7 @@ def ms_min(*data):
         const_utils.raise_type_error("min() requires 1 argument at least.")
     elif len_data == 1:
         x = data[0]
-        if isinstance(x, Tensor):
-            tensor_shape = F.shape(x)
-            tensor_shape_len = len(tensor_shape)
-            if tensor_shape_len == 0:
-                const_utils.raise_type_error("Cannot iterate over a scalar tensor.")
-            if tensor_shape_len >= 2:
-                const_utils.raise_value_error("The truth value of an array with more than one element is ambiguous.")
-            return x.min()
-        # Deal with Tensor in tuple or list
-        if isinstance(x, (list, tuple)):
-            if len(x) == 0:
-                const_utils.raise_value_error("min() arg is an empty sequence.")
-            tensor_num = get_tensor_num(x)
-            if tensor_num == len(x):
-                return min_tensor(x)
-            if tensor_num != 0:
-                const_utils.raise_type_error("min() cannot contain both tensor and non-tensor type.")
-            if exist_tensor(x):
-                const_utils.raise_type_error("min() cannot support tensor in list or tuple nested now.")
-            return min_(x)
+        return ms_min_one_element(x)
     elif len_data >= 2:
         tensor_num = get_tensor_num(data)
         # All inputs is Tensor