Modify C++ kernel error messages for clarity and consistency.

This commit is contained in:
huchunmei 2021-12-06 14:20:14 +08:00
parent bf0142ae4b
commit 1023c7a5e2
21 changed files with 87 additions and 52 deletions

View File

@ -45,11 +45,11 @@ bool check_validation(const std::vector<size_t> &shape, const size_t num_before_
size_t output_num = num_before_axis * num_after_axis;
size_t output_size = output_num * sizeof(int);
if (inputs[0]->size != input_size) {
MS_LOG(EXCEPTION) << "For '" << kKernelName << "', the type of the first input should be equal to " << input_size
MS_LOG(EXCEPTION) << "For '" << kKernelName << "', the memory size of 'input_x' should be equal to " << input_size
<< ", but got the memory size is " << inputs[0]->size;
}
if (outputs[0]->size != output_size) {
MS_LOG(EXCEPTION) << "For '" << kKernelName << "', the type of the first output should be equal to " << output_size
MS_LOG(EXCEPTION) << "For '" << kKernelName << "', the memory size of output should be equal to " << output_size
<< ", but got the memory size is " << outputs[0]->size;
}
return true;

View File

@ -45,15 +45,15 @@ bool check_validation(const std::vector<size_t> &shape, const size_t num_before_
size_t out0_size = output_num * sizeof(int);
size_t out1_size = output_num * data_size;
if (inputs[0]->size != input_size) {
MS_LOG(EXCEPTION) << "For '" << kKernelName << "', the type of 'input_x' should be equal to " << input_size
MS_LOG(EXCEPTION) << "For '" << kKernelName << "', the memory size of 'input_x' should be " << input_size
<< ", but got the memory size is " << inputs[0]->size;
}
if (outputs[0]->size != out0_size) {
MS_LOG(EXCEPTION) << "For '" << kKernelName << "', the type of the 1st output should be equal to " << out0_size
MS_LOG(EXCEPTION) << "For '" << kKernelName << "', the memory size of the 1st output should be " << out0_size
<< ", but got the memory size is " << outputs[0]->size;
}
if (outputs[1]->size != out1_size) {
MS_LOG(EXCEPTION) << "For '" << kKernelName << "', the type of the 2nd output should be equal to " << out1_size
MS_LOG(EXCEPTION) << "For '" << kKernelName << "', the memory size of the 2nd output should be " << out1_size
<< ", but got the memory size is " << outputs[1]->size;
}
return true;

View File

@ -46,15 +46,15 @@ bool check_validation(const std::vector<size_t> &shape, const size_t num_before_
size_t out0_size = output_num * sizeof(int);
size_t out1_size = output_num * data_size;
if (inputs[0]->size != input_size) {
MS_LOG(EXCEPTION) << "For '" << kKernelName << "', the type of 'input_x' should be equal to " << input_size
MS_LOG(EXCEPTION) << "For '" << kKernelName << "', the memory size of 'input_x' should be " << input_size
<< ", but got the memory size is " << inputs[0]->size;
}
if (outputs[0]->size != out0_size) {
MS_LOG(EXCEPTION) << "For '" << kKernelName << "', the type of the 1st output should be equal to " << out0_size
MS_LOG(EXCEPTION) << "For '" << kKernelName << "', the memory size of the 1st output should be " << out0_size
<< ", but got the memory size is " << outputs[0]->size;
}
if (outputs[1]->size != out1_size) {
MS_LOG(EXCEPTION) << "For '" << kKernelName << "', the type of the 2nd output should be equal to " << out1_size
MS_LOG(EXCEPTION) << "For '" << kKernelName << "', the memory size of the 2nd output should be " << out1_size
<< ", but got the memory size is " << outputs[1]->size;
}
return true;

View File

@ -311,8 +311,8 @@ bool ArithmeticSelfCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inpu
LaunchLogicalNot(inputs, outputs);
} else {
MS_LOG(EXCEPTION) << "For '" << kernel_name_
<< "', the type of the first input should be float16, float32, "
"float64, int16, int32, int64, or bool, but got "
<< "', the type of 'x' should be float16, float32, float64, int16, int32, int64, or bool, "
"but got "
<< TypeIdLabel(dtype_);
}
return true;

View File

@ -75,13 +75,13 @@ void CheckValidCPUKernel<T>::CheckParams(const std::vector<AddressPtr> &inputs,
const std::vector<AddressPtr> &outputs) {
// inputs: anchor_box, img_metas
if (inputs.size() != kInputSize) {
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the number of input should be " << kInputSize << ", but got "
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the number of inputs should be " << kInputSize << ", but got "
<< inputs.size();
}
// outputs: valid
if (outputs.size() != kOutputSize) {
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the number of output should be " << kOutputSize << ", but got "
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the number of outputs should be " << kOutputSize << ", but got "
<< outputs.size();
}
if (outputs[0]->size / sizeof(bool) != inputs[0]->size / sizeof(T) / COORDINATE) {

View File

@ -39,8 +39,8 @@ void ConcatOffsetCPUKernel<T>::InitKernel(const CNodePtr &kernel_node) {
}
if (axis_ >= input_1_shape.size()) {
MS_LOG(EXCEPTION) << "For '" << kernel_name_
<< "', the 'axis' should be less than the dimension of input, but got 'axis': " << axis_
<< ", and the dimension of the first input: " << input_1_shape.size();
<< "', the 'axis' should be less than the dimension of 'input_x', but got 'axis': " << axis_
<< ", and the dimension of 'input_x': " << input_1_shape.size();
}
}

View File

@ -62,7 +62,7 @@ void CPUKernelFactory::SetKernelAttrs(const std::shared_ptr<kernel::OpInfo> op_i
auto inputs_ptr = op_info->inputs_ptr();
auto outputs_ptr = op_info->outputs_ptr();
if (outputs_ptr.empty()) {
MS_LOG(EXCEPTION) << "op " << op_info->op_name() << " output size is zero.";
MS_LOG(EXCEPTION) << "The output dimension of operator '" << op_info->op_name() << "' should not be zero.";
}
auto first_output_dtypes = outputs_ptr[0]->dtypes();

View File

@ -108,7 +108,7 @@ void DynamicAssignCPUKernel::LaunchKernel(const std::vector<AddressPtr> &inputs,
(void)std::transform(input_x_shape.begin(), input_x_shape.end(), std::back_inserter(shape_tmp), SizeToLong);
tensor->set_shape(shape_tmp);
} else {
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', input x should be a Parameter.";
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', output should be a Parameter.";
}
}
} // namespace kernel

View File

@ -106,52 +106,85 @@ void FusedCastAdamWeightDecayCPUKernel::LaunchFusedCastAdamFp16(const std::vecto
void FusedCastAdamWeightDecayCPUKernel::InitKernel(const CNodePtr &kernel_node) {
MS_EXCEPTION_IF_NULL(kernel_node);
kernel_name_ = AnfAlgo::GetCNodeName(kernel_node);
std::vector<size_t> var_shape = AnfAlgo::GetInputDeviceShape(kernel_node, VAR);
var_dtype_ = AnfAlgo::GetInputDeviceDataType(kernel_node, VAR);
gradient_dtype_ = AnfAlgo::GetInputDeviceDataType(kernel_node, GRAD);
size_t input_num = AnfAlgo::GetInputTensorNum(kernel_node);
if (input_num != kFusedCastAdamWeightDecayInputNum) {
MS_LOG(EXCEPTION) << "Input number is " << input_num << ", but AdamWeightDecay needs 9 inputs.";
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the number of inputs should be "
<< kFusedCastAdamWeightDecayInputNum << ", but got: " << input_num;
}
size_t output_num = AnfAlgo::GetOutputTensorNum(kernel_node);
if (output_num != kFusedCastAdamWeightDecayOutputNum) {
MS_LOG(EXCEPTION) << "Output number is " << output_num << ", but AdamWeightDecay needs 3 outputs.";
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the number of outputs should be "
<< kFusedCastAdamWeightDecayOutputNum << ", but got: " << output_num;
}
elem_num_ = 1;
for (size_t i : var_shape) {
elem_num_ *= i;
}
if (elem_num_ < 1) {
MS_LOG(EXCEPTION) << "Invalid parameter shape";
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the dimension of 'var' should not be zero.";
}
if (gradient_dtype_ != kNumberTypeFloat16) {
MS_LOG(EXCEPTION) << "The dtype of gradient must be float16!";
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the dtype of 'gradient' should be float16, but got "
<< TypeIdToType(gradient_dtype_)->ToString();
}
if (var_dtype_ != kNumberTypeFloat32 && var_dtype_ != kNumberTypeFloat16) {
MS_LOG(EXCEPTION) << "The dtype of parameter must be float32 or float16!";
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the dtype of 'var' should be float16 or float32, but got "
<< TypeIdToType(var_dtype_)->ToString();
}
}
void FusedCastAdamWeightDecayCPUKernel::CheckParam(const std::vector<kernel::AddressPtr> &inputs,
const std::vector<kernel::AddressPtr> &outputs) const {
if (inputs.size() != kFusedCastAdamWeightDecayInputNum) {
MS_LOG(EXCEPTION) << "Input number is " << inputs.size() << ", but AdamWeightDecay needs "
<< kFusedCastAdamWeightDecayInputNum << " inputs.";
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the number of inputs should be "
<< kFusedCastAdamWeightDecayInputNum << ", but got: " << inputs.size();
}
if (outputs.size() != kFusedCastAdamWeightDecayOutputNum) {
MS_LOG(EXCEPTION) << "Output number is " << outputs.size() << ", but AdamWeightDecay needs "
<< kFusedCastAdamWeightDecayOutputNum << " outputs.";
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the number of outputs should be "
<< kFusedCastAdamWeightDecayOutputNum << ", but got: " << outputs.size();
}
size_t elem_size_fp32 = elem_num_ * kSizeFloat32;
size_t elem_size_fp16 = elem_num_ * kSizeFloat16;
size_t var_size = var_dtype_ == kNumberTypeFloat16 ? elem_size_fp16 : elem_size_fp32;
if (inputs[VAR]->size != var_size || inputs[M]->size != elem_size_fp32 || inputs[V]->size != elem_size_fp32 ||
inputs[GRAD]->size != elem_size_fp16) {
MS_LOG(EXCEPTION) << "Error input data size!";
if (inputs[VAR]->size != var_size) {
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the address size of 'var' should be " << var_size
<< ", but got " << inputs[VAR]->size;
}
if (inputs[LR]->size != kSizeFloat32 || inputs[BETA1]->size != kSizeFloat32 || inputs[BETA2]->size != kSizeFloat32 ||
inputs[EPSILON]->size != kSizeFloat32 || inputs[DECAY]->size != kSizeFloat32) {
MS_LOG(EXCEPTION) << "The attribute beta, lr, epsilon and weight decay must be float!";
if (inputs[M]->size != elem_size_fp32) {
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the address size of 'm' should be " << elem_size_fp32
<< ", but got " << inputs[M]->size;
}
if (inputs[V]->size != elem_size_fp32) {
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the address size of 'v' should be " << elem_size_fp32
<< ", but got " << inputs[V]->size;
}
if (inputs[GRAD]->size != elem_size_fp16) {
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the address size of 'gradient' should be " << elem_size_fp16
<< ", but got " << inputs[GRAD]->size;
}
if (inputs[LR]->size != kSizeFloat32) {
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the address size of 'lr' should be " << kSizeFloat32
<< ", but got " << inputs[LR]->size;
}
if (inputs[BETA1]->size != kSizeFloat32) {
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the address size of 'beta1' should be " << kSizeFloat32
<< ", but got " << inputs[BETA1]->size;
}
if (inputs[BETA2]->size != kSizeFloat32) {
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the address size of 'beta2' should be " << kSizeFloat32
<< ", but got " << inputs[BETA2]->size;
}
if (inputs[EPSILON]->size != kSizeFloat32) {
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the address size of 'epsilon' should be " << kSizeFloat32
<< ", but got " << inputs[EPSILON]->size;
}
if (inputs[DECAY]->size != kSizeFloat32) {
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the address size of 'decay' should be " << kSizeFloat32
<< ", but got " << inputs[DECAY]->size;
}
}

View File

@ -37,14 +37,14 @@ void IOUCPUKernel<T>::InitKernel(const CNodePtr &kernel_node) {
constexpr size_t BOX_COORDINATE_INDEX = 1;
if (anchor_boxes_shape.size() != BOX_SHAPE_SIZE || anchor_boxes_shape[BOX_COORDINATE_INDEX] != kBoxCoordinateLen) {
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the dimension of 'anchor_boxes' should be [N, 4], but got: "
<< Vector2Str(anchor_boxes_shape);
MS_LOG(EXCEPTION) << "For '" << kernel_name_
<< "', the shape of 'anchor_boxes' should be [N, 4], but got: " << Vector2Str(anchor_boxes_shape);
}
anchor_boxes_size_ = anchor_boxes_shape[BOX_SIZE_INDEX];
auto gt_boxes_shape = AnfAlgo::GetInputDeviceShape(kernel_node, GT_BOXES);
if (gt_boxes_shape.size() != BOX_SHAPE_SIZE || gt_boxes_shape[BOX_COORDINATE_INDEX] != kBoxCoordinateLen) {
MS_LOG(EXCEPTION) << "For '" << kernel_name_
<< "', the dimension of 'gt_boxes' should be [N, 4], but got: " << Vector2Str(gt_boxes_shape);
<< "', the shape of 'gt_boxes' should be [N, 4], but got: " << Vector2Str(gt_boxes_shape);
}
gt_boxes_size_ = gt_boxes_shape[BOX_SIZE_INDEX];
iou_size_ = anchor_boxes_size_ * gt_boxes_size_;

View File

@ -26,11 +26,11 @@ void IsInfCPUKernel::InitKernel(const CNodePtr &kernelNode) {
kernel_name_ = AnfAlgo::GetCNodeName(kernelNode);
size_t input_num = AnfAlgo::GetInputTensorNum(kernelNode);
if (input_num != 1) {
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the number of input should be 1, but got: " << input_num;
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the number of inputs should be 1, but got: " << input_num;
}
size_t output_num = AnfAlgo::GetOutputTensorNum(kernelNode);
if (output_num != 1) {
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the number of output should be 1, but got: " << output_num;
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the number of outputs should be 1, but got: " << output_num;
}
input_dtype_ = AnfAlgo::GetInputDeviceDataType(kernelNode, 0);

View File

@ -76,7 +76,7 @@ void MapCacheIdxCPUKernel::InitKernel(const CNodePtr &kernel_node) {
node_wpt_ = kernel_node;
auto hashmap_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
if (hashmap_shape.size() != 2) {
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the dimension of 'HashMap' should be 2-D(n, 4), but got "
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the dimension of 'HashMap' should be 2-D, but got "
<< hashmap_shape.size() << "-D.";
}
hashmap_length_ = hashmap_shape[0];

View File

@ -118,7 +118,7 @@ void RandomChoiceWithMaskCPUKernel::InitKernel(const CNodePtr &kernel_node) {
input_dim_size = SizeToInt(dims.size());
if (input_dim_size < 1 || input_dim_size > MAX_INPUT_DIMS) {
MS_LOG(EXCEPTION) << "For '" << kernel_name_
<< "', the dimension of 'input_x ' should be in range [1-D, 5-D], but got " << input_dim_size
<< "', the dimension of 'input_x' should be in range [1-D, 5-D], but got " << input_dim_size
<< "-D.";
}
}

View File

@ -36,7 +36,7 @@ bool ReshapeCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inputs, con
}
CHECK_KERNEL_OUTPUTS_NUM(outputs.size(), kReshapeOutputsNum, kernel_name_);
if (inputs[0]->size != outputs[0]->size) {
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the size of the first input : {" << inputs[0]->size
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the size of 'input_x': {" << inputs[0]->size
<< "} is not equal to the size of the first output: {" << outputs[0]->size << "}";
}
if (inputs[0]->addr == outputs[0]->addr) {

View File

@ -228,7 +228,7 @@ bool StridedSliceCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inputs
const std::vector<kernel::AddressPtr> & /* workspace */,
const std::vector<kernel::AddressPtr> &outputs) {
if (inputs.size() != kStridedSliceInputsNum && inputs.size() != kStridedSliceDynamicInputsNum) {
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the number of input should be " << kStridedSliceInputsNum
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the number of inputs should be " << kStridedSliceInputsNum
<< " or " << kStridedSliceDynamicInputsNum << ", but got " << inputs.size();
}
CHECK_KERNEL_OUTPUTS_NUM(outputs.size(), kStridedSliceOutputsNum, kernel_name_);

View File

@ -30,7 +30,7 @@ template <typename T>
void TopKCPUKernel::LaunchKernel(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspaces,
const std::vector<AddressPtr> &outputs) {
if (inputs.size() != 2 || outputs.size() != 2) {
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the op should have 2 inputs and 2 outputs, but got "
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the operator should have 2 inputs and 2 outputs, but got "
<< inputs.size() << "input(s) and " << outputs.size() << "output(s)";
}
if (inputs[0]->size != outer_size_ * inner_size_ * sizeof(T)) {
@ -45,7 +45,7 @@ void TopKCPUKernel::LaunchKernel(const std::vector<AddressPtr> &inputs, const st
auto output = reinterpret_cast<T *>(outputs[0]->addr);
auto indices = reinterpret_cast<int *>(outputs[1]->addr);
if (k < 1) {
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the 'k' should be greater than 0, but got " << k << ".";
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the 'k' should be greater than 0, but got " << k;
}
size_t k_num = IntToSize(std::min<int>(inner_size_, k));
if (outputs[0]->size != outer_size_ * k_num * sizeof(T)) {

View File

@ -82,15 +82,15 @@ void UniqueCPUKernel::LaunchKernel(const std::vector<AddressPtr> &inputs, const
}
if (inputs.size() < 1) {
MS_LOG(EXCEPTION) << "For '" << kernel_name_
<< "', the number of input should be greater than 0, but got: " << inputs.size();
<< "', the number of inputs should be greater than 0, but got: " << inputs.size();
}
if (workspace.size() < 3) {
MS_LOG(EXCEPTION) << "For '" << kernel_name_
<< "', the number of workspace should be greater than 2, but got: " << workspace.size();
<< "', the number of workspaces should be greater than 2, but got: " << workspace.size();
}
if (outputs.size() < 2) {
MS_LOG(EXCEPTION) << "For '" << kernel_name_
<< "', the number of output should be greater than 1, but got: " << outputs.size();
<< "', the number of outputs should be greater than 1, but got: " << outputs.size();
}
auto params = std::make_shared<UniqueParam<DataType, IndexType>>();
params->input_ = reinterpret_cast<DataType *>(inputs[0]->addr);

View File

@ -65,9 +65,8 @@ bool UnpackCPUKernel<T>::Launch(const std::vector<kernel::AddressPtr> &inputs,
CHECK_KERNEL_INPUTS_NUM(inputs.size(), kUnpackInputsNum, kernel_name_);
if (outputs.size() < kUnpackOutputsMinNum || workspace.size() < kUnpackWorkspaceMinNum) {
MS_LOG(EXCEPTION) << "For '" << kernel_name_
<< "', the number of 'outputs' and 'workspace' should be at least 1, "
"but got the number of 'outputs':"
<< outputs.size() << " and the number of 'workspace':" << workspace.size();
<< "', the number of outputs and workspaces should be at least 1, but got the number of outputs: "
<< outputs.size() << " and the number of workspaces: " << workspace.size();
}
LaunchKernel(inputs, workspace, outputs);
return true;

View File

@ -35,7 +35,7 @@ void UnsortedSegmentSumCPUKernel::InitKernel(const CNodePtr &kernel_node) {
auto output_shape = AnfAlgo::GetOutputInferShape(kernel_node, 0);
if (output_shape.empty()) {
MS_LOG(EXCEPTION) << "For '" << kernel_name_
<< "', the dimension of output should be at least 1, but got empty tensor.";
<< "', the dimension of output should be at least 1, but got shape: " << output_shape;
}
for (size_t i = 0; i < input_shape.size(); ++i) {
unit_num_ *= input_shape[i];

View File

@ -182,8 +182,11 @@ abstract::ShapePtr Conv2dInferShape(const PrimitivePtr &primitive, const std::ve
int64_t group = CheckAttrPositiveInt64(prim_name, primitive->GetAttr("group"), "group");
if ((x_shape[c_axis] != Shape::SHP_ANY) && (w_shape[c_axis] != Shape::SHP_ANY) &&
((x_shape[c_axis] / group) != w_shape[c_axis])) {
MS_LOG(EXCEPTION) << "x_shape[C_in] / group must be equal to w_shape[C_in]: " << w_shape[c_axis] << ", but got "
<< (x_shape[c_axis] / group);
MS_LOG(EXCEPTION) << "For '" << prim_name
<< "', 'C_in' of input 'x' shape divide by parameter 'group' should be "
"equal to 'C_in' of input 'weight' shape: "
<< w_shape[c_axis] << ", but got 'C_in' of input 'x' shape: " << x_shape[c_axis]
<< ", and 'group': " << group;
}
int64_t out_channel = CheckAttrPositiveInt64(prim_name, primitive->GetAttr("out_channel"), "out_channel");
if ((w_shape[n_axis] != Shape::SHP_ANY) && (w_shape[n_axis] != out_channel)) {

View File

@ -55,4 +55,4 @@ def test_lenet5_exception():
net = train_step_with_loss_warp(LeNet5())
with pytest.raises(RuntimeError) as info:
_cell_graph_executor.compile(net, predict, label)
assert "x_shape[C_in] / group must be equal to w_shape[C_in]: " in str(info.value)
assert "'C_in' of input 'x' shape divide by parameter 'group' should be " in str(info.value)